Diffstat (limited to 'tools')
99 files changed, 5196 insertions, 1445 deletions
diff --git a/tools/binman/binman.rst b/tools/binman/binman.rst index 23cbb99b4b0..8f57b6cfc76 100644 --- a/tools/binman/binman.rst +++ b/tools/binman/binman.rst @@ -727,6 +727,13 @@ optional: Note that missing, optional blobs do not produce a non-zero exit code from binman, although it does show a warning about the missing external blob. +insert-template: + This is not strictly speaking an entry property, since it is processed early + in Binman before the entries are read. It is a list of phandles of nodes to + include in the current (target) node. For each node, its subnodes and their + properties are brought into the target node. See Templates_ below for + more information. + The attributes supported for images and sections are described below. Several are similar to those for entries. @@ -831,6 +838,13 @@ write-symbols: binman. This is automatic for certain entry types, e.g. `u-boot-spl`. See binman_syms_ for more information. +no-write-symbols: + Disables symbol writing for this entry. This can be used in entry types + where symbol writing is automatic. For example, if `u-boot-spl` refers to + the `u_boot_any_image_pos` symbol but U-Boot is not available in the image + containing SPL, this can be used to disable the writing. Quite likely this + indicates a bug in your setup. + elf-filename: Sets the file name of a blob's associated ELF file. For example, if the blob is `zephyr.bin` then the ELF file may be `zephyr.elf`. This allows @@ -1165,6 +1179,86 @@ If you are having trouble figuring out what is going on, you can use arch/arm/dts/u-boot.dtsi ... found: "arch/arm/dts/juno-r2-u-boot.dtsi" +Templates +========= + +Sometimes multiple images need to be created which all have a common +part. For example, a board may generate SPI and eMMC images which both include +a FIT. Since the FIT includes many entries, it is tedious to repeat them twice +in the image description. + +Templates provide a simple way to handle this:: + + binman {
+ multiple-images;
+ common_part: template-1 {
+ some-property;
+ fit {
+ ... lots of entries in here
+ };
+
+ text {
+ text = "base image";
+ };
+ };
+
+ spi-image {
+ filename = "image-spi.bin";
+ insert-template = <&common_part>;
+
+ /* things specific to SPI follow */
+ footer {
+ };
+
+ text {
+ text = "SPI image";
+ };
+ };
+
+ mmc-image {
+ filename = "image-mmc.bin";
+ insert-template = <&common_part>;
+
+ /* things specific to MMC follow */
+ footer {
+ };
+
+ text {
+ text = "MMC image";
+ };
+ };
+ };
+
+The template node name must start with 'template', so it is not considered to be
+an image itself.
+
+The mechanism is very simple. For each phandle in the 'insert-template'
+property, the source node is looked up. Then the subnodes of that source node
+are copied into the target node, i.e. the one containing the `insert-template`
+property.
+
+If the target node has a node with the same name as a template, its properties
+override corresponding properties in the template. This allows the template to
+be used as a base, with the node providing updates to the properties as needed.
+The overriding happens recursively.
+
+Template nodes appear first in each node that they are inserted into and
+ordering of template nodes is preserved. Other nodes come afterwards. If a
+template node also appears in the target node, then the template node sets the
+order. Thus the template can be used to set the ordering, even if the target
+node provides all the properties.
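+
+As an illustration, after processing, the `spi-image` node above behaves as
+if it had been written out in full, something like this (a sketch of the
+effective result, not literal binman output)::
+
+ spi-image {
+ filename = "image-spi.bin";
+ some-property;
+
+ fit {
+ ... lots of entries in here
+ };
+
+ text {
+ text = "SPI image";
+ };
+
+ /* things specific to SPI follow */
+ footer {
+ };
+ };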
+In the above example, `fit` and `text` appear
+first in the `spi-image` and `mmc-image` images, followed by `footer`.
+
+Where there are multiple template nodes, they are inserted in that order, so
+the first template node appears first, then the second.
+
+Properties in the template node are inserted into the destination node if they
+do not exist there. In the example above, `some-property` is added to each of
+`spi-image` and `mmc-image`.
+
+Note that template nodes are not removed from the binman description at present.
+
+ Updating an ELF file ==================== diff --git a/tools/binman/bintool.py b/tools/binman/bintool.py index 81629683df6..0b0f56dbbba 100644 --- a/tools/binman/bintool.py +++ b/tools/binman/bintool.py @@ -288,7 +288,7 @@ class Bintool: name = os.path.expanduser(self.name) # Expand paths containing ~ all_args = (name,) + args env = tools.get_env_with_path() - tout.detail(f"bintool: {' '.join(all_args)}") + tout.debug(f"bintool: {' '.join(all_args)}") result = command.run_pipe( [all_args], capture=True, capture_stderr=True, env=env, raise_on_error=False, binary=binary) diff --git a/tools/binman/bintools.rst b/tools/binman/bintools.rst index c30e7eb9ff5..20ee24395af 100644 --- a/tools/binman/bintools.rst +++ b/tools/binman/bintools.rst @@ -155,6 +155,17 @@ Support is provided for fetching this on Debian-like systems, using apt. +Bintool: openssl: openssl tool +------------------------------ + +This bintool supports creating new openssl certificates. + +It also supports fetching a binary openssl. + +Documentation about openssl is at https://www.openssl.org/ + + + Bintool: xz: Compression/decompression using the xz algorithm ------------------------------------------------------------- @@ -183,3 +194,25 @@ Documentation is available via:: +Bintool: fdt_add_pubkey: Add public key to device tree +------------------------------------------------------ + +This bintool supports running `fdt_add_pubkey` in order to add a public +key coming from a certificate to a device-tree. + +Normally signing is done using `mkimage` in the context of `binman sign`. However, +in this process the public key is not added to the stage before u-boot proper. +Using `fdt_add_pubkey` the key can be injected into the SPL independently of +`mkimage`. + + + +Bintool: bootgen: Sign ZynqMP FSBL image +---------------------------------------- + +This bintool supports running `bootgen` in order to sign an SPL for ZynqMP +devices. + +The bintool automatically creates an appropriate input image file (.bif) for +bootgen based on the passed arguments. The output is a bootable, +authenticated `boot.bin` file. diff --git a/tools/binman/btool/fdt_add_pubkey.py b/tools/binman/btool/fdt_add_pubkey.py new file mode 100644 index 00000000000..a50774200c9 --- /dev/null +++ b/tools/binman/btool/fdt_add_pubkey.py @@ -0,0 +1,67 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (C) 2023 Weidmüller Interface GmbH & Co. KG +# Lukas Funke <lukas.funke@weidmueller.com> +# +"""Bintool implementation for fdt_add_pubkey""" + +from binman import bintool + +class Bintoolfdt_add_pubkey(bintool.Bintool): + """Add public key to control dtb (spl or u-boot proper) + + This bintool supports running `fdt_add_pubkey`. + + Normally mkimage adds signature information to the control dtb. However, + binman images are built independently of each other. Thus it is required + to add the public key separately from mkimage.
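+
+    A typical invocation (illustrative only; the algorithm, key directory,
+    key name and dtb path are placeholders) looks like::
+
+        fdt_add_pubkey -a sha256,rsa2048 -k keys -n dev -r conf spl/u-boot-spl.dtb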
+ """ + def __init__(self, name): + super().__init__(name, 'Generate image for U-Boot') + + # pylint: disable=R0913 + def run(self, input_fname, keydir, keyname, required, algo): + """Run fdt_add_pubkey + + Args: + input_fname (str): dtb file to sign + keydir (str): Directory with public key. Optional parameter, + default value: '.' (current directory) + keyname (str): Public key name. Optional parameter, + default value: key + required (str): If present this indicates that the key must be + verified for the image / configuration to be considered valid. + algo (str): Cryptographic algorithm. Optional parameter, + default value: sha1,rsa2048 + """ + args = [] + if algo: + args += ['-a', algo] + if keydir: + args += ['-k', keydir] + if keyname: + args += ['-n', keyname] + if required: + args += ['-r', required] + + args += [ input_fname ] + + return self.run_cmd(*args) + + def fetch(self, method): + """Fetch handler for fdt_add_pubkey + + This installs fdt_add_pubkey using the apt utility. + + Args: + method (FETCH_...): Method to use + + Returns: + True if the file was fetched and now installed, None if a method + other than FETCH_BIN was requested + + Raises: + Valuerror: Fetching could not be completed + """ + if method != bintool.FETCH_BIN: + return None + return self.apt_install('u-boot-tools') diff --git a/tools/binman/btool/openssl.py b/tools/binman/btool/openssl.py index 3a4dbdd6d73..aad3b61ae27 100644 --- a/tools/binman/btool/openssl.py +++ b/tools/binman/btool/openssl.py @@ -15,6 +15,13 @@ import hashlib from binman import bintool from u_boot_pylib import tools + +VALID_SHAS = [256, 384, 512, 224] +SHA_OIDS = {256:'2.16.840.1.101.3.4.2.1', + 384:'2.16.840.1.101.3.4.2.2', + 512:'2.16.840.1.101.3.4.2.3', + 224:'2.16.840.1.101.3.4.2.4'} + class Bintoolopenssl(bintool.Bintool): """openssl tool @@ -74,6 +81,243 @@ imageSize = INTEGER:{len(indata)} '-sha512'] return self.run_cmd(*args) + def x509_cert_sysfw(self, cert_fname, input_fname, key_fname, sw_rev, + config_fname, req_dist_name_dict): + """Create a certificate to be booted by system firmware + + Args: + cert_fname (str): Filename of certificate to create + input_fname (str): Filename containing data to sign + key_fname (str): Filename of .pem file + sw_rev (int): Software revision + config_fname (str): Filename to write fconfig into + req_dist_name_dict (dict): Dictionary containing key-value pairs of + req_distinguished_name section extensions, must contain extensions for + C, ST, L, O, OU, CN and emailAddress + + Returns: + str: Tool output + """ + indata = tools.read_file(input_fname) + hashval = hashlib.sha512(indata).hexdigest() + with open(config_fname, 'w', encoding='utf-8') as outf: + print(f'''[ req ] +distinguished_name = req_distinguished_name +x509_extensions = v3_ca +prompt = no +dirstring_type = nobmp + +[ req_distinguished_name ] +C = {req_dist_name_dict['C']} +ST = {req_dist_name_dict['ST']} +L = {req_dist_name_dict['L']} +O = {req_dist_name_dict['O']} +OU = {req_dist_name_dict['OU']} +CN = {req_dist_name_dict['CN']} +emailAddress = {req_dist_name_dict['emailAddress']} + +[ v3_ca ] +basicConstraints = CA:true +1.3.6.1.4.1.294.1.3 = ASN1:SEQUENCE:swrv +1.3.6.1.4.1.294.1.34 = ASN1:SEQUENCE:sysfw_image_integrity +1.3.6.1.4.1.294.1.35 = ASN1:SEQUENCE:sysfw_image_load + +[ swrv ] +swrv = INTEGER:{sw_rev} + +[ sysfw_image_integrity ] +shaType = OID:2.16.840.1.101.3.4.2.3 +shaValue = FORMAT:HEX,OCT:{hashval} +imageSize = INTEGER:{len(indata)} + +[ sysfw_image_load ] +destAddr = FORMAT:HEX,OCT:00000000 +authInPlace = 
INTEGER:2 +''', file=outf) + args = ['req', '-new', '-x509', '-key', key_fname, '-nodes', + '-outform', 'DER', '-out', cert_fname, '-config', config_fname, + '-sha512'] + return self.run_cmd(*args) + + def x509_cert_rom(self, cert_fname, input_fname, key_fname, sw_rev, + config_fname, req_dist_name_dict, cert_type, bootcore, + bootcore_opts, load_addr, sha): + """Create a certificate + + Args: + cert_fname (str): Filename of certificate to create + input_fname (str): Filename containing data to sign + key_fname (str): Filename of .pem file + sw_rev (int): Software revision + config_fname (str): Filename to write fconfig into + req_dist_name_dict (dict): Dictionary containing key-value pairs of + req_distinguished_name section extensions, must contain extensions for + C, ST, L, O, OU, CN and emailAddress + cert_type (int): Certification type + bootcore (int): Booting core + bootcore_opts (int): Booting core options + load_addr (int): Load address of image + sha (int): Hash function + + Returns: + str: Tool output + """ + indata = tools.read_file(input_fname) + hashval = hashlib.sha512(indata).hexdigest() + with open(config_fname, 'w', encoding='utf-8') as outf: + print(f''' +[ req ] + distinguished_name = req_distinguished_name + x509_extensions = v3_ca + prompt = no + dirstring_type = nobmp + + [ req_distinguished_name ] +C = {req_dist_name_dict['C']} +ST = {req_dist_name_dict['ST']} +L = {req_dist_name_dict['L']} +O = {req_dist_name_dict['O']} +OU = {req_dist_name_dict['OU']} +CN = {req_dist_name_dict['CN']} +emailAddress = {req_dist_name_dict['emailAddress']} + + [ v3_ca ] + basicConstraints = CA:true + 1.3.6.1.4.1.294.1.1 = ASN1:SEQUENCE:boot_seq + 1.3.6.1.4.1.294.1.2 = ASN1:SEQUENCE:image_integrity + 1.3.6.1.4.1.294.1.3 = ASN1:SEQUENCE:swrv +# 1.3.6.1.4.1.294.1.4 = ASN1:SEQUENCE:encryption + 1.3.6.1.4.1.294.1.8 = ASN1:SEQUENCE:debug + + [ boot_seq ] + certType = INTEGER:{cert_type} + bootCore = INTEGER:{bootcore} + bootCoreOpts = INTEGER:{bootcore_opts} + destAddr = FORMAT:HEX,OCT:{load_addr:08x} + imageSize = INTEGER:{len(indata)} + + [ image_integrity ] + shaType = OID:{SHA_OIDS[sha]} + shaValue = FORMAT:HEX,OCT:{hashval} + + [ swrv ] + swrv = INTEGER:{sw_rev} + +# [ encryption ] +# initalVector = FORMAT:HEX,OCT:TEST_IMAGE_ENC_IV +# randomString = FORMAT:HEX,OCT:TEST_IMAGE_ENC_RS +# iterationCnt = INTEGER:TEST_IMAGE_KEY_DERIVE_INDEX +# salt = FORMAT:HEX,OCT:TEST_IMAGE_KEY_DERIVE_SALT + + [ debug ] + debugUID = FORMAT:HEX,OCT:0000000000000000000000000000000000000000000000000000000000000000 + debugType = INTEGER:4 + coreDbgEn = INTEGER:0 + coreDbgSecEn = INTEGER:0 +''', file=outf) + args = ['req', '-new', '-x509', '-key', key_fname, '-nodes', + '-outform', 'DER', '-out', cert_fname, '-config', config_fname, + '-sha512'] + return self.run_cmd(*args) + + def x509_cert_rom_combined(self, cert_fname, input_fname, key_fname, sw_rev, + config_fname, req_dist_name_dict, load_addr, sha, total_size, num_comps, + sysfw_inner_cert_ext_boot_sequence_string, dm_data_ext_boot_sequence_string, + imagesize_sbl, hashval_sbl, load_addr_sysfw, imagesize_sysfw, + hashval_sysfw, load_addr_sysfw_data, imagesize_sysfw_data, + hashval_sysfw_data, sysfw_inner_cert_ext_boot_block, + dm_data_ext_boot_block): + """Create a certificate + + Args: + cert_fname (str): Filename of certificate to create + input_fname (str): Filename containing data to sign + key_fname (str): Filename of .pem file + sw_rev (int): Software revision + config_fname (str): Filename to write fconfig into + req_dist_name_dict (dict): Dictionary containing key-value pairs of
req_distinguished_name section extensions, must contain extensions for + C, ST, L, O, OU, CN and emailAddress + load_addr (int): Load address of the SPL (SBL) image + sha (int): Hash function to be used for signing + total_size (int): Total size of the boot binary + num_comps (int): Number of components included in the image + sysfw_inner_cert_ext_boot_sequence_string (str): Entry for the sysfw + inner certificate in the extended boot sequence, or empty + dm_data_ext_boot_sequence_string (str): Entry for the dm-data binary + in the extended boot sequence, or empty + imagesize_sbl (int): Size of the SBL image + hashval_sbl (str): Hash of the SBL image + load_addr_sysfw (int): Load address of the sysfw binary + imagesize_sysfw (int): Size of the sysfw binary + hashval_sysfw (str): Hash of the sysfw binary + load_addr_sysfw_data (int): Load address of the sysfw-data binary + imagesize_sysfw_data (int): Size of the sysfw-data binary + hashval_sysfw_data (str): Hash of the sysfw-data binary + sysfw_inner_cert_ext_boot_block (str): Config section for the sysfw + inner certificate, or empty + dm_data_ext_boot_block (str): Config section for the dm-data binary, + or empty + + Returns: + str: Tool output + """ + indata = tools.read_file(input_fname) + hashval = hashlib.sha512(indata).hexdigest() + sha_type = SHA_OIDS[sha] + with open(config_fname, 'w', encoding='utf-8') as outf: + print(f''' +[ req ] +distinguished_name = req_distinguished_name +x509_extensions = v3_ca +prompt = no +dirstring_type = nobmp + +[ req_distinguished_name ] +C = {req_dist_name_dict['C']} +ST = {req_dist_name_dict['ST']} +L = {req_dist_name_dict['L']} +O = {req_dist_name_dict['O']} +OU = {req_dist_name_dict['OU']} +CN = {req_dist_name_dict['CN']} +emailAddress = {req_dist_name_dict['emailAddress']} + +[ v3_ca ] +basicConstraints = CA:true +1.3.6.1.4.1.294.1.3=ASN1:SEQUENCE:swrv +1.3.6.1.4.1.294.1.9=ASN1:SEQUENCE:ext_boot_info + +[swrv] +swrv=INTEGER:{sw_rev} + +[ext_boot_info] +extImgSize=INTEGER:{total_size} +numComp=INTEGER:{num_comps} +sbl=SEQUENCE:sbl +sysfw=SEQUENCE:sysfw +sysfw_data=SEQUENCE:sysfw_data +{sysfw_inner_cert_ext_boot_sequence_string} +{dm_data_ext_boot_sequence_string} + +[sbl] +compType = INTEGER:1 +bootCore = INTEGER:16 +compOpts = INTEGER:0 +destAddr = FORMAT:HEX,OCT:{load_addr:08x} +compSize = INTEGER:{imagesize_sbl} +shaType = OID:{sha_type} +shaValue = FORMAT:HEX,OCT:{hashval_sbl} + +[sysfw] +compType = INTEGER:2 +bootCore = INTEGER:0 +compOpts = INTEGER:0 +destAddr = FORMAT:HEX,OCT:{load_addr_sysfw:08x} +compSize = INTEGER:{imagesize_sysfw} +shaType = OID:{sha_type} +shaValue = FORMAT:HEX,OCT:{hashval_sysfw} + +[sysfw_data] +compType = INTEGER:18 +bootCore = INTEGER:0 +compOpts = INTEGER:0 +destAddr = FORMAT:HEX,OCT:{load_addr_sysfw_data:08x} +compSize = INTEGER:{imagesize_sysfw_data} +shaType = OID:{sha_type} +shaValue = FORMAT:HEX,OCT:{hashval_sysfw_data} + +{sysfw_inner_cert_ext_boot_block} + +{dm_data_ext_boot_block} + ''', file=outf) + args = ['req', '-new', '-x509', '-key', key_fname, '-nodes', + '-outform', 'DER', '-out', cert_fname, '-config', config_fname, + '-sha512'] + return self.run_cmd(*args) + def fetch(self, method): """Fetch handler for openssl diff --git a/tools/binman/control.py b/tools/binman/control.py index 7e2dd3541b9..d1ee1d69a98 100644 --- a/tools/binman/control.py +++ b/tools/binman/control.py @@ -22,6 +22,7 @@ from binman import bintool from binman import cbfs_util from binman import elf from binman import entry +from dtoc import fdt_util from u_boot_pylib import command from u_boot_pylib import tools from u_boot_pylib import tout @@ -56,8 +57,9 @@ def _ReadImageDesc(binman_node, use_expanded): images = OrderedDict() if 'multiple-images' in binman_node.props: for node in binman_node.subnodes: - images[node.name] = Image(node.name, node, - use_expanded=use_expanded) + if 'template' not in node.name: + images[node.name] = Image(node.name, node, + use_expanded=use_expanded) else: images['image'] = Image('image', binman_node, use_expanded=use_expanded) return images @@ -306,8 +308,8 @@ def BeforeReplace(image, allow_resize): image: Image to prepare """ state.PrepareFromLoadedData(image) - image.LoadData() image.CollectBintools() + image.LoadData(decomp=False) # If repacking, drop the old offset/size values except for the original # ones, so we are only left with the constraints.
@@ -478,6 +480,30 @@ def SignEntries(image_fname, input_fname, privatekey_fname, algo, entry_paths, AfterReplace(image, allow_resize=True, write_map=write_map) +def _ProcessTemplates(parent): + """Handle any templates in the binman description + + Args: + parent: Binman node to process (typically /binman) + + Search through each target node looking for those with an 'insert-template' + property. Use that as a list of references to template nodes to use to + adjust the target node. + + Processing involves copying each subnode of the template node into the + target node. + + This is done recursively, so templates can be at any level of the binman + image, e.g. inside a section. + + See 'Templates' in the Binman documentation for details. + """ + for node in parent.subnodes: + tmpl = fdt_util.GetPhandleList(node, 'insert-template') + if tmpl: + node.copy_subnodes_from_phandles(tmpl) + _ProcessTemplates(node) + def PrepareImagesAndDtbs(dtb_fname, select_images, update_fdt, use_expanded): """Prepare the images to be processed and select the device tree @@ -520,6 +546,8 @@ def PrepareImagesAndDtbs(dtb_fname, select_images, update_fdt, use_expanded): raise ValueError("Device tree '%s' does not have a 'binman' " "node" % dtb_fname) + _ProcessTemplates(node) + images = _ReadImageDesc(node, use_expanded) if select_images: diff --git a/tools/binman/elf.py b/tools/binman/elf.py index 5816284c32a..4219001feac 100644 --- a/tools/binman/elf.py +++ b/tools/binman/elf.py @@ -248,6 +248,9 @@ def LookupAndWriteSymbols(elf_fname, entry, section, is_elf=False, entry: Entry to process section: Section which can be used to lookup symbol values base_sym: Base symbol marking the start of the image + + Returns: + int: Number of symbols written """ if not base_sym: base_sym = '__image_copy_start' @@ -269,12 +272,13 @@ if not syms: tout.debug('LookupAndWriteSymbols: no syms') - return + return 0 base = syms.get(base_sym) if not base and not is_elf: tout.debug('LookupAndWriteSymbols: no base') - return + return 0 base_addr = 0 if is_elf else base.address + count = 0 for name, sym in syms.items(): if name.startswith('_binman'): msg = ("Section '%s': Symbol '%s'\n in entry '%s'" % @@ -307,6 +311,11 @@ (msg, name, offset, value, len(value_bytes))) entry.data = (entry.data[:offset] + value_bytes + entry.data[offset + sym.size:]) + count += 1 + if count: + tout.detail( + f"Section '{section.GetPath()}': entry '{entry.GetPath()}' : {count} symbols") + return count def GetSymbolValue(sym, data, msg): """Get the value of a symbol diff --git a/tools/binman/elf_test.py b/tools/binman/elf_test.py index c98083961b5..cc95b424b33 100644 --- a/tools/binman/elf_test.py +++ b/tools/binman/elf_test.py @@ -141,7 +141,8 @@ class TestElf(unittest.TestCase): entry = FakeEntry(10) section = FakeSection() elf_fname = self.ElfTestFile('u_boot_binman_syms_bad') - elf.LookupAndWriteSymbols(elf_fname, entry, section) + count = elf.LookupAndWriteSymbols(elf_fname, entry, section) + self.assertEqual(0, count) def testBadSymbolSize(self): """Test that an attempt to use an 8-bit symbol are detected @@ -162,7 +163,7 @@ def testNoValue(self): """Test the case where we have no value for the symbol - This should produce -1 values for all thress symbols, taking up the + This should produce -1 values for all three symbols, taking up the first 16 bytes of the image.
""" if not elf.ELF_TOOLS: @@ -170,7 +171,8 @@ class TestElf(unittest.TestCase): entry = FakeEntry(28) section = FakeSection(sym_value=None) elf_fname = self.ElfTestFile('u_boot_binman_syms') - elf.LookupAndWriteSymbols(elf_fname, entry, section) + count = elf.LookupAndWriteSymbols(elf_fname, entry, section) + self.assertEqual(5, count) expected = (struct.pack('<L', elf.BINMAN_SYM_MAGIC_VALUE) + tools.get_bytes(255, 20) + tools.get_bytes(ord('a'), 4)) @@ -369,6 +371,11 @@ class TestElf(unittest.TestCase): elf.GetSymbolOffset(fname, 'embed') self.assertIn('__image_copy_start', str(e.exception)) + def test_get_symbol_address(self): + fname = self.ElfTestFile('embed_data') + addr = elf.GetSymbolAddress(fname, 'region_size') + self.assertEqual(0, addr) + if __name__ == '__main__': unittest.main() diff --git a/tools/binman/entries.rst b/tools/binman/entries.rst index b71af801fda..f2376932be6 100644 --- a/tools/binman/entries.rst +++ b/tools/binman/entries.rst @@ -468,6 +468,92 @@ updating the EC on startup via software sync. +.. _etype_encrypted: + +Entry: encrypted: Externally built encrypted binary blob +-------------------------------------------------------- + +This entry provides the functionality to include information about how to +decrypt an encrypted binary. This information is added to the +resulting device tree by adding a new cipher node in the entry's parent +node (i.e. the binary). + +The key that must be used to decrypt the binary is either directly embedded +in the device tree or indirectly by specifying a key source. The key source +can be used as an id of a key that is stored in an external device. + +Using an embedded key +~~~~~~~~~~~~~~~~~~~~~ + +This is an example using an embedded key:: + + blob-ext { + filename = "encrypted-blob.bin"; + }; + + encrypted { + algo = "aes256-gcm"; + iv-filename = "encrypted-blob.bin.iv"; + key-filename = "encrypted-blob.bin.key"; + }; + +This entry generates the following device tree structure form the example +above:: + + data = [...] + cipher { + algo = "aes256-gcm"; + key = <0x...>; + iv = <0x...>; + }; + +The data property is generated by the blob-ext etype, the cipher node and +its content is generated by this etype. + +Using an external key +~~~~~~~~~~~~~~~~~~~~~ + +Instead of embedding the key itself into the device tree, it is also +possible to address an externally stored key by specifying a 'key-source' +instead of the 'key':: + + blob-ext { + filename = "encrypted-blob.bin"; + }; + + encrypted { + algo = "aes256-gcm"; + iv-filename = "encrypted-blob.bin.iv"; + key-source = "external-key-id"; + }; + +This entry generates the following device tree structure form the example +above:: + + data = [...] + cipher { + algo = "aes256-gcm"; + key-source = "external-key-id"; + iv = <0x...>; + }; + +Properties +~~~~~~~~~~ + +Properties / Entry arguments: + - algo: The encryption algorithm. Currently no algorithm is supported + out-of-the-box. Certain algorithms will be added in future + patches. + - iv-filename: The name of the file containing the initialization + vector (in short iv). See + https://en.wikipedia.org/wiki/Initialization_vector + - key-filename: The name of the file containing the key. Either + key-filename or key-source must be provided. + - key-source: The key that should be used. Either key-filename or + key-source must be provided. + + + .. 
_etype_fdtmap: Entry: fdtmap: An entry which contains an FDT map @@ -615,6 +701,12 @@ The top-level 'fit' node supports the following special properties: `of-list` meaning that `-a of-list="dtb1 dtb2..."` should be passed to binman. + fit,fdt-list-val + As an alternative to fit,fdt-list the list of device tree files + can be provided in this property as a string list, e.g.:: + + fit,fdt-list-val = "dtb1", "dtb2"; + Substitutions ~~~~~~~~~~~~~ @@ -1658,6 +1750,119 @@ by setting the size of the entry to something larger than the text. +.. _etype_ti_board_config: + +Entry: ti-board-config: An entry containing a TI schema validated board config binary +------------------------------------------------------------------------------------- + +This etype supports generation of two kinds of board configuration +binaries: singular board config binary as well as combined board config +binary. + +Properties / Entry arguments: + - config: File containing board configuration data in YAML + - schema: File containing board configuration YAML schema against + which the config file is validated + +Output files: + - board config binary: File containing board configuration binary + +The above parameters are used only when the generated binary is +intended to be a single board configuration binary. Example:: + + my-ti-board-config { + ti-board-config { + config = "board-config.yaml"; + schema = "schema.yaml"; + }; + }; + +To generate a combined board configuration binary, we pack the +needed individual binaries into a ti-board-config binary. In this case, +the available supported subnode names are board-cfg, pm-cfg, sec-cfg and +rm-cfg. The final binary is prepended with a header containing details about +the included board config binaries. Example:: + + my-combined-ti-board-config { + ti-board-config { + board-cfg { + config = "board-cfg.yaml"; + schema = "schema.yaml"; + }; + sec-cfg { + config = "sec-cfg.yaml"; + schema = "schema.yaml"; + }; + }; + }; + + + +.. _etype_ti_secure: + +Entry: ti-secure: Entry containing a TI x509 certificate binary +--------------------------------------------------------------- + +Properties / Entry arguments: + - content: List of phandles to entries to sign + - keyfile: Filename of file containing key to sign binary with + - sha: Hash function to be used for signing + +Output files: + - input.<unique_name> - input file passed to openssl + - config.<unique_name> - input file generated for openssl (which is + used as the config file) + - cert.<unique_name> - output file generated by openssl (which is + used as the entry contents) + +openssl signs the provided data, using the TI templated config file and +writes the signature in this entry. This allows verification that the +data is genuine. + + + ..
_etype_ti_secure_rom: + +Entry: ti-secure-rom: Entry containing a TI x509 certificate binary for images booted by ROM +-------------------------------------------------------------------------------------------- + +Properties / Entry arguments: + - keyfile: Filename of file containing key to sign binary with + - combined: boolean if device follows combined boot flow + - countersign: boolean if device contains countersigned system firmware + - load: load address of SPL + - sw-rev: software revision + - sha: Hash function to be used for signing + - core: core on which bootloader runs, valid cores are 'secure' and 'public' + - content: phandle of SPL in case of legacy bootflow or phandles of component binaries + in case of combined bootflow + +The following properties are only for generating a combined bootflow binary: + - sysfw-inner-cert: boolean if binary contains sysfw inner certificate + - dm-data: boolean if binary contains dm-data binary + - content-sbl: phandle of SPL binary + - content-sysfw: phandle of sysfw binary + - content-sysfw-data: phandle of sysfw-data or tifs-data binary + - content-sysfw-inner-cert (optional): phandle of sysfw inner certificate binary + - content-dm-data (optional): phandle of dm-data binary + - load-sysfw: load address of sysfw binary + - load-sysfw-data: load address of sysfw-data or tifs-data binary + - load-sysfw-inner-cert (optional): load address of sysfw inner certificate binary + - load-dm-data (optional): load address of dm-data binary + +Output files: + - input.<unique_name> - input file passed to openssl + - config.<unique_name> - input file generated for openssl (which is + used as the config file) + - cert.<unique_name> - output file generated by openssl (which is + used as the entry contents) + +openssl signs the provided data, using the TI templated config file and +writes the signature in this entry. This allows verification that the +data is genuine. + + + .. _etype_u_boot: Entry: u-boot: U-Boot flat binary @@ -1912,6 +2117,45 @@ binman uses that to look up symbols to write into the SPL binary. +.. _etype_u_boot_spl_pubkey_dtb: + +Entry: u-boot-spl-pubkey-dtb: U-Boot SPL device tree including public key +------------------------------------------------------------------------- + +Properties / Entry arguments: + - key-name-hint: Public key name without extension (.crt). + Default is determined by underlying + bintool (fdt_add_pubkey), usually 'key'. + - algo: (Optional) Algorithm used for signing. Default is determined by + underlying bintool (fdt_add_pubkey), usually 'sha1,rsa2048' + - required: (Optional) If present this indicates that the key must be + verified for the image / configuration to be + considered valid + +The following example shows an image containing an SPL which +is packed together with the dtb. Binman will add a signature +node to the dtb. + +Example node:: + + image { + ... + spl { + filename = "spl.bin"; + + u-boot-spl-nodtb { + }; + u-boot-spl-pubkey-dtb { + algo = "sha384,rsa4096"; + required = "conf"; + key-name-hint = "dev"; + }; + }; + ... + }; + + + ..
_etype_u_boot_spl_with_ucode_ptr: Entry: u-boot-spl-with-ucode-ptr: U-Boot SPL with embedded microcode pointer diff --git a/tools/binman/entry.py b/tools/binman/entry.py index 39456906a47..42e0b7b9145 100644 --- a/tools/binman/entry.py +++ b/tools/binman/entry.py @@ -158,6 +158,7 @@ class Entry(object): self.offset_from_elf = None self.preserve = False self.build_done = False + self.no_write_symbols = False @staticmethod def FindEntryClass(etype, expanded): @@ -321,6 +322,7 @@ class Entry(object): 'offset-from-elf') self.preserve = fdt_util.GetBool(self._node, 'preserve') + self.no_write_symbols = fdt_util.GetBool(self._node, 'no-write-symbols') def GetDefaultFilename(self): return None @@ -472,6 +474,9 @@ class Entry(object): def ObtainContents(self, skip_entry=None, fake_size=0): """Figure out the contents of an entry. + For missing blobs (where allow-missing is enabled), the contents are set + to b'' and self.missing is set to True. + Args: skip_entry (Entry): Entry to skip when obtaining section contents fake_size (int): Size of fake file to create if needed @@ -695,7 +700,7 @@ class Entry(object): Args: section: Section containing the entry """ - if self.auto_write_symbols: + if self.auto_write_symbols and not self.no_write_symbols: # Check if we are writing symbols into an ELF file is_elf = self.GetDefaultFilename() == self.elf_fname elf.LookupAndWriteSymbols(self.elf_fname, self, section.GetImage(), @@ -1309,10 +1314,6 @@ features to produce new behaviours. """ data = b'' for entry in entries: - # First get the input data and put it in a file. If not available, - # try later. - if not entry.ObtainContents(fake_size=fake_size): - return None, None, None data += entry.GetData() uniq = self.GetUniqueName() fname = tools.get_output_filename(f'{prefix}.{uniq}') diff --git a/tools/binman/etype/blob_dtb.py b/tools/binman/etype/blob_dtb.py index 6a3fbc47753..d543de9f759 100644 --- a/tools/binman/etype/blob_dtb.py +++ b/tools/binman/etype/blob_dtb.py @@ -38,7 +38,7 @@ class Entry_blob_dtb(Entry_blob): self.Raise("Invalid prepend in '%s': '%s'" % (self._node.name, self.prepend)) - def ObtainContents(self): + def ObtainContents(self, fake_size=0): """Get the device-tree from the list held by the 'state' module""" self._filename = self.GetDefaultFilename() self._pathname, _ = state.GetFdtContents(self.GetFdtEtype()) diff --git a/tools/binman/etype/blob_phase.py b/tools/binman/etype/blob_phase.py index b937158756f..951d9934050 100644 --- a/tools/binman/etype/blob_phase.py +++ b/tools/binman/etype/blob_phase.py @@ -52,3 +52,8 @@ class Entry_blob_phase(Entry_section): # Read entries again, now that we have some self.ReadEntries() + + # Propagate the no-write-symbols property + if self.no_write_symbols: + for entry in self._entries.values(): + entry.no_write_symbols = True diff --git a/tools/binman/etype/encrypted.py b/tools/binman/etype/encrypted.py new file mode 100644 index 00000000000..53d0e76bab7 --- /dev/null +++ b/tools/binman/etype/encrypted.py @@ -0,0 +1,138 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright 2023 Weidmüller Interface GmbH & Co. 
KG +# Written by Christian Taedcke <christian.taedcke@weidmueller.com> +# +# Entry-type module for cipher information of encrypted blobs/binaries +# + +from binman.etype.collection import Entry +from dtoc import fdt_util +from u_boot_pylib import tools + +# This is imported if needed +state = None + + +class Entry_encrypted(Entry): + """Externally built encrypted binary blob + + This entry provides the functionality to include information about how to + decrypt an encrypted binary. This information is added to the + resulting device tree by adding a new cipher node in the entry's parent + node (i.e. the binary). + + The key that must be used to decrypt the binary is either directly embedded + in the device tree or indirectly by specifying a key source. The key source + can be used as an id of a key that is stored in an external device. + + Using an embedded key + ~~~~~~~~~~~~~~~~~~~~~ + + This is an example using an embedded key:: + + blob-ext { + filename = "encrypted-blob.bin"; + }; + + encrypted { + algo = "aes256-gcm"; + iv-filename = "encrypted-blob.bin.iv"; + key-filename = "encrypted-blob.bin.key"; + }; + + This entry generates the following device tree structure from the example + above:: + + data = [...] + cipher { + algo = "aes256-gcm"; + key = <0x...>; + iv = <0x...>; + }; + + The data property is generated by the blob-ext etype; the cipher node and + its content are generated by this etype. + + Using an external key + ~~~~~~~~~~~~~~~~~~~~~ + + Instead of embedding the key itself into the device tree, it is also + possible to address an externally stored key by specifying a 'key-source' + instead of the 'key':: + + blob-ext { + filename = "encrypted-blob.bin"; + }; + + encrypted { + algo = "aes256-gcm"; + iv-filename = "encrypted-blob.bin.iv"; + key-source = "external-key-id"; + }; + + This entry generates the following device tree structure from the example + above:: + + data = [...] + cipher { + algo = "aes256-gcm"; + key-source = "external-key-id"; + iv = <0x...>; + }; + + Properties + ~~~~~~~~~~ + + Properties / Entry arguments: + - algo: The encryption algorithm. Currently no algorithm is supported + out-of-the-box. Certain algorithms will be added in future + patches. + - iv-filename: The name of the file containing the initialization + vector (in short iv). See + https://en.wikipedia.org/wiki/Initialization_vector + - key-filename: The name of the file containing the key. Either + key-filename or key-source must be provided. + - key-source: The key that should be used. Either key-filename or + key-source must be provided.
+ """ + + def __init__(self, section, etype, node): + # Put this here to allow entry-docs and help to work without libfdt + global state + from binman import state + + super().__init__(section, etype, node) + self.required_props = ['algo', 'iv-filename'] + self._algo = None + self._iv_filename = None + self._key_name_hint = None + self._key_filename = None + + def ReadNode(self): + super().ReadNode() + + self._algo = fdt_util.GetString(self._node, 'algo') + self._iv_filename = fdt_util.GetString(self._node, 'iv-filename') + self._key_filename = fdt_util.GetString(self._node, 'key-filename') + self._key_source = fdt_util.GetString(self._node, 'key-source') + + if self._key_filename is None and self._key_source is None: + self.Raise("Provide either 'key-filename' or 'key-source'") + + def gen_entries(self): + super().gen_entries() + + iv_filename = tools.get_input_filename(self._iv_filename) + iv = tools.read_file(iv_filename, binary=True) + + cipher_node = state.AddSubnode(self._node.parent, "cipher") + cipher_node.AddString("algo", self._algo) + cipher_node.AddData("iv", iv) + + if self._key_filename: + key_filename = tools.get_input_filename(self._key_filename) + key = tools.read_file(key_filename, binary=True) + cipher_node.AddData("key", key) + + if self._key_source: + cipher_node.AddString("key-source", self._key_source) diff --git a/tools/binman/etype/fit.py b/tools/binman/etype/fit.py index c395706ece5..ef4d0667578 100644 --- a/tools/binman/etype/fit.py +++ b/tools/binman/etype/fit.py @@ -81,6 +81,12 @@ class Entry_fit(Entry_section): `of-list` meaning that `-a of-list="dtb1 dtb2..."` should be passed to binman. + fit,fdt-list-val + As an alternative to fit,fdt-list the list of device tree files + can be provided in this property as a string list, e.g.:: + + fit,fdt-list-val = "dtb1", "dtb2"; + Substitutions ~~~~~~~~~~~~~ @@ -361,6 +367,9 @@ class Entry_fit(Entry_section): [EntryArg(self._fit_list_prop.value, str)]) if fdts is not None: self._fdts = fdts.split() + else: + self._fdts = fdt_util.GetStringList(self._node, 'fit,fdt-list-val') + self._fit_default_dt = self.GetEntryArgsOrProps([EntryArg('default-dt', str)])[0] diff --git a/tools/binman/etype/mkimage.py b/tools/binman/etype/mkimage.py index e028c440708..6ae5d0c8a4f 100644 --- a/tools/binman/etype/mkimage.py +++ b/tools/binman/etype/mkimage.py @@ -8,10 +8,11 @@ from collections import OrderedDict from binman.entry import Entry +from binman.etype.section import Entry_section from dtoc import fdt_util from u_boot_pylib import tools -class Entry_mkimage(Entry): +class Entry_mkimage(Entry_section): """Binary produced by mkimage Properties / Entry arguments: @@ -121,54 +122,67 @@ class Entry_mkimage(Entry): """ def __init__(self, section, etype, node): super().__init__(section, etype, node) - self._multiple_data_files = fdt_util.GetBool(self._node, 'multiple-data-files') - self._mkimage_entries = OrderedDict() self._imagename = None - self._filename = fdt_util.GetString(self._node, 'filename') - self.align_default = None + self._multiple_data_files = False def ReadNode(self): super().ReadNode() + self._multiple_data_files = fdt_util.GetBool(self._node, + 'multiple-data-files') self._args = fdt_util.GetArgs(self._node, 'args') self._data_to_imagename = fdt_util.GetBool(self._node, 'data-to-imagename') if self._data_to_imagename and self._node.FindNode('imagename'): self.Raise('Cannot use both imagename node and data-to-imagename') - self.ReadEntries() def ReadEntries(self): """Read the subnodes to find out what should go in this 
image""" for node in self._node.subnodes: - entry = Entry.Create(self, node) + if self.IsSpecialSubnode(node): + continue + entry = Entry.Create(self, node, + expanded=self.GetImage().use_expanded, + missing_etype=self.GetImage().missing_etype) entry.ReadNode() + entry.SetPrefix(self._name_prefix) if entry.name == 'imagename': self._imagename = entry else: - self._mkimage_entries[entry.name] = entry + self._entries[entry.name] = entry - def ObtainContents(self): + def BuildSectionData(self, required): + """Build mkimage entry contents + + Runs mkimage to build the entry contents + + Args: + required (bool): True if the data must be present, False if it is OK + to return None + + Returns: + bytes: Contents of the section + """ # Use a non-zero size for any fake files to keep mkimage happy # Note that testMkimageImagename() relies on this 'mkimage' parameter fake_size = 1024 if self._multiple_data_files: fnames = [] uniq = self.GetUniqueName() - for entry in self._mkimage_entries.values(): - if not entry.ObtainContents(fake_size=fake_size): - return False - if entry._pathname: - fnames.append(entry._pathname) + for entry in self._entries.values(): + # Put the contents in a temporary file + ename = f'mkimage-in-{uniq}-{entry.name}' + fname = tools.get_output_filename(ename) + data = entry.GetData(required) + tools.write_file(fname, data) + fnames.append(fname) input_fname = ":".join(fnames) + data = b'' else: data, input_fname, uniq = self.collect_contents_to_file( - self._mkimage_entries.values(), 'mkimage', fake_size) - if data is None: - return False + self._entries.values(), 'mkimage', fake_size) if self._imagename: image_data, imagename_fname, _ = self.collect_contents_to_file( [self._imagename], 'mkimage-n', 1024) - if image_data is None: - return False outfile = self._filename if self._filename else 'mkimage-out.%s' % uniq output_fname = tools.get_output_filename(outfile) @@ -176,8 +190,7 @@ class Entry_mkimage(Entry): self.CheckMissing(missing_list) self.missing = bool(missing_list) if self.missing: - self.SetContents(b'') - return self.allow_missing + return b'' args = ['-d', input_fname] if self._data_to_imagename: @@ -186,71 +199,58 @@ class Entry_mkimage(Entry): args += ['-n', imagename_fname] args += self._args + [output_fname] if self.mkimage.run_cmd(*args) is not None: - self.SetContents(tools.read_file(output_fname)) + return tools.read_file(output_fname) else: # Bintool is missing; just use the input data as the output self.record_missing_bintool(self.mkimage) - self.SetContents(data) - - return True + return data def GetEntries(self): # Make a copy so we don't change the original - entries = OrderedDict(self._mkimage_entries) + entries = OrderedDict(self._entries) if self._imagename: entries['imagename'] = self._imagename return entries - def SetAllowMissing(self, allow_missing): - """Set whether a section allows missing external blobs + def AddBintools(self, btools): + super().AddBintools(btools) + self.mkimage = self.AddBintool(btools, 'mkimage') - Args: - allow_missing: True if allowed, False if not allowed - """ - self.allow_missing = allow_missing - for entry in self._mkimage_entries.values(): - entry.SetAllowMissing(allow_missing) - if self._imagename: - self._imagename.SetAllowMissing(allow_missing) + def CheckEntries(self): + pass - def SetAllowFakeBlob(self, allow_fake): - """Set whether the sub nodes allows to create a fake blob + def ProcessContents(self): + # The blob may have changed due to WriteSymbols() + ok = super().ProcessContents() + data = 
self.BuildSectionData(True) + ok2 = self.ProcessContentsUpdate(data) + return ok and ok2 - Args: - allow_fake: True if allowed, False if not allowed - """ - for entry in self._mkimage_entries.values(): - entry.SetAllowFakeBlob(allow_fake) - if self._imagename: - self._imagename.SetAllowFakeBlob(allow_fake) + def SetImagePos(self, image_pos): + """Set the position in the image - def CheckMissing(self, missing_list): - """Check if any entries in this section have missing external blobs + This sets each subentry's offsets, sizes and positions-in-image + according to where they ended up in the packed mkimage file. - If there are missing (non-optional) blobs, the entries are added to the - list + NOTE: This assumes a legacy mkimage and assumes that the images are + written to the output in order. SoC-specific mkimage handling may not + conform to this, in which case these values may be wrong. Args: - missing_list: List of Entry objects to be added to + image_pos (int): Position of this entry in the image """ - for entry in self._mkimage_entries.values(): - entry.CheckMissing(missing_list) - if self._imagename: - self._imagename.CheckMissing(missing_list) + # The mkimage header consists of 0x40 bytes, followed by a table of + # offsets for each file + upto = 0x40 - def CheckFakedBlobs(self, faked_blobs_list): - """Check if any entries in this section have faked external blobs + # Skip the 0-terminated list of offsets (assume a single image) + upto += 4 + 4 + for entry in self.GetEntries().values(): + entry.SetOffsetSize(upto, None) - If there are faked blobs, the entries are added to the list + # Give up if any entries lack a size + if entry.size is None: + return + upto += entry.size - Args: - faked_blobs_list: List of Entry objects to be added to - """ - for entry in self._mkimage_entries.values(): - entry.CheckFakedBlobs(faked_blobs_list) - if self._imagename: - self._imagename.CheckFakedBlobs(faked_blobs_list) - - def AddBintools(self, btools): - super().AddBintools(btools) - self.mkimage = self.AddBintool(btools, 'mkimage') + super().SetImagePos(image_pos) diff --git a/tools/binman/etype/pre_load.py b/tools/binman/etype/pre_load.py index bd3545bffc0..2e4c72359ff 100644 --- a/tools/binman/etype/pre_load.py +++ b/tools/binman/etype/pre_load.py @@ -81,7 +81,8 @@ class Entry_pre_load(Entry_collection): def ReadNode(self): super().ReadNode() - self.key_path, = self.GetEntryArgsOrProps([EntryArg('pre-load-key-path', str)]) + self.key_path, = self.GetEntryArgsOrProps( + [EntryArg('pre-load-key-path', str)]) if self.key_path is None: self.key_path = '' @@ -98,8 +99,7 @@ self.Raise(sign_name + " is not supported") # Read the key - with open(key_name, 'rb') as pem: - key = RSA.import_key(pem.read()) + key = RSA.import_key(tools.read_file(key_name)) # Check if the key has the expected size if key.size_in_bytes() != RSAS[sign_name]: diff --git a/tools/binman/etype/section.py b/tools/binman/etype/section.py index c36edd13508..fb49e85a763 100644 --- a/tools/binman/etype/section.py +++ b/tools/binman/etype/section.py @@ -168,6 +168,7 @@ class Entry_section(Entry): self._end_4gb = False self._ignore_missing = False self._filename = None + self.align_default = 0 def IsSpecialSubnode(self, node): """Check if a node is a special one used by the section itself @@ -178,7 +179,8 @@ Returns: bool: True if the node is a special one, else False """ - return node.name.startswith('hash') or node.name.startswith('signature') + start_list = ('cipher',
'hash', 'signature', 'template') + return any(node.name.startswith(name) for name in start_list) def ReadNode(self): """Read properties from the section node""" @@ -315,12 +317,15 @@ This should be overridden by subclasses which want to build their own data structure for the section. + Missing entries will have been given empty (or fake) data, so are + processed normally here. + Args: required: True if the data must be present, False if it is OK to return None Returns: - Contents of the section (bytes) + Contents of the section (bytes), None if not available """ section_data = bytearray() @@ -710,6 +715,33 @@ def GetEntryContents(self, skip_entry=None): """Call ObtainContents() for each entry in the section + The overall goal of this function is to read in any available data in + this entry and any subentries. This includes reading in blobs, setting + up objects which have predefined contents, etc. + + Since entry types which contain entries call ObtainContents() on all + those entries too, the result is that ObtainContents() is called + recursively for the whole tree below this one. + + Entries with subentries are generally not *themselves* processed here, + i.e. their ObtainContents() implementation simply obtains contents of + their subentries, skipping their own contents. For example, the + implementation here (for Entry_section) does not attempt to pack the + entries into a final result. That is handled later. + + Generally, calling this results in SetContents() being called for each + entry, so that the 'data' and 'contents_size' properties are set, and + subsequent calls to GetData() will return valid data. + + Where 'allow_missing' is set, this can result in the 'missing' property + being set to True if there is no data. This is handled by setting the + data to b''. This function will still return success. Future calls to + GetData() for this entry will return b'', or in the case where the data + is faked, GetData() will return that fake data.
+ + Args: + skip_entry: (single) Entry to skip, or None to process all entries + Note that this may set entry.absent to True if the entry is not actually needed """ @@ -719,7 +751,7 @@ class Entry_section(Entry): next_todo.append(entry) return entry - todo = self._entries.values() + todo = self.GetEntries().values() for passnum in range(3): threads = state.GetThreads() next_todo = [] @@ -892,7 +924,7 @@ class Entry_section(Entry): allow_missing: True if allowed, False if not allowed """ self.allow_missing = allow_missing - for entry in self._entries.values(): + for entry in self.GetEntries().values(): entry.SetAllowMissing(allow_missing) def SetAllowFakeBlob(self, allow_fake): @@ -902,7 +934,7 @@ class Entry_section(Entry): allow_fake: True if allowed, False if not allowed """ super().SetAllowFakeBlob(allow_fake) - for entry in self._entries.values(): + for entry in self.GetEntries().values(): entry.SetAllowFakeBlob(allow_fake) def CheckMissing(self, missing_list): @@ -914,7 +946,7 @@ class Entry_section(Entry): Args: missing_list: List of Entry objects to be added to """ - for entry in self._entries.values(): + for entry in self.GetEntries().values(): entry.CheckMissing(missing_list) def CheckFakedBlobs(self, faked_blobs_list): @@ -925,7 +957,7 @@ class Entry_section(Entry): Args: faked_blobs_list: List of Entry objects to be added to """ - for entry in self._entries.values(): + for entry in self.GetEntries().values(): entry.CheckFakedBlobs(faked_blobs_list) def CheckOptional(self, optional_list): @@ -936,7 +968,7 @@ class Entry_section(Entry): Args: optional_list (list): List of Entry objects to be added to """ - for entry in self._entries.values(): + for entry in self.GetEntries().values(): entry.CheckOptional(optional_list) def check_missing_bintools(self, missing_list): @@ -948,7 +980,7 @@ class Entry_section(Entry): missing_list: List of Bintool objects to be added to """ super().check_missing_bintools(missing_list) - for entry in self._entries.values(): + for entry in self.GetEntries().values(): entry.check_missing_bintools(missing_list) def _CollectEntries(self, entries, entries_by_name, add_entry): @@ -998,12 +1030,12 @@ class Entry_section(Entry): entry.Raise(f'Missing required properties/entry args: {missing}') def CheckAltFormats(self, alt_formats): - for entry in self._entries.values(): + for entry in self.GetEntries().values(): entry.CheckAltFormats(alt_formats) def AddBintools(self, btools): super().AddBintools(btools) - for entry in self._entries.values(): + for entry in self.GetEntries().values(): entry.AddBintools(btools) def read_elf_segments(self): diff --git a/tools/binman/etype/ti_board_config.py b/tools/binman/etype/ti_board_config.py new file mode 100644 index 00000000000..94f894c2811 --- /dev/null +++ b/tools/binman/etype/ti_board_config.py @@ -0,0 +1,259 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2022-2023 Texas Instruments Incorporated - https://www.ti.com/ +# Written by Neha Malcom Francis <n-francis@ti.com> +# +# Entry-type module for generating schema validated TI board +# configuration binary +# + +import os +import struct +import yaml + +from collections import OrderedDict +from jsonschema import validate +from shutil import copyfileobj + +from binman.entry import Entry +from binman.etype.section import Entry_section +from dtoc import fdt_util +from u_boot_pylib import tools + +BOARDCFG = 0xB +BOARDCFG_SEC = 0xD +BOARDCFG_PM = 0xE +BOARDCFG_RM = 0xC +BOARDCFG_NUM_ELEMS = 4 + +class Entry_ti_board_config(Entry_section): + """An entry containing 
a TI schema validated board config binary + + This etype supports generation of two kinds of board configuration + binaries: singular board config binary as well as combined board config + binary. + + Properties / Entry arguments: + - config: File containing board configuration data in YAML + - schema: File containing board configuration YAML schema against + which the config file is validated + + Output files: + - board config binary: File containing board configuration binary + + The above parameters are used only when the generated binary is + intended to be a single board configuration binary. Example:: + + my-ti-board-config { + ti-board-config { + config = "board-config.yaml"; + schema = "schema.yaml"; + }; + }; + + To generate a combined board configuration binary, we pack the + needed individual binaries into a ti-board-config binary. In this case, + the available supported subnode names are board-cfg, pm-cfg, sec-cfg and + rm-cfg. The final binary is prepended with a header containing details about + the included board config binaries. Example:: + + my-combined-ti-board-config { + ti-board-config { + board-cfg { + config = "board-cfg.yaml"; + schema = "schema.yaml"; + }; + sec-cfg { + config = "sec-cfg.yaml"; + schema = "schema.yaml"; + }; + }; + }; + """ + def __init__(self, section, etype, node): + super().__init__(section, etype, node) + self._config = None + self._schema = None + self._entries = OrderedDict() + self._num_elems = BOARDCFG_NUM_ELEMS + self._fmt = '<HHHBB' + self._index = 0 + self._binary_offset = 0 + self._sw_rev = 1 + self._devgrp = 0 + + def ReadNode(self): + super().ReadNode() + self._config = fdt_util.GetString(self._node, 'config') + self._schema = fdt_util.GetString(self._node, 'schema') + # Depending on whether config file is present in node, we determine + # whether it is a combined board config binary or not + if self._config is None: + self.ReadEntries() + else: + self._config_file = tools.get_input_filename(self._config) + self._schema_file = tools.get_input_filename(self._schema) + + def ReadEntries(self): + """Read the subnodes to find out what should go in this image + """ + for node in self._node.subnodes: + if 'type' not in node.props: + entry = Entry.Create(self, node, 'ti-board-config') + entry.ReadNode() + cfg_data = entry.BuildSectionData(True) + entry._cfg_data = cfg_data + self._entries[entry.name] = entry + self._num_elems = len(self._node.subnodes) + + def _convert_to_byte_chunk(self, val, data_type): + """Convert value into byte array + + Args: + val: value to convert into byte array + data_type: data type used in schema, supported data types are u8, + u16 and u32 + + Returns: + array of bytes representing value + """ + size = 0 + if (data_type == '#/definitions/u8'): + size = 1 + elif (data_type == '#/definitions/u16'): + size = 2 + else: + size = 4 + if type(val) == int: + br = val.to_bytes(size, byteorder='little') + return br + + def _compile_yaml(self, schema_yaml, file_yaml): + """Convert YAML file into byte array based on YAML schema + + Args: + schema_yaml: file containing YAML schema + file_yaml: file containing config to compile + + Returns: + array of bytes representing YAML file against YAML schema + """ + br = bytearray() + for key, node in file_yaml.items(): + node_schema = schema_yaml['properties'][key] + node_type = node_schema.get('type') + if not 'type' in node_schema: + br += self._convert_to_byte_chunk(node, + node_schema.get('$ref')) + elif node_type == 'object': + br += self._compile_yaml(node_schema, node)
+ elif node_type == 'array': + for item in node: + if not isinstance(item, dict): + br += self._convert_to_byte_chunk( + item, schema_yaml['properties'][key]['items']['$ref']) + else: + br += self._compile_yaml(node_schema.get('items'), item) + return br + + def _generate_binaries(self): + """Generate config binary artifacts from the loaded YAML configuration file + + Returns: + byte array containing config binary artifacts + or None if generation fails + """ + cfg_binary = bytearray() + for key, node in self.file_yaml.items(): + node_schema = self.schema_yaml['properties'][key] + br = self._compile_yaml(node_schema, node) + cfg_binary += br + return cfg_binary + + def _add_boardcfg(self, bcfgtype, bcfgdata): + """Add board config to combined board config binary + + Args: + bcfgtype (int): board config type + bcfgdata (byte array): board config data + """ + size = len(bcfgdata) + desc = struct.pack(self._fmt, bcfgtype, + self._binary_offset, size, self._devgrp, 0) + with open(self.descfile, 'ab+') as desc_fh: + desc_fh.write(desc) + with open(self.bcfgfile, 'ab+') as bcfg_fh: + bcfg_fh.write(bcfgdata) + self._binary_offset += size + self._index += 1 + + def _finalize(self): + """Generate final combined board config binary + + Returns: + byte array containing combined board config data + or None if unable to generate + """ + with open(self.descfile, 'rb') as desc_fh: + with open(self.bcfgfile, 'rb') as bcfg_fh: + with open(self.fh_file, 'ab+') as fh: + copyfileobj(desc_fh, fh) + copyfileobj(bcfg_fh, fh) + data = tools.read_file(self.fh_file) + return data + + def BuildSectionData(self, required): + if self._config is None: + self._binary_offset = 0 + uniq = self.GetUniqueName() + self.fh_file = tools.get_output_filename('fh.%s' % uniq) + self.descfile = tools.get_output_filename('desc.%s' % uniq) + self.bcfgfile = tools.get_output_filename('bcfg.%s' % uniq) + + # when binman runs again make sure we start clean + if os.path.exists(self.fh_file): + os.remove(self.fh_file) + if os.path.exists(self.descfile): + os.remove(self.descfile) + if os.path.exists(self.bcfgfile): + os.remove(self.bcfgfile) + + with open(self.fh_file, 'wb') as f: + t_bytes = f.write(struct.pack( + '<BB', self._num_elems, self._sw_rev)) + self._binary_offset += t_bytes + self._binary_offset += self._num_elems * struct.calcsize(self._fmt) + + if 'board-cfg' in self._entries: + self._add_boardcfg(BOARDCFG, self._entries['board-cfg']._cfg_data) + + if 'sec-cfg' in self._entries: + self._add_boardcfg(BOARDCFG_SEC, self._entries['sec-cfg']._cfg_data) + + if 'pm-cfg' in self._entries: + self._add_boardcfg(BOARDCFG_PM, self._entries['pm-cfg']._cfg_data) + + if 'rm-cfg' in self._entries: + self._add_boardcfg(BOARDCFG_RM, self._entries['rm-cfg']._cfg_data) + + data = self._finalize() + return data + + else: + with open(self._config_file, 'r') as f: + self.file_yaml = yaml.safe_load(f) + with open(self._schema_file, 'r') as sch: + self.schema_yaml = yaml.safe_load(sch) + + try: + validate(self.file_yaml, self.schema_yaml) + except Exception as e: + self.Raise(f"Schema validation error: {e}") + + data = self._generate_binaries() + return data + + def SetImagePos(self, image_pos): + Entry.SetImagePos(self, image_pos) + + def CheckEntries(self): + Entry.CheckEntries(self) diff --git a/tools/binman/etype/ti_secure.py b/tools/binman/etype/ti_secure.py new file mode 100644 index 00000000000..d939dce5713 --- /dev/null +++ b/tools/binman/etype/ti_secure.py @@ -0,0 +1,78 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2022-2023 
Texas Instruments Incorporated - https://www.ti.com/ +# Written by Neha Malcom Francis <n-francis@ti.com> +# + +# Support for generation of TI secured binary blobs + +from binman.entry import EntryArg +from binman.etype.x509_cert import Entry_x509_cert + +from dtoc import fdt_util + +class Entry_ti_secure(Entry_x509_cert): + """Entry containing a TI x509 certificate binary + + Properties / Entry arguments: + - content: List of phandles to entries to sign + - keyfile: Filename of file containing key to sign binary with + - sha: Hash function to be used for signing + + Output files: + - input.<unique_name> - input file passed to openssl + - config.<unique_name> - input file generated for openssl (which is + used as the config file) + - cert.<unique_name> - output file generated by openssl (which is + used as the entry contents) + + openssl signs the provided data, using the TI templated config file and + writes the signature in this entry. This allows verification that the + data is genuine. + """ + def __init__(self, section, etype, node): + super().__init__(section, etype, node) + self.openssl = None + + def ReadNode(self): + super().ReadNode() + self.key_fname = self.GetEntryArgsOrProps([ + EntryArg('keyfile', str)], required=True)[0] + self.sha = fdt_util.GetInt(self._node, 'sha', 512) + self.req_dist_name = {'C': 'US', + 'ST': 'TX', + 'L': 'Dallas', + 'O': 'Texas Instruments Incorporated', + 'OU': 'Processors', + 'CN': 'TI Support', + 'emailAddress': 'support@ti.com'} + + def GetCertificate(self, required): + """Get the contents of this entry + + Args: + required: True if the data must be present, False if it is OK to + return None + + Returns: + bytes content of the entry, which is the certificate binary for the + provided data + """ + return super().GetCertificate(required=required, type='sysfw') + + def ObtainContents(self): + data = self.data + if data is None: + data = self.GetCertificate(False) + if data is None: + return False + self.SetContents(data) + return True + + def ProcessContents(self): + # The blob may have changed due to WriteSymbols() + data = self.data + return self.ProcessContentsUpdate(data) + + def AddBintools(self, btools): + super().AddBintools(btools) + self.openssl = self.AddBintool(btools, 'openssl') diff --git a/tools/binman/etype/ti_secure_rom.py b/tools/binman/etype/ti_secure_rom.py new file mode 100644 index 00000000000..9a7ac9e9e0a --- /dev/null +++ b/tools/binman/etype/ti_secure_rom.py @@ -0,0 +1,249 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2022-2023 Texas Instruments Incorporated - https://www.ti.com/ +# Written by Neha Malcom Francis <n-francis@ti.com> +# + +# Support for generation of TI secured bootloaders booted by ROM + +from binman.entry import EntryArg +from binman.etype.x509_cert import Entry_x509_cert + +import hashlib + +from dtoc import fdt_util +from u_boot_pylib import tools + +VALID_SHAS = [256, 384, 512, 224] +SHA_OIDS = {256:'2.16.840.1.101.3.4.2.1', + 384:'2.16.840.1.101.3.4.2.2', + 512:'2.16.840.1.101.3.4.2.3', + 224:'2.16.840.1.101.3.4.2.4'} + +class Entry_ti_secure_rom(Entry_x509_cert): + """Entry containing a TI x509 certificate binary for images booted by ROM + + Properties / Entry arguments: + - keyfile: Filename of file containing key to sign binary with + - combined: boolean if device follows combined boot flow + - countersign: boolean if device contains countersigned system firmware + - load: load address of SPL + - sw-rev: software revision + - sha: Hash function to be used for signing + - core: core on which 
bootloader runs, valid cores are 'secure' and 'public' + - content: phandle of SPL in case of legacy bootflow or phandles of component binaries + in case of combined bootflow + + The following properties are only for generating a combined bootflow binary: + - sysfw-inner-cert: boolean if binary contains sysfw inner certificate + - dm-data: boolean if binary contains dm-data binary + - content-sbl: phandle of SPL binary + - content-sysfw: phandle of sysfw binary + - content-sysfw-data: phandle of sysfw-data or tifs-data binary + - content-sysfw-inner-cert (optional): phandle of sysfw inner certificate binary + - content-dm-data (optional): phandle of dm-data binary + - load-sysfw: load address of sysfw binary + - load-sysfw-data: load address of sysfw-data or tifs-data binary + - load-sysfw-inner-cert (optional): load address of sysfw inner certificate binary + - load-dm-data (optional): load address of dm-data binary + + Output files: + - input.<unique_name> - input file passed to openssl + - config.<unique_name> - input file generated for openssl (which is + used as the config file) + - cert.<unique_name> - output file generated by openssl (which is + used as the entry contents) + + openssl signs the provided data, using the TI templated config file and + writes the signature in this entry. This allows verification that the + data is genuine. + """ + def __init__(self, section, etype, node): + super().__init__(section, etype, node) + self.openssl = None + + def ReadNode(self): + super().ReadNode() + self.combined = fdt_util.GetBool(self._node, 'combined', False) + self.countersign = fdt_util.GetBool(self._node, 'countersign', False) + self.load_addr = fdt_util.GetInt(self._node, 'load', 0x00000000) + self.sw_rev = fdt_util.GetInt(self._node, 'sw-rev', 1) + self.sha = fdt_util.GetInt(self._node, 'sha', 512) + self.core = fdt_util.GetString(self._node, 'core', 'secure') + self.key_fname = self.GetEntryArgsOrProps([ + EntryArg('keyfile', str)], required=True)[0] + if self.combined: + self.sysfw_inner_cert = fdt_util.GetBool(self._node, 'sysfw-inner-cert', False) + self.load_addr_sysfw = fdt_util.GetInt(self._node, 'load-sysfw', 0x00000000) + self.load_addr_sysfw_data = fdt_util.GetInt(self._node, 'load-sysfw-data', 0x00000000) + self.dm_data = fdt_util.GetBool(self._node, 'dm-data', False) + if self.dm_data: + self.load_addr_dm_data = fdt_util.GetInt(self._node, 'load-dm-data', 0x00000000) + self.req_dist_name = {'C': 'US', + 'ST': 'TX', + 'L': 'Dallas', + 'O': 'Texas Instruments Incorporated', + 'OU': 'Processors', + 'CN': 'TI Support', + 'emailAddress': 'support@ti.com'} + + def NonCombinedGetCertificate(self, required): + """Generate certificate for legacy boot flow + + Args: + required: True if the data must be present, False if it is OK to + return None + + Returns: + bytes content of the entry, which is the certificate binary for the + provided data + """ + if self.core == 'secure': + if self.countersign: + self.cert_type = 3 + else: + self.cert_type = 2 + self.bootcore = 0 + self.bootcore_opts = 32 + else: + self.cert_type = 1 + self.bootcore = 16 + self.bootcore_opts = 0 + return super().GetCertificate(required=required, type='rom') + + def CombinedGetCertificate(self, required): + """Generate certificate for combined boot flow + + Args: + required: True if the data must be present, False if it is OK to + return None + + Returns: + bytes content of the entry, which is the certificate binary for the + provided data + """ + uniq = self.GetUniqueName() + + self.num_comps = 3 + 
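# Three components are always present (the SBL, SYSFW and SYSFW data + # blobs handled below); sysfw_inner_cert and dm_data each add one more + # component when present. +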
self.sha_type = SHA_OIDS[self.sha] + + # sbl + self.content = fdt_util.GetPhandleList(self._node, 'content-sbl') + input_data_sbl = self.GetContents(required) + if input_data_sbl is None: + return None + + input_fname_sbl = tools.get_output_filename('input.%s' % uniq) + tools.write_file(input_fname_sbl, input_data_sbl) + + indata_sbl = tools.read_file(input_fname_sbl) + self.hashval_sbl = hashlib.sha512(indata_sbl).hexdigest() + self.imagesize_sbl = len(indata_sbl) + + # sysfw + self.content = fdt_util.GetPhandleList(self._node, 'content-sysfw') + input_data_sysfw = self.GetContents(required) + + input_fname_sysfw = tools.get_output_filename('input.%s' % uniq) + tools.write_file(input_fname_sysfw, input_data_sysfw) + + indata_sysfw = tools.read_file(input_fname_sysfw) + self.hashval_sysfw = hashlib.sha512(indata_sysfw).hexdigest() + self.imagesize_sysfw = len(indata_sysfw) + + # sysfw data + self.content = fdt_util.GetPhandleList(self._node, 'content-sysfw-data') + input_data_sysfw_data = self.GetContents(required) + + input_fname_sysfw_data = tools.get_output_filename('input.%s' % uniq) + tools.write_file(input_fname_sysfw_data, input_data_sysfw_data) + + indata_sysfw_data = tools.read_file(input_fname_sysfw_data) + self.hashval_sysfw_data = hashlib.sha512(indata_sysfw_data).hexdigest() + self.imagesize_sysfw_data = len(indata_sysfw_data) + + # sysfw inner cert + self.sysfw_inner_cert_ext_boot_block = "" + self.sysfw_inner_cert_ext_boot_sequence_string = "" + imagesize_sysfw_inner_cert = 0 + if self.sysfw_inner_cert: + self.content = fdt_util.GetPhandleList(self._node, 'content-sysfw-inner-cert') + input_data_sysfw_inner_cert = self.GetContents(required) + + input_fname_sysfw_inner_cert = tools.get_output_filename('input.%s' % uniq) + tools.write_file(input_fname_sysfw_inner_cert, input_data_sysfw_inner_cert) + + indata_sysfw_inner_cert = tools.read_file(input_fname_sysfw_inner_cert) + hashval_sysfw_inner_cert = hashlib.sha512(indata_sysfw_inner_cert).hexdigest() + imagesize_sysfw_inner_cert = len(indata_sysfw_inner_cert) + self.num_comps += 1 + self.sysfw_inner_cert_ext_boot_sequence_string = "sysfw_inner_cert=SEQUENCE:sysfw_inner_cert" + self.sysfw_inner_cert_ext_boot_block = f"""[sysfw_inner_cert] +compType = INTEGER:3 +bootCore = INTEGER:0 +compOpts = INTEGER:0 +destAddr = FORMAT:HEX,OCT:00000000 +compSize = INTEGER:{imagesize_sysfw_inner_cert} +shaType = OID:{self.sha_type} +shaValue = FORMAT:HEX,OCT:{hashval_sysfw_inner_cert}""" + + # dm data + self.dm_data_ext_boot_sequence_string = "" + self.dm_data_ext_boot_block = "" + imagesize_dm_data = 0 + if self.dm_data: + self.content = fdt_util.GetPhandleList(self._node, 'content-dm-data') + input_data_dm_data = self.GetContents(required) + + input_fname_dm_data = tools.get_output_filename('input.%s' % uniq) + tools.write_file(input_fname_dm_data, input_data_dm_data) + + indata_dm_data = tools.read_file(input_fname_dm_data) + hashval_dm_data = hashlib.sha512(indata_dm_data).hexdigest() + imagesize_dm_data = len(indata_dm_data) + self.num_comps += 1 + self.dm_data_ext_boot_sequence_string = "dm_data=SEQUENCE:dm_data" + self.dm_data_ext_boot_block = f"""[dm_data] +compType = INTEGER:17 +bootCore = INTEGER:16 +compOpts = INTEGER:0 +destAddr = FORMAT:HEX,OCT:{self.load_addr_dm_data:08x} +compSize = INTEGER:{imagesize_dm_data} +shaType = OID:{self.sha_type} +shaValue = FORMAT:HEX,OCT:{hashval_dm_data}""" + + self.total_size = self.imagesize_sbl + self.imagesize_sysfw + self.imagesize_sysfw_data + imagesize_sysfw_inner_cert + imagesize_dm_data 
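+ # A rough sketch of what the certificate now describes, assuming all + # optional parts are present: + # [SBL][SYSFW][SYSFW data][inner cert][DM data] + # with a load address, size and SHA-512 digest recorded per component; + # total_size above is the sum of all component sizes.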
+ return super().GetCertificate(required=required, type='rom-combined') + + def GetCertificate(self, required): + """Get the contents of this entry + + Args: + required: True if the data must be present, False if it is OK to + return None + + Returns: + bytes content of the entry, which is the certificate binary for the + provided data + """ + if self.combined: + return self.CombinedGetCertificate(required) + else: + return self.NonCombinedGetCertificate(required) + + def ObtainContents(self): + data = self.data + if data is None: + data = self.GetCertificate(False) + if data is None: + return False + self.SetContents(data) + return True + + def ProcessContents(self): + # The blob may have changed due to WriteSymbols() + data = self.data + return self.ProcessContentsUpdate(data) + + def AddBintools(self, btools): + super().AddBintools(btools) + self.openssl = self.AddBintool(btools, 'openssl') diff --git a/tools/binman/etype/u_boot_spl_bss_pad.py b/tools/binman/etype/u_boot_spl_bss_pad.py index 1ffeb3911fd..4af4045d370 100644 --- a/tools/binman/etype/u_boot_spl_bss_pad.py +++ b/tools/binman/etype/u_boot_spl_bss_pad.py @@ -38,7 +38,7 @@ class Entry_u_boot_spl_bss_pad(Entry_blob): def ObtainContents(self): fname = tools.get_input_filename('spl/u-boot-spl') bss_size = elf.GetSymbolAddress(fname, '__bss_size') - if not bss_size: + if bss_size is None: self.Raise('Expected __bss_size symbol in spl/u-boot-spl') self.SetContents(tools.get_bytes(0, bss_size)) return True diff --git a/tools/binman/etype/u_boot_spl_pubkey_dtb.py b/tools/binman/etype/u_boot_spl_pubkey_dtb.py new file mode 100644 index 00000000000..cb196061de2 --- /dev/null +++ b/tools/binman/etype/u_boot_spl_pubkey_dtb.py @@ -0,0 +1,112 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2023 Weidmueller GmbH +# Written by Lukas Funke <lukas.funke@weidmueller.com> +# +# Entry-type module for 'u-boot-spl-pubkey.dtb' +# + +import tempfile +import os + +from binman.etype.blob_dtb import Entry_blob_dtb + +from dtoc import fdt_util + +from u_boot_pylib import tools + +# This is imported if needed +state = None + +# pylint: disable=C0103 +class Entry_u_boot_spl_pubkey_dtb(Entry_blob_dtb): + """U-Boot SPL device tree including public key + + Properties / Entry arguments: + - key-name-hint: Public key name without extension (.crt). + Default is determined by underlying + bintool (fdt_add_pubkey), usually 'key'. + - algo: (Optional) Algorithm used for signing. Default is determined by + underlying bintool (fdt_add_pubkey), usually 'sha1,rsa2048' + - required: (Optional) If present this indicates that the key must be + verified for the image / configuration to be + considered valid + + The following example shows an image containing an SPL which + is packed together with the dtb. Binman will add a signature + node to the dtb. + + Example node:: + + image { + ... + spl { + filename = "spl.bin" + + u-boot-spl-nodtb { + }; + u-boot-spl-pubkey-dtb { + algo = "sha384,rsa4096"; + required = "conf"; + key-name-hint = "dev"; + }; + }; + ... 
+ } + """ + + def __init__(self, section, etype, node): + # Put this here to allow entry-docs and help to work without libfdt + global state + from binman import state + + super().__init__(section, etype, node) + self.required_props = ['key-name-hint'] + self.fdt_add_pubkey = None + self._algo = fdt_util.GetString(self._node, 'algo') + self._required = fdt_util.GetString(self._node, 'required') + self._key_name_hint = fdt_util.GetString(self._node, 'key-name-hint') + + def ObtainContents(self, fake_size=0): + """Add public key to SPL dtb + + Add public key which is pointed out by + 'key-name-hint' to node 'signature' in the spl-dtb + + This is equivalent to the '-K' option of 'mkimage' + + Args: + fake_size (int): unused + """ + + # We don't pass fake_size upwards because this is currently + # not supported by the blob type + super().ObtainContents() + + with tempfile.NamedTemporaryFile(prefix=os.path.basename( + self.GetFdtEtype()), + dir=tools.get_output_dir())\ + as pubkey_tdb: + tools.write_file(pubkey_tdb.name, self.GetData()) + keyname = tools.get_input_filename(self._key_name_hint + ".crt") + self.fdt_add_pubkey.run(pubkey_tdb.name, + os.path.dirname(keyname), + self._key_name_hint, + self._required, self._algo) + dtb = tools.read_file(pubkey_tdb.name) + self.SetContents(dtb) + state.UpdateFdtContents(self.GetFdtEtype(), dtb) + + return True + + # pylint: disable=R0201,C0116 + def GetDefaultFilename(self): + return 'spl/u-boot-spl-pubkey.dtb' + + # pylint: disable=R0201,C0116 + def GetFdtEtype(self): + return 'u-boot-spl-dtb' + + # pylint: disable=R0201,C0116 + def AddBintools(self, btools): + super().AddBintools(btools) + self.fdt_add_pubkey = self.AddBintool(btools, 'fdt_add_pubkey') diff --git a/tools/binman/etype/u_boot_tpl_bss_pad.py b/tools/binman/etype/u_boot_tpl_bss_pad.py index 29c6a954129..46d2cd58f7e 100644 --- a/tools/binman/etype/u_boot_tpl_bss_pad.py +++ b/tools/binman/etype/u_boot_tpl_bss_pad.py @@ -38,7 +38,7 @@ class Entry_u_boot_tpl_bss_pad(Entry_blob): def ObtainContents(self): fname = tools.get_input_filename('tpl/u-boot-tpl') bss_size = elf.GetSymbolAddress(fname, '__bss_size') - if not bss_size: + if bss_size is None: self.Raise('Expected __bss_size symbol in tpl/u-boot-tpl') self.SetContents(tools.get_bytes(0, bss_size)) return True diff --git a/tools/binman/etype/u_boot_vpl_bss_pad.py b/tools/binman/etype/u_boot_vpl_bss_pad.py index bba38ccf9e9..12b286a7198 100644 --- a/tools/binman/etype/u_boot_vpl_bss_pad.py +++ b/tools/binman/etype/u_boot_vpl_bss_pad.py @@ -38,7 +38,7 @@ class Entry_u_boot_vpl_bss_pad(Entry_blob): def ObtainContents(self): fname = tools.get_input_filename('vpl/u-boot-vpl') bss_size = elf.GetSymbolAddress(fname, '__bss_size') - if not bss_size: + if bss_size is None: self.Raise('Expected __bss_size symbol in vpl/u-boot-vpl') self.SetContents(tools.get_bytes(0, bss_size)) return True diff --git a/tools/binman/etype/x509_cert.py b/tools/binman/etype/x509_cert.py index f80a6ec2d12..d028cfe38cd 100644 --- a/tools/binman/etype/x509_cert.py +++ b/tools/binman/etype/x509_cert.py @@ -31,6 +31,26 @@ class Entry_x509_cert(Entry_collection): def __init__(self, section, etype, node): super().__init__(section, etype, node) self.openssl = None + self.req_dist_name = None + self.cert_type = None + self.bootcore = None + self.bootcore_opts = None + self.load_addr = None + self.sha = None + self.total_size = None + self.num_comps = None + self.sysfw_inner_cert_ext_boot_sequence_string = None + self.dm_data_ext_boot_sequence_string = None + 
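# These fields start out as None and are filled in by the TI etypes + # (ti-secure, ti-secure-rom) before GetCertificate() passes them to the + # openssl bintool as template parameters; see the 'rom' and + # 'rom-combined' branches in GetCertificate() below. +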
self.imagesize_sbl = None + self.hashval_sbl = None + self.load_addr_sysfw = None + self.imagesize_sysfw = None + self.hashval_sysfw = None + self.load_addr_sysfw_data = None + self.imagesize_sysfw_data = None + self.hashval_sysfw_data = None + self.sysfw_inner_cert_ext_boot_block = None + self.dm_data_ext_boot_block = None def ReadNode(self): super().ReadNode() @@ -38,13 +58,16 @@ class Entry_x509_cert(Entry_collection): self._cert_rev = fdt_util.GetInt(self._node, 'cert-revision-int', 0) self.key_fname = self.GetEntryArgsOrProps([ EntryArg('keyfile', str)], required=True)[0] + self.sw_rev = fdt_util.GetInt(self._node, 'sw-rev', 1) - def GetCertificate(self, required): + def GetCertificate(self, required, type='generic'): """Get the contents of this entry Args: required: True if the data must be present, False if it is OK to return None + type: Type of x509 certificate to generate, currently supported types are + 'generic', 'sysfw', 'rom' and 'rom-combined' Returns: bytes content of the entry, which is the signed certificate for the @@ -60,13 +83,61 @@ class Entry_x509_cert(Entry_collection): input_fname = tools.get_output_filename('input.%s' % uniq) config_fname = tools.get_output_filename('config.%s' % uniq) tools.write_file(input_fname, input_data) - stdout = self.openssl.x509_cert( - cert_fname=output_fname, - input_fname=input_fname, - key_fname=self.key_fname, - cn=self._cert_ca, - revision=self._cert_rev, - config_fname=config_fname) + if type == 'generic': + stdout = self.openssl.x509_cert( + cert_fname=output_fname, + input_fname=input_fname, + key_fname=self.key_fname, + cn=self._cert_ca, + revision=self._cert_rev, + config_fname=config_fname) + elif type == 'sysfw': + stdout = self.openssl.x509_cert_sysfw( + cert_fname=output_fname, + input_fname=input_fname, + key_fname=self.key_fname, + config_fname=config_fname, + sw_rev=self.sw_rev, + req_dist_name_dict=self.req_dist_name) + elif type == 'rom': + stdout = self.openssl.x509_cert_rom( + cert_fname=output_fname, + input_fname=input_fname, + key_fname=self.key_fname, + config_fname=config_fname, + sw_rev=self.sw_rev, + req_dist_name_dict=self.req_dist_name, + cert_type=self.cert_type, + bootcore=self.bootcore, + bootcore_opts=self.bootcore_opts, + load_addr=self.load_addr, + sha=self.sha + ) + elif type == 'rom-combined': + stdout = self.openssl.x509_cert_rom_combined( + cert_fname=output_fname, + input_fname=input_fname, + key_fname=self.key_fname, + config_fname=config_fname, + sw_rev=self.sw_rev, + req_dist_name_dict=self.req_dist_name, + load_addr=self.load_addr, + sha=self.sha, + total_size=self.total_size, + num_comps=self.num_comps, + sysfw_inner_cert_ext_boot_sequence_string=self.sysfw_inner_cert_ext_boot_sequence_string, + dm_data_ext_boot_sequence_string=self.dm_data_ext_boot_sequence_string, + imagesize_sbl=self.imagesize_sbl, + hashval_sbl=self.hashval_sbl, + load_addr_sysfw=self.load_addr_sysfw, + imagesize_sysfw=self.imagesize_sysfw, + hashval_sysfw=self.hashval_sysfw, + load_addr_sysfw_data=self.load_addr_sysfw_data, + imagesize_sysfw_data=self.imagesize_sysfw_data, + hashval_sysfw_data=self.hashval_sysfw_data, + sysfw_inner_cert_ext_boot_block=self.sysfw_inner_cert_ext_boot_block, + dm_data_ext_boot_block=self.dm_data_ext_boot_block + ) if stdout is not None: data = tools.read_file(output_fname) else: diff --git a/tools/binman/ftest.py b/tools/binman/ftest.py index 43b4f850a69..1cfa349d38e 100644 --- a/tools/binman/ftest.py +++ b/tools/binman/ftest.py @@ -94,9 +94,13 @@ ROCKCHIP_TPL_DATA = b'rockchip-tpl' TEST_FDT1_DATA = b'fdt1'
TEST_FDT2_DATA = b'test-fdt2' ENV_DATA = b'var1=1\nvar2="2"' +ENCRYPTED_IV_DATA = b'123456' +ENCRYPTED_KEY_DATA = b'abcde' PRE_LOAD_MAGIC = b'UBSH' PRE_LOAD_VERSION = 0x11223344.to_bytes(4, 'big') PRE_LOAD_HDR_SIZE = 0x00001000.to_bytes(4, 'big') +TI_BOARD_CONFIG_DATA = b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +TI_UNSECURE_DATA = b'unsecuredata' # Subdirectory of the input dir to use to put test FDTs TEST_FDT_SUBDIR = 'fdts' @@ -199,6 +203,9 @@ class TestFunctional(unittest.TestCase): shutil.copytree(cls.TestFile('files'), os.path.join(cls._indir, 'files')) + shutil.copytree(cls.TestFile('yaml'), + os.path.join(cls._indir, 'yaml')) + TestFunctional._MakeInputFile('compress', COMPRESS_DATA) TestFunctional._MakeInputFile('compress_big', COMPRESS_DATA_BIG) TestFunctional._MakeInputFile('bl31.bin', ATF_BL31_DATA) @@ -207,6 +214,7 @@ class TestFunctional(unittest.TestCase): TestFunctional._MakeInputFile('fw_dynamic.bin', OPENSBI_DATA) TestFunctional._MakeInputFile('scp.bin', SCP_DATA) TestFunctional._MakeInputFile('rockchip-tpl.bin', ROCKCHIP_TPL_DATA) + TestFunctional._MakeInputFile('ti_unsecure.bin', TI_UNSECURE_DATA) # Add a few .dtb files for testing TestFunctional._MakeInputFile('%s/test-fdt1.dtb' % TEST_FDT_SUBDIR, @@ -226,6 +234,10 @@ class TestFunctional(unittest.TestCase): # Newer OP_TEE file in v1 binary format cls.make_tee_bin('tee.bin') + # test files for encrypted tests + TestFunctional._MakeInputFile('encrypted-file.iv', ENCRYPTED_IV_DATA) + TestFunctional._MakeInputFile('encrypted-file.key', ENCRYPTED_KEY_DATA) + cls.comp_bintools = {} for name in COMP_BINTOOLS: cls.comp_bintools[name] = bintool.Bintool.create(name) @@ -347,7 +359,7 @@ class TestFunctional(unittest.TestCase): use_expanded=False, verbosity=None, allow_missing=False, allow_fake_blobs=False, extra_indirs=None, threads=None, test_section_timeout=False, update_fdt_in_elf=None, - force_missing_bintools='', ignore_missing=False): + force_missing_bintools='', ignore_missing=False, output_dir=None): """Run binman with a given test file Args: @@ -378,6 +390,7 @@ class TestFunctional(unittest.TestCase): update_fdt_in_elf: Value to pass with --update-fdt-in-elf=xxx force_missing_bintools (str): comma-separated list of bintools to regard as missing + output_dir: Specific output directory to use for the image, passed with -O Returns: int return code, 0 on success @@ -424,6 +437,8 @@ class TestFunctional(unittest.TestCase): if extra_indirs: for indir in extra_indirs: args += ['-I', indir] + if output_dir: + args += ['-O', output_dir] return self._DoBinman(*args) def _SetupDtb(self, fname, outfile='u-boot.dtb'): @@ -639,6 +654,16 @@ class TestFunctional(unittest.TestCase): tools.read_file(cls.ElfTestFile(src_fname))) @classmethod + def _SetupPmuFwlElf(cls, src_fname='bss_data'): + """Set up an ELF file to use as the PMU firmware + + Args: + src_fname: Filename of ELF file to use as the PMU firmware + """ + TestFunctional._MakeInputFile('pmu-firmware.elf', + tools.read_file(cls.ElfTestFile(src_fname))) + + @classmethod def _SetupDescriptor(cls): with open(cls.TestFile('descriptor.bin'), 'rb') as fd: TestFunctional._MakeInputFile('descriptor.bin', fd.read()) @@ -1103,6 +1128,7 @@ class TestFunctional(unittest.TestCase): def testPackZeroOffset(self): """Test that an entry at offset 0 is not given a new offset""" + self._SetupSplElf() with self.assertRaises(ValueError) as e: self._DoTestFile('025_pack_zero_size.dts') self.assertIn("Node '/binman/u-boot-spl': Offset 0x0 (0) overlaps " @@ -1116,6 +1142,7 @@ class
TestFunctional(unittest.TestCase): def testPackX86RomNoSize(self): """Test that the end-at-4gb property requires a size property""" + self._SetupSplElf() with self.assertRaises(ValueError) as e: self._DoTestFile('027_pack_4gb_no_size.dts') self.assertIn("Image '/binman': Section size must be provided when " @@ -1124,6 +1151,7 @@ class TestFunctional(unittest.TestCase): def test4gbAndSkipAtStartTogether(self): """Test that the end-at-4gb and skip-at-size property can't be used together""" + self._SetupSplElf() with self.assertRaises(ValueError) as e: self._DoTestFile('098_4gb_and_skip_at_start_together.dts') self.assertIn("Image '/binman': Provide either 'end-at-4gb' or " @@ -1131,6 +1159,7 @@ class TestFunctional(unittest.TestCase): def testPackX86RomOutside(self): """Test that the end-at-4gb property checks for offset boundaries""" + self._SetupSplElf() with self.assertRaises(ValueError) as e: self._DoTestFile('028_pack_4gb_outside.dts') self.assertIn("Node '/binman/u-boot': Offset 0x0 (0) size 0x4 (4) " @@ -1423,6 +1452,7 @@ class TestFunctional(unittest.TestCase): def testPackUbootSplMicrocode(self): """Test that x86 microcode can be handled correctly in SPL""" + self._SetupSplElf() self._PackUbootSplMicrocode('049_x86_ucode_spl.dts') def testPackUbootSplMicrocodeReorder(self): @@ -1442,6 +1472,7 @@ class TestFunctional(unittest.TestCase): def testSplDtb(self): """Test that an image with spl/u-boot-spl.dtb can be created""" + self._SetupSplElf() data = self._DoReadFile('051_u_boot_spl_dtb.dts') self.assertEqual(U_BOOT_SPL_DTB_DATA, data[:len(U_BOOT_SPL_DTB_DATA)]) @@ -1452,7 +1483,7 @@ class TestFunctional(unittest.TestCase): self.assertEqual(U_BOOT_SPL_NODTB_DATA, data[:len(U_BOOT_SPL_NODTB_DATA)]) def checkSymbols(self, dts, base_data, u_boot_offset, entry_args=None, - use_expanded=False): + use_expanded=False, no_write_symbols=False): """Check the image contains the expected symbol values Args: @@ -1481,9 +1512,14 @@ class TestFunctional(unittest.TestCase): sym_values = struct.pack('<LLQLL', elf.BINMAN_SYM_MAGIC_VALUE, 0x00, u_boot_offset + len(U_BOOT_DATA), 0x10 + u_boot_offset, 0x04) - expected = (sym_values + base_data[24:] + - tools.get_bytes(0xff, 1) + U_BOOT_DATA + sym_values + - base_data[24:]) + if no_write_symbols: + expected = (base_data + + tools.get_bytes(0xff, 0x38 - len(base_data)) + + U_BOOT_DATA + base_data) + else: + expected = (sym_values + base_data[24:] + + tools.get_bytes(0xff, 1) + U_BOOT_DATA + sym_values + + base_data[24:]) self.assertEqual(expected, data) def testSymbols(self): @@ -1957,6 +1993,8 @@ class TestFunctional(unittest.TestCase): def testUpdateFdtAll(self): """Test that all device trees are updated with offset/size info""" + self._SetupSplElf() + self._SetupTplElf() data = self._DoReadFileRealDtb('082_fdt_update_all.dts') base_expected = { @@ -3279,6 +3317,8 @@ class TestFunctional(unittest.TestCase): def testUpdateFdtAllRepack(self): """Test that all device trees are updated with offset/size info""" + self._SetupSplElf() + self._SetupTplElf() data = self._DoReadFileRealDtb('134_fdt_update_all_repack.dts') SECTION_SIZE = 0x300 DTB_SIZE = 602 @@ -3732,6 +3772,7 @@ class TestFunctional(unittest.TestCase): def testMkimage(self): """Test using mkimage to build an image""" + self._SetupSplElf() data = self._DoReadFile('156_mkimage.dts') # Just check that the data appears in the file somewhere @@ -3739,6 +3780,7 @@ class TestFunctional(unittest.TestCase): def testMkimageMissing(self): """Test that binman still produces an image if mkimage is missing""" + 
self._SetupSplElf() with test_util.capture_sys_output() as (_, stderr): self._DoTestFile('156_mkimage.dts', force_missing_bintools='mkimage') @@ -3851,6 +3893,7 @@ class TestFunctional(unittest.TestCase): def testSimpleFit(self): """Test an image with a FIT inside""" + self._SetupSplElf() data = self._DoReadFile('161_fit.dts') self.assertEqual(U_BOOT_DATA, data[:len(U_BOOT_DATA)]) self.assertEqual(U_BOOT_NODTB_DATA, data[-len(U_BOOT_NODTB_DATA):]) @@ -5370,6 +5413,7 @@ fdt fdtmap Extract the devicetree blob from the fdtmap def testFitSubentryHashSubnode(self): """Test an image with a FIT inside""" + self._SetupSplElf() data, _, _, out_dtb_name = self._DoReadFileDtb( '221_fit_subentry_hash.dts', use_real_dtb=True, update_dtb=True) @@ -5619,41 +5663,61 @@ fdt fdtmap Extract the devicetree blob from the fdtmap def testPreLoad(self): """Test an image with a pre-load header""" entry_args = { - 'pre-load-key-path': '.', + 'pre-load-key-path': os.path.join(self._binman_dir, 'test'), } - data, _, _, _ = self._DoReadFileDtb('230_pre_load.dts', - entry_args=entry_args) - self.assertEqual(PRE_LOAD_MAGIC, data[:len(PRE_LOAD_MAGIC)]) - self.assertEqual(PRE_LOAD_VERSION, data[4:4 + len(PRE_LOAD_VERSION)]) - self.assertEqual(PRE_LOAD_HDR_SIZE, data[8:8 + len(PRE_LOAD_HDR_SIZE)]) - data = self._DoReadFile('230_pre_load.dts') + data = self._DoReadFileDtb( + '230_pre_load.dts', entry_args=entry_args, + extra_indirs=[os.path.join(self._binman_dir, 'test')])[0] self.assertEqual(PRE_LOAD_MAGIC, data[:len(PRE_LOAD_MAGIC)]) self.assertEqual(PRE_LOAD_VERSION, data[4:4 + len(PRE_LOAD_VERSION)]) self.assertEqual(PRE_LOAD_HDR_SIZE, data[8:8 + len(PRE_LOAD_HDR_SIZE)]) + def testPreLoadNoKey(self): + """Test an image with a pre-load header with missing key""" + with self.assertRaises(FileNotFoundError) as exc: + self._DoReadFile('230_pre_load.dts') + self.assertIn("No such file or directory: 'dev.key'", + str(exc.exception)) + def testPreLoadPkcs(self): """Test an image with a pre-load header with padding pkcs""" - data = self._DoReadFile('231_pre_load_pkcs.dts') + entry_args = { + 'pre-load-key-path': os.path.join(self._binman_dir, 'test'), + } + data = self._DoReadFileDtb('231_pre_load_pkcs.dts', + entry_args=entry_args)[0] self.assertEqual(PRE_LOAD_MAGIC, data[:len(PRE_LOAD_MAGIC)]) self.assertEqual(PRE_LOAD_VERSION, data[4:4 + len(PRE_LOAD_VERSION)]) self.assertEqual(PRE_LOAD_HDR_SIZE, data[8:8 + len(PRE_LOAD_HDR_SIZE)]) def testPreLoadPss(self): """Test an image with a pre-load header with padding pss""" - data = self._DoReadFile('232_pre_load_pss.dts') + entry_args = { + 'pre-load-key-path': os.path.join(self._binman_dir, 'test'), + } + data = self._DoReadFileDtb('232_pre_load_pss.dts', + entry_args=entry_args)[0] self.assertEqual(PRE_LOAD_MAGIC, data[:len(PRE_LOAD_MAGIC)]) self.assertEqual(PRE_LOAD_VERSION, data[4:4 + len(PRE_LOAD_VERSION)]) self.assertEqual(PRE_LOAD_HDR_SIZE, data[8:8 + len(PRE_LOAD_HDR_SIZE)]) def testPreLoadInvalidPadding(self): """Test an image with a pre-load header with an invalid padding""" + entry_args = { + 'pre-load-key-path': os.path.join(self._binman_dir, 'test'), + } with self.assertRaises(ValueError) as e: - data = self._DoReadFile('233_pre_load_invalid_padding.dts') + self._DoReadFileDtb('233_pre_load_invalid_padding.dts', + entry_args=entry_args) def testPreLoadInvalidSha(self): """Test an image with a pre-load header with an invalid hash""" + entry_args = { + 'pre-load-key-path': os.path.join(self._binman_dir, 'test'), + } with self.assertRaises(ValueError) as e: - data =
self._DoReadFile('234_pre_load_invalid_sha.dts') + self._DoReadFileDtb('234_pre_load_invalid_sha.dts', + entry_args=entry_args) def testPreLoadInvalidAlgo(self): """Test an image with a pre-load header with an invalid algo""" @@ -5662,8 +5726,12 @@ fdt fdtmap Extract the devicetree blob from the fdtmap def testPreLoadInvalidKey(self): """Test an image with a pre-load header with an invalid key""" + entry_args = { + 'pre-load-key-path': os.path.join(self._binman_dir, 'test'), + } with self.assertRaises(ValueError) as e: - data = self._DoReadFile('236_pre_load_invalid_key.dts') + data = self._DoReadFileDtb('236_pre_load_invalid_key.dts', + entry_args=entry_args) def _CheckSafeUniqueNames(self, *images): """Check all entries of given images for unsafe unique names""" @@ -5888,6 +5956,7 @@ fdt fdtmap Extract the devicetree blob from the fdtmap def testMkimageImagename(self): """Test using mkimage with -n holding the data too""" + self._SetupSplElf() data = self._DoReadFile('242_mkimage_name.dts') # Check that the data appears in the file somewhere @@ -5905,6 +5974,7 @@ fdt fdtmap Extract the devicetree blob from the fdtmap def testMkimageImage(self): """Test using mkimage with -n holding the data too""" + self._SetupSplElf() data = self._DoReadFile('243_mkimage_image.dts') # Check that the data appears in the file somewhere @@ -5925,6 +5995,7 @@ fdt fdtmap Extract the devicetree blob from the fdtmap def testMkimageImageNoContent(self): """Test using mkimage with -n and no data""" + self._SetupSplElf() with self.assertRaises(ValueError) as exc: self._DoReadFile('244_mkimage_image_no_content.dts') self.assertIn('Could not complete processing of contents', @@ -5932,6 +6003,7 @@ fdt fdtmap Extract the devicetree blob from the fdtmap def testMkimageImageBad(self): """Test using mkimage with imagename node and data-to-imagename""" + self._SetupSplElf() with self.assertRaises(ValueError) as exc: self._DoReadFile('245_mkimage_image_bad.dts') self.assertIn('Cannot use both imagename node and data-to-imagename', @@ -5947,6 +6019,7 @@ fdt fdtmap Extract the devicetree blob from the fdtmap def testMkimageCollection(self): """Test using a collection referring to an entry in a mkimage entry""" + self._SetupSplElf() data = self._DoReadFile('247_mkimage_coll.dts') expect = U_BOOT_SPL_DATA + U_BOOT_DATA self.assertEqual(expect, data[:len(expect)]) @@ -6032,6 +6105,8 @@ fdt fdtmap Extract the devicetree blob from the fdtmap def testMkimageMultipleDataFiles(self): """Test passing multiple files to mkimage in a mkimage entry""" + self._SetupSplElf() + self._SetupTplElf() data = self._DoReadFile('252_mkimage_mult_data.dts') # Size of files are packed in their 4B big-endian format expect = struct.pack('>I', len(U_BOOT_TPL_DATA)) @@ -6046,8 +6121,42 @@ fdt fdtmap Extract the devicetree blob from the fdtmap expect += U_BOOT_SPL_DATA self.assertEqual(expect, data[-len(expect):]) + def testMkimageMultipleExpanded(self): + """Test passing multiple files to mkimage in a mkimage entry""" + self._SetupSplElf() + self._SetupTplElf() + entry_args = { + 'spl-bss-pad': 'y', + 'spl-dtb': 'y', + } + data = self._DoReadFileDtb('252_mkimage_mult_data.dts', + use_expanded=True, entry_args=entry_args)[0] + pad_len = 10 + tpl_expect = U_BOOT_TPL_DATA + spl_expect = U_BOOT_SPL_NODTB_DATA + tools.get_bytes(0, pad_len) + spl_expect += U_BOOT_SPL_DTB_DATA + + content = data[0x40:] + lens = struct.unpack('>III', content[:12]) + + # Size of files are packed in their 4B big-endian format + # Size info is always followed by a 4B zero value. 
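+ # A sketch of the layout being verified here (offsets assume the + # 0x40-byte legacy mkimage header skipped above): + # 0x40: size(tpl) size(spl) 0x00000000, as three '>I' values + # 0x4c: tpl data, zero padding to a 4-byte boundary, then spl data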
+ self.assertEqual(len(tpl_expect), lens[0]) + self.assertEqual(len(spl_expect), lens[1]) + self.assertEqual(0, lens[2]) + + rest = content[12:] + self.assertEqual(tpl_expect, rest[:len(tpl_expect)]) + + rest = rest[len(tpl_expect):] + align_pad = len(tpl_expect) % 4 + self.assertEqual(tools.get_bytes(0, align_pad), rest[:align_pad]) + rest = rest[align_pad:] + self.assertEqual(spl_expect, rest) + def testMkimageMultipleNoContent(self): """Test passing multiple data files to mkimage with one data file having no content""" + self._SetupSplElf() with self.assertRaises(ValueError) as exc: self._DoReadFile('253_mkimage_mult_no_content.dts') self.assertIn('Could not complete processing of contents', @@ -6055,6 +6164,7 @@ fdt fdtmap Extract the devicetree blob from the fdtmap def testMkimageFilename(self): """Test using mkimage to build a binary with a filename""" + self._SetupSplElf() retcode = self._DoTestFile('254_mkimage_filename.dts') self.assertEqual(0, retcode) fname = tools.get_output_filename('mkimage-test.bin') @@ -6107,7 +6217,7 @@ fdt fdtmap Extract the devicetree blob from the fdtmap str(e.exception)) def testSymlink(self): - """Test that image files can be named""" + """Test that image files can be symlinked""" retcode = self._DoTestFile('259_symlink.dts', debug=True, map=True) self.assertEqual(0, retcode) image = control.images['test_image'] @@ -6116,6 +6226,17 @@ fdt fdtmap Extract the devicetree blob from the fdtmap self.assertTrue(os.path.islink(sname)) self.assertEqual(os.readlink(sname), fname) + def testSymlinkOverwrite(self): + """Test that symlinked images can be overwritten""" + testdir = TestFunctional._MakeInputDir('symlinktest') + self._DoTestFile('259_symlink.dts', debug=True, map=True, output_dir=testdir) + # build the same image again in the same directory so that existing symlink is present + self._DoTestFile('259_symlink.dts', debug=True, map=True, output_dir=testdir) + fname = tools.get_output_filename('test_image.bin') + sname = tools.get_output_filename('symlink_to_test.bin') + self.assertTrue(os.path.islink(sname)) + self.assertEqual(os.readlink(sname), fname) + def testSymbolsElf(self): """Test binman can assign symbols embedded in an ELF file""" if not elf.ELF_TOOLS: @@ -6529,6 +6650,7 @@ fdt fdtmap Extract the devicetree blob from the fdtmap def testReplaceFitSibling(self): """Test an image with a FIT inside where we replace its sibling""" + self._SetupSplElf() fname = TestFunctional._MakeInputFile('once', b'available once') self._DoReadFileRealDtb('277_replace_fit_sibling.dts') os.remove(fname) @@ -6577,18 +6699,18 @@ fdt fdtmap Extract the devicetree blob from the fdtmap def testPackRockchipTpl(self): """Test that an image with a Rockchip TPL binary can be created""" - data = self._DoReadFile('277_rockchip_tpl.dts') + data = self._DoReadFile('291_rockchip_tpl.dts') self.assertEqual(ROCKCHIP_TPL_DATA, data[:len(ROCKCHIP_TPL_DATA)]) def testMkimageMissingBlobMultiple(self): """Test missing blob with mkimage entry and multiple-data-files""" with test_util.capture_sys_output() as (stdout, stderr): - self._DoTestFile('278_mkimage_missing_multiple.dts', allow_missing=True) + self._DoTestFile('292_mkimage_missing_multiple.dts', allow_missing=True) err = stderr.getvalue() self.assertIn("is missing external blobs and is non-functional", err) with self.assertRaises(ValueError) as e: - self._DoTestFile('278_mkimage_missing_multiple.dts', allow_missing=False) + self._DoTestFile('292_mkimage_missing_multiple.dts', allow_missing=False) self.assertIn("not found in input 
path", str(e.exception)) def _PrepareSignEnv(self, dts='280_fit_sign.dts'): @@ -6603,7 +6725,7 @@ fdt fdtmap Extract the devicetree blob from the fdtmap Private key DTB """ - + self._SetupSplElf() data = self._DoReadFileRealDtb(dts) updated_fname = tools.get_output_filename('image-updated.bin') tools.write_file(updated_fname, data) @@ -6676,6 +6798,294 @@ fdt fdtmap Extract the devicetree blob from the fdtmap ['fit']) self.assertIn("Node '/fit': Missing tool: 'mkimage'", str(e.exception)) + def testSymbolNoWrite(self): + """Test disabling of symbol writing""" + self._SetupSplElf() + self.checkSymbols('282_symbols_disable.dts', U_BOOT_SPL_DATA, 0x1c, + no_write_symbols=True) + + def testSymbolNoWriteExpanded(self): + """Test disabling of symbol writing in expanded entries""" + entry_args = { + 'spl-dtb': '1', + } + self.checkSymbols('282_symbols_disable.dts', U_BOOT_SPL_NODTB_DATA + + U_BOOT_SPL_DTB_DATA, 0x38, + entry_args=entry_args, use_expanded=True, + no_write_symbols=True) + + def testMkimageSpecial(self): + """Test mkimage ignores special hash-1 node""" + data = self._DoReadFile('283_mkimage_special.dts') + + # Just check that the data appears in the file somewhere + self.assertIn(U_BOOT_DATA, data) + + def testFitFdtList(self): + """Test an image with an FIT with the fit,fdt-list-val option""" + entry_args = { + 'default-dt': 'test-fdt2', + } + data = self._DoReadFileDtb( + '284_fit_fdt_list.dts', + entry_args=entry_args, + extra_indirs=[os.path.join(self._indir, TEST_FDT_SUBDIR)])[0] + self.assertEqual(U_BOOT_NODTB_DATA, data[-len(U_BOOT_NODTB_DATA):]) + fit_data = data[len(U_BOOT_DATA):-len(U_BOOT_NODTB_DATA)] + + def testSplEmptyBss(self): + """Test an expanded SPL with a zero-size BSS""" + # ELF file with a '__bss_size' symbol + self._SetupSplElf(src_fname='bss_data_zero') + + entry_args = { + 'spl-bss-pad': 'y', + 'spl-dtb': 'y', + } + data = self._DoReadFileDtb('285_spl_expand.dts', + use_expanded=True, entry_args=entry_args)[0] + + def testTemplate(self): + """Test using a template""" + TestFunctional._MakeInputFile('vga2.bin', b'#' + VGA_DATA) + data = self._DoReadFile('286_template.dts') + first = U_BOOT_DATA + VGA_DATA + U_BOOT_DTB_DATA + second = U_BOOT_DATA + b'#' + VGA_DATA + U_BOOT_DTB_DATA + self.assertEqual(U_BOOT_IMG_DATA + first + second, data) + + def testTemplateBlobMulti(self): + """Test using a template with 'multiple-images' enabled""" + TestFunctional._MakeInputFile('my-blob.bin', b'blob') + TestFunctional._MakeInputFile('my-blob2.bin', b'other') + retcode = self._DoTestFile('287_template_multi.dts') + + self.assertEqual(0, retcode) + image = control.images['image'] + image_fname = tools.get_output_filename('my-image.bin') + data = tools.read_file(image_fname) + self.assertEqual(b'blob@@@@other', data) + + def testTemplateFit(self): + """Test using a template in a FIT""" + fit_data = self._DoReadFile('288_template_fit.dts') + fname = os.path.join(self._indir, 'fit_data.fit') + tools.write_file(fname, fit_data) + out = tools.run('dumpimage', '-l', fname) + + def testTemplateSection(self): + """Test using a template in a section (not at top level)""" + TestFunctional._MakeInputFile('vga2.bin', b'#' + VGA_DATA) + data = self._DoReadFile('289_template_section.dts') + first = U_BOOT_DATA + VGA_DATA + U_BOOT_DTB_DATA + second = U_BOOT_DATA + b'#' + VGA_DATA + U_BOOT_DTB_DATA + self.assertEqual(U_BOOT_IMG_DATA + first + second + first, data) + + def testMkimageSymbols(self): + """Test using mkimage to build an image with symbols in it""" + 
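# The '<LLQLL' blocks unpacked below follow the sym_values layout used + # by checkSymbols() earlier in this file: the asserts check the magic + # value, the image base, spl2's offset, U-Boot's base-adjusted + # image-pos and U-Boot's size. +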
self._SetupSplElf('u_boot_binman_syms') + data = self._DoReadFile('290_mkimage_sym.dts') + + image = control.images['image'] + entries = image.GetEntries() + self.assertIn('u-boot', entries) + u_boot = entries['u-boot'] + + mkim = entries['mkimage'] + mkim_entries = mkim.GetEntries() + self.assertIn('u-boot-spl', mkim_entries) + spl = mkim_entries['u-boot-spl'] + self.assertIn('u-boot-spl2', mkim_entries) + spl2 = mkim_entries['u-boot-spl2'] + + # skip the mkimage header and the area sizes + mk_data = data[mkim.offset + 0x40:] + size, term = struct.unpack('>LL', mk_data[:8]) + + # There should be only one image, so check that the zero terminator is + # present + self.assertEqual(0, term) + + content = mk_data[8:8 + size] + + # The image should contain the symbols from u_boot_binman_syms.c + # Note that image_pos is adjusted by the base address of the image, + # which is 0x10 in our test image + spl_data = content[:0x18] + content = content[0x1b:] + + # After the header is a table of offsets for each image. There should + # only be one image, then a 0 terminator, so figure out the real start + # of the image data + base = 0x40 + 8 + + # Check symbols in both u-boot-spl and u-boot-spl2 + for i in range(2): + vals = struct.unpack('<LLQLL', spl_data) + + # The image should contain the symbols from u_boot_binman_syms.c + # Note that image_pos is adjusted by the base address of the image, + # which is 0x10 in our 'u_boot_binman_syms' test image + self.assertEqual(elf.BINMAN_SYM_MAGIC_VALUE, vals[0]) + self.assertEqual(base, vals[1]) + self.assertEqual(spl2.offset, vals[2]) + # figure out the internal positions of its components + self.assertEqual(0x10 + u_boot.image_pos, vals[3]) + + # Check that spl and spl2 are actually at the indicated positions + self.assertEqual( + elf.BINMAN_SYM_MAGIC_VALUE, + struct.unpack('<I', data[spl.image_pos:spl.image_pos + 4])[0]) + self.assertEqual( + elf.BINMAN_SYM_MAGIC_VALUE, + struct.unpack('<I', data[spl2.image_pos:spl2.image_pos + 4])[0]) + + self.assertEqual(len(U_BOOT_DATA), vals[4]) + + # Move to next + spl_data = content[:0x18] + + def testTIBoardConfig(self): + """Test that a schema validated board config file can be generated""" + data = self._DoReadFile('293_ti_board_cfg.dts') + self.assertEqual(TI_BOARD_CONFIG_DATA, data) + + def testTIBoardConfigCombined(self): + """Test that a schema validated combined board config file can be generated""" + data = self._DoReadFile('294_ti_board_cfg_combined.dts') + configlen_noheader = TI_BOARD_CONFIG_DATA * 4 + self.assertGreater(len(data), len(configlen_noheader)) + + def testTIBoardConfigNoDataType(self): + """Test that error is thrown when data type is not supported""" + with self.assertRaises(ValueError) as e: + data = self._DoReadFile('295_ti_board_cfg_no_type.dts') + self.assertIn("Schema validation error", str(e.exception)) + + def testPackTiSecure(self): + """Test that an image with a TI secured binary can be created""" + keyfile = self.TestFile('key.key') + entry_args = { + 'keyfile': keyfile, + } + data = self._DoReadFileDtb('296_ti_secure.dts', + entry_args=entry_args)[0] + self.assertGreater(len(data), len(TI_UNSECURE_DATA)) + + def testPackTiSecureMissingTool(self): + """Test that an image with a TI secured binary (non-functional) can be created + when openssl is missing""" + keyfile = self.TestFile('key.key') + entry_args = { + 'keyfile': keyfile, + } + with test_util.capture_sys_output() as (_, stderr): + self._DoTestFile('296_ti_secure.dts', + force_missing_bintools='openssl', + entry_args=entry_args) + err
= stderr.getvalue() + self.assertRegex(err, "Image 'image'.*missing bintools.*: openssl") + + def testPackTiSecureROM(self): + """Test that a ROM image with a TI secured binary can be created""" + keyfile = self.TestFile('key.key') + entry_args = { + 'keyfile': keyfile, + } + data = self._DoReadFileDtb('297_ti_secure_rom.dts', + entry_args=entry_args)[0] + data_a = self._DoReadFileDtb('299_ti_secure_rom_a.dts', + entry_args=entry_args)[0] + data_b = self._DoReadFileDtb('300_ti_secure_rom_b.dts', + entry_args=entry_args)[0] + self.assertGreater(len(data), len(TI_UNSECURE_DATA)) + self.assertGreater(len(data_a), len(TI_UNSECURE_DATA)) + self.assertGreater(len(data_b), len(TI_UNSECURE_DATA)) + + def testPackTiSecureROMCombined(self): + """Test that a ROM image with a TI secured binary can be created""" + keyfile = self.TestFile('key.key') + entry_args = { + 'keyfile': keyfile, + } + data = self._DoReadFileDtb('298_ti_secure_rom_combined.dts', + entry_args=entry_args)[0] + self.assertGreater(len(data), len(TI_UNSECURE_DATA)) + + def testEncryptedNoAlgo(self): + """Test encrypted node with missing required properties""" + with self.assertRaises(ValueError) as e: + self._DoReadFileDtb('301_encrypted_no_algo.dts') + self.assertIn( + "Node '/binman/fit/images/u-boot/encrypted': 'encrypted' entry is missing properties: algo iv-filename", + str(e.exception)) + + def testEncryptedInvalidIvfile(self): + """Test encrypted node with invalid iv file""" + with self.assertRaises(ValueError) as e: + self._DoReadFileDtb('302_encrypted_invalid_iv_file.dts') + self.assertIn("Filename 'invalid-iv-file' not found in input path", + str(e.exception)) + + def testEncryptedMissingKey(self): + """Test encrypted node with missing key properties""" + with self.assertRaises(ValueError) as e: + self._DoReadFileDtb('303_encrypted_missing_key.dts') + self.assertIn( + "Node '/binman/fit/images/u-boot/encrypted': Provide either 'key-filename' or 'key-source'", + str(e.exception)) + + def testEncryptedKeySource(self): + """Test encrypted node with key-source property""" + data = self._DoReadFileDtb('304_encrypted_key_source.dts')[0] + + dtb = fdt.Fdt.FromData(data) + dtb.Scan() + + node = dtb.GetNode('/images/u-boot/cipher') + self.assertEqual('algo-name', node.props['algo'].value) + self.assertEqual('key-source-value', node.props['key-source'].value) + self.assertEqual(ENCRYPTED_IV_DATA, + tools.to_bytes(''.join(node.props['iv'].value))) + self.assertNotIn('key', node.props) + + def testEncryptedKeyFile(self): + """Test encrypted node with key-filename property""" + data = self._DoReadFileDtb('305_encrypted_key_file.dts')[0] + + dtb = fdt.Fdt.FromData(data) + dtb.Scan() + + node = dtb.GetNode('/images/u-boot/cipher') + self.assertEqual('algo-name', node.props['algo'].value) + self.assertEqual(ENCRYPTED_IV_DATA, + tools.to_bytes(''.join(node.props['iv'].value))) + self.assertEqual(ENCRYPTED_KEY_DATA, + tools.to_bytes(''.join(node.props['key'].value))) + self.assertNotIn('key-source', node.props) + + + def testSplPubkeyDtb(self): + """Test u_boot_spl_pubkey_dtb etype""" + data = tools.read_file(self.TestFile("key.pem")) + self._MakeInputFile("key.crt", data) + self._DoReadFileRealDtb('306_spl_pubkey_dtb.dts') + image = control.images['image'] + entries = image.GetEntries() + dtb_entry = entries['u-boot-spl-pubkey-dtb'] + dtb_data = dtb_entry.GetData() + dtb = fdt.Fdt.FromData(dtb_data) + dtb.Scan() + + signature_node = dtb.GetNode('/signature') + self.assertIsNotNone(signature_node) + key_node = 
signature_node.FindNode("key-key") + self.assertIsNotNone(key_node) + self.assertEqual(fdt_util.GetString(key_node, "required"), + "conf") + self.assertEqual(fdt_util.GetString(key_node, "algo"), + "sha384,rsa4096") + self.assertEqual(fdt_util.GetString(key_node, "key-name-hint"), + "key") if __name__ == "__main__": unittest.main() diff --git a/tools/binman/image.py b/tools/binman/image.py index 8ebf71d61a8..e77b5d0d97c 100644 --- a/tools/binman/image.py +++ b/tools/binman/image.py @@ -182,6 +182,8 @@ class Image(section.Entry_section): # Create symlink to file if symlink given if self._symlink is not None: sname = tools.get_output_filename(self._symlink) + if os.path.islink(sname): + os.remove(sname) os.symlink(fname, sname) def WriteMap(self): diff --git a/tools/binman/state.py b/tools/binman/state.py index 3e78cf34300..45bae40c525 100644 --- a/tools/binman/state.py +++ b/tools/binman/state.py @@ -385,8 +385,8 @@ def SetInt(node, prop, value, for_repack=False): for_repack: True is this property is only needed for repacking """ for n in GetUpdateNodes(node, for_repack): - tout.detail("File %s: Update node '%s' prop '%s' to %#x" % - (n.GetFdt().name, n.path, prop, value)) + tout.debug("File %s: Update node '%s' prop '%s' to %#x" % + (n.GetFdt().name, n.path, prop, value)) n.SetInt(prop, value) def CheckAddHashProp(node): diff --git a/tools/binman/test/230_pre_load.dts b/tools/binman/test/230_pre_load.dts index c0c24729f82..e6d9ef40c6c 100644 --- a/tools/binman/test/230_pre_load.dts +++ b/tools/binman/test/230_pre_load.dts @@ -10,7 +10,7 @@ pre-load { content = <&image>; algo-name = "sha256,rsa2048"; - key-name = "tools/binman/test/230_dev.key"; + key-name = "dev.key"; header-size = <4096>; version = <0x11223344>; }; diff --git a/tools/binman/test/231_pre_load_pkcs.dts b/tools/binman/test/231_pre_load_pkcs.dts index 530638c56b6..66268cdb212 100644 --- a/tools/binman/test/231_pre_load_pkcs.dts +++ b/tools/binman/test/231_pre_load_pkcs.dts @@ -11,7 +11,7 @@ content = <&image>; algo-name = "sha256,rsa2048"; padding-name = "pkcs-1.5"; - key-name = "tools/binman/test/230_dev.key"; + key-name = "dev.key"; header-size = <4096>; version = <0x11223344>; }; diff --git a/tools/binman/test/232_pre_load_pss.dts b/tools/binman/test/232_pre_load_pss.dts index 371e0fdb408..3008d3f4649 100644 --- a/tools/binman/test/232_pre_load_pss.dts +++ b/tools/binman/test/232_pre_load_pss.dts @@ -11,7 +11,7 @@ content = <&image>; algo-name = "sha256,rsa2048"; padding-name = "pss"; - key-name = "tools/binman/test/230_dev.key"; + key-name = "dev.key"; header-size = <4096>; version = <0x11223344>; }; diff --git a/tools/binman/test/233_pre_load_invalid_padding.dts b/tools/binman/test/233_pre_load_invalid_padding.dts index 9cb4cb570bc..bbe2d1ba869 100644 --- a/tools/binman/test/233_pre_load_invalid_padding.dts +++ b/tools/binman/test/233_pre_load_invalid_padding.dts @@ -11,7 +11,7 @@ content = <&image>; algo-name = "sha256,rsa2048"; padding-name = "padding"; - key-name = "tools/binman/test/230_dev.key"; + key-name = "dev.key"; header-size = <4096>; version = <1>; }; diff --git a/tools/binman/test/234_pre_load_invalid_sha.dts b/tools/binman/test/234_pre_load_invalid_sha.dts index 8ded98df533..29afd2e37e4 100644 --- a/tools/binman/test/234_pre_load_invalid_sha.dts +++ b/tools/binman/test/234_pre_load_invalid_sha.dts @@ -11,7 +11,7 @@ content = <&image>; algo-name = "sha2560,rsa2048"; padding-name = "pkcs-1.5"; - key-name = "tools/binman/test/230_dev.key"; + key-name = "dev.key"; header-size = <4096>; version = <1>; }; diff 
--git a/tools/binman/test/235_pre_load_invalid_algo.dts b/tools/binman/test/235_pre_load_invalid_algo.dts index 145286caa3e..d6f6dd20cd9 100644 --- a/tools/binman/test/235_pre_load_invalid_algo.dts +++ b/tools/binman/test/235_pre_load_invalid_algo.dts @@ -11,7 +11,7 @@ content = <&image>; algo-name = "sha256,rsa20480"; padding-name = "pkcs-1.5"; - key-name = "tools/binman/test/230_dev.key"; + key-name = "dev.key"; header-size = <4096>; version = <1>; }; diff --git a/tools/binman/test/236_pre_load_invalid_key.dts b/tools/binman/test/236_pre_load_invalid_key.dts index df858c3a28b..f93bc9792cd 100644 --- a/tools/binman/test/236_pre_load_invalid_key.dts +++ b/tools/binman/test/236_pre_load_invalid_key.dts @@ -11,7 +11,7 @@ content = <&image>; algo-name = "sha256,rsa4096"; padding-name = "pkcs-1.5"; - key-name = "tools/binman/test/230_dev.key"; + key-name = "dev.key"; header-size = <4096>; version = <1>; }; diff --git a/tools/binman/test/282_symbols_disable.dts b/tools/binman/test/282_symbols_disable.dts new file mode 100644 index 00000000000..6efa9335041 --- /dev/null +++ b/tools/binman/test/282_symbols_disable.dts @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/dts-v1/; + +/ { + #address-cells = <1>; + #size-cells = <1>; + + binman { + pad-byte = <0xff>; + u-boot-spl { + no-write-symbols; + }; + + u-boot { + offset = <0x38>; + no-expanded; + }; + + u-boot-spl2 { + type = "u-boot-spl"; + no-write-symbols; + }; + }; +}; diff --git a/tools/binman/test/283_mkimage_special.dts b/tools/binman/test/283_mkimage_special.dts new file mode 100644 index 00000000000..c234093e6ec --- /dev/null +++ b/tools/binman/test/283_mkimage_special.dts @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/dts-v1/; + +/ { + #address-cells = <1>; + #size-cells = <1>; + + binman { + mkimage { + args = "-T script"; + + u-boot { + }; + + hash { + }; + + imagename { + type = "u-boot"; + }; + }; + }; +}; diff --git a/tools/binman/test/284_fit_fdt_list.dts b/tools/binman/test/284_fit_fdt_list.dts new file mode 100644 index 00000000000..8885313f5b8 --- /dev/null +++ b/tools/binman/test/284_fit_fdt_list.dts @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/dts-v1/; + +/ { + #address-cells = <1>; + #size-cells = <1>; + + binman { + u-boot { + }; + fit { + description = "test-desc"; + #address-cells = <1>; + fit,fdt-list-val = "test-fdt1", "test-fdt2"; + + images { + kernel { + description = "Vanilla Linux kernel"; + type = "kernel"; + arch = "ppc"; + os = "linux"; + compression = "gzip"; + load = <00000000>; + entry = <00000000>; + hash-1 { + algo = "crc32"; + }; + hash-2 { + algo = "sha1"; + }; + u-boot { + }; + }; + @fdt-SEQ { + description = "fdt-NAME.dtb"; + type = "flat_dt"; + compression = "none"; + hash { + algo = "sha256"; + }; + }; + }; + + configurations { + default = "@config-DEFAULT-SEQ"; + @config-SEQ { + description = "conf-NAME.dtb"; + firmware = "uboot"; + loadables = "atf"; + fdt = "fdt-SEQ"; + }; + }; + }; + u-boot-nodtb { + }; + }; +}; diff --git a/tools/binman/test/285_spl_expand.dts b/tools/binman/test/285_spl_expand.dts new file mode 100644 index 00000000000..9c88ccb287b --- /dev/null +++ b/tools/binman/test/285_spl_expand.dts @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/dts-v1/; + +/ { + #address-cells = <1>; + #size-cells = <1>; + + binman { + u-boot-spl { + }; + }; +}; diff --git a/tools/binman/test/286_template.dts b/tools/binman/test/286_template.dts new file mode 100644 index 00000000000..6980dbfafcc --- /dev/null +++ b/tools/binman/test/286_template.dts @@ -0,0 
+1,42 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/dts-v1/; + +/ { + #address-cells = <1>; + #size-cells = <1>; + + binman { + u-boot-img { + }; + + common_part: template { + u-boot { + }; + + intel-vga { + filename = "vga.bin"; + }; + }; + + first { + type = "section"; + insert-template = <&common_part>; + + u-boot-dtb { + }; + }; + + second { + type = "section"; + insert-template = <&common_part>; + + u-boot-dtb { + }; + + intel-vga { + filename = "vga2.bin"; + }; + }; + }; +}; diff --git a/tools/binman/test/287_template_multi.dts b/tools/binman/test/287_template_multi.dts new file mode 100644 index 00000000000..122bfccd565 --- /dev/null +++ b/tools/binman/test/287_template_multi.dts @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/dts-v1/; +/ { + binman: binman { + multiple-images; + + my_template: template { + blob-ext@0 { + filename = "my-blob.bin"; + offset = <0>; + }; + blob-ext@8 { + offset = <8>; + }; + }; + + image { + pad-byte = <0x40>; + filename = "my-image.bin"; + insert-template = <&my_template>; + blob-ext@8 { + filename = "my-blob2.bin"; + }; + }; + }; +}; diff --git a/tools/binman/test/288_template_fit.dts b/tools/binman/test/288_template_fit.dts new file mode 100644 index 00000000000..d84dca4ea41 --- /dev/null +++ b/tools/binman/test/288_template_fit.dts @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/dts-v1/; + +/ { + binman: binman { + multiple-images; + + my_template: template { + fit@0 { + images { + kernel-1 { + }; + kernel-2 { + }; + }; + }; + }; + + image { + filename = "image.bin"; + insert-template = <&my_template>; + + fit@0 { + description = "desc"; + configurations { + }; + images { + kernel-3 { + }; + kernel-4 { + }; + }; + }; + }; + }; +}; diff --git a/tools/binman/test/289_template_section.dts b/tools/binman/test/289_template_section.dts new file mode 100644 index 00000000000..8a744a0cf68 --- /dev/null +++ b/tools/binman/test/289_template_section.dts @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/dts-v1/; + +/ { + #address-cells = <1>; + #size-cells = <1>; + + binman { + u-boot-img { + }; + + common_part: template { + u-boot { + }; + + intel-vga { + filename = "vga.bin"; + }; + }; + + first { + type = "section"; + insert-template = <&common_part>; + + u-boot-dtb { + }; + }; + + section { + second { + type = "section"; + insert-template = <&common_part>; + + u-boot-dtb { + }; + + intel-vga { + filename = "vga2.bin"; + }; + }; + }; + + second { + type = "section"; + insert-template = <&common_part>; + + u-boot-dtb { + }; + }; + }; +}; diff --git a/tools/binman/test/290_mkimage_sym.dts b/tools/binman/test/290_mkimage_sym.dts new file mode 100644 index 00000000000..2dfd286ad44 --- /dev/null +++ b/tools/binman/test/290_mkimage_sym.dts @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/dts-v1/; + +/ { + #address-cells = <1>; + #size-cells = <1>; + + binman { + u-boot-dtb { + }; + + mkimage { + args = "-n test -T script"; + + u-boot-spl { + }; + + u-boot-spl2 { + type = "u-boot-spl"; + }; + }; + + u-boot { + }; + }; +}; diff --git a/tools/binman/test/277_rockchip_tpl.dts b/tools/binman/test/291_rockchip_tpl.dts index 269f56e2545..269f56e2545 100644 --- a/tools/binman/test/277_rockchip_tpl.dts +++ b/tools/binman/test/291_rockchip_tpl.dts diff --git a/tools/binman/test/278_mkimage_missing_multiple.dts b/tools/binman/test/292_mkimage_missing_multiple.dts index f84aea49ead..f84aea49ead 100644 --- a/tools/binman/test/278_mkimage_missing_multiple.dts +++ b/tools/binman/test/292_mkimage_missing_multiple.dts diff --git 
a/tools/binman/test/293_ti_board_cfg.dts b/tools/binman/test/293_ti_board_cfg.dts new file mode 100644 index 00000000000..cda024c1b8c --- /dev/null +++ b/tools/binman/test/293_ti_board_cfg.dts @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0+ +/dts-v1/; + +/ { + #address-cells = <1>; + #size-cells = <1>; + + binman { + ti-board-config { + config = "yaml/config.yaml"; + schema = "yaml/schema.yaml"; + }; + }; +}; diff --git a/tools/binman/test/294_ti_board_cfg_combined.dts b/tools/binman/test/294_ti_board_cfg_combined.dts new file mode 100644 index 00000000000..95ef449cbf4 --- /dev/null +++ b/tools/binman/test/294_ti_board_cfg_combined.dts @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0+ +/dts-v1/; + +/ { + binman { + ti-board-config { + board-cfg { + config = "yaml/config.yaml"; + schema = "yaml/schema.yaml"; + }; + sec-cfg { + config = "yaml/config.yaml"; + schema = "yaml/schema.yaml"; + }; + rm-cfg { + config = "yaml/config.yaml"; + schema = "yaml/schema.yaml"; + }; + pm-cfg { + config = "yaml/config.yaml"; + schema = "yaml/schema.yaml"; + }; + }; + }; +}; diff --git a/tools/binman/test/295_ti_board_cfg_no_type.dts b/tools/binman/test/295_ti_board_cfg_no_type.dts new file mode 100644 index 00000000000..584b7acc5a4 --- /dev/null +++ b/tools/binman/test/295_ti_board_cfg_no_type.dts @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: GPL-2.0+ +/dts-v1/; + +/ { + binman { + ti-board-config { + config = "yaml/config.yaml"; + schema = "yaml/schema_notype.yaml"; + }; + }; +}; diff --git a/tools/binman/test/296_ti_secure.dts b/tools/binman/test/296_ti_secure.dts new file mode 100644 index 00000000000..941d0ab4ca3 --- /dev/null +++ b/tools/binman/test/296_ti_secure.dts @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/dts-v1/; + +/ { + #address-cells = <1>; + #size-cells = <1>; + + binman { + ti-secure { + content = <&unsecure_binary>; + }; + unsecure_binary: blob-ext { + filename = "ti_unsecure.bin"; + }; + }; +}; diff --git a/tools/binman/test/297_ti_secure_rom.dts b/tools/binman/test/297_ti_secure_rom.dts new file mode 100644 index 00000000000..d1313769f43 --- /dev/null +++ b/tools/binman/test/297_ti_secure_rom.dts @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/dts-v1/; + +/ { + #address-cells = <1>; + #size-cells = <1>; + + binman { + ti-secure-rom { + content = <&unsecure_binary>; + }; + unsecure_binary: blob-ext { + filename = "ti_unsecure.bin"; + }; + }; +}; diff --git a/tools/binman/test/298_ti_secure_rom_combined.dts b/tools/binman/test/298_ti_secure_rom_combined.dts new file mode 100644 index 00000000000..bf872739bc1 --- /dev/null +++ b/tools/binman/test/298_ti_secure_rom_combined.dts @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/dts-v1/; + +/ { + #address-cells = <1>; + #size-cells = <1>; + + binman { + ti-secure-rom { + content = <&unsecure_binary>; + content-sbl = <&unsecure_binary>; + content-sysfw = <&unsecure_binary>; + content-sysfw-data = <&unsecure_binary>; + content-sysfw-inner-cert = <&unsecure_binary>; + content-dm-data = <&unsecure_binary>; + combined; + sysfw-inner-cert; + dm-data; + }; + unsecure_binary: blob-ext { + filename = "ti_unsecure.bin"; + }; + }; +}; diff --git a/tools/binman/test/299_ti_secure_rom_a.dts b/tools/binman/test/299_ti_secure_rom_a.dts new file mode 100644 index 00000000000..887138f0e4b --- /dev/null +++ b/tools/binman/test/299_ti_secure_rom_a.dts @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/dts-v1/; + +/ { + #address-cells = <1>; + #size-cells = <1>; + + binman { + ti-secure-rom { + content = 
<&unsecure_binary>; + core = "secure"; + countersign; + }; + unsecure_binary: blob-ext { + filename = "ti_unsecure.bin"; + }; + }; +}; diff --git a/tools/binman/test/300_ti_secure_rom_b.dts b/tools/binman/test/300_ti_secure_rom_b.dts new file mode 100644 index 00000000000..c6d6182158c --- /dev/null +++ b/tools/binman/test/300_ti_secure_rom_b.dts @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/dts-v1/; + +/ { + #address-cells = <1>; + #size-cells = <1>; + + binman { + ti-secure-rom { + content = <&unsecure_binary>; + core = "public"; + }; + unsecure_binary: blob-ext { + filename = "ti_unsecure.bin"; + }; + }; +}; diff --git a/tools/binman/test/301_encrypted_no_algo.dts b/tools/binman/test/301_encrypted_no_algo.dts new file mode 100644 index 00000000000..03f7ffee90f --- /dev/null +++ b/tools/binman/test/301_encrypted_no_algo.dts @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0+ +/dts-v1/; + +/ { + binman { + fit { + images { + u-boot { + encrypted { + }; + }; + }; + }; + }; +}; diff --git a/tools/binman/test/302_encrypted_invalid_iv_file.dts b/tools/binman/test/302_encrypted_invalid_iv_file.dts new file mode 100644 index 00000000000..388a0a6ad90 --- /dev/null +++ b/tools/binman/test/302_encrypted_invalid_iv_file.dts @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0+ +/dts-v1/; + +/ { + binman { + fit { + images { + u-boot { + encrypted { + algo = "some-algo"; + key-source = "key"; + iv-filename = "invalid-iv-file"; + }; + }; + }; + }; + }; +}; diff --git a/tools/binman/test/303_encrypted_missing_key.dts b/tools/binman/test/303_encrypted_missing_key.dts new file mode 100644 index 00000000000..d1daaa08851 --- /dev/null +++ b/tools/binman/test/303_encrypted_missing_key.dts @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/dts-v1/; + +/ { + #address-cells = <1>; + #size-cells = <1>; + + binman { + fit { + description = "test desc"; + + images { + u-boot { + encrypted { + algo = "algo-name"; + iv-filename = "encrypted-file.iv"; + }; + }; + }; + }; + }; +}; diff --git a/tools/binman/test/304_encrypted_key_source.dts b/tools/binman/test/304_encrypted_key_source.dts new file mode 100644 index 00000000000..884ec508db8 --- /dev/null +++ b/tools/binman/test/304_encrypted_key_source.dts @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/dts-v1/; + +/ { + #address-cells = <1>; + #size-cells = <1>; + + binman { + fit { + description = "test desc"; + + images { + u-boot { + encrypted { + algo = "algo-name"; + key-source = "key-source-value"; + iv-filename = "encrypted-file.iv"; + }; + }; + }; + }; + }; +}; diff --git a/tools/binman/test/305_encrypted_key_file.dts b/tools/binman/test/305_encrypted_key_file.dts new file mode 100644 index 00000000000..efd7ee5f35a --- /dev/null +++ b/tools/binman/test/305_encrypted_key_file.dts @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/dts-v1/; + +/ { + #address-cells = <1>; + #size-cells = <1>; + + binman { + fit { + description = "test desc"; + + images { + u-boot { + encrypted { + algo = "algo-name"; + iv-filename = "encrypted-file.iv"; + key-filename = "encrypted-file.key"; + }; + }; + }; + }; + }; +}; diff --git a/tools/binman/test/306_spl_pubkey_dtb.dts b/tools/binman/test/306_spl_pubkey_dtb.dts new file mode 100644 index 00000000000..3256ff970cd --- /dev/null +++ b/tools/binman/test/306_spl_pubkey_dtb.dts @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/dts-v1/; + +/ { + #address-cells = <1>; + #size-cells = <1>; + + binman { + u-boot-spl-pubkey-dtb { + algo = "sha384,rsa4096"; + required = "conf"; + 
key-name-hint = "key"; + }; + }; +}; diff --git a/tools/binman/test/Makefile b/tools/binman/test/Makefile index cd66a3038be..4d152eee9c0 100644 --- a/tools/binman/test/Makefile +++ b/tools/binman/test/Makefile @@ -32,7 +32,7 @@ LDS_BINMAN_EMBED := -T $(SRC)u_boot_binman_embed.lds LDS_EFL_SECTIONS := -T $(SRC)elf_sections.lds LDS_BLOB := -T $(SRC)blob_syms.lds -TARGETS = u_boot_ucode_ptr u_boot_no_ucode_ptr bss_data \ +TARGETS = u_boot_ucode_ptr u_boot_no_ucode_ptr bss_data bss_data_zero \ u_boot_binman_syms u_boot_binman_syms.bin u_boot_binman_syms_bad \ u_boot_binman_syms_size u_boot_binman_syms_x86 embed_data \ u_boot_binman_embed u_boot_binman_embed_sm elf_sections blob_syms.bin @@ -48,6 +48,9 @@ u_boot_ucode_ptr: u_boot_ucode_ptr.c bss_data: CFLAGS += $(SRC)bss_data.lds bss_data: bss_data.c +bss_data_zero: CFLAGS += $(SRC)bss_data_zero.lds +bss_data_zero: bss_data_zero.c + embed_data: CFLAGS += $(SRC)embed_data.lds embed_data: embed_data.c diff --git a/tools/binman/test/bss_data.c b/tools/binman/test/bss_data.c index 4f9b64cef9e..7047a3bb014 100644 --- a/tools/binman/test/bss_data.c +++ b/tools/binman/test/bss_data.c @@ -7,9 +7,8 @@ */ int bss_data[10]; -int __bss_size = sizeof(bss_data); -int main() +int main(void) { bss_data[2] = 2; diff --git a/tools/binman/test/bss_data_zero.c b/tools/binman/test/bss_data_zero.c new file mode 100644 index 00000000000..7047a3bb014 --- /dev/null +++ b/tools/binman/test/bss_data_zero.c @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2016 Google, Inc + * + * Simple program to create a bss_data region so the symbol can be read + * by binutils. This is used by binman tests. + */ + +int bss_data[10]; + +int main(void) +{ + bss_data[2] = 2; + + return 0; +} diff --git a/tools/binman/test/bss_data_zero.lds b/tools/binman/test/bss_data_zero.lds new file mode 100644 index 00000000000..8fa0210a8f4 --- /dev/null +++ b/tools/binman/test/bss_data_zero.lds @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (c) 2016 Google, Inc + */ + +OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386") +OUTPUT_ARCH(i386) +ENTRY(_start) + +SECTIONS +{ + . = 0xfffffdf0; + _start = .; + __bss_size = 0; +} diff --git a/tools/binman/test/230_dev.key b/tools/binman/test/dev.key index b36bad2cfb3..b36bad2cfb3 100644 --- a/tools/binman/test/230_dev.key +++ b/tools/binman/test/dev.key diff --git a/tools/binman/test/embed_data.lds b/tools/binman/test/embed_data.lds index 908bf66c294..d416cb21110 100644 --- a/tools/binman/test/embed_data.lds +++ b/tools/binman/test/embed_data.lds @@ -17,6 +17,7 @@ SECTIONS embed_start = .; *(.embed*) embed_end = .; + region_size = 0; . 
= ALIGN(32); *(.data*) } diff --git a/tools/binman/test/yaml/config.yaml b/tools/binman/test/yaml/config.yaml new file mode 100644 index 00000000000..5f799a6e3a9 --- /dev/null +++ b/tools/binman/test/yaml/config.yaml @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: GPL-2.0+ +# +# Test config +# +--- + +main-branch: + obj: + a: 0x0 + b: 0 + arr: [0, 0, 0, 0] + another-arr: + - #1 + c: 0 + d: 0 + - #2 + c: 0 + d: 0 diff --git a/tools/binman/test/yaml/schema.yaml b/tools/binman/test/yaml/schema.yaml new file mode 100644 index 00000000000..8aa03f3c8ec --- /dev/null +++ b/tools/binman/test/yaml/schema.yaml @@ -0,0 +1,49 @@ +# SPDX-License-Identifier: GPL-2.0+ +# +# Test schema +# +--- + +definitions: + u8: + type: integer + minimum: 0 + maximum: 0xff + u16: + type: integer + minimum: 0 + maximum: 0xffff + u32: + type: integer + minimum: 0 + maximum: 0xffffffff + +type: object +properties: + main-branch: + type: object + properties: + obj: + type: object + properties: + a: + $ref: "#/definitions/u32" + b: + $ref: "#/definitions/u16" + arr: + type: array + minItems: 4 + maxItems: 4 + items: + $ref: "#/definitions/u8" + another-arr: + type: array + minItems: 2 + maxItems: 2 + items: + type: object + properties: + c: + $ref: "#/definitions/u8" + d: + $ref: "#/definitions/u8" diff --git a/tools/binman/test/yaml/schema_notype.yaml b/tools/binman/test/yaml/schema_notype.yaml new file mode 100644 index 00000000000..6b4d98ffa18 --- /dev/null +++ b/tools/binman/test/yaml/schema_notype.yaml @@ -0,0 +1,38 @@ +# SPDX-License-Identifier: GPL-2.0+ +# +# Test schema +# +--- + +definitions: + u8: + type: integer + minimum: 0 + maximum: 0xff + u16: + type: integer + minimum: 0 + maximum: 0xffff + u32: + type: integer + minimum: 0 + maximum: 0xffffffff + +type: object +properties: + main-branch: + type: object + properties: + obj: + type: object + properties: + a: + $ref: "#/definitions/u4" + b: + $ref: "#/definitions/u16" + arr: + type: array + minItems: 4 + maxItems: 4 + items: + $ref: "#/definitions/u8" diff --git a/tools/buildman/board.py b/tools/buildman/board.py index 8ef905b8ce1..248d8bfff18 100644 --- a/tools/buildman/board.py +++ b/tools/buildman/board.py @@ -17,14 +17,14 @@ class Board: vendor: Name of vendor (e.g. armltd) board_name: Name of board (e.g. integrator) target: Target name (use make <target>_defconfig to configure) - cfg_name: Config name + cfg_name: Config-file name (in includes/configs/) """ self.target = target self.arch = arch self.cpu = cpu - self.board_name = board_name - self.vendor = vendor self.soc = soc + self.vendor = vendor + self.board_name = board_name self.cfg_name = cfg_name self.props = [self.target, self.arch, self.cpu, self.board_name, self.vendor, self.soc, self.cfg_name] diff --git a/tools/buildman/boards.py b/tools/buildman/boards.py index 0bb0723b18e..83adbf167c7 100644 --- a/tools/buildman/boards.py +++ b/tools/buildman/boards.py @@ -50,7 +50,7 @@ def try_remove(fname): raise -def output_is_new(output): +def output_is_new(output, config_dir, srcdir): """Check if the output file is up to date. 
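The new config.yaml and schema.yaml test files above pair a test configuration with a jsonschema-style schema. A minimal sketch of validating one against the other, assuming the PyYAML and jsonschema packages (an illustration only, not the ti-board-config entry's actual code path):

    import yaml
    from jsonschema import validate

    with open('tools/binman/test/yaml/config.yaml', encoding='utf-8') as inf:
        config = yaml.safe_load(inf)
    with open('tools/binman/test/yaml/schema.yaml', encoding='utf-8') as inf:
        schema = yaml.safe_load(inf)

    # Raises jsonschema.ValidationError if the config violates the schema.
    # Note that schema_notype.yaml refers to an undefined '#/definitions/u4',
    # so validating against it fails with an unresolved-reference error.
    validate(instance=config, schema=schema)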
Looks at defconfig and Kconfig files to make sure none is newer than the @@ -59,6 +59,8 @@ def output_is_new(output): Args: output (str): Filename to check + config_dir (str): Directory containing defconfig files + srcdir (str): Directory containing Kconfig and MAINTAINERS files Returns: True if the given output file exists and is newer than any of @@ -76,7 +78,7 @@ def output_is_new(output): return False raise - for (dirpath, _, filenames) in os.walk(CONFIG_DIR): + for (dirpath, _, filenames) in os.walk(config_dir): for filename in fnmatch.filter(filenames, '*_defconfig'): if fnmatch.fnmatch(filename, '.*'): continue @@ -84,7 +86,7 @@ def output_is_new(output): if ctime < os.path.getctime(filepath): return False - for (dirpath, _, filenames) in os.walk('.'): + for (dirpath, _, filenames) in os.walk(srcdir): for filename in filenames: if (fnmatch.fnmatch(filename, '*~') or not fnmatch.fnmatch(filename, 'Kconfig*') and @@ -103,7 +105,7 @@ def output_is_new(output): if line[0] == '#' or line == '\n': continue defconfig = line.split()[6] + '_defconfig' - if not os.path.exists(os.path.join(CONFIG_DIR, defconfig)): + if not os.path.exists(os.path.join(config_dir, defconfig)): return False return True @@ -191,10 +193,10 @@ class KconfigScanner: # 'target' is added later } - def __init__(self): + def __init__(self, srctree): """Scan all the Kconfig files and create a Kconfig object.""" # Define environment variables referenced from Kconfig - os.environ['srctree'] = os.getcwd() + os.environ['srctree'] = srctree os.environ['UBOOTVERSION'] = 'dummy' os.environ['KCONFIG_OBJDIR'] = '' self._tmpfile = None @@ -211,40 +213,36 @@ class KconfigScanner: if self._tmpfile: try_remove(self._tmpfile) - def scan(self, defconfig): + def scan(self, defconfig, warn_targets): """Load a defconfig file to obtain board parameters. Args: defconfig (str): path to the defconfig file to be processed + warn_targets (bool): True to warn about missing or duplicate + CONFIG_TARGET options Returns: - A dictionary of board parameters. It has a form of: - { - 'arch': <arch_name>, - 'cpu': <cpu_name>, - 'soc': <soc_name>, - 'vendor': <vendor_name>, - 'board': <board_name>, - 'target': <target_name>, - 'config': <config_header_name>, - } + tuple: dictionary of board parameters. It has a form of: + { + 'arch': <arch_name>, + 'cpu': <cpu_name>, + 'soc': <soc_name>, + 'vendor': <vendor_name>, + 'board': <board_name>, + 'target': <target_name>, + 'config': <config_header_name>, + } + warnings (list of str): list of warnings found """ - # strip special prefixes and save it in a temporary file - outfd, self._tmpfile = tempfile.mkstemp() - with os.fdopen(outfd, 'w') as outf: - with open(defconfig, encoding='utf-8') as inf: - for line in inf: - colon = line.find(':CONFIG_') - if colon == -1: - outf.write(line) - else: - outf.write(line[colon + 1:]) + leaf = os.path.basename(defconfig) + expect_target, match, rear = leaf.partition('_defconfig') + assert match and not rear, f'{leaf} : invalid defconfig' - self._conf.load_config(self._tmpfile) - try_remove(self._tmpfile) + self._conf.load_config(defconfig) self._tmpfile = None params = {} + warnings = [] # Get the value of CONFIG_SYS_ARCH, CONFIG_SYS_CPU, ... etc. # Set '-' if the value is empty. 
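The reworked scanner is constructed with the source tree once, and scan() now returns a (params, warnings) tuple rather than a bare dict. A usage sketch (paths are hypothetical; assumes it is run where the buildman package is importable):

    from buildman.boards import KconfigScanner

    scanner = KconfigScanner('/path/to/u-boot')       # srctree
    params, warnings = scanner.scan(
        '/path/to/u-boot/configs/sandbox_defconfig', warn_targets=True)
    print(params['arch'], params['target'])
    for warn in warnings:
        print(warn)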
@@ -255,9 +253,23 @@ class KconfigScanner: else: params[key] = '-' - defconfig = os.path.basename(defconfig) - params['target'], match, rear = defconfig.partition('_defconfig') - assert match and not rear, f'{defconfig} : invalid defconfig' + # Check there is exactly one TARGET_xxx set + if warn_targets: + target = None + for name, sym in self._conf.syms.items(): + if name.startswith('TARGET_') and sym.str_value == 'y': + tname = name[7:].lower() + if target: + warnings.append( + f'WARNING: {leaf}: Duplicate TARGET_xxx: {target} and {tname}') + else: + target = tname + + if not target: + cfg_name = expect_target.replace('-', '_').upper() + warnings.append(f'WARNING: {leaf}: No TARGET_{cfg_name} enabled') + + params['target'] = expect_target # fix-up for aarch64 if params['arch'] == 'arm' and params['cpu'] == 'armv8': @@ -274,7 +286,7 @@ class KconfigScanner: else: params['arch'] = 'riscv64' - return params + return params, warnings class MaintainersDatabase: @@ -332,26 +344,55 @@ class MaintainersDatabase: str: Maintainers of the board. If the board has two or more maintainers, they are separated with colons. """ - if not target in self.database: - self.warnings.append(f"WARNING: no maintainers for '{target}'") - return '' + entry = self.database.get(target) + if entry: + status, maint_list = entry + if not status.startswith('Orphan'): + if len(maint_list) > 1 or (maint_list and maint_list[0] != '-'): + return ':'.join(maint_list) - return ':'.join(self.database[target][1]) + self.warnings.append(f"WARNING: no maintainers for '{target}'") + return '' - def parse_file(self, fname): + def parse_file(self, srcdir, fname): """Parse a MAINTAINERS file. Parse a MAINTAINERS file and accumulate board status and maintainers information in the self.database dict. + defconfig files are used to specify the target, e.g. xxx_defconfig is + used for target 'xxx'. If there is no defconfig file mentioned in the + MAINTAINERS file F: entries, then this function does nothing. + + The N: name entries can be used to specify a defconfig file using + wildcards. 
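A small sketch of that wildcard rule, using the same re.search() semantics as the implementation below (the pattern and defconfig name are made-up examples):

    import re

    pattern = 'am335x.*'                  # from an 'N:' line in MAINTAINERS
    leaf = 'am335x_guardian_defconfig'
    front = leaf[:-len('_defconfig')]     # 'am335x_guardian'
    if re.search(pattern, front):
        print(f"N: entry '{pattern}' selects target '{front}'")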
+ Args: + srcdir (str): Directory containing source code (Kconfig files) fname (str): MAINTAINERS file to be parsed """ + def add_targets(linenum): + """Add any new targets + + Args: + linenum (int): Current line number + """ + added = False + if targets: + for target in targets: + self.database[target] = (status, maintainers) + added = True + if not added and (status != '-' and maintainers): + leaf = fname[len(srcdir) + 1:] + if leaf != 'MAINTAINERS': + self.warnings.append( + f'WARNING: orphaned defconfig in {leaf} ending at line {linenum + 1}') + targets = [] maintainers = [] status = '-' with open(fname, encoding="utf-8") as inf: - for line in inf: + for linenum, line in enumerate(inf): # Check also commented maintainers if line[:3] == '#M:': line = line[1:] @@ -360,9 +401,12 @@ class MaintainersDatabase: maintainers.append(rest) elif tag == 'F:': # expand wildcard and filter by 'configs/*_defconfig' - for item in glob.glob(rest): + glob_path = os.path.join(srcdir, rest) + for item in glob.glob(glob_path): front, match, rear = item.partition('configs/') - if not front and match: + if front.endswith('/'): + front = front[:-1] + if front == srcdir and match: front, match, rear = rear.rpartition('_defconfig') if match and not rear: targets.append(front) @@ -371,23 +415,26 @@ class MaintainersDatabase: elif tag == 'N:': # Just scan the configs directory since that's all we care # about - for dirpath, _, fnames in os.walk('configs'): - for fname in fnames: - path = os.path.join(dirpath, fname) + walk_path = os.walk(os.path.join(srcdir, 'configs')) + for dirpath, _, fnames in walk_path: + for cfg in fnames: + path = os.path.join(dirpath, cfg)[len(srcdir) + 1:] front, match, rear = path.partition('configs/') - if not front and match: - front, match, rear = rear.rpartition('_defconfig') - if match and not rear: - targets.append(front) + if front or not match: + continue + front, match, rear = rear.rpartition('_defconfig') + + # Use this entry if it matches the defconfig file + # without the _defconfig suffix. For example + # 'am335x.*' matches am335x_guardian_defconfig + if match and not rear and re.search(rest, front): + targets.append(front) elif line == '\n': - for target in targets: - self.database[target] = (status, maintainers) + add_targets(linenum) targets = [] maintainers = [] status = '-' - if targets: - for target in targets: - self.database[target] = (status, maintainers) + add_targets(linenum) class Boards: @@ -622,39 +669,63 @@ class Boards: return result, warnings @classmethod - def scan_defconfigs_for_multiprocess(cls, queue, defconfigs): + def scan_defconfigs_for_multiprocess(cls, srcdir, queue, defconfigs, + warn_targets): """Scan defconfig files and queue their board parameters This function is intended to be passed to multiprocessing.Process() constructor. Args: + srcdir (str): Directory containing source code queue (multiprocessing.Queue): The resulting board parameters are written into this. defconfigs (sequence of str): A sequence of defconfig files to be scanned. 
+ warn_targets (bool): True to warn about missing or duplicate + CONFIG_TARGET options """ - kconf_scanner = KconfigScanner() + kconf_scanner = KconfigScanner(srcdir) for defconfig in defconfigs: - queue.put(kconf_scanner.scan(defconfig)) + queue.put(kconf_scanner.scan(defconfig, warn_targets)) @classmethod - def read_queues(cls, queues, params_list): - """Read the queues and append the data to the paramers list""" + def read_queues(cls, queues, params_list, warnings): + """Read the queues and append the data to the params list + + Args: + queues (list of multiprocessing.Queue): Queues to read + params_list (list of dict): List to add params to + warnings (set of str): Set to add warnings to + """ for que in queues: while not que.empty(): - params_list.append(que.get()) + params, warn = que.get() + params_list.append(params) + warnings.update(warn) - def scan_defconfigs(self, jobs=1): + def scan_defconfigs(self, config_dir, srcdir, jobs=1, warn_targets=False): """Collect board parameters for all defconfig files. This function invokes multiple processes for faster processing. Args: + config_dir (str): Directory containing the defconfig files + srcdir (str): Directory containing source code (Kconfig files) jobs (int): The number of jobs to run simultaneously + warn_targets (bool): True to warn about missing or duplicate + CONFIG_TARGET options + + Returns: + tuple: + list of dict: List of board parameters, each a dict: + key: 'arch', 'cpu', 'soc', 'vendor', 'board', 'target', + 'config' + value: string value of the key + list of str: List of warnings recorded """ all_defconfigs = [] - for (dirpath, _, filenames) in os.walk(CONFIG_DIR): + for (dirpath, _, filenames) in os.walk(config_dir): for filename in fnmatch.filter(filenames, '*_defconfig'): if fnmatch.fnmatch(filename, '.*'): continue @@ -669,18 +740,19 @@ class Boards: que = multiprocessing.Queue(maxsize=-1) proc = multiprocessing.Process( target=self.scan_defconfigs_for_multiprocess, - args=(que, defconfigs)) + args=(srcdir, que, defconfigs, warn_targets)) proc.start() processes.append(proc) queues.append(que) - # The resulting data should be accumulated to this list + # The resulting data should be accumulated to these lists params_list = [] + warnings = set() # Data in the queues should be retrieved periodically. # Otherwise, the queues would become full and subprocesses would get stuck. while any(p.is_alive() for p in processes): - self.read_queues(queues, params_list) + self.read_queues(queues, params_list, warnings) # sleep for a while until the queues are filled time.sleep(SLEEP_TIME) @@ -690,12 +762,12 @@ class Boards: proc.join() # retrieve leftover data - self.read_queues(queues, params_list) + self.read_queues(queues, params_list, warnings) - return params_list + return params_list, sorted(list(warnings)) @classmethod - def insert_maintainers_info(cls, params_list): + def insert_maintainers_info(cls, srcdir, params_list): """Add Status and Maintainers information to the board parameters list. Args: @@ -705,16 +777,21 @@ class Boards: list of str: List of warnings collected due to missing status, etc.
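The drain-while-alive loop in scan_defconfigs() above matters: a multiprocessing.Queue has limited pipe capacity and a child process blocks in put() once it fills, so the parent must keep draining while the children run. A minimal standalone sketch of the same pattern:

    import multiprocessing
    import time

    def worker(que):
        for i in range(1000):
            que.put(({'target': f'board{i}'}, []))    # (params, warnings)

    if __name__ == '__main__':
        que = multiprocessing.Queue(maxsize=-1)
        proc = multiprocessing.Process(target=worker, args=(que,))
        proc.start()

        results = []
        while proc.is_alive():
            while not que.empty():
                results.append(que.get())
            time.sleep(0.1)               # let the queue refill

        while not que.empty():            # retrieve leftover data
            results.append(que.get())
        proc.join()
        print(f'{len(results)} results')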
""" database = MaintainersDatabase() - for (dirpath, _, filenames) in os.walk('.'): - if 'MAINTAINERS' in filenames: - database.parse_file(os.path.join(dirpath, 'MAINTAINERS')) + for (dirpath, _, filenames) in os.walk(srcdir): + if 'MAINTAINERS' in filenames and 'tools/buildman' not in dirpath: + database.parse_file(srcdir, + os.path.join(dirpath, 'MAINTAINERS')) for i, params in enumerate(params_list): target = params['target'] - params['status'] = database.get_status(target) - params['maintainers'] = database.get_maintainers(target) + maintainers = database.get_maintainers(target) + params['maintainers'] = maintainers + if maintainers: + params['status'] = database.get_status(target) + else: + params['status'] = '-' params_list[i] = params - return database.warnings + return sorted(database.warnings) @classmethod def format_and_output(cls, params_list, output): @@ -750,9 +827,40 @@ class Boards: with open(output, 'w', encoding="utf-8") as outf: outf.write(COMMENT_BLOCK + '\n'.join(output_lines) + '\n') + def build_board_list(self, config_dir=CONFIG_DIR, srcdir='.', jobs=1, + warn_targets=False): + """Generate a board-database file + + This works by reading the Kconfig, then loading each board's defconfig + in to get the setting for each option. In particular, CONFIG_TARGET_xxx + is typically set by the defconfig, where xxx is the target to build. + + Args: + config_dir (str): Directory containing the defconfig files + srcdir (str): Directory containing source code (Kconfig files) + jobs (int): The number of jobs to run simultaneously + warn_targets (bool): True to warn about missing or duplicate + CONFIG_TARGET options + + Returns: + tuple: + list of dict: List of board parameters, each a dict: + key: 'arch', 'cpu', 'soc', 'vendor', 'board', 'config', + 'target' + value: string value of the key + list of str: Warnings that came up + """ + params_list, warnings = self.scan_defconfigs(config_dir, srcdir, jobs, + warn_targets) + m_warnings = self.insert_maintainers_info(srcdir, params_list) + return params_list, warnings + m_warnings + def ensure_board_list(self, output, jobs=1, force=False, quiet=False): """Generate a board database file if needed. + This is intended to check if Kconfig has changed since the boards.cfg + files was generated. + Args: output (str): The name of the output file jobs (int): The number of jobs to run simultaneously @@ -762,12 +870,11 @@ class Boards: Returns: bool: True if all is well, False if there were warnings """ - if not force and output_is_new(output): + if not force and output_is_new(output, CONFIG_DIR, '.'): if not quiet: print(f'{output} is up to date. 
Nothing to do.') return True - params_list = self.scan_defconfigs(jobs) - warnings = self.insert_maintainers_info(params_list) + params_list, warnings = self.build_board_list(CONFIG_DIR, '.', jobs) for warn in warnings: print(warn, file=sys.stderr) self.format_and_output(params_list, output) diff --git a/tools/buildman/bsettings.py b/tools/buildman/bsettings.py index 0eb894a558c..f7f8276e629 100644 --- a/tools/buildman/bsettings.py +++ b/tools/buildman/bsettings.py @@ -7,7 +7,7 @@ import io config_fname = None -def Setup(fname=''): +def setup(fname=''): """Set up the buildman settings module by reading config files Args: @@ -23,15 +23,15 @@ def Setup(fname=''): config_fname = '%s/.buildman' % os.getenv('HOME') if not os.path.exists(config_fname): print('No config file found ~/.buildman\nCreating one...\n') - CreateBuildmanConfigFile(config_fname) + create_buildman_config_file(config_fname) print('To install tool chains, please use the --fetch-arch option') if config_fname: settings.read(config_fname) -def AddFile(data): +def add_file(data): settings.readfp(io.StringIO(data)) -def GetItems(section): +def get_items(section): """Get the items from a section of the config. Args: @@ -47,7 +47,7 @@ def GetItems(section): except: raise -def GetGlobalItemValue(name): +def get_global_item_value(name): """Get an item from the 'global' section of the config. Args: @@ -58,7 +58,7 @@ def GetGlobalItemValue(name): """ return settings.get('global', name, fallback=None) -def SetItem(section, tag, value): +def set_item(section, tag, value): """Set an item and write it back to the settings file""" global settings global config_fname @@ -68,7 +68,7 @@ def SetItem(section, tag, value): with open(config_fname, 'w') as fd: settings.write(fd) -def CreateBuildmanConfigFile(config_fname): +def create_buildman_config_file(config_fname): """Creates a new config file with no tool chain information. Args: @@ -91,7 +91,6 @@ other = / [toolchain-prefix] # name = path to prefix # e.g. x86 = /opt/gcc-4.6.3-nolibc/x86_64-linux/bin/x86_64-linux- -# arc = /opt/arc/arc_gnu_2021.03_prebuilt_elf32_le_linux_install/bin/arc-elf32- [toolchain-alias] # arch = alias diff --git a/tools/buildman/builder.py b/tools/buildman/builder.py index d81752e9943..ecbd368c47a 100644 --- a/tools/buildman/builder.py +++ b/tools/buildman/builder.py @@ -134,7 +134,7 @@ class Config: for fname in config_filename: self.config[fname] = {} - def Add(self, fname, key, value): + def add(self, fname, key, value): self.config[fname][key] = value def __hash__(self): @@ -151,7 +151,7 @@ class Environment: self.target = target self.environment = {} - def Add(self, key, value): + def add(self, key, value): self.environment[key] = value class Builder: @@ -163,7 +163,8 @@ class Builder: checkout: True to check out source, False to skip that step. This is used for testing. 
col: terminal.Color() object - count: Number of commits to build + count: Total number of commits to build, which is the number of commits + multiplied by the number of boards do_make: Method to call to invoke Make fail: Number of builds that failed due to error force_build: Force building even if a build already exists @@ -255,7 +256,10 @@ class Builder: config_only=False, squash_config_y=False, warnings_as_errors=False, work_in_output=False, test_thread_exceptions=False, adjust_cfg=None, - allow_missing=False, no_lto=False, reproducible_builds=False): + allow_missing=False, no_lto=False, reproducible_builds=False, + force_build=False, force_build_failures=False, + force_reconfig=False, in_tree=False, + force_config_on_failure=False, make_func=None): """Create a new Builder object Args: @@ -295,7 +299,14 @@ class Builder: a string Kconfig allow_missing: Run build with BINMAN_ALLOW_MISSING=1 no_lto (bool): True to set the NO_LTO flag when building - + force_build (bool): Rebuild even commits that are already built + force_build_failures (bool): Rebuild commits that have not been + built, or failed to build + force_reconfig (bool): Reconfigure on each commit + in_tree (bool): Build in tree instead of out-of-tree + force_config_on_failure (bool): Reconfigure the build before + retrying a failed build + make_func (function): Function to call to run 'make' """ self.toolchains = toolchains self.base_dir = base_dir @@ -304,7 +315,7 @@ class Builder: else: self._working_dir = os.path.join(base_dir, '.bm-work') self.threads = [] - self.do_make = self.Make + self.do_make = make_func or self.make self.gnu_make = gnu_make self.checkout = checkout self.num_threads = num_threads @@ -318,11 +329,7 @@ class Builder: self._complete_delay = None self._next_delay_update = datetime.now() self._start_time = datetime.now() - self.force_config_on_failure = True - self.force_build_failures = False - self.force_reconfig = False self._step = step - self.in_tree = False self._error_lines = 0 self.no_subdirs = no_subdirs self.full_path = full_path @@ -336,6 +343,11 @@ class Builder: self._ide = False self.no_lto = no_lto self.reproducible_builds = reproducible_builds + self.force_build = force_build + self.force_build_failures = force_build_failures + self.force_reconfig = force_reconfig + self.in_tree = in_tree + self.force_config_on_failure = force_config_on_failure if not self.squash_config_y: self.config_filenames += EXTRA_CONFIG_FILENAMES @@ -389,7 +401,7 @@ class Builder: def signal_handler(self, signal, frame): sys.exit(1) - def SetDisplayOptions(self, show_errors=False, show_sizes=False, + def set_display_options(self, show_errors=False, show_sizes=False, show_detail=False, show_bloat=False, list_error_boards=False, show_config=False, show_environment=False, filter_dtb_warnings=False, @@ -422,7 +434,7 @@ class Builder: self._filter_migration_warnings = filter_migration_warnings self._ide = ide - def _AddTimestamp(self): + def _add_timestamp(self): """Add a new timestamp to the list and record the build period.
The build period is the length of time taken to perform a single @@ -451,14 +463,14 @@ class Builder: self._timestamps.popleft() count -= 1 - def SelectCommit(self, commit, checkout=True): + def select_commit(self, commit, checkout=True): """Checkout the selected commit for this build """ self.commit = commit if checkout and self.checkout: gitutil.checkout(commit.hash) - def Make(self, commit, brd, stage, cwd, *args, **kwargs): + def make(self, commit, brd, stage, cwd, *args, **kwargs): """Run make Args: @@ -503,7 +515,7 @@ class Builder: result.combined = '%s\n' % (' '.join(cmd)) + result.combined return result - def ProcessResult(self, result): + def process_result(self, result): """Process the result of a build, showing progress information Args: @@ -524,8 +536,8 @@ class Builder: if self._verbose: terminal.print_clear() boards_selected = {target : result.brd} - self.ResetResultSummary(boards_selected) - self.ProduceResultSummary(result.commit_upto, self.commits, + self.reset_result_summary(boards_selected) + self.produce_result_summary(result.commit_upto, self.commits, boards_selected) else: target = '(starting)' @@ -544,7 +556,7 @@ class Builder: line += ' ' * 8 # Add our current completion time estimate - self._AddTimestamp() + self._add_timestamp() if self._complete_delay: line += '%s : ' % self._complete_delay @@ -553,7 +565,7 @@ class Builder: terminal.print_clear() tprint(line, newline=False, limit_to_line=True) - def _GetOutputDir(self, commit_upto): + def get_output_dir(self, commit_upto): """Get the name of the output directory for a commit number The output directory is typically .../<branch>/<commit>. @@ -568,7 +580,7 @@ class Builder: if self.commits: commit = self.commits[commit_upto] subject = commit.subject.translate(trans_valid_chars) - # See _GetOutputSpaceRemovals() which parses this name + # See _get_output_space_removals() which parses this name commit_dir = ('%02d_g%s_%s' % (commit_upto + 1, commit.hash, subject[:20])) elif not self.no_subdirs: @@ -577,7 +589,7 @@ class Builder: return self.base_dir return os.path.join(self.base_dir, commit_dir) - def GetBuildDir(self, commit_upto, target): + def get_build_dir(self, commit_upto, target): """Get the name of the build directory for a commit number The build directory is typically .../<branch>/<commit>/<target>. 
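A worked example of the naming scheme in get_output_dir() for the fourth commit (hash and subject are invented; the subject is assumed to have already been through the trans_valid_chars translation):

    commit_upto = 3
    chash = 'abc1234'
    subject = 'binman-Support-templates'
    print('%02d_g%s_%s' % (commit_upto + 1, chash, subject[:20]))
    # prints: 04_gabc1234_binman-Support-templ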
@@ -586,30 +598,30 @@ class Builder: commit_upto: Commit number to use (0..self.count-1) target: Target name """ - output_dir = self._GetOutputDir(commit_upto) + output_dir = self.get_output_dir(commit_upto) if self.work_in_output: return output_dir return os.path.join(output_dir, target) - def GetDoneFile(self, commit_upto, target): + def get_done_file(self, commit_upto, target): """Get the name of the done file for a commit number Args: commit_upto: Commit number to use (0..self.count-1) target: Target name """ - return os.path.join(self.GetBuildDir(commit_upto, target), 'done') + return os.path.join(self.get_build_dir(commit_upto, target), 'done') - def GetSizesFile(self, commit_upto, target): + def get_sizes_file(self, commit_upto, target): """Get the name of the sizes file for a commit number Args: commit_upto: Commit number to use (0..self.count-1) target: Target name """ - return os.path.join(self.GetBuildDir(commit_upto, target), 'sizes') + return os.path.join(self.get_build_dir(commit_upto, target), 'sizes') - def GetFuncSizesFile(self, commit_upto, target, elf_fname): + def get_func_sizes_file(self, commit_upto, target, elf_fname): """Get the name of the funcsizes file for a commit number and ELF file Args: @@ -617,10 +629,10 @@ class Builder: target: Target name elf_fname: Filename of elf image """ - return os.path.join(self.GetBuildDir(commit_upto, target), + return os.path.join(self.get_build_dir(commit_upto, target), '%s.sizes' % elf_fname.replace('/', '-')) - def GetObjdumpFile(self, commit_upto, target, elf_fname): + def get_objdump_file(self, commit_upto, target, elf_fname): """Get the name of the objdump file for a commit number and ELF file Args: @@ -628,20 +640,20 @@ class Builder: target: Target name elf_fname: Filename of elf image """ - return os.path.join(self.GetBuildDir(commit_upto, target), + return os.path.join(self.get_build_dir(commit_upto, target), '%s.objdump' % elf_fname.replace('/', '-')) - def GetErrFile(self, commit_upto, target): + def get_err_file(self, commit_upto, target): """Get the name of the err file for a commit number Args: commit_upto: Commit number to use (0..self.count-1) target: Target name """ - output_dir = self.GetBuildDir(commit_upto, target) + output_dir = self.get_build_dir(commit_upto, target) return os.path.join(output_dir, 'err') - def FilterErrors(self, lines): + def filter_errors(self, lines): """Filter out errors in which we have no interest We should probably use map(). @@ -664,7 +676,7 @@ class Builder: out_lines.append(line) return out_lines - def ReadFuncSizes(self, fname, fd): + def read_func_sizes(self, fname, fd): """Read function sizes from the output of 'nm' Args: @@ -688,7 +700,7 @@ class Builder: sym[name] = sym.get(name, 0) + int(size, 16) return sym - def _ProcessConfig(self, fname): + def _process_config(self, fname): """Read in a .config, autoconf.mk or autoconf.h file This function handles all config file types. It ignores comments and @@ -725,7 +737,7 @@ class Builder: config[key] = value return config - def _ProcessEnvironment(self, fname): + def _process_environment(self, fname): """Read in a uboot.env file This function reads in environment variables from a file. @@ -750,7 +762,7 @@ class Builder: pass return environment - def GetBuildOutcome(self, commit_upto, target, read_func_sizes, + def get_build_outcome(self, commit_upto, target, read_func_sizes, read_config, read_environment): """Work out the outcome of a build. 
@@ -764,8 +776,8 @@ class Builder: Returns: Outcome object """ - done_file = self.GetDoneFile(commit_upto, target) - sizes_file = self.GetSizesFile(commit_upto, target) + done_file = self.get_done_file(commit_upto, target) + sizes_file = self.get_sizes_file(commit_upto, target) sizes = {} func_sizes = {} config = {} @@ -779,10 +791,10 @@ class Builder: # Try a rebuild return_code = 1 err_lines = [] - err_file = self.GetErrFile(commit_upto, target) + err_file = self.get_err_file(commit_upto, target) if os.path.exists(err_file): with open(err_file, 'r') as fd: - err_lines = self.FilterErrors(fd.readlines()) + err_lines = self.filter_errors(fd.readlines()) # Decide whether the build was ok, failed or created warnings if return_code: @@ -811,30 +823,30 @@ class Builder: sizes[values[5]] = size_dict if read_func_sizes: - pattern = self.GetFuncSizesFile(commit_upto, target, '*') + pattern = self.get_func_sizes_file(commit_upto, target, '*') for fname in glob.glob(pattern): with open(fname, 'r') as fd: dict_name = os.path.basename(fname).replace('.sizes', '') - func_sizes[dict_name] = self.ReadFuncSizes(fname, fd) + func_sizes[dict_name] = self.read_func_sizes(fname, fd) if read_config: - output_dir = self.GetBuildDir(commit_upto, target) + output_dir = self.get_build_dir(commit_upto, target) for name in self.config_filenames: fname = os.path.join(output_dir, name) - config[name] = self._ProcessConfig(fname) + config[name] = self._process_config(fname) if read_environment: - output_dir = self.GetBuildDir(commit_upto, target) + output_dir = self.get_build_dir(commit_upto, target) fname = os.path.join(output_dir, 'uboot.env') - environment = self._ProcessEnvironment(fname) + environment = self._process_environment(fname) return Builder.Outcome(rc, err_lines, sizes, func_sizes, config, environment) return Builder.Outcome(OUTCOME_UNKNOWN, [], {}, {}, {}, {}) - def GetResultSummary(self, boards_selected, commit_upto, read_func_sizes, + def get_result_summary(self, boards_selected, commit_upto, read_func_sizes, read_config, read_environment): """Calculate a summary of the results of building a commit. 
@@ -865,7 +877,7 @@ class Builder: key: environment variable value: value of environment variable """ - def AddLine(lines_summary, lines_boards, line, board): + def add_line(lines_summary, lines_boards, line, board): line = line.rstrip() if line in lines_boards: lines_boards[line].append(board) @@ -882,7 +894,7 @@ class Builder: environment = {} for brd in boards_selected.values(): - outcome = self.GetBuildOutcome(commit_upto, brd.target, + outcome = self.get_build_outcome(commit_upto, brd.target, read_func_sizes, read_config, read_environment) board_dict[brd.target] = outcome @@ -899,15 +911,15 @@ class Builder: is_note = self._re_note.match(line) if is_warning or (last_was_warning and is_note): if last_func: - AddLine(warn_lines_summary, warn_lines_boards, + add_line(warn_lines_summary, warn_lines_boards, last_func, brd) - AddLine(warn_lines_summary, warn_lines_boards, + add_line(warn_lines_summary, warn_lines_boards, line, brd) else: if last_func: - AddLine(err_lines_summary, err_lines_boards, + add_line(err_lines_summary, err_lines_boards, last_func, brd) - AddLine(err_lines_summary, err_lines_boards, + add_line(err_lines_summary, err_lines_boards, line, brd) last_was_warning = is_warning last_func = None @@ -915,19 +927,19 @@ class Builder: for fname in self.config_filenames: if outcome.config: for key, value in outcome.config[fname].items(): - tconfig.Add(fname, key, value) + tconfig.add(fname, key, value) config[brd.target] = tconfig tenvironment = Environment(brd.target) if outcome.environment: for key, value in outcome.environment.items(): - tenvironment.Add(key, value) + tenvironment.add(key, value) environment[brd.target] = tenvironment return (board_dict, err_lines_summary, err_lines_boards, warn_lines_summary, warn_lines_boards, config, environment) - def AddOutcome(self, board_dict, arch_list, changes, char, color): + def add_outcome(self, board_dict, arch_list, changes, char, color): """Add an output to our list of outcomes for each architecture This simple function adds failing boards (changes) to the @@ -957,19 +969,19 @@ class Builder: arch_list[arch] += str - def ColourNum(self, num): + def colour_num(self, num): color = self.col.RED if num > 0 else self.col.GREEN if num == 0: return '0' return self.col.build(color, str(num)) - def ResetResultSummary(self, board_selected): + def reset_result_summary(self, board_selected): """Reset the results summary ready for use. Set up the base board list to be all those selected, and set the error lines to empty. - Following this, calls to PrintResultSummary() will use this + Following this, calls to print_result_summary() will use this information to work out what has changed. 
Args: @@ -986,7 +998,7 @@ class Builder: self._base_config = None self._base_environment = None - def PrintFuncSizeDetail(self, fname, old, new): + def print_func_size_detail(self, fname, old, new): grow, shrink, add, remove, up, down = 0, 0, 0, 0, 0, 0 delta, common = [], {} @@ -1020,7 +1032,7 @@ class Builder: args = [add, -remove, grow, -shrink, up, -down, up - down] if max(args) == 0 and min(args) == 0: return - args = [self.ColourNum(x) for x in args] + args = [self.colour_num(x) for x in args] indent = ' ' * 15 tprint('%s%s: add: %s/%s, grow: %s/%s bytes: %s/%s (%s)' % tuple([indent, self.col.build(self.col.YELLOW, fname)] + args)) @@ -1034,7 +1046,7 @@ class Builder: tprint(msg, colour=color) - def PrintSizeDetail(self, target_list, show_bloat): + def print_size_detail(self, target_list, show_bloat): """Show details size information for each board Args: @@ -1067,12 +1079,12 @@ class Builder: outcome = result['_outcome'] base_outcome = self._base_board_dict[target] for fname in outcome.func_sizes: - self.PrintFuncSizeDetail(fname, + self.print_func_size_detail(fname, base_outcome.func_sizes[fname], outcome.func_sizes[fname]) - def PrintSizeSummary(self, board_selected, board_dict, show_detail, + def print_size_summary(self, board_selected, board_dict, show_detail, show_bloat): """Print a summary of image sizes broken down by section. @@ -1173,10 +1185,10 @@ class Builder: if printed_arch: tprint() if show_detail: - self.PrintSizeDetail(target_list, show_bloat) + self.print_size_detail(target_list, show_bloat) - def PrintResultSummary(self, board_selected, board_dict, err_lines, + def print_result_summary(self, board_selected, board_dict, err_lines, err_line_boards, warn_lines, warn_line_boards, config, environment, show_sizes, show_detail, show_bloat, show_config, show_environment): @@ -1212,7 +1224,7 @@ class Builder: show_config: Show config changes show_environment: Show environment changes """ - def _BoardList(line, line_boards): + def _board_list(line, line_boards): """Helper function to get a line of boards containing a line Args: @@ -1231,7 +1243,7 @@ class Builder: board_set.add(brd) return brds - def _CalcErrorDelta(base_lines, base_line_boards, lines, line_boards, + def _calc_error_delta(base_lines, base_line_boards, lines, line_boards, char): """Calculate the required output based on changes in errors @@ -1255,17 +1267,17 @@ class Builder: worse_lines = [] for line in lines: if line not in base_lines: - errline = ErrLine(char + '+', _BoardList(line, line_boards), + errline = ErrLine(char + '+', _board_list(line, line_boards), line) worse_lines.append(errline) for line in base_lines: if line not in lines: errline = ErrLine(char + '-', - _BoardList(line, base_line_boards), line) + _board_list(line, base_line_boards), line) better_lines.append(errline) return better_lines, worse_lines - def _CalcConfig(delta, name, config): + def _calc_config(delta, name, config): """Calculate configuration changes Args: @@ -1283,7 +1295,7 @@ class Builder: out += '%s=%s ' % (key, config[key]) return '%s %s: %s' % (delta, name, out) - def _AddConfig(lines, name, config_plus, config_minus, config_change): + def _add_config(lines, name, config_plus, config_minus, config_change): """Add changes in configuration to a list Args: @@ -1300,13 +1312,13 @@ class Builder: value: config value """ if config_plus: - lines.append(_CalcConfig('+', name, config_plus)) + lines.append(_calc_config('+', name, config_plus)) if config_minus: - lines.append(_CalcConfig('-', name, config_minus)) + 
lines.append(_calc_config('-', name, config_minus)) if config_change: - lines.append(_CalcConfig('c', name, config_change)) + lines.append(_calc_config('c', name, config_change)) - def _OutputConfigInfo(lines): + def _output_config_info(lines): for line in lines: if not line: continue @@ -1318,7 +1330,7 @@ class Builder: col = self.col.YELLOW tprint(' ' + line, newline=True, colour=col) - def _OutputErrLines(err_lines, colour): + def _output_err_lines(err_lines, colour): """Output the line of error/warning lines, if not empty Also increments self._error_lines if err_lines not empty @@ -1376,9 +1388,9 @@ class Builder: new_boards.append(target) # Get a list of errors and warnings that have appeared, and disappeared - better_err, worse_err = _CalcErrorDelta(self._base_err_lines, + better_err, worse_err = _calc_error_delta(self._base_err_lines, self._base_err_line_boards, err_lines, err_line_boards, '') - better_warn, worse_warn = _CalcErrorDelta(self._base_warn_lines, + better_warn, worse_warn = _calc_error_delta(self._base_warn_lines, self._base_warn_line_boards, warn_lines, warn_line_boards, 'w') # For the IDE mode, print out all the output @@ -1391,26 +1403,26 @@ class Builder: elif any((ok_boards, warn_boards, err_boards, unknown_boards, new_boards, worse_err, better_err, worse_warn, better_warn)): arch_list = {} - self.AddOutcome(board_selected, arch_list, ok_boards, '', + self.add_outcome(board_selected, arch_list, ok_boards, '', self.col.GREEN) - self.AddOutcome(board_selected, arch_list, warn_boards, 'w+', + self.add_outcome(board_selected, arch_list, warn_boards, 'w+', self.col.YELLOW) - self.AddOutcome(board_selected, arch_list, err_boards, '+', + self.add_outcome(board_selected, arch_list, err_boards, '+', self.col.RED) - self.AddOutcome(board_selected, arch_list, new_boards, '*', self.col.BLUE) + self.add_outcome(board_selected, arch_list, new_boards, '*', self.col.BLUE) if self._show_unknown: - self.AddOutcome(board_selected, arch_list, unknown_boards, '?', + self.add_outcome(board_selected, arch_list, unknown_boards, '?', self.col.MAGENTA) for arch, target_list in arch_list.items(): tprint('%10s: %s' % (arch, target_list)) self._error_lines += 1 - _OutputErrLines(better_err, colour=self.col.GREEN) - _OutputErrLines(worse_err, colour=self.col.RED) - _OutputErrLines(better_warn, colour=self.col.CYAN) - _OutputErrLines(worse_warn, colour=self.col.YELLOW) + _output_err_lines(better_err, colour=self.col.GREEN) + _output_err_lines(worse_err, colour=self.col.RED) + _output_err_lines(better_warn, colour=self.col.CYAN) + _output_err_lines(worse_warn, colour=self.col.YELLOW) if show_sizes: - self.PrintSizeSummary(board_selected, board_dict, show_detail, + self.print_size_summary(board_selected, board_dict, show_detail, show_bloat) if show_environment and self._base_environment: @@ -1438,10 +1450,10 @@ class Builder: desc = '%s -> %s' % (value, new_value) environment_change[key] = desc - _AddConfig(lines, target, environment_plus, environment_minus, + _add_config(lines, target, environment_plus, environment_minus, environment_change) - _OutputConfigInfo(lines) + _output_config_info(lines) if show_config and self._base_config: summary = {} @@ -1504,9 +1516,9 @@ class Builder: arch_config_minus[arch][name].update(config_minus) arch_config_change[arch][name].update(config_change) - _AddConfig(lines, name, config_plus, config_minus, + _add_config(lines, name, config_plus, config_minus, config_change) - _AddConfig(lines, 'all', all_config_plus, all_config_minus, + _add_config(lines, 'all', 
all_config_plus, all_config_minus, all_config_change) summary[target] = '\n'.join(lines) @@ -1526,20 +1538,20 @@ class Builder: all_plus.update(arch_config_plus[arch][name]) all_minus.update(arch_config_minus[arch][name]) all_change.update(arch_config_change[arch][name]) - _AddConfig(lines, name, arch_config_plus[arch][name], + _add_config(lines, name, arch_config_plus[arch][name], arch_config_minus[arch][name], arch_config_change[arch][name]) - _AddConfig(lines, 'all', all_plus, all_minus, all_change) + _add_config(lines, 'all', all_plus, all_minus, all_change) #arch_summary[target] = '\n'.join(lines) if lines: tprint('%s:' % arch) - _OutputConfigInfo(lines) + _output_config_info(lines) for lines, targets in lines_by_target.items(): if not lines: continue tprint('%s :' % ' '.join(sorted(targets))) - _OutputConfigInfo(lines.split('\n')) + _output_config_info(lines.split('\n')) # Save our updated information for the next call to this function @@ -1560,9 +1572,9 @@ class Builder: tprint("Boards not built (%d): %s" % (len(not_built), ', '.join(not_built))) - def ProduceResultSummary(self, commit_upto, commits, board_selected): + def produce_result_summary(self, commit_upto, commits, board_selected): (board_dict, err_lines, err_line_boards, warn_lines, - warn_line_boards, config, environment) = self.GetResultSummary( + warn_line_boards, config, environment) = self.get_result_summary( board_selected, commit_upto, read_func_sizes=self._show_bloat, read_config=self._show_config, @@ -1571,13 +1583,13 @@ class Builder: msg = '%02d: %s' % (commit_upto + 1, commits[commit_upto].subject) tprint(msg, colour=self.col.BLUE) - self.PrintResultSummary(board_selected, board_dict, + self.print_result_summary(board_selected, board_dict, err_lines if self._show_errors else [], err_line_boards, warn_lines if self._show_errors else [], warn_line_boards, config, environment, self._show_sizes, self._show_detail, self._show_bloat, self._show_config, self._show_environment) - def ShowSummary(self, commits, board_selected): + def show_summary(self, commits, board_selected): """Show a build summary for U-Boot for a given board list. Reset the result summary, then repeatedly call GetResultSummary on @@ -1589,16 +1601,16 @@ class Builder: """ self.commit_count = len(commits) if commits else 1 self.commits = commits - self.ResetResultSummary(board_selected) + self.reset_result_summary(board_selected) self._error_lines = 0 for commit_upto in range(0, self.commit_count, self._step): - self.ProduceResultSummary(commit_upto, commits, board_selected) + self.produce_result_summary(commit_upto, commits, board_selected) if not self._error_lines: tprint('(no errors to report)', colour=self.col.GREEN) - def SetupBuild(self, board_selected, commits): + def setup_build(self, board_selected, commits): """Set up ready to start a build. Args: @@ -1611,7 +1623,7 @@ class Builder: self.upto = self.warned = self.fail = 0 self._timestamps = collections.deque() - def GetThreadDir(self, thread_num): + def get_thread_dir(self, thread_num): """Get the directory path to the working dir for a thread. Args: @@ -1622,7 +1634,7 @@ class Builder: return self._working_dir return os.path.join(self._working_dir, '%02d' % max(thread_num, 0)) - def _PrepareThread(self, thread_num, setup_git): + def _prepare_thread(self, thread_num, setup_git): """Prepare the working directory for a thread. This clones or fetches the repo into the thread's work directory. 
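The per-thread layout from get_thread_dir() above is simply a two-digit subdirectory per thread under the working directory; a quick sketch with a hypothetical base path:

    import os

    working_dir = '/tmp/build/.bm-work'   # hypothetical base
    for thread_num in range(3):
        print(os.path.join(working_dir, '%02d' % max(thread_num, 0)))
    # /tmp/build/.bm-work/00 ... /01 ... /02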
@@ -1635,8 +1647,8 @@ class Builder: 'clone' to set up a git clone 'worktree' to set up a git worktree """ - thread_dir = self.GetThreadDir(thread_num) - builderthread.Mkdir(thread_dir) + thread_dir = self.get_thread_dir(thread_num) + builderthread.mkdir(thread_dir) git_dir = os.path.join(thread_dir, '.git') # Create a worktree or a git repo clone for this thread if it @@ -1672,7 +1684,7 @@ class Builder: else: raise ValueError("Can't setup git repo with %s." % setup_git) - def _PrepareWorkingSpace(self, max_threads, setup_git): + def _prepare_working_space(self, max_threads, setup_git): """Prepare the working directory for use. Set up the git repo for each thread. Creates a linked working tree @@ -1684,7 +1696,7 @@ class Builder: work setup_git: True to set up a git worktree or a git clone """ - builderthread.Mkdir(self._working_dir) + builderthread.mkdir(self._working_dir) if setup_git and self.git_dir: src_dir = os.path.abspath(self.git_dir) if gitutil.check_worktree_is_available(src_dir): @@ -1698,14 +1710,14 @@ class Builder: # Always do at least one thread for thread in range(max(max_threads, 1)): - self._PrepareThread(thread, setup_git) + self._prepare_thread(thread, setup_git) - def _GetOutputSpaceRemovals(self): + def _get_output_space_removals(self): """Get the output directories ready to receive files. Figure out what needs to be deleted in the output directory before it can be used. We only delete old buildman directories which have the - expected name pattern. See _GetOutputDir(). + expected name pattern. See get_output_dir(). Returns: List of full paths of directories to remove @@ -1714,7 +1726,7 @@ class Builder: return dir_list = [] for commit_upto in range(self.commit_count): - dir_list.append(self._GetOutputDir(commit_upto)) + dir_list.append(self.get_output_dir(commit_upto)) to_remove = [] for dirname in glob.glob(os.path.join(self.base_dir, '*')): @@ -1725,14 +1737,14 @@ class Builder: to_remove.append(dirname) return to_remove - def _PrepareOutputSpace(self): + def _prepare_output_space(self): """Get the output directories ready to receive files. We delete any output directories which look like ones we need to create. Having left over directories is confusing when the user wants to check the output manually. """ - to_remove = self._GetOutputSpaceRemovals() + to_remove = self._get_output_space_removals() if to_remove: tprint('Removing %d old build directories...' 
% len(to_remove), newline=False) @@ -1740,7 +1752,7 @@ for dirname in to_remove: shutil.rmtree(dirname) terminal.print_clear() - def BuildBoards(self, commits, board_selected, keep_outputs, verbose): + def build_boards(self, commits, board_selected, keep_outputs, verbose): """Build all commits for a list of boards Args: @@ -1759,15 +1771,15 @@ self.commits = commits self._verbose = verbose - self.ResetResultSummary(board_selected) - builderthread.Mkdir(self.base_dir, parents = True) - self._PrepareWorkingSpace(min(self.num_threads, len(board_selected)), + self.reset_result_summary(board_selected) + builderthread.mkdir(self.base_dir, parents = True) + self._prepare_working_space(min(self.num_threads, len(board_selected)), commits is not None) - self._PrepareOutputSpace() + self._prepare_output_space() if not self._ide: tprint('\rStarting build...', newline=False) - self.SetupBuild(board_selected, commits) - self.ProcessResult(None) + self.setup_build(board_selected, commits) + self.process_result(None) self.thread_exceptions = [] # Create jobs to build all commits for each board for brd in board_selected.values(): @@ -1781,7 +1793,7 @@ if self.num_threads: self.queue.put(job) else: - self._single_builder.RunJob(job) + self._single_builder.run_job(job) if self.num_threads: term = threading.Thread(target=self.queue.join) diff --git a/tools/buildman/builderthread.py b/tools/buildman/builderthread.py index 635865c21c8..25f460c207d 100644 --- a/tools/buildman/builderthread.py +++ b/tools/buildman/builderthread.py @@ -2,8 +2,15 @@ # Copyright (c) 2014 Google, Inc # +"""Implementation of the builder threads + +This module provides the BuilderThread class, which handles calling the builder +based on the jobs provided. +""" + import errno import glob +import io import os import shutil import sys @@ -16,11 +23,15 @@ from u_boot_pylib import command RETURN_CODE_RETRY = -1 BASE_ELF_FILENAMES = ['u-boot', 'spl/u-boot-spl', 'tpl/u-boot-tpl'] -def Mkdir(dirname, parents = False): +def mkdir(dirname, parents=False): """Make a directory if it doesn't already exist. Args: - dirname: Directory to create + dirname (str): Directory to create + parents (bool): True to also make parent directories + + Raises: + OSError: File already exists """ try: if parents: @@ -30,12 +41,51 @@ except OSError as err: if err.errno == errno.EEXIST: if os.path.realpath('.') == os.path.realpath(dirname): - print("Cannot create the current working directory '%s'!" % dirname) + print(f"Cannot create the current working directory '{dirname}'!") sys.exit(1) - pass else: raise + +def _remove_old_outputs(out_dir): + """Remove any old output-target files + + Args: + out_dir (str): Output directory for the build + + Since we use a build directory that was previously used by another + board, it may have produced an SPL image. If we don't remove it (i.e. + see do_config and self.mrproper below) then it will appear to be the + output of this build, even if it does not produce SPL images. + """ + for elf in BASE_ELF_FILENAMES: + fname = os.path.join(out_dir, elf) + if os.path.exists(fname): + os.remove(fname) + + +def copy_files(out_dir, build_dir, dirname, patterns): + """Copy files from the build directory to the output.
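The renaming in copy_files() (its body follows just below) keeps artefacts from subdirectories such as 'spl' from colliding with same-named top-level files; a sketch of the transform on a hypothetical matched file:

    import os

    dirname = 'spl'                       # source subdirectory
    fname = 'spl/u-boot.dtb'              # hypothetical matched file
    target = os.path.basename(fname)      # 'u-boot.dtb'
    base, ext = os.path.splitext(target)
    if dirname and ext:
        target = f'{base}-{dirname}{ext}'
    print(target)                         # u-boot-spl.dtb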
+ + Args: + out_dir (str): Path to output directory containing the files + build_dir (str): Place to copy the files + dirname (str): Source directory, '' for normal U-Boot, 'spl' for SPL + patterns (list of str): A list of filenames to copy, each relative + to the build directory + """ + for pattern in patterns: + file_list = glob.glob(os.path.join(out_dir, dirname, pattern)) + for fname in file_list: + target = os.path.basename(fname) + if dirname: + base, ext = os.path.splitext(target) + if ext: + target = f'{base}-{dirname}{ext}' + shutil.copy(fname, os.path.join(build_dir, target)) + + +# pylint: disable=R0903 class BuilderJob: """Holds information about a job to be performed by a thread @@ -77,7 +127,7 @@ class ResultThread(threading.Thread): """ while True: result = self.builder.out_queue.get() - self.builder.ProcessResult(result) + self.builder.process_result(result) self.builder.out_queue.task_done() @@ -107,22 +157,25 @@ class BuilderThread(threading.Thread): self.mrproper = mrproper self.per_board_out_dir = per_board_out_dir self.test_exception = test_exception + self.toolchain = None - def Make(self, commit, brd, stage, cwd, *args, **kwargs): + def make(self, commit, brd, stage, cwd, *args, **kwargs): """Run 'make' on a particular commit and board. The source code will already be checked out, so the 'commit' argument is only for information. Args: - commit: Commit object that is being built - brd: Board object that is being built - stage: Stage of the build. Valid stages are: + commit (Commit): Commit that is being built + brd (Board): Board that is being built + stage (str): Stage of the build. Valid stages are: mrproper - can be called to clean source config - called to configure for a board build - the main make invocation - it does the build - args: A list of arguments to pass to 'make' - kwargs: A list of keyword arguments to pass to command.run_pipe() + cwd (str): Working directory to set, or None to leave it alone + *args (list of str): Arguments to pass to 'make' + **kwargs (dict): A list of keyword arguments to pass to + command.run_pipe() Returns: CommandResult object @@ -130,61 +183,140 @@ class BuilderThread(threading.Thread): return self.builder.do_make(commit, brd, stage, cwd, *args, **kwargs) - def RunCommit(self, commit_upto, brd, work_dir, do_config, config_only, - force_build, force_build_failures, work_in_output, - adjust_cfg): - """Build a particular commit. - - If the build is already done, and we are not forcing a build, we skip - the build and just return the previously-saved results. + def _build_args(self, brd, out_dir, out_rel_dir, work_dir, commit_upto): + """Set up arguments to the args list based on the settings Args: - commit_upto: Commit number to build (0...n-1) - brd: Board object to build - work_dir: Directory to which the source will be checked out - do_config: True to run a make <board>_defconfig on the source - config_only: Only configure the source, do not build it - force_build: Force a build even if one was previously done - force_build_failures: Force a bulid if the previous result showed - failure - work_in_output: Use the output directory as the work directory and - don't write to a separate output directory. - adjust_cfg (list of str): List of changes to make to .config file - before building. 
Each is one of (where C is either CONFIG_xxx
-                or just xxx):
-                     C to enable C
-                     ~C to disable C
-                     C=val to set the value of C (val must have quotes if C is
-                         a string Kconfig
+            brd (Board): Board to create arguments for
+            out_dir (str): Path to output directory containing the files
+            out_rel_dir (str): Output directory relative to the current dir
+            work_dir (str): Directory to which the source will be checked out
+            commit_upto (int): Commit number to build (0...n-1)

        Returns:
-            tuple containing:
-                - CommandResult object containing the results of the build
-                - boolean indicating whether 'make config' is still needed
+            tuple:
+                list of str: Arguments to pass to make
+                str: Current working directory, or None if no commit
+                str: Source directory (typically the work directory)
        """
-        # Create a default result - it will be overwritte by the call to
-        # self.Make() below, in the event that we do a build.
-        result = command.CommandResult()
-        result.return_code = 0
-        if work_in_output or self.builder.in_tree:
-            out_dir = work_dir
-        else:
-            if self.per_board_out_dir:
-                out_rel_dir = os.path.join('..', brd.target)
+        args = []
+        cwd = work_dir
+        src_dir = os.path.realpath(work_dir)
+        if not self.builder.in_tree:
+            if commit_upto is None:
+                # In this case we are building in the original source directory
+                # (i.e. the current directory where buildman is invoked). The
+                # output directory is set to this thread's selected work
+                # directory.
+                #
+                # Symlinks can confuse U-Boot's Makefile since we may use '..'
+                # in our path, so remove them.
+                real_dir = os.path.realpath(out_dir)
+                args.append(f'O={real_dir}')
+                cwd = None
+                src_dir = os.getcwd()
            else:
-                out_rel_dir = 'build'
-            out_dir = os.path.join(work_dir, out_rel_dir)
+                args.append(f'O={out_rel_dir}')
+        if self.builder.verbose_build:
+            args.append('V=1')
+        else:
+            args.append('-s')
+        if self.builder.num_jobs is not None:
+            args.extend(['-j', str(self.builder.num_jobs)])
+        if self.builder.warnings_as_errors:
+            args.append('KCFLAGS=-Werror')
+            args.append('HOSTCFLAGS=-Werror')
+        if self.builder.allow_missing:
+            args.append('BINMAN_ALLOW_MISSING=1')
+        if self.builder.no_lto:
+            args.append('NO_LTO=1')
+        if self.builder.reproducible_builds:
+            args.append('SOURCE_DATE_EPOCH=0')
+        args.extend(self.builder.toolchains.GetMakeArguments(brd))
+        args.extend(self.toolchain.MakeArgs())
+        return args, cwd, src_dir
+
+    def _reconfigure(self, commit, brd, cwd, args, env, config_args, config_out,
+                     cmd_list):
+        """Reconfigure the build

-        # Check if the job was already completed last time
-        done_file = self.builder.GetDoneFile(commit_upto, brd.target)
+        Args:
+            commit (Commit): Commit being built
+            brd (Board): Board being built
+            cwd (str): Current working directory
+            args (list of str): Arguments to pass to make
+            env (dict): Environment strings
+            config_args (list of str): defconfig arg for this board
+            config_out (io.StringIO): Stream to which the configuration output
+                is written
+            cmd_list (list of str): List to add the commands to, for logging
+
+        Returns:
+            CommandResult object
+        """
+        if self.mrproper:
+            result = self.make(commit, brd, 'mrproper', cwd, 'mrproper', *args,
+                               env=env)
+            config_out.write(result.combined)
+            cmd_list.append([self.builder.gnu_make, 'mrproper', *args])
+        result = self.make(commit, brd, 'config', cwd, *(args + config_args),
+                           env=env)
+        cmd_list.append([self.builder.gnu_make] + args + config_args)
+        config_out.write(result.combined)
+        return result
+
+    def _build(self, commit, brd, cwd, args, env, cmd_list, config_only):
+        """Perform the build
+
+        Args:
+            commit (Commit): Commit being built
+            brd (Board):
Board being built
+            cwd (str): Current working directory
+            args (list of str): Arguments to pass to make
+            env (dict): Environment strings
+            cmd_list (list of str): List to add the commands to, for logging
+            config_only (bool): True if this is a config-only build (using the
+                'make cfg' target)
+
+        Returns:
+            CommandResult object
+        """
+        if config_only:
+            args.append('cfg')
+        result = self.make(commit, brd, 'build', cwd, *args, env=env)
+        cmd_list.append([self.builder.gnu_make] + args)
+        if (result.return_code == 2 and
+                ('Some images are invalid' in result.stderr)):
+            # This is handled later by the check for output in stderr
+            result.return_code = 0
+        return result
+
+    def _read_done_file(self, commit_upto, brd, force_build,
+                        force_build_failures):
+        """Check the 'done' file and see if this commit should be built
+
+        Args:
+            commit_upto (int): Commit number to build (0...n-1)
+            brd (Board): Board being built
+            force_build (bool): Force a build even if one was previously done
+            force_build_failures (bool): Force a build if the previous result
+                showed failure
+
+        Returns:
+            tuple:
+                bool: True if the commit should be built
+                CommandResult: if there was a previous run:
+                    - already_done set to True
+                    - return_code set to return code
+                    - result.stderr set to 'bad' if stderr output was recorded
+        """
+        result = command.CommandResult()
+        done_file = self.builder.get_done_file(commit_upto, brd.target)
        result.already_done = os.path.exists(done_file)
        will_build = (force_build or force_build_failures or
                      not result.already_done)
        if result.already_done:
-            # Get the return code from that build and use it
-            with open(done_file, 'r') as fd:
+            with open(done_file, 'r', encoding='utf-8') as outf:
                try:
-                    result.return_code = int(fd.readline())
+                    result.return_code = int(outf.readline())
                except ValueError:
                    # The file may be empty due to running out of disk space.
                    # Try a rebuild
@@ -194,12 +326,155 @@ class BuilderThread(threading.Thread):
            if result.return_code == RETURN_CODE_RETRY:
                will_build = True
        elif will_build:
-            err_file = self.builder.GetErrFile(commit_upto, brd.target)
+            err_file = self.builder.get_err_file(commit_upto, brd.target)
            if os.path.exists(err_file) and os.stat(err_file).st_size:
                result.stderr = 'bad'
            elif not force_build:
                # The build passed, so no need to build it again
                will_build = False
+        return will_build, result
+
+    def _decide_dirs(self, brd, work_dir, work_in_output):
+        """Decide the output directory to use
+
+        Args:
+            brd (Board): Board being built
+            work_dir (str): Directory to which the source will be checked out
+            work_in_output (bool): Use the output directory as the work
+                directory and don't write to a separate output directory.
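+
+        Note that with per-board output directories (-P) the relative
+        directory is '../<target>'; otherwise it is 'build' inside the work
+        directory, as the code below shows.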
+
+        Returns:
+            tuple:
+                out_dir (str): Output directory for the build
+                out_rel_dir (str): Output directory relative to the current dir
+        """
+        if work_in_output or self.builder.in_tree:
+            out_rel_dir = None
+            out_dir = work_dir
+        else:
+            if self.per_board_out_dir:
+                out_rel_dir = os.path.join('..', brd.target)
+            else:
+                out_rel_dir = 'build'
+            out_dir = os.path.join(work_dir, out_rel_dir)
+        return out_dir, out_rel_dir
+
+    def _checkout(self, commit_upto, work_dir):
+        """Checkout the right commit
+
+        Args:
+            commit_upto (int): Commit number to build (0...n-1)
+            work_dir (str): Directory to which the source will be checked out
+
+        Returns:
+            Commit: Commit being built, or 'current' for current source
+        """
+        if self.builder.commits:
+            commit = self.builder.commits[commit_upto]
+            if self.builder.checkout:
+                git_dir = os.path.join(work_dir, '.git')
+                gitutil.checkout(commit.hash, git_dir, work_dir, force=True)
+        else:
+            commit = 'current'
+        return commit
+
+    def _config_and_build(self, commit_upto, brd, work_dir, do_config,
+                          config_only, adjust_cfg, commit, out_dir, out_rel_dir,
+                          result):
+        """Do the build, configuring first if necessary
+
+        Args:
+            commit_upto (int): Commit number to build (0...n-1)
+            brd (Board): Board to create arguments for
+            work_dir (str): Directory to which the source will be checked out
+            do_config (bool): True to run a make <board>_defconfig on the source
+            config_only (bool): Only configure the source, do not build it
+            adjust_cfg (list of str): See the cfgutil module and run_commit()
+            commit (Commit): Commit being built
+            out_dir (str): Output directory for the build
+            out_rel_dir (str): Output directory relative to the current dir
+            result (CommandResult): Previous result
+
+        Returns:
+            tuple:
+                result (CommandResult): Result of the build
+                do_config (bool): indicates whether 'make config' is needed on
+                    the next incremental build
+        """
+        # Set up the environment and command line
+        env = self.toolchain.MakeEnvironment(self.builder.full_path)
+        mkdir(out_dir)
+
+        args, cwd, src_dir = self._build_args(brd, out_dir, out_rel_dir,
+                                              work_dir, commit_upto)
+        config_args = [f'{brd.target}_defconfig']
+        config_out = io.StringIO()
+
+        _remove_old_outputs(out_dir)
+
+        # If we need to reconfigure, do that now
+        cfg_file = os.path.join(out_dir, '.config')
+        cmd_list = []
+        if do_config or adjust_cfg:
+            result = self._reconfigure(
+                commit, brd, cwd, args, env, config_args, config_out, cmd_list)
+            do_config = False   # No need to configure next time
+            if adjust_cfg:
+                cfgutil.adjust_cfg_file(cfg_file, adjust_cfg)
+
+        # Now do the build, if everything looks OK
+        if result.return_code == 0:
+            result = self._build(commit, brd, cwd, args, env, cmd_list,
+                                 config_only)
+            if adjust_cfg:
+                errs = cfgutil.check_cfg_file(cfg_file, adjust_cfg)
+                if errs:
+                    result.stderr += errs
+                    result.return_code = 1
+        result.stderr = result.stderr.replace(src_dir + '/', '')
+        if self.builder.verbose_build:
+            result.stdout = config_out.getvalue() + result.stdout
+        result.cmd_list = cmd_list
+        return result, do_config
+
+    def run_commit(self, commit_upto, brd, work_dir, do_config, config_only,
+                   force_build, force_build_failures, work_in_output,
+                   adjust_cfg):
+        """Build a particular commit.
+
+        If the build is already done, and we are not forcing a build, we skip
+        the build and just return the previously-saved results.
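+
+        As an illustration, adjust_cfg=['CONFIG_CMD_MEM', '~CONFIG_NET',
+        'CONFIG_BOOTDELAY=2'] (hypothetical values) enables CONFIG_CMD_MEM,
+        disables CONFIG_NET and sets CONFIG_BOOTDELAY to 2; the syntax is
+        described under adjust_cfg below.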
+
+        Args:
+            commit_upto (int): Commit number to build (0...n-1)
+            brd (Board): Board to build
+            work_dir (str): Directory to which the source will be checked out
+            do_config (bool): True to run a make <board>_defconfig on the source
+            config_only (bool): Only configure the source, do not build it
+            force_build (bool): Force a build even if one was previously done
+            force_build_failures (bool): Force a build if the previous result
+                showed failure
+            work_in_output (bool): Use the output directory as the work
+                directory and don't write to a separate output directory.
+            adjust_cfg (list of str): List of changes to make to .config file
+                before building. Each is one of (where C is either CONFIG_xxx
+                or just xxx):
+                     C to enable C
+                     ~C to disable C
+                     C=val to set the value of C (val must have quotes if C is
+                         a string Kconfig)
+
+        Returns:
+            tuple containing:
+                - CommandResult object containing the results of the build
+                - boolean indicating whether 'make config' is still needed
+        """
+        # Create a default result - it will be overwritten by the call to
+        # self.make() below, in the event that we do a build.
+        out_dir, out_rel_dir = self._decide_dirs(brd, work_dir, work_in_output)
+
+        # Check if the job was already completed last time
+        will_build, result = self._read_done_file(commit_upto, brd, force_build,
+                                                  force_build_failures)

        if will_build:
            # We are going to have to build it. First, get a toolchain
@@ -209,115 +484,13 @@ class BuilderThread(threading.Thread):
            except ValueError as err:
                result.return_code = 10
                result.stdout = ''
-                result.stderr = str(err)
-                # TODO(sjg@chromium.org): This gets swallowed, but needs
-                # to be reported.
+                result.stderr = f'Tool chain error for {brd.arch}: {str(err)}'

            if self.toolchain:
-                # Checkout the right commit
-                if self.builder.commits:
-                    commit = self.builder.commits[commit_upto]
-                    if self.builder.checkout:
-                        git_dir = os.path.join(work_dir, '.git')
-                        gitutil.checkout(commit.hash, git_dir, work_dir,
-                                         force=True)
-                else:
-                    commit = 'current'
-
-                # Set up the environment and command line
-                env = self.toolchain.MakeEnvironment(self.builder.full_path)
-                Mkdir(out_dir)
-                args = []
-                cwd = work_dir
-                src_dir = os.path.realpath(work_dir)
-                if not self.builder.in_tree:
-                    if commit_upto is None:
-                        # In this case we are building in the original source
-                        # directory (i.e. the current directory where buildman
-                        # is invoked. The output directory is set to this
-                        # thread's selected work directory.
-                        #
-                        # Symlinks can confuse U-Boot's Makefile since
-                        # we may use '..' in our path, so remove them.
-                        out_dir = os.path.realpath(out_dir)
-                        args.append('O=%s' % out_dir)
-                        cwd = None
-                        src_dir = os.getcwd()
-                    else:
-                        args.append('O=%s' % out_rel_dir)
-                if self.builder.verbose_build:
-                    args.append('V=1')
-                else:
-                    args.append('-s')
-                if self.builder.num_jobs is not None:
-                    args.extend(['-j', str(self.builder.num_jobs)])
-                if self.builder.warnings_as_errors:
-                    args.append('KCFLAGS=-Werror')
-                    args.append('HOSTCFLAGS=-Werror')
-                if self.builder.allow_missing:
-                    args.append('BINMAN_ALLOW_MISSING=1')
-                if self.builder.no_lto:
-                    args.append('NO_LTO=1')
-                if self.builder.reproducible_builds:
-                    args.append('SOURCE_DATE_EPOCH=0')
-                config_args = ['%s_defconfig' % brd.target]
-                config_out = ''
-                args.extend(self.builder.toolchains.GetMakeArguments(brd))
-                args.extend(self.toolchain.MakeArgs())
-
-                # Remove any output targets. Since we use a build directory that
-                # was previously used by another board, it may have produced an
-                # SPL image. If we don't remove it (i.e.
see do_config and - # self.mrproper below) then it will appear to be the output of - # this build, even if it does not produce SPL images. - build_dir = self.builder.GetBuildDir(commit_upto, brd.target) - for elf in BASE_ELF_FILENAMES: - fname = os.path.join(out_dir, elf) - if os.path.exists(fname): - os.remove(fname) - - # If we need to reconfigure, do that now - cfg_file = os.path.join(out_dir, '.config') - cmd_list = [] - if do_config or adjust_cfg: - config_out = '' - if self.mrproper: - result = self.Make(commit, brd, 'mrproper', cwd, - 'mrproper', *args, env=env) - config_out += result.combined - cmd_list.append([self.builder.gnu_make, 'mrproper', - *args]) - result = self.Make(commit, brd, 'config', cwd, - *(args + config_args), env=env) - cmd_list.append([self.builder.gnu_make] + args + - config_args) - config_out += result.combined - do_config = False # No need to configure next time - if adjust_cfg: - cfgutil.adjust_cfg_file(cfg_file, adjust_cfg) - if result.return_code == 0: - if config_only: - args.append('cfg') - result = self.Make(commit, brd, 'build', cwd, *args, - env=env) - cmd_list.append([self.builder.gnu_make] + args) - if (result.return_code == 2 and - ('Some images are invalid' in result.stderr)): - # This is handled later by the check for output in - # stderr - result.return_code = 0 - if adjust_cfg: - errs = cfgutil.check_cfg_file(cfg_file, adjust_cfg) - if errs: - result.stderr += errs - result.return_code = 1 - result.stderr = result.stderr.replace(src_dir + '/', '') - if self.builder.verbose_build: - result.stdout = config_out + result.stdout - result.cmd_list = cmd_list - else: - result.return_code = 1 - result.stderr = 'No tool chain for %s\n' % brd.arch + commit = self._checkout(commit_upto, work_dir) + result, do_config = self._config_and_build( + commit_upto, brd, work_dir, do_config, config_only, + adjust_cfg, commit, out_dir, out_rel_dir, result) result.already_done = False result.toolchain = self.toolchain @@ -326,15 +499,15 @@ class BuilderThread(threading.Thread): result.out_dir = out_dir return result, do_config - def _WriteResult(self, result, keep_outputs, work_in_output): + def _write_result(self, result, keep_outputs, work_in_output): """Write a built result to the output directory. Args: - result: CommandResult object containing result to write - keep_outputs: True to store the output binaries, False + result (CommandResult): result to write + keep_outputs (bool): True to store the output binaries, False to delete them - work_in_output: Use the output directory as the work directory and - don't write to a separate output directory. + work_in_output (bool): Use the output directory as the work + directory and don't write to a separate output directory. """ # If we think this might have been aborted with Ctrl-C, record the # failure but not that we are 'done' with this board. 
A retry may fix @@ -345,22 +518,22 @@ class BuilderThread(threading.Thread): return # Write the output and stderr - output_dir = self.builder._GetOutputDir(result.commit_upto) - Mkdir(output_dir) - build_dir = self.builder.GetBuildDir(result.commit_upto, + output_dir = self.builder.get_output_dir(result.commit_upto) + mkdir(output_dir) + build_dir = self.builder.get_build_dir(result.commit_upto, result.brd.target) - Mkdir(build_dir) + mkdir(build_dir) outfile = os.path.join(build_dir, 'log') - with open(outfile, 'w') as fd: + with open(outfile, 'w', encoding='utf-8') as outf: if result.stdout: - fd.write(result.stdout) + outf.write(result.stdout) - errfile = self.builder.GetErrFile(result.commit_upto, + errfile = self.builder.get_err_file(result.commit_upto, result.brd.target) if result.stderr: - with open(errfile, 'w') as fd: - fd.write(result.stderr) + with open(errfile, 'w', encoding='utf-8') as outf: + outf.write(result.stderr) elif os.path.exists(errfile): os.remove(errfile) @@ -370,60 +543,61 @@ class BuilderThread(threading.Thread): if result.toolchain: # Write the build result and toolchain information. - done_file = self.builder.GetDoneFile(result.commit_upto, + done_file = self.builder.get_done_file(result.commit_upto, result.brd.target) - with open(done_file, 'w') as fd: + with open(done_file, 'w', encoding='utf-8') as outf: if maybe_aborted: # Special code to indicate we need to retry - fd.write('%s' % RETURN_CODE_RETRY) + outf.write(f'{RETURN_CODE_RETRY}') else: - fd.write('%s' % result.return_code) - with open(os.path.join(build_dir, 'toolchain'), 'w') as fd: - print('gcc', result.toolchain.gcc, file=fd) - print('path', result.toolchain.path, file=fd) - print('cross', result.toolchain.cross, file=fd) - print('arch', result.toolchain.arch, file=fd) - fd.write('%s' % result.return_code) + outf.write(f'{result.return_code}') + with open(os.path.join(build_dir, 'toolchain'), 'w', + encoding='utf-8') as outf: + print('gcc', result.toolchain.gcc, file=outf) + print('path', result.toolchain.path, file=outf) + print('cross', result.toolchain.cross, file=outf) + print('arch', result.toolchain.arch, file=outf) + outf.write(f'{result.return_code}') # Write out the image and function size information and an objdump env = result.toolchain.MakeEnvironment(self.builder.full_path) - with open(os.path.join(build_dir, 'out-env'), 'wb') as fd: + with open(os.path.join(build_dir, 'out-env'), 'wb') as outf: for var in sorted(env.keys()): - fd.write(b'%s="%s"' % (var, env[var])) + outf.write(b'%s="%s"' % (var, env[var])) with open(os.path.join(build_dir, 'out-cmd'), 'w', - encoding='utf-8') as fd: + encoding='utf-8') as outf: for cmd in result.cmd_list: - print(' '.join(cmd), file=fd) + print(' '.join(cmd), file=outf) lines = [] for fname in BASE_ELF_FILENAMES: - cmd = ['%snm' % self.toolchain.cross, '--size-sort', fname] + cmd = [f'{self.toolchain.cross}nm', '--size-sort', fname] nm_result = command.run_pipe([cmd], capture=True, capture_stderr=True, cwd=result.out_dir, raise_on_error=False, env=env) if nm_result.stdout: - nm = self.builder.GetFuncSizesFile(result.commit_upto, - result.brd.target, fname) - with open(nm, 'w') as fd: - print(nm_result.stdout, end=' ', file=fd) + nm_fname = self.builder.get_func_sizes_file( + result.commit_upto, result.brd.target, fname) + with open(nm_fname, 'w', encoding='utf-8') as outf: + print(nm_result.stdout, end=' ', file=outf) - cmd = ['%sobjdump' % self.toolchain.cross, '-h', fname] + cmd = [f'{self.toolchain.cross}objdump', '-h', fname] dump_result = 
command.run_pipe([cmd], capture=True, capture_stderr=True, cwd=result.out_dir, raise_on_error=False, env=env) rodata_size = '' if dump_result.stdout: - objdump = self.builder.GetObjdumpFile(result.commit_upto, + objdump = self.builder.get_objdump_file(result.commit_upto, result.brd.target, fname) - with open(objdump, 'w') as fd: - print(dump_result.stdout, end=' ', file=fd) + with open(objdump, 'w', encoding='utf-8') as outf: + print(dump_result.stdout, end=' ', file=outf) for line in dump_result.stdout.splitlines(): fields = line.split() if len(fields) > 5 and fields[1] == '.rodata': rodata_size = fields[2] - cmd = ['%ssize' % self.toolchain.cross, fname] + cmd = [f'{self.toolchain.cross}size', fname] size_result = command.run_pipe([cmd], capture=True, capture_stderr=True, cwd=result.out_dir, raise_on_error=False, env=env) @@ -432,30 +606,29 @@ class BuilderThread(threading.Thread): rodata_size) # Extract the environment from U-Boot and dump it out - cmd = ['%sobjcopy' % self.toolchain.cross, '-O', 'binary', + cmd = [f'{self.toolchain.cross}objcopy', '-O', 'binary', '-j', '.rodata.default_environment', 'env/built-in.o', 'uboot.env'] command.run_pipe([cmd], capture=True, capture_stderr=True, cwd=result.out_dir, raise_on_error=False, env=env) - ubootenv = os.path.join(result.out_dir, 'uboot.env') if not work_in_output: - self.CopyFiles(result.out_dir, build_dir, '', ['uboot.env']) + copy_files(result.out_dir, build_dir, '', ['uboot.env']) # Write out the image sizes file. This is similar to the output # of binutil's 'size' utility, but it omits the header line and # adds an additional hex value at the end of each line for the # rodata size - if len(lines): - sizes = self.builder.GetSizesFile(result.commit_upto, + if lines: + sizes = self.builder.get_sizes_file(result.commit_upto, result.brd.target) - with open(sizes, 'w') as fd: - print('\n'.join(lines), file=fd) + with open(sizes, 'w', encoding='utf-8') as outf: + print('\n'.join(lines), file=outf) if not work_in_output: # Write out the configuration files, with a special case for SPL for dirname in ['', 'spl', 'tpl']: - self.CopyFiles( + copy_files( result.out_dir, build_dir, dirname, ['u-boot.cfg', 'spl/u-boot-spl.cfg', 'tpl/u-boot-tpl.cfg', '.config', 'include/autoconf.mk', @@ -463,60 +636,40 @@ class BuilderThread(threading.Thread): # Now write the actual build output if keep_outputs: - self.CopyFiles( + copy_files( result.out_dir, build_dir, '', ['u-boot*', '*.bin', '*.map', '*.img', 'MLO', 'SPL', 'include/autoconf.mk', 'spl/u-boot-spl*']) - def CopyFiles(self, out_dir, build_dir, dirname, patterns): - """Copy files from the build directory to the output. 
-
-        Args:
-            out_dir: Path to output directory containing the files
-            build_dir: Place to copy the files
-            dirname: Source directory, '' for normal U-Boot, 'spl' for SPL
-            patterns: A list of filenames (strings) to copy, each relative
-                to the build directory
-        """
-        for pattern in patterns:
-            file_list = glob.glob(os.path.join(out_dir, dirname, pattern))
-            for fname in file_list:
-                target = os.path.basename(fname)
-                if dirname:
-                    base, ext = os.path.splitext(target)
-                    if ext:
-                        target = '%s-%s%s' % (base, dirname, ext)
-                shutil.copy(fname, os.path.join(build_dir, target))
-
-    def _SendResult(self, result):
+    def _send_result(self, result):
        """Send a result to the builder for processing

        Args:
-            result: CommandResult object containing the results of the build
+            result (CommandResult): results of the build

        Raises:
-            ValueError if self.test_exception is true (for testing)
+            ValueError: self.test_exception is true (for testing)
        """
        if self.test_exception:
            raise ValueError('test exception')
        if self.thread_num != -1:
            self.builder.out_queue.put(result)
        else:
-            self.builder.ProcessResult(result)
+            self.builder.process_result(result)

-    def RunJob(self, job):
+    def run_job(self, job):
        """Run a single job

        A job consists of building a list of commits for a particular board.

        Args:
-            job: Job to build
+            job (Job): Job to build

-        Returns:
-            List of Result objects
+        Raises:
+            ValueError: Thread was interrupted
        """
        brd = job.brd
-        work_dir = self.builder.GetThreadDir(self.thread_num)
+        work_dir = self.builder.get_thread_dir(self.thread_num)
        self.toolchain = None
        if job.commits:
            # Run 'make board_defconfig' on the first commit
@@ -524,7 +677,7 @@ class BuilderThread(threading.Thread):
            commit_upto = 0
            force_build = False
            for commit_upto in range(0, len(job.commits), job.step):
-                result, request_config = self.RunCommit(commit_upto, brd,
+                result, request_config = self.run_commit(commit_upto, brd,
                        work_dir, do_config, self.builder.config_only,
                        force_build or self.builder.force_build,
                        self.builder.force_build_failures,
@@ -535,7 +688,7 @@ class BuilderThread(threading.Thread):
                    # If our incremental build failed, try building again
                    # with a reconfig.
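                    # (run_commit() is then invoked with do_config=True and
                    # force_build=True, as its argument list below shows)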
                    if self.builder.force_config_on_failure:
-                        result, request_config = self.RunCommit(commit_upto,
+                        result, request_config = self.run_commit(commit_upto,
                            brd, work_dir, True, False, True, False,
                            job.work_in_output, job.adjust_cfg)
                        did_config = True
@@ -576,17 +729,17 @@ class BuilderThread(threading.Thread):
                    raise ValueError('Interrupt')

                # We have the build results, so output the result
-                self._WriteResult(result, job.keep_outputs, job.work_in_output)
-                self._SendResult(result)
+                self._write_result(result, job.keep_outputs, job.work_in_output)
+                self._send_result(result)
        else:
            # Just build the currently checked-out build
-            result, request_config = self.RunCommit(None, brd, work_dir, True,
+            result, request_config = self.run_commit(None, brd, work_dir, True,
                        self.builder.config_only, True,
                        self.builder.force_build_failures, job.work_in_output,
                        job.adjust_cfg)
            result.commit_upto = 0
-            self._WriteResult(result, job.keep_outputs, job.work_in_output)
-            self._SendResult(result)
+            self._write_result(result, job.keep_outputs, job.work_in_output)
+            self._send_result(result)

    def run(self):
        """Our thread's run function
@@ -597,8 +750,9 @@ class BuilderThread(threading.Thread):
        while True:
            job = self.builder.queue.get()
            try:
-                self.RunJob(job)
-            except Exception as e:
-                print('Thread exception (use -T0 to run without threads):', e)
-                self.builder.thread_exceptions.append(e)
+                self.run_job(job)
+            except Exception as exc:
+                print('Thread exception (use -T0 to run without threads):',
+                      exc)
+                self.builder.thread_exceptions.append(exc)
            self.builder.queue.task_done()
diff --git a/tools/buildman/buildman.rst b/tools/buildman/buildman.rst
index c8b0db3d8b9..aae2477b5c3 100644
--- a/tools/buildman/buildman.rst
+++ b/tools/buildman/buildman.rst
@@ -159,7 +159,7 @@ on the command line:

.. code-block:: bash

-   buildman --boards sandbox,snow --boards
+   buildman --boards sandbox,snow --boards firefly-rk3399

It is convenient to use the -n option to see what will be built based on
the subset given. Use -v as well to get an actual list of boards.
@@ -475,10 +475,6 @@ Setting up
      sudo mkdir -p /toolchains
      sudo mv ~/.buildman-toolchains/*/* /toolchains/

-   For those not available from kernel.org, download from the following links:
-
-   - `Arc Toolchain`_
-
Buildman should now be set up to use your new toolchain.

At the time of writing, U-Boot has these architectures:
@@ -1066,9 +1062,9 @@ same as 'am335x_evm_usbspl'/

The -K option uses the u-boot.cfg, spl/u-boot-spl.cfg and tpl/u-boot-tpl.cfg
files which are produced by a build. If all you want is to check the
-configuration you can in fact avoid doing a full build, using -D. This tells
-buildman to configuration U-Boot and create the .cfg files, but not actually
-build the source. This is 5-10 times faster than doing a full build.
+configuration you can in fact avoid doing a full build, using --config-only.
+This tells buildman to configure U-Boot and create the .cfg files, but not
+actually build the source. This is 5-10 times faster than doing a full build.

By default buildman considers the following two configuration methods
equivalent::
@@ -1307,14 +1303,32 @@ Using boards.cfg

This file is no longer needed by buildman but it is still generated in the
working directory. This helps avoid a delay on every build, since scanning all
-the Kconfig files takes a few seconds. Use the -R flag to force regeneration
-of the file - in that case buildman exits after writing the file. with exit code
-2 if there was an error in the maintainer files.
+the Kconfig files takes a few seconds. Use the `-R <filename>` flag to force
+regeneration of the file - in that case buildman exits after writing the file
+with exit code 2 if there was an error in the maintainer files. To use the
+default filename, use a hyphen, i.e. `-R -`.

You should use 'buildman -nv <criteria>' instead of grepping the boards.cfg
file, since it may be dropped altogether in future.

+Checking maintainers
+--------------------
+
+Sometimes a board is added without a corresponding entry in a MAINTAINERS file.
+Use the `--maintainer-check` option to check this::
+
+    $ buildman --maintainer-check
+    WARNING: board/mikrotik/crs3xx-98dx3236/MAINTAINERS: missing defconfig ending at line 7
+    WARNING: no maintainers for 'clearfog_spi'
+
+Buildman returns with an exit code of 2 if there are any warnings.
+
+An experimental `--full-check` option also checks for boards which don't have a
+CONFIG_TARGET_xxx where xxx corresponds to their defconfig filename. This is
+not strictly necessary, but may be useful information.
+
+
Checking the command
--------------------

@@ -1342,8 +1356,6 @@ Thanks to Grant Grundler <grundler@chromium.org> for his ideas for improving
the build speed by building all commits for a board instead of the other
way around.

-.. _`Arc Toolchain`: https://github.com/foss-for-synopsys-dwc-arc-processors/toolchain/releases/download/arc-2021.03-release/arc_gnu_2021.03_prebuilt_elf32_le_linux_install.tar.gz
-
.. sectionauthor:: Simon Glass
.. sectionauthor:: Copyright (c) 2013 The Chromium OS Authors.
.. sectionauthor:: sjg@chromium.org
diff --git a/tools/buildman/cmdline.py b/tools/buildman/cmdline.py
index a9cda249572..03211bd5aa5 100644
--- a/tools/buildman/cmdline.py
+++ b/tools/buildman/cmdline.py
@@ -2,148 +2,190 @@
# Copyright (c) 2014 Google, Inc
#

-from optparse import OptionParser
+"""Handles parsing of buildman arguments
+
+This creates the argument parser and uses it to parse the arguments passed in
+"""
+
+import argparse
import os
import pathlib

BUILDMAN_DIR = pathlib.Path(__file__).parent
HAS_TESTS = os.path.exists(BUILDMAN_DIR / "test.py")

-def ParseArgs():
-    """Parse command line arguments from sys.argv[]
+def add_upto_m(parser):
+    """Add arguments up to 'M'

-    Returns:
-        tuple containing:
-            options: command line options
-            args: command lin arguments
+    Args:
+        parser (ArgumentParser): Parser to add to
+
+    This is split out to avoid having too many statements in one function
    """
-    parser = OptionParser()
-    parser.add_option('-a', '--adjust-cfg', type=str, action='append',
+    parser.add_argument('-a', '--adjust-cfg', type=str, action='append',
          help='Adjust the Kconfig settings in .config before building')
-    parser.add_option('-A', '--print-prefix', action='store_true',
+    parser.add_argument('-A', '--print-prefix', action='store_true',
          help='Print the tool-chain prefix for a board (CROSS_COMPILE=)')
-    parser.add_option('-b', '--branch', type='string',
+    parser.add_argument('-b', '--branch', type=str,
          help='Branch name to build, or range of commits to build')
-    parser.add_option('-B', '--bloat', dest='show_bloat',
+    parser.add_argument('-B', '--bloat', dest='show_bloat',
          action='store_true', default=False,
          help='Show changes in function code size for each board')
-    parser.add_option('--boards', type='string', action='append',
+    parser.add_argument('--boards', type=str, action='append',
          help='List of board names to build separated by comma')
-    parser.add_option('-c', '--count', dest='count', type='int',
+    parser.add_argument('-c', '--count', dest='count',
type=int,
          default=-1, help='Run build on the top n commits')
-    parser.add_option('-C', '--force-reconfig', dest='force_reconfig',
+    parser.add_argument('-C', '--force-reconfig', dest='force_reconfig',
          action='store_true', default=False,
          help='Reconfigure for every commit (disable incremental build)')
-    parser.add_option('-d', '--detail', dest='show_detail',
+    parser.add_argument('--config-only', action='store_true',
+          default=False,
+          help="Don't build, just configure each commit")
+    parser.add_argument('-d', '--detail', dest='show_detail',
          action='store_true', default=False,
          help='Show detailed size delta for each board in the -S summary')
-    parser.add_option('-D', '--config-only', action='store_true', default=False,
-          help="Don't build, just configure each commit")
-    parser.add_option('--debug', action='store_true',
+    parser.add_argument('-D', '--debug', action='store_true',
          help='Enable debugging (provides a full traceback on error)')
-    parser.add_option('-e', '--show_errors', action='store_true',
+    parser.add_argument('-e', '--show_errors', action='store_true',
          default=False, help='Show errors and warnings')
-    parser.add_option('-E', '--warnings-as-errors', action='store_true',
+    parser.add_argument('-E', '--warnings-as-errors', action='store_true',
          default=False, help='Treat all compiler warnings as errors')
-    parser.add_option('-f', '--force-build', dest='force_build',
+    parser.add_argument('-f', '--force-build', dest='force_build',
          action='store_true', default=False,
          help='Force build of boards even if already built')
-    parser.add_option('-F', '--force-build-failures', dest='force_build_failures',
+    parser.add_argument('-F', '--force-build-failures', dest='force_build_failures',
          action='store_true', default=False,
          help='Force build of previously-failed build')
-    parser.add_option('--fetch-arch', type='string',
+    parser.add_argument('--fetch-arch', type=str,
          help="Fetch a toolchain for architecture FETCH_ARCH ('list' to list)."
               ' You can also fetch several toolchains separated by comma, or'
               " 'all' to download all")
-    parser.add_option('-g', '--git', type='string',
+    parser.add_argument(
+        '--full-check', action='store_true',
+        help='Check maintainer entries and TARGET configs')
+    parser.add_argument('-g', '--git', type=str,
          help='Git repo containing branch to build', default='.')
-    parser.add_option('-G', '--config-file', type='string',
+    parser.add_argument('-G', '--config-file', type=str,
          help='Path to buildman config file', default='')
-    parser.add_option('-H', '--full-help', action='store_true', dest='full_help',
+    parser.add_argument('-H', '--full-help', action='store_true', dest='full_help',
          default=False, help='Display the README file')
-    parser.add_option('-i', '--in-tree', dest='in_tree',
+    parser.add_argument('-i', '--in-tree', dest='in_tree',
          action='store_true', default=False,
          help='Build in the source tree instead of a separate directory')
-    parser.add_option('-I', '--ide', action='store_true', default=False,
+    parser.add_argument('-I', '--ide', action='store_true', default=False,
          help='Create build output that can be parsed by an IDE')
-    parser.add_option('-j', '--jobs', dest='jobs', type='int',
+    parser.add_argument('-j', '--jobs', dest='jobs', type=int,
          default=None, help='Number of jobs to run at once (passed to make)')
-    parser.add_option('-k', '--keep-outputs', action='store_true',
+    parser.add_argument('-k', '--keep-outputs', action='store_true',
          default=False, help='Keep all build output files (e.g.
binaries)')
-    parser.add_option('-K', '--show-config', action='store_true',
-          default=False, help='Show configuration changes in summary (both board config files and Kconfig)')
-    parser.add_option('--preserve-config-y', action='store_true',
+    parser.add_argument('-K', '--show-config', action='store_true',
+          default=False,
+          help='Show configuration changes in summary (both board config files and Kconfig)')
+    parser.add_argument('--preserve-config-y', action='store_true',
          default=False, help="Don't convert y to 1 in configs")
-    parser.add_option('-l', '--list-error-boards', action='store_true',
+    parser.add_argument('-l', '--list-error-boards', action='store_true',
          default=False, help='Show a list of boards next to each error/warning')
-    parser.add_option('-L', '--no-lto', action='store_true',
+    parser.add_argument('-L', '--no-lto', action='store_true',
          default=False, help='Disable Link-time Optimisation (LTO) for builds')
-    parser.add_option('--list-tool-chains', action='store_true', default=False,
+    parser.add_argument('--list-tool-chains', action='store_true', default=False,
          help='List available tool chains (use -v to see probing detail)')
-    parser.add_option('-m', '--mrproper', action='store_true',
+    parser.add_argument('-m', '--mrproper', action='store_true',
          default=False, help="Run 'make mrproper' before reconfiguring")
-    parser.add_option(
+    parser.add_argument(
        '-M', '--allow-missing', action='store_true', default=False,
-        help='Tell binman to allow missing blobs and generate fake ones as needed'),
-    parser.add_option(
+        help='Tell binman to allow missing blobs and generate fake ones as needed')
+    parser.add_argument(
+        '--maintainer-check', action='store_true',
+        help='Check that maintainer entries exist for each board')
+    parser.add_argument(
        '--no-allow-missing', action='store_true', default=False,
-        help='Disable telling binman to allow missing blobs'),
-    parser.add_option('-n', '--dry-run', action='store_true', dest='dry_run',
+        help='Disable telling binman to allow missing blobs')
+    parser.add_argument('-n', '--dry-run', action='store_true', dest='dry_run',
          default=False, help="Do a dry run (describe actions, but do nothing)")
-    parser.add_option('-N', '--no-subdirs', action='store_true', dest='no_subdirs',
-          default=False, help="Don't create subdirectories when building current source for a single board")
-    parser.add_option('-o', '--output-dir', type='string', dest='output_dir',
+    parser.add_argument('-N', '--no-subdirs', action='store_true', dest='no_subdirs',
+          default=False,
+          help="Don't create subdirectories when building current source for a single board")
+
+
+def add_after_m(parser):
+    """Add arguments after 'M'
+
+    Args:
+        parser (ArgumentParser): Parser to add to
+
+    This is split out to avoid having too many statements in one function
+    """
+    parser.add_argument('-o', '--output-dir', type=str, dest='output_dir',
          help='Directory where all builds happen and buildman has its workspace (default is ../)')
-    parser.add_option('-O', '--override-toolchain', type='string',
+    parser.add_argument('-O', '--override-toolchain', type=str,
          help="Override host toolchain to use for sandbox (e.g.
'clang-7')")
-    parser.add_option('-Q', '--quick', action='store_true',
+    parser.add_argument('-Q', '--quick', action='store_true',
          default=False, help='Do a rough build, with limited warning resolution')
-    parser.add_option('-p', '--full-path', action='store_true',
+    parser.add_argument('-p', '--full-path', action='store_true',
          default=False, help="Use full toolchain path in CROSS_COMPILE")
-    parser.add_option('-P', '--per-board-out-dir', action='store_true',
+    parser.add_argument('-P', '--per-board-out-dir', action='store_true',
          default=False, help="Use an O= (output) directory per board rather than per thread")
-    parser.add_option('-r', '--reproducible-builds', action='store_true',
+    parser.add_argument('--print-arch', action='store_true',
+          default=False, help="Print the architecture for a board (ARCH=)")
+    parser.add_argument('-r', '--reproducible-builds', action='store_true',
          help='Set SOURCE_DATE_EPOCH=0 to support a reproducible build')
-    parser.add_option('-R', '--regen-board-list', action='store_true',
+    parser.add_argument('-R', '--regen-board-list', type=str,
          help='Force regeneration of the list of boards, like the old boards.cfg file')
-    parser.add_option('-s', '--summary', action='store_true',
+    parser.add_argument('-s', '--summary', action='store_true',
          default=False, help='Show a build summary')
-    parser.add_option('-S', '--show-sizes', action='store_true',
+    parser.add_argument('-S', '--show-sizes', action='store_true',
          default=False, help='Show image size variation in summary')
-    parser.add_option('--step', type='int',
+    parser.add_argument('--step', type=int,
          default=1, help='Only build every n commits (0=just first and last)')
    if HAS_TESTS:
-        parser.add_option('--skip-net-tests', action='store_true', default=False,
+        parser.add_argument('--skip-net-tests', action='store_true', default=False,
                          help='Skip tests which need the network')
-        parser.add_option('-t', '--test', action='store_true', dest='test',
+        parser.add_argument('-t', '--test', action='store_true', dest='test',
                          default=False, help='run tests')
-    parser.add_option('-T', '--threads', type='int',
+        parser.add_argument('--coverage', action='store_true',
+                          help='Calculate test coverage')
+    parser.add_argument('-T', '--threads', type=int,
          default=None, help='Number of builder threads to use (0=single-thread)')
-    parser.add_option('-u', '--show_unknown', action='store_true',
+    parser.add_argument('-u', '--show_unknown', action='store_true',
          default=False, help='Show boards with unknown build result')
-    parser.add_option('-U', '--show-environment', action='store_true',
+    parser.add_argument('-U', '--show-environment', action='store_true',
          default=False, help='Show environment changes in summary')
-    parser.add_option('-v', '--verbose', action='store_true',
+    parser.add_argument('-v', '--verbose', action='store_true',
          default=False, help='Show build results while the build progresses')
-    parser.add_option('-V', '--verbose-build', action='store_true',
+    parser.add_argument('-V', '--verbose-build', action='store_true',
          default=False, help='Run make with V=1, logging all output')
-    parser.add_option('-w', '--work-in-output', action='store_true',
+    parser.add_argument('-w', '--work-in-output', action='store_true',
          default=False, help='Use the output directory as the work directory')
-    parser.add_option('-W', '--ignore-warnings', action='store_true',
+    parser.add_argument('-W', '--ignore-warnings', action='store_true',
          default=False, help='Return success even if there are warnings')
-    parser.add_option('-x', '--exclude',
+    parser.add_argument('-x', '--exclude',
dest='exclude',
-          type='string', action='append',
+          type=str, action='append',
          help='Specify a list of boards to exclude, separated by comma')
-    parser.add_option('-y', '--filter-dtb-warnings', action='store_true',
+    parser.add_argument('-y', '--filter-dtb-warnings', action='store_true',
          default=False,
          help='Filter out device-tree-compiler warnings from output')
-    parser.add_option('-Y', '--filter-migration-warnings', action='store_true',
+    parser.add_argument('-Y', '--filter-migration-warnings', action='store_true',
          default=False,
          help='Filter out migration warnings from output')

-    parser.usage += """ [list of target/arch/cpu/board/vendor/soc to build]
+
+def parse_args():
+    """Parse command line arguments from sys.argv[]
+
+    Returns:
+        argparse.Namespace: Parsed command-line arguments
+    """
+    epilog = """ [list of target/arch/cpu/board/vendor/soc to build]

    Build U-Boot for all commits in a branch. Use -n to do a dry run"""

+    parser = argparse.ArgumentParser(epilog=epilog)
+    add_upto_m(parser)
+    add_after_m(parser)
+    parser.add_argument('terms', type=str, nargs='*',
+                        help='Board / SoC names to build')
+    return parser.parse_args()
diff --git a/tools/buildman/control.py b/tools/buildman/control.py
index 09a11f25b3f..5c5720034b6 100644
--- a/tools/buildman/control.py
+++ b/tools/buildman/control.py
@@ -2,15 +2,14 @@
# Copyright (c) 2013 The Chromium OS Authors.
#

+"""Control module for buildman
+
+This holds the main control logic for buildman, when not running tests.
+"""
+
import multiprocessing
-try:
-    import importlib.resources
-except ImportError:
-    # for Python 3.6
-    import importlib_resources
import os
import shutil
-import subprocess
import sys

from buildman import boards
@@ -22,34 +21,58 @@ from patman import gitutil
from patman import patchstream
from u_boot_pylib import command
from u_boot_pylib import terminal
-from u_boot_pylib import tools
from u_boot_pylib.terminal import tprint

-def GetPlural(count):
+TEST_BUILDER = None
+
+def get_plural(count):
    """Returns a plural 's' if count is not 1"""
    return 's' if count != 1 else ''

-def GetActionSummary(is_summary, commits, selected, options):
-    """Return a string summarising the intended action.
+
+def count_build_commits(commits, step):
+    """Calculate the number of commits to be built
+
+    Args:
+        commits (list of Commit): Commits to build or None
+        step (int): Step value for commits, typically 1

    Returns:
-        Summary string.
+        Number of commits that will be built
    """
    if commits:
        count = len(commits)
-        count = (count + options.step - 1) // options.step
-        commit_str = '%d commit%s' % (count, GetPlural(count))
+        return (count + step - 1) // step
+    return 0
+
+
+def get_action_summary(is_summary, commit_count, selected, threads, jobs):
+    """Return a string summarising the intended action.
+
+    Args:
+        is_summary (bool): True if this is a summary (otherwise it is building)
+        commit_count (int): Number of commits being built
+        selected (list of Board): List of Board objects that are marked
+            selected
+        threads (int): Number of processor threads being used
+        jobs (int): Number of jobs to build at once
+
+    Returns:
+        Summary string.
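+
+        For example (illustrative values):
+        'Building 24 commits for 3 boards (8 threads, 4 jobs per thread)'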
+ """ + if commit_count: + commit_str = f'{commit_count} commit{get_plural(commit_count)}' else: commit_str = 'current source' - str = '%s %s for %d boards' % ( - 'Summary of' if is_summary else 'Building', commit_str, - len(selected)) - str += ' (%d thread%s, %d job%s per thread)' % (options.threads, - GetPlural(options.threads), options.jobs, GetPlural(options.jobs)) - return str - -def ShowActions(series, why_selected, boards_selected, builder, options, - board_warnings): + msg = (f"{'Summary of' if is_summary else 'Building'} " + f'{commit_str} for {len(selected)} boards') + msg += (f' ({threads} thread{get_plural(threads)}, ' + f'{jobs} job{get_plural(jobs)} per thread)') + return msg + +# pylint: disable=R0913 +def show_actions(series, why_selected, boards_selected, output_dir, + board_warnings, step, threads, jobs, verbose): """Display a list of actions that we would take, if not a dry run. Args: @@ -61,9 +84,12 @@ def ShowActions(series, why_selected, boards_selected, builder, options, the value would be a list of board names. boards_selected: Dict of selected boards, key is target name, value is Board object - builder: The builder that will be used to build the commits - options: Command line options object + output_dir (str): Output directory for builder board_warnings: List of warnings obtained from board selected + step (int): Step increment through commits + threads (int): Number of processor threads being used + jobs (int): Number of jobs to build at once + verbose (bool): True to indicate why each board was selected """ col = terminal.Color() print('Dry run, so not doing much. But I would do this:') @@ -72,27 +98,27 @@ def ShowActions(series, why_selected, boards_selected, builder, options, commits = series.commits else: commits = None - print(GetActionSummary(False, commits, boards_selected, - options)) - print('Build directory: %s' % builder.base_dir) + print(get_action_summary(False, count_build_commits(commits, step), + boards_selected, threads, jobs)) + print(f'Build directory: {output_dir}') if commits: - for upto in range(0, len(series.commits), options.step): + for upto in range(0, len(series.commits), step): commit = series.commits[upto] print(' ', col.build(col.YELLOW, commit.hash[:8], bright=False), end=' ') print(commit.subject) print() for arg in why_selected: if arg != 'all': - print(arg, ': %d boards' % len(why_selected[arg])) - if options.verbose: - print(' %s' % ' '.join(why_selected[arg])) - print(('Total boards to build for each commit: %d\n' % - len(why_selected['all']))) + print(arg, f': {len(why_selected[arg])} boards') + if verbose: + print(f" {' '.join(why_selected[arg])}") + print('Total boards to build for each ' + f"commit: {len(why_selected['all'])}\n") if board_warnings: for warning in board_warnings: print(col.build(col.YELLOW, warning)) -def ShowToolchainPrefix(brds, toolchains): +def show_toolchain_prefix(brds, toolchains): """Show information about a the tool chain used by one or more boards The function checks that all boards use the same toolchain, then prints @@ -110,15 +136,48 @@ def ShowToolchainPrefix(brds, toolchains): for brd in board_selected.values(): tc_set.add(toolchains.Select(brd.arch)) if len(tc_set) != 1: - return 'Supplied boards must share one toolchain' - return False - tc = tc_set.pop() - print(tc.GetEnvArgs(toolchain.VAR_CROSS_COMPILE)) - return None + sys.exit('Supplied boards must share one toolchain') + tchain = tc_set.pop() + print(tchain.GetEnvArgs(toolchain.VAR_CROSS_COMPILE)) + +def show_arch(brds): + """Show 
information about the architecture used by one or more boards
+
+    The function checks that all boards use the same architecture, then prints
+    the correct value for ARCH.
+
+    Args:
+        brds (Boards): Boards object containing selected boards
+
+    Return:
+        None; exits with an error message if the boards do not share one
+        architecture
+    """
+    board_selected = brds.get_selected_dict()
+    arch_set = set()
+    for brd in board_selected.values():
+        arch_set.add(brd.arch)
+    if len(arch_set) != 1:
+        sys.exit('Supplied boards must share one arch')
+    print(arch_set.pop())

def get_allow_missing(opt_allow, opt_no_allow, num_selected, has_branch):
+    """Figure out whether to allow external blobs
+
+    Uses the allow-missing setting and the provided arguments to decide whether
+    missing external blobs should be allowed
+
+    Args:
+        opt_allow (bool): True if --allow-missing flag is set
+        opt_no_allow (bool): True if --no-allow-missing flag is set
+        num_selected (int): Number of selected boards
+        has_branch (bool): True if a git branch (to build) has been provided
+
+    Returns:
+        bool: True to allow missing external blobs, False to produce an error
+            if external blobs are missing
+    """
    allow_missing = False
-    am_setting = bsettings.GetGlobalItemValue('allow-missing')
+    am_setting = bsettings.get_global_item_value('allow-missing')
    if am_setting:
        if am_setting == 'always':
            allow_missing = True
@@ -133,142 +192,82 @@ def get_allow_missing(opt_allow, opt_no_allow, num_selected, has_branch):
            allow_missing = False
    return allow_missing

-def DoBuildman(options, args, toolchains=None, make_func=None, brds=None,
-               clean_dir=False, test_thread_exceptions=False):
-    """The main control code for buildman
-
-    Args:
-        options: Command line options object
-        args: Command line arguments (list of strings)
-        toolchains: Toolchains to use - this should be a Toolchains()
-                object. If None, then it will be created and scanned
-        make_func: Make function to use for the builder. This is called
-                to execute 'make'. If this is None, the normal function
-                will be used, which calls the 'make' tool with suitable
-                arguments. This setting is useful for tests.
-        brds: Boards() object to use, containing a list of available
-                boards. If this is None it will be created and scanned.
-        clean_dir: Used for tests only, indicates that the existing output_dir
-            should be removed before starting the build
-        test_thread_exceptions: Uses for tests only, True to make the threads
-            raise an exception instead of reporting their result. This simulates
-            a failure in the code somewhere
This simulates - a failure in the code somewhere - """ - global builder - - if options.full_help: - with importlib.resources.path('buildman', 'README.rst') as readme: - tools.print_full_help(str(readme)) - return 0 - gitutil.setup() - col = terminal.Color() +def count_commits(branch, count, col, git_dir): + """Could the number of commits in the branch/ranch being built - options.git_dir = os.path.join(options.git, '.git') - - no_toolchains = toolchains is None - if no_toolchains: - toolchains = toolchain.Toolchains(options.override_toolchain) - - if options.fetch_arch: - if options.fetch_arch == 'list': - sorted_list = toolchains.ListArchs() - print(col.build(col.BLUE, 'Available architectures: %s\n' % - ' '.join(sorted_list))) - return 0 - else: - fetch_arch = options.fetch_arch - if fetch_arch == 'all': - fetch_arch = ','.join(toolchains.ListArchs()) - print(col.build(col.CYAN, '\nDownloading toolchains: %s' % - fetch_arch)) - for arch in fetch_arch.split(','): - print() - ret = toolchains.FetchAndInstall(arch) - if ret: - return ret - return 0 - - if no_toolchains: - toolchains.GetSettings() - toolchains.Scan(options.list_tool_chains and options.verbose) - if options.list_tool_chains: - toolchains.List() - print() - return 0 - - if not options.output_dir: - if options.work_in_output: - sys.exit(col.build(col.RED, '-w requires that you specify -o')) - options.output_dir = '..' - - # Work out what subset of the boards we are building - if not brds: - if not os.path.exists(options.output_dir): - os.makedirs(options.output_dir) - board_file = os.path.join(options.output_dir, 'boards.cfg') - - brds = boards.Boards() - ok = brds.ensure_board_list(board_file, - options.threads or multiprocessing.cpu_count(), - force=options.regen_board_list, - quiet=not options.verbose) - if options.regen_board_list: - return 0 if ok else 2 - brds.read_boards(board_file) - - exclude = [] - if options.exclude: - for arg in options.exclude: - exclude += arg.split(',') - - if options.boards: - requested_boards = [] - for b in options.boards: - requested_boards += b.split(',') - else: - requested_boards = None - why_selected, board_warnings = brds.select_boards(args, exclude, - requested_boards) - selected = brds.get_selected() - if not len(selected): - sys.exit(col.build(col.RED, 'No matching boards found')) - - if options.print_prefix: - err = ShowToolchainPrefix(brds, toolchains) - if err: - sys.exit(col.build(col.RED, err)) - return 0 + Args: + branch (str): Name of branch to build, or None if none + count (int): Number of commits to build, or -1 for all + col (Terminal.Color): Color object to use + git_dir (str): Git directory to use, e.g. './.git' - # Work out how many commits to build. We want to build everything on the - # branch. We also build the upstream commit as a control so we can see - # problems introduced by the first commit on the branch. - count = options.count - has_range = options.branch and '..' in options.branch + Returns: + tuple: + Number of commits being built + True if the 'branch' string contains a range rather than a simple + name + """ + has_range = branch and '..' 
in branch if count == -1: - if not options.branch: + if not branch: count = 1 else: if has_range: - count, msg = gitutil.count_commits_in_range(options.git_dir, - options.branch) + count, msg = gitutil.count_commits_in_range(git_dir, branch) else: - count, msg = gitutil.count_commits_in_branch(options.git_dir, - options.branch) + count, msg = gitutil.count_commits_in_branch(git_dir, branch) if count is None: sys.exit(col.build(col.RED, msg)) elif count == 0: - sys.exit(col.build(col.RED, "Range '%s' has no commits" % - options.branch)) + sys.exit(col.build(col.RED, + f"Range '{branch}' has no commits")) if msg: print(col.build(col.YELLOW, msg)) count += 1 # Build upstream commit also if not count: - msg = ("No commits found to process in branch '%s': " - "set branch's upstream or use -c flag" % options.branch) + msg = (f"No commits found to process in branch '{branch}': " + "set branch's upstream or use -c flag") sys.exit(col.build(col.RED, msg)) - if options.work_in_output: + return count, has_range + + +def determine_series(selected, col, git_dir, count, branch, work_in_output): + """Determine the series which is to be built, if any + + If there is a series, the commits in that series are numbered by setting + their sequence value (starting from 0). This is used by tests. + + Args: + selected (list of Board): List of Board objects that are marked + selected + col (Terminal.Color): Color object to use + git_dir (str): Git directory to use, e.g. './.git' + count (int): Number of commits in branch + branch (str): Name of branch to build, or None if none + work_in_output (bool): True to work in the output directory + + Returns: + Series: Series to build, or None for none + + Read the metadata from the commits. First look at the upstream commit, + then the ones in the branch. We would like to do something like + upstream/master~..branch but that isn't possible if upstream/master is + a merge commit (it will list all the commits that form part of the + merge) + + Conflicting tags are not a problem for buildman, since it does not use + them. For example, Series-version is not useful for buildman. On the + other hand conflicting tags will cause an error. So allow later tags + to overwrite earlier ones by setting allow_overwrite=True + """ + + # Work out how many commits to build. We want to build everything on the + # branch. We also build the upstream commit as a control so we can see + # problems introduced by the first commit on the branch. + count, has_range = count_commits(branch, count, col, git_dir) + if work_in_output: if len(selected) != 1: sys.exit(col.build(col.RED, '-w can only be used with a single board')) @@ -276,141 +275,402 @@ def DoBuildman(options, args, toolchains=None, make_func=None, brds=None, sys.exit(col.build(col.RED, '-w can only be used with a single commit')) - # Read the metadata from the commits. First look at the upstream commit, - # then the ones in the branch. We would like to do something like - # upstream/master~..branch but that isn't possible if upstream/master is - # a merge commit (it will list all the commits that form part of the - # merge) - # Conflicting tags are not a problem for buildman, since it does not use - # them. For example, Series-version is not useful for buildman. On the - # other hand conflicting tags will cause an error. 
So allow later tags - to overwrite earlier ones by setting allow_overwrite=True - if options.branch: + if branch: if count == -1: if has_range: - range_expr = options.branch + range_expr = branch else: - range_expr = gitutil.get_range_in_branch(options.git_dir, - options.branch) - upstream_commit = gitutil.get_upstream(options.git_dir, - options.branch) + range_expr = gitutil.get_range_in_branch(git_dir, branch) + upstream_commit = gitutil.get_upstream(git_dir, branch) series = patchstream.get_metadata_for_list(upstream_commit, - options.git_dir, 1, series=None, allow_overwrite=True) + git_dir, 1, series=None, allow_overwrite=True) series = patchstream.get_metadata_for_list(range_expr, - options.git_dir, None, series, allow_overwrite=True) + git_dir, None, series, allow_overwrite=True) else: # Honour the count - series = patchstream.get_metadata_for_list(options.branch, - options.git_dir, count, series=None, allow_overwrite=True) + series = patchstream.get_metadata_for_list(branch, + git_dir, count, series=None, allow_overwrite=True) + + # Number the commits for test purposes + for i, commit in enumerate(series.commits): + commit.sequence = i else: series = None - if not options.dry_run: - options.verbose = True - if not options.summary: - options.show_errors = True + return series + + +def do_fetch_arch(toolchains, col, fetch_arch): + """Handle the --fetch-arch option + + Args: + toolchains (Toolchains): Tool chains to use + col (terminal.Color): Color object to use + fetch_arch (str): Argument passed to the --fetch-arch option + + Returns: + int: Return code for buildman + """ + if fetch_arch == 'list': + sorted_list = toolchains.ListArchs() + print(col.build( + col.BLUE, + f"Available architectures: {' '.join(sorted_list)}\n")) + return 0 + + if fetch_arch == 'all': + fetch_arch = ','.join(toolchains.ListArchs()) + print(col.build(col.CYAN, + f'\nDownloading toolchains: {fetch_arch}')) + for arch in fetch_arch.split(','): + print() + ret = toolchains.FetchAndInstall(arch) + if ret: + return ret + return 0 + + +def get_toolchains(toolchains, col, override_toolchain, fetch_arch, + list_tool_chains, verbose): + """Get toolchains object to use + + Args: + toolchains (Toolchains or None): Toolchains to use. If None, then a + Toolchains object will be created and scanned + col (Terminal.Color): Color object + override_toolchain (str or None): Override value for toolchain, or None + fetch_arch (str or None): Architecture(s) to fetch a toolchain for, or None + list_tool_chains (bool): True to list all tool chains + verbose (bool): True for verbose output when listing toolchains + + Returns: + Either: + int: Exit code, when the operation completed and buildman should exit + Toolchains: Toolchains object to use + """ + no_toolchains = toolchains is None + if no_toolchains: + toolchains = toolchain.Toolchains(override_toolchain) + + if fetch_arch: + return do_fetch_arch(toolchains, col, fetch_arch) + + if no_toolchains: + toolchains.GetSettings() + toolchains.Scan(list_tool_chains and verbose) + if list_tool_chains: + toolchains.List() + print() + return 0 + return toolchains + + +def get_boards_obj(output_dir, regen_board_list, maintainer_check, full_check, + threads, verbose): + """Obtain the Boards object to use + + Creates the output directory and ensures there is a boards.cfg file, then + reads it in.
+ + Args: + output_dir (str): Output directory to use + regen_board_list (bool): True to just regenerate the board list + maintainer_check (bool): True to just run a maintainer check + full_check (bool): True to just run a full check of Kconfig and + maintainers + threads (int or None): Number of threads to use to create boards file + verbose (bool): False to suppress output from boards-file generation + + Returns: + Either: + int: Exit code, when the operation completed and buildman should exit + Boards: Boards object to use + """ + brds = boards.Boards() + nr_cpus = threads or multiprocessing.cpu_count() + if maintainer_check or full_check: + warnings = brds.build_board_list(jobs=nr_cpus, + warn_targets=full_check)[1] + if warnings: + for warn in warnings: + print(warn, file=sys.stderr) + return 2 + return 0 + + if not os.path.exists(output_dir): + os.makedirs(output_dir) + board_file = os.path.join(output_dir, 'boards.cfg') + if regen_board_list and regen_board_list != '-': + board_file = regen_board_list + + okay = brds.ensure_board_list(board_file, nr_cpus, force=regen_board_list, + quiet=not verbose) + if regen_board_list: + return 0 if okay else 2 + brds.read_boards(board_file) + return brds + + +def determine_boards(brds, args, col, opt_boards, exclude_list): + """Determine which boards to build + + Each element of args and exclude can refer to a board name, arch or SoC + + Args: + brds (Boards): Boards object + args (list of str): Arguments describing boards to build + col (Terminal.Color): Color object + opt_boards (list of str): Specific boards to build, or None for all + exclude_list (list of str): Arguments describing boards to exclude + + Returns: + tuple: + list of Board: List of Board objects that are marked selected + why_selected: Dictionary where each key is a buildman argument + provided by the user, and the value is the list of boards + brought in by that argument. For example, 'arm' might bring + in 400 boards, so in this case the key would be 'arm' and + the value would be a list of board names. + board_warnings: List of warnings obtained from board selection + """ + exclude = [] + if exclude_list: + for arg in exclude_list: + exclude += arg.split(',') + + if opt_boards: + requested_boards = [] + for brd in opt_boards: + requested_boards += brd.split(',') + else: + requested_boards = None + why_selected, board_warnings = brds.select_boards(args, exclude, + requested_boards) + selected = brds.get_selected() + if not selected: + sys.exit(col.build(col.RED, 'No matching boards found')) + return selected, why_selected, board_warnings + + +def adjust_args(args, series, selected): + """Adjust arguments according to various constraints + + Updates verbose, show_errors, threads, jobs and step + + Args: + args (Namespace): Namespace object to adjust + series (Series): Series being built / summarised + selected (list of Board): List of Board objects that are marked + selected + """ + if not series and not args.dry_run: + args.verbose = True + if not args.summary: + args.show_errors = True # By default we have one thread per CPU. But if there are not enough jobs # we can have fewer threads and use a high '-j' value for make.
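A rough sketch of this heuristic, using hypothetical numbers rather than anything from this patch::

    import multiprocessing

    def threads_and_jobs(num_boards, cpus=None):
        # One builder thread per CPU, but no more threads than boards
        cpus = cpus or multiprocessing.cpu_count()
        threads = min(cpus, num_boards)
        # Give the spare parallelism to 'make -j', rounding up
        jobs = max(1, (cpus + num_boards - 1) // num_boards)
        return threads, jobs

    print(threads_and_jobs(2, cpus=8))    # (2, 4): two threads, each 'make -j4'
    print(threads_and_jobs(50, cpus=8))   # (8, 1): eight fully parallel builds
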
- if options.threads is None: - options.threads = min(multiprocessing.cpu_count(), len(selected)) - if not options.jobs: - options.jobs = max(1, (multiprocessing.cpu_count() + + if args.threads is None: + args.threads = min(multiprocessing.cpu_count(), len(selected)) + if not args.jobs: + args.jobs = max(1, (multiprocessing.cpu_count() + len(selected) - 1) // len(selected)) - if not options.step: - options.step = len(series.commits) - 1 + if not args.step: + args.step = len(series.commits) - 1 + + # We can't show function sizes without board details at present + if args.show_bloat: + args.show_detail = True - gnu_make = command.output(os.path.join(options.git, - 'scripts/show-gnu-make'), raise_on_error=False).rstrip() - if not gnu_make: - sys.exit('GNU Make not found') - allow_missing = get_allow_missing(options.allow_missing, - options.no_allow_missing, len(selected), - options.branch) +def setup_output_dir(output_dir, work_in_output, branch, no_subdirs, col, + clean_dir): + """Set up the output directory - # Create a new builder with the selected options. - output_dir = options.output_dir - if options.branch: - dirname = options.branch.replace('/', '_') + Args: + output_dir (str): Output directory provided by the user, or None if none + work_in_output (bool): True to work in the output directory + branch (str): Name of branch to build, or None if none + no_subdirs (bool): True to put the output in the top-level output dir + col (terminal.Color): Color object to use for error messages + clean_dir: Used for tests only, indicates that the existing output_dir + should be removed before starting the build + + Returns: + str: Updated output directory pathname + """ + if not output_dir: + if work_in_output: + sys.exit(col.build(col.RED, '-w requires that you specify -o')) + output_dir = '..' + if branch and not no_subdirs: # As a special case allow the board directory to be placed in the # output directory itself rather than any subdirectory.
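To illustrate the directory naming implemented in the lines that follow (a minimal sketch, not part of the patch; the branch name is invented), any '/' in the branch name becomes '_' so the result is a single valid directory name::

    import os

    def branch_output_dir(output_dir, branch, no_subdirs=False):
        # Branch names may contain '/', which cannot appear in a dirname
        if branch and not no_subdirs:
            output_dir = os.path.join(output_dir, branch.replace('/', '_'))
        return output_dir

    print(branch_output_dir('..', 'feature/lx2160'))   # ../feature_lx2160
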
- if not options.no_subdirs: - output_dir = os.path.join(options.output_dir, dirname) + dirname = branch.replace('/', '_') + output_dir = os.path.join(output_dir, dirname) if clean_dir and os.path.exists(output_dir): shutil.rmtree(output_dir) - adjust_cfg = cfgutil.convert_list_to_dict(options.adjust_cfg) + return output_dir + + +def run_builder(builder, commits, board_selected, args): + """Run the builder or show the summary + + Args: + builder (Builder): Builder to use + commits (list of Commit): List of commits being built, None if no branch + board_selected (dict): Dict of selected boards: + key: target name + value: Board object + args (Namespace): Namespace to use + + Returns: + int: Return code for buildman + """ + gnu_make = command.output(os.path.join(args.git, + 'scripts/show-gnu-make'), raise_on_error=False).rstrip() + if not gnu_make: + sys.exit('GNU Make not found') + builder.gnu_make = gnu_make + + if not args.ide: + commit_count = count_build_commits(commits, args.step) + tprint(get_action_summary(args.summary, commit_count, board_selected, + args.threads, args.jobs)) + + builder.set_display_options( + args.show_errors, args.show_sizes, args.show_detail, args.show_bloat, + args.list_error_boards, args.show_config, args.show_environment, + args.filter_dtb_warnings, args.filter_migration_warnings, args.ide) + if args.summary: + builder.show_summary(commits, board_selected) + else: + fail, warned, excs = builder.build_boards( + commits, board_selected, args.keep_outputs, args.verbose) + if excs: + return 102 + if fail: + return 100 + if warned and not args.ignore_warnings: + return 101 + return 0 + + +def calc_adjust_cfg(adjust_cfg, reproducible_builds): + """Calculate the value to use for adjust_cfg + + Args: + adjust_cfg (list of str): List of configuration changes. See cfgutil for + details + reproducible_builds (bool): True to adjust the configuration to get + reproducible builds + + Returns: + dict: Dictionary of configuration changes to apply + """ + adjust_cfg = cfgutil.convert_list_to_dict(adjust_cfg) # Drop LOCALVERSION_AUTO since it changes the version string on every commit - if options.reproducible_builds: + if reproducible_builds: # If these are mentioned, leave the local version alone if 'LOCALVERSION' in adjust_cfg or 'LOCALVERSION_AUTO' in adjust_cfg: print('Not dropping LOCALVERSION_AUTO for reproducible build') else: adjust_cfg['LOCALVERSION_AUTO'] = '~' + return adjust_cfg - builder = Builder(toolchains, output_dir, options.git_dir, - options.threads, options.jobs, gnu_make=gnu_make, checkout=True, - show_unknown=options.show_unknown, step=options.step, - no_subdirs=options.no_subdirs, full_path=options.full_path, - verbose_build=options.verbose_build, - mrproper=options.mrproper, - per_board_out_dir=options.per_board_out_dir, - config_only=options.config_only, - squash_config_y=not options.preserve_config_y, - warnings_as_errors=options.warnings_as_errors, - work_in_output=options.work_in_output, - test_thread_exceptions=test_thread_exceptions, - adjust_cfg=adjust_cfg, - allow_missing=allow_missing, no_lto=options.no_lto, - reproducible_builds=options.reproducible_builds) - builder.force_config_on_failure = not options.quick - if make_func: - builder.do_make = make_func + +def do_buildman(args, toolchains=None, make_func=None, brds=None, + clean_dir=False, test_thread_exceptions=False): + """The main control code for buildman + + Args: + args (Namespace): Parsed command-line arguments + toolchains: Toolchains to use - this should be a Toolchains() + object.
If None, then it will be created and scanned + make_func: Make function to use for the builder. This is called + to execute 'make'. If this is None, the normal function + will be used, which calls the 'make' tool with suitable + arguments. This setting is useful for tests. + brds: Boards() object to use, containing a list of available + boards. If this is None it will be created and scanned. + clean_dir: Used for tests only, indicates that the existing output_dir + should be removed before starting the build + test_thread_exceptions: Used for tests only, True to make the threads + raise an exception instead of reporting their result. This simulates + a failure in the code somewhere + """ + # Used so testing can obtain the builder: pylint: disable=W0603 + global TEST_BUILDER + + gitutil.setup() + col = terminal.Color() + + git_dir = os.path.join(args.git, '.git') + + toolchains = get_toolchains(toolchains, col, args.override_toolchain, + args.fetch_arch, args.list_tool_chains, + args.verbose) + output_dir = setup_output_dir( + args.output_dir, args.work_in_output, args.branch, + args.no_subdirs, col, clean_dir) + + # Work out what subset of the boards we are building + if not brds: + brds = get_boards_obj(output_dir, args.regen_board_list, + args.maintainer_check, args.full_check, + args.threads, args.verbose) + if isinstance(brds, int): + return brds + + selected, why_selected, board_warnings = determine_boards( + brds, args.terms, col, args.boards, args.exclude) + + if args.print_prefix: + show_toolchain_prefix(brds, toolchains) + return 0 + + if args.print_arch: + show_arch(brds) + return 0 + + series = determine_series(selected, col, git_dir, args.count, + args.branch, args.work_in_output) + + adjust_args(args, series, selected) # For a dry run, just show our actions as a sanity check - if options.dry_run: - ShowActions(series, why_selected, selected, builder, options, - board_warnings) - else: - builder.force_build = options.force_build - builder.force_build_failures = options.force_build_failures - builder.force_reconfig = options.force_reconfig - builder.in_tree = options.in_tree - - # Work out which boards to build - board_selected = brds.get_selected_dict() - - if series: - commits = series.commits - # Number the commits for test purposes - for commit in range(len(commits)): - commits[commit].sequence = commit - else: - commits = None - - if not options.ide: - tprint(GetActionSummary(options.summary, commits, board_selected, - options)) - - # We can't show function sizes without board details at present - if options.show_bloat: - options.show_detail = True - builder.SetDisplayOptions( - options.show_errors, options.show_sizes, options.show_detail, - options.show_bloat, options.list_error_boards, options.show_config, - options.show_environment, options.filter_dtb_warnings, - options.filter_migration_warnings, options.ide) - if options.summary: - builder.ShowSummary(commits, board_selected) - else: - fail, warned, excs = builder.BuildBoards( - commits, board_selected, options.keep_outputs, options.verbose) - if excs: - return 102 - elif fail: - return 100 - elif warned and not options.ignore_warnings: - return 101 - return 0 + if args.dry_run: + show_actions(series, why_selected, selected, output_dir, board_warnings, + args.step, args.threads, args.jobs, + args.verbose) + return 0 + + # Create a new builder with the selected args + builder = Builder(toolchains, output_dir, git_dir, + args.threads, args.jobs, checkout=True, + show_unknown=args.show_unknown, step=args.step, +
no_subdirs=args.no_subdirs, full_path=args.full_path, + verbose_build=args.verbose_build, + mrproper=args.mrproper, + per_board_out_dir=args.per_board_out_dir, + config_only=args.config_only, + squash_config_y=not args.preserve_config_y, + warnings_as_errors=args.warnings_as_errors, + work_in_output=args.work_in_output, + test_thread_exceptions=test_thread_exceptions, + adjust_cfg=calc_adjust_cfg(args.adjust_cfg, + args.reproducible_builds), + allow_missing=get_allow_missing(args.allow_missing, + args.no_allow_missing, + len(selected), args.branch), + no_lto=args.no_lto, + reproducible_builds=args.reproducible_builds, + force_build=args.force_build, + force_build_failures=args.force_build_failures, + force_reconfig=args.force_reconfig, in_tree=args.in_tree, + force_config_on_failure=not args.quick, make_func=make_func) + + TEST_BUILDER = builder + + return run_builder(builder, series.commits if series else None, + brds.get_selected_dict(), args) diff --git a/tools/buildman/func_test.py b/tools/buildman/func_test.py index ebd78f225e1..3115700f07b 100644 --- a/tools/buildman/func_test.py +++ b/tools/buildman/func_test.py @@ -3,9 +3,11 @@ # import os +from pathlib import Path import shutil import sys import tempfile +import time import unittest from buildman import board @@ -38,8 +40,8 @@ chromeos_peach=VBOOT=${chroot}/build/peach_pit/usr ${vboot} ''' BOARDS = [ - ['Active', 'arm', 'armv7', '', 'Tester', 'ARM Board 1', 'board0', ''], - ['Active', 'arm', 'armv7', '', 'Tester', 'ARM Board 2', 'board1', ''], + ['Active', 'arm', 'armv7', '', 'Tester', 'ARM Board 0', 'board0', ''], + ['Active', 'arm', 'armv7', '', 'Tester', 'ARM Board 1', 'board1', ''], ['Active', 'powerpc', 'powerpc', '', 'Tester', 'PowerPC board 1', 'board2', ''], ['Active', 'sandbox', 'sandbox', '', 'Tester', 'Sandbox board', 'board4', ''], ] @@ -184,8 +186,8 @@ class TestFunctional(unittest.TestCase): self._buildman_pathname = sys.argv[0] self._buildman_dir = os.path.dirname(os.path.realpath(sys.argv[0])) command.test_result = self._HandleCommand - bsettings.Setup(None) - bsettings.AddFile(settings_data) + bsettings.setup(None) + bsettings.add_file(settings_data) self.setupToolchains() self._toolchains.Add('arm-gcc', test=False) self._toolchains.Add('powerpc-gcc', test=False) @@ -209,6 +211,12 @@ class TestFunctional(unittest.TestCase): # Set to True to report missing blobs self._missing = False + self._buildman_dir = os.path.dirname(os.path.realpath(sys.argv[0])) + self._test_dir = os.path.join(self._buildman_dir, 'test') + + # Set up some fake source files + shutil.copytree(self._test_dir, self._git_dir) + # Avoid sending any output and clear all terminal output terminal.set_print_test_mode() terminal.get_print_test_lines() @@ -225,29 +233,34 @@ class TestFunctional(unittest.TestCase): return command.run_pipe([[self._buildman_pathname] + list(args)], capture=True, capture_stderr=True) - def _RunControl(self, *args, brds=None, clean_dir=False, - test_thread_exceptions=False): + def _RunControl(self, *args, brds=False, clean_dir=False, + test_thread_exceptions=False, get_builder=True): """Run buildman Args: args: List of arguments to pass - brds: Boards object + brds: Boards object, or False to pass self._boards, or None to pass + None clean_dir: Used for tests only, indicates that the existing output_dir should be removed before starting the build test_thread_exceptions: Used for tests only, True to make the threads raise an exception instead of reporting their result.
This simulates a failure in the code somewhere + get_builder (bool): Set self._builder to the resulting builder Returns: result code from buildman """ sys.argv = [sys.argv[0]] + list(args) - options, args = cmdline.ParseArgs() - result = control.DoBuildman(options, args, toolchains=self._toolchains, - make_func=self._HandleMake, brds=brds or self._boards, - clean_dir=clean_dir, - test_thread_exceptions=test_thread_exceptions) - self._builder = control.builder + args = cmdline.parse_args() + if brds is False: + brds = self._boards + result = control.do_buildman( + args, toolchains=self._toolchains, make_func=self._HandleMake, + brds=brds, clean_dir=clean_dir, + test_thread_exceptions=test_thread_exceptions) + if get_builder: + self._builder = control.TEST_BUILDER return result def testFullHelp(self): @@ -496,10 +509,12 @@ Some images are invalid''' for commit in range(self._commits): for brd in self._boards.get_list(): if brd.arch != 'sandbox': - errfile = self._builder.GetErrFile(commit, brd.target) + errfile = self._builder.get_err_file(commit, brd.target) fd = open(errfile) - self.assertEqual(fd.readlines(), - ['No tool chain for %s\n' % brd.arch]) + self.assertEqual( + fd.readlines(), + [f'Tool chain error for {brd.arch}: ' + f"No tool chain found for arch '{brd.arch}'"]) fd.close() def testBranch(self): @@ -573,7 +588,8 @@ Some images are invalid''' def testBranchWithSlash(self): """Test building a branch with a '/' in the name""" self._test_branch = '/__dev/__testbranch' - self._RunControl('-b', self._test_branch, clean_dir=False) + self._RunControl('-b', self._test_branch, '-o', self._output_dir, + clean_dir=False) self.assertEqual(self._builder.count, self._total_builds) self.assertEqual(self._builder.fail, 0) @@ -686,7 +702,7 @@ Some images are invalid''' def testBlobSettingsAlways(self): """Test the 'always' policy""" - bsettings.SetItem('global', 'allow-missing', 'always') + bsettings.set_item('global', 'allow-missing', 'always') self.assertEqual(True, control.get_allow_missing(False, False, 1, False)) self.assertEqual(False, @@ -694,7 +710,7 @@ Some images are invalid''' def testBlobSettingsBranch(self): """Test the 'branch' policy""" - bsettings.SetItem('global', 'allow-missing', 'branch') + bsettings.set_item('global', 'allow-missing', 'branch') self.assertEqual(False, control.get_allow_missing(False, False, 1, False)) self.assertEqual(True, @@ -704,7 +720,7 @@ Some images are invalid''' def testBlobSettingsMultiple(self): """Test the 'multiple' policy""" - bsettings.SetItem('global', 'allow-missing', 'multiple') + bsettings.set_item('global', 'allow-missing', 'multiple') self.assertEqual(False, control.get_allow_missing(False, False, 1, False)) self.assertEqual(True, @@ -714,7 +730,7 @@ Some images are invalid''' def testBlobSettingsBranchMultiple(self): """Test the 'branch multiple' policy""" - bsettings.SetItem('global', 'allow-missing', 'branch multiple') + bsettings.set_item('global', 'allow-missing', 'branch multiple') self.assertEqual(False, control.get_allow_missing(False, False, 1, False)) self.assertEqual(True, @@ -779,3 +795,278 @@ Some images are invalid''' CONFIG_LOCALVERSION=y ''', cfg_data) self.assertIn('Not dropping LOCALVERSION_AUTO', stdout.getvalue()) + + def test_scan_defconfigs(self): + """Test scanning the defconfigs to obtain all the boards""" + src = self._git_dir + + # Scan the test directory which contains a Kconfig and some *_defconfig + # files + params, warnings = self._boards.scan_defconfigs(src, src) + + # We should get two boards +
self.assertEquals(2, len(params)) + self.assertFalse(warnings) + first = 0 if params[0]['target'] == 'board0' else 1 + board0 = params[first] + board2 = params[1 - first] + + self.assertEquals('arm', board0['arch']) + self.assertEquals('armv7', board0['cpu']) + self.assertEquals('-', board0['soc']) + self.assertEquals('Tester', board0['vendor']) + self.assertEquals('ARM Board 0', board0['board']) + self.assertEquals('config0', board0['config']) + self.assertEquals('board0', board0['target']) + + self.assertEquals('powerpc', board2['arch']) + self.assertEquals('ppc', board2['cpu']) + self.assertEquals('mpc85xx', board2['soc']) + self.assertEquals('Tester', board2['vendor']) + self.assertEquals('PowerPC board 1', board2['board']) + self.assertEquals('config2', board2['config']) + self.assertEquals('board2', board2['target']) + + def test_output_is_new(self): + """Test detecting new changes to Kconfig""" + base = self._base_dir + src = self._git_dir + config_dir = os.path.join(src, 'configs') + delay = 0.02 + + # Create a boards.cfg file + boards_cfg = os.path.join(base, 'boards.cfg') + content = b'''# +# List of boards +# Automatically generated by buildman/boards.py: don't edit +# +# Status, Arch, CPU, SoC, Vendor, Board, Target, Config, Maintainers + +Active aarch64 armv8 - armltd corstone1000 board0 +Active aarch64 armv8 - armltd total_compute board2 +''' + # Check missing file + self.assertFalse(boards.output_is_new(boards_cfg, config_dir, src)) + + # Check that the boards.cfg file is newer + time.sleep(delay) + tools.write_file(boards_cfg, content) + self.assertTrue(boards.output_is_new(boards_cfg, config_dir, src)) + + # Touch the Kconfig files after a short delay to avoid a race + time.sleep(delay) + Path(os.path.join(src, 'Kconfig')).touch() + self.assertFalse(boards.output_is_new(boards_cfg, config_dir, src)) + Path(boards_cfg).touch() + self.assertTrue(boards.output_is_new(boards_cfg, config_dir, src)) + + # Touch a different Kconfig file + time.sleep(delay) + Path(os.path.join(src, 'Kconfig.something')).touch() + self.assertFalse(boards.output_is_new(boards_cfg, config_dir, src)) + Path(boards_cfg).touch() + self.assertTrue(boards.output_is_new(boards_cfg, config_dir, src)) + + # Touch a MAINTAINERS file + time.sleep(delay) + Path(os.path.join(src, 'MAINTAINERS')).touch() + self.assertFalse(boards.output_is_new(boards_cfg, config_dir, src)) + + Path(boards_cfg).touch() + self.assertTrue(boards.output_is_new(boards_cfg, config_dir, src)) + + # Touch a defconfig file + time.sleep(delay) + Path(os.path.join(config_dir, 'board0_defconfig')).touch() + self.assertFalse(boards.output_is_new(boards_cfg, config_dir, src)) + Path(boards_cfg).touch() + self.assertTrue(boards.output_is_new(boards_cfg, config_dir, src)) + + # Remove a board and check that the boards.cfg file is now older + Path(os.path.join(config_dir, 'board0_defconfig')).unlink() + self.assertFalse(boards.output_is_new(boards_cfg, config_dir, src)) + + def test_maintainers(self): + """Test detecting boards without a MAINTAINERS entry""" + src = self._git_dir + main = os.path.join(src, 'boards', 'board0', 'MAINTAINERS') + other = os.path.join(src, 'boards', 'board2', 'MAINTAINERS') + kc_file = os.path.join(src, 'Kconfig') + config_dir = os.path.join(src, 'configs') + params_list, warnings = self._boards.build_board_list(config_dir, src) + + # There should be two boards and no warnings + self.assertEquals(2, len(params_list)) + self.assertFalse(warnings) + + # Set an invalid status line in the file + orig_data = tools.read_file(main,
binary=False) + lines = ['S: Other\n' if line.startswith('S:') else line + for line in orig_data.splitlines(keepends=True)] + tools.write_file(main, ''.join(lines), binary=False) + params_list, warnings = self._boards.build_board_list(config_dir, src) + self.assertEquals(2, len(params_list)) + params = params_list[0] + if params['target'] == 'board2': + params = params_list[1] + self.assertEquals('-', params['status']) + self.assertEquals(["WARNING: Other: unknown status for 'board0'"], + warnings) + + # Remove the status line (S:) from a file + lines = [line for line in orig_data.splitlines(keepends=True) + if not line.startswith('S:')] + tools.write_file(main, ''.join(lines), binary=False) + params_list, warnings = self._boards.build_board_list(config_dir, src) + self.assertEquals(2, len(params_list)) + self.assertEquals(["WARNING: -: unknown status for 'board0'"], warnings) + + # Remove the configs/ line (F:) from a file - this is the last line + data = ''.join(orig_data.splitlines(keepends=True)[:-1]) + tools.write_file(main, data, binary=False) + params_list, warnings = self._boards.build_board_list(config_dir, src) + self.assertEquals(2, len(params_list)) + self.assertEquals( + ["WARNING: no maintainers for 'board0'", + 'WARNING: orphaned defconfig in boards/board0/MAINTAINERS ending at line 4', + ], warnings) + + # Mark a board as orphaned - this should give a warning + lines = ['S: Orphaned' if line.startswith('S') else line + for line in orig_data.splitlines(keepends=True)] + tools.write_file(main, ''.join(lines), binary=False) + params_list, warnings = self._boards.build_board_list(config_dir, src) + self.assertEquals(2, len(params_list)) + self.assertEquals(["WARNING: no maintainers for 'board0'"], warnings) + + # Change the maintainer to '-' - this should give a warning + lines = ['M: -' if line.startswith('M') else line + for line in orig_data.splitlines(keepends=True)] + tools.write_file(main, ''.join(lines), binary=False) + params_list, warnings = self._boards.build_board_list(config_dir, src) + self.assertEquals(2, len(params_list)) + self.assertEquals(["WARNING: -: unknown status for 'board0'"], warnings) + + # Remove the maintainer line (M:) from a file + lines = [line for line in orig_data.splitlines(keepends=True) + if not line.startswith('M:')] + tools.write_file(main, ''.join(lines), binary=False) + params_list, warnings = self._boards.build_board_list(config_dir, src) + self.assertEquals(2, len(params_list)) + self.assertEquals(["WARNING: no maintainers for 'board0'"], warnings) + + # Move the contents of the second file into this one, removing the + # second file, to check multiple records in a single file. 
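For reference, the combined file then holds the two records back to back, mirroring the test data added elsewhere in this patch for board0 and board2::

    ARM Board 0
    M: Mary Mary <quite@contrary.org>
    S: Maintained
    F: boards/board0
    F: configs/board0_defconfig
    ARM Board 2
    M: Old Mother <hubbard@cupboard.org>
    S: Maintained
    F: boards/board2
    F: configs/board2_defconfig
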
+ both_data = orig_data + tools.read_file(other, binary=False) + tools.write_file(main, both_data, binary=False) + os.remove(other) + params_list, warnings = self._boards.build_board_list(config_dir, src) + self.assertEquals(2, len(params_list)) + self.assertFalse(warnings) + + # Add another record, this should be ignored with a warning + extra = '\n\nAnother\nM: Fred\nF: configs/board9_defconfig\nS: other\n' + tools.write_file(main, both_data + extra, binary=False) + params_list, warnings = self._boards.build_board_list(config_dir, src) + self.assertEquals(2, len(params_list)) + self.assertEquals( + ['WARNING: orphaned defconfig in boards/board0/MAINTAINERS ending at line 16'], + warnings) + + # Add another TARGET to the Kconfig + tools.write_file(main, both_data, binary=False) + orig_kc_data = tools.read_file(kc_file) + extra = (b''' +if TARGET_BOARD2 +config TARGET_OTHER +\tbool "other" +\tdefault y +endif +''') + tools.write_file(kc_file, orig_kc_data + extra) + params_list, warnings = self._boards.build_board_list(config_dir, src, + warn_targets=True) + self.assertEquals(2, len(params_list)) + self.assertEquals( + ['WARNING: board2_defconfig: Duplicate TARGET_xxx: board2 and other'], + warnings) + + # Remove the TARGET_BOARD2 Kconfig option + lines = [b'' if line == b'config TARGET_BOARD2\n' else line + for line in orig_kc_data.splitlines(keepends=True)] + tools.write_file(kc_file, b''.join(lines)) + params_list, warnings = self._boards.build_board_list(config_dir, src, + warn_targets=True) + self.assertEquals(2, len(params_list)) + self.assertEquals( + ['WARNING: board2_defconfig: No TARGET_BOARD2 enabled'], + warnings) + tools.write_file(kc_file, orig_kc_data) + + # Replace the last F: line of board 2 with an N: line + data = ''.join(both_data.splitlines(keepends=True)[:-1]) + tools.write_file(main, data + 'N: oa.*2\n', binary=False) + params_list, warnings = self._boards.build_board_list(config_dir, src) + self.assertEquals(2, len(params_list)) + self.assertFalse(warnings) + + def testRegenBoards(self): + """Test that we can regenerate the boards.cfg file""" + outfile = os.path.join(self._output_dir, 'test-boards.cfg') + if os.path.exists(outfile): + os.remove(outfile) + with test_util.capture_sys_output() as (stdout, stderr): + result = self._RunControl('-R', outfile, brds=None, + get_builder=False) + self.assertTrue(os.path.exists(outfile)) + + def test_print_prefix(self): + """Test that we can print the toolchain prefix""" + with test_util.capture_sys_output() as (stdout, stderr): + result = self._RunControl('-A', 'board0') + self.assertEqual('arm-\n', stdout.getvalue()) + self.assertEqual('', stderr.getvalue()) + + def test_exclude_one(self): + """Test excluding a single board from an arch""" + self._RunControl('arm', '-x', 'board1', '-o', self._output_dir) + self.assertEqual(['board0'], + [b.target for b in self._boards.get_selected()]) + + def test_exclude_arch(self): + """Test excluding an arch""" + self._RunControl('-x', 'arm', '-o', self._output_dir) + self.assertEqual(['board2', 'board4'], + [b.target for b in self._boards.get_selected()]) + + def test_exclude_comma(self): + """Test excluding a comma-separated list of things""" + self._RunControl('-x', 'arm,powerpc', '-o', self._output_dir) + self.assertEqual(['board4'], + [b.target for b in self._boards.get_selected()]) + + def test_exclude_list(self): + """Test excluding a list of things""" + self._RunControl('-x', 'board2', '-x', 'board4', '-o', self._output_dir) + self.assertEqual(['board0', 'board1'], + [b.target for
b in self._boards.get_selected()]) + + def test_single_boards(self): + """Test building single boards""" + self._RunControl('--boards', 'board1', '-o', self._output_dir) + self.assertEqual(1, self._builder.count) + + self._RunControl('--boards', 'board1', '--boards', 'board2', + '-o', self._output_dir) + self.assertEqual(2, self._builder.count) + + self._RunControl('--boards', 'board1,board2', '--boards', 'board4', + '-o', self._output_dir) + self.assertEqual(3, self._builder.count) + + def test_print_arch(self): + """Test that we can print the board architecture""" + with test_util.capture_sys_output() as (stdout, stderr): + result = self._RunControl('--print-arch', 'board0') + self.assertEqual('arm\n', stdout.getvalue()) + self.assertEqual('', stderr.getvalue()) diff --git a/tools/buildman/main.py b/tools/buildman/main.py index 5e1f68d8235..5f42a58ddbb 100755 --- a/tools/buildman/main.py +++ b/tools/buildman/main.py @@ -6,62 +6,91 @@ """See README for more information""" -import doctest -import multiprocessing +try: + from importlib.resources import files +except ImportError: + # for Python 3.6 + import importlib_resources import os -import re import sys # Bring in the patman libraries +# pylint: disable=C0413 our_path = os.path.dirname(os.path.realpath(__file__)) sys.path.insert(1, os.path.join(our_path, '..')) # Our modules -from buildman import board from buildman import bsettings -from buildman import builder from buildman import cmdline from buildman import control -from buildman import toolchain -from patman import patchstream -from patman import gitutil -from u_boot_pylib import terminal from u_boot_pylib import test_util +from u_boot_pylib import tools -def RunTests(skip_net_tests, verboose, args): +def run_tests(skip_net_tests, debug, verbose, args): + """Run the buildman tests + + Args: + skip_net_tests (bool): True to skip tests which need the network + debug (bool): True to run in debugging mode (full traceback) + verbose (int): Verbosity level to use (0-4) + args (list of str): List of tests to run, empty to run all + """ + # These imports are here since tests are not available when buildman is + # installed as a Python module + # pylint: disable=C0415 from buildman import func_test from buildman import test - import doctest - test_name = args and args[0] or None + test_name = args.terms and args.terms[0] or None if skip_net_tests: test.use_network = False # Run the entry tests first, since these need to be the first to import the # 'entry' module. result = test_util.run_test_suites( - 'buildman', False, verboose, False, None, test_name, [], + 'buildman', debug, verbose, False, args.threads, test_name, [], [test.TestBuild, func_test.TestFunctional, 'buildman.toolchain', 'patman.gitutil']) return (0 if result.wasSuccessful() else 1) +def run_test_coverage(): + """Run the tests and check that we get 100% coverage""" + test_util.run_test_coverage( + 'tools/buildman/buildman', None, + ['tools/patman/*.py', 'tools/u_boot_pylib/*', '*test_fdt.py', + 'tools/buildman/kconfiglib.py', 'tools/buildman/*test*.py', + 'tools/buildman/main.py'], + '/tmp/b', single_thread='-T1') + + def run_buildman(): - options, args = cmdline.ParseArgs() + """Run buildman - if not options.debug: + This is the main program. It collects arguments and runs either the tests or + the control module.
+ """ + args = cmdline.parse_args() + + if not args.debug: sys.tracebacklimit = 0 # Run our meagre tests - if cmdline.HAS_TESTS and options.test: - RunTests(options.skip_net_tests, options.verbose, args) + if cmdline.HAS_TESTS and args.test: + return run_tests(args.skip_net_tests, args.debug, args.verbose, args) + + elif cmdline.HAS_TESTS and args.coverage: + run_test_coverage() + + elif args.full_help: + tools.print_full_help(str(files('buildman').joinpath('README.rst'))) # Build selected commits for selected boards else: - bsettings.Setup(options.config_file) - ret_code = control.DoBuildman(options, args) - sys.exit(ret_code) + bsettings.setup(args.config_file) + ret_code = control.do_buildman(args) + return ret_code if __name__ == "__main__": - run_buildman() + sys.exit(run_buildman()) diff --git a/tools/buildman/requirements.txt b/tools/buildman/requirements.txt new file mode 100644 index 00000000000..a1efcb9d4b4 --- /dev/null +++ b/tools/buildman/requirements.txt @@ -0,0 +1,2 @@ +jsonschema==4.17.3 +pyyaml==6.0 diff --git a/tools/buildman/test.py b/tools/buildman/test.py index 9fa6445b798..bdd3d84158a 100644 --- a/tools/buildman/test.py +++ b/tools/buildman/test.py @@ -138,8 +138,8 @@ class TestBuild(unittest.TestCase): self.brds.select_boards([]) # Add some test settings - bsettings.Setup(None) - bsettings.AddFile(settings_data) + bsettings.setup(None) + bsettings.add_file(settings_data) # Set up the toolchains self.toolchains = toolchain.Toolchains() @@ -208,8 +208,8 @@ class TestBuild(unittest.TestCase): # Build the boards for the pre-defined commits and warnings/errors # associated with each. This calls our Make() to inject the fake output. - build.BuildBoards(self.commits, board_selected, keep_outputs=False, - verbose=False) + build.build_boards(self.commits, board_selected, keep_outputs=False, + verbose=False) lines = terminal.get_print_test_lines() count = 0 for line in lines: @@ -219,8 +219,8 @@ class TestBuild(unittest.TestCase): # We should get two starting messages, an update for every commit built # and a summary message self.assertEqual(count, len(commits) * len(BOARDS) + 3) - build.SetDisplayOptions(**kwdisplay_args); - build.ShowSummary(self.commits, board_selected) + build.set_display_options(**kwdisplay_args); + build.show_summary(self.commits, board_selected) if echo_lines: terminal.echo_print_test_lines() return iter(terminal.get_print_test_lines()) @@ -465,7 +465,7 @@ class TestBuild(unittest.TestCase): options.show_errors = False options.keep_outputs = False args = ['tegra20'] - control.DoBuildman(options, args) + control.do_buildman(options, args) def testBoardSingle(self): """Test single board selection""" @@ -528,17 +528,17 @@ class TestBuild(unittest.TestCase): 'sandbox']), ({'all': ['board4'], 'sandbox': ['board4']}, [])) def CheckDirs(self, build, dirname): - self.assertEqual('base%s' % dirname, build._GetOutputDir(1)) + self.assertEqual('base%s' % dirname, build.get_output_dir(1)) self.assertEqual('base%s/fred' % dirname, - build.GetBuildDir(1, 'fred')) + build.get_build_dir(1, 'fred')) self.assertEqual('base%s/fred/done' % dirname, - build.GetDoneFile(1, 'fred')) + build.get_done_file(1, 'fred')) self.assertEqual('base%s/fred/u-boot.sizes' % dirname, - build.GetFuncSizesFile(1, 'fred', 'u-boot')) + build.get_func_sizes_file(1, 'fred', 'u-boot')) self.assertEqual('base%s/fred/u-boot.objdump' % dirname, - build.GetObjdumpFile(1, 'fred', 'u-boot')) + build.get_objdump_file(1, 'fred', 'u-boot')) self.assertEqual('base%s/fred/err' % dirname, - 
build.GetErrFile(1, 'fred')) + build.get_err_file(1, 'fred')) def testOutputDir(self): build = builder.Builder(self.toolchains, BASE_DIR, None, 1, 2, @@ -622,7 +622,7 @@ class TestBuild(unittest.TestCase): build = builder.Builder(self.toolchains, base_dir, None, 1, 2) build.commits = self.commits build.commit_count = len(commits) - result = set(build._GetOutputSpaceRemovals()) + result = set(build._get_output_space_removals()) expected = set([os.path.join(base_dir, f) for f in to_remove]) self.assertEqual(expected, result) diff --git a/tools/buildman/test/Kconfig b/tools/buildman/test/Kconfig new file mode 100644 index 00000000000..a87660ce457 --- /dev/null +++ b/tools/buildman/test/Kconfig @@ -0,0 +1,72 @@ +# Board properties +config SYS_ARCH + string + +config SYS_CPU + string + +config SYS_SOC + string + +config SYS_VENDOR + string + +config SYS_BOARD + string + +config SYS_CONFIG_NAME + string + + +# Available targets +config TARGET_BOARD0 + bool "board 0" + +config TARGET_BOARD2 + bool "board 2" + + +# Settings for each board +if TARGET_BOARD0 + +config SYS_ARCH + default "arm" + +config SYS_CPU + default "armv7" + +#config SYS_SOC +# string + +config SYS_VENDOR + default "Tester" + +config SYS_BOARD + default "ARM Board 0" + +config SYS_CONFIG_NAME + default "config0" + +endif + +if TARGET_BOARD2 + +config SYS_ARCH + default "powerpc" + +config SYS_CPU + default "ppc" + +config SYS_SOC + default "mpc85xx" + +config SYS_VENDOR + default "Tester" + +config SYS_BOARD + default "PowerPC board 1" + +config SYS_CONFIG_NAME + default "config2" + +endif diff --git a/tools/buildman/test/boards/board0/MAINTAINERS b/tools/buildman/test/boards/board0/MAINTAINERS new file mode 100644 index 00000000000..08207ff3f48 --- /dev/null +++ b/tools/buildman/test/boards/board0/MAINTAINERS @@ -0,0 +1,5 @@ +ARM Board 0 +M: Mary Mary <quite@contrary.org> +S: Maintained +F: boards/board0 +F: configs/board0_defconfig diff --git a/tools/buildman/test/boards/board2/MAINTAINERS b/tools/buildman/test/boards/board2/MAINTAINERS new file mode 100644 index 00000000000..c1547822026 --- /dev/null +++ b/tools/buildman/test/boards/board2/MAINTAINERS @@ -0,0 +1,5 @@ +ARM Board 2 +M: Old Mother <hubbard@cupboard.org> +S: Maintained +F: boards/board2 +F: configs/board2_defconfig diff --git a/tools/buildman/test/configs/board0_defconfig b/tools/buildman/test/configs/board0_defconfig new file mode 100644 index 00000000000..50e562e53b2 --- /dev/null +++ b/tools/buildman/test/configs/board0_defconfig @@ -0,0 +1 @@ +CONFIG_TARGET_BOARD0=y diff --git a/tools/buildman/test/configs/board2_defconfig b/tools/buildman/test/configs/board2_defconfig new file mode 100644 index 00000000000..8b76c0ae1d0 --- /dev/null +++ b/tools/buildman/test/configs/board2_defconfig @@ -0,0 +1 @@ +CONFIG_TARGET_BOARD2=y diff --git a/tools/buildman/toolchain.py b/tools/buildman/toolchain.py index 0ecd8458b91..b05001194e4 100644 --- a/tools/buildman/toolchain.py +++ b/tools/buildman/toolchain.py @@ -139,7 +139,7 @@ class Toolchain: """Get toolchain wrapper from the setting file.
""" value = '' - for name, value in bsettings.GetItems('toolchain-wrapper'): + for name, value in bsettings.get_items('toolchain-wrapper'): if not value: print("Warning: Wrapper not found") if value: @@ -249,7 +249,7 @@ class Toolchains: self.prefixes = {} self.paths = [] self.override_toolchain = override_toolchain - self._make_flags = dict(bsettings.GetItems('make-flags')) + self._make_flags = dict(bsettings.get_items('make-flags')) def GetPathList(self, show_warning=True): """Get a list of available toolchain paths @@ -261,7 +261,7 @@ class Toolchains: List of strings, each a path to a toolchain mentioned in the [toolchain] section of the settings file. """ - toolchains = bsettings.GetItems('toolchain') + toolchains = bsettings.get_items('toolchain') if show_warning and not toolchains: print(("Warning: No tool chains. Please run 'buildman " "--fetch-arch all' to download all available toolchains, or " @@ -283,7 +283,7 @@ class Toolchains: Args: show_warning: True to show a warning if there are no tool chains. """ - self.prefixes = bsettings.GetItems('toolchain-prefix') + self.prefixes = bsettings.get_items('toolchain-prefix') self.paths += self.GetPathList(show_warning) def Add(self, fname, test=True, verbose=False, priority=PRIORITY_CALC, @@ -399,7 +399,7 @@ class Toolchains: returns: toolchain object, or None if none found """ - for tag, value in bsettings.GetItems('toolchain-alias'): + for tag, value in bsettings.get_items('toolchain-alias'): if arch == tag: for alias in value.split(): if alias in self.toolchains: @@ -421,7 +421,7 @@ class Toolchains: Returns: Resolved string - >>> bsettings.Setup(None) + >>> bsettings.setup(None) >>> tcs = Toolchains() >>> tcs.Add('fred', False) >>> var_dict = {'oblique' : 'OBLIQUE', 'first' : 'fi${second}rst', \ @@ -499,7 +499,7 @@ class Toolchains: if arch == 'aarch64': arch = 'arm64' base = 'https://www.kernel.org/pub/tools/crosstool/files/bin' - versions = ['12.2.0', '11.1.0'] + versions = ['13.1.0', '12.2.0'] links = [] for version in versions: url = '%s/%s/%s/' % (base, arch, version) @@ -598,5 +598,5 @@ class Toolchains: if not self.TestSettingsHasPath(dirpath): print(("Adding 'download' to config file '%s'" % bsettings.config_fname)) - bsettings.SetItem('toolchain', 'download', '%s/*/*' % dest) + bsettings.set_item('toolchain', 'download', '%s/*/*' % dest) return 0 diff --git a/tools/docker/Dockerfile b/tools/docker/Dockerfile index 9804b55ddde..3d2b64a355f 100644 --- a/tools/docker/Dockerfile +++ b/tools/docker/Dockerfile @@ -2,7 +2,7 @@ # This Dockerfile is used to build an image containing basic stuff to be used # to build U-Boot and run our test suites. 
-FROM ubuntu:jammy-20230308 +FROM ubuntu:jammy-20230624 MAINTAINER Tom Rini <trini@konsulko.com> LABEL Description=" This image is for building U-Boot inside a container" @@ -14,22 +14,22 @@ RUN apt-get update && apt-get install -y gnupg2 wget xz-utils && rm -rf /var/lib RUN wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add - RUN echo deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy-16 main | tee /etc/apt/sources.list.d/llvm.list -# Manually install the kernel.org "Crosstool" based toolchains for gcc-12.2.0 -RUN wget -O - https://mirrors.edge.kernel.org/pub/tools/crosstool/files/bin/x86_64/12.2.0/x86_64-gcc-12.2.0-nolibc-aarch64-linux.tar.xz | tar -C /opt -xJ -RUN wget -O - https://mirrors.edge.kernel.org/pub/tools/crosstool/files/bin/x86_64/12.2.0/x86_64-gcc-12.2.0-nolibc-arm-linux-gnueabi.tar.xz | tar -C /opt -xJ -RUN wget -O - https://mirrors.edge.kernel.org/pub/tools/crosstool/files/bin/x86_64/12.2.0/x86_64-gcc-12.2.0-nolibc-i386-linux.tar.xz | tar -C /opt -xJ -RUN wget -O - https://mirrors.edge.kernel.org/pub/tools/crosstool/files/bin/x86_64/12.2.0/x86_64-gcc-12.2.0-nolibc-m68k-linux.tar.xz | tar -C /opt -xJ -RUN wget -O - https://mirrors.edge.kernel.org/pub/tools/crosstool/files/bin/x86_64/12.2.0/x86_64-gcc-12.2.0-nolibc-mips-linux.tar.xz | tar -C /opt -xJ -RUN wget -O - https://mirrors.edge.kernel.org/pub/tools/crosstool/files/bin/x86_64/12.2.0/x86_64-gcc-12.2.0-nolibc-microblaze-linux.tar.xz | tar -C /opt -xJ -RUN wget -O - https://mirrors.edge.kernel.org/pub/tools/crosstool/files/bin/x86_64/12.2.0/x86_64-gcc-12.2.0-nolibc-nios2-linux.tar.xz | tar -C /opt -xJ -RUN wget -O - https://mirrors.edge.kernel.org/pub/tools/crosstool/files/bin/x86_64/12.2.0/x86_64-gcc-12.2.0-nolibc-powerpc-linux.tar.xz | tar -C /opt -xJ -RUN wget -O - https://mirrors.edge.kernel.org/pub/tools/crosstool/files/bin/x86_64/12.2.0/x86_64-gcc-12.2.0-nolibc-riscv64-linux.tar.xz | tar -C /opt -xJ -RUN wget -O - https://mirrors.edge.kernel.org/pub/tools/crosstool/files/bin/x86_64/12.2.0/x86_64-gcc-12.2.0-nolibc-riscv32-linux.tar.xz | tar -C /opt -xJ -RUN wget -O - https://mirrors.edge.kernel.org/pub/tools/crosstool/files/bin/x86_64/12.2.0/x86_64-gcc-12.2.0-nolibc-sh2-linux.tar.xz | tar -C /opt -xJ +# Manually install the kernel.org "Crosstool" based toolchains for gcc-13.1.0 +RUN wget -O - https://mirrors.edge.kernel.org/pub/tools/crosstool/files/bin/x86_64/13.1.0/x86_64-gcc-13.1.0-nolibc-aarch64-linux.tar.xz | tar -C /opt -xJ +RUN wget -O - https://mirrors.edge.kernel.org/pub/tools/crosstool/files/bin/x86_64/13.1.0/x86_64-gcc-13.1.0-nolibc-arc-linux.tar.xz | tar -C /opt -xJ +RUN wget -O - https://mirrors.edge.kernel.org/pub/tools/crosstool/files/bin/x86_64/13.1.0/x86_64-gcc-13.1.0-nolibc-arm-linux-gnueabi.tar.xz | tar -C /opt -xJ +RUN wget -O - https://mirrors.edge.kernel.org/pub/tools/crosstool/files/bin/x86_64/13.1.0/x86_64-gcc-13.1.0-nolibc-i386-linux.tar.xz | tar -C /opt -xJ +RUN wget -O - https://mirrors.edge.kernel.org/pub/tools/crosstool/files/bin/x86_64/13.1.0/x86_64-gcc-13.1.0-nolibc-m68k-linux.tar.xz | tar -C /opt -xJ +RUN wget -O - https://mirrors.edge.kernel.org/pub/tools/crosstool/files/bin/x86_64/13.1.0/x86_64-gcc-13.1.0-nolibc-mips-linux.tar.xz | tar -C /opt -xJ +RUN wget -O - https://mirrors.edge.kernel.org/pub/tools/crosstool/files/bin/x86_64/13.1.0/x86_64-gcc-13.1.0-nolibc-microblaze-linux.tar.xz | tar -C /opt -xJ +RUN wget -O - https://mirrors.edge.kernel.org/pub/tools/crosstool/files/bin/x86_64/13.1.0/x86_64-gcc-13.1.0-nolibc-nios2-linux.tar.xz | tar -C /opt -xJ +RUN wget -O 
- https://mirrors.edge.kernel.org/pub/tools/crosstool/files/bin/x86_64/13.1.0/x86_64-gcc-13.1.0-nolibc-powerpc-linux.tar.xz | tar -C /opt -xJ +RUN wget -O - https://mirrors.edge.kernel.org/pub/tools/crosstool/files/bin/x86_64/13.1.0/x86_64-gcc-13.1.0-nolibc-riscv64-linux.tar.xz | tar -C /opt -xJ +RUN wget -O - https://mirrors.edge.kernel.org/pub/tools/crosstool/files/bin/x86_64/13.1.0/x86_64-gcc-13.1.0-nolibc-riscv32-linux.tar.xz | tar -C /opt -xJ +RUN wget -O - https://mirrors.edge.kernel.org/pub/tools/crosstool/files/bin/x86_64/13.1.0/x86_64-gcc-13.1.0-nolibc-sh2-linux.tar.xz | tar -C /opt -xJ # Manually install other toolchains RUN wget -O - https://github.com/foss-xtensa/toolchain/releases/download/2020.07/x86_64-2020.07-xtensa-dc233c-elf.tar.gz | tar -C /opt -xz -RUN wget -O - https://github.com/foss-for-synopsys-dwc-arc-processors/toolchain/releases/download/arc-2021.03-release/arc_gnu_2021.03_prebuilt_uclibc_le_archs_linux_install.tar.gz | tar --no-same-owner -C /opt -xz # Update and install things from apt now RUN apt-get update && apt-get install -y \ @@ -77,6 +77,7 @@ RUN apt-get update && apt-get install -y \ libsdl1.2-dev \ libsdl2-dev \ libseccomp-dev \ + libslirp-dev \ libssl-dev \ libtool \ libudev-dev \ @@ -128,15 +129,16 @@ RUN git clone git://git.savannah.gnu.org/grub.git /tmp/grub && \ git config --global user.name "GitLab CI Runner" && \ git config --global user.email trini@konsulko.com && \ git cherry-pick 049efdd72eb7baa7b2bf8884391ee7fe650da5a0 && \ + git cherry-pick 403d6540cd608b2706cfa0cb4713f7e4b490ff45 && \ ./bootstrap && \ mkdir -p /opt/grub && \ ./configure --target=aarch64 --with-platform=efi \ CC=gcc \ - TARGET_CC=/opt/gcc-12.2.0-nolibc/aarch64-linux/bin/aarch64-linux-gcc \ - TARGET_OBJCOPY=/opt/gcc-12.2.0-nolibc/aarch64-linux/bin/aarch64-linux-objcopy \ - TARGET_STRIP=/opt/gcc-12.2.0-nolibc/aarch64-linux/bin/aarch64-linux-strip \ - TARGET_NM=/opt/gcc-12.2.0-nolibc/aarch64-linux/bin/aarch64-linux-nm \ - TARGET_RANLIB=/opt/gcc-12.2.0-nolibc/aarch64-linux/bin/aarch64-linux-ranlib && \ + TARGET_CC=/opt/gcc-13.1.0-nolibc/aarch64-linux/bin/aarch64-linux-gcc \ + TARGET_OBJCOPY=/opt/gcc-13.1.0-nolibc/aarch64-linux/bin/aarch64-linux-objcopy \ + TARGET_STRIP=/opt/gcc-13.1.0-nolibc/aarch64-linux/bin/aarch64-linux-strip \ + TARGET_NM=/opt/gcc-13.1.0-nolibc/aarch64-linux/bin/aarch64-linux-nm \ + TARGET_RANLIB=/opt/gcc-13.1.0-nolibc/aarch64-linux/bin/aarch64-linux-ranlib && \ make && \ ./grub-mkimage -O arm64-efi -o /opt/grub/grubaa64.efi --prefix= -d \ grub-core cat chain configfile echo efinet ext2 fat halt help linux \ @@ -146,11 +148,11 @@ RUN git clone git://git.savannah.gnu.org/grub.git /tmp/grub && \ make clean && \ ./configure --target=arm --with-platform=efi \ CC=gcc \ - TARGET_CC=/opt/gcc-12.2.0-nolibc/arm-linux-gnueabi/bin/arm-linux-gnueabi-gcc \ - TARGET_OBJCOPY=/opt/gcc-12.2.0-nolibc/arm-linux-gnueabi/bin/arm-linux-gnueabi-objcopy \ - TARGET_STRIP=/opt/gcc-12.2.0-nolibc/arm-linux-gnueabi/bin/arm-linux-gnueabi-strip \ - TARGET_NM=/opt/gcc-12.2.0-nolibc/arm-linux-gnueabi/bin/arm-linux-gnueabi-nm \ - TARGET_RANLIB=/opt/gcc-12.2.0-nolibc/arm-linux-gnueabi/bin/arm-linux-gnueabi-ranlib && \ + TARGET_CC=/opt/gcc-13.1.0-nolibc/arm-linux-gnueabi/bin/arm-linux-gnueabi-gcc \ + TARGET_OBJCOPY=/opt/gcc-13.1.0-nolibc/arm-linux-gnueabi/bin/arm-linux-gnueabi-objcopy \ + TARGET_STRIP=/opt/gcc-13.1.0-nolibc/arm-linux-gnueabi/bin/arm-linux-gnueabi-strip \ + TARGET_NM=/opt/gcc-13.1.0-nolibc/arm-linux-gnueabi/bin/arm-linux-gnueabi-nm \ + 
TARGET_RANLIB=/opt/gcc-13.1.0-nolibc/arm-linux-gnueabi/bin/arm-linux-gnueabi-ranlib && \ make && \ ./grub-mkimage -O arm-efi -o /opt/grub/grubarm.efi --prefix= -d \ grub-core cat chain configfile echo efinet ext2 fat halt help linux \ @@ -160,11 +162,11 @@ RUN git clone git://git.savannah.gnu.org/grub.git /tmp/grub && \ make clean && \ ./configure --target=riscv64 --with-platform=efi \ CC=gcc \ - TARGET_CC=/opt/gcc-12.2.0-nolibc/riscv64-linux/bin/riscv64-linux-gcc \ - TARGET_OBJCOPY=/opt/gcc-12.2.0-nolibc/riscv64-linux/bin/riscv64-linux-objcopy \ - TARGET_STRIP=/opt/gcc-12.2.0-nolibc/riscv64-linux/bin/riscv64-linux-strip \ - TARGET_NM=/opt/gcc-12.2.0-nolibc/riscv64-linux/bin/riscv64-linux-nm \ - TARGET_RANLIB=/opt/gcc-12.2.0-nolibc/riscv64-linux/bin/riscv64-linux-ranlib && \ + TARGET_CC=/opt/gcc-13.1.0-nolibc/riscv64-linux/bin/riscv64-linux-gcc \ + TARGET_OBJCOPY=/opt/gcc-13.1.0-nolibc/riscv64-linux/bin/riscv64-linux-objcopy \ + TARGET_STRIP=/opt/gcc-13.1.0-nolibc/riscv64-linux/bin/riscv64-linux-strip \ + TARGET_NM=/opt/gcc-13.1.0-nolibc/riscv64-linux/bin/riscv64-linux-nm \ + TARGET_RANLIB=/opt/gcc-13.1.0-nolibc/riscv64-linux/bin/riscv64-linux-ranlib && \ make && \ ./grub-mkimage -O riscv64-efi -o /opt/grub/grubriscv64.efi --prefix= -d \ grub-core cat chain configfile echo efinet ext2 fat halt help linux \ @@ -175,13 +177,10 @@ RUN git clone git://git.savannah.gnu.org/grub.git /tmp/grub && \ RUN git clone https://gitlab.com/qemu-project/qemu.git /tmp/qemu && \ cd /tmp/qemu && \ - git submodule update --init dtc && \ - git checkout v6.1.0 && \ + git checkout v8.0.3 && \ # config user.name and user.email to make 'git am' happy git config user.name u-boot && \ git config user.email u-boot@denx.de && \ - # manually apply the bug fix for QEMU 6.1.0 Xilinx Zynq UART emulation codes - wget -O - http://patchwork.ozlabs.org/project/qemu-devel/patch/20210823020813.25192-2-bmeng.cn@gmail.com/mbox/ | git am && \ ./configure --prefix=/opt/qemu --target-list="aarch64-softmmu,arm-softmmu,i386-softmmu,m68k-softmmu,mips-softmmu,mips64-softmmu,mips64el-softmmu,mipsel-softmmu,ppc-softmmu,riscv32-softmmu,riscv64-softmmu,sh4-softmmu,x86_64-softmmu,xtensa-softmmu" && \ make -j$(nproc) all install && \ rm -rf /tmp/qemu @@ -278,8 +277,7 @@ RUN virtualenv -p /usr/bin/python3 /tmp/venv && \ # Create the buildman config file RUN /bin/echo -e "[toolchain]\nroot = /usr" > ~/.buildman -RUN /bin/echo -e "kernelorg = /opt/gcc-12.2.0-nolibc/*" >> ~/.buildman -RUN /bin/echo -e "arc = /opt/arc_gnu_2021.03_prebuilt_uclibc_le_archs_linux_install" >> ~/.buildman +RUN /bin/echo -e "kernelorg = /opt/gcc-13.1.0-nolibc/*" >> ~/.buildman RUN /bin/echo -e "\n[toolchain-prefix]\nxtensa = /opt/2020.07/xtensa-dc233c-elf/bin/xtensa-dc233c-elf-" >> ~/.buildman; RUN /bin/echo -e "\n[toolchain-alias]\nsh = sh2" >> ~/.buildman RUN /bin/echo -e "\nsandbox = x86_64" >> ~/.buildman diff --git a/tools/dtoc/fdt.py b/tools/dtoc/fdt.py index a8e05349a72..fd0f3e94f5c 100644 --- a/tools/dtoc/fdt.py +++ b/tools/dtoc/fdt.py @@ -13,6 +13,7 @@ from dtoc import fdt_util import libfdt from libfdt import QUIET_NOTFOUND from u_boot_pylib import tools +from u_boot_pylib import tout # This deals with a device tree, presenting it as an assortment of Node and # Prop objects, representing nodes and properties, respectively. 
This file @@ -264,6 +265,13 @@ class Prop: fdt_obj.setprop(node.Offset(), self.name, self.bytes) self.dirty = False + def purge(self): + """Set the property's offset to None + + The property remains in the tree structure and will be recreated when + the FDT is synced + """ + self._offset = None class Node: """A device tree node @@ -534,8 +542,8 @@ class Node: """ return self.AddData(prop_name, struct.pack('>I', val)) - def AddSubnode(self, name): - """Add a new subnode to the node + def Subnode(self, name): + """Create a new subnode of this node Args: name: name of node to add @@ -544,10 +552,72 @@ New subnode that was created """ path = self.path + '/' + name - subnode = Node(self._fdt, self, None, name, path) + return Node(self._fdt, self, None, name, path) + + def AddSubnode(self, name): + """Add a new subnode to the node, after all other subnodes + + Args: + name: name of node to add + + Returns: + New subnode that was created + """ + subnode = self.Subnode(name) self.subnodes.append(subnode) return subnode + def insert_subnode(self, name): + """Add a new subnode to the node, before all other subnodes + + This deletes other subnodes and sets their offset to None, so that they + will be recreated after this one. + + Args: + name: name of node to add + + Returns: + New subnode that was created + """ + # Deleting a node invalidates the offsets of all following nodes, so + # process in reverse order so that the offset of each node remains valid + # until deletion. + for subnode in reversed(self.subnodes): + subnode.purge(True) + subnode = self.Subnode(name) + self.subnodes.insert(0, subnode) + return subnode + + def purge(self, delete_it=False): + """Purge this node, setting its offset to None + + Args: + delete_it (bool): True to also delete the node from the FDT; it + will be recreated when the FDT is next synced + """ + if self._offset is not None: + if delete_it: + CheckErr(self._fdt._fdt_obj.del_node(self.Offset()), + "Node '%s': delete" % self.path) + self._offset = None + self._fdt.Invalidate() + + for prop in self.props.values(): + prop.purge() + + for subnode in self.subnodes: + subnode.purge(False) + + def move_to_first(self): + """Move the current node to first in its parent's node list""" + parent = self.parent + if parent.subnodes and parent.subnodes[0] == self: + return + for subnode in reversed(parent.subnodes): + subnode.purge(True) + + new_subnodes = [self] + for subnode in parent.subnodes: + if subnode != self: + new_subnodes.append(subnode) + parent.subnodes = new_subnodes + def Delete(self): """Delete a node @@ -635,6 +705,71 @@ class Node: prop.Sync(auto_resize) return added + def merge_props(self, src): + """Copy missing properties (except 'phandle') from another node + + Args: + src (Node): Node containing properties to copy + + Adds properties which are present in src but not in this node. Any + 'phandle' property is not copied since this might result in two nodes + with the same phandle, thus making phandle references ambiguous. + """ + for name, src_prop in src.props.items(): + if name != 'phandle' and name not in self.props: + self.props[name] = Prop(self, None, name, src_prop.bytes) + + def copy_node(self, src): + """Copy a node and all its subnodes into this node + + Args: + src (Node): Node to copy + + Returns: + Node: Resulting destination node + + This works recursively. + + The new node is put before all other nodes. If the node already + exists, just its subnodes and properties are copied, placing them before + any existing subnodes. Properties which exist in the destination node + already are not copied.
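+
+        For example, assuming `dtb` contains nodes `/dest` and `/src`
+        (hypothetical paths, purely for illustration):
+
+            dst = dtb.GetNode('/dest')
+            dst.copy_node(dtb.GetNode('/src'))
+            dtb.Sync(auto_resize=True)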
+ """ + dst = self.FindNode(src.name) + if dst: + dst.move_to_first() + else: + dst = self.insert_subnode(src.name) + dst.merge_props(src) + + # Process in reverse order so that they appear correctly in the result, + # since copy_node() puts the node first in the list + for node in reversed(src.subnodes): + dst.copy_node(node) + return dst + + def copy_subnodes_from_phandles(self, phandle_list): + """Copy subnodes of a list of nodes into another node + + Args: + phandle_list (list of int): List of phandles of nodes to copy + + For each node in the phandle list, its subnodes and their properties are + copied recursively. Note that it does not copy the node itself, nor its + properties. + """ + # Process in reverse order, since new nodes are inserted at the start of + # the destination's node list. We want them to appear in order of the + # phandle list + for phandle in phandle_list.__reversed__(): + parent = self.GetFdt().LookupPhandle(phandle) + tout.debug(f'adding template {parent.path} to node {self.path}') + for node in parent.subnodes.__reversed__(): + dst = self.copy_node(node) + + tout.debug(f'merge props from {parent.path} to {dst.path}') + self.merge_props(parent) + class Fdt: """Provides simple access to a flat device tree blob using libfdts. diff --git a/tools/dtoc/test/dtoc_test_copy.dts b/tools/dtoc/test/dtoc_test_copy.dts new file mode 100644 index 00000000000..36faa9b72b5 --- /dev/null +++ b/tools/dtoc/test/dtoc_test_copy.dts @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Test device tree file for dtoc + * + * Copyright 2017 Google, Inc + */ + +/dts-v1/; + +/ { + #address-cells = <1>; + #size-cells = <1>; + reference = <&over>; /* nake sure that the 'over' phandle exists */ + copy-list = <&another &base>; + + dest { + bootph-all; + compatible = "sandbox,spl-test"; + stringarray = "one"; + longbytearray = [09 0a 0b 0c 0d 0e 0f 10]; + maybe-empty-int = <1>; + + first@0 { + a-prop = <456>; + b-prop = <1>; + }; + + existing { + }; + + base { + second { + second3 { + }; + + second2 { + new-prop; + }; + + second1 { + new-prop; + }; + + second4 { + }; + }; + }; + }; + + base: base { + compatible = "sandbox,i2c"; + bootph-all; + #address-cells = <1>; + #size-cells = <0>; + over: over { + compatible = "sandbox,pmic"; + bootph-all; + reg = <9>; + low-power; + }; + + first@0 { + reg = <0>; + a-prop = <123>; + }; + + second: second { + second1 { + some-prop; + }; + + second2 { + some-prop; + }; + }; + }; + + another: another { + new-prop = "hello"; + earlier { + wibble = <2>; + }; + + later { + fibble = <3>; + }; + }; +}; diff --git a/tools/dtoc/test_fdt.py b/tools/dtoc/test_fdt.py index 4fe8d12c403..3e54694eec9 100755 --- a/tools/dtoc/test_fdt.py +++ b/tools/dtoc/test_fdt.py @@ -306,6 +306,119 @@ class TestNode(unittest.TestCase): self.assertIn("Internal error, node '/spl-test' name mismatch 'i2c@0'", str(exc.exception)) + def test_copy_node(self): + """Test copy_node() function""" + def do_copy_checks(dtb, dst, expect_none): + self.assertEqual( + ['/dest/base', '/dest/first@0', '/dest/existing'], + [n.path for n in dst.subnodes]) + + chk = dtb.GetNode('/dest/base') + self.assertTrue(chk) + self.assertEqual( + {'compatible', 'bootph-all', '#address-cells', '#size-cells'}, + chk.props.keys()) + + # Check the first property + prop = chk.props['bootph-all'] + self.assertEqual('bootph-all', prop.name) + self.assertEqual(True, prop.value) + self.assertEqual(chk.path, prop._node.path) + + # Check the second property + prop2 = chk.props['compatible'] + 
+ self.assertEqual('compatible', prop2.name) + self.assertEqual('sandbox,i2c', prop2.value) + self.assertEqual(chk.path, prop2._node.path) + + first = dtb.GetNode('/dest/base/first@0') + self.assertTrue(first) + over = dtb.GetNode('/dest/base/over') + self.assertTrue(over) + + # Make sure that the phandle for 'over' is not copied + self.assertNotIn('phandle', over.props.keys()) + + second = dtb.GetNode('/dest/base/second') + self.assertTrue(second) + self.assertEqual([over.name, first.name, second.name], + [n.name for n in chk.subnodes]) + self.assertEqual(chk, over.parent) + self.assertEqual( + {'bootph-all', 'compatible', 'reg', 'low-power'}, + over.props.keys()) + + if expect_none: + self.assertIsNone(prop._offset) + self.assertIsNone(prop2._offset) + self.assertIsNone(over._offset) + else: + self.assertTrue(prop._offset) + self.assertTrue(prop2._offset) + self.assertTrue(over._offset) + + # Now check ordering of the subnodes + self.assertEqual( + ['second1', 'second2', 'second3', 'second4'], + [n.name for n in second.subnodes]) + + dtb = fdt.FdtScan(find_dtb_file('dtoc_test_copy.dts')) + tmpl = dtb.GetNode('/base') + dst = dtb.GetNode('/dest') + dst.copy_node(tmpl) + + do_copy_checks(dtb, dst, expect_none=True) + + dtb.Sync(auto_resize=True) + + # Now check that the FDT looks correct + new_dtb = fdt.Fdt.FromData(dtb.GetContents()) + new_dtb.Scan() + dst = new_dtb.GetNode('/dest') + do_copy_checks(new_dtb, dst, expect_none=False) + + def test_copy_subnodes_from_phandles(self): + """Test copy_subnodes_from_phandles() function""" + dtb = fdt.FdtScan(find_dtb_file('dtoc_test_copy.dts')) + + orig = dtb.GetNode('/') + node_list = fdt_util.GetPhandleList(orig, 'copy-list') + + dst = dtb.GetNode('/dest') + dst.copy_subnodes_from_phandles(node_list) + + pmic = dtb.GetNode('/dest/over') + self.assertTrue(pmic) + + subn = dtb.GetNode('/dest/first@0') + self.assertTrue(subn) + self.assertEqual({'a-prop', 'b-prop', 'reg'}, subn.props.keys()) + + self.assertEqual( + ['/dest/earlier', '/dest/later', '/dest/over', '/dest/first@0', + '/dest/second', '/dest/existing', '/dest/base'], + [n.path for n in dst.subnodes]) + + # Make sure that the phandle for 'over' is not copied + over = dst.FindNode('over') + self.assertNotIn('phandle', over.props.keys()) + + # Check the merged properties, first the base ones in '/dest' + expect = {'bootph-all', 'compatible', 'stringarray', 'longbytearray', + 'maybe-empty-int'} + + # Properties from 'base' + expect.update({'#address-cells', '#size-cells'}) + + # Properties from 'another' + expect.add('new-prop') + + self.assertEqual(expect, set(dst.props.keys())) + class TestProp(unittest.TestCase): """Test operation of the Prop class""" diff --git a/tools/k3_fit_atf.sh b/tools/k3_fit_atf.sh deleted file mode 100755 index 7bc07ad0746..00000000000 --- a/tools/k3_fit_atf.sh +++ /dev/null @@ -1,123 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0+ -# -# script to generate FIT image source for K3 Family boards with -# ATF, OPTEE, SPL and multiple device trees (given on the command line). -# Inspired from board/sunxi/mksunxi_fit_atf.sh -# -# usage: $0 <atf_load_addr> <dt_name> [<dt_name> [<dt_name] ...] - [ -z "$ATF" ] && ATF="bl31.bin" - -if [ ! -f $ATF ]; then - echo "WARNING ATF file $ATF NOT found, resulting binary is non-functional" >&2 - ATF=/dev/null -fi - -[ -z "$TEE" ] && TEE="bl32.bin" - -if [ ! 
-f $TEE ]; then - echo "WARNING OPTEE file $TEE NOT found, resulting might be non-functional" >&2 - TEE=/dev/null -fi - -[ -z "$DM" ] && DM="dm.bin" - -if [ ! -e $DM ]; then - echo "WARNING DM file $DM NOT found, resulting might be non-functional" >&2 - DM=/dev/null -fi - -if [ ! -z "$IS_HS" ]; then - HS_APPEND=_HS -fi - -cat << __HEADER_EOF -/dts-v1/; - -/ { - description = "Configuration to load ATF and SPL"; - #address-cells = <1>; - - images { - atf { - description = "ARM Trusted Firmware"; - data = /incbin/("$ATF"); - type = "firmware"; - arch = "arm64"; - compression = "none"; - os = "arm-trusted-firmware"; - load = <$1>; - entry = <$1>; - }; - tee { - description = "OPTEE"; - data = /incbin/("$TEE"); - type = "tee"; - arch = "arm64"; - compression = "none"; - os = "tee"; - load = <0x9e800000>; - entry = <0x9e800000>; - }; - dm { - description = "DM binary"; - data = /incbin/("$DM"); - type = "firmware"; - arch = "arm32"; - compression = "none"; - os = "DM"; - load = <0x89000000>; - entry = <0x89000000>; - }; - spl { - description = "SPL (64-bit)"; - data = /incbin/("spl/u-boot-spl-nodtb.bin$HS_APPEND"); - type = "standalone"; - os = "U-Boot"; - arch = "arm64"; - compression = "none"; - load = <0x80080000>; - entry = <0x80080000>; - }; -__HEADER_EOF - -# shift through ATF load address in the command line arguments -shift - -for dtname in $* -do - cat << __FDT_IMAGE_EOF - $(basename $dtname) { - description = "$(basename $dtname .dtb)"; - data = /incbin/("$dtname$HS_APPEND"); - type = "flat_dt"; - arch = "arm"; - compression = "none"; - }; -__FDT_IMAGE_EOF -done - -cat << __CONF_HEADER_EOF - }; - configurations { - default = "$(basename $1)"; - -__CONF_HEADER_EOF - -for dtname in $* -do - cat << __CONF_SECTION_EOF - $(basename $dtname) { - description = "$(basename $dtname .dtb)"; - firmware = "atf"; - loadables = "tee", "dm", "spl"; - fdt = "$(basename $dtname)"; - }; -__CONF_SECTION_EOF -done - -cat << __ITS_EOF - }; -}; -__ITS_EOF diff --git a/tools/k3_gen_x509_cert.sh b/tools/k3_gen_x509_cert.sh deleted file mode 100755 index d9cde07417c..00000000000 --- a/tools/k3_gen_x509_cert.sh +++ /dev/null @@ -1,262 +0,0 @@ -#!/bin/bash -# SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause -# -# Script to add K3 specific x509 cetificate to a binary. 
-# - -# Variables -OUTPUT=tiboot3.bin -TEMP_X509=x509-temp.cert -CERT=certificate.bin -RAND_KEY=eckey.pem -LOADADDR=0x41c00000 -BOOTCORE_OPTS=0 -BOOTCORE=16 -DEBUG_TYPE=0 -SWRV=1 - -gen_degen_template() { -cat << 'EOF' > degen-template.txt - -asn1=SEQUENCE:rsa_key - -[rsa_key] -version=INTEGER:0 -modulus=INTEGER:0xDEGEN_MODULUS -pubExp=INTEGER:1 -privExp=INTEGER:1 -p=INTEGER:0xDEGEN_P -q=INTEGER:0xDEGEN_Q -e1=INTEGER:1 -e2=INTEGER:1 -coeff=INTEGER:0xDEGEN_COEFF -EOF -} - -# Generate x509 Template -gen_template() { -cat << 'EOF' > x509-template.txt - [ req ] - distinguished_name = req_distinguished_name - x509_extensions = v3_ca - prompt = no - dirstring_type = nobmp - - [ req_distinguished_name ] - C = US - ST = TX - L = Dallas - O = Texas Instruments Incorporated - OU = Processors - CN = TI support - emailAddress = support@ti.com - - [ v3_ca ] - basicConstraints = CA:true - 1.3.6.1.4.1.294.1.1 = ASN1:SEQUENCE:boot_seq - 1.3.6.1.4.1.294.1.2 = ASN1:SEQUENCE:image_integrity - 1.3.6.1.4.1.294.1.3 = ASN1:SEQUENCE:swrv -# 1.3.6.1.4.1.294.1.4 = ASN1:SEQUENCE:encryption - 1.3.6.1.4.1.294.1.8 = ASN1:SEQUENCE:debug - - [ boot_seq ] - certType = INTEGER:TEST_CERT_TYPE - bootCore = INTEGER:TEST_BOOT_CORE - bootCoreOpts = INTEGER:TEST_BOOT_CORE_OPTS - destAddr = FORMAT:HEX,OCT:TEST_BOOT_ADDR - imageSize = INTEGER:TEST_IMAGE_LENGTH - - [ image_integrity ] - shaType = OID:2.16.840.1.101.3.4.2.3 - shaValue = FORMAT:HEX,OCT:TEST_IMAGE_SHA_VAL - - [ swrv ] - swrv = INTEGER:TEST_SWRV - -# [ encryption ] -# initalVector = FORMAT:HEX,OCT:TEST_IMAGE_ENC_IV -# randomString = FORMAT:HEX,OCT:TEST_IMAGE_ENC_RS -# iterationCnt = INTEGER:TEST_IMAGE_KEY_DERIVE_INDEX -# salt = FORMAT:HEX,OCT:TEST_IMAGE_KEY_DERIVE_SALT - - [ debug ] - debugUID = FORMAT:HEX,OCT:0000000000000000000000000000000000000000000000000000000000000000 - debugType = INTEGER:TEST_DEBUG_TYPE - coreDbgEn = INTEGER:0 - coreDbgSecEn = INTEGER:0 -EOF -} - -parse_key() { - sed '/ /s/://g' key.txt | \ - awk '!/ / {printf("\n%s\n", $0)}; / / {printf("%s", $0)}' | \ - sed 's/ //g' | \ - awk "/$1:/{getline; print}" -} - -gen_degen_key() { -# Generate a 4096 bit RSA Key - openssl genrsa -out key.pem 1024 >>/dev/null 2>&1 - openssl rsa -in key.pem -text -out key.txt >>/dev/null 2>&1 - DEGEN_MODULUS=$( parse_key 'modulus' ) - DEGEN_P=$( parse_key 'prime1' ) - DEGEN_Q=$( parse_key 'prime2' ) - DEGEN_COEFF=$( parse_key 'coefficient' ) - gen_degen_template - - sed -e "s/DEGEN_MODULUS/$DEGEN_MODULUS/"\ - -e "s/DEGEN_P/$DEGEN_P/" \ - -e "s/DEGEN_Q/$DEGEN_Q/" \ - -e "s/DEGEN_COEFF/$DEGEN_COEFF/" \ - degen-template.txt > degenerateKey.txt - - openssl asn1parse -genconf degenerateKey.txt -out degenerateKey.der >>/dev/null 2>&1 - openssl rsa -in degenerateKey.der -inform DER -outform PEM -out $RAND_KEY >>/dev/null 2>&1 - KEY=$RAND_KEY - rm key.pem key.txt degen-template.txt degenerateKey.txt degenerateKey.der -} - -declare -A options_help -usage() { - if [ -n "$*" ]; then - echo "ERROR: $*" - fi - echo -n "Usage: $0 " - for option in "${!options_help[@]}" - do - arg=`echo ${options_help[$option]}|cut -d ':' -f1` - if [ -n "$arg" ]; then - arg=" $arg" - fi - echo -n "[-$option$arg] " - done - echo - echo -e "\nWhere:" - for option in "${!options_help[@]}" - do - arg=`echo ${options_help[$option]}|cut -d ':' -f1` - txt=`echo ${options_help[$option]}|cut -d ':' -f2` - tb="\t\t\t" - if [ -n "$arg" ]; then - arg=" $arg" - tb="\t" - fi - echo -e " -$option$arg:$tb$txt" - done - echo - echo "Examples of usage:-" - echo "# Example of signing the SYSFW binary with rsa degenerate 
key" - echo " $0 -c 0 -b ti-sci-firmware-am6x.bin -o sysfw.bin -l 0x40000" - echo "# Example of signing the SPL binary with rsa degenerate key" - echo " $0 -c 16 -b spl/u-boot-spl.bin -o tiboot3.bin -l 0x41c00000" -} - -options_help[b]="bin_file:Bin file that needs to be signed" -options_help[k]="key_file:file with key inside it. If not provided script generates a rsa degenerate key." -options_help[o]="output_file:Name of the final output file. default to $OUTPUT" -options_help[c]="core_id:target core id on which the image would be running. Default to $BOOTCORE" -options_help[l]="loadaddr: Target load address of the binary in hex. Default to $LOADADDR" -options_help[d]="debug_type: Debug type, set to 4 to enable early JTAG. Default to $DEBUG_TYPE" -options_help[r]="SWRV: Software Rev for X509 certificate" - -while getopts "b:k:o:c:l:d:h:r:" opt -do - case $opt in - b) - BIN=$OPTARG - ;; - k) - KEY=$OPTARG - ;; - o) - OUTPUT=$OPTARG - ;; - l) - LOADADDR=$OPTARG - ;; - c) - BOOTCORE=$OPTARG - ;; - d) - DEBUG_TYPE=$OPTARG - ;; - r) - SWRV=$OPTARG - ;; - h) - usage - exit 0 - ;; - \?) - usage "Invalid Option '-$OPTARG'" - exit 1 - ;; - :) - usage "Option '-$OPTARG' Needs an argument." - exit 1 - ;; - esac -done - -if [ "$#" -eq 0 ]; then - usage "Arguments missing" - exit 1 -fi - -if [ -z "$BIN" ]; then - usage "Bin file missing in arguments" - exit 1 -fi - -# Generate rsa degenerate key if user doesn't provide a key -if [ -z "$KEY" ]; then - gen_degen_key -fi - -if [ $BOOTCORE == 0 ]; then # BOOTCORE M3, loaded by ROM - CERTTYPE=2 -elif [ $BOOTCORE == 16 ]; then # BOOTCORE R5, loaded by ROM - CERTTYPE=1 -else # Non BOOTCORE, loaded by SYSFW - BOOTCORE_OPTS_VER=$(printf "%01x" 1) - # Add input args option for SET and CLR flags. - BOOTCORE_OPTS_SETFLAG=$(printf "%08x" 0) - BOOTCORE_OPTS_CLRFLAG=$(printf "%08x" 0x100) # Clear FLAG_ARMV8_AARCH32 - BOOTCORE_OPTS="0x$BOOTCORE_OPTS_VER$BOOTCORE_OPTS_SETFLAG$BOOTCORE_OPTS_CLRFLAG" - # Set the cert type to zero. 
- # We are not using public/private key store now - CERTTYPE=$(printf "0x%08x" 0) -fi - -SHA_VAL=`openssl dgst -sha512 -hex $BIN | sed -e "s/^.*= //g"` -BIN_SIZE=`cat $BIN | wc -c` -ADDR=`printf "%08x" $LOADADDR` - -gen_cert() { - #echo "Certificate being generated :" - #echo " LOADADDR = 0x$ADDR" - #echo " IMAGE_SIZE = $BIN_SIZE" - #echo " CERT_TYPE = $CERTTYPE" - #echo " DEBUG_TYPE = $DEBUG_TYPE" - #echo " SWRV = $SWRV" - sed -e "s/TEST_IMAGE_LENGTH/$BIN_SIZE/" \ - -e "s/TEST_IMAGE_SHA_VAL/$SHA_VAL/" \ - -e "s/TEST_CERT_TYPE/$CERTTYPE/" \ - -e "s/TEST_BOOT_CORE_OPTS/$BOOTCORE_OPTS/" \ - -e "s/TEST_BOOT_CORE/$BOOTCORE/" \ - -e "s/TEST_BOOT_ADDR/$ADDR/" \ - -e "s/TEST_DEBUG_TYPE/$DEBUG_TYPE/" \ - -e "s/TEST_SWRV/$SWRV/" \ - x509-template.txt > $TEMP_X509 - openssl req -new -x509 -key $KEY -nodes -outform DER -out $CERT -config $TEMP_X509 -sha512 -} - -gen_template -gen_cert -cat $CERT $BIN > $OUTPUT - -# Remove all intermediate files -rm $TEMP_X509 $CERT x509-template.txt -if [ "$KEY" == "$RAND_KEY" ]; then - rm $RAND_KEY -fi diff --git a/tools/moveconfig.py b/tools/moveconfig.py index c4d72ede368..6cbecc3d5c8 100755 --- a/tools/moveconfig.py +++ b/tools/moveconfig.py @@ -2037,7 +2037,7 @@ doc/develop/moveconfig.rst for documentation.''' if not args.cleanup_headers_only: check_clean_directory() - bsettings.Setup('') + bsettings.setup('') toolchains = toolchain.Toolchains() toolchains.GetSettings() toolchains.Scan(verbose=False) diff --git a/tools/u_boot_pylib/pyproject.toml b/tools/u_boot_pylib/pyproject.toml index 3f33caf6f8d..037c5d629ec 100644 --- a/tools/u_boot_pylib/pyproject.toml +++ b/tools/u_boot_pylib/pyproject.toml @@ -9,7 +9,7 @@ authors = [ { name="Simon Glass", email="sjg@chromium.org" }, ] description = "U-Boot python library" -readme = "README.md" +readme = "README.rst" requires-python = ">=3.7" classifiers = [ "Programming Language :: Python :: 3", @@ -20,3 +20,7 @@ classifiers = [ [project.urls] "Homepage" = "https://u-boot.readthedocs.io" "Bug Tracker" = "https://source.denx.de/groups/u-boot/-/issues" + +[tool.setuptools.packages.find] +where = [".."] +include = ["u_boot_pylib*"] diff --git a/tools/u_boot_pylib/test_util.py b/tools/u_boot_pylib/test_util.py index e7564e10c99..f18d385d995 100644 --- a/tools/u_boot_pylib/test_util.py +++ b/tools/u_boot_pylib/test_util.py @@ -24,7 +24,7 @@ except: def run_test_coverage(prog, filter_fname, exclude_list, build_dir, required=None, - extra_args=None): + extra_args=None, single_thread='-P1'): """Run tests and check that we get 100% coverage Args: @@ -39,6 +39,9 @@ def run_test_coverage(prog, filter_fname, exclude_list, build_dir, required=None required: List of modules which must be in the coverage report extra_args (str): Extra arguments to pass to the tool before the -t/test arg + single_thread (str): Argument string to make the tests run + single-threaded. This is necessary to get proper coverage results. 
+ The default is '-P1' Raises: ValueError if the code coverage is not 100% @@ -58,8 +61,9 @@ def run_test_coverage(prog, filter_fname, exclude_list, build_dir, required=None if build_dir: prefix = 'PYTHONPATH=$PYTHONPATH:%s/sandbox_spl/tools ' % build_dir cmd = ('%spython3-coverage run ' '--omit "%s" %s %s %s -P1' % (prefix, ','.join(glob_list), - prog, extra_args or '', test_cmd)) + '--omit "%s" %s %s %s %s' % (prefix, ','.join(glob_list), + prog, extra_args or '', test_cmd, + single_thread or '-P1')) os.system(cmd) stdout = command.output('python3-coverage', 'report') lines = stdout.splitlines() diff --git a/tools/zynqmp_psu_init_minimize.sh b/tools/zynqmp_psu_init_minimize.sh index 16c622f6ce7..5c8b73703bf 100755 --- a/tools/zynqmp_psu_init_minimize.sh +++ b/tools/zynqmp_psu_init_minimize.sh @@ -1,6 +1,6 @@ #!/bin/bash # SPDX-License-Identifier: GPL-2.0+ -# Copyright (C) 2018 Michal Simek <michal.simek@xilinx.com> +# Copyright (C) 2018 Michal Simek <michal.simek@amd.com> # Copyright (C) 2019 Luca Ceresoli <luca@lucaceresoli.net> # Copyright (C) 2022 Weidmüller Interface GmbH & Co. KG # Stefan Herbrechtsmeier <stefan.herbrechtsmeier@weidmueller.com> diff --git a/tools/zynqmpimage.c b/tools/zynqmpimage.c index 5113ba895f0..bb54f41a153 100644 --- a/tools/zynqmpimage.c +++ b/tools/zynqmpimage.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0+ /* - * Copyright (C) 2016 Michal Simek <michals@xilinx.com> + * Copyright (C) 2016 Michal Simek <michal.simek@amd.com> * Copyright (C) 2015 Nathan Rossi <nathan@nathanrossi.com> * * The following Boot Header format/structures and values are defined in the diff --git a/tools/zynqmpimage.h b/tools/zynqmpimage.h index 9d526a17cdd..ca7489835a8 100644 --- a/tools/zynqmpimage.h +++ b/tools/zynqmpimage.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0+ */ /* - * Copyright (C) 2016 Michal Simek <michals@xilinx.com> + * Copyright (C) 2016 Michal Simek <michal.simek@amd.com> * Copyright (C) 2015 Nathan Rossi <nathan@nathanrossi.com> * * The following Boot Header format/structures and values are defined in the
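For reference, a minimal sketch of driving the new fdt.py copy API added above, mirroring the calls made in test_fdt.py (the .dtb file name here is hypothetical)::

    from dtoc import fdt, fdt_util

    # Scan a compiled blob, e.g. one built from dtoc_test_copy.dts above
    dtb = fdt.FdtScan('dtoc_test_copy.dtb')

    # 'copy-list' holds the phandles of the template nodes to copy
    node_list = fdt_util.GetPhandleList(dtb.GetNode('/'), 'copy-list')

    # Copy the subnodes and properties of each listed node into /dest
    dst = dtb.GetNode('/dest')
    dst.copy_subnodes_from_phandles(node_list)

    # Write the changes back to the blob, allowing it to grow if needed
    dtb.Sync(auto_resize=True)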