summaryrefslogtreecommitdiff
path: root/tools
diff options
context:
space:
mode:
Diffstat (limited to 'tools')
-rw-r--r--tools/binman/binman.rst4
-rw-r--r--tools/binman/btool/bootgen.py2
-rw-r--r--tools/binman/btool/fiptool.py2
-rw-r--r--tools/binman/btool/futility.py2
-rw-r--r--tools/binman/btool/mkeficapsule.py26
-rw-r--r--tools/binman/entries.rst44
-rw-r--r--tools/binman/etype/efi_capsule.py24
-rw-r--r--tools/binman/etype/efi_empty_capsule.py86
-rw-r--r--tools/binman/etype/section.py2
-rw-r--r--tools/binman/ftest.py160
-rw-r--r--tools/binman/test/311_capsule.dts3
-rw-r--r--tools/binman/test/312_capsule_signed.dts3
-rw-r--r--tools/binman/test/313_capsule_version.dts3
-rw-r--r--tools/binman/test/314_capsule_signed_ver.dts3
-rw-r--r--tools/binman/test/315_capsule_oemflags.dts3
-rw-r--r--tools/binman/test/316_capsule_missing_key.dts3
-rw-r--r--tools/binman/test/317_capsule_missing_index.dts3
-rw-r--r--tools/binman/test/318_capsule_missing_guid.dts3
-rw-r--r--tools/binman/test/319_capsule_accept.dts13
-rw-r--r--tools/binman/test/320_capsule_revert.dts11
-rw-r--r--tools/binman/test/321_capsule_accept_missing_guid.dts11
-rw-r--r--tools/binman/test/322_empty_capsule_type_missing.dts12
-rw-r--r--tools/binman/test/323_capsule_accept_revert_missing.dts13
-rwxr-xr-xtools/buildman/main.py10
-rw-r--r--tools/docker/Dockerfile31
-rw-r--r--tools/eficapsule.h2
-rw-r--r--tools/fit_image.c36
-rwxr-xr-xtools/iot2050-sign-fw.sh10
-rw-r--r--tools/logos/stm32f746-disco.bmpbin0 -> 18052 bytes
-rw-r--r--tools/mkeficapsule.c227
-rw-r--r--tools/patman/gitutil.py4
-rw-r--r--tools/patman/pyproject.toml2
-rw-r--r--tools/patman/test_checkpatch.py58
-rwxr-xr-xtools/qconfig.py (renamed from tools/moveconfig.py)894
34 files changed, 911 insertions, 799 deletions
diff --git a/tools/binman/binman.rst b/tools/binman/binman.rst
index aeea33fddb9..020988d955f 100644
--- a/tools/binman/binman.rst
+++ b/tools/binman/binman.rst
@@ -1480,9 +1480,6 @@ as set in stone, so Binman will ensure it doesn't change. Without this feature,
repacking an entry might cause it to disobey the original constraints provided
when it was created.
- Repacking an image involves
-
-.. _`BinmanLogging`:
Signing FIT container with private key in an image
--------------------------------------------------
@@ -1501,6 +1498,7 @@ If you want to sign and replace FIT container in place::
which will sign FIT container with private key and replace it immediately
inside your image.
+.. _`BinmanLogging`:
Logging
-------
diff --git a/tools/binman/btool/bootgen.py b/tools/binman/btool/bootgen.py
index f2ca552dc28..1bc9f0aa96f 100644
--- a/tools/binman/btool/bootgen.py
+++ b/tools/binman/btool/bootgen.py
@@ -132,6 +132,6 @@ class Bintoolbootgen(bintool.Bintool):
result = self.build_from_git(
'https://github.com/Xilinx/bootgen',
- 'all',
+ ['all'],
'bootgen')
return result
diff --git a/tools/binman/btool/fiptool.py b/tools/binman/btool/fiptool.py
index c80f8275c4c..34002f54af9 100644
--- a/tools/binman/btool/fiptool.py
+++ b/tools/binman/btool/fiptool.py
@@ -109,6 +109,6 @@ class Bintoolfiptool(bintool.Bintool):
return None
result = self.build_from_git(
'https://git.trustedfirmware.org/TF-A/trusted-firmware-a.git',
- 'fiptool',
+ ['fiptool'],
'tools/fiptool/fiptool')
return result
diff --git a/tools/binman/btool/futility.py b/tools/binman/btool/futility.py
index 04c9aefe9b4..0d3980d071d 100644
--- a/tools/binman/btool/futility.py
+++ b/tools/binman/btool/futility.py
@@ -170,7 +170,7 @@ class Bintoolfutility(bintool.Bintool):
# .gitcookies file. So use a mirror instead.
result = self.build_from_git(
'https://github.com/sjg20/vboot_reference.git',
- 'all',
+ ['all'],
'build/futility/futility',
flags=['USE_FLASHROM=0'])
return result
diff --git a/tools/binman/btool/mkeficapsule.py b/tools/binman/btool/mkeficapsule.py
index 61179747ffa..ef1da638df1 100644
--- a/tools/binman/btool/mkeficapsule.py
+++ b/tools/binman/btool/mkeficapsule.py
@@ -80,6 +80,32 @@ class Bintoolmkeficapsule(bintool.Bintool):
return self.run_cmd(*args)
+ def generate_empty_capsule(self, image_guid, output_fname,
+ accept=True):
+ """Generate empty capsules for FWU A/B updates
+
+ Args:
+ image_guid (str): GUID used for identifying the image
+ in case of an accept capsule
+ output_fname (str): Path to the output capsule file
+ accept (bool): Generate an accept capsule,
+ else a revert capsule
+
+ Returns:
+ str: Tool output
+ """
+ if accept:
+ args = [
+ f'--guid={image_guid}',
+ '--fw-accept'
+ ]
+ else:
+ args = [ '--fw-revert' ]
+
+ args += [ output_fname ]
+
+ return self.run_cmd(*args)
+
def fetch(self, method):
"""Fetch handler for mkeficapsule
diff --git a/tools/binman/entries.rst b/tools/binman/entries.rst
index 801bd946742..e7b4e9380e2 100644
--- a/tools/binman/entries.rst
+++ b/tools/binman/entries.rst
@@ -532,6 +532,50 @@ payload using the blob-ext subnode.
+.. _etype_efi_empty_capsule:
+
+Entry: efi-empty-capsule: Entry for generating EFI Empty Capsule files
+----------------------------------------------------------------------
+
+The parameters needed for generation of the empty capsules can
+be provided as properties in the entry.
+
+Properties / Entry arguments:
+ - image-guid: Image GUID which will be used for identifying the
+ updatable image on the board. Mandatory for accept capsule.
+ - capsule-type - String to indicate type of capsule to generate. Valid
+ values are 'accept' and 'revert'.
+
+For more details on the description of the capsule format, and the capsule
+update functionality, refer to Section 8.5 and Chapter 23 in the `UEFI
+specification`_. For more information on the empty capsule, refer to
+sections 2.3.2 and 2.3.3 in the `Dependable Boot specification`_.
+
+A typical accept empty capsule entry node would then look something
+like this::
+
+ empty-capsule {
+ type = "efi-empty-capsule";
+ /* GUID of the image being accepted */
+ image-type-id = SANDBOX_UBOOT_IMAGE_GUID;
+ capsule-type = "accept";
+ };
+
+A typical revert empty capsule entry node would then look something
+like this::
+
+ empty-capsule {
+ type = "efi-empty-capsule";
+ capsule-type = "revert";
+ };
+
+The empty capsules do not have any input payload image.
+
+.. _`UEFI specification`: https://uefi.org/sites/default/files/resources/UEFI_Spec_2_10_Aug29.pdf
+.. _`Dependable Boot specification`: https://git.codelinaro.org/linaro/dependable-boot/mbfw/uploads/6f7ddfe3be24e18d4319e108a758d02e/mbfw.pdf
+
+
+
.. _etype_encrypted:
Entry: encrypted: Externally built encrypted binary blob
diff --git a/tools/binman/etype/efi_capsule.py b/tools/binman/etype/efi_capsule.py
index 006eb630adb..e3203717822 100644
--- a/tools/binman/etype/efi_capsule.py
+++ b/tools/binman/etype/efi_capsule.py
@@ -11,6 +11,24 @@ from binman.etype.section import Entry_section
from dtoc import fdt_util
from u_boot_pylib import tools
+def get_binman_test_guid(type_str):
+ """Get the test image GUID for binman
+
+ Based on the string passed to the function, return
+ the corresponding GUID.
+
+ Args:
+ type_str: Key value of the type of GUID to look for
+
+ Returns:
+ The actual GUID value (str)
+ """
+ TYPE_TO_GUID = {
+ 'binman-test' : '09d7cf52-0720-4710-91d1-08469b7fe9c8'
+ }
+
+ return TYPE_TO_GUID[type_str]
+
class Entry_efi_capsule(Entry_section):
"""Generate EFI capsules
@@ -104,12 +122,6 @@ class Entry_efi_capsule(Entry_section):
self.auth = 1
def BuildSectionData(self, required):
- def get_binman_test_guid(type_str):
- TYPE_TO_GUID = {
- 'binman-test' : '09d7cf52-0720-4710-91d1-08469b7fe9c8'
- }
- return TYPE_TO_GUID[type_str]
-
private_key = ''
public_key_cert = ''
if self.auth:
diff --git a/tools/binman/etype/efi_empty_capsule.py b/tools/binman/etype/efi_empty_capsule.py
new file mode 100644
index 00000000000..064bf9a77f0
--- /dev/null
+++ b/tools/binman/etype/efi_empty_capsule.py
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2023 Linaro Limited
+#
+# Entry-type module for producing an empty EFI capsule
+#
+
+import os
+
+from binman.entry import Entry
+from binman.etype.efi_capsule import get_binman_test_guid
+from binman.etype.section import Entry_section
+from dtoc import fdt_util
+from u_boot_pylib import tools
+
+class Entry_efi_empty_capsule(Entry_section):
+ """Generate EFI empty capsules
+
+ The parameters needed for generation of the empty capsules can
+ be provided as properties in the entry.
+
+ Properties / Entry arguments:
+ - image-guid: Image GUID which will be used for identifying the
+ updatable image on the board. Mandatory for accept capsule.
+ - capsule-type - String to indicate type of capsule to generate. Valid
+ values are 'accept' and 'revert'.
+
+ For more details on the description of the capsule format, and the capsule
+ update functionality, refer to Section 8.5 and Chapter 23 in the `UEFI
+ specification`_. For more information on the empty capsule, refer to
+ sections 2.3.2 and 2.3.3 in the `Dependable Boot specification`_.
+
+ A typical accept empty capsule entry node would then look something like this
+
+ empty-capsule {
+ type = "efi-empty-capsule";
+ /* GUID of image being accepted */
+ image-type-id = SANDBOX_UBOOT_IMAGE_GUID;
+ capsule-type = "accept";
+ };
+
+ A typical revert empty capsule entry node would then look something like this
+
+ empty-capsule {
+ type = "efi-empty-capsule";
+ capsule-type = "revert";
+ };
+
+ The empty capsules do not have any input payload image.
+
+ .. _`UEFI specification`: https://uefi.org/sites/default/files/resources/UEFI_Spec_2_10_Aug29.pdf
+ .. _`Dependable Boot specification`: https://git.codelinaro.org/linaro/dependable-boot/mbfw/uploads/6f7ddfe3be24e18d4319e108a758d02e/mbfw.pdf
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+ self.required_props = ['capsule-type']
+ self.accept = 0
+ self.revert = 0
+
+ def ReadNode(self):
+ super().ReadNode()
+
+ self.image_guid = fdt_util.GetString(self._node, 'image-guid')
+ self.capsule_type = fdt_util.GetString(self._node, 'capsule-type')
+
+ if self.capsule_type != 'accept' and self.capsule_type != 'revert':
+ self.Raise('capsule-type should be either \'accept\' or \'revert\'')
+
+ if self.capsule_type == 'accept' and not self.image_guid:
+ self.Raise('Image GUID needed for generating accept capsule')
+
+ def BuildSectionData(self, required):
+ uniq = self.GetUniqueName()
+ outfile = self._filename if self._filename else 'capsule.%s' % uniq
+ capsule_fname = tools.get_output_filename(outfile)
+ accept = True if self.capsule_type == 'accept' else False
+ guid = self.image_guid
+ if self.image_guid == "binman-test":
+ guid = get_binman_test_guid('binman-test')
+
+ ret = self.mkeficapsule.generate_empty_capsule(guid, capsule_fname,
+ accept)
+ if ret is not None:
+ return tools.read_file(capsule_fname)
+
+ def AddBintools(self, btools):
+ self.mkeficapsule = self.AddBintool(btools, 'mkeficapsule')
diff --git a/tools/binman/etype/section.py b/tools/binman/etype/section.py
index fb49e85a763..30c1041c7e8 100644
--- a/tools/binman/etype/section.py
+++ b/tools/binman/etype/section.py
@@ -40,7 +40,7 @@ class Entry_section(Entry):
For example code, see etypes which subclass `Entry_section`, or `cbfs.py`
for a more involved example::
- $ grep -l \(Entry_section tools/binman/etype/*.py
+ $ grep -l \\(Entry_section tools/binman/etype/*.py
ReadNode()
Call `super().ReadNode()`, then read any special properties for the
diff --git a/tools/binman/ftest.py b/tools/binman/ftest.py
index 8e419645a6d..16156b74105 100644
--- a/tools/binman/ftest.py
+++ b/tools/binman/ftest.py
@@ -121,9 +121,14 @@ COMP_BINTOOLS = ['bzip2', 'gzip', 'lz4', 'lzma_alone', 'lzop', 'xz', 'zstd']
TEE_ADDR = 0x5678
# Firmware Management Protocol(FMP) GUID
-FW_MGMT_GUID = 'edd5cb6d2de8444cbda17194199ad92a'
+FW_MGMT_GUID = '6dcbd5ed-e82d-4c44-bda1-7194199ad92a'
# Image GUID specified in the DTS
-CAPSULE_IMAGE_GUID = '52cfd7092007104791d108469b7fe9c8'
+CAPSULE_IMAGE_GUID = '09d7cf52-0720-4710-91d1-08469b7fe9c8'
+# Windows cert GUID
+WIN_CERT_TYPE_EFI_GUID = '4aafd29d-68df-49ee-8aa9-347d375665a7'
+# Empty capsule GUIDs
+EMPTY_CAPSULE_ACCEPT_GUID = '0c996046-bcc0-4d04-85ec-e1fcedf1c6f8'
+EMPTY_CAPSULE_REVERT_GUID = 'acd58b4b-c0e8-475f-99b5-6b3f7e07aaf0'
class TestFunctional(unittest.TestCase):
"""Functional tests for binman
@@ -7223,52 +7228,94 @@ fdt fdtmap Extract the devicetree blob from the fdtmap
self.assertRegex(err,
"Image 'image'.*missing bintools.*: bootgen")
+ def _GetCapsuleHeaders(self, data):
+ """Get the capsule header contents
+
+ Args:
+ data: Capsule file contents
+
+ Returns:
+ Dict:
+ key: Capsule Header name (str)
+ value: Header field value (str)
+ """
+ capsule_file = os.path.join(self._indir, 'test.capsule')
+ tools.write_file(capsule_file, data)
+
+ out = tools.run('mkeficapsule', '--dump-capsule', capsule_file)
+ lines = out.splitlines()
+
+ re_line = re.compile(r'^([^:\-\t]*)(?:\t*\s*:\s*(.*))?$')
+ vals = {}
+ for line in lines:
+ mat = re_line.match(line)
+ if mat:
+ vals[mat.group(1)] = mat.group(2)
+
+ return vals
+
def _CheckCapsule(self, data, signed_capsule=False, version_check=False,
capoemflags=False):
- fmp_signature = "4d535331" # 'M', 'S', 'S', '1'
- fmp_size = "10"
- fmp_fw_version = "02"
- oemflag = "0080"
+ fmp_signature = "3153534D" # 'M', 'S', 'S', '1'
+ fmp_size = "00000010"
+ fmp_fw_version = "00000002"
+ capsule_image_index = "00000001"
+ oemflag = "00018000"
+ auth_hdr_revision = "00000200"
+ auth_hdr_cert_type = "00000EF1"
- payload_data = EFI_CAPSULE_DATA
+ payload_data_len = len(EFI_CAPSULE_DATA)
- # TODO - Currently, these offsets for capsule fields are hardcoded.
- # There are plans to add support to the mkeficapsule tool to dump
- # the capsule contents which can then be used for capsule
- # verification.
+ hdr = self._GetCapsuleHeaders(data)
- # Firmware Management Protocol(FMP) GUID - offset(0 - 32)
- self.assertEqual(FW_MGMT_GUID, data.hex()[:32])
- # Image GUID - offset(96 - 128)
- self.assertEqual(CAPSULE_IMAGE_GUID, data.hex()[96:128])
+ self.assertEqual(FW_MGMT_GUID.upper(), hdr['EFI_CAPSULE_HDR.CAPSULE_GUID'])
+
+ self.assertEqual(CAPSULE_IMAGE_GUID.upper(),
+ hdr['FMP_CAPSULE_IMAGE_HDR.UPDATE_IMAGE_TYPE_ID'])
+ self.assertEqual(capsule_image_index,
+ hdr['FMP_CAPSULE_IMAGE_HDR.UPDATE_IMAGE_INDEX'])
if capoemflags:
- # OEM Flags - offset(40 - 44)
- self.assertEqual(oemflag, data.hex()[40:44])
- if signed_capsule and version_check:
- # FMP header signature - offset(4770 - 4778)
- self.assertEqual(fmp_signature, data.hex()[4770:4778])
- # FMP header size - offset(4778 - 4780)
- self.assertEqual(fmp_size, data.hex()[4778:4780])
- # firmware version - offset(4786 - 4788)
- self.assertEqual(fmp_fw_version, data.hex()[4786:4788])
- # payload offset signed capsule(4802 - 4808)
- self.assertEqual(payload_data.hex(), data.hex()[4802:4808])
- elif signed_capsule:
- # payload offset signed capsule(4770 - 4776)
- self.assertEqual(payload_data.hex(), data.hex()[4770:4776])
- elif version_check:
- # FMP header signature - offset(184 - 192)
- self.assertEqual(fmp_signature, data.hex()[184:192])
- # FMP header size - offset(192 - 194)
- self.assertEqual(fmp_size, data.hex()[192:194])
- # firmware version - offset(200 - 202)
- self.assertEqual(fmp_fw_version, data.hex()[200:202])
- # payload offset for non-signed capsule with version header(216 - 222)
- self.assertEqual(payload_data.hex(), data.hex()[216:222])
+ self.assertEqual(oemflag, hdr['EFI_CAPSULE_HDR.FLAGS'])
+
+ if signed_capsule:
+ self.assertEqual(auth_hdr_revision,
+ hdr['EFI_FIRMWARE_IMAGE_AUTH.AUTH_INFO.HDR.wREVISION'])
+ self.assertEqual(auth_hdr_cert_type,
+ hdr['EFI_FIRMWARE_IMAGE_AUTH.AUTH_INFO.HDR.wCERTTYPE'])
+ self.assertEqual(WIN_CERT_TYPE_EFI_GUID.upper(),
+ hdr['EFI_FIRMWARE_IMAGE_AUTH.AUTH_INFO.CERT_TYPE'])
+
+ if version_check:
+ self.assertEqual(fmp_signature,
+ hdr['FMP_PAYLOAD_HDR.SIGNATURE'])
+ self.assertEqual(fmp_size,
+ hdr['FMP_PAYLOAD_HDR.HEADER_SIZE'])
+ self.assertEqual(fmp_fw_version,
+ hdr['FMP_PAYLOAD_HDR.FW_VERSION'])
+
+ self.assertEqual(payload_data_len, int(hdr['Payload Image Size']))
+
+ def _CheckEmptyCapsule(self, data, accept_capsule=False):
+ if accept_capsule:
+ capsule_hdr_guid = EMPTY_CAPSULE_ACCEPT_GUID
+ else:
+ capsule_hdr_guid = EMPTY_CAPSULE_REVERT_GUID
+
+ hdr = self._GetCapsuleHeaders(data)
+
+ self.assertEqual(capsule_hdr_guid.upper(),
+ hdr['EFI_CAPSULE_HDR.CAPSULE_GUID'])
+
+ if accept_capsule:
+ capsule_size = "0000002C"
else:
- # payload offset for non-signed capsule with no version header(184 - 190)
- self.assertEqual(payload_data.hex(), data.hex()[184:190])
+ capsule_size = "0000001C"
+ self.assertEqual(capsule_size,
+ hdr['EFI_CAPSULE_HDR.CAPSULE_IMAGE_SIZE'])
+
+ if accept_capsule:
+ self.assertEqual(CAPSULE_IMAGE_GUID.upper(), hdr['ACCEPT_IMAGE_GUID'])
def testCapsuleGen(self):
"""Test generation of EFI capsule"""
@@ -7334,5 +7381,38 @@ fdt fdtmap Extract the devicetree blob from the fdtmap
self.assertIn("entry is missing properties: image-guid",
str(e.exception))
+ def testCapsuleGenAcceptCapsule(self):
+ """Test generation of accept EFI capsule"""
+ data = self._DoReadFile('319_capsule_accept.dts')
+
+ self._CheckEmptyCapsule(data, accept_capsule=True)
+
+ def testCapsuleGenRevertCapsule(self):
+ """Test generation of revert EFI capsule"""
+ data = self._DoReadFile('320_capsule_revert.dts')
+
+ self._CheckEmptyCapsule(data)
+
+ def testCapsuleGenAcceptGuidMissing(self):
+ """Test that binman errors out on missing image GUID for accept capsule"""
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFile('321_capsule_accept_missing_guid.dts')
+
+ self.assertIn("Image GUID needed for generating accept capsule",
+ str(e.exception))
+
+ def testCapsuleGenEmptyCapsuleTypeMissing(self):
+ """Test that capsule-type is specified"""
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFile('322_empty_capsule_type_missing.dts')
+
+ self.assertIn("entry is missing properties: capsule-type",
+ str(e.exception))
+
+ def testCapsuleGenAcceptOrRevertMissing(self):
+ """Test that both accept and revert capsule are not specified"""
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFile('323_capsule_accept_revert_missing.dts')
+
if __name__ == "__main__":
unittest.main()
diff --git a/tools/binman/test/311_capsule.dts b/tools/binman/test/311_capsule.dts
index 8eb4250b14b..0a62ef81dd2 100644
--- a/tools/binman/test/311_capsule.dts
+++ b/tools/binman/test/311_capsule.dts
@@ -3,9 +3,6 @@
/dts-v1/;
/ {
- #address-cells = <1>;
- #size-cells = <1>;
-
binman {
efi-capsule {
image-index = <0x1>;
diff --git a/tools/binman/test/312_capsule_signed.dts b/tools/binman/test/312_capsule_signed.dts
index d1c76e269c7..4ab838efedd 100644
--- a/tools/binman/test/312_capsule_signed.dts
+++ b/tools/binman/test/312_capsule_signed.dts
@@ -3,9 +3,6 @@
/dts-v1/;
/ {
- #address-cells = <1>;
- #size-cells = <1>;
-
binman {
efi-capsule {
image-index = <0x1>;
diff --git a/tools/binman/test/313_capsule_version.dts b/tools/binman/test/313_capsule_version.dts
index bafef3609e0..19e7e833480 100644
--- a/tools/binman/test/313_capsule_version.dts
+++ b/tools/binman/test/313_capsule_version.dts
@@ -3,9 +3,6 @@
/dts-v1/;
/ {
- #address-cells = <1>;
- #size-cells = <1>;
-
binman {
efi-capsule {
image-index = <0x1>;
diff --git a/tools/binman/test/314_capsule_signed_ver.dts b/tools/binman/test/314_capsule_signed_ver.dts
index 85c784bba43..649b8ccb2df 100644
--- a/tools/binman/test/314_capsule_signed_ver.dts
+++ b/tools/binman/test/314_capsule_signed_ver.dts
@@ -3,9 +3,6 @@
/dts-v1/;
/ {
- #address-cells = <1>;
- #size-cells = <1>;
-
binman {
efi-capsule {
image-index = <0x1>;
diff --git a/tools/binman/test/315_capsule_oemflags.dts b/tools/binman/test/315_capsule_oemflags.dts
index f736e8758fd..45853f69c31 100644
--- a/tools/binman/test/315_capsule_oemflags.dts
+++ b/tools/binman/test/315_capsule_oemflags.dts
@@ -3,9 +3,6 @@
/dts-v1/;
/ {
- #address-cells = <1>;
- #size-cells = <1>;
-
binman {
efi-capsule {
image-index = <0x1>;
diff --git a/tools/binman/test/316_capsule_missing_key.dts b/tools/binman/test/316_capsule_missing_key.dts
index 2080b50e3dd..a14a74ee779 100644
--- a/tools/binman/test/316_capsule_missing_key.dts
+++ b/tools/binman/test/316_capsule_missing_key.dts
@@ -3,9 +3,6 @@
/dts-v1/;
/ {
- #address-cells = <1>;
- #size-cells = <1>;
-
binman {
efi-capsule {
image-index = <0x1>;
diff --git a/tools/binman/test/317_capsule_missing_index.dts b/tools/binman/test/317_capsule_missing_index.dts
index aadb61f6477..99a54d55c33 100644
--- a/tools/binman/test/317_capsule_missing_index.dts
+++ b/tools/binman/test/317_capsule_missing_index.dts
@@ -3,9 +3,6 @@
/dts-v1/;
/ {
- #address-cells = <1>;
- #size-cells = <1>;
-
binman {
efi-capsule {
/* Image GUID for testing capsule update */
diff --git a/tools/binman/test/318_capsule_missing_guid.dts b/tools/binman/test/318_capsule_missing_guid.dts
index d76afba853e..85d3317ecb5 100644
--- a/tools/binman/test/318_capsule_missing_guid.dts
+++ b/tools/binman/test/318_capsule_missing_guid.dts
@@ -3,9 +3,6 @@
/dts-v1/;
/ {
- #address-cells = <1>;
- #size-cells = <1>;
-
binman {
efi-capsule {
image-index = <0x1>;
diff --git a/tools/binman/test/319_capsule_accept.dts b/tools/binman/test/319_capsule_accept.dts
new file mode 100644
index 00000000000..d48e59f859b
--- /dev/null
+++ b/tools/binman/test/319_capsule_accept.dts
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ binman {
+ efi-empty-capsule {
+ /* Image GUID for testing capsule update */
+ image-guid = "binman-test";
+ capsule-type = "accept";
+ };
+ };
+};
diff --git a/tools/binman/test/320_capsule_revert.dts b/tools/binman/test/320_capsule_revert.dts
new file mode 100644
index 00000000000..bd141ef2924
--- /dev/null
+++ b/tools/binman/test/320_capsule_revert.dts
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ binman {
+ efi-empty-capsule {
+ capsule-type = "revert";
+ };
+ };
+};
diff --git a/tools/binman/test/321_capsule_accept_missing_guid.dts b/tools/binman/test/321_capsule_accept_missing_guid.dts
new file mode 100644
index 00000000000..a0088b174c5
--- /dev/null
+++ b/tools/binman/test/321_capsule_accept_missing_guid.dts
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ binman {
+ efi-empty-capsule {
+ capsule-type = "accept";
+ };
+ };
+};
diff --git a/tools/binman/test/322_empty_capsule_type_missing.dts b/tools/binman/test/322_empty_capsule_type_missing.dts
new file mode 100644
index 00000000000..d356168e775
--- /dev/null
+++ b/tools/binman/test/322_empty_capsule_type_missing.dts
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ binman {
+ efi-empty-capsule {
+ /* Image GUID for testing capsule update */
+ image-guid = "binman-test";
+ };
+ };
+};
diff --git a/tools/binman/test/323_capsule_accept_revert_missing.dts b/tools/binman/test/323_capsule_accept_revert_missing.dts
new file mode 100644
index 00000000000..31268b20b88
--- /dev/null
+++ b/tools/binman/test/323_capsule_accept_revert_missing.dts
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ binman {
+ efi-empty-capsule {
+ /* Image GUID for testing capsule update */
+ image-guid = "binman-test";
+ capsule-type = "foo";
+ };
+ };
+};
diff --git a/tools/buildman/main.py b/tools/buildman/main.py
index 5f42a58ddbb..3cf877e5e68 100755
--- a/tools/buildman/main.py
+++ b/tools/buildman/main.py
@@ -7,7 +7,7 @@
"""See README for more information"""
try:
- from importlib.resources import files
+ import importlib.resources
except ImportError:
# for Python 3.6
import importlib_resources
@@ -83,7 +83,13 @@ def run_buildman():
run_test_coverage()
elif args.full_help:
- tools.print_full_help(str(files('buildman').joinpath('README.rst')))
+ if hasattr(importlib.resources, 'files'):
+ dirpath = importlib.resources.files('buildman')
+ tools.print_full_help(str(dirpath.joinpath('README.rst')))
+ else:
+ with importlib.resources.path('buildman', 'README.rst') as readme:
+ tools.print_full_help(str(readme))
+
# Build selected commits for selected boards
else:
diff --git a/tools/docker/Dockerfile b/tools/docker/Dockerfile
index 1acb5413c7e..c4a2596b487 100644
--- a/tools/docker/Dockerfile
+++ b/tools/docker/Dockerfile
@@ -186,27 +186,6 @@ RUN git clone https://gitlab.com/qemu-project/qemu.git /tmp/qemu && \
make -j$(nproc) all install && \
rm -rf /tmp/qemu
-# Build QEMU supporting Nokia n900 emulation
-RUN mkdir -p /opt/nokia && \
- cd /tmp && \
- git clone https://git.linaro.org/qemu/qemu-linaro.git && \
- cd /tmp/qemu-linaro && \
- git checkout 8f8d8e0796efe1a6f34cdd83fb798f3c41217ec1 && \
- ./configure --enable-system --target-list=arm-softmmu \
- --python=/usr/bin/python2.7 --disable-sdl --disable-gtk \
- --disable-curses --audio-drv-list= --audio-card-list= \
- --disable-werror --disable-xen --disable-xen-pci-passthrough \
- --disable-brlapi --disable-vnc --disable-curl --disable-slirp \
- --disable-kvm --disable-user --disable-linux-user --disable-bsd-user \
- --disable-guest-base --disable-uuid --disable-vde --disable-linux-aio \
- --disable-cap-ng --disable-attr --disable-blobs --disable-docs \
- --disable-spice --disable-libiscsi --disable-smartcard-nss \
- --disable-usb-redir --disable-guest-agent --disable-seccomp \
- --disable-glusterfs --disable-nptl --disable-fdt && \
- make -j$(nproc) && \
- cp /tmp/qemu-linaro/arm-softmmu/qemu-system-arm /opt/nokia && \
- rm -rf /tmp/qemu-linaro
-
# Build genimage (required by some targets to generate disk images)
RUN wget -O - https://github.com/pengutronix/genimage/releases/download/v14/genimage-14.tar.xz | tar -C /tmp -xJ && \
cd /tmp/genimage-14 && \
@@ -250,16 +229,6 @@ RUN mkdir /tmp/trace && \
sudo make install && \
rm -rf /tmp/trace
-# Files to run Nokia RX-51 (aka N900) tests
-RUN mkdir -p /opt/nokia && \
- cd /opt/nokia && \
- wget https://raw.githubusercontent.com/pali/u-boot-maemo/master/debian/u-boot-gen-combined && \
- chmod 0755 u-boot-gen-combined && \
- wget http://repository.maemo.org/qemu-n900/qemu-n900.tar.gz && \
- wget http://repository.maemo.org/pool/maemo5.0/free/k/kernel/kernel_2.6.28-20103103+0m5_armel.deb && \
- wget http://repository.maemo.org/pool/maemo5.0/free/g/glibc/libc6_2.5.1-1eglibc27+0m5_armel.deb && \
- wget http://repository.maemo.org/pool/maemo5.0/free/b/busybox/busybox_1.10.2.legal-1osso30+0m5_armel.deb
-
# Create our user/group
RUN echo uboot ALL=NOPASSWD: ALL > /etc/sudoers.d/uboot
RUN useradd -m -U uboot
diff --git a/tools/eficapsule.h b/tools/eficapsule.h
index 2099a2e9b88..6efd07d2eb6 100644
--- a/tools/eficapsule.h
+++ b/tools/eficapsule.h
@@ -22,6 +22,8 @@
#define __aligned(x) __attribute__((__aligned__(x)))
#endif
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
typedef struct {
uint8_t b[16];
} efi_guid_t __aligned(8);
diff --git a/tools/fit_image.c b/tools/fit_image.c
index 9fe69ea0d9f..71e031c8550 100644
--- a/tools/fit_image.c
+++ b/tools/fit_image.c
@@ -497,7 +497,7 @@ static int fit_extract_data(struct image_tool_params *params, const char *fname)
{
void *buf = NULL;
int buf_ptr;
- int fit_size, new_size;
+ int fit_size, unpadded_size, new_size, pad_boundary;
int fd;
struct stat sbuf;
void *fdt;
@@ -564,9 +564,13 @@ static int fit_extract_data(struct image_tool_params *params, const char *fname)
/* Pack the FDT and place the data after it */
fdt_pack(fdt);
- new_size = fdt_totalsize(fdt);
- new_size = ALIGN(new_size, align_size);
+ unpadded_size = fdt_totalsize(fdt);
+ new_size = ALIGN(unpadded_size, align_size);
fdt_set_totalsize(fdt, new_size);
+ if (unpadded_size < fit_size) {
+ pad_boundary = new_size < fit_size ? new_size : fit_size;
+ memset(fdt + unpadded_size, 0, pad_boundary - unpadded_size);
+ }
debug("Size reduced from %x to %x\n", fit_size, fdt_totalsize(fdt));
debug("External data size %x\n", buf_ptr);
munmap(fdt, sbuf.st_size);
@@ -616,6 +620,8 @@ err:
static int fit_import_data(struct image_tool_params *params, const char *fname)
{
void *fdt, *old_fdt;
+ void *data = NULL;
+ const char *ext_data_prop = NULL;
int fit_size, new_size, size, data_base;
int fd;
struct stat sbuf;
@@ -659,14 +665,28 @@ static int fit_import_data(struct image_tool_params *params, const char *fname)
int buf_ptr;
int len;
- buf_ptr = fdtdec_get_int(fdt, node, "data-offset", -1);
- len = fdtdec_get_int(fdt, node, "data-size", -1);
- if (buf_ptr == -1 || len == -1)
+ /*
+ * FIT_DATA_OFFSET_PROP and FIT_DATA_POSITION_PROP are never both present,
+ * but if they are, prefer FIT_DATA_OFFSET_PROP as it was there first
+ */
+ buf_ptr = fdtdec_get_int(fdt, node, FIT_DATA_POSITION_PROP, -1);
+ if (buf_ptr != -1) {
+ ext_data_prop = FIT_DATA_POSITION_PROP;
+ data = old_fdt + buf_ptr;
+ }
+ buf_ptr = fdtdec_get_int(fdt, node, FIT_DATA_OFFSET_PROP, -1);
+ if (buf_ptr != -1) {
+ ext_data_prop = FIT_DATA_OFFSET_PROP;
+ data = old_fdt + data_base + buf_ptr;
+ }
+ len = fdtdec_get_int(fdt, node, FIT_DATA_SIZE_PROP, -1);
+ if (!data || len == -1)
continue;
debug("Importing data size %x\n", len);
- ret = fdt_setprop(fdt, node, "data",
- old_fdt + data_base + buf_ptr, len);
+ ret = fdt_setprop(fdt, node, FIT_DATA_PROP, data, len);
+ ret = fdt_delprop(fdt, node, ext_data_prop);
+
if (ret) {
debug("%s: Failed to write property: %s\n", __func__,
fdt_strerror(ret));
diff --git a/tools/iot2050-sign-fw.sh b/tools/iot2050-sign-fw.sh
index 6b426c854c2..75ffd560823 100755
--- a/tools/iot2050-sign-fw.sh
+++ b/tools/iot2050-sign-fw.sh
@@ -5,6 +5,8 @@ if [ -z "$1" ]; then
exit 1
fi
+TOOLS_DIR=$(dirname $0)
+
TEMP_X509=$(mktemp XXXXXXXX.temp)
REVISION=${2:-0}
@@ -39,10 +41,10 @@ CERT_X509=$(mktemp XXXXXXXX.crt)
openssl req -new -x509 -key $1 -nodes -outform DER -out $CERT_X509 -config $TEMP_X509 -sha512
cat $CERT_X509 tispl.bin > tispl.bin_signed
-source/tools/binman/binman replace -i flash-pg1.bin -f tispl.bin_signed fit@180000
-source/tools/binman/binman replace -i flash-pg2.bin -f tispl.bin_signed fit@180000
+$TOOLS_DIR/binman/binman replace -i flash-pg1.bin -f tispl.bin_signed fit@180000
+$TOOLS_DIR/binman/binman replace -i flash-pg2.bin -f tispl.bin_signed fit@180000
rm $TEMP_X509 $CERT_X509
-source/tools/binman/binman sign -i flash-pg1.bin -k $1 -a sha256,rsa4096 fit@380000
-source/tools/binman/binman sign -i flash-pg2.bin -k $1 -a sha256,rsa4096 fit@380000
+$TOOLS_DIR/binman/binman sign -i flash-pg1.bin -k $1 -a sha256,rsa4096 fit@380000
+$TOOLS_DIR/binman/binman sign -i flash-pg2.bin -k $1 -a sha256,rsa4096 fit@380000
diff --git a/tools/logos/stm32f746-disco.bmp b/tools/logos/stm32f746-disco.bmp
new file mode 100644
index 00000000000..c1ef4fb035c
--- /dev/null
+++ b/tools/logos/stm32f746-disco.bmp
Binary files differ
diff --git a/tools/mkeficapsule.c b/tools/mkeficapsule.c
index 52be1f122ee..b8fc6069b58 100644
--- a/tools/mkeficapsule.c
+++ b/tools/mkeficapsule.c
@@ -29,7 +29,7 @@ static const char *tool_name = "mkeficapsule";
efi_guid_t efi_guid_fm_capsule = EFI_FIRMWARE_MANAGEMENT_CAPSULE_ID_GUID;
efi_guid_t efi_guid_cert_type_pkcs7 = EFI_CERT_TYPE_PKCS7_GUID;
-static const char *opts_short = "g:i:I:v:p:c:m:o:dhAR";
+static const char *opts_short = "g:i:I:v:p:c:m:o:dhARD";
enum {
CAPSULE_NORMAL_BLOB = 0,
@@ -49,6 +49,7 @@ static struct option options[] = {
{"fw-accept", no_argument, NULL, 'A'},
{"fw-revert", no_argument, NULL, 'R'},
{"capoemflag", required_argument, NULL, 'o'},
+ {"dump-capsule", no_argument, NULL, 'D'},
{"help", no_argument, NULL, 'h'},
{NULL, 0, NULL, 0},
};
@@ -69,6 +70,7 @@ static void print_usage(void)
"\t-A, --fw-accept firmware accept capsule, requires GUID, no image blob\n"
"\t-R, --fw-revert firmware revert capsule, takes no GUID, no image blob\n"
"\t-o, --capoemflag Capsule OEM Flag, an integer between 0x0000 and 0xffff\n"
+ "\t-D, --dump-capsule dump the contents of the capsule headers\n"
"\t-h, --help print a help message\n",
tool_name);
}
@@ -647,6 +649,215 @@ err:
return ret;
}
+static void print_guid(void *ptr)
+{
+ int i;
+ efi_guid_t *guid = ptr;
+ const uint8_t seq[] = {
+ 3, 2, 1, 0, '-', 5, 4, '-', 7, 6,
+ '-', 8, 9, '-', 10, 11, 12, 13, 14, 15 };
+
+ for (i = 0; i < ARRAY_SIZE(seq); i++) {
+ if (seq[i] == '-')
+ putchar(seq[i]);
+ else
+ printf("%02X", guid->b[seq[i]]);
+ }
+
+ printf("\n");
+}
+
+static uint32_t dump_fmp_payload_header(
+ struct fmp_payload_header *fmp_payload_hdr)
+{
+ if (fmp_payload_hdr->signature == FMP_PAYLOAD_HDR_SIGNATURE) {
+ printf("--------\n");
+ printf("FMP_PAYLOAD_HDR.SIGNATURE\t\t\t: %08X\n",
+ FMP_PAYLOAD_HDR_SIGNATURE);
+ printf("FMP_PAYLOAD_HDR.HEADER_SIZE\t\t\t: %08X\n",
+ fmp_payload_hdr->header_size);
+ printf("FMP_PAYLOAD_HDR.FW_VERSION\t\t\t: %08X\n",
+ fmp_payload_hdr->fw_version);
+ printf("FMP_PAYLOAD_HDR.LOWEST_SUPPORTED_VERSION\t: %08X\n",
+ fmp_payload_hdr->lowest_supported_version);
+ return fmp_payload_hdr->header_size;
+ }
+
+ return 0;
+}
+
+static void dump_capsule_auth_header(
+ struct efi_firmware_image_authentication *capsule_auth_hdr)
+{
+ printf("EFI_FIRMWARE_IMAGE_AUTH.MONOTONIC_COUNT\t\t: %08lX\n",
+ capsule_auth_hdr->monotonic_count);
+ printf("EFI_FIRMWARE_IMAGE_AUTH.AUTH_INFO.HDR.dwLENGTH\t: %08X\n",
+ capsule_auth_hdr->auth_info.hdr.dwLength);
+ printf("EFI_FIRMWARE_IMAGE_AUTH.AUTH_INFO.HDR.wREVISION\t: %08X\n",
+ capsule_auth_hdr->auth_info.hdr.wRevision);
+ printf("EFI_FIRMWARE_IMAGE_AUTH.AUTH_INFO.HDR.wCERTTYPE\t: %08X\n",
+ capsule_auth_hdr->auth_info.hdr.wCertificateType);
+ printf("EFI_FIRMWARE_IMAGE_AUTH.AUTH_INFO.CERT_TYPE\t: ");
+ print_guid(&capsule_auth_hdr->auth_info.cert_type);
+}
+
+static void dump_fmp_capsule_image_header(
+ struct efi_firmware_management_capsule_image_header *image_hdr)
+{
+ void *capsule_auth_hdr;
+ void *fmp_payload_hdr;
+ uint64_t signature_size = 0;
+ uint32_t payload_size = 0;
+ uint32_t fmp_payload_hdr_size = 0;
+ struct efi_firmware_image_authentication *auth_hdr;
+
+ printf("--------\n");
+ printf("FMP_CAPSULE_IMAGE_HDR.VERSION\t\t\t: %08X\n",
+ image_hdr->version);
+ printf("FMP_CAPSULE_IMAGE_HDR.UPDATE_IMAGE_TYPE_ID\t: ");
+ print_guid(&image_hdr->update_image_type_id);
+ printf("FMP_CAPSULE_IMAGE_HDR.UPDATE_IMAGE_INDEX\t: %08X\n",
+ image_hdr->update_image_index);
+ printf("FMP_CAPSULE_IMAGE_HDR.UPDATE_IMAGE_SIZE\t\t: %08X\n",
+ image_hdr->update_image_size);
+ printf("FMP_CAPSULE_IMAGE_HDR.UPDATE_VENDOR_CODE_SIZE\t: %08X\n",
+ image_hdr->update_vendor_code_size);
+ printf("FMP_CAPSULE_IMAGE_HDR.UPDATE_HARDWARE_INSTANCE\t: %08lX\n",
+ image_hdr->update_hardware_instance);
+ printf("FMP_CAPSULE_IMAGE_HDR.IMAGE_CAPSULE_SUPPORT\t: %08lX\n",
+ image_hdr->image_capsule_support);
+
+ printf("--------\n");
+ if (image_hdr->image_capsule_support & CAPSULE_SUPPORT_AUTHENTICATION) {
+ capsule_auth_hdr = (char *)image_hdr + sizeof(*image_hdr);
+ dump_capsule_auth_header(capsule_auth_hdr);
+
+ auth_hdr = capsule_auth_hdr;
+ signature_size = sizeof(auth_hdr->monotonic_count) +
+ auth_hdr->auth_info.hdr.dwLength;
+ fmp_payload_hdr = (char *)capsule_auth_hdr + signature_size;
+ } else {
+ printf("Capsule Authentication Not Enabled\n");
+ fmp_payload_hdr = (char *)image_hdr + sizeof(*image_hdr);
+ }
+
+ fmp_payload_hdr_size = dump_fmp_payload_header(fmp_payload_hdr);
+
+ payload_size = image_hdr->update_image_size - signature_size -
+ fmp_payload_hdr_size;
+ printf("--------\n");
+ printf("Payload Image Size\t\t\t\t: %08X\n", payload_size);
+}
+
+static void dump_fmp_header(
+ struct efi_firmware_management_capsule_header *fmp_hdr)
+{
+ int i;
+ void *capsule_image_hdr;
+
+ printf("EFI_FMP_HDR.VERSION\t\t\t\t: %08X\n", fmp_hdr->version);
+ printf("EFI_FMP_HDR.EMBEDDED_DRIVER_COUNT\t\t: %08X\n",
+ fmp_hdr->embedded_driver_count);
+ printf("EFI_FMP_HDR.PAYLOAD_ITEM_COUNT\t\t\t: %08X\n",
+ fmp_hdr->payload_item_count);
+
+ /*
+ * We currently don't support Embedded Drivers.
+ * Only worry about the payload items.
+ */
+ for (i = 0; i < fmp_hdr->payload_item_count; i++) {
+ capsule_image_hdr = (char *)fmp_hdr +
+ fmp_hdr->item_offset_list[i];
+ dump_fmp_capsule_image_header(capsule_image_hdr);
+ }
+}
+
+static void dump_capsule_header(struct efi_capsule_header *capsule_hdr)
+{
+ printf("EFI_CAPSULE_HDR.CAPSULE_GUID\t\t\t: ");
+ print_guid((void *)&capsule_hdr->capsule_guid);
+ printf("EFI_CAPSULE_HDR.HEADER_SIZE\t\t\t: %08X\n",
+ capsule_hdr->header_size);
+ printf("EFI_CAPSULE_HDR.FLAGS\t\t\t\t: %08X\n", capsule_hdr->flags);
+ printf("EFI_CAPSULE_HDR.CAPSULE_IMAGE_SIZE\t\t: %08X\n",
+ capsule_hdr->capsule_image_size);
+}
+
+static void normal_capsule_dump(void *capsule_buf)
+{
+ void *fmp_hdr;
+ struct efi_capsule_header *hdr = capsule_buf;
+
+ dump_capsule_header(hdr);
+ printf("--------\n");
+
+ fmp_hdr = (char *)capsule_buf + sizeof(*hdr);
+ dump_fmp_header(fmp_hdr);
+}
+
+static void empty_capsule_dump(void *capsule_buf)
+{
+ efi_guid_t *accept_image_guid;
+ struct efi_capsule_header *hdr = capsule_buf;
+ efi_guid_t efi_empty_accept_capsule = FW_ACCEPT_OS_GUID;
+
+ dump_capsule_header(hdr);
+
+ if (!memcmp(&efi_empty_accept_capsule, &hdr->capsule_guid,
+ sizeof(efi_guid_t))) {
+ accept_image_guid = (void *)(char *)capsule_buf +
+ sizeof(struct efi_capsule_header);
+ printf("--------\n");
+ printf("ACCEPT_IMAGE_GUID\t\t\t\t: ");
+ print_guid(accept_image_guid);
+ }
+}
+
+static void dump_capsule_contents(char *capsule_file)
+{
+ int fd;
+ char *ptr;
+ efi_guid_t efi_fmp_guid = EFI_FIRMWARE_MANAGEMENT_CAPSULE_ID_GUID;
+ efi_guid_t efi_empty_accept_capsule = FW_ACCEPT_OS_GUID;
+ efi_guid_t efi_empty_revert_capsule = FW_REVERT_OS_GUID;
+ struct stat sbuf;
+
+ if (!capsule_file) {
+ fprintf(stderr, "No capsule file provided\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if ((fd = open(capsule_file, O_RDONLY)) < 0) {
+ fprintf(stderr, "Error opening capsule file: %s\n",
+ capsule_file);
+ exit(EXIT_FAILURE);
+ }
+
+ if (fstat(fd, &sbuf) < 0) {
+ fprintf(stderr, "Can't stat capsule file: %s\n", capsule_file);
+ exit(EXIT_FAILURE);
+ }
+
+ if ((ptr = mmap(0, sbuf.st_size, PROT_READ, MAP_SHARED, fd, 0))
+ == MAP_FAILED) {
+ fprintf(stderr, "Can't mmap capsule file: %s\n", capsule_file);
+ exit(EXIT_FAILURE);
+ }
+
+ if (!memcmp(&efi_fmp_guid, ptr, sizeof(efi_guid_t))) {
+ normal_capsule_dump(ptr);
+ } else if (!memcmp(&efi_empty_accept_capsule, ptr,
+ sizeof(efi_guid_t)) ||
+ !memcmp(&efi_empty_revert_capsule, ptr,
+ sizeof(efi_guid_t))) {
+ empty_capsule_dump(ptr);
+ } else {
+ fprintf(stderr, "Unable to decode the capsule file: %s\n",
+ capsule_file);
+ exit(EXIT_FAILURE);
+ }
+}
+
/**
* main - main entry function of mkeficapsule
* @argc: Number of arguments
@@ -666,6 +877,7 @@ int main(int argc, char **argv)
unsigned long index, instance;
uint64_t mcount;
unsigned long oemflags;
+ bool capsule_dump;
char *privkey_file, *cert_file;
int c, idx;
struct fmp_payload_header_params fmp_ph_params = { 0 };
@@ -676,6 +888,7 @@ int main(int argc, char **argv)
mcount = 0;
privkey_file = NULL;
cert_file = NULL;
+ capsule_dump = false;
dump_sig = 0;
capsule_type = CAPSULE_NORMAL_BLOB;
oemflags = 0;
@@ -754,12 +967,24 @@ int main(int argc, char **argv)
exit(1);
}
break;
+ case 'D':
+ capsule_dump = true;
+ break;
default:
print_usage();
exit(EXIT_SUCCESS);
}
}
+ if (capsule_dump) {
+ if (argc != optind + 1) {
+ fprintf(stderr, "Must provide the capsule file to parse\n");
+ exit(EXIT_FAILURE);
+ }
+ dump_capsule_contents(argv[argc - 1]);
+ exit(EXIT_SUCCESS);
+ }
+
/* check necessary parameters */
if ((capsule_type == CAPSULE_NORMAL_BLOB &&
((argc != optind + 2) || !guid ||
diff --git a/tools/patman/gitutil.py b/tools/patman/gitutil.py
index 6700057359f..b0a12f2e8c0 100644
--- a/tools/patman/gitutil.py
+++ b/tools/patman/gitutil.py
@@ -651,7 +651,7 @@ def get_default_user_name():
Returns:
User name found in .gitconfig file, or None if none
"""
- uname = command.output_one_line('git', 'config', '--global', 'user.name')
+ uname = command.output_one_line('git', 'config', '--global', '--includes', 'user.name')
return uname
@@ -661,7 +661,7 @@ def get_default_user_email():
Returns:
User's email found in .gitconfig file, or None if none
"""
- uemail = command.output_one_line('git', 'config', '--global', 'user.email')
+ uemail = command.output_one_line('git', 'config', '--global', '--includes', 'user.email')
return uemail
diff --git a/tools/patman/pyproject.toml b/tools/patman/pyproject.toml
index c5dc7c7e276..a54211f7069 100644
--- a/tools/patman/pyproject.toml
+++ b/tools/patman/pyproject.toml
@@ -23,7 +23,7 @@ classifiers = [
"Bug Tracker" = "https://source.denx.de/groups/u-boot/-/issues"
[project.scripts]
-patman = "patman.__main__:run_patman"
+patman = "patman.__main__"
[tool.setuptools.package-data]
patman = ["*.rst"]
diff --git a/tools/patman/test_checkpatch.py b/tools/patman/test_checkpatch.py
index a8bb364e42b..0a8f7408f14 100644
--- a/tools/patman/test_checkpatch.py
+++ b/tools/patman/test_checkpatch.py
@@ -18,19 +18,47 @@ from patman import commit
class Line:
+ """Single changed line in one file in a patch
+
+ Args:
+ fname (str): Filename containing the added line
+ text (str): Text of the added line
+ """
def __init__(self, fname, text):
self.fname = fname
self.text = text
class PatchMaker:
+ """Makes a patch for checking with checkpatch.pl
+
+ The idea here is to create a patch which adds one line in one file,
+ intended to provoke a checkpatch error or warning. The base patch is empty
+ (i.e. invalid), so you should call add_line() to add at least one line.
+ """
def __init__(self):
+ """Set up the PatchMaker object
+
+ Properties:
+ lines (list of Line): List of lines to add to the patch. Note that
+ each line has both a file and some text associated with it,
+ since for simplicity we just add a single line for each file
+ """
self.lines = []
def add_line(self, fname, text):
+ """Add to the list of filename/line pairs"""
self.lines.append(Line(fname, text))
def get_patch_text(self):
+ """Build the patch text
+
+ Takes a base patch and adds a diffstat and patch for each filename/line
+ pair in the list.
+
+ Returns:
+ str: Patch text ready for submission to checkpatch
+ """
base = '''From 125b77450f4c66b8fd9654319520bbe795c9ef31 Mon Sep 17 00:00:00 2001
From: Simon Glass <sjg@chromium.org>
Date: Sun, 14 Jun 2020 09:45:14 -0600
@@ -75,6 +103,11 @@ Signed-off-by: Simon Glass <sjg@chromium.org>
return '\n'.join(lines)
def get_patch(self):
+ """Get the patch text and write it into a temporary file
+
+ Returns:
+ str: Filename containing the patch
+ """
inhandle, inname = tempfile.mkstemp()
infd = os.fdopen(inhandle, 'w')
infd.write(self.get_patch_text())
@@ -82,6 +115,22 @@ Signed-off-by: Simon Glass <sjg@chromium.org>
return inname
def run_checkpatch(self):
+ """Run checkpatch on the patch file
+
+ Returns:
+ namedtuple containing:
+ ok: False=failure, True=ok
+ problems: List of problems, each a dict:
+ 'type'; error or warning
+ 'msg': text message
+ 'file' : filename
+ 'line': line number
+ errors: Number of errors
+ warnings: Number of warnings
+ checks: Number of checks
+ lines: Number of lines
+ stdout: Full output of checkpatch
+ """
return checkpatch.check_patch(self.get_patch(), show_types=True)
@@ -238,7 +287,7 @@ index 0000000..2234c87
+ * passed to kernel in the ATAGs
+ */
+
-+#include <common.h>
++#include <config.h>
+
+struct bootstage_record {
+ u32 time_us;
@@ -401,10 +450,15 @@ index 0000000..2234c87
def test_barred_include_in_hdr(self):
"""Test for using a barred include in a header file"""
pm = PatchMaker()
- #pm.add_line('include/myfile.h', '#include <common.h>')
pm.add_line('include/myfile.h', '#include <dm.h>')
self.check_single_message(pm, 'BARRED_INCLUDE_IN_HDR', 'error')
+ def test_barred_include_common_h(self):
+ """Test for adding common.h to a file"""
+ pm = PatchMaker()
+ pm.add_line('include/myfile.h', '#include <common.h>')
+ self.check_single_message(pm, 'BARRED_INCLUDE_COMMON_H', 'error')
+
def test_config_is_enabled_config(self):
"""Test for accidental CONFIG_IS_ENABLED(CONFIG_*) calls"""
pm = PatchMaker()
diff --git a/tools/moveconfig.py b/tools/qconfig.py
index 6cbecc3d5c8..04118d942da 100755
--- a/tools/moveconfig.py
+++ b/tools/qconfig.py
@@ -5,17 +5,14 @@
#
"""
-Move config options from headers to defconfig files.
+Build and query a Kconfig database for boards.
See doc/develop/moveconfig.rst for documentation.
"""
from argparse import ArgumentParser
-import asteval
import collections
from contextlib import ExitStack
-import copy
-import difflib
import doctest
import filecmp
import fnmatch
@@ -32,9 +29,11 @@ import threading
import time
import unittest
+import asteval
from buildman import bsettings
from buildman import kconfiglib
from buildman import toolchain
+from u_boot_pylib import terminal
SHOW_GNU_MAKE = 'scripts/show-gnu-make'
SLEEP_TIME=0.03
@@ -44,30 +43,9 @@ STATE_DEFCONFIG = 1
STATE_AUTOCONF = 2
STATE_SAVEDEFCONFIG = 3
-ACTION_MOVE = 0
-ACTION_NO_ENTRY = 1
-ACTION_NO_ENTRY_WARN = 2
-ACTION_NO_CHANGE = 3
-
-COLOR_BLACK = '0;30'
-COLOR_RED = '0;31'
-COLOR_GREEN = '0;32'
-COLOR_BROWN = '0;33'
-COLOR_BLUE = '0;34'
-COLOR_PURPLE = '0;35'
-COLOR_CYAN = '0;36'
-COLOR_LIGHT_GRAY = '0;37'
-COLOR_DARK_GRAY = '1;30'
-COLOR_LIGHT_RED = '1;31'
-COLOR_LIGHT_GREEN = '1;32'
-COLOR_YELLOW = '1;33'
-COLOR_LIGHT_BLUE = '1;34'
-COLOR_LIGHT_PURPLE = '1;35'
-COLOR_LIGHT_CYAN = '1;36'
-COLOR_WHITE = '1;37'
-
AUTO_CONF_PATH = 'include/config/auto.conf'
-CONFIG_DATABASE = 'moveconfig.db'
+CONFIG_DATABASE = 'qconfig.db'
+FAILED_LIST = 'qconfig.failed'
CONFIG_LEN = len('CONFIG_')
@@ -196,111 +174,6 @@ def get_all_defconfigs():
return defconfigs
-def color_text(color_enabled, color, string):
- """Return colored string."""
- if color_enabled:
- # LF should not be surrounded by the escape sequence.
- # Otherwise, additional whitespace or line-feed might be printed.
- return '\n'.join([ '\033[' + color + 'm' + s + '\033[0m' if s else ''
- for s in string.split('\n') ])
- return string
-
-def show_diff(alines, blines, file_path, color_enabled):
- """Show unidified diff.
-
- Args:
- alines (list of str): A list of lines (before)
- blines (list of str): A list of lines (after)
- file_path (str): Path to the file
- color_enabled (bool): Display the diff in color
- """
- diff = difflib.unified_diff(alines, blines,
- fromfile=os.path.join('a', file_path),
- tofile=os.path.join('b', file_path))
-
- for line in diff:
- if line.startswith('-') and not line.startswith('--'):
- print(color_text(color_enabled, COLOR_RED, line))
- elif line.startswith('+') and not line.startswith('++'):
- print(color_text(color_enabled, COLOR_GREEN, line))
- else:
- print(line)
-
-def extend_matched_lines(lines, matched, pre_patterns, post_patterns,
- extend_pre, extend_post):
- """Extend matched lines if desired patterns are found before/after already
- matched lines.
-
- Args:
- lines (list of str): list of lines handled.
- matched (list of int): list of line numbers that have been already
- matched (will be updated by this function)
- pre_patterns (list of re.Pattern): list of regular expression that should
- be matched as preamble
- post_patterns (list of re.Pattern): list of regular expression that should
- be matched as postamble
- extend_pre (bool): Add the line number of matched preamble to the matched
- list
- extend_post (bool): Add the line number of matched postamble to the
- matched list
- """
- extended_matched = []
-
- j = matched[0]
-
- for i in matched:
- if i == 0 or i < j:
- continue
- j = i
- while j in matched:
- j += 1
- if j >= len(lines):
- break
-
- for pat in pre_patterns:
- if pat.search(lines[i - 1]):
- break
- else:
- # not matched
- continue
-
- for pat in post_patterns:
- if pat.search(lines[j]):
- break
- else:
- # not matched
- continue
-
- if extend_pre:
- extended_matched.append(i - 1)
- if extend_post:
- extended_matched.append(j)
-
- matched += extended_matched
- matched.sort()
-
-def confirm(args, prompt):
- """Ask the user to confirm something
-
- Args:
- args (Namespace ): program arguments
-
- Returns:
- bool: True to confirm, False to cancel/stop
- """
- if not args.yes:
- while True:
- choice = input(f'{prompt} [y/n]: ')
- choice = choice.lower()
- print(choice)
- if choice in ('y', 'n'):
- break
-
- if choice == 'n':
- return False
-
- return True
-
def write_file(fname, data):
"""Write data to a file
@@ -321,7 +194,7 @@ def read_file(fname, as_lines=True, skip_unicode=False):
Args:
fname (str): Filename to read from
- as_lines: Return file contents as a list of lines
+ as_lines (bool): Return file contents as a list of lines
skip_unicode (bool): True to report unicode errors and continue
Returns:
@@ -336,163 +209,13 @@ def read_file(fname, as_lines=True, skip_unicode=False):
try:
if as_lines:
return [line.rstrip('\n') for line in inf.readlines()]
- else:
- return inf.read()
- except UnicodeDecodeError as e:
+ return inf.read()
+ except UnicodeDecodeError as exc:
if not skip_unicode:
raise
- print("Failed on file %s': %s" % (fname, e))
+ print(f"Failed on file '{fname}: {exc}")
return None
-def cleanup_empty_blocks(header_path, args):
- """Clean up empty conditional blocks
-
- Args:
- header_path (str): path to the cleaned file.
- args (Namespace): program arguments
- """
- pattern = re.compile(r'^\s*#\s*if.*$\n^\s*#\s*endif.*$\n*', flags=re.M)
- data = read_file(header_path, as_lines=False, skip_unicode=True)
- if data is None:
- return
-
- new_data = pattern.sub('\n', data)
-
- show_diff(data.splitlines(True), new_data.splitlines(True), header_path,
- args.color)
-
- if args.dry_run:
- return
-
- if new_data != data:
- write_file(header_path, new_data)
-
-def cleanup_one_header(header_path, patterns, args):
- """Clean regex-matched lines away from a file.
-
- Args:
- header_path: path to the cleaned file.
- patterns: list of regex patterns. Any lines matching to these
- patterns are deleted.
- args (Namespace): program arguments
- """
- lines = read_file(header_path, skip_unicode=True)
- if lines is None:
- return
-
- matched = []
- for i, line in enumerate(lines):
- if i - 1 in matched and lines[i - 1].endswith('\\'):
- matched.append(i)
- continue
- for pattern in patterns:
- if pattern.search(line):
- matched.append(i)
- break
-
- if not matched:
- return
-
- # remove empty #ifdef ... #endif, successive blank lines
- pattern_if = re.compile(r'#\s*if(def|ndef)?\b') # #if, #ifdef, #ifndef
- pattern_elif = re.compile(r'#\s*el(if|se)\b') # #elif, #else
- pattern_endif = re.compile(r'#\s*endif\b') # #endif
- pattern_blank = re.compile(r'^\s*$') # empty line
-
- while True:
- old_matched = copy.copy(matched)
- extend_matched_lines(lines, matched, [pattern_if],
- [pattern_endif], True, True)
- extend_matched_lines(lines, matched, [pattern_elif],
- [pattern_elif, pattern_endif], True, False)
- extend_matched_lines(lines, matched, [pattern_if, pattern_elif],
- [pattern_blank], False, True)
- extend_matched_lines(lines, matched, [pattern_blank],
- [pattern_elif, pattern_endif], True, False)
- extend_matched_lines(lines, matched, [pattern_blank],
- [pattern_blank], True, False)
- if matched == old_matched:
- break
-
- tolines = copy.copy(lines)
-
- for i in reversed(matched):
- tolines.pop(i)
-
- show_diff(lines, tolines, header_path, args.color)
-
- if args.dry_run:
- return
-
- write_file(header_path, tolines)
-
-def cleanup_headers(configs, args):
- """Delete config defines from board headers.
-
- Args:
- configs: A list of CONFIGs to remove.
- args (Namespace): program arguments
- """
- if not confirm(args, 'Clean up headers?'):
- return
-
- patterns = []
- for config in configs:
- patterns.append(re.compile(r'#\s*define\s+%s\b' % config))
- patterns.append(re.compile(r'#\s*undef\s+%s\b' % config))
-
- for dir in 'include', 'arch', 'board':
- for (dirpath, dirnames, filenames) in os.walk(dir):
- if dirpath == os.path.join('include', 'generated'):
- continue
- for filename in filenames:
- if not filename.endswith(('~', '.dts', '.dtsi', '.bin',
- '.elf','.aml','.dat')):
- header_path = os.path.join(dirpath, filename)
- # This file contains UTF-16 data and no CONFIG symbols
- if header_path == 'include/video_font_data.h':
- continue
- cleanup_one_header(header_path, patterns, args)
- cleanup_empty_blocks(header_path, args)
-
-def find_matching(patterns, line):
- for pat in patterns:
- if pat.search(line):
- return True
- return False
-
-def cleanup_readme(configs, args):
- """Delete config description in README
-
- Args:
- configs: A list of CONFIGs to remove.
- args (Namespace): program arguments
- """
- if not confirm(args, 'Clean up README?'):
- return
-
- patterns = []
- for config in configs:
- patterns.append(re.compile(r'^\s+%s' % config))
-
- lines = read_file('README')
-
- found = False
- newlines = []
- for line in lines:
- if not found:
- found = find_matching(patterns, line)
- if found:
- continue
-
- if found and re.search(r'^\s+CONFIG', line):
- found = False
-
- if not found:
- newlines.append(line)
-
- write_file('README', newlines)
-
def try_expand(line):
"""If value looks like an expression, try expanding it
Otherwise just return the existing value
@@ -506,10 +229,10 @@ def try_expand(line):
val= val.strip('\"')
if re.search(r'[*+-/]|<<|SZ_+|\(([^\)]+)\)', val):
newval = hex(aeval(val))
- print('\tExpanded expression %s to %s' % (val, newval))
+ print(f'\tExpanded expression {val} to {newval}')
return cfg+'='+newval
except:
- print('\tFailed to expand expression in %s' % line)
+ print(f'\tFailed to expand expression in {line}')
return line
@@ -519,23 +242,36 @@ class Progress:
"""Progress Indicator"""
- def __init__(self, total):
+ def __init__(self, col, total):
"""Create a new progress indicator.
Args:
- total: A number of defconfig files to process.
+ color_enabled (bool): True for colour output
+ total (int): A number of defconfig files to process.
"""
+ self.col = col
self.current = 0
+ self.good = 0
self.total = total
- def inc(self):
- """Increment the number of processed defconfig files."""
+ def inc(self, success):
+ """Increment the number of processed defconfig files.
+ Args:
+ success (bool): True if processing succeeded
+ """
+ self.good += success
self.current += 1
def show(self):
"""Display the progress."""
- print(' %d defconfigs out of %d\r' % (self.current, self.total), end=' ')
+ if self.current != self.total:
+ line = self.col.build(self.col.GREEN, f'{self.good:5d}')
+ line += self.col.build(self.col.RED,
+ f'{self.current - self.good:5d}')
+ line += self.col.build(self.col.MAGENTA,
+ f'/{self.total - self.current}')
+ print(f'{line} \r', end='')
sys.stdout.flush()
@@ -559,15 +295,13 @@ class KconfigParser:
re_arch = re.compile(r'CONFIG_SYS_ARCH="(.*)"')
re_cpu = re.compile(r'CONFIG_SYS_CPU="(.*)"')
- def __init__(self, configs, args, build_dir):
+ def __init__(self, args, build_dir):
"""Create a new parser.
Args:
- configs: A list of CONFIGs to move.
args (Namespace): program arguments
build_dir: Build directory.
"""
- self.configs = configs
self.args = args
self.dotconfig = os.path.join(build_dir, '.config')
self.autoconf = os.path.join(build_dir, 'include', 'autoconf.mk')
@@ -585,13 +319,13 @@ class KconfigParser:
arch = ''
cpu = ''
for line in read_file(self.dotconfig):
- m = self.re_arch.match(line)
- if m:
- arch = m.group(1)
+ m_arch = self.re_arch.match(line)
+ if m_arch:
+ arch = m_arch.group(1)
continue
- m = self.re_cpu.match(line)
- if m:
- cpu = m.group(1)
+ m_cpu = self.re_cpu.match(line)
+ if m_cpu:
+ cpu = m_cpu.group(1)
if not arch:
return None
@@ -602,155 +336,6 @@ class KconfigParser:
return arch
- def parse_one_config(self, config, dotconfig_lines, autoconf_lines):
- """Parse .config, defconfig, include/autoconf.mk for one config.
-
- This function looks for the config options in the lines from
- defconfig, .config, and include/autoconf.mk in order to decide
- which action should be taken for this defconfig.
-
- Args:
- config: CONFIG name to parse.
- dotconfig_lines: lines from the .config file.
- autoconf_lines: lines from the include/autoconf.mk file.
-
- Returns:
- A tupple of the action for this defconfig and the line
- matched for the config.
- """
- not_set = '# %s is not set' % config
-
- for line in autoconf_lines:
- line = line.rstrip()
- if line.startswith(config + '='):
- new_val = line
- break
- else:
- new_val = not_set
-
- new_val = try_expand(new_val)
-
- for line in dotconfig_lines:
- line = line.rstrip()
- if line.startswith(config + '=') or line == not_set:
- old_val = line
- break
- else:
- if new_val == not_set:
- return (ACTION_NO_ENTRY, config)
- else:
- return (ACTION_NO_ENTRY_WARN, config)
-
- # If this CONFIG is neither bool nor trisate
- if old_val[-2:] != '=y' and old_val[-2:] != '=m' and old_val != not_set:
- # tools/scripts/define2mk.sed changes '1' to 'y'.
- # This is a problem if the CONFIG is int type.
- # Check the type in Kconfig and handle it correctly.
- if new_val[-2:] == '=y':
- new_val = new_val[:-1] + '1'
-
- return (ACTION_NO_CHANGE if old_val == new_val else ACTION_MOVE,
- new_val)
-
- def update_dotconfig(self):
- """Parse files for the config options and update the .config.
-
- This function parses the generated .config and include/autoconf.mk
- searching the target options.
- Move the config option(s) to the .config as needed.
-
- Args:
- defconfig: defconfig name.
-
- Returns:
- Return a tuple of (updated flag, log string).
- The "updated flag" is True if the .config was updated, False
- otherwise. The "log string" shows what happend to the .config.
- """
-
- results = []
- updated = False
- suspicious = False
- rm_files = [self.config_autoconf, self.autoconf]
-
- if self.args.spl:
- if os.path.exists(self.spl_autoconf):
- autoconf_path = self.spl_autoconf
- rm_files.append(self.spl_autoconf)
- else:
- for f in rm_files:
- os.remove(f)
- return (updated, suspicious,
- color_text(self.args.color, COLOR_BROWN,
- "SPL is not enabled. Skipped.") + '\n')
- else:
- autoconf_path = self.autoconf
-
- dotconfig_lines = read_file(self.dotconfig)
-
- autoconf_lines = read_file(autoconf_path)
-
- for config in self.configs:
- result = self.parse_one_config(config, dotconfig_lines,
- autoconf_lines)
- results.append(result)
-
- log = ''
-
- for (action, value) in results:
- if action == ACTION_MOVE:
- actlog = "Move '%s'" % value
- log_color = COLOR_LIGHT_GREEN
- elif action == ACTION_NO_ENTRY:
- actlog = '%s is not defined in Kconfig. Do nothing.' % value
- log_color = COLOR_LIGHT_BLUE
- elif action == ACTION_NO_ENTRY_WARN:
- actlog = '%s is not defined in Kconfig (suspicious). Do nothing.' % value
- log_color = COLOR_YELLOW
- suspicious = True
- elif action == ACTION_NO_CHANGE:
- actlog = "'%s' is the same as the define in Kconfig. Do nothing." \
- % value
- log_color = COLOR_LIGHT_PURPLE
- else:
- sys.exit('Internal Error. This should not happen.')
-
- log += color_text(self.args.color, log_color, actlog) + '\n'
-
- with open(self.dotconfig, 'a', encoding='utf-8') as out:
- for (action, value) in results:
- if action == ACTION_MOVE:
- out.write(value + '\n')
- updated = True
-
- self.results = results
- for f in rm_files:
- os.remove(f)
-
- return (updated, suspicious, log)
-
- def check_defconfig(self):
- """Check the defconfig after savedefconfig
-
- Returns:
- Return additional log if moved CONFIGs were removed again by
- 'make savedefconfig'.
- """
-
- log = ''
-
- defconfig_lines = read_file(self.defconfig)
-
- for (action, value) in self.results:
- if action != ACTION_MOVE:
- continue
- if not value in defconfig_lines:
- log += color_text(self.args.color, COLOR_YELLOW,
- "'%s' was removed by savedefconfig.\n" %
- value)
-
- return log
-
class DatabaseThread(threading.Thread):
"""This thread processes results from Slot threads.
@@ -788,13 +373,12 @@ class Slot:
for faster processing.
"""
- def __init__(self, toolchains, configs, args, progress, devnull,
- make_cmd, reference_src_dir, db_queue):
+ def __init__(self, toolchains, args, progress, devnull, make_cmd,
+ reference_src_dir, db_queue, col):
"""Create a new process slot.
Args:
toolchains: Toolchains object containing toolchains.
- configs: A list of CONFIGs to move.
args: Program arguments
progress: A progress indicator.
devnull: A file object of '/dev/null'.
@@ -802,6 +386,7 @@ class Slot:
reference_src_dir: Determine the true starting config state from this
source tree.
db_queue: output queue to write config info for the database
+ col (terminal.Color): Colour object
"""
self.toolchains = toolchains
self.args = args
@@ -811,10 +396,14 @@ class Slot:
self.make_cmd = (make_cmd, 'O=' + self.build_dir)
self.reference_src_dir = reference_src_dir
self.db_queue = db_queue
- self.parser = KconfigParser(configs, args, self.build_dir)
+ self.col = col
+ self.parser = KconfigParser(args, self.build_dir)
self.state = STATE_IDLE
self.failed_boards = set()
- self.suspicious_boards = set()
+ self.defconfig = None
+ self.log = []
+ self.current_src_dir = None
+ self.proc = None
def __del__(self):
"""Delete the working directory
@@ -827,7 +416,7 @@ class Slot:
If the subprocess is still running, wait until it finishes.
"""
if self.state != STATE_IDLE:
- while self.ps.poll() == None:
+ while self.proc.poll() is None:
pass
shutil.rmtree(self.build_dir)
@@ -839,7 +428,7 @@ class Slot:
the slot is occupied (i.e. the current subprocess is still running).
Args:
- defconfig: defconfig name.
+ defconfig (str): defconfig name.
Returns:
Return True on success or False on failure
@@ -848,7 +437,7 @@ class Slot:
return False
self.defconfig = defconfig
- self.log = ''
+ self.log = []
self.current_src_dir = self.reference_src_dir
self.do_defconfig()
return True
@@ -872,10 +461,10 @@ class Slot:
if self.state == STATE_IDLE:
return True
- if self.ps.poll() == None:
+ if self.proc.poll() is None:
return False
- if self.ps.poll() != 0:
+ if self.proc.poll() != 0:
self.handle_error()
elif self.state == STATE_DEFCONFIG:
if self.reference_src_dir and not self.current_src_dir:
@@ -895,16 +484,16 @@ class Slot:
else:
sys.exit('Internal Error. This should not happen.')
- return True if self.state == STATE_IDLE else False
+ return self.state == STATE_IDLE
def handle_error(self):
"""Handle error cases."""
- self.log += color_text(self.args.color, COLOR_LIGHT_RED,
- 'Failed to process.\n')
+ self.log.append(self.col.build(self.col.RED, 'Failed to process',
+ bright=True))
if self.args.verbose:
- self.log += color_text(self.args.color, COLOR_LIGHT_CYAN,
- self.ps.stderr.read().decode())
+ for line in self.proc.stderr.read().decode().splitlines():
+ self.log.append(self.col.build(self.col.CYAN, line, True))
self.finish(False)
def do_defconfig(self):
@@ -912,9 +501,9 @@ class Slot:
cmd = list(self.make_cmd)
cmd.append(self.defconfig)
- self.ps = subprocess.Popen(cmd, stdout=self.devnull,
- stderr=subprocess.PIPE,
- cwd=self.current_src_dir)
+ self.proc = subprocess.Popen(cmd, stdout=self.devnull,
+ stderr=subprocess.PIPE,
+ cwd=self.current_src_dir)
self.state = STATE_DEFCONFIG
def do_autoconf(self):
@@ -922,20 +511,21 @@ class Slot:
arch = self.parser.get_arch()
try:
- toolchain = self.toolchains.Select(arch)
+ tchain = self.toolchains.Select(arch)
except ValueError:
- self.log += color_text(self.args.color, COLOR_YELLOW,
- "Tool chain for '%s' is missing. Do nothing.\n" % arch)
+ self.log.append(self.col.build(
+ self.col.YELLOW,
+ f"Tool chain for '{arch}' is missing: do nothing"))
self.finish(False)
return
- env = toolchain.MakeEnvironment(False)
+ env = tchain.MakeEnvironment(False)
cmd = list(self.make_cmd)
cmd.append('KCONFIG_IGNORE_DUPLICATES=1')
cmd.append(AUTO_CONF_PATH)
- self.ps = subprocess.Popen(cmd, stdout=self.devnull, env=env,
- stderr=subprocess.PIPE,
- cwd=self.current_src_dir)
+ self.proc = subprocess.Popen(cmd, stdout=self.devnull, env=env,
+ stderr=subprocess.PIPE,
+ cwd=self.current_src_dir)
self.state = STATE_AUTOCONF
def do_build_db(self):
@@ -950,41 +540,25 @@ class Slot:
def do_savedefconfig(self):
"""Update the .config and run 'make savedefconfig'."""
-
- (updated, suspicious, log) = self.parser.update_dotconfig()
- if suspicious:
- self.suspicious_boards.add(self.defconfig)
- self.log += log
-
- if not self.args.force_sync and not updated:
+ if not self.args.force_sync:
self.finish(True)
return
- if updated:
- self.log += color_text(self.args.color, COLOR_LIGHT_GREEN,
- 'Syncing by savedefconfig...\n')
- else:
- self.log += 'Syncing by savedefconfig (forced by option)...\n'
cmd = list(self.make_cmd)
cmd.append('savedefconfig')
- self.ps = subprocess.Popen(cmd, stdout=self.devnull,
- stderr=subprocess.PIPE)
+ self.proc = subprocess.Popen(cmd, stdout=self.devnull,
+ stderr=subprocess.PIPE)
self.state = STATE_SAVEDEFCONFIG
def update_defconfig(self):
"""Update the input defconfig and go back to the idle state."""
-
- log = self.parser.check_defconfig()
- if log:
- self.suspicious_boards.add(self.defconfig)
- self.log += log
orig_defconfig = os.path.join('configs', self.defconfig)
new_defconfig = os.path.join(self.build_dir, 'defconfig')
updated = not filecmp.cmp(orig_defconfig, new_defconfig)
if updated:
- self.log += color_text(self.args.color, COLOR_LIGHT_BLUE,
- 'defconfig was updated.\n')
+ self.log.append(
+ self.col.build(self.col.BLUE, 'defconfig updated', bright=True))
if not self.args.dry_run and updated:
shutil.move(new_defconfig, orig_defconfig)
@@ -994,25 +568,30 @@ class Slot:
"""Display log along with progress and go to the idle state.
Args:
- success: Should be True when the defconfig was processed
+ success (bool): Should be True when the defconfig was processed
successfully, or False when it fails.
"""
# output at least 30 characters to hide the "* defconfigs out of *".
- log = self.defconfig.ljust(30) + '\n'
+ name = self.defconfig[:-len('_defconfig')]
+ if self.log:
+
+ # Put the first log line on the first line
+ log = name.ljust(20) + ' ' + self.log[0]
- log += '\n'.join([ ' ' + s for s in self.log.split('\n') ])
- # Some threads are running in parallel.
- # Print log atomically to not mix up logs from different threads.
- print(log, file=(sys.stdout if success else sys.stderr))
+ if len(self.log) > 1:
+ log += '\n' + '\n'.join([' ' + s for s in self.log[1:]])
+ # Some threads are running in parallel.
+ # Print log atomically to not mix up logs from different threads.
+ print(log, file=(sys.stdout if success else sys.stderr))
if not success:
if self.args.exit_on_error:
sys.exit('Exit on error.')
# If --exit-on-error flag is not set, skip this board and continue.
# Record the failed board.
- self.failed_boards.add(self.defconfig)
+ self.failed_boards.add(name)
- self.progress.inc()
+ self.progress.inc(success)
self.progress.show()
self.state = STATE_IDLE
@@ -1021,42 +600,38 @@ class Slot:
"""
return self.failed_boards
- def get_suspicious_boards(self):
- """Returns a set of boards (defconfigs) with possible misconversion.
- """
- return self.suspicious_boards - self.failed_boards
-
class Slots:
"""Controller of the array of subprocess slots."""
- def __init__(self, toolchains, configs, args, progress,
- reference_src_dir, db_queue):
+ def __init__(self, toolchains, args, progress, reference_src_dir, db_queue,
+ col):
"""Create a new slots controller.
Args:
- toolchains: Toolchains object containing toolchains.
- configs: A list of CONFIGs to move.
- args: Program arguments
- progress: A progress indicator.
- reference_src_dir: Determine the true starting config state from this
- source tree.
- db_queue: output queue to write config info for the database
+ toolchains (Toolchains): Toolchains object containing toolchains
+ args (Namespace): Program arguments
+ progress (Progress): A progress indicator.
+ reference_src_dir (str): Determine the true starting config state
+ from this source tree (None for none)
+ db_queue (Queue): output queue to write config info for the database
+ col (terminal.Color): Colour object
"""
self.args = args
self.slots = []
+ self.progress = progress
+ self.col = col
devnull = subprocess.DEVNULL
make_cmd = get_make_cmd()
- for i in range(args.jobs):
- self.slots.append(Slot(toolchains, configs, args, progress,
- devnull, make_cmd, reference_src_dir,
- db_queue))
+ for _ in range(args.jobs):
+ self.slots.append(Slot(toolchains, args, progress, devnull,
+ make_cmd, reference_src_dir, db_queue, col))
def add(self, defconfig):
"""Add a new subprocess if a vacant slot is found.
Args:
- defconfig: defconfig name to be put into.
+ defconfig (str): defconfig name to be put into.
Returns:
Return True on success or False on failure
@@ -1089,42 +664,17 @@ class Slots:
ret = False
return ret
- def show_failed_boards(self):
- """Display all of the failed boards (defconfigs)."""
+ def write_failed_boards(self):
+ """Show the results of processing"""
boards = set()
- output_file = 'moveconfig.failed'
for slot in self.slots:
boards |= slot.get_failed_boards()
if boards:
- boards = '\n'.join(boards) + '\n'
- msg = 'The following boards were not processed due to error:\n'
- msg += boards
- msg += '(the list has been saved in %s)\n' % output_file
- print(color_text(self.args.color, COLOR_LIGHT_RED,
- msg), file=sys.stderr)
+ boards = '\n'.join(sorted(boards)) + '\n'
+ write_file(FAILED_LIST, boards)
- write_file(output_file, boards)
-
- def show_suspicious_boards(self):
- """Display all boards (defconfigs) with possible misconversion."""
- boards = set()
- output_file = 'moveconfig.suspicious'
-
- for slot in self.slots:
- boards |= slot.get_suspicious_boards()
-
- if boards:
- boards = '\n'.join(boards) + '\n'
- msg = 'The following boards might have been converted incorrectly.\n'
- msg += 'It is highly recommended to check them manually:\n'
- msg += boards
- msg += '(the list has been saved in %s)\n' % output_file
- print(color_text(self.args.color, COLOR_YELLOW,
- msg), file=sys.stderr)
-
- write_file(output_file, boards)
class ReferenceSource:
@@ -1140,8 +690,9 @@ class ReferenceSource:
print('Cloning git repo to a separate work directory...')
subprocess.check_output(['git', 'clone', os.getcwd(), '.'],
cwd=self.src_dir)
- print("Checkout '%s' to build the original autoconf.mk." % \
- subprocess.check_output(['git', 'rev-parse', '--short', commit]).strip())
+ rev = subprocess.check_output(['git', 'rev-parse', '--short',
+ commit]).strip()
+ print(f"Checkout '{rev}' to build the original autoconf.mk.")
subprocess.check_output(['git', 'checkout', commit],
stderr=subprocess.STDOUT, cwd=self.src_dir)
@@ -1160,24 +711,18 @@ class ReferenceSource:
return self.src_dir
-def move_config(toolchains, configs, args, db_queue):
- """Move config options to defconfig files.
+def move_config(toolchains, args, db_queue, col):
+ """Build database or sync config options to defconfig files.
Args:
- configs: A list of CONFIGs to move.
- args: Program arguments
- """
- if len(configs) == 0:
- if args.force_sync:
- print('No CONFIG is specified. You are probably syncing defconfigs.', end=' ')
- elif args.build_db:
- print('Building %s database' % CONFIG_DATABASE)
- else:
- print('Neither CONFIG nor --force-sync is specified. Nothing will happen.', end=' ')
- else:
- print('Move ' + ', '.join(configs), end=' ')
- print('(jobs: %d)\n' % args.jobs)
+ toolchains (Toolchains): Toolchains to use
+ args (Namespace): Program arguments
+ db_queue (Queue): Queue for database updates
+ col (terminal.Color): Colour object
+ Returns:
+ Progress: Progress indicator
+ """
if args.git_ref:
reference_src = ReferenceSource(args.git_ref)
reference_src_dir = reference_src.get_dir()
@@ -1189,9 +734,8 @@ def move_config(toolchains, configs, args, db_queue):
else:
defconfigs = get_all_defconfigs()
- progress = Progress(len(defconfigs))
- slots = Slots(toolchains, configs, args, progress, reference_src_dir,
- db_queue)
+ progress = Progress(col, len(defconfigs))
+ slots = Slots(toolchains, args, progress, reference_src_dir, db_queue, col)
# Main loop to process defconfig files:
# Add a new subprocess into a vacant slot.
@@ -1206,25 +750,24 @@ def move_config(toolchains, configs, args, db_queue):
while not slots.empty():
time.sleep(SLEEP_TIME)
- print('')
- slots.show_failed_boards()
- slots.show_suspicious_boards()
+ slots.write_failed_boards()
+ return progress
def find_kconfig_rules(kconf, config, imply_config):
"""Check whether a config has a 'select' or 'imply' keyword
Args:
- kconf: Kconfiglib.Kconfig object
- config: Name of config to check (without CONFIG_ prefix)
- imply_config: Implying config (without CONFIG_ prefix) which may or
- may not have an 'imply' for 'config')
+ kconf (Kconfiglib.Kconfig): Kconfig object
+ config (str): Name of config to check (without CONFIG_ prefix)
+ imply_config (str): Implying config (without CONFIG_ prefix) which may
+ or may not have an 'imply' for 'config')
Returns:
Symbol object for 'config' if found, else None
"""
sym = kconf.syms.get(imply_config)
if sym:
- for sel, cond in (sym.selects + sym.implies):
+ for sel, _ in (sym.selects + sym.implies):
if sel.name == config:
return sym
return None
@@ -1236,54 +779,55 @@ def check_imply_rule(kconf, config, imply_config):
to add an 'imply' for 'config' to that part of the Kconfig.
Args:
- kconf: Kconfiglib.Kconfig object
- config: Name of config to check (without CONFIG_ prefix)
- imply_config: Implying config (without CONFIG_ prefix) which may or
- may not have an 'imply' for 'config')
+ kconf (Kconfiglib.Kconfig): Kconfig object
+ config (str): Name of config to check (without CONFIG_ prefix)
+ imply_config (str): Implying config (without CONFIG_ prefix) which may
+ or may not have an 'imply' for 'config')
Returns:
tuple:
- filename of Kconfig file containing imply_config, or None if none
- line number within the Kconfig file, or 0 if none
- message indicating the result
+ str: filename of Kconfig file containing imply_config, or None if
+ none
+ int: line number within the Kconfig file, or 0 if none
+ str: message indicating the result
"""
sym = kconf.syms.get(imply_config)
if not sym:
return 'cannot find sym'
nodes = sym.nodes
if len(nodes) != 1:
- return '%d locations' % len(nodes)
+ return f'{len(nodes)} locations'
node = nodes[0]
fname, linenum = node.filename, node.linenr
cwd = os.getcwd()
if cwd and fname.startswith(cwd):
fname = fname[len(cwd) + 1:]
- file_line = ' at %s:%d' % (fname, linenum)
+ file_line = f' at {fname}:{linenum}'
data = read_file(fname)
- if data[linenum - 1] != 'config %s' % imply_config:
- return None, 0, 'bad sym format %s%s' % (data[linenum], file_line)
- return fname, linenum, 'adding%s' % file_line
+ if data[linenum - 1] != f'config {imply_config}':
+        return None, 0, f'bad sym format {data[linenum]}{file_line}'
+ return fname, linenum, f'adding{file_line}'
def add_imply_rule(config, fname, linenum):
"""Add a new 'imply' option to a Kconfig
Args:
- config: config option to add an imply for (without CONFIG_ prefix)
- fname: Kconfig filename to update
- linenum: Line number to place the 'imply' before
+ config (str): config option to add an imply for (without CONFIG_ prefix)
+ fname (str): Kconfig filename to update
+ linenum (int): Line number to place the 'imply' before
Returns:
Message indicating the result
"""
- file_line = ' at %s:%d' % (fname, linenum)
+ file_line = f' at {fname}:{linenum}'
data = read_file(fname)
linenum -= 1
for offset, line in enumerate(data[linenum:]):
if line.strip().startswith('help') or not line:
- data.insert(linenum + offset, '\timply %s' % config)
+ data.insert(linenum + offset, f'\timply {config}')
write_file(fname, data)
- return 'added%s' % file_line
+ return f'added{file_line}'
return 'could not insert%s'
@@ -1356,7 +900,7 @@ def do_imply_config(config_list, add_imply, imply_flags, skip_added,
all x86 boards will have that option, avoiding adding CONFIG_CMD_IRQ to
each of the x86 defconfig files.
- This function uses the moveconfig database to find such options. It
+ This function uses the qconfig database to find such options. It
displays a list of things that could possibly imply those in the list.
The algorithm ignores any that start with CONFIG_TARGET since these
typically refer to only a few defconfigs (often one). It also does not
@@ -1392,21 +936,20 @@ def do_imply_config(config_list, add_imply, imply_flags, skip_added,
if add_imply and add_imply != 'all':
add_imply = add_imply.split(',')
- all_configs, all_defconfigs, config_db, defconfig_db = read_database()
+ all_configs, all_defconfigs, _, defconfig_db = read_database()
# Work through each target config option in turn, independently
for config in config_list:
defconfigs = defconfig_db.get(config)
if not defconfigs:
- print('%s not found in any defconfig' % config)
+ print(f'{config} not found in any defconfig')
continue
# Get the set of defconfigs without this one (since a config cannot
# imply itself)
non_defconfigs = all_defconfigs - defconfigs
num_defconfigs = len(defconfigs)
- print('%s found in %d/%d defconfigs' % (config, num_defconfigs,
- len(all_configs)))
+ print(f'{config} found in {num_defconfigs}/{len(all_configs)} defconfigs')
# This will hold the results: key=config, value=defconfigs containing it
imply_configs = {}
@@ -1452,7 +995,7 @@ def do_imply_config(config_list, add_imply, imply_flags, skip_added,
# skip imply_config because prev is a superset
skip = True
break
- elif count > prev_count:
+ if count > prev_count:
# delete prev because imply_config is a superset
del imply_configs[prev]
if not skip:
@@ -1487,7 +1030,7 @@ def do_imply_config(config_list, add_imply, imply_flags, skip_added,
fname, linenum = nodes[0].filename, nodes[0].linenr
if cwd and fname.startswith(cwd):
fname = fname[len(cwd) + 1:]
- kconfig_info = '%s:%d' % (fname, linenum)
+ kconfig_info = f'{fname}:{linenum}'
if skip_added:
show = False
else:
@@ -1513,8 +1056,8 @@ def do_imply_config(config_list, add_imply, imply_flags, skip_added,
add_list[fname].append(linenum)
if show and kconfig_info != 'skip':
- print('%5d : %-30s%-25s %s' % (num_common, iconfig.ljust(30),
- kconfig_info, missing_str))
+ print(f'{num_common:5d} : '
+              f'{iconfig:<30}{kconfig_info:<25} {missing_str}')
# Having collected a list of things to add, now we add them. We process
# each file from the largest line number to the smallest so that
@@ -1553,7 +1096,7 @@ def do_find_config(config_list):
is preceded by a tilde (~) then it must be false, otherwise it must
be true)
"""
- all_configs, all_defconfigs, config_db, defconfig_db = read_database()
+ _, all_defconfigs, config_db, _ = read_database()
# Start with all defconfigs
out = all_defconfigs
@@ -1597,19 +1140,19 @@ def prefix_config(cfg):
>>> prefix_config('A123')
'CONFIG_A123'
"""
- op = ''
+ oper = ''
if cfg[0] == '~':
- op = cfg[0]
+ oper = cfg[0]
cfg = cfg[1:]
if not cfg.startswith('CONFIG_'):
cfg = 'CONFIG_' + cfg
- return op + cfg
+ return oper + cfg
-RE_MK_CONFIGS = re.compile('CONFIG_(\$\(SPL_(?:TPL_)?\))?([A-Za-z0-9_]*)')
-RE_IFDEF = re.compile('(ifdef|ifndef)')
-RE_C_CONFIGS = re.compile('CONFIG_([A-Za-z0-9_]*)')
-RE_CONFIG_IS = re.compile('CONFIG_IS_ENABLED\(([A-Za-z0-9_]*)\)')
+RE_MK_CONFIGS = re.compile(r'CONFIG_(\$\(SPL_(?:TPL_)?\))?([A-Za-z0-9_]*)')
+RE_IFDEF = re.compile(r'(ifdef|ifndef)')
+RE_C_CONFIGS = re.compile(r'CONFIG_([A-Za-z0-9_]*)')
+RE_CONFIG_IS = re.compile(r'CONFIG_IS_ENABLED\(([A-Za-z0-9_]*)\)')
class ConfigUse:
def __init__(self, cfg, is_spl, fname, rest):
@@ -1651,14 +1194,12 @@ def scan_makefiles(fnames):
fname_uses = {}
for fname, rest in fnames:
m_iter = RE_MK_CONFIGS.finditer(rest)
- found = False
- for m in m_iter:
- found = True
- real_opt = m.group(2)
+ for mat in m_iter:
+ real_opt = mat.group(2)
if real_opt == '':
continue
is_spl = False
- if m.group(1):
+ if mat.group(1):
is_spl = True
use = ConfigUse(real_opt, is_spl, fname, rest)
if fname not in fname_uses:
@@ -1694,10 +1235,12 @@ def scan_src_files(fnames):
>>> RE_CONFIG_IS.search('#if CONFIG_IS_ENABLED(OF_PLATDATA)').groups()
('OF_PLATDATA',)
"""
+ fname = None
+ rest = None
+
def add_uses(m_iter, is_spl):
- for m in m_iter:
- found = True
- real_opt = m.group(1)
+ for mat in m_iter:
+ real_opt = mat.group(1)
if real_opt == '':
continue
use = ConfigUse(real_opt, is_spl, fname, rest)
@@ -1754,7 +1297,7 @@ def do_scan_source(path, do_update):
"""
# Make sure we know about all the options
not_found = collections.defaultdict(list)
- for use, rest in all_uses.items():
+ for use, _ in all_uses.items():
name = use.cfg
if name in IGNORE_SYMS:
continue
@@ -1766,7 +1309,6 @@ def do_scan_source(path, do_update):
# If it is an SPL symbol, try prepending all SPL_ prefixes to
# find at least one SPL symbol
if use.is_spl:
- add_to_dict = False
for prefix in SPL_PREFIXES:
try_name = prefix + name
sym = kconf.syms.get(try_name)
@@ -1784,7 +1326,6 @@ def do_scan_source(path, do_update):
elif not use.is_spl:
check = False
else: # MODE_NORMAL
- debug = False
sym = kconf.syms.get(name)
if not sym:
proper_name = is_not_proper(name)
@@ -1825,7 +1366,7 @@ def do_scan_source(path, do_update):
print(f'Scanning source in {path}')
args = ['git', 'grep', '-E', r'IS_ENABLED|\bCONFIG']
with subprocess.Popen(args, stdout=subprocess.PIPE) as proc:
- out, err = proc.communicate()
+ out, _ = proc.communicate()
lines = out.splitlines()
re_fname = re.compile('^([^:]*):(.*)')
src_list = []
@@ -1859,7 +1400,7 @@ def do_scan_source(path, do_update):
print(f'Not sure how to handle file {fname}')
# Scan the Makefiles
- all_uses, fname_uses = scan_makefiles(mk_list)
+ all_uses, _ = scan_makefiles(mk_list)
spl_not_found = set()
proper_not_found = set()
@@ -1872,15 +1413,15 @@ def do_scan_source(path, do_update):
print('\nCONFIG options present in Makefiles but not Kconfig (SPL):')
not_found = check_not_found(all_uses, MODE_SPL)
show_uses(not_found)
- spl_not_found |= set([is_not_proper(key) or key for key in not_found.keys()])
+ spl_not_found |= {is_not_proper(key) or key for key in not_found.keys()}
print('\nCONFIG options used as Proper in Makefiles but without a non-SPL_ variant:')
not_found = check_not_found(all_uses, MODE_PROPER)
show_uses(not_found)
- proper_not_found |= set([key for key in not_found.keys()])
+    proper_not_found |= set(not_found.keys())
# Scan the source code
- all_uses, fname_uses = scan_src_files(src_list)
+ all_uses, _ = scan_src_files(src_list)
# Make sure we know about all the options
print('\nCONFIG options present in source but not Kconfig:')
@@ -1890,12 +1431,12 @@ def do_scan_source(path, do_update):
print('\nCONFIG options present in source but not Kconfig (SPL):')
not_found = check_not_found(all_uses, MODE_SPL)
show_uses(not_found)
- spl_not_found |= set([is_not_proper(key) or key for key in not_found.keys()])
+ spl_not_found |= {is_not_proper(key) or key for key in not_found.keys()}
print('\nCONFIG options used as Proper in source but without a non-SPL_ variant:')
not_found = check_not_found(all_uses, MODE_PROPER)
show_uses(not_found)
- proper_not_found |= set([key for key in not_found.keys()])
+    proper_not_found |= set(not_found.keys())
print('\nCONFIG options used as SPL but without an SPL_ variant:')
for item in sorted(spl_not_found):
@@ -1907,12 +1448,14 @@ def do_scan_source(path, do_update):
# Write out the updated information
if do_update:
- with open(os.path.join(path, 'scripts', 'conf_nospl'), 'w') as out:
+ with open(os.path.join(path, 'scripts', 'conf_nospl'), 'w',
+ encoding='utf-8') as out:
print('# These options should not be enabled in SPL builds\n',
file=out)
for item in sorted(spl_not_found):
print(item, file=out)
- with open(os.path.join(path, 'scripts', 'conf_noproper'), 'w') as out:
+ with open(os.path.join(path, 'scripts', 'conf_noproper'), 'w',
+ encoding='utf-8') as out:
print('# These options should not be enabled in Proper builds\n',
file=out)
for item in sorted(proper_not_found):
@@ -1938,10 +1481,10 @@ doc/develop/moveconfig.rst for documentation.'''
'implying others')
parser.add_argument('-b', '--build-db', action='store_true', default=False,
help='build a CONFIG database')
- parser.add_argument('-c', '--color', action='store_true', default=False,
- help='display the log in color')
parser.add_argument('-C', '--commit', action='store_true', default=False,
help='Create a git commit for the operation')
+ parser.add_argument('--nocolour', action='store_true', default=False,
+ help="don't display the log in colour")
parser.add_argument('-d', '--defconfigs', type=str,
help='a file containing a list of defconfigs to move, '
"one per line (for example 'snow_defconfig') "
@@ -1951,9 +1494,6 @@ doc/develop/moveconfig.rst for documentation.'''
help='exit immediately on any error')
parser.add_argument('-f', '--find', action='store_true', default=False,
help='Find boards with a given config combination')
- parser.add_argument('-H', '--headers-only', dest='cleanup_headers_only',
- action='store_true', default=False,
- help='only cleanup the headers')
parser.add_argument('-i', '--imply', action='store_true', default=False,
help='find options which imply others')
parser.add_argument('-I', '--imply-flags', type=str, default='',
@@ -1981,26 +1521,27 @@ doc/develop/moveconfig.rst for documentation.'''
parser.add_argument('configs', nargs='*')
args = parser.parse_args()
- configs = args.configs
if args.test:
sys.argv = [sys.argv[0]]
- fail, count = doctest.testmod()
+ fail, _ = doctest.testmod()
if fail:
return 1
unittest.main()
+ col = terminal.Color(terminal.COLOR_NEVER if args.nocolour
+ else terminal.COLOR_IF_TERMINAL)
+
if args.scan_source:
do_scan_source(os.getcwd(), args.update)
- return
+ return 0
- if not any((len(configs), args.force_sync, args.build_db, args.imply,
- args.find)):
+ if not any((args.force_sync, args.build_db, args.imply, args.find)):
parser.print_usage()
sys.exit(1)
# prefix the option name with CONFIG_ if missing
- configs = [prefix_config(cfg) for cfg in configs]
+ configs = [prefix_config(cfg) for cfg in args.configs]
check_top_directory()
@@ -2013,40 +1554,36 @@ doc/develop/moveconfig.rst for documentation.'''
for flag in args.imply_flags.split(','):
bad = flag not in IMPLY_FLAGS
if bad:
- print("Invalid flag '%s'" % flag)
+ print(f"Invalid flag '{flag}'")
if flag == 'help' or bad:
print("Imply flags: (separate with ',')")
for name, info in IMPLY_FLAGS.items():
- print(' %-15s: %s' % (name, info[1]))
+                    print(f'  {name:<15}: {info[1]}')
parser.print_usage()
sys.exit(1)
imply_flags |= IMPLY_FLAGS[flag][0]
do_imply_config(configs, args.add_imply, imply_flags, args.skip_added)
- return
+ return 0
if args.find:
do_find_config(configs)
- return
+ return 0
+ # We are either building the database or forcing a sync of defconfigs
config_db = {}
db_queue = queue.Queue()
- t = DatabaseThread(config_db, db_queue)
- t.setDaemon(True)
- t.start()
-
- if not args.cleanup_headers_only:
- check_clean_directory()
- bsettings.setup('')
- toolchains = toolchain.Toolchains()
- toolchains.GetSettings()
- toolchains.Scan(verbose=False)
- move_config(toolchains, configs, args, db_queue)
- db_queue.join()
-
- if configs:
- cleanup_headers(configs, args)
- cleanup_readme(configs, args)
+ dbt = DatabaseThread(config_db, db_queue)
+ dbt.daemon = True
+ dbt.start()
+
+ check_clean_directory()
+ bsettings.setup('')
+ toolchains = toolchain.Toolchains()
+ toolchains.GetSettings()
+ toolchains.Scan(verbose=False)
+ progress = move_config(toolchains, args, db_queue, col)
+ db_queue.join()
if args.commit:
subprocess.call(['git', 'add', '-u'])
@@ -2060,13 +1597,28 @@ doc/develop/moveconfig.rst for documentation.'''
msg += '\n\nRsync all defconfig files using moveconfig.py'
subprocess.call(['git', 'commit', '-s', '-m', msg])
+ failed = progress.total - progress.good
+ failure = f'{failed} failed, ' if failed else ''
if args.build_db:
- with open(CONFIG_DATABASE, 'w', encoding='utf-8') as fd:
+ with open(CONFIG_DATABASE, 'w', encoding='utf-8') as outf:
for defconfig, configs in config_db.items():
- fd.write('%s\n' % defconfig)
+ outf.write(f'{defconfig}\n')
for config in sorted(configs.keys()):
- fd.write(' %s=%s\n' % (config, configs[config]))
- fd.write('\n')
+ outf.write(f' {config}={configs[config]}\n')
+ outf.write('\n')
+ print(col.build(
+ col.RED if failed else col.GREEN,
+ f'{failure}{len(config_db)} boards written to {CONFIG_DATABASE}'))
+ else:
+ if failed:
+ print(col.build(col.RED, f'{failure}see {FAILED_LIST}', True))
+ else:
+ # Add enough spaces to overwrite the progress indicator
+ print(col.build(
+ col.GREEN, f'{progress.total} processed ', bright=True))
+
+ return 0
+
if __name__ == '__main__':
sys.exit(main())