From 4997a7ed05bf109b34ea0d072a33bb29209ae4ff Mon Sep 17 00:00:00 2001 From: Simon Glass Date: Mon, 8 Jul 2019 13:18:52 -0600 Subject: binman: Add a utility library for coreboot CBFS Coreboot uses a simple flash-based filesystem called Coreboot Filesystem (CBFS) to organise files used during boot. This allows files to be named and their position in the flash to be set. It has special features for dealing with x86 devices which typically memory-map their SPI flash to the top of 32-bit address space and need a 'boot block' ending there. Create a library to help create and read CBFS files. This includes a writer class, a reader class and associated other helpers. Only a subset of features are currently supported. Signed-off-by: Simon Glass --- tools/binman/cbfs_util.py | 720 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 720 insertions(+) create mode 100644 tools/binman/cbfs_util.py (limited to 'tools/binman/cbfs_util.py') diff --git a/tools/binman/cbfs_util.py b/tools/binman/cbfs_util.py new file mode 100644 index 00000000000..197cff89509 --- /dev/null +++ b/tools/binman/cbfs_util.py @@ -0,0 +1,720 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright 2019 Google LLC +# Written by Simon Glass + +"""Support for coreboot's CBFS format + +CBFS supports a header followed by a number of files, generally targeted at SPI +flash. + +The format is somewhat defined by documentation in the coreboot tree although +it is necessary to rely on the C structures and source code (mostly cbfstool) +to fully understand it. + +Currently supported: raw and stage types with compression +""" + +from __future__ import print_function + +from collections import OrderedDict +import io +import struct +import sys + +import command +import elf +import tools + +# Set to True to enable printing output while working +DEBUG = False + +# Set to True to enable output from running cbfstool for debugging +VERBOSE = False + +# The master header, at the start of the CBFS +HEADER_FORMAT = '>IIIIIIII' +HEADER_LEN = 0x20 +HEADER_MAGIC = 0x4f524243 +HEADER_VERSION1 = 0x31313131 +HEADER_VERSION2 = 0x31313132 + +# The file header, at the start of each file in the CBFS +FILE_HEADER_FORMAT = b'>8sIIII' +FILE_HEADER_LEN = 0x18 +FILE_MAGIC = b'LARCHIVE' +FILENAME_ALIGN = 16 # Filename lengths are aligned to this + +# A stage header containing information about 'stage' files +# Yes this is correct: this header is in litte-endian format +STAGE_FORMAT = ' offset: + raise ValueError('No space for data before offset %#x (current offset %#x)' % + (offset, fd.tell())) + fd.write(tools.GetBytes(self._erase_byte, offset - fd.tell())) + + def _align_to(self, fd, align): + """Write out pad bytes until a given alignment is reached + + This only aligns if the resulting output would not reach the end of the + CBFS, since we want to leave the last 4 bytes for the master-header + pointer. + + Args: + fd: File objext to write to + align: Alignment to require (e.g. 
4 means pad to next 4-byte + boundary) + """ + offset = align_int(fd.tell(), align) + if offset < self._size: + self._skip_to(fd, offset) + + def add_file_stage(self, name, data): + """Add a new stage file to the CBFS + + Args: + name: String file name to put in CBFS (does not need to correspond + to the name that the file originally came from) + data: Contents of file + + Returns: + CbfsFile object created + """ + cfile = CbfsFile.stage(self._base_address, name, data) + self._files[name] = cfile + return cfile + + def add_file_raw(self, name, data, compress=COMPRESS_NONE): + """Create a new raw file + + Args: + name: String file name to put in CBFS (does not need to correspond + to the name that the file originally came from) + data: Contents of file + compress: Compression algorithm to use (COMPRESS_...) + + Returns: + CbfsFile object created + """ + cfile = CbfsFile.raw(name, data, compress) + self._files[name] = cfile + return cfile + + def _write_header(self, fd, add_fileheader): + """Write out the master header to a CBFS + + Args: + fd: File object + add_fileheader: True to place the master header in a file header + record + """ + if fd.tell() > self._header_offset: + raise ValueError('No space for header at offset %#x (current offset %#x)' % + (self._header_offset, fd.tell())) + if not add_fileheader: + self._skip_to(fd, self._header_offset) + hdr = struct.pack(HEADER_FORMAT, HEADER_MAGIC, HEADER_VERSION2, + self._size, self._bootblock_size, self._align, + self._contents_offset, self._arch, 0xffffffff) + if add_fileheader: + name = _pack_string(self._master_name) + fd.write(struct.pack(FILE_HEADER_FORMAT, FILE_MAGIC, len(hdr), + TYPE_CBFSHEADER, 0, + FILE_HEADER_LEN + len(name))) + fd.write(name) + self._header_offset = fd.tell() + fd.write(hdr) + self._align_to(fd, self._align) + else: + fd.write(hdr) + + def get_data(self): + """Obtain the full contents of the CBFS + + Thhis builds the CBFS with headers and all required files. + + Returns: + 'bytes' type containing the data + """ + fd = io.BytesIO() + + # THe header can go at the start in some cases + if self._hdr_at_start: + self._write_header(fd, add_fileheader=self._add_fileheader) + self._skip_to(fd, self._contents_offset) + + # Write out each file + for cbf in self._files.values(): + fd.write(cbf.get_data()) + self._align_to(fd, self._align) + if not self._hdr_at_start: + self._write_header(fd, add_fileheader=self._add_fileheader) + + # Pad to the end and write a pointer to the CBFS master header + self._skip_to(fd, self._base_address or self._size - 4) + rel_offset = self._header_offset - self._size + fd.write(struct.pack('II", hdr) + data = hdr + fd.read(alen - 8) + if atag == FILE_ATTR_TAG_COMPRESSION: + # We don't currently use this information + atag, alen, compress, _decomp_size = struct.unpack( + ATTR_COMPRESSION_FORMAT, data) + else: + print('Unknown attribute tag %x' % atag) + attr_size -= len(data) + return compress + + def _read_header(self, fd): + """Read the master header + + Reads the header and stores the information obtained into the member + variables. 
+ + Args: + fd: File to read from + + Returns: + True if header was read OK, False if it is truncated or has the + wrong magic or version + """ + pos = fd.tell() + data = fd.read(HEADER_LEN) + if len(data) < HEADER_LEN: + print('Header at %x ran out of data' % pos) + return False + (self.magic, self.version, self.rom_size, self.boot_block_size, + self.align, self.cbfs_offset, self.arch, _) = struct.unpack( + HEADER_FORMAT, data) + return self.magic == HEADER_MAGIC and ( + self.version == HEADER_VERSION1 or + self.version == HEADER_VERSION2) + + @classmethod + def _read_string(cls, fd): + """Read a string from a file + + This reads a string and aligns the data to the next alignment boundary + + Args: + fd: File to read from + + Returns: + string read ('str' type) encoded to UTF-8, or None if we ran out of + data + """ + val = b'' + while True: + data = fd.read(FILENAME_ALIGN) + if len(data) < FILENAME_ALIGN: + return None + pos = data.find(b'\0') + if pos == -1: + val += data + else: + val += data[:pos] + break + return val.decode('utf-8') + + +def cbfstool(fname, *cbfs_args): + """Run cbfstool with provided arguments + + If the tool fails then this function raises an exception and prints out the + output and stderr. + + Args: + fname: Filename of CBFS + *cbfs_args: List of arguments to pass to cbfstool + + Returns: + CommandResult object containing the results + """ + args = ('cbfstool', fname) + cbfs_args + result = command.RunPipe([args], capture=not VERBOSE, + capture_stderr=not VERBOSE, raise_on_error=False) + if result.return_code: + print(result.stderr, file=sys.stderr) + raise Exception("Failed to run (error %d): '%s'" % + (result.return_code, ' '.join(args))) -- cgit v1.2.3 From 7c173ced645b9fff4d5b41849375275a8b63f04d Mon Sep 17 00:00:00 2001 From: Simon Glass Date: Mon, 8 Jul 2019 13:18:55 -0600 Subject: binman: Pad empty areas of the CBFS with files When there is lots of open space in a CBFS it is normally padded with 'empty' files so that sequentially scanning the CBFS can skip from one to the next without a break. Add support for this. Signed-off-by: Simon Glass --- tools/binman/cbfs_util.py | 68 ++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 65 insertions(+), 3 deletions(-) (limited to 'tools/binman/cbfs_util.py') diff --git a/tools/binman/cbfs_util.py b/tools/binman/cbfs_util.py index 197cff89509..ec4a2e5a8c6 100644 --- a/tools/binman/cbfs_util.py +++ b/tools/binman/cbfs_util.py @@ -11,7 +11,8 @@ The format is somewhat defined by documentation in the coreboot tree although it is necessary to rely on the C structures and source code (mostly cbfstool) to fully understand it. -Currently supported: raw and stage types with compression +Currently supported: raw and stage types with compression, padding empty areas + with empty files """ from __future__ import print_function @@ -102,6 +103,7 @@ ARCH_NAMES = { TYPE_CBFSHEADER = 0x02 # Master header, HEADER_FORMAT TYPE_STAGE = 0x10 # Stage, holding an executable, see STAGE_FORMAT TYPE_RAW = 0x50 # Raw file, possibly compressed +TYPE_EMPTY = 0xffffffff # Empty data # Compression types COMPRESS_NONE, COMPRESS_LZMA, COMPRESS_LZ4 = range(3) @@ -152,6 +154,19 @@ def align_int(val, align): """ return int((val + align - 1) / align) * align +def align_int_down(val, align): + """Align a value down to the given alignment + + Args: + val: Integer value to align + align: Integer alignment value (e.g. 
4 to align to 4-byte boundary) + + Returns: + integer value aligned to the required boundary, rounding down if + necessary + """ + return int(val / align) * align + def _pack_string(instr): """Pack a string to the required aligned size by adding padding @@ -184,6 +199,9 @@ class CbfsFile(object): entry: Entry address in memory if known, else None. This is where execution starts after the file is loaded base_address: Base address to use for 'stage' files + erase_byte: Erase byte to use for padding between the file header and + contents (used for empty files) + size: Size of the file in bytes (used for empty files) """ def __init__(self, name, ftype, data, compress=COMPRESS_NONE): self.name = name @@ -196,6 +214,8 @@ class CbfsFile(object): self.entry = None self.base_address = None self.data_len = 0 + self.erase_byte = None + self.size = None def decompress(self): """Handle decompressing data if necessary""" @@ -242,6 +262,24 @@ class CbfsFile(object): """ return CbfsFile(name, TYPE_RAW, data, compress) + @classmethod + def empty(cls, space_to_use, erase_byte): + """Create a new empty file of a given size + + Args: + space_to_use:: Size of available space, which must be at least as + large as the alignment size for this CBFS + erase_byte: Byte to use for contents of file (repeated through the + whole file) + + Returns: + CbfsFile object containing the file information + """ + cfile = CbfsFile('', TYPE_EMPTY, b'') + cfile.size = space_to_use - FILE_HEADER_LEN - FILENAME_ALIGN + cfile.erase_byte = erase_byte + return cfile + def get_data(self): """Obtain the contents of the file, in CBFS format @@ -270,6 +308,8 @@ class CbfsFile(object): attr = struct.pack(ATTR_COMPRESSION_FORMAT, FILE_ATTR_TAG_COMPRESSION, ATTR_COMPRESSION_LEN, self.compress, len(orig_data)) + elif self.ftype == TYPE_EMPTY: + data = tools.GetBytes(self.erase_byte, self.size) else: raise ValueError('Unknown type %#x when writing\n' % self.ftype) if attr: @@ -357,6 +397,24 @@ class CbfsWriter(object): (offset, fd.tell())) fd.write(tools.GetBytes(self._erase_byte, offset - fd.tell())) + def _pad_to(self, fd, offset): + """Write out pad bytes and/or an empty file until a given offset + + Args: + fd: File objext to write to + offset: Offset to write to + """ + self._align_to(fd, self._align) + upto = fd.tell() + if upto > offset: + raise ValueError('No space for data before pad offset %#x (current offset %#x)' % + (offset, upto)) + todo = align_int_down(offset - upto, self._align) + if todo: + cbf = CbfsFile.empty(todo, self._erase_byte) + fd.write(cbf.get_data()) + self._skip_to(fd, offset) + def _align_to(self, fd, align): """Write out pad bytes until a given alignment is reached @@ -416,7 +474,7 @@ class CbfsWriter(object): raise ValueError('No space for header at offset %#x (current offset %#x)' % (self._header_offset, fd.tell())) if not add_fileheader: - self._skip_to(fd, self._header_offset) + self._pad_to(fd, self._header_offset) hdr = struct.pack(HEADER_FORMAT, HEADER_MAGIC, HEADER_VERSION2, self._size, self._bootblock_size, self._align, self._contents_offset, self._arch, 0xffffffff) @@ -455,7 +513,7 @@ class CbfsWriter(object): self._write_header(fd, add_fileheader=self._add_fileheader) # Pad to the end and write a pointer to the CBFS master header - self._skip_to(fd, self._base_address or self._size - 4) + self._pad_to(fd, self._base_address or self._size - 4) rel_offset = self._header_offset - self._size fd.write(struct.pack(' Date: Mon, 8 Jul 2019 13:18:56 -0600 Subject: binman: Add support for fixed-offset files in 
CBFS A feature of CBFS is that it allows files to be positioned at particular offset (as with binman in general). This is useful to support execute-in-place (XIP) code, since this may not be relocatable. Add a new cbfs-offset property to control this. Signed-off-by: Simon Glass --- tools/binman/cbfs_util.py | 125 +++++++++++++++++++++++++++++++++++++--------- 1 file changed, 102 insertions(+), 23 deletions(-) (limited to 'tools/binman/cbfs_util.py') diff --git a/tools/binman/cbfs_util.py b/tools/binman/cbfs_util.py index ec4a2e5a8c6..1cdbcb2339e 100644 --- a/tools/binman/cbfs_util.py +++ b/tools/binman/cbfs_util.py @@ -12,7 +12,7 @@ it is necessary to rely on the C structures and source code (mostly cbfstool) to fully understand it. Currently supported: raw and stage types with compression, padding empty areas - with empty files + with empty files, fixed-offset files """ from __future__ import print_function @@ -190,6 +190,8 @@ class CbfsFile(object): Properties: name: Name of file offset: Offset of file data from start of file header + cbfs_offset: Offset of file data in bytes from start of CBFS, or None to + place this file anyway data: Contents of file, uncompressed data_len: Length of (possibly compressed) data in bytes ftype: File type (TYPE_...) @@ -203,9 +205,10 @@ class CbfsFile(object): contents (used for empty files) size: Size of the file in bytes (used for empty files) """ - def __init__(self, name, ftype, data, compress=COMPRESS_NONE): + def __init__(self, name, ftype, data, cbfs_offset, compress=COMPRESS_NONE): self.name = name self.offset = None + self.cbfs_offset = cbfs_offset self.data = data self.ftype = ftype self.compress = compress @@ -231,7 +234,7 @@ class CbfsFile(object): self.data_len = len(indata) @classmethod - def stage(cls, base_address, name, data): + def stage(cls, base_address, name, data, cbfs_offset): """Create a new stage file Args: @@ -239,28 +242,32 @@ class CbfsFile(object): name: String file name to put in CBFS (does not need to correspond to the name that the file originally came from) data: Contents of file + cbfs_offset: Offset of file data in bytes from start of CBFS, or + None to place this file anyway Returns: CbfsFile object containing the file information """ - cfile = CbfsFile(name, TYPE_STAGE, data) + cfile = CbfsFile(name, TYPE_STAGE, data, cbfs_offset) cfile.base_address = base_address return cfile @classmethod - def raw(cls, name, data, compress): + def raw(cls, name, data, cbfs_offset, compress): """Create a new raw file Args: name: String file name to put in CBFS (does not need to correspond to the name that the file originally came from) data: Contents of file + cbfs_offset: Offset of file data in bytes from start of CBFS, or + None to place this file anyway compress: Compression algorithm to use (COMPRESS_...) 
Returns: CbfsFile object containing the file information """ - return CbfsFile(name, TYPE_RAW, data, compress) + return CbfsFile(name, TYPE_RAW, data, cbfs_offset, compress) @classmethod def empty(cls, space_to_use, erase_byte): @@ -275,12 +282,44 @@ class CbfsFile(object): Returns: CbfsFile object containing the file information """ - cfile = CbfsFile('', TYPE_EMPTY, b'') + cfile = CbfsFile('', TYPE_EMPTY, b'', None) cfile.size = space_to_use - FILE_HEADER_LEN - FILENAME_ALIGN cfile.erase_byte = erase_byte return cfile - def get_data(self): + def calc_start_offset(self): + """Check if this file needs to start at a particular offset in CBFS + + Returns: + None if the file can be placed anywhere, or + the largest offset where the file could start (integer) + """ + if self.cbfs_offset is None: + return None + return self.cbfs_offset - self.get_header_len() + + def get_header_len(self): + """Get the length of headers required for a file + + This is the minimum length required before the actual data for this file + could start. It might start later if there is padding. + + Returns: + Total length of all non-data fields, in bytes + """ + name = _pack_string(self.name) + hdr_len = len(name) + FILE_HEADER_LEN + if self.ftype == TYPE_STAGE: + pass + elif self.ftype == TYPE_RAW: + hdr_len += ATTR_COMPRESSION_LEN + elif self.ftype == TYPE_EMPTY: + pass + else: + raise ValueError('Unknown file type %#x\n' % self.ftype) + return hdr_len + + def get_data(self, offset=None, pad_byte=None): """Obtain the contents of the file, in CBFS format Returns: @@ -292,6 +331,7 @@ class CbfsFile(object): attr_pos = 0 content = b'' attr = b'' + pad = b'' data = self.data if self.ftype == TYPE_STAGE: elf_data = elf.DecodeElf(data, self.base_address) @@ -315,10 +355,33 @@ class CbfsFile(object): if attr: attr_pos = hdr_len hdr_len += len(attr) - hdr = struct.pack(FILE_HEADER_FORMAT, FILE_MAGIC, - len(content) + len(data), + if self.cbfs_offset is not None: + pad_len = self.cbfs_offset - offset - hdr_len + if pad_len < 0: # pragma: no cover + # Test coverage of this is not available since this should never + # happen. It indicates that get_header_len() provided an + # incorrect value (too small) so that we decided that we could + # put this file at the requested place, but in fact a previous + # file extends far enough into the CBFS that this is not + # possible. + raise ValueError("Internal error: CBFS file '%s': Requested offset %#x but current output position is %#x" % + (self.name, self.cbfs_offset, offset)) + pad = tools.GetBytes(pad_byte, pad_len) + hdr_len += pad_len + self.offset = len(content) + len(data) + hdr = struct.pack(FILE_HEADER_FORMAT, FILE_MAGIC, self.offset, self.ftype, attr_pos, hdr_len) - return hdr + name + attr + content + data + + # Do a sanity check of the get_header_len() function, to ensure that it + # stays in lockstep with this function + expected_len = self.get_header_len() + actual_len = len(hdr + name + attr) + if expected_len != actual_len: # pragma: no cover + # Test coverage of this is not available since this should never + # happen. It probably indicates that get_header_len() is broken. 
+ raise ValueError("Internal error: CBFS file '%s': Expected headers of %#x bytes, got %#d" % + (self.name, expected_len, actual_len)) + return hdr + name + attr + pad + content + data class CbfsWriter(object): @@ -431,34 +494,39 @@ class CbfsWriter(object): if offset < self._size: self._skip_to(fd, offset) - def add_file_stage(self, name, data): + def add_file_stage(self, name, data, cbfs_offset=None): """Add a new stage file to the CBFS Args: name: String file name to put in CBFS (does not need to correspond to the name that the file originally came from) data: Contents of file + cbfs_offset: Offset of this file's data within the CBFS, in bytes, + or None to place this file anywhere Returns: CbfsFile object created """ - cfile = CbfsFile.stage(self._base_address, name, data) + cfile = CbfsFile.stage(self._base_address, name, data, cbfs_offset) self._files[name] = cfile return cfile - def add_file_raw(self, name, data, compress=COMPRESS_NONE): + def add_file_raw(self, name, data, cbfs_offset=None, + compress=COMPRESS_NONE): """Create a new raw file Args: name: String file name to put in CBFS (does not need to correspond to the name that the file originally came from) data: Contents of file + cbfs_offset: Offset of this file's data within the CBFS, in bytes, + or None to place this file anywhere compress: Compression algorithm to use (COMPRESS_...) Returns: CbfsFile object created """ - cfile = CbfsFile.raw(name, data, compress) + cfile = CbfsFile.raw(name, data, cbfs_offset, compress) self._files[name] = cfile return cfile @@ -507,7 +575,11 @@ class CbfsWriter(object): # Write out each file for cbf in self._files.values(): - fd.write(cbf.get_data()) + # Place the file at its requested place, if any + offset = cbf.calc_start_offset() + if offset is not None: + self._pad_to(fd, align_int_down(offset, self._align)) + fd.write(cbf.get_data(fd.tell(), self._erase_byte)) self._align_to(fd, self._align) if not self._hdr_at_start: self._write_header(fd, add_fileheader=self._add_fileheader) @@ -639,25 +711,27 @@ class CbfsReader(object): # Create the correct CbfsFile object depending on the type cfile = None - fd.seek(file_pos + offset, io.SEEK_SET) + cbfs_offset = file_pos + offset + fd.seek(cbfs_offset, io.SEEK_SET) if ftype == TYPE_CBFSHEADER: self._read_header(fd) elif ftype == TYPE_STAGE: data = fd.read(STAGE_LEN) - cfile = CbfsFile.stage(self.stage_base_address, name, b'') + cfile = CbfsFile.stage(self.stage_base_address, name, b'', + cbfs_offset) (cfile.compress, cfile.entry, cfile.load, cfile.data_len, cfile.memlen) = struct.unpack(STAGE_FORMAT, data) cfile.data = fd.read(cfile.data_len) elif ftype == TYPE_RAW: data = fd.read(size) - cfile = CbfsFile.raw(name, data, compress) + cfile = CbfsFile.raw(name, data, cbfs_offset, compress) cfile.decompress() if DEBUG: print('data', data) elif ftype == TYPE_EMPTY: # Just read the data and discard it, since it is only padding fd.read(size) - cfile = CbfsFile('', TYPE_EMPTY, b'') + cfile = CbfsFile('', TYPE_EMPTY, b'', cbfs_offset) else: raise ValueError('Unknown type %#x when reading\n' % ftype) if cfile: @@ -674,7 +748,8 @@ class CbfsReader(object): """Read attributes from the file CBFS files can have attributes which are things that cannot fit into the - header. The only attribute currently supported is compression. + header. The only attributes currently supported are compression and the + unused tag. 
Args: fd: File to read from @@ -703,6 +778,8 @@ class CbfsReader(object): # We don't currently use this information atag, alen, compress, _decomp_size = struct.unpack( ATTR_COMPRESSION_FORMAT, data) + elif atag == FILE_ATTR_TAG_UNUSED2: + break else: print('Unknown attribute tag %x' % atag) attr_size -= len(data) @@ -760,7 +837,7 @@ class CbfsReader(object): return val.decode('utf-8') -def cbfstool(fname, *cbfs_args): +def cbfstool(fname, *cbfs_args, **kwargs): """Run cbfstool with provided arguments If the tool fails then this function raises an exception and prints out the @@ -773,7 +850,9 @@ def cbfstool(fname, *cbfs_args): Returns: CommandResult object containing the results """ - args = ('cbfstool', fname) + cbfs_args + args = ['cbfstool', fname] + list(cbfs_args) + if kwargs.get('base') is not None: + args += ['-b', '%#x' % kwargs['base']] result = command.RunPipe([args], capture=not VERBOSE, capture_stderr=not VERBOSE, raise_on_error=False) if result.return_code: -- cgit v1.2.3 From 1223db038a5282b400eab3e4bd2a44fa8658ff4e Mon Sep 17 00:00:00 2001 From: Simon Glass Date: Mon, 8 Jul 2019 14:25:39 -0600 Subject: binman: Provide the actual data address for cbfs files At present a file with no explicit CBFS offset is placed in the next available location but there is no way to find out where it ended up. Update and rename the get_data() function to provide this information. Signed-off-by: Simon Glass --- tools/binman/cbfs_util.py | 31 ++++++++++++++++++++----------- 1 file changed, 20 insertions(+), 11 deletions(-) (limited to 'tools/binman/cbfs_util.py') diff --git a/tools/binman/cbfs_util.py b/tools/binman/cbfs_util.py index 1cdbcb2339e..530629a5c96 100644 --- a/tools/binman/cbfs_util.py +++ b/tools/binman/cbfs_util.py @@ -185,7 +185,8 @@ class CbfsFile(object): """Class to represent a single CBFS file This is used to hold the information about a file, including its contents. - Use the get_data() method to obtain the raw output for writing to CBFS. + Use the get_data_and_offset() method to obtain the raw output for writing to + CBFS. Properties: name: Name of file @@ -319,12 +320,15 @@ class CbfsFile(object): raise ValueError('Unknown file type %#x\n' % self.ftype) return hdr_len - def get_data(self, offset=None, pad_byte=None): - """Obtain the contents of the file, in CBFS format + def get_data_and_offset(self, offset=None, pad_byte=None): + """Obtain the contents of the file, in CBFS format and the offset of + the data within the file Returns: - bytes representing the contents of this file, packed and aligned - for directly inserting into the final CBFS output + tuple: + bytes representing the contents of this file, packed and aligned + for directly inserting into the final CBFS output + offset to the file data from the start of the returned data. """ name = _pack_string(self.name) hdr_len = len(name) + FILE_HEADER_LEN @@ -368,8 +372,10 @@ class CbfsFile(object): (self.name, self.cbfs_offset, offset)) pad = tools.GetBytes(pad_byte, pad_len) hdr_len += pad_len - self.offset = len(content) + len(data) - hdr = struct.pack(FILE_HEADER_FORMAT, FILE_MAGIC, self.offset, + + # This is the offset of the start of the file's data, + size = len(content) + len(data) + hdr = struct.pack(FILE_HEADER_FORMAT, FILE_MAGIC, size, self.ftype, attr_pos, hdr_len) # Do a sanity check of the get_header_len() function, to ensure that it @@ -381,7 +387,7 @@ class CbfsFile(object): # happen. It probably indicates that get_header_len() is broken. 
raise ValueError("Internal error: CBFS file '%s': Expected headers of %#x bytes, got %#d" % (self.name, expected_len, actual_len)) - return hdr + name + attr + pad + content + data + return hdr + name + attr + pad + content + data, hdr_len class CbfsWriter(object): @@ -392,7 +398,7 @@ class CbfsWriter(object): cbw = CbfsWriter(size) cbw.add_file_raw('u-boot', tools.ReadFile('u-boot.bin')) ... - data = cbw.get_data() + data, cbfs_offset = cbw.get_data_and_offset() Attributes: _master_name: Name of the file containing the master header @@ -475,7 +481,7 @@ class CbfsWriter(object): todo = align_int_down(offset - upto, self._align) if todo: cbf = CbfsFile.empty(todo, self._erase_byte) - fd.write(cbf.get_data()) + fd.write(cbf.get_data_and_offset()[0]) self._skip_to(fd, offset) def _align_to(self, fd, align): @@ -579,8 +585,11 @@ class CbfsWriter(object): offset = cbf.calc_start_offset() if offset is not None: self._pad_to(fd, align_int_down(offset, self._align)) - fd.write(cbf.get_data(fd.tell(), self._erase_byte)) + pos = fd.tell() + data, data_offset = cbf.get_data_and_offset(pos, self._erase_byte) + fd.write(data) self._align_to(fd, self._align) + cbf.calced_cbfs_offset = pos + data_offset if not self._hdr_at_start: self._write_header(fd, add_fileheader=self._add_fileheader) -- cgit v1.2.3 From 52107ee4df878d26923a498b62beedbbaa5c1f7e Mon Sep 17 00:00:00 2001 From: Simon Glass Date: Mon, 8 Jul 2019 14:25:40 -0600 Subject: binman: Use the cbfs memlen field only for uncompressed length The purpose of this badly named field is a bit ambiguous. Adjust the code to use it only to store the uncompressed length of a file, leaving it set to None if there is no compression used. This makes it easy to see if the value in this field is relevant / useful. Also set data_len for compressed fields, since it should be the length of the compressed data, not the uncompressed data. Signed-off-by: Simon Glass --- tools/binman/cbfs_util.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'tools/binman/cbfs_util.py') diff --git a/tools/binman/cbfs_util.py b/tools/binman/cbfs_util.py index 530629a5c96..4691be4aee2 100644 --- a/tools/binman/cbfs_util.py +++ b/tools/binman/cbfs_util.py @@ -197,7 +197,8 @@ class CbfsFile(object): data_len: Length of (possibly compressed) data in bytes ftype: File type (TYPE_...) compression: Compression type (COMPRESS_...) - memlen: Length of data in memory (typically the uncompressed length) + memlen: Length of data in memory, i.e. the uncompressed length, None if + no compression algortihm is selected load: Load address in memory if known, else None entry: Entry address in memory if known, else None. 
This is where execution starts after the file is loaded @@ -213,11 +214,11 @@ class CbfsFile(object): self.data = data self.ftype = ftype self.compress = compress - self.memlen = len(data) + self.memlen = None self.load = None self.entry = None self.base_address = None - self.data_len = 0 + self.data_len = len(data) self.erase_byte = None self.size = None @@ -349,9 +350,11 @@ class CbfsFile(object): data = tools.Compress(orig_data, 'lz4') elif self.compress == COMPRESS_LZMA: data = tools.Compress(orig_data, 'lzma') + self.memlen = len(orig_data) + self.data_len = len(data) attr = struct.pack(ATTR_COMPRESSION_FORMAT, FILE_ATTR_TAG_COMPRESSION, ATTR_COMPRESSION_LEN, - self.compress, len(orig_data)) + self.compress, self.memlen) elif self.ftype == TYPE_EMPTY: data = tools.GetBytes(self.erase_byte, self.size) else: -- cgit v1.2.3 From 3a9c252583785c3aabce834e8f9a54fa94685ee8 Mon Sep 17 00:00:00 2001 From: Simon Glass Date: Mon, 8 Jul 2019 14:25:51 -0600 Subject: binman: Support reading from CBFS entries CBFS is a bit like a section but with a custom format. Provide the list of entries and the compression type to binman so that it can extract the data from the CBFS, just like any other part of the image. Signed-off-by: Simon Glass --- tools/binman/cbfs_util.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'tools/binman/cbfs_util.py') diff --git a/tools/binman/cbfs_util.py b/tools/binman/cbfs_util.py index 4691be4aee2..45e16da0aaa 100644 --- a/tools/binman/cbfs_util.py +++ b/tools/binman/cbfs_util.py @@ -142,6 +142,20 @@ def find_compress(find_name): return compress return None +def compress_name(compress): + """Look up the name of a compression algorithm + + Args: + compress: Compression algorithm number to find (COMPRESS_...) + + Returns: + Compression algorithm name (string) + + Raises: + KeyError if the algorithm number is invalid + """ + return COMPRESS_NAMES[compress] + def align_int(val, align): """Align a value up to the given alignment -- cgit v1.2.3