Diffstat (limited to 'tools/binman')
-rw-r--r--tools/binman/.gitignore1
l---------tools/binman/README.rst1
-rw-r--r--tools/binman/__init__.py0
l---------tools/binman/binman1
-rw-r--r--tools/binman/binman.rst2401
-rw-r--r--tools/binman/bintool.py587
-rw-r--r--tools/binman/bintool_test.py358
-rw-r--r--tools/binman/bintools.rst218
-rw-r--r--tools/binman/btool/_testing.py36
-rw-r--r--tools/binman/btool/bootgen.py137
-rw-r--r--tools/binman/btool/btool_gzip.py31
-rw-r--r--tools/binman/btool/bzip2.py30
-rw-r--r--tools/binman/btool/cbfstool.py219
-rw-r--r--tools/binman/btool/fdt_add_pubkey.py67
-rw-r--r--tools/binman/btool/fiptool.py114
-rw-r--r--tools/binman/btool/futility.py176
-rw-r--r--tools/binman/btool/ifwitool.py166
-rw-r--r--tools/binman/btool/lz4.py128
-rw-r--r--tools/binman/btool/lzma_alone.py126
-rw-r--r--tools/binman/btool/lzop.py30
-rw-r--r--tools/binman/btool/mkeficapsule.py127
-rw-r--r--tools/binman/btool/mkimage.py69
-rw-r--r--tools/binman/btool/openssl.py359
-rw-r--r--tools/binman/btool/xz.py31
-rw-r--r--tools/binman/btool/zstd.py30
-rw-r--r--tools/binman/cbfs_util.py885
-rwxr-xr-xtools/binman/cbfs_util_test.py607
-rw-r--r--tools/binman/cmdline.py214
-rw-r--r--tools/binman/control.py880
-rw-r--r--tools/binman/elf.py573
-rw-r--r--tools/binman/elf_test.py393
-rw-r--r--tools/binman/entries.rst2867
-rw-r--r--tools/binman/entry.py1384
-rw-r--r--tools/binman/entry_test.py138
-rw-r--r--tools/binman/etype/_testing.py168
-rw-r--r--tools/binman/etype/atf_bl31.py24
-rw-r--r--tools/binman/etype/atf_fip.py273
-rw-r--r--tools/binman/etype/blob.py108
-rw-r--r--tools/binman/etype/blob_dtb.py91
-rw-r--r--tools/binman/etype/blob_ext.py28
-rw-r--r--tools/binman/etype/blob_ext_list.py59
-rw-r--r--tools/binman/etype/blob_named_by_arg.py36
-rw-r--r--tools/binman/etype/blob_phase.py59
-rw-r--r--tools/binman/etype/cbfs.py303
-rw-r--r--tools/binman/etype/collection.py70
-rw-r--r--tools/binman/etype/cros_ec_rw.py21
-rw-r--r--tools/binman/etype/efi_capsule.py155
-rw-r--r--tools/binman/etype/efi_empty_capsule.py86
-rw-r--r--tools/binman/etype/encrypted.py138
-rw-r--r--tools/binman/etype/fdtmap.py166
-rw-r--r--tools/binman/etype/files.py69
-rw-r--r--tools/binman/etype/fill.py34
-rw-r--r--tools/binman/etype/fit.py869
-rw-r--r--tools/binman/etype/fmap.py96
-rw-r--r--tools/binman/etype/gbb.py104
-rw-r--r--tools/binman/etype/image_header.py112
-rw-r--r--tools/binman/etype/intel_cmc.py22
-rw-r--r--tools/binman/etype/intel_descriptor.py80
-rw-r--r--tools/binman/etype/intel_fit.py32
-rw-r--r--tools/binman/etype/intel_fit_ptr.py41
-rw-r--r--tools/binman/etype/intel_fsp.py26
-rw-r--r--tools/binman/etype/intel_fsp_m.py26
-rw-r--r--tools/binman/etype/intel_fsp_s.py26
-rw-r--r--tools/binman/etype/intel_fsp_t.py25
-rw-r--r--tools/binman/etype/intel_ifwi.py148
-rw-r--r--tools/binman/etype/intel_me.py29
-rw-r--r--tools/binman/etype/intel_mrc.py26
-rw-r--r--tools/binman/etype/intel_refcode.py26
-rw-r--r--tools/binman/etype/intel_vbt.py21
-rw-r--r--tools/binman/etype/intel_vga.py24
-rw-r--r--tools/binman/etype/mkimage.py256
-rw-r--r--tools/binman/etype/null.py25
-rw-r--r--tools/binman/etype/nxp_imx8mimage.py74
-rw-r--r--tools/binman/etype/opensbi.py23
-rw-r--r--tools/binman/etype/powerpc_mpc85xx_bootpg_resetvec.py24
-rw-r--r--tools/binman/etype/pre_load.py163
-rw-r--r--tools/binman/etype/rockchip_tpl.py20
-rw-r--r--tools/binman/etype/scp.py19
-rw-r--r--tools/binman/etype/section.py1048
-rw-r--r--tools/binman/etype/tee_os.py96
-rw-r--r--tools/binman/etype/text.py78
-rw-r--r--tools/binman/etype/ti_board_config.py264
-rw-r--r--tools/binman/etype/ti_dm.py22
-rw-r--r--tools/binman/etype/ti_secure.py173
-rw-r--r--tools/binman/etype/ti_secure_rom.py256
-rw-r--r--tools/binman/etype/u_boot.py30
-rw-r--r--tools/binman/etype/u_boot_dtb.py31
-rw-r--r--tools/binman/etype/u_boot_dtb_with_ucode.py95
-rw-r--r--tools/binman/etype/u_boot_elf.py38
-rw-r--r--tools/binman/etype/u_boot_env.py42
-rw-r--r--tools/binman/etype/u_boot_expanded.py24
-rw-r--r--tools/binman/etype/u_boot_img.py27
-rw-r--r--tools/binman/etype/u_boot_nodtb.py27
-rw-r--r--tools/binman/etype/u_boot_spl.py39
-rw-r--r--tools/binman/etype/u_boot_spl_bss_pad.py44
-rw-r--r--tools/binman/etype/u_boot_spl_dtb.py28
-rw-r--r--tools/binman/etype/u_boot_spl_elf.py25
-rw-r--r--tools/binman/etype/u_boot_spl_expanded.py45
-rw-r--r--tools/binman/etype/u_boot_spl_nodtb.py36
-rw-r--r--tools/binman/etype/u_boot_spl_pubkey_dtb.py112
-rw-r--r--tools/binman/etype/u_boot_spl_with_ucode_ptr.py25
-rw-r--r--tools/binman/etype/u_boot_tpl.py39
-rw-r--r--tools/binman/etype/u_boot_tpl_bss_pad.py44
-rw-r--r--tools/binman/etype/u_boot_tpl_dtb.py28
-rw-r--r--tools/binman/etype/u_boot_tpl_dtb_with_ucode.py25
-rw-r--r--tools/binman/etype/u_boot_tpl_elf.py25
-rw-r--r--tools/binman/etype/u_boot_tpl_expanded.py45
-rw-r--r--tools/binman/etype/u_boot_tpl_nodtb.py36
-rw-r--r--tools/binman/etype/u_boot_tpl_with_ucode_ptr.py27
-rw-r--r--tools/binman/etype/u_boot_ucode.py100
-rw-r--r--tools/binman/etype/u_boot_vpl.py36
-rw-r--r--tools/binman/etype/u_boot_vpl_bss_pad.py44
-rw-r--r--tools/binman/etype/u_boot_vpl_dtb.py28
-rw-r--r--tools/binman/etype/u_boot_vpl_elf.py25
-rw-r--r--tools/binman/etype/u_boot_vpl_expanded.py45
-rw-r--r--tools/binman/etype/u_boot_vpl_nodtb.py34
-rw-r--r--tools/binman/etype/u_boot_with_ucode_ptr.py96
-rw-r--r--tools/binman/etype/vblock.py102
-rw-r--r--tools/binman/etype/x509_cert.py166
-rw-r--r--tools/binman/etype/x86_reset16.py29
-rw-r--r--tools/binman/etype/x86_reset16_spl.py29
-rw-r--r--tools/binman/etype/x86_reset16_tpl.py29
-rw-r--r--tools/binman/etype/x86_start16.py31
-rw-r--r--tools/binman/etype/x86_start16_spl.py31
-rw-r--r--tools/binman/etype/x86_start16_tpl.py32
-rw-r--r--tools/binman/etype/xilinx_bootgen.py225
-rw-r--r--tools/binman/fdt_test.py86
-rwxr-xr-xtools/binman/fip_util.py627
-rwxr-xr-xtools/binman/fip_util_test.py398
-rw-r--r--tools/binman/fmap_util.py121
-rw-r--r--tools/binman/ftest.py7464
-rw-r--r--tools/binman/image.py420
-rw-r--r--tools/binman/image_test.py44
-rw-r--r--tools/binman/index.rst9
-rwxr-xr-xtools/binman/main.py152
-rw-r--r--tools/binman/missing-blob-help52
-rw-r--r--tools/binman/pyproject.toml29
-rw-r--r--tools/binman/setup.py12
-rw-r--r--tools/binman/state.py536
-rw-r--r--tools/binman/test/001_invalid.dts5
-rw-r--r--tools/binman/test/002_missing_node.dts6
-rw-r--r--tools/binman/test/003_empty.dts9
-rw-r--r--tools/binman/test/004_invalid_entry.dts11
-rw-r--r--tools/binman/test/005_simple.dts11
-rw-r--r--tools/binman/test/006_dual_image.dts22
-rw-r--r--tools/binman/test/007_bad_align.dts12
-rw-r--r--tools/binman/test/008_pack.dts30
-rw-r--r--tools/binman/test/009_pack_extra.dts42
-rw-r--r--tools/binman/test/010_pack_align_power2.dts12
-rw-r--r--tools/binman/test/011_pack_align_size_power2.dts12
-rw-r--r--tools/binman/test/012_pack_inv_align.dts13
-rw-r--r--tools/binman/test/013_pack_inv_size_align.dts13
-rw-r--r--tools/binman/test/014_pack_overlap.dts16
-rw-r--r--tools/binman/test/015_pack_overflow.dts12
-rw-r--r--tools/binman/test/016_pack_image_overflow.dts13
-rw-r--r--tools/binman/test/017_pack_image_size.dts13
-rw-r--r--tools/binman/test/018_pack_image_align.dts13
-rw-r--r--tools/binman/test/019_pack_inv_image_align.dts14
-rw-r--r--tools/binman/test/020_pack_inv_image_align_power2.dts13
-rw-r--r--tools/binman/test/021_image_pad.dts16
-rw-r--r--tools/binman/test/022_image_name.dts21
-rw-r--r--tools/binman/test/023_blob.dts12
-rw-r--r--tools/binman/test/024_sorted.dts17
-rw-r--r--tools/binman/test/025_pack_zero_size.dts15
-rw-r--r--tools/binman/test/026_pack_u_boot_dtb.dts14
-rw-r--r--tools/binman/test/027_pack_4gb_no_size.dts18
-rw-r--r--tools/binman/test/028_pack_4gb_outside.dts19
-rw-r--r--tools/binman/test/029_x86_rom.dts19
-rw-r--r--tools/binman/test/030_x86_rom_me_no_desc.dts16
-rw-r--r--tools/binman/test/031_x86_rom_me.dts20
-rw-r--r--tools/binman/test/032_intel_vga.dts14
-rw-r--r--tools/binman/test/033_x86_start16.dts13
-rw-r--r--tools/binman/test/034_x86_ucode.dts29
-rw-r--r--tools/binman/test/035_x86_single_ucode.dts26
-rw-r--r--tools/binman/test/036_u_boot_img.dts11
-rw-r--r--tools/binman/test/037_x86_no_ucode.dts20
-rw-r--r--tools/binman/test/038_x86_ucode_missing_node.dts26
-rw-r--r--tools/binman/test/039_x86_ucode_missing_node2.dts23
-rw-r--r--tools/binman/test/040_x86_ucode_not_in_image.dts28
-rw-r--r--tools/binman/test/041_unknown_pos_size.dts12
-rw-r--r--tools/binman/test/042_intel_fsp.dts14
-rw-r--r--tools/binman/test/043_intel_cmc.dts14
-rw-r--r--tools/binman/test/044_x86_optional_ucode.dts30
-rw-r--r--tools/binman/test/045_prop_test.dts23
-rw-r--r--tools/binman/test/046_intel_vbt.dts14
-rw-r--r--tools/binman/test/047_spl_bss_pad.dts17
-rw-r--r--tools/binman/test/048_x86_start16_spl.dts13
-rw-r--r--tools/binman/test/049_x86_ucode_spl.dts29
-rw-r--r--tools/binman/test/050_intel_mrc.dts13
-rw-r--r--tools/binman/test/051_u_boot_spl_dtb.dts13
-rw-r--r--tools/binman/test/052_u_boot_spl_nodtb.dts11
-rw-r--r--tools/binman/test/053_symbols.dts20
-rw-r--r--tools/binman/test/054_unit_address.dts15
-rw-r--r--tools/binman/test/055_sections.dts32
-rw-r--r--tools/binman/test/056_name_prefix.dts30
-rw-r--r--tools/binman/test/057_unknown_contents.dts14
-rw-r--r--tools/binman/test/058_x86_ucode_spl_needs_retry.dts36
-rw-r--r--tools/binman/test/059_change_size.dts14
-rw-r--r--tools/binman/test/060_fdt_update.dts31
-rw-r--r--tools/binman/test/061_fdt_update_bad.dts32
-rw-r--r--tools/binman/test/062_entry_args.dts14
-rw-r--r--tools/binman/test/063_entry_args_missing.dts13
-rw-r--r--tools/binman/test/064_entry_args_required.dts14
-rw-r--r--tools/binman/test/065_entry_args_unknown_datatype.dts15
-rw-r--r--tools/binman/test/066_text.dts33
-rw-r--r--tools/binman/test/067_fmap.dts30
-rw-r--r--tools/binman/test/068_blob_named_by_arg.dts12
-rw-r--r--tools/binman/test/069_fill.dts15
-rw-r--r--tools/binman/test/070_fill_no_size.dts14
-rw-r--r--tools/binman/test/071_gbb.dts31
-rw-r--r--tools/binman/test/072_gbb_too_small.dts10
-rw-r--r--tools/binman/test/073_gbb_no_size.dts9
-rw-r--r--tools/binman/test/074_vblock.dts28
-rw-r--r--tools/binman/test/075_vblock_no_content.dts23
-rw-r--r--tools/binman/test/076_vblock_bad_phandle.dts24
-rw-r--r--tools/binman/test/077_vblock_bad_entry.dts27
-rw-r--r--tools/binman/test/078_u_boot_tpl.dts11
-rw-r--r--tools/binman/test/079_uses_pos.dts10
-rw-r--r--tools/binman/test/080_fill_empty.dts15
-rw-r--r--tools/binman/test/081_x86_start16_tpl.dts14
-rw-r--r--tools/binman/test/082_fdt_update_all.dts20
-rw-r--r--tools/binman/test/083_compress.dts11
-rw-r--r--tools/binman/test/084_files.dts11
-rw-r--r--tools/binman/test/085_files_compress.dts11
-rw-r--r--tools/binman/test/086_files_none.dts12
-rw-r--r--tools/binman/test/087_files_no_pattern.dts11
-rw-r--r--tools/binman/test/088_extend_size.dts43
-rw-r--r--tools/binman/test/089_extend_size_bad.dts14
-rw-r--r--tools/binman/test/090_hash.dts12
-rw-r--r--tools/binman/test/091_hash_no_algo.dts11
-rw-r--r--tools/binman/test/092_hash_bad_algo.dts12
-rw-r--r--tools/binman/test/093_x86_tpl_ucode.dts29
-rw-r--r--tools/binman/test/094_fmap_x86.dts20
-rw-r--r--tools/binman/test/095_fmap_x86_section.dts22
-rw-r--r--tools/binman/test/096_elf.dts16
-rw-r--r--tools/binman/test/097_elf_strip.dts15
-rw-r--r--tools/binman/test/098_4gb_and_skip_at_start_together.dts21
-rw-r--r--tools/binman/test/099_hash_section.dts18
-rw-r--r--tools/binman/test/100_intel_refcode.dts14
-rw-r--r--tools/binman/test/101_sections_offset.dts35
-rw-r--r--tools/binman/test/102_cbfs_raw.dts20
-rw-r--r--tools/binman/test/103_cbfs_raw_ppc.dts21
-rw-r--r--tools/binman/test/104_cbfs_stage.dts19
-rw-r--r--tools/binman/test/105_cbfs_raw_compress.dts26
-rw-r--r--tools/binman/test/106_cbfs_bad_arch.dts15
-rw-r--r--tools/binman/test/107_cbfs_no_size.dts13
-rw-r--r--tools/binman/test/108_cbfs_no_contents.dts17
-rw-r--r--tools/binman/test/109_cbfs_bad_compress.dts18
-rw-r--r--tools/binman/test/110_cbfs_name.dts24
-rw-r--r--tools/binman/test/111_x86_rom_ifwi.dts29
-rw-r--r--tools/binman/test/112_x86_rom_ifwi_nodesc.dts28
-rw-r--r--tools/binman/test/113_x86_rom_ifwi_nodata.dts29
-rw-r--r--tools/binman/test/114_cbfs_offset.dts26
-rw-r--r--tools/binman/test/115_fdtmap.dts13
-rw-r--r--tools/binman/test/116_fdtmap_hdr.dts17
-rw-r--r--tools/binman/test/117_fdtmap_hdr_start.dts19
-rw-r--r--tools/binman/test/118_fdtmap_hdr_pos.dts19
-rw-r--r--tools/binman/test/119_fdtmap_hdr_missing.dts16
-rw-r--r--tools/binman/test/120_hdr_no_location.dts16
-rw-r--r--tools/binman/test/121_entry_extend.dts20
-rw-r--r--tools/binman/test/122_entry_extend_twice.dts21
-rw-r--r--tools/binman/test/123_entry_extend_section.dts22
-rw-r--r--tools/binman/test/124_compress_dtb.dts14
-rw-r--r--tools/binman/test/125_cbfs_update.dts21
-rw-r--r--tools/binman/test/126_cbfs_bad_type.dts17
-rw-r--r--tools/binman/test/127_list.dts33
-rw-r--r--tools/binman/test/128_decode_image.dts36
-rw-r--r--tools/binman/test/129_decode_image_nohdr.dts33
-rw-r--r--tools/binman/test/130_list_fdtmap.dts36
-rw-r--r--tools/binman/test/131_pack_align_section.dts28
-rw-r--r--tools/binman/test/132_replace.dts21
-rw-r--r--tools/binman/test/133_replace_multi.dts33
-rw-r--r--tools/binman/test/134_fdt_update_all_repack.dts23
-rw-r--r--tools/binman/test/135_fdtmap_hdr_middle.dts16
-rw-r--r--tools/binman/test/136_fdtmap_hdr_startbad.dts16
-rw-r--r--tools/binman/test/137_fdtmap_hdr_endbad.dts16
-rw-r--r--tools/binman/test/138_fdtmap_hdr_nosize.dts16
-rw-r--r--tools/binman/test/139_replace_repack.dts22
-rw-r--r--tools/binman/test/140_entry_shrink.dts20
-rw-r--r--tools/binman/test/141_descriptor_offset.dts20
-rw-r--r--tools/binman/test/142_replace_cbfs.dts37
-rw-r--r--tools/binman/test/143_replace_all.dts28
-rw-r--r--tools/binman/test/144_x86_reset16.dts13
-rw-r--r--tools/binman/test/145_x86_reset16_spl.dts13
-rw-r--r--tools/binman/test/146_x86_reset16_tpl.dts13
-rw-r--r--tools/binman/test/147_intel_fit.dts20
-rw-r--r--tools/binman/test/148_intel_fit_missing.dts17
-rw-r--r--tools/binman/test/149_symbols_tpl.dts27
-rw-r--r--tools/binman/test/150_powerpc_mpc85xx_bootpg_resetvec.dts16
-rw-r--r--tools/binman/test/151_x86_rom_ifwi_section.dts33
-rw-r--r--tools/binman/test/152_intel_fsp_m.dts14
-rw-r--r--tools/binman/test/153_intel_fsp_s.dts14
-rw-r--r--tools/binman/test/154_intel_fsp_t.dts14
-rw-r--r--tools/binman/test/155_symbols_tpl_x86.dts30
-rw-r--r--tools/binman/test/156_mkimage.dts23
-rw-r--r--tools/binman/test/157_blob_ext.dts14
-rw-r--r--tools/binman/test/158_blob_ext_missing.dts16
-rw-r--r--tools/binman/test/159_blob_ext_missing_sect.dts23
-rw-r--r--tools/binman/test/160_pack_overlap_zero.dts18
-rw-r--r--tools/binman/test/161_fit.dts62
-rw-r--r--tools/binman/test/162_fit_external.dts64
-rw-r--r--tools/binman/test/163_x86_rom_me_empty.dts22
-rw-r--r--tools/binman/test/164_x86_rom_me_missing.dts22
-rw-r--r--tools/binman/test/165_section_ignore_hash_signature.dts40
-rw-r--r--tools/binman/test/166_pad_in_sections.dts26
-rw-r--r--tools/binman/test/167_fit_image_subentry_alignment.dts57
-rw-r--r--tools/binman/test/168_fit_missing_blob.dts48
-rw-r--r--tools/binman/test/169_atf_bl31.dts16
-rw-r--r--tools/binman/test/170_fit_fdt.dts58
-rw-r--r--tools/binman/test/171_fit_fdt_missing_prop.dts54
-rw-r--r--tools/binman/test/172_scp.dts16
-rw-r--r--tools/binman/test/173_missing_blob.dts14
-rw-r--r--tools/binman/test/174_env.dts20
-rw-r--r--tools/binman/test/175_env_no_size.dts19
-rw-r--r--tools/binman/test/176_env_too_small.dts20
-rw-r--r--tools/binman/test/177_skip_at_start.dts19
-rw-r--r--tools/binman/test/178_skip_at_start_pad.dts21
-rw-r--r--tools/binman/test/179_skip_at_start_section_pad.dts22
-rw-r--r--tools/binman/test/180_section_pad.dts27
-rw-r--r--tools/binman/test/181_section_align.dts34
-rw-r--r--tools/binman/test/182_compress_image.dts14
-rw-r--r--tools/binman/test/183_compress_image_less.dts14
-rw-r--r--tools/binman/test/184_compress_section_size.dts17
-rw-r--r--tools/binman/test/185_compress_section.dts16
-rw-r--r--tools/binman/test/186_compress_extra.dts37
-rw-r--r--tools/binman/test/187_symbols_sub.dts22
-rw-r--r--tools/binman/test/188_image_entryarg.dts21
-rw-r--r--tools/binman/test/189_vblock_content.dts31
-rw-r--r--tools/binman/test/190_files_align.dts12
-rw-r--r--tools/binman/test/191_read_image_skip.dts23
-rw-r--r--tools/binman/test/192_u_boot_tpl_nodtb.dts13
-rw-r--r--tools/binman/test/193_tpl_bss_pad.dts19
-rw-r--r--tools/binman/test/194_fdt_incl.dts17
-rw-r--r--tools/binman/test/195_fdt_incl_tpl.dts13
-rw-r--r--tools/binman/test/196_symbols_nodtb.dts26
-rw-r--r--tools/binman/test/197_symbols_expand.dts23
-rw-r--r--tools/binman/test/198_collection.dts27
-rw-r--r--tools/binman/test/199_collection_section.dts32
-rw-r--r--tools/binman/test/200_align_default.dts30
-rw-r--r--tools/binman/test/201_opensbi.dts14
-rw-r--r--tools/binman/test/202_section_timeout.dts21
-rw-r--r--tools/binman/test/203_fip.dts21
-rw-r--r--tools/binman/test/204_fip_other.dts22
-rw-r--r--tools/binman/test/205_fip_no_type.dts15
-rw-r--r--tools/binman/test/206_fip_uuid.dts22
-rw-r--r--tools/binman/test/207_fip_ls.dts25
-rw-r--r--tools/binman/test/208_fip_replace.dts33
-rw-r--r--tools/binman/test/209_fip_missing.dts19
-rw-r--r--tools/binman/test/210_fip_size.dts19
-rw-r--r--tools/binman/test/211_fip_bad_align.dts18
-rw-r--r--tools/binman/test/212_fip_collection.dts24
-rw-r--r--tools/binman/test/213_fdtmap_alt_format.dts15
-rw-r--r--tools/binman/test/214_no_alt_format.dts13
-rw-r--r--tools/binman/test/215_blob_ext_list.dts14
-rw-r--r--tools/binman/test/216_blob_ext_list_missing.dts14
-rw-r--r--tools/binman/test/217_fake_blob.dts14
-rw-r--r--tools/binman/test/218_blob_ext_list_fake.dts14
-rw-r--r--tools/binman/test/219_fit_gennode.dts26
-rw-r--r--tools/binman/test/220_fit_subentry_bintool.dts39
-rw-r--r--tools/binman/test/221_fit_subentry_hash.dts52
-rw-r--r--tools/binman/test/222_tee_os.dts14
-rw-r--r--tools/binman/test/223_fit_fdt_oper.dts56
-rw-r--r--tools/binman/test/224_fit_bad_oper.dts25
-rw-r--r--tools/binman/test/225_expand_size_bad.dts10
-rw-r--r--tools/binman/test/225_ti_dm.dts13
-rw-r--r--tools/binman/test/226_fit_split_elf.dts73
-rw-r--r--tools/binman/test/227_fit_bad_dir.dts9
-rw-r--r--tools/binman/test/228_fit_bad_dir_config.dts9
-rw-r--r--tools/binman/test/229_mkimage_missing.dts18
-rw-r--r--tools/binman/test/230_pre_load.dts22
-rw-r--r--tools/binman/test/231_pre_load_pkcs.dts23
-rw-r--r--tools/binman/test/232_pre_load_pss.dts23
-rw-r--r--tools/binman/test/233_pre_load_invalid_padding.dts23
-rw-r--r--tools/binman/test/234_pre_load_invalid_sha.dts23
-rw-r--r--tools/binman/test/235_pre_load_invalid_algo.dts23
-rw-r--r--tools/binman/test/236_pre_load_invalid_key.dts23
-rw-r--r--tools/binman/test/237_unique_names.dts34
-rw-r--r--tools/binman/test/238_unique_names_multi.dts38
-rw-r--r--tools/binman/test/239_replace_with_bintool.dts39
-rw-r--r--tools/binman/test/240_fit_extract_replace.dts74
-rw-r--r--tools/binman/test/241_replace_section_simple.dts23
-rw-r--r--tools/binman/test/242_mkimage_name.dts18
-rw-r--r--tools/binman/test/243_mkimage_image.dts21
-rw-r--r--tools/binman/test/244_mkimage_image_no_content.dts22
-rw-r--r--tools/binman/test/245_mkimage_image_bad.dts22
-rw-r--r--tools/binman/test/246_collection_other.dts29
-rw-r--r--tools/binman/test/247_mkimage_coll.dts27
-rw-r--r--tools/binman/test/248_compress_dtb_prepend_invalid.dts17
-rw-r--r--tools/binman/test/249_compress_dtb_prepend_length.dts19
-rw-r--r--tools/binman/test/250_compress_dtb_invalid.dts16
-rw-r--r--tools/binman/test/251_compress_dtb_zstd.dts16
-rw-r--r--tools/binman/test/252_mkimage_mult_data.dts21
-rw-r--r--tools/binman/test/253_mkimage_mult_no_content.dts22
-rw-r--r--tools/binman/test/254_mkimage_filename.dts18
-rw-r--r--tools/binman/test/255_u_boot_vpl.dts11
-rw-r--r--tools/binman/test/256_u_boot_vpl_nodtb.dts13
-rw-r--r--tools/binman/test/257_fdt_incl_vpl.dts13
-rw-r--r--tools/binman/test/258_vpl_bss_pad.dts19
-rw-r--r--tools/binman/test/259_symlink.dts16
-rw-r--r--tools/binman/test/260_symbols_elf.dts27
-rw-r--r--tools/binman/test/261_section_fname.dts29
-rw-r--r--tools/binman/test/262_absent.dts20
-rw-r--r--tools/binman/test/263_tee_os_opt.dts22
-rw-r--r--tools/binman/test/264_tee_os_opt_fit.dts34
-rw-r--r--tools/binman/test/265_tee_os_opt_fit_bad.dts40
-rw-r--r--tools/binman/test/266_blob_ext_opt.dts21
-rw-r--r--tools/binman/test/267_section_inner.dts16
-rw-r--r--tools/binman/test/268_null.dts19
-rw-r--r--tools/binman/test/269_overlap.dts21
-rw-r--r--tools/binman/test/270_overlap_null.dts24
-rw-r--r--tools/binman/test/271_overlap_bad.dts21
-rw-r--r--tools/binman/test/272_overlap_no_size.dts19
-rw-r--r--tools/binman/test/273_blob_symbol.dts24
-rw-r--r--tools/binman/test/274_offset_from_elf.dts30
-rw-r--r--tools/binman/test/275_fit_align.dts59
-rw-r--r--tools/binman/test/276_fit_firmware_loadables.dts96
-rw-r--r--tools/binman/test/277_replace_fit_sibling.dts61
-rw-r--r--tools/binman/test/278_replace_section_deep.dts25
-rw-r--r--tools/binman/test/279_x509_cert.dts19
-rw-r--r--tools/binman/test/280_fit_sign.dts63
-rw-r--r--tools/binman/test/281_sign_non_fit.dts65
-rw-r--r--tools/binman/test/282_symbols_disable.dts25
-rw-r--r--tools/binman/test/283_mkimage_special.dts24
-rw-r--r--tools/binman/test/284_fit_fdt_list.dts58
-rw-r--r--tools/binman/test/285_spl_expand.dts13
-rw-r--r--tools/binman/test/286_template.dts42
-rw-r--r--tools/binman/test/287_template_multi.dts27
-rw-r--r--tools/binman/test/288_template_fit.dts37
-rw-r--r--tools/binman/test/289_template_section.dts52
-rw-r--r--tools/binman/test/290_mkimage_sym.dts27
-rw-r--r--tools/binman/test/291_rockchip_tpl.dts16
-rw-r--r--tools/binman/test/292_mkimage_missing_multiple.dts19
-rw-r--r--tools/binman/test/293_ti_board_cfg.dts14
-rw-r--r--tools/binman/test/294_ti_board_cfg_combined.dts25
-rw-r--r--tools/binman/test/295_ti_board_cfg_no_type.dts11
-rw-r--r--tools/binman/test/296_ti_secure.dts17
-rw-r--r--tools/binman/test/297_ti_secure_rom.dts18
-rw-r--r--tools/binman/test/298_ti_secure_rom_combined.dts25
-rw-r--r--tools/binman/test/299_ti_secure_rom_a.dts19
-rw-r--r--tools/binman/test/300_ti_secure_rom_b.dts18
-rw-r--r--tools/binman/test/301_encrypted_no_algo.dts15
-rw-r--r--tools/binman/test/302_encrypted_invalid_iv_file.dts18
-rw-r--r--tools/binman/test/303_encrypted_missing_key.dts23
-rw-r--r--tools/binman/test/304_encrypted_key_source.dts24
-rw-r--r--tools/binman/test/305_encrypted_key_file.dts24
-rw-r--r--tools/binman/test/306_spl_pubkey_dtb.dts16
-rw-r--r--tools/binman/test/307_xilinx_bootgen_sign.dts22
-rw-r--r--tools/binman/test/308_xilinx_bootgen_sign_enc.dts24
-rw-r--r--tools/binman/test/309_template_phandle.dts51
-rw-r--r--tools/binman/test/310_template_phandle_dup.dts65
-rw-r--r--tools/binman/test/311_capsule.dts18
-rw-r--r--tools/binman/test/312_capsule_signed.dts20
-rw-r--r--tools/binman/test/313_capsule_version.dts19
-rw-r--r--tools/binman/test/314_capsule_signed_ver.dts21
-rw-r--r--tools/binman/test/315_capsule_oemflags.dts19
-rw-r--r--tools/binman/test/316_capsule_missing_key.dts19
-rw-r--r--tools/binman/test/317_capsule_missing_index.dts17
-rw-r--r--tools/binman/test/318_capsule_missing_guid.dts16
-rw-r--r--tools/binman/test/319_capsule_accept.dts13
-rw-r--r--tools/binman/test/320_capsule_revert.dts11
-rw-r--r--tools/binman/test/321_capsule_accept_missing_guid.dts11
-rw-r--r--tools/binman/test/322_empty_capsule_type_missing.dts12
-rw-r--r--tools/binman/test/323_capsule_accept_revert_missing.dts13
-rw-r--r--tools/binman/test/323_ti_board_cfg_phony.dts14
-rw-r--r--tools/binman/test/324_ti_secure_firewall.dts28
-rw-r--r--tools/binman/test/325_ti_secure_firewall_missing_property.dts28
-rw-r--r--tools/binman/test/Makefile99
-rw-r--r--tools/binman/test/blob_syms.c18
-rw-r--r--tools/binman/test/blob_syms.lds30
-rw-r--r--tools/binman/test/bss_data.c16
-rw-r--r--tools/binman/test/bss_data.lds15
-rw-r--r--tools/binman/test/bss_data_zero.c16
-rw-r--r--tools/binman/test/bss_data_zero.lds15
-rw-r--r--tools/binman/test/descriptor.binbin0 -> 4096 bytes
-rw-r--r--tools/binman/test/dev.key28
-rw-r--r--tools/binman/test/elf_sections.c20
-rw-r--r--tools/binman/test/elf_sections.lds31
-rw-r--r--tools/binman/test/embed_data.c17
-rw-r--r--tools/binman/test/embed_data.lds24
-rw-r--r--tools/binman/test/files/1.dat1
-rw-r--r--tools/binman/test/files/2.dat1
-rw-r--r--tools/binman/test/files/ignored_dir.dat/ignore0
-rw-r--r--tools/binman/test/files/not-this-one1
-rw-r--r--tools/binman/test/fitimage.bin.gzbin0 -> 8418 bytes
-rw-r--r--tools/binman/test/generated/autoconf.h3
-rw-r--r--tools/binman/test/ifwi.bin.gzbin0 -> 1884 bytes
-rw-r--r--tools/binman/test/key.key52
-rw-r--r--tools/binman/test/key.pem32
-rw-r--r--tools/binman/test/u_boot_binman_embed.c13
-rw-r--r--tools/binman/test/u_boot_binman_embed.lds29
-rw-r--r--tools/binman/test/u_boot_binman_embed_sm.c13
-rw-r--r--tools/binman/test/u_boot_binman_syms.c16
-rw-r--r--tools/binman/test/u_boot_binman_syms.lds30
l---------tools/binman/test/u_boot_binman_syms_bad.c1
-rw-r--r--tools/binman/test/u_boot_binman_syms_bad.lds28
-rw-r--r--tools/binman/test/u_boot_binman_syms_size.c13
l---------tools/binman/test/u_boot_binman_syms_x86.c1
-rw-r--r--tools/binman/test/u_boot_binman_syms_x86.lds30
-rw-r--r--tools/binman/test/u_boot_no_ucode_ptr.c10
-rw-r--r--tools/binman/test/u_boot_ucode_ptr.c10
-rw-r--r--tools/binman/test/u_boot_ucode_ptr.lds18
-rw-r--r--tools/binman/test/yaml/config.yaml18
-rw-r--r--tools/binman/test/yaml/config_phony.yaml18
-rw-r--r--tools/binman/test/yaml/schema.yaml49
-rw-r--r--tools/binman/test/yaml/schema_notype.yaml38
505 files changed, 39617 insertions, 0 deletions
diff --git a/tools/binman/.gitignore b/tools/binman/.gitignore
new file mode 100644
index 00000000000..0d20b6487c6
--- /dev/null
+++ b/tools/binman/.gitignore
@@ -0,0 +1 @@
+*.pyc
diff --git a/tools/binman/README.rst b/tools/binman/README.rst
new file mode 120000
index 00000000000..b734f544b73
--- /dev/null
+++ b/tools/binman/README.rst
@@ -0,0 +1 @@
+binman.rst
\ No newline at end of file
diff --git a/tools/binman/__init__.py b/tools/binman/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/tools/binman/__init__.py
diff --git a/tools/binman/binman b/tools/binman/binman
new file mode 120000
index 00000000000..11a5d8e18ab
--- /dev/null
+++ b/tools/binman/binman
@@ -0,0 +1 @@
+main.py
\ No newline at end of file
diff --git a/tools/binman/binman.rst b/tools/binman/binman.rst
new file mode 100644
index 00000000000..230e055667f
--- /dev/null
+++ b/tools/binman/binman.rst
@@ -0,0 +1,2401 @@
+.. SPDX-License-Identifier: GPL-2.0+
+.. Copyright (c) 2016 Google, Inc
+
+Introduction
+============
+
+Firmware often consists of several components which must be packaged together.
+For example, we may have SPL, U-Boot, a device tree and an environment area
+grouped together and placed in MMC flash. When the system starts, it must be
+able to find these pieces.
+
+Building firmware should be separate from packaging it. Many of the complexities
+of modern firmware build systems come from trying to do both at once. With
+binman, you build all the pieces that are needed, using whatever assortment of
+projects and build systems are needed, then use binman to stitch everything
+together.
+
+
+What it does
+------------
+
+Binman reads your board's device tree and finds a node which describes the
+required image layout. It uses this to work out what to place where.
+
+Binman provides a mechanism for building images, from simple SPL + U-Boot
+combinations, to more complex arrangements with many parts. It also allows
+users to inspect images, and to extract and replace binaries within them,
+repacking as needed.
+
+
+Features
+--------
+
+Apart from basic padding, alignment and positioning features, Binman supports
+hierarchical images, compression, hashing and dealing with the binary blobs
+which are a sad trend in open-source firmware at present.
+
+Executable binaries can access the location of other binaries in an image by
+using special linker symbols (zero-overhead but somewhat limited) or by reading
+the devicetree description of the image.
+
+Binman is designed primarily for use with U-Boot and associated binaries such
+as ARM Trusted Firmware, but it is suitable for use with other projects, such
+as Zephyr. Binman also provides facilities useful in Chromium OS, such as CBFS,
+vblocks and the like.
+
+Binman provides a way to process binaries before they are included, by adding a
+Python plug-in.
+
+Binman is intended for use with U-Boot but is designed to be general enough
+to be useful in other image-packaging situations.
+
+
+Motivation
+----------
+
+As mentioned above, packaging of firmware is quite a different task from
+building the various parts. In many cases the various binaries which go into
+the image come from separate build systems. For example, ARM Trusted Firmware
+is used on ARMv8 devices but is not built in the U-Boot tree. If a Linux kernel
+is included in the firmware image, it is built elsewhere.
+
+It is of course possible to add more and more build rules to the U-Boot
+build system to cover these cases. It can shell out to other Makefiles and
+build scripts. But it seems better to create a clear divide between building
+software and packaging it.
+
+At present this is handled by manual instructions, different for each board,
+on how to create images that will boot. By turning these instructions into a
+standard format, we can support making valid images for any board without
+manual effort, lots of READMEs, etc.
+
+Benefits:
+
+ - Each binary can have its own build system and tool chain without creating
+ any dependencies between them
+ - Avoids the need for a single-shot build: individual parts can be updated
+ and brought in as needed
+ - Provides for a standard image description available in the build and at
+ run-time
+ - SoC-specific image-signing tools can be accommodated
+ - Avoids cluttering the U-Boot build system with image-building code
+ - The image description is automatically available at run-time in U-Boot,
+ SPL. It can be made available to other software also
+ - The image description is easily readable (it's a text file in device-tree
+ format) and permits flexible packing of binaries
+
+
+Terminology
+-----------
+
+Binman uses the following terms:
+
+- image - an output file containing a firmware image
+- binary - an input binary that goes into the image
+
+
+Installation
+------------
+
+You can install binman using::
+
+ pip install binary-manager
+
+The name is chosen since binman conflicts with an existing package.
+
+If you are using binman within the U-Boot tree, it may be easiest to add a
+symlink from your local `~/.bin` directory to `/path/to/tools/binman/binman`.
+
+
+Relationship to FIT
+-------------------
+
+FIT is U-Boot's official image format. It supports multiple binaries with
+load / execution addresses and compression. It also supports verification
+through hashing and RSA signatures.
+
+FIT was originally designed to support booting a Linux kernel (with an
+optional ramdisk) and device tree chosen from various options in the FIT.
+Now that U-Boot supports configuration via device tree, it is possible to
+load U-Boot from a FIT, with the device tree chosen by SPL.
+
+Binman considers FIT to be one of the binaries it can place in the image.
+
+Where possible it is best to put as much as possible in the FIT, with binman
+used to deal with cases not covered by FIT. Examples include initial
+execution (since FIT itself does not have an executable header) and dealing
+with device boundaries, such as the read-only/read-write separation in SPI
+flash.
+
+For U-Boot, binman should not be used to create ad-hoc images in place of
+FIT.
+
+Note that binman can itself create a FIT. This helps to move mkimage
+invocations out of the Makefile and into binman image descriptions. It also
+helps by removing the need for ad-hoc tools like `make_fit_atf.py`.
+
+
+Relationship to mkimage
+-----------------------
+
+The mkimage tool provides a means to create a FIT. Traditionally it has
+needed an image description file: a device tree, like binman, but in a
+different format. More recently it has started to support a '-f auto' mode
+which can generate that automatically.
+
+More relevant to binman, mkimage also permits creation of many SoC-specific
+image types. These can be listed by running 'mkimage -T list'. Examples
+include 'rksd', the Rockchip SD/MMC boot format. The mkimage tool is often
+called from the U-Boot build system for this reason.
+
+Binman considers the output files created by mkimage to be binary blobs
+which it can place in an image. Binman does not replace the mkimage tool for
+this purpose. It would be possible in some situations to create a new entry
+type for the images in mkimage, but this would not add functionality. It
+seems better to use the mkimage tool to generate binaries and avoid blurring
+the boundaries between building input files (mkimage) and packaging them
+into a final image (binman).
+
+Note that binman can itself invoke mkimage. This helps to move mkimage
+invocations out of the Makefile and into binman image descriptions.
+
+
+Using binman
+============
+
+Example use of binman in U-Boot
+-------------------------------
+
+Binman aims to replace some of the ad-hoc image creation in the U-Boot
+build system.
+
+Consider sunxi. It has the following steps:
+
+ #. It uses a custom mksunxiboot tool to build an SPL image called
+ sunxi-spl.bin. This should probably move into mkimage.
+
+ #. It uses mkimage to package U-Boot into a legacy image file (so that it can
+ hold the load and execution address) called u-boot.img.
+
+ #. It builds a final output image called u-boot-sunxi-with-spl.bin which
+ consists of sunxi-spl.bin, some padding and u-boot.img.
+
+Binman is intended to replace the last step. The U-Boot build system builds
+u-boot.bin and sunxi-spl.bin. Binman can then take over creation of
+sunxi-spl.bin by calling mksunxiboot or mkimage. In any case, it would then
+create the image from the component parts.
+
+This simplifies the U-Boot Makefile somewhat, since various pieces of logic
+can be replaced by a call to binman.
+
+
+Invoking binman within U-Boot
+-----------------------------
+
+Within U-Boot, binman is invoked by the build system, i.e. when you type 'make'
+or use buildman to build U-Boot. There is no need to run binman independently
+during development. Everything happens automatically and is set up for your
+SoC or board so that binman produces the right things.
+
+The general policy is that the Makefile builds all the binaries in INPUTS-y
+(the 'inputs' rule), then binman is run to produce the final images (the 'all'
+rule).
+
+There should be only one invocation of binman in the Makefile, the very last step
+that pulls everything together. At present there are some arch-specific
+invocations as well, but these should be dropped when those architectures are
+converted to use binman properly.
+
+As above, the term 'binary' is used for something in INPUTS-y and 'image' is
+used for the things that binman creates. So the binaries are inputs to the
+image(s) and it is the image that is actually loaded on the board.
+
+Again, at present, there are a number of things created in the Makefile which
+should be done by binman (when we get around to it), like `u-boot-ivt.img`,
+`lpc32xx-spl.img`, `u-boot-with-nand-spl.imx`, `u-boot-spl-padx4.sfp` and
+`u-boot-mtk.bin`, just to name a few. When completed, this will remove about
+400 lines from `Makefile`.
+
+Since binman is invoked only once, it must of course create all the images that
+are needed, in that one invocation. It does this by working through the image
+descriptions one by one, collecting the input binaries, processing them as
+needed and producing the final images.
+
+The same binaries may be used by multiple images. For example binman may be used
+to produce an SD-card image and a SPI-flash image. In this case the binaries
+going into the process are the same, but binman produces slightly different
+images in each case.
+
+For some SoCs, U-Boot is not the only project that produces the necessary
+binaries. For example, ARM Trusted Firmware (ATF) is a project that produces
+binaries which must be incorporated, such as `bl31.elf` or `bl31.bin`. For this
+to work you must have built ATF before you build U-Boot and you must tell U-Boot
+where to find the bl31 image, using the BL31 environment variable.
+
+How do you know how to incorporate ATF? It is handled by the atf-bl31 entry type
+(etype). An etype is an implementation of reading a binary into binman, in this
+case the `bl31.bin` file. When you build U-Boot but do not set the BL31
+environment variable, binman provides a help message, which comes from
+`missing-blob-help`::
+
+ See the documentation for your board. You may need to build ARM Trusted
+ Firmware and build with BL31=/path/to/bl31.bin
+
+The mechanism by which binman is advised of this is also in the Makefile. See
+the `-a atf-bl31-path=${BL31}` piece in `cmd_binman`. This tells binman to
+set the EntryArg `atf-bl31-path` to the value of the `BL31` environment
+variable. Within binman, this EntryArg is picked up by the `Entry_atf_bl31`
+etype. An EntryArg is simply an argument to the entry. The `atf-bl31-path`
+name is documented in :ref:`etype_atf_bl31`.
+
+Taking this a little further, when binman is used to create a FIT, it supports
+using an ELF file, e.g. `bl31.elf` and splitting it into separate pieces (with
+`fit,operation = "split-elf"`), each with its own load address.
+
+
+Invoking binman outside U-Boot
+------------------------------
+
+While binman is invoked from within the U-Boot build system, it is also possible
+to invoke it separately. This is typically used in a production build system,
+where signing is completed (with real keys) and any missing binaries are
+provided.
+
+For example, for build testing there is no need to provide a real signature,
+nor is there any need to provide a real ATF BL31 binary. These can be added
+later by invoking binman again, providing all the required inputs from the
+first run, plus any that were previously missing or only placeholders.
+
+So in practice binman is often used twice:
+
+- once within the U-Boot build system, for development and testing
+- again outside U-Boot to assemble the final production images
+
+While the same input binaries are used in each case, you will of course need
+to create your own binman command line, similar to that in `cmd_binman` in
+the Makefile. You may find the -I and --toolpath options useful. The
+device tree file is provided to binman in binary form, so there is no need to
+have access to the original `.dts` sources.
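+
+As a rough sketch, such an invocation might look something like this (`-d` is
+assumed here to name the compiled image-description file; check
+`binman build -h` for the exact options in your version)::
+
+    binman build -u -d u-boot.dtb -I /path/to/u-boot/build \
+        --toolpath /path/to/signing-tools -a atf-bl31-path=/path/to/bl31.bin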
+
+
+Assembling the image description
+--------------------------------
+
+Since binman uses the device tree for its image description, you can use the
+same files that describe your board's hardware to describe how the image is
+assembled. Typically the image description is in a common file used by all
+boards with a particular SoC (e.g. `imx8mp-u-boot.dtsi`).
+
+Where a particular board needs to make changes, it can override properties in
+the SoC file, just as it would for any other device tree property. It can also
+add an image that is specific to the board.
+
+Another way to control the image description is to make use of CONFIG options
+in the description. For example, if the start offset of a particular entry
+varies by board, you can add a Kconfig for that and reference it in the
+description::
+
+ u-boot-spl {
+ };
+
+ fit {
+ offset = <CONFIG_SPL_PAD_TO>;
+ ...
+ };
+
+The SoC can provide a default value but boards can override that as needed and
+binman will take care of it.
+
+It is even possible to control which entries appear in the image, by using the
+C preprocessor::
+
+ #ifdef CONFIG_HAVE_MRC
+ intel-mrc {
+ offset = <CFG_X86_MRC_ADDR>;
+ };
+ #endif
+
+Only boards which enable `HAVE_MRC` will include this entry.
+
+Obviously a similar approach can be used to control which images are produced,
+with a Kconfig option to enable a SPI image, for example. However there is
+generally no harm in producing an image that is not used. If a board uses MMC
+but not SPI, but the SoC supports booting from both, then both images can be
+produced, with only on or other being used by particular boards. This can help
+reduce the need for having multiple defconfig targets for a board where the
+only difference is the boot media, enabling / disabling secure boot, etc.
+
+Of course you can use the device tree itself to pass any board-specific
+information that is needed by U-Boot at runtime (see binman_syms_ for how to
+make binman insert these values directly into executables like SPL).
+
+There is one more way this can be done: with individual .dtsi files for each
+image supported by the SoC. Then the board `.dts` file can include the ones it
+wants. This is not recommended, since it is likely to be difficult to maintain
+and harder to understand the relationship between the different boards.
+
+
+Producing images for multiple boards
+------------------------------------
+
+When invoked within U-Boot, binman only builds a single set of images, for
+the chosen board. This is set by the `CONFIG_DEFAULT_DEVICE_TREE` option.
+
+However, U-Boot generally builds all the device tree files associated with an
+SoC. These are written to the (e.g. for ARM) `arch/arm/dts` directory. Each of
+these contains the full binman description for that board. Often the best
+approach is to build a single image that includes all these device tree binaries
+and allow SPL to select the correct one on boot.
+
+However, it is also possible to build separate images for each board, simply by
+invoking binman multiple times, once for each device tree file, using a
+different output directory. This will produce one set of images for each board.
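+
+A minimal sketch of this, assuming each board has its own U-Boot build
+directory and that `-O` selects a separate output directory (check
+`binman build -h` for the exact option name), might be::
+
+    for board in board1 board2; do
+        binman build -I /path/to/build/$board -O /path/to/out/$board
+    done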
+
+
+Example use of binman for x86
+-----------------------------
+
+In most cases x86 images have a lot of binary blobs, 'black-box' code
+provided by Intel which must be run for the platform to work. Typically
+these blobs are not relocatable and must be placed at fixed areas in the
+firmware image.
+
+Currently this is handled by ifdtool, which places microcode, FSP, MRC, VGA
+BIOS, reference code and Intel ME binaries into a u-boot.rom file.
+
+Binman is intended to replace all of this, with ifdtool left to handle only
+the configuration of the Intel-format descriptor.
+
+
+Installing binman
+-----------------
+
+First install prerequisites, e.g.:
+
+.. code-block:: bash
+
+ sudo apt-get install python-pyelftools python3-pyelftools lzma-alone \
+ liblz4-tool
+
+You can run binman directly if you put it on your PATH. But if you want to
+install into your `~/.local` Python directory, use:
+
+.. code-block:: bash
+
+ pip install tools/patman tools/dtoc tools/binman
+
+Note that binman makes use of libraries from patman and dtoc, which is why these
+need to be installed. Also you need `libfdt` and `pylibfdt` which can be
+installed like this:
+
+.. code-block:: bash
+
+ git clone git://git.kernel.org/pub/scm/utils/dtc/dtc.git
+ cd dtc
+ pip install .
+ make NO_PYTHON=1 install
+
+This installs the `libfdt.so` library into `~/lib` so you can use
+`LD_LIBRARY_PATH=~/lib` when running binman. If you want to install it in the
+system-library directory, replace the last line with:
+
+.. code-block:: bash
+
+ make NO_PYTHON=1 PREFIX=/ install
+
+Running binman
+--------------
+
+Type:
+
+.. code-block:: bash
+
+ binman build -b <board_name>
+
+to build an image for a board. The board name is the same name used when
+configuring U-Boot (e.g. for sandbox_defconfig the board name is 'sandbox').
+Binman assumes that the input files for the build are in ../b/<board_name>.
+
+Or you can specify this explicitly:
+
+.. code-block:: bash
+
+ binman build -I <build_path>
+
+where <build_path> is the build directory containing the output of the U-Boot
+build.
+
+(Future work will make this more configurable)
+
+In either case, binman picks up the device tree file (u-boot.dtb) and looks
+for its instructions in the 'binman' node.
+
+Binman has a few other options which you can see by running 'binman -h'.
+
+
+Enabling binman for a board
+---------------------------
+
+At present binman is invoked from a rule in the main Makefile. You should be
+able to enable CONFIG_BINMAN to enable this rule.
+
+The output file is typically named image.bin and is located in the output
+directory. If input files are needed, you add these to INPUTS-y either in the
+main Makefile or in a config.mk file in your arch subdirectory.
+
+Once binman is executed it will pick up its instructions from a device-tree
+file, typically <soc>-u-boot.dtsi, where <soc> is your CONFIG_SYS_SOC value.
+You can use other, more specific CONFIG options - see 'Automatic .dtsi
+inclusion' below.
+
+.. _binman_syms:
+
+Access to binman entry offsets at run time (symbols)
+----------------------------------------------------
+
+Binman assembles images and determines where each entry is placed in the image.
+This information may be useful to U-Boot at run time. For example, in SPL it
+is useful to be able to find the location of U-Boot so that it can be executed
+when SPL is finished.
+
+Binman allows you to declare symbols in the SPL image which are filled in
+with their correct values during the build. For example:
+
+.. code-block:: c
+
+ binman_sym_declare(ulong, u_boot_any, image_pos);
+
+declares a ulong value which will be assigned to the image-pos of any U-Boot
+image (u-boot.bin, u-boot.img, u-boot-nodtb.bin) that is present in the image.
+You can access this value with something like:
+
+.. code-block:: c
+
+ ulong u_boot_offset = binman_sym(ulong, u_boot_any, image_pos);
+
+Thus u_boot_offset will be set to the image-pos of U-Boot in memory, assuming
+that the whole image has been loaded, or is available in flash. You can then
+jump to that address to start U-Boot.
+
+At present this feature is only supported in SPL and TPL. In principle it is
+possible to fill in such symbols in U-Boot proper, as well, but a future C
+library is planned for this instead, to read from the device tree.
+
+As well as image-pos, it is possible to read the size of an entry and its
+offset (which is the start position of the entry within its parent).
+
+A small technical note: Binman automatically adds the base address of the image
+(i.e. __image_copy_start) to the value of the image-pos symbol, so that when the
+image is loaded to its linked address, the value will be correct and actually
+point into the image.
+
+For example, say SPL is at the start of the image and linked to start at address
+80108000. If U-Boot's image-pos is 0x8000 then binman will write an image-pos
+for U-Boot of 80110000 into the SPL binary, since it assumes the image is loaded
+to 80108000, with SPL at 80108000 and U-Boot at 80110000.
+
+For x86 devices (with the end-at-4gb property) this base address is not added
+since it is assumed that images are XIP and the offsets already include the
+address.
+
+While U-Boot's symbol updating is handled automatically by the u-boot-spl
+entry type (and others), it is possible to use this feature with any blob. To
+do this, add a `write-symbols` (boolean) property to the node, set the ELF
+filename using `elf-filename` and set 'elf-base-sym' to the base symbol for the
+start of the binary image (this defaults to `__image_copy_start` which is what
+U-Boot uses). See `testBlobSymbol()` for an example.
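+
+A minimal sketch of such a node, with purely illustrative filenames and base
+symbol, might be::
+
+    blob {
+        filename = "some-program.bin";
+        write-symbols;
+        elf-filename = "some-program.elf";
+        elf-base-sym = "__my_start_sym";
+    };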
+
+.. _binman_fdt:
+
+Access to binman entry offsets at run time (fdt)
+------------------------------------------------
+
+Binman can update the U-Boot FDT to include the final position and size of
+each entry in the images it processes. The option to enable this is -u and it
+causes binman to make sure that the 'offset', 'image-pos' and 'size' properties
+are set correctly for every entry. It is not necessary to specify these in the
+image definition, since binman calculates the final values and writes them to
+the device tree. These can be used by U-Boot at run-time to find the location
+of each entry.
+
+Alternatively, an FDT map entry can be used to add a special FDT containing
+just the information about the image. This is preceded by a magic string so
+that it can be located anywhere in the image. An image header (typically at
+the start or end of the image) can be used to point to the FDT map. See the
+fdtmap and image-header entries for more information.
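+
+A rough sketch of an image using both might look like this (the `location`
+property is assumed here to choose where the header goes; see the
+image-header entry documentation for the exact properties)::
+
+    binman {
+        u-boot {
+        };
+        fdtmap {
+        };
+        image-header {
+            location = "end";
+        };
+    };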
+
+Map files
+---------
+
+The -m option causes binman to output a .map file for each image that it
+generates. This shows the offset and size of each entry. For example::
+
+ Offset Size Name
+ 00000000 00000028 main-section
+ 00000000 00000010 section@0
+ 00000000 00000004 u-boot
+ 00000010 00000010 section@1
+ 00000000 00000004 u-boot
+
+This shows a hierarchical image with two sections, each with a single entry. The
+offsets of the sections are absolute hex byte offsets within the image. The
+offsets of the entries are relative to their respective sections. The size of
+each entry is also shown, in bytes (hex). The indentation shows the entries
+nested inside their sections.
+
+
+Passing command-line arguments to entries
+-----------------------------------------
+
+Sometimes it is useful to pass binman the value of an entry property from the
+command line. For example some entries need access to files and it is not
+always convenient to put these filenames in the image definition (device tree).
+
+The -a option supports this::
+
+ -a <prop>=<value>
+
+where::
+
+ <prop> is the property to set
+ <value> is the value to set it to
+
+Not all properties can be provided this way. Only some entries support it,
+typically for filenames.
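+
+For example, to pass the ATF BL31 path used by the atf-bl31 entry type (the
+same EntryArg that `cmd_binman` sets in the U-Boot Makefile), something like
+this could be used::
+
+    binman build -b <board_name> -a atf-bl31-path=/path/to/bl31.bin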
+
+
+Image description format
+========================
+
+The binman node is called 'binman'. An example image description is shown
+below::
+
+ binman {
+ filename = "u-boot-sunxi-with-spl.bin";
+ pad-byte = <0xff>;
+ blob {
+ filename = "spl/sunxi-spl.bin";
+ };
+ u-boot {
+ offset = <CONFIG_SPL_PAD_TO>;
+ };
+ };
+
+
+This requests binman to create an image file called u-boot-sunxi-with-spl.bin
+consisting of a specially formatted SPL (spl/sunxi-spl.bin, built by the
+normal U-Boot Makefile), some 0xff padding, and a U-Boot legacy image. The
+padding comes from the fact that the second binary is placed at
+CONFIG_SPL_PAD_TO. If that line were omitted then the U-Boot binary would
+immediately follow the SPL binary.
+
+The binman node describes an image. The sub-nodes describe entries in the
+image. Each entry represents a region within the overall image. The name of
+the entry (blob, u-boot) tells binman what to put there. For 'blob' we must
+provide a filename. For 'u-boot', binman knows that this means 'u-boot.bin'.
+
+Entries are normally placed into the image sequentially, one after the other.
+The image size is the total size of all entries. As you can see, you can
+specify the start offset of an entry using the 'offset' property.
+
+Note that due to a device tree requirement, all entries must have a unique
+name. If you want to put the same binary in the image multiple times, you can
+use any unique name, with the 'type' property providing the type.
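+
+For example, this places two copies of U-Boot in the image, using hypothetical
+node names::
+
+    u-boot-one {
+        type = "u-boot";
+    };
+    u-boot-two {
+        type = "u-boot";
+    };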
+
+The attributes supported for entries are described below, followed by a short
+example that uses several of them together.
+
+offset:
+ This sets the offset of an entry within the image or section containing
+ it. The first byte of the image is normally at offset 0. If 'offset' is
+ not provided, binman sets it to the end of the previous region, or the
+ start of the image's entry area (normally 0) if there is no previous
+ region.
+
+align:
+ This sets the alignment of the entry. The entry offset is adjusted
+ so that the entry starts on an aligned boundary within the containing
+ section or image. For example 'align = <16>' means that the entry will
+ start on a 16-byte boundary. This may mean that padding is added before
+ the entry. The padding is part of the containing section but is not
+ included in the entry, meaning that an empty space may be created before
+ the entry starts. Alignment should be a power of 2. If 'align' is not
+ provided, no alignment is performed.
+
+size:
+ This sets the size of the entry. The contents will be padded out to
+ this size. If this is not provided, it will be set to the size of the
+ contents.
+
+min-size:
+ Sets the minimum size of the entry. This size includes explicit padding
+ ('pad-before' and 'pad-after'), but not padding added to meet alignment
+ requirements. While this does not affect the contents of the entry within
+ binman itself (the padding is performed only when its parent section is
+ assembled), the end result will be that the entry ends with the padding
+ bytes, so may grow. Defaults to 0.
+
+pad-before:
+ Padding before the contents of the entry. Normally this is 0, meaning
+ that the contents start at the beginning of the entry. This can be used
+ to offset the entry contents a little. While this does not affect the
+ contents of the entry within binman itself (the padding is performed
+ only when its parent section is assembled), the end result will be that
+ the entry starts with the padding bytes, so may grow. Defaults to 0.
+
+pad-after:
+ Padding after the contents of the entry. Normally this is 0, meaning
+ that the entry ends at the last byte of content (unless adjusted by
+ other properties). This allows room to be created in the image for
+ this entry to expand later. While this does not affect the contents of
+ the entry within binman itself (the padding is performed only when its
+ parent section is assembled), the end result will be that the entry ends
+ with the padding bytes, so may grow. Defaults to 0.
+
+align-size:
+ This sets the alignment of the entry size. For example, to ensure
+ that the size of an entry is a multiple of 64 bytes, set this to 64.
+ While this does not affect the contents of the entry within binman
+ itself (the padding is performed only when its parent section is
+ assembled), the end result is that the entry ends with the padding
+ bytes, so may grow. If 'align-size' is not provided, no alignment is
+ performed.
+
+align-end:
+ This sets the alignment of the end of an entry with respect to the
+ containing section. Some entries require that they end on an alignment
+ boundary, regardless of where they start. This does not move the start
+ of the entry, so the contents of the entry will still start at the
+ beginning. But there may be padding at the end. While this does not
+ affect the contents of the entry within binman itself (the padding is
+ performed only when its parent section is assembled), the end result
+ is that the entry ends with the padding bytes, so may grow.
+ If 'align-end' is not provided, no alignment is performed.
+
+filename:
+ For 'blob' types this provides the filename containing the binary to
+ put into the entry. If binman knows about the entry type (like
+ u-boot-bin), then there is no need to specify this.
+
+type:
+ Sets the type of an entry. This defaults to the entry name, but it is
+ possible to use any name, and then add (for example) 'type = "u-boot"'
+ to specify the type.
+
+offset-unset:
+ Indicates that the offset of this entry should not be set by placing
+    it immediately after the entry before. Instead, it is set by another
+ entry which knows where this entry should go. When this boolean
+ property is present, binman will give an error if another entry does
+ not set the offset (with the GetOffsets() method).
+
+image-pos:
+    This cannot be set on an entry (or at least it is ignored if it is), but
+ with the -u option, binman will set it to the absolute image position
+ for each entry. This makes it easy to find out exactly where the entry
+ ended up in the image, regardless of parent sections, etc.
+
+extend-size:
+ Extend the size of this entry to fit available space. This space is only
+ limited by the size of the image/section and the position of the next
+ entry.
+
+compress:
+    Sets the compression algorithm to use (for blobs only). See the entry
+ documentation for details.
+
+missing-msg:
+ Sets the tag of the message to show if this entry is missing. This is
+ used for external blobs. When they are missing it is helpful to show
+ information about what needs to be fixed. See missing-blob-help for the
+ message for each tag.
+
+no-expanded:
+ By default binman substitutes entries with expanded versions if available,
+ so that a `u-boot` entry type turns into `u-boot-expanded`, for example. The
+ `--no-expanded` command-line option disables this globally. The
+ `no-expanded` property disables this just for a single entry. Put the
+ `no-expanded` boolean property in the node to select this behaviour.
+
+optional:
+ External blobs are normally required to be present for the image to be
+    built (but see `External blobs`_). This property allows an entry to be
+    optional, so that when it cannot be found, the problem is ignored and
+ an empty file is used for this blob. This should be used only when the blob
+ is entirely optional and is not needed for correct operation of the image.
+ Note that missing, optional blobs do not produce a non-zero exit code from
+ binman, although it does show a warning about the missing external blob.
+
+insert-template:
+ This is not strictly speaking an entry property, since it is processed early
+ in Binman before the entries are read. It is a list of phandles of nodes to
+ include in the current (target) node. For each node, its subnodes and their
+ properties are brought into the target node. See Templates_ below for
+ more information.
+
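+For example, an entry using several of these properties might look like this
+(a minimal sketch; the node name and filename are illustrative)::
+
+    binman {
+        logo {
+            type = "blob";
+            filename = "logo.bmp";
+            offset = <0x2000>;
+            align-size = <64>;
+            compress = "lz4";
+        };
+    };
+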
+The attributes supported for images and sections are described below. Several
+are similar to those for entries.
+
+size:
+ Sets the image size in bytes, for example 'size = <0x100000>' for a
+ 1MB image.
+
+offset:
+ This is similar to 'offset' in entries, setting the offset of a section
+ within the image or section containing it. The first byte of the section
+ is normally at offset 0. If 'offset' is not provided, binman sets it to
+ the end of the previous region, or the start of the image's entry area
+ (normally 0) if there is no previous region.
+
+align-size:
+ This sets the alignment of the image size. For example, to ensure
+ that the image ends on a 512-byte boundary, use 'align-size = <512>'.
+ If 'align-size' is not provided, no alignment is performed.
+
+pad-before:
+ This sets the padding before the image entries. The first entry will
+ be positioned after the padding. This defaults to 0.
+
+pad-after:
+ This sets the padding after the image entries. The padding will be
+ placed after the last entry. This defaults to 0.
+
+pad-byte:
+ This specifies the pad byte to use when padding in the image. It
+ defaults to 0. To use 0xff, you would add 'pad-byte = <0xff>'.
+
+filename:
+ This specifies the image filename. It defaults to 'image.bin'.
+
+sort-by-offset:
+ This causes binman to reorder the entries as needed to make sure they
+ are in increasing positional order. This can be used when your entry
+ order may not match the positional order. A common situation is where
+ the 'offset' properties are set by CONFIG options, so their ordering is
+ not known a priori.
+
+ This is a boolean property so needs no value. To enable it, add a
+ line 'sort-by-offset;' to your description.
+
+multiple-images:
+ Normally only a single image is generated. To create more than one
+ image, put this property in the binman node. For example, this will
+ create image1.bin containing u-boot.bin, and image2.bin containing
+ both spl/u-boot-spl.bin and u-boot.bin::
+
+ binman {
+ multiple-images;
+ image1 {
+ u-boot {
+ };
+ };
+
+ image2 {
+ spl {
+ };
+ u-boot {
+ };
+ };
+ };
+
+end-at-4gb:
+    For x86 machines the ROM offsets start just before 4GB and extend
+    up so that the image finishes at the 4GB boundary. This boolean
+    option can be enabled to support this. The image size must be
+    provided so that binman knows where the image should start. For an
+    8MB ROM, the offset of the first entry would be 0xff800000 with
+    this option, instead of 0 without this option.
+
+skip-at-start:
+    This property specifies the entry offset of the first entry.
+
+    For PowerPC mpc85xx-based CPUs, CONFIG_TEXT_BASE is the entry
+    offset of the first entry. It can be 0xeff40000 or 0xfff40000 for
+    NOR flash boot, 0x201000 for SD boot, etc.
+
+    The 'end-at-4gb' property is not applicable where CONFIG_TEXT_BASE +
+    image size != 4GB.
+
+align-default:
+ Specifies the default alignment for entries in this section, if they do
+ not specify an alignment. Note that this only applies to top-level entries
+ in the section (direct subentries), not any subentries of those entries.
+ This means that each section must specify its own default alignment, if
+ required.
+
+symlink:
+    Adds a symlink to the image file, using the string given in this property
+    as the name of the symlink.
+
+overlap:
+ Indicates that this entry overlaps with others in the same section. These
+ entries should appear at the end of the section. Overlapping entries are not
+ packed with other entries, but their contents are written over other entries
+ in the section. Overlapping entries must have an explicit offset and size.
+
+write-symbols:
+ Indicates that the blob should be updated with symbol values calculated by
+ binman. This is automatic for certain entry types, e.g. `u-boot-spl`. See
+ binman_syms_ for more information.
+
+no-write-symbols:
+ Disables symbol writing for this entry. This can be used in entry types
+ where symbol writing is automatic. For example, if `u-boot-spl` refers to
+ the `u_boot_any_image_pos` symbol but U-Boot is not available in the image
+ containing SPL, this can be used to disable the writing. Quite likely this
+ indicates a bug in your setup.
+
+elf-filename:
+ Sets the file name of a blob's associated ELF file. For example, if the
+ blob is `zephyr.bin` then the ELF file may be `zephyr.elf`. This allows
+ binman to locate symbols and understand the structure of the blob. See
+ binman_syms_ for more information.
+
+elf-base-sym:
+    Sets the name of the ELF symbol that points to the start of a blob. For
+    U-Boot this is `__image_copy_start` and that is the default used by binman
+    if this property is missing. For other projects, a different symbol may be
+    needed. Add this symbol to the properties for the blob so that symbols can
+    be read correctly. See binman_syms_ for more information.
+
+offset-from-elf:
+    Sets the offset of an entry based on a symbol value in another entry
+    (see the sketch after this list). The format is <&phandle>, "sym_name",
+    <offset> where phandle is the entry containing the blob (with an
+    associated ELF file providing symbols), "sym_name" is the symbol to look
+    up (relative to elf-base-sym) and <offset> is an offset to add to that
+    value.
+
+preserve:
+    Indicates that this entry should be preserved by any firmware updates. This
+    flag should be checked by the updater when it is deciding which entries to
+    update. This flag is normally attached to sections but can be attached to
+    a single entry in a section if the updater supports it. Note that binman
+    itself has no control over the updater's behaviour, so this is just a
+    signal. It is not enforced by binman.
+
+Examples of the above options can be found in the tests. See the
+tools/binman/test directory.
+
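+For instance, a single image using a few of the properties above might be
+described like this (a sketch only; the filenames and symbol names are
+illustrative)::
+
+    binman {
+        pad-byte = <0xff>;
+
+        first: blob {
+            filename = "first.bin";
+            elf-filename = "first.elf";
+            elf-base-sym = "_start";
+        };
+
+        blob2 {
+            type = "blob";
+            filename = "second.bin";
+            offset-from-elf = <&first>, "some_symbol", <0>;
+        };
+    };
+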
+It is possible to have the same binary appear multiple times in the image,
+either by using a unit number suffix (u-boot@0, u-boot@1) or by using a
+different name for each and specifying the type with the 'type' attribute.
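+
+For example, both approaches might look like this (node names are
+illustrative)::
+
+    binman {
+        u-boot@0 {
+        };
+        u-boot@1 {
+        };
+        u-boot-other {
+            type = "u-boot";
+        };
+    };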
+
+
+Sections and hierarchical images
+--------------------------------
+
+Sometimes it is convenient to split an image into several pieces, each of which
+contains its own set of binaries. An example is a flash device where part of
+the image is read-only and part is read-write. We can set up sections for each
+of these, and place binaries in them independently. The image is still produced
+as a single output file.
+
+This feature provides a way of creating hierarchical images. For example, here
+is an image with two copies of U-Boot. One is read-only (ro), intended to be
+written only in the factory. The other is read-write (rw), so that it can be
+upgraded in the field. The sizes are fixed so that the ro/rw boundary is known
+and can be programmed::
+
+ binman {
+ section@0 {
+ read-only;
+ name-prefix = "ro-";
+ size = <0x100000>;
+ u-boot {
+ };
+ };
+ section@1 {
+ name-prefix = "rw-";
+ size = <0x100000>;
+ u-boot {
+ };
+ };
+ };
+
+This image could be placed into a SPI flash chip, with the protection boundary
+set at 1MB.
+
+A few special properties are provided for sections:
+
+read-only:
+    Indicates that this section is read-only. This has no impact on binman's
+    operation, but this property can be read at run time.
+
+name-prefix:
+    This string is prepended to all the names of the binaries in the
+    section. In the example above, the 'u-boot' binaries will actually be
+    renamed to 'ro-u-boot' and 'rw-u-boot'. This can be useful to
+    distinguish binaries with otherwise identical names.
+
+filename:
+    This allows the contents of the section to be written to a file in the
+    output directory. This can sometimes be useful if you want to use the data
+    from one section in a different image, since there is currently no way to
+    share data between images other than through files.
+
+Image Properties
+----------------
+
+Image nodes act like sections but also have a few extra properties:
+
+filename:
+    Output filename for the image. This defaults to image.bin (or, in the
+    case of multiple images, <nodename>.bin, where <nodename> is the name of
+    the image node).
+
+allow-repack:
+    Create an image that can be repacked. With this option it is possible
+    to change anything in the image after it is created, including updating
+    the position and size of image components. By default this is not
+    permitted since it is not possible to know whether this might violate a
+    constraint in the image description. For example, if a section has to
+    increase in size to hold a larger binary, that might cause the section
+    to fall out of its allowed region (e.g. the read-only portion of flash).
+
+ Adding this property causes the original offset and size values in the
+ image description to be stored in the FDT and fdtmap.
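+
+    For example (a minimal sketch for a single image)::
+
+        binman {
+            allow-repack;
+
+            u-boot {
+            };
+        };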
+
+
+Image dependencies
+------------------
+
+Binman does not currently support images that depend on each other. For example,
+if one image creates `fred.bin` and then the next uses this `fred.bin` to
+produce a final `image.bin`, then the behaviour is undefined. It may work, or it
+may produce an error about `fred.bin` being missing, or it may use a version of
+`fred.bin` from a previous run.
+
+Often this can be handled by incorporating the dependency into the second
+image. For example, instead of::
+
+ binman {
+ multiple-images;
+
+ fred {
+ u-boot {
+ };
+ fill {
+ size = <0x100>;
+ };
+ };
+
+ image {
+ blob {
+ filename = "fred.bin";
+ };
+ u-boot-spl {
+ };
+        };
+    };
+
+you can do this::
+
+ binman {
+ image {
+ fred {
+ type = "section";
+ u-boot {
+ };
+ fill {
+ size = <0x100>;
+ };
+ };
+ u-boot-spl {
+ };
+        };
+    };
+
+
+
+Hashing Entries
+---------------
+
+It is possible to ask binman to hash the contents of an entry and write that
+value back to the device-tree node. For example::
+
+ binman {
+ u-boot {
+ hash {
+ algo = "sha256";
+ };
+ };
+ };
+
+Here, a new 'value' property will be written to the 'hash' node containing
+the hash of the 'u-boot' entry. Only SHA256 is supported at present. Whole
+sections can be hashed if desired, by adding the 'hash' node to the section.
+
+The hash value can be checked at runtime by hashing the data actually read and
+comparing this hash to the value in the device tree.
+
+
+Expanded entries
+----------------
+
+Binman automatically replaces 'u-boot' with an expanded version of that, i.e.
+'u-boot-expanded'. This means that when you write::
+
+ u-boot {
+ };
+
+you actually get::
+
+ u-boot {
+        type = "u-boot-expanded";
+ };
+
+which in turn expands to::
+
+ u-boot {
+ type = "section";
+
+ u-boot-nodtb {
+ };
+
+ u-boot-dtb {
+ };
+ };
+
+U-Boot's various phase binaries actually comprise two or three pieces.
+For example, u-boot.bin has the executable followed by a devicetree.
+
+With binman we want to be able to update that devicetree with full image
+information so that it is accessible to the executable. This is tricky
+if it is not clear where the devicetree starts.
+
+The above feature ensures that the devicetree is clearly separated from the
+U-Boot executable and can be updated separately by binman as needed. It can be
+disabled with the --no-expanded flag if required.
+
+The same applies for u-boot-spl and u-boot-tpl. In those cases, the expansion
+includes the BSS padding, so when you write::
+
+ spl {
+        type = "u-boot-spl";
+ };
+
+you actually get::
+
+ spl {
+        type = "u-boot-spl-expanded";
+ };
+
+which in turn expands to::
+
+ spl {
+ type = "section";
+
+ u-boot-spl-nodtb {
+ };
+
+ u-boot-spl-bss-pad {
+ };
+
+ u-boot-spl-dtb {
+ };
+ };
+
+Of course we should not expand SPL if it has no devicetree. Also if the BSS
+padding is not needed (because BSS is in RAM as with CONFIG_SPL_SEPARATE_BSS),
+the 'u-boot-spl-bss-pad' subnode should not be created. The use of the expanded
+entry type is controlled by the UseExpanded() method. In the SPL case it checks
+the 'spl-dtb' entry arg, which is 'y' or '1' if SPL has a devicetree.
+
+For the BSS case, a 'spl-bss-pad' entry arg controls whether it is present. All
+entry args are provided by the U-Boot Makefile.
+
+
+Optional entries
+----------------
+
+Some entries need to exist only if certain conditions are met. For example, an
+entry may want to appear in the image only if a file has a particular format.
+Obviously the entry must exist in the image description for it to be processed
+at all, so a way needs to be found to have the entry remove itself.
+
+To handle this, when entry.ObtainContents() is called, the entry can call
+entry.mark_absent() to mark itself as absent, passing a suitable message as the
+reason.
+
+Any absent entries are dropped immediately after ObtainContents() has been
+called on all entries.
+
+It is not possible for an entry to mark itself absent at any other point in the
+processing. It must happen in the ObtainContents() method.
+
+The effect is as if the entry had never been present at all, since the image
+is packed without it and it disappears from the list of entries.
+
+
+Compression
+-----------
+
+Binman supports compression for 'blob' entries (those of type 'blob' and
+derivatives). To enable this for an entry, add a 'compress' property::
+
+ blob {
+ filename = "datafile";
+ compress = "lz4";
+ };
+
+The entry will then contain the compressed data, using the 'lz4' compression
+algorithm. Currently this is the only one that is supported. The uncompressed
+size is written to the node in an 'uncomp-size' property, if -u is used.
+
+Compression is also supported for sections. In that case the entire section is
+compressed in one block, including all its contents. This means that accessing
+an entry from the section requires decompressing the entire section. Also, the
+size of a section indicates the space that it consumes in its parent section
+(and typically the image). With compression, the section may contain more data,
+and the uncomp-size property indicates that, as above. The contents of the
+section are compressed first, before any padding is added. This ensures that
+the padding itself is not compressed, which would be a waste of time.
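+
+For example, a whole section might be compressed like this (a sketch, reusing
+the blob above)::
+
+    section {
+        compress = "lz4";
+
+        blob {
+            filename = "datafile";
+        };
+    };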
+
+
+Automatic .dtsi inclusion
+-------------------------
+
+It is sometimes inconvenient to add a 'binman' node to the .dts file for each
+board. One approach is to use #include to bring in a common file. Another
+approach supported by the U-Boot build system is to automatically include
+a common header. You can then put the binman node (and anything else that is
+specific to U-Boot, such as bootph-all properties) in that header file.
+
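+For example, such a header file might contain little more than the binman node
+(a minimal sketch)::
+
+    / {
+        binman {
+            u-boot {
+            };
+        };
+    };
+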
+Binman will search for the following files in arch/<arch>/dts::
+
+ <dts>-u-boot.dtsi where <dts> is the base name of the .dts file
+ <CONFIG_SYS_SOC>-u-boot.dtsi
+ <CONFIG_SYS_CPU>-u-boot.dtsi
+ <CONFIG_SYS_VENDOR>-u-boot.dtsi
+ u-boot.dtsi
+
+U-Boot will only use the first one that it finds. If you need to include a
+more general file you can do that from the more specific file using #include.
+If you are having trouble figuring out what is going on, you can use
+`DEVICE_TREE_DEBUG=1` with your build::
+
+ make DEVICE_TREE_DEBUG=1
+ scripts/Makefile.lib:334: Automatic .dtsi inclusion: options:
+ arch/arm/dts/juno-r2-u-boot.dtsi arch/arm/dts/-u-boot.dtsi
+ arch/arm/dts/armv8-u-boot.dtsi arch/arm/dts/armltd-u-boot.dtsi
+ arch/arm/dts/u-boot.dtsi ... found: "arch/arm/dts/juno-r2-u-boot.dtsi"
+
+
+Templates
+=========
+
+Sometimes multiple images need to be created which all have a common
+part. For example, a board may generate SPI and eMMC images which both include
+a FIT. Since the FIT includes many entries, it is tedious to repeat them twice
+in the image description.
+
+Templates provide a simple way to handle this::
+
+ binman {
+ multiple-images;
+ common_part: template-1 {
+ some-property;
+ fit {
+ ... lots of entries in here
+ };
+
+ text {
+ text = "base image";
+ };
+ };
+
+ spi-image {
+ filename = "image-spi.bin";
+            insert-template = <&common_part>;
+
+ /* things specific to SPI follow */
+ footer {
+            };
+
+ text {
+ text = "SPI image";
+ };
+ };
+
+ mmc-image {
+ filename = "image-mmc.bin";
+            insert-template = <&common_part>;
+
+ /* things specific to MMC follow */
+ footer {
+            };
+
+ text {
+ text = "MMC image";
+ };
+ };
+ };
+
+The template node name must start with 'template', so it is not considered to be
+an image itself.
+
+The mechanism is very simple. For each phandle in the 'insert-template'
+property, the source node is looked up. Then the subnodes of that source node
+are copied into the target node, i.e. the one containing the `insert-template`
+property.
+
+If the target node has a node with the same name as a template, its properties
+override corresponding properties in the template. This allows the template to
+be used as a base, with the node providing updates to the properties as needed.
+The overriding happens recursively.
+
+Template nodes appear first in each node that they are inserted into and
+ordering of template nodes is preserved. Other nodes come afterwards. If a
+template node also appears in the target node, then the template node sets the
+order. Thus the template can be used to set the ordering, even if the target
+node provides all the properties. In the above example, `fit` and `text` appear
+first in the `spi-image` and `mmc-image` images, followed by `footer`.
+
+Where there are multiple template nodes, they are inserted in that order, so
+the first template node appears first, then the second.
+
+Properties in the template node are inserted into the destination node if they
+do not exist there. In the example above, `some-property` is added to each of
+`spi-image` and `mmc-image`.
+
+Note that template nodes are removed from the binman description after
+processing and before binman builds the image descriptions.
+
+The initial devicetree produced by the templating process is written to the
+`u-boot.dtb.tmpl1` file. This can be useful to see what is going on if there is
+a failure before the final `u-boot.dtb.out` file is written. A second
+`u-boot.dtb.tmpl2` file is written when the templates themselves are removed.
+
+Dealing with phandles
+---------------------
+
+Templates can contain phandles and these are copied to the destination node.
+However this should be used with care, since if a template is instantiated twice
+then the phandle will be copied twice, resulting in a devicetree with duplicate
+phandles, i.e. the same phandle used by two different nodes. Binman detects this
+situation and produces an error, for example::
+
+ Duplicate phandle 1 in nodes /binman/image/fit/images/atf/atf-bl31 and
+ /binman/image-2/fit/images/atf/atf-bl31
+
+In this case an atf-bl31 node containing a phandle has been copied into two
+different target nodes, resulting in the same phandle for each. See
+testTemplatePhandleDup() for the test case.
+
+The solution is typically to put the phandles in the corresponding target nodes
+(one for each) and remove the phandle from the template.
+
+Updating an ELF file
+====================
+
+For the EFI app, where U-Boot is loaded from UEFI and runs as an app, there is
+no way to update the devicetree after U-Boot is built. Normally this works by
+creating a new u-boot.dtb.out with the updated devicetree, which is
+automatically built into the output image. With ELF this is not possible since
+the ELF is not part of an image, just a stand-alone file. We must create an
+updated ELF file with the new devicetree.
+
+This is handled by the --update-fdt-in-elf option. It takes four arguments,
+separated by commas (see the example after this list):
+
+    infile - filename of input ELF file, e.g. 'u-boot'
+    outfile - filename of output ELF file, e.g. 'u-boot.out'
+    begin_sym - symbol at the start of the embedded devicetree, e.g.
+        '__dtb_dt_begin'
+    end_sym - symbol at the end of the embedded devicetree, e.g.
+        '__dtb_dt_end'
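+
+For example, the option might be invoked like this (a sketch; other binman
+arguments omitted)::
+
+    binman build ... --update-fdt-in-elf u-boot,u-boot.out,__dtb_dt_begin,__dtb_dt_end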
+
+When this flag is used, binman does all the normal packaging, but as an
+additional step, it creates a new ELF file with the new devicetree embedded in
+it.
+
+If logging is enabled you will see a message like this::
+
+ Updating file 'u-boot' with data length 0x400a (16394) between symbols
+ '__dtb_dt_begin' and '__dtb_dt_end'
+
+There must be enough space for the updated devicetree. If not, an error like
+the following is produced::
+
+ ValueError: Not enough space in 'u-boot' for data length 0x400a (16394);
+ size is 0x1744 (5956)
+
+
+Entry Documentation
+===================
+
+For details on the various entry types supported by binman and how to use them,
+see entries.rst which is generated from the source code using::
+
+ binman entry-docs >tools/binman/entries.rst
+
+.. toctree::
+ :maxdepth: 2
+
+ entries
+
+
+Managing images
+===============
+
+Listing images
+--------------
+
+It is possible to list the entries in an existing firmware image created by
+binman, provided that there is an 'fdtmap' entry in the image. For example::
+
+ $ binman ls -i image.bin
+ Name Image-pos Size Entry-type Offset Uncomp-size
+ ----------------------------------------------------------------------
+ main-section c00 section 0
+ u-boot 0 4 u-boot 0
+ section 5fc section 4
+ cbfs 100 400 cbfs 0
+ u-boot 138 4 u-boot 38
+ u-boot-dtb 180 108 u-boot-dtb 80 3b5
+ u-boot-dtb 500 1ff u-boot-dtb 400 3b5
+ fdtmap 6fc 381 fdtmap 6fc
+ image-header bf8 8 image-header bf8
+
+This shows the hierarchy of the image, the position, size and type of each
+entry, the offset of each entry within its parent and the uncompressed size if
+the entry is compressed.
+
+It is also possible to list just some files in an image, e.g.::
+
+ $ binman ls -i image.bin section/cbfs
+ Name Image-pos Size Entry-type Offset Uncomp-size
+ --------------------------------------------------------------------
+ cbfs 100 400 cbfs 0
+ u-boot 138 4 u-boot 38
+ u-boot-dtb 180 108 u-boot-dtb 80 3b5
+
+or with wildcards::
+
+ $ binman ls -i image.bin "*cb*" "*head*"
+ Name Image-pos Size Entry-type Offset Uncomp-size
+ ----------------------------------------------------------------------
+ cbfs 100 400 cbfs 0
+ u-boot 138 4 u-boot 38
+ u-boot-dtb 180 108 u-boot-dtb 80 3b5
+ image-header bf8 8 image-header bf8
+
+If an older version of binman is used to list images created by a newer one, it
+is possible that it will contain entry types that are not supported. These still
+show with the correct type, but binman just sees them as blobs (plain binary
+data). Any special features of that etype are not supported by the old binman.
+
+
+Extracting files from images
+----------------------------
+
+You can extract files from an existing firmware image created by binman,
+provided that there is an 'fdtmap' entry in the image. For example::
+
+ $ binman extract -i image.bin section/cbfs/u-boot
+
+which will write the uncompressed contents of that entry to the file 'u-boot' in
+the current directory. You can also extract to a particular file, in this case
+u-boot.bin::
+
+ $ binman extract -i image.bin section/cbfs/u-boot -f u-boot.bin
+
+It is possible to extract all files into a destination directory, which will
+put files in subdirectories matching the entry hierarchy::
+
+ $ binman extract -i image.bin -O outdir
+
+or just a selection::
+
+ $ binman extract -i image.bin "*u-boot*" -O outdir
+
+Some entry types have alternative formats, for example fdtmap, which allows
+extracting just the devicetree binary without the fdtmap header::
+
+ $ binman extract -i /tmp/b/odroid-c4/image.bin -f out.dtb -F fdt fdtmap
+ $ fdtdump out.dtb
+ /dts-v1/;
+ // magic: 0xd00dfeed
+ // totalsize: 0x8ab (2219)
+ // off_dt_struct: 0x38
+ // off_dt_strings: 0x82c
+ // off_mem_rsvmap: 0x28
+ // version: 17
+ // last_comp_version: 2
+ // boot_cpuid_phys: 0x0
+ // size_dt_strings: 0x7f
+ // size_dt_struct: 0x7f4
+
+ / {
+ image-node = "binman";
+ image-pos = <0x00000000>;
+ size = <0x0011162b>;
+ ...
+
+Use `-F list` to see what alternative formats are available::
+
+ $ binman extract -i /tmp/b/odroid-c4/image.bin -F list
+ Flag (-F) Entry type Description
+ fdt fdtmap Extract the devicetree blob from the fdtmap
+
+
+Replacing files in an image
+---------------------------
+
+You can replace files in an existing firmware image created by binman, provided
+that there is an 'fdtmap' entry in the image. For example::
+
+ $ binman replace -i image.bin section/cbfs/u-boot
+
+which will write the contents of the file 'u-boot' from the current directory
+to that entry, compressing if necessary. If the entry size changes, you must
+add the 'allow-repack' property to the original image before generating it (see
+above), otherwise you will get an error.
+
+You can also use a particular file, in this case u-boot.bin::
+
+ $ binman replace -i image.bin section/cbfs/u-boot -f u-boot.bin
+
+It is possible to replace all files from a source directory which uses the same
+hierarchy as the entries::
+
+ $ binman replace -i image.bin -I indir
+
+Files that are missing will generate a warning.
+
+You can also replace just a selection of entries::
+
+ $ binman replace -i image.bin "*u-boot*" -I indir
+
+It is possible to replace whole sections as well, but in that case any
+information about entries within the section may become outdated. This is
+because Binman cannot know whether things have moved around or resized within
+the section, once you have updated its data.
+
+Technical note: With 'allow-repack', Binman writes information about the
+original offset and size properties of each entry, if any were specified, in
+the 'orig-offset' and 'orig-size' properties. This allows Binman to distinguish
+between an entry which ended up being packed at an offset (or assigned a size)
+and an entry which had a particular offset / size requested in the Binman
+configuration. Where a particular offset / size was requested, this is treated
+as set in stone, so Binman will ensure it doesn't change. Without this feature,
+repacking an entry might cause it to disobey the original constraints provided
+when it was created.
+
+
+Signing FIT container with private key in an image
+--------------------------------------------------
+
+You can sign a FIT container in your image with a private key.
+For example::
+
+ $ binman sign -i image.bin -k privatekey -a sha256,rsa4096 fit
+
+binman will extract the FIT container, sign it and replace it immediately.
+
+If you want to sign and replace the FIT container in place::
+
+ $ binman sign -i image.bin -k privatekey -a sha256,rsa4096 -f fit.fit fit
+
+which will sign the FIT container with the private key and replace it
+immediately inside your image.
+
+.. _`BinmanLogging`:
+
+Logging
+-------
+
+Binman normally operates silently unless there is an error, in which case it
+just displays the error. The -D/--debug option can be used to create a full
+backtrace when errors occur. You can use BINMAN_DEBUG=1 when building to select
+this.
+
+Internally binman logs some output while it is running. This can be displayed
+by increasing the -v/--verbosity from the default of 1:
+
+ 0: silent
+ 1: warnings only
+ 2: notices (important messages)
+ 3: info about major operations
+ 4: detailed information about each operation
+ 5: debug (all output)
+
+You can use BINMAN_VERBOSE=5 (for example) when building to select this.
+
+
+Bintools
+========
+
+`Bintool` is the name binman gives to a binary tool which it uses to create and
+manipulate binaries that binman cannot handle itself. Bintools are often
+necessary since Binman only supports a subset of the available file formats
+natively.
+
+Many SoC vendors invent ways to load code into their SoC using new file formats,
+sometimes changing the format with successive SoC generations. Sometimes the
+tool is available as Open Source. Sometimes it is a pre-compiled binary that
+must be downloaded from the vendor's website. Sometimes it is available in
+source form but difficult or slow to build.
+
+Even for images that use bintools, binman still assembles the image from its
+image description. It may handle parts of the image natively and part with
+various bintools.
+
+Binman relies on these tools, so it provides various features to manage them:
+
+- Determining whether the tool is currently installed
+- Downloading or building the tool
+- Determining the version of the tool that is installed
+- Deciding which tools are needed to build an image
+
+The Bintool class is an interface to the tool, a thin level of abstraction, using
+Python functions to run the tool for each purpose (e.g. creating a new
+structure, adding a file to an existing structure) rather than just lists of
+string arguments.
+
+As with external blobs, bintools (which are like 'external' tools) can be
+missing. When building an image requires a bintool and it is not installed,
+binman detects this and reports the problem, but continues to build an image.
+This is useful in CI systems which want to check that everything is correct but
+don't have access to the bintools.
+
+To make this work, all calls to bintools (e.g. with Bintool.run_cmd()) must cope
+with the tool being missing, i.e. when None is returned, by:
+
+- Calling self.record_missing_bintool()
+- Setting up some fake contents so binman can continue
+
+Of course the image will not work, but binman reports which bintools are needed
+and also provides a way to fetch them.
+
+To see the available bintools, use::
+
+ binman tool --list
+
+To fetch tools which are missing, use::
+
+ binman tool --fetch missing
+
+You can also use `--fetch all` to fetch all tools or `--fetch <tool>` to fetch
+a particular tool. Some tools are built from source code, in which case you will
+need to have at least the `build-essential` and `git` packages installed.
+
+Tools are fetched into the `~/.binman-tools` directory. This directory is
+automatically added to the toolpath so there is no need to use `--toolpath` to
+specify it. If you want to use these tools outside binman, you may want to
+add this directory to your `PATH`. For example, if you use bash, add this to
+the end of `.bashrc`::
+
+ PATH="$HOME/.binman-tools:$PATH"
+
+To select a custom directory, use the `--tooldir` option.
+
+Bintool Documentation
+=====================
+
+To provide details on the various bintools supported by binman, bintools.rst is
+generated from the source code using::
+
+ binman bintool-docs >tools/binman/bintools.rst
+
+.. toctree::
+ :maxdepth: 2
+
+ bintools
+
+Binman commands and arguments
+=============================
+
+Usage::
+
+ binman [-h] [-B BUILD_DIR] [-D] [--tooldir TOOLDIR] [-H]
+ [--toolpath TOOLPATH] [-T THREADS] [--test-section-timeout]
+ [-v VERBOSITY] [-V]
+ {build,bintool-docs,entry-docs,ls,extract,replace,test,tool} ...
+
+Binman provides the following commands:
+
+- **build** - build images
+- **bintool-docs** - generate documentation about bintools
+- **entry-docs** - generate documentation about entry types
+- **ls** - list an image
+- **extract** - extract files from an image
+- **replace** - replace one or more entries in an image
+- **test** - run tests
+- **tool** - manage bintools
+
+Options:
+
+-h, --help
+ Show help message and exit
+
+-B BUILD_DIR, --build-dir BUILD_DIR
+ Directory containing the build output
+
+-D, --debug
+    Enable debugging (provides a full traceback on error)
+
+--tooldir TOOLDIR
+    Set the directory to store tools
+
+-H, --full-help
+ Display the README file
+
+--toolpath TOOLPATH
+ Add a path to the list of directories containing tools
+
+-T THREADS, --threads THREADS
+ Number of threads to use (0=single-thread). Note that -T0 is useful for
+ debugging since everything runs in one thread.
+
+-v VERBOSITY, --verbosity VERBOSITY
+ Control verbosity: 0=silent, 1=warnings, 2=notices, 3=info, 4=detail,
+ 5=debug
+
+-V, --version
+ Show the binman version
+
+Test options:
+
+--test-section-timeout
+ Use a zero timeout for section multi-threading (for testing)
+
+Commands are described below.
+
+binman build
+------------
+
+This builds one or more images using the provided image description.
+
+Usage::
+
+ binman build [-h] [-a ENTRY_ARG] [-b BOARD] [-d DT] [--fake-dtb]
+ [--fake-ext-blobs] [--force-missing-bintools FORCE_MISSING_BINTOOLS]
+ [-i IMAGE] [-I INDIR] [-m] [-M] [-n] [-O OUTDIR] [-p] [-u]
+ [--update-fdt-in-elf UPDATE_FDT_IN_ELF] [-W]
+
+Options:
+
+-h, --help
+ Show help message and exit
+
+-a ENTRY_ARG, --entry-arg ENTRY_ARG
+ Set argument value `arg=value`. See
+ `Passing command-line arguments to entries`_.
+
+-b BOARD, --board BOARD
+ Board name to build. This can be used instead of `-d`, in which case the
+ file `u-boot.dtb` is used, within the build directory's board subdirectory.
+
+-d DT, --dt DT
+ Configuration file (.dtb) to use. This must have a top-level node called
+ `binman`. See `Image description format`_.
+
+-i IMAGE, --image IMAGE
+ Image filename to build (if not specified, build all)
+
+-I INDIR, --indir INDIR
+ Add a path to the list of directories to use for input files. This can be
+ specified multiple times to add more than one path.
+
+-m, --map
+ Output a map file for each image. See `Map files`_.
+
+-M, --allow-missing
+ Allow external blobs and bintools to be missing. See `External blobs`_.
+
+-n, --no-expanded
+ Don't use 'expanded' versions of entries where available; normally 'u-boot'
+ becomes 'u-boot-expanded', for example. See `Expanded entries`_.
+
+-O OUTDIR, --outdir OUTDIR
+ Path to directory to use for intermediate and output files
+
+-p, --preserve
+ Preserve temporary output directory even if option -O is not given
+
+-u, --update-fdt
+ Update the binman node with offset/size info. See
+ `Access to binman entry offsets at run time (fdt)`_.
+
+--update-fdt-in-elf UPDATE_FDT_IN_ELF
+ Update an ELF file with the output dtb. The argument is a string consisting
+ of four parts, separated by commas. See `Updating an ELF file`_.
+
+-W, --ignore-missing
+ Return success even if there are missing blobs/bintools (requires -M)
+
+Options used only for testing:
+
+--fake-dtb
+ Use fake device tree contents
+
+--fake-ext-blobs
+ Create fake ext blobs with dummy content
+
+--force-missing-bintools FORCE_MISSING_BINTOOLS
+ Comma-separated list of bintools to consider missing
+
+binman bintool-docs
+-------------------
+
+Usage::
+
+ binman bintool-docs [-h]
+
+This outputs documentation for the bintools in rST format. See
+`Bintool Documentation`_.
+
+binman entry-docs
+-----------------
+
+Usage::
+
+ binman entry-docs [-h]
+
+This outputs documentation for the entry types in rST format. See
+`Entry Documentation`_.
+
+binman ls
+---------
+
+Usage::
+
+ binman ls [-h] -i IMAGE [paths ...]
+
+Positional arguments:
+
+paths
+ Paths within file to list (wildcard)
+
+Options:
+
+-h, --help
+ show help message and exit
+
+-i IMAGE, --image IMAGE
+ Image filename to list
+
+This lists an image, showing its contents. See `Listing images`_.
+
+binman extract
+--------------
+
+Usage::
+
+ binman extract [-h] [-F FORMAT] -i IMAGE [-f FILENAME] [-O OUTDIR] [-U]
+ [paths ...]
+
+Positional arguments:
+
+paths
+ Paths within file to extract (wildcard)
+
+Options:
+
+-h, --help
+ show help message and exit
+
+-F FORMAT, --format FORMAT
+ Select an alternative format for extracted data
+
+-i IMAGE, --image IMAGE
+ Image filename to extract
+
+-f FILENAME, --filename FILENAME
+ Output filename to write to
+
+-O OUTDIR, --outdir OUTDIR
+ Path to directory to use for output files
+
+-U, --uncompressed
+ Output raw uncompressed data for compressed entries
+
+This extracts the contents of entries from an image. See
+`Extracting files from images`_.
+
+binman replace
+--------------
+
+Usage::
+
+ binman replace [-h] [-C] -i IMAGE [-f FILENAME] [-F] [-I INDIR] [-m]
+ [paths ...]
+
+Positional arguments:
+
+paths
+ Paths within file to replace (wildcard)
+
+Options:
+
+-h, --help
+ show help message and exit
+
+-C, --compressed
+ Input data is already compressed if needed for the entry
+
+-i IMAGE, --image IMAGE
+ Image filename to update
+
+-f FILENAME, --filename FILENAME
+ Input filename to read from
+
+-F, --fix-size
+ Don't allow entries to be resized
+
+-I INDIR, --indir INDIR
+ Path to directory to use for input files
+
+-m, --map
+ Output a map file for the updated image
+
+-O OUTDIR, --outdir OUTDIR
+ Path to directory to use for intermediate and output files
+
+-p, --preserve
+ Preserve temporary output directory even if option -O is not given
+
+This replaces one or more entries in an existing image. See
+`Replacing files in an image`_.
+
+binman test
+-----------
+
+Usage::
+
+ binman test [-h] [-P PROCESSES] [-T] [-X] [tests ...]
+
+Positional arguments:
+
+tests
+ Test names to run (omit for all)
+
+Options:
+
+-h, --help
+ show help message and exit
+
+-P PROCESSES, --processes PROCESSES
+ set number of processes to use for running tests. This defaults to the
+ number of CPUs on the machine
+
+-T, --test-coverage
+ run tests and check for 100% coverage
+
+-X, --test-preserve-dirs
+    Preserve and display test-created input directories; also preserve the
+    output directory if a single test is run (pass the test name at the end of
+    the command line)
+
+binman sign
+-----------
+
+Usage::
+
+ binman sign [-h] -a ALGO [-f FILE] -i IMAGE -k KEY [paths ...]
+
+Positional arguments:
+
+paths
+ Paths within file to sign (wildcard)
+
+Options:
+
+-h, --help
+ show this help message and exit
+
+-a ALGO, --algo ALGO
+ Hash algorithm e.g. sha256,rsa4096
+
+-f FILE, --file FILE
+ Input filename to sign
+
+-i IMAGE, --image IMAGE
+ Image filename to update
+
+-k KEY, --key KEY
+ Private key file for signing
+
+binman tool
+-----------
+
+Usage::
+
+ binman tool [-h] [-l] [-f] [bintools ...]
+
+Positional arguments:
+
+bintools
+ Bintools to process
+
+Options:
+
+-h, --help
+ show help message and exit
+
+-l, --list
+ List all known bintools
+
+-f, --fetch
+ Fetch a bintool from a known location. Use `all` to fetch all and `missing`
+ to fetch any missing tools.
+
+
+Technical details
+=================
+
+Order of image creation
+-----------------------
+
+Image creation proceeds in the following order, for each entry in the image.
+
+1. AddMissingProperties() - binman can add calculated values to the device
+tree as part of its processing, for example the offset and size of each
+entry. This method adds any properties associated with this, expanding the
+device tree as needed. These properties can have placeholder values which are
+set later by SetCalculatedProperties(). By that stage the size of sections
+cannot be changed (since it would cause the images to need to be repacked),
+but the correct values can be inserted.
+
+2. ProcessFdt() - process the device tree information as required by the
+particular entry. This may involve adding or deleting properties. If the
+processing is complete, this method should return True. If the processing
+cannot complete because it needs the ProcessFdt() method of another entry to
+run first, this method should return False, in which case it will be called
+again later.
+
+3. GetEntryContents() - the contents of each entry are obtained, normally by
+reading from a file. This calls the Entry.ObtainContents() to read the
+contents. The default version of Entry.ObtainContents() calls
+Entry.GetDefaultFilename() and then reads that file. So a common mechanism
+to select a file to read is to override that function in the subclass. The
+functions must return True when they have read the contents. Binman will
+retry calling the functions a few times if False is returned, allowing
+dependencies between the contents of different entries.
+
+4. GetEntryOffsets() - calls Entry.GetOffsets() for each entry. This can
+return a dict containing entries that need updating. The key should be the
+entry name and the value is a tuple (offset, size). This allows an entry to
+provide the offset and size for other entries. The default implementation
+of GetEntryOffsets() returns {}.
+
+5. PackEntries() - calls Entry.Pack() which figures out the offset and
+size of an entry. The 'current' image offset is passed in, and the function
+returns the offset immediately after the entry being packed. The default
+implementation of Pack() is usually sufficient.
+
+Note: for sections, this also checks that the entries do not overlap, nor extend
+outside the section. If the section does not have a defined size, the size is
+set large enough to hold all the entries. For entries that are explicitly marked
+as overlapping, this check is skipped.
+
+6. SetImagePos() - sets the image position of every entry. This is the absolute
+position 'image-pos', as opposed to 'offset' which is relative to the containing
+section. This must be done after all offsets are known, which is why it is quite
+late in the ordering.
+
+7. SetCalculatedProperties() - update any calculated properties in the device
+tree. This sets the correct 'offset' and 'size' values, for example.
+
+8. ProcessEntryContents() - this calls Entry.ProcessContents() on each entry.
+The default implementation does nothing. This can be overridden to adjust the
+contents of an entry in some way. For example, it would be possible to create
+an entry containing a hash of the contents of some other entries. At this
+stage the offset and size of entries should not be adjusted unless absolutely
+necessary, since it requires a repack (going back to PackEntries()).
+
+9. ResetForPack() - if the ProcessEntryContents() step failed, in that an entry
+has changed its size, then there is no alternative but to go back to step 5 and
+try again, repacking the entries with the updated size. ResetForPack() removes
+the fixed offset/size values added by binman, so that the packing can start from
+scratch.
+
+10. WriteSymbols() - write the value of symbols into the U-Boot SPL binary.
+See 'Access to binman entry offsets at run time' below for a description of
+what happens in this stage.
+
+11. BuildImage() - builds the image and writes it to a file
+
+12. WriteMap() - writes a text file containing a map of the image. This is the
+final step.
+
+
+.. _`External tools`:
+
+External tools
+--------------
+
+Binman can make use of external command-line tools to handle processing of
+entry contents or to generate entry contents. These tools are executed using
+the 'tools' module's Run() method. The tools generally must exist on the PATH,
+but the --toolpath option can be used to specify additional search paths to
+use. This option can be specified multiple times to add more than one path.
+
+For some compile tools binman will use the versions specified by commonly-used
+environment variables like CC and HOSTCC for the C compiler, based on whether
+the tool's output will be used for the target or for the host machine. If those
+aren't given, it will also try to derive target-specific versions from the
+CROSS_COMPILE environment variable during a cross-compilation.
+
+If the tool is not available in the path you can use BINMAN_TOOLPATHS to specify
+a space-separated list of paths to search, e.g.::
+
+ BINMAN_TOOLPATHS="/tools/g12a /tools/tegra" binman ...
+
+
+.. _`External blobs`:
+
+External blobs
+--------------
+
+Binary blobs, even if the source code is available, complicate building
+firmware. The instructions can involve multiple steps and the binaries may be
+hard to build or obtain. Binman at least provides a unified description of how
+to build the final image, no matter what steps are needed to get there.
+
+Binman also provides a `blob-ext` entry type that pulls in a binary blob from an
+external file. If the file is missing, binman can optionally complete the build
+and just report a warning. Use the `-M/--allow-missing` option to enable this.
+This is useful in CI systems which want to check that everything is correct but
+don't have access to the blobs.
+
+If the blobs are in a different directory, you can specify this with the `-I`
+option.
+
+For U-Boot, you can set the BINMAN_INDIRS environment variable to provide a
+space-separated list of directories to search for binary blobs::
+
+ BINMAN_INDIRS="odroid-c4/fip/g12a \
+ odroid-c4/build/board/hardkernel/odroidc4/firmware \
+ odroid-c4/build/scp_task" binman ...
+
+Note that binman fails with exit code 103 when there are missing blobs. If you
+wish binman to continue anyway, you can pass `-W` to binman.
+
+
+Code coverage
+-------------
+
+Binman is a critical tool and is designed to be very testable. Entry
+implementations target 100% test coverage. Run 'binman test -T' to check this.
+
+To enable Python test coverage on Debian-type distributions (e.g. Ubuntu)::
+
+ $ sudo apt-get install python-coverage python3-coverage python-pytest
+
+
+Exit status
+-----------
+
+Binman produces the following exit codes:
+
+0
+ Success
+
+1
+ Any sort of failure - see output for more details
+
+103
+ There are missing external blobs or bintools. This is only returned if
+ -M is passed to binman, otherwise missing blobs return an exit status of 1.
+ Note, if -W is passed as well as -M, then this is converted into a warning
+ and will return an exit status of 0 instead.
+
+
+U-Boot environment variables for binman
+---------------------------------------
+
+The U-Boot Makefile supports various environment variables to control binman.
+All of these are set within the Makefile and result in passing various
+environment variables (or make flags) to binman:
+
+BINMAN_DEBUG
+ Enables backtrace debugging by adding a `-D` argument. See
+ :ref:`BinmanLogging`.
+
+BINMAN_INDIRS
+ Sets the search path for input files used by binman by adding one or more
+ `-I` arguments. See :ref:`External blobs`.
+
+BINMAN_TOOLPATHS
+    Sets the search path for external tools used by binman by adding one or
+    more `--toolpath` arguments. See :ref:`External tools`.
+
+BINMAN_VERBOSE
+ Sets the logging verbosity of binman by adding a `-v` argument. See
+ :ref:`BinmanLogging`.
+
+
+Error messages
+--------------
+
+This section provides some guidance for some of the less obvious error messages
+produced by binman.
+
+
+Expected __bss_size symbol
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Example::
+
+ binman: Node '/binman/u-boot-spl-ddr/u-boot-spl/u-boot-spl-bss-pad':
+ Expected __bss_size symbol in spl/u-boot-spl
+
+This indicates that binman needs the `__bss_size` symbol to be defined in the
+SPL binary, where `spl/u-boot-spl` is the ELF file containing the symbols. The
+symbol tells binman the size of the BSS region, in bytes. It needs this to be
+able to pad the image so that the following entries do not overlap the BSS,
+which would cause them to be overwritten by variable access in SPL.
+
+This symbol is normally defined in the linker script, immediately after
+__bss_start and __bss_end are defined, like this::
+
+ __bss_size = __bss_end - __bss_start;
+
+You may need to add it to your linker script if you get this error.
+
+
+Concurrent tests
+----------------
+
+Binman tries to run tests concurrently. This means that the tests make use of
+all available CPUs to run.
+
+To enable this::
+
+ $ sudo apt-get install python-subunit python3-subunit
+
+Use '-P 1' to disable this. It is automatically disabled when code coverage is
+being used (-T) since they are incompatible.
+
+
+Debugging tests
+---------------
+
+Sometimes when debugging tests it is useful to keep the input and output
+directories so they can be examined later. Use -X or --test-preserve-dirs for
+this.
+
+
+Running tests on non-x86 architectures
+--------------------------------------
+
+Binman's tests have been written under the assumption that they'll be run on an
+x86-like host and there hasn't been an attempt to make them portable yet.
+However, it's possible to run the tests by cross-compiling to x86.
+
+To install an x86 cross-compiler on Debian-type distributions (e.g. Ubuntu)::
+
+ $ sudo apt-get install gcc-x86-64-linux-gnu
+
+Then, you can run the tests under cross-compilation::
+
+ $ CROSS_COMPILE=x86_64-linux-gnu- binman test -T
+
+You can also use gcc-i686-linux-gnu similar to the above.
+
+
+Writing new entries and debugging
+---------------------------------
+
+The behaviour of entries is defined by the Entry class. All other entries are
+a subclass of this. An important subclass is Entry_blob which takes binary
+data from a file and places it in the entry. In fact most entry types are
+subclasses of Entry_blob.
+
+Each entry type is a separate file in the tools/binman/etype directory. Each
+file contains a class called Entry_<type> where <type> is the entry type.
+New entry types can be supported by adding new files in that directory.
+These will automatically be detected by binman when needed.
+
+Entry properties are documented in entry.py. The entry subclasses are free
+to change the values of properties to support special behaviour. For example,
+when Entry_blob loads a file, it sets content_size to the size of the file.
+Entry classes can adjust other entries. For example, an entry that knows
+where other entries should be positioned can set up those entries' offsets
+so they don't need to be set in the binman description. It can also adjust
+entry contents.
+
+Most of the time such esoteric behaviour is not needed, but it can be
+essential for complex images.
+
+If you need to specify a particular device-tree compiler to use, you can define
+the DTC environment variable. This can be useful when the system dtc is too
+old.
+
+To enable a full backtrace and other debugging features in binman, pass
+BINMAN_DEBUG=1 to your build::
+
+ make qemu-x86_defconfig
+ make BINMAN_DEBUG=1
+
+To enable verbose logging from binman, pass BINMAN_VERBOSE to your build, which
+adds a -v<level> option to the call to binman::
+
+ make qemu-x86_defconfig
+ make BINMAN_VERBOSE=5
+
+
+Building sections in parallel
+-----------------------------
+
+By default binman uses multiprocessing to speed up compilation of large images.
+This works at a section level, with one thread for each entry in the section.
+This can speed things up if the entries are large and use compression.
+
+The number of threads is set with the '-T' flag, which defaults to a suitable
+value for your machine. This depends on the Python version, e.g. on v3.8 it
+uses 12 threads on an 8-core machine. See ConcurrentFutures_ for more details.
+
+The special value -T0 selects single-threaded mode, useful for debugging during
+development, since dealing with exceptions and problems in threads is more
+difficult. This avoids any use of ThreadPoolExecutor.
+
+
+Collecting data for an entry type
+---------------------------------
+
+Some entry types deal with data obtained from others. For example,
+`Entry_mkimage` calls the `mkimage` tool with data from its subnodes::
+
+ mkimage {
+ args = "-n test -T script";
+
+ u-boot-spl {
+ };
+
+ u-boot {
+ };
+ };
+
+This shows mkimage being passed a file consisting of SPL and U-Boot proper. It
+is created by calling `Entry.collect_contents_to_file()`. Note that in this
+case, the data is passed to mkimage for processing but does not appear
+separately in the image. It may not appear at all, depending on what mkimage
+does. The contents of the `mkimage` entry are entirely dependent on the
+processing done by the entry, with the provided subnodes (`u-boot-spl` and
+`u-boot`) simply providing the input data for that processing.
+
+Note that `Entry.collect_contents_to_file()` simply concatenates the data from
+the different entries together, with no control over alignment, etc. Another
+approach is to subclass `Entry_section` so that those features become available,
+such as `size` and `pad-byte`. Then the contents of the entry can be obtained by
+calling `super().BuildSectionData()` in the entry's BuildSectionData()
+implementation to get the input data, then write it to a file and process it
+however is desired.
+
+There are other ways to obtain data also, depending on the situation. If the
+entry type is simply signing data which exists elsewhere in the image, then
+you can use `Entry_collection` as a base class. It lets you use a property
+called `content` which lists the entries containing data to be processed. This
+is used by `Entry_vblock`, for example::
+
+ u_boot: u-boot {
+ };
+
+ vblock {
+ content = <&u_boot &dtb>;
+ keyblock = "firmware.keyblock";
+ signprivate = "firmware_data_key.vbprivk";
+ version = <1>;
+ kernelkey = "kernel_subkey.vbpubk";
+ preamble-flags = <1>;
+ };
+
+ dtb: u-boot-dtb {
+ };
+
+which shows an image containing `u-boot` and `u-boot-dtb`, with the `vblock`
+image collecting their contents to produce input for its signing process,
+without affecting those entries, which still appear in the final image
+untouched.
+
+Another example is where an entry type needs several independent pieces of input
+to function. For example, `Entry_fip` allows a number of different binary blobs
+to be placed in their own individual places in a custom data structure in the
+output image. To make that work you can add subnodes for each of them and call
+`Entry.Create()` on each subnode, as `Entry_fip` does. Then the data for each
+blob can come from any suitable place, such as an `Entry_u_boot` or an
+`Entry_blob` or anything else::
+
+ atf-fip {
+ fip-hdr-flags = /bits/ 64 <0x123>;
+ soc-fw {
+ fip-flags = /bits/ 64 <0x123456789abcdef>;
+ filename = "bl31.bin";
+ };
+
+ u-boot {
+ fip-uuid = [fc 65 13 92 4a 5b 11 ec
+ 94 35 ff 2d 1c fc 79 9c];
+ };
+ };
+
+The `soc-fw` node is a `blob-ext` (i.e. it reads in a named binary file) whereas
+`u-boot` is a normal entry type. This works because `Entry_fip` selects the
+`blob-ext` entry type if the node name (here `soc-fw`) is recognised as being
+a known blob type.
+
+When adding new entry types you are encouraged to use subnodes to provide the
+data for processing, unless the `content` approach is more suitable. Consider
+whether the input entries are contained within (or consumed by) the entry, vs
+just being 'referenced' by the entry. In the latter case, the `content` approach
+makes more sense. Ad-hoc properties and other methods of obtaining data are
+discouraged, since it adds to confusion for users.
+
+History / Credits
+-----------------
+
+Binman takes a lot of inspiration from a Chrome OS tool called
+'cros_bundle_firmware', which I wrote some years ago. That tool was based on
+a reasonably simple and sound design but has expanded greatly over the
+years. In particular its handling of x86 images is convoluted.
+
+Quite a few lessons have been learned which are hopefully applied here.
+
+
+Design notes
+------------
+
+On the face of it, a tool to create firmware images should be fairly simple:
+just find all the input binaries and place them at the right place in the
+image. The difficulty comes from the wide variety of input types (simple
+flat binaries containing code, packaged data with various headers), packing
+requirements (alignment, spacing, device boundaries) and other required
+features such as hierarchical images.
+
+The design challenge is to make it easy to create simple images, while
+allowing the more complex cases to be supported. For example, for most
+images we don't much care exactly where each binary ends up, so we should
+not have to specify that unnecessarily.
+
+New entry types should aim to provide simple usage where possible. If new
+core features are needed, they can be added in the Entry base class.
+
+
+To do
+-----
+
+Some ideas:
+
+- Use of-platdata to make the information available to code that is unable
+ to use device tree (such as a very small SPL image). For now, limited info is
+ available via linker symbols
+- Allow easy building of images by specifying just the board name
+- Support building an image for a board (-b) more completely, with a
+ configurable build directory
+- Detect invalid properties in nodes
+- Sort the fdtmap by offset
+- Output temporary files to a different directory
+- Rationalise the fdt, fdt_util and pylibfdt modules which currently have some
+ overlapping and confusing functionality
+- Update the fdt library to use a better format for Prop.value (the current one
+ is useful for dtoc but not much else)
+- Figure out how to make Fdt support changing the node order, so that
+ Node.AddSubnode() can support adding a node before another, existing node.
+ Perhaps it should completely regenerate the flat tree?
+- Support images which depend on each other
+
+--
+Simon Glass <sjg@chromium.org>
+7/7/2016
+
+.. _ConcurrentFutures: https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor
diff --git a/tools/binman/bintool.py b/tools/binman/bintool.py
new file mode 100644
index 00000000000..3c4ad1adbb9
--- /dev/null
+++ b/tools/binman/bintool.py
@@ -0,0 +1,587 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2022 Google LLC
+# Copyright (C) 2022 Weidmüller Interface GmbH & Co. KG
+# Stefan Herbrechtsmeier <stefan.herbrechtsmeier@weidmueller.com>
+#
+"""Base class for all bintools
+
+This defines the common functionality for all bintools, including running
+the tool, checking its version and fetching it if needed.
+"""
+
+import collections
+import glob
+import importlib
+import multiprocessing
+import os
+import re
+import shutil
+import tempfile
+import urllib.error
+
+from u_boot_pylib import command
+from u_boot_pylib import terminal
+from u_boot_pylib import tools
+from u_boot_pylib import tout
+
+BINMAN_DIR = os.path.dirname(os.path.realpath(__file__))
+
+# Format string for listing bintools, see also the header in list_all()
+FORMAT = '%-16.16s %-12.12s %-26.26s %s'
+
+# List of known modules, to avoid importing the module multiple times
+modules = {}
+
+# Possible ways of fetching a tool (FETCH_COUNT is number of ways)
+FETCH_ANY, FETCH_BIN, FETCH_BUILD, FETCH_COUNT = range(4)
+
+FETCH_NAMES = {
+ FETCH_ANY: 'any method',
+ FETCH_BIN: 'binary download',
+ FETCH_BUILD: 'build from source'
+ }
+
+# Status of tool fetching
+FETCHED, FAIL, PRESENT, STATUS_COUNT = range(4)
+
+class Bintool:
+ """Tool which operates on binaries to help produce entry contents
+
+ This is the base class for all bintools
+ """
+ # List of bintools to regard as missing
+ missing_list = []
+
+    # Directory to store tools. Note that this is set up by set_tool_dir(),
+    # which must be called before this class is used.
+ tooldir = ''
+
+ def __init__(self, name, desc, version_regex=None, version_args='-V'):
+ self.name = name
+ self.desc = desc
+ self.version_regex = version_regex
+ self.version_args = version_args
+
+ @staticmethod
+ def find_bintool_class(btype):
+ """Look up the bintool class for bintool
+
+ Args:
+ byte: Bintool to use, e.g. 'mkimage'
+
+ Returns:
+ The bintool class object if found, else a tuple:
+ module name that could not be found
+ exception received
+ """
+ # Convert something like 'u-boot' to 'u_boot' since we are only
+ # interested in the type.
+ module_name = btype.replace('-', '_')
+ module = modules.get(module_name)
+ class_name = f'Bintool{module_name}'
+
+ # Import the module if we have not already done so
+ if not module:
+ try:
+ module = importlib.import_module('binman.btool.' + module_name)
+ except ImportError as exc:
+ try:
+ # Deal with classes which must be renamed due to conflicts
+ # with Python libraries
+ module = importlib.import_module('binman.btool.btool_' +
+ module_name)
+ except ImportError:
+ return module_name, exc
+ modules[module_name] = module
+
+ # Look up the expected class name
+ return getattr(module, class_name)
+
+ @staticmethod
+ def create(name):
+ """Create a new bintool object
+
+ Args:
+ name (str): Bintool to create, e.g. 'mkimage'
+
+ Returns:
+            A new object of the correct type (a subclass of Bintool)
+ """
+ cls = Bintool.find_bintool_class(name)
+ if isinstance(cls, tuple):
+ raise ValueError("Cannot import bintool module '%s': %s" % cls)
+
+ # Call its constructor to get the object we want.
+ obj = cls(name)
+ return obj
+
+ @classmethod
+ def set_tool_dir(cls, pathname):
+ """Set the path to use to store and find tools"""
+ cls.tooldir = pathname
+
+ def show(self):
+ """Show a line of information about a bintool"""
+ if self.is_present():
+ version = self.version()
+ else:
+ version = '-'
+ print(FORMAT % (self.name, version, self.desc,
+ self.get_path() or '(not found)'))
+
+ @classmethod
+    def set_missing_list(cls, missing_list):
+        """Set the list of bintools to treat as missing"""
+        cls.missing_list = missing_list or []
+
+ @staticmethod
+ def get_tool_list(include_testing=False):
+ """Get a list of the known tools
+
+        Args:
+            include_testing (bool): True to include the '_testing' bintool
+
+ Returns:
+ list of str: names of all tools known to binman
+ """
+ files = glob.glob(os.path.join(BINMAN_DIR, 'btool/*'))
+ names = [os.path.splitext(os.path.basename(fname))[0]
+ for fname in files]
+ names = [name for name in names if name[0] != '_']
+ names = [name[6:] if name.startswith('btool_') else name
+ for name in names]
+ if include_testing:
+ names.append('_testing')
+ return sorted(names)
+
+ @staticmethod
+ def list_all():
+ """List all the bintools known to binman"""
+ names = Bintool.get_tool_list()
+ print(FORMAT % ('Name', 'Version', 'Description', 'Path'))
+ print(FORMAT % ('-' * 15,'-' * 11, '-' * 25, '-' * 30))
+ for name in names:
+ btool = Bintool.create(name)
+ btool.show()
+
+ def is_present(self):
+ """Check if a bintool is available on the system
+
+ Returns:
+ bool: True if available, False if not
+ """
+ if self.name in self.missing_list:
+ return False
+ return bool(self.get_path())
+
+ def get_path(self):
+ """Get the path of a bintool
+
+ Returns:
+ str: Path to the tool, if available, else None
+ """
+ return tools.tool_find(self.name)
+
+ def fetch_tool(self, method, col, skip_present):
+ """Fetch a single tool
+
+ Args:
+ method (FETCH_...): Method to use
+ col (terminal.Color): Color terminal object
+            skip_present (bool): Skip fetching if the tool is already present
+
+        Returns:
+            int: Result of the fetch: FETCHED, FAIL or PRESENT
+ """
+ def try_fetch(meth):
+ res = None
+ try:
+ res = self.fetch(meth)
+ except urllib.error.URLError as uerr:
+ message = uerr.reason
+ print(col.build(col.RED, f'- {message}'))
+
+ except ValueError as exc:
+ print(f'Exception: {exc}')
+ return res
+
+ if skip_present and self.is_present():
+ return PRESENT
+ print(col.build(col.YELLOW, 'Fetch: %s' % self.name))
+ if method == FETCH_ANY:
+ for try_method in range(1, FETCH_COUNT):
+ print(f'- trying method: {FETCH_NAMES[try_method]}')
+ result = try_fetch(try_method)
+ if result:
+ break
+ else:
+ result = try_fetch(method)
+ if not result:
+ return FAIL
+ if result is not True:
+ fname, tmpdir = result
+ dest = os.path.join(self.tooldir, self.name)
+ os.makedirs(self.tooldir, exist_ok=True)
+ print(f"- writing to '{dest}'")
+ shutil.move(fname, dest)
+ if tmpdir:
+ shutil.rmtree(tmpdir)
+ return FETCHED
+
+ @staticmethod
+ def fetch_tools(method, names_to_fetch):
+ """Fetch bintools from a suitable place
+
+ This fetches or builds the requested bintools so that they can be used
+ by binman
+
+        Args:
+            method (FETCH_...): Method to use
+            names_to_fetch (list of str): Names of bintools to fetch
+
+ Returns:
+ True on success, False on failure
+ """
+ def show_status(color, prompt, names):
+ print(col.build(
+ color, f'{prompt}:%s{len(names):2}: %s' %
+ (' ' * (16 - len(prompt)), ' '.join(names))))
+
+ col = terminal.Color()
+ skip_present = False
+ name_list = names_to_fetch
+ if len(names_to_fetch) == 1 and names_to_fetch[0] in ['all', 'missing']:
+ name_list = Bintool.get_tool_list()
+ if names_to_fetch[0] == 'missing':
+ skip_present = True
+ print(col.build(col.YELLOW,
+ 'Fetching tools: %s' % ' '.join(name_list)))
+ status = collections.defaultdict(list)
+ for name in name_list:
+ btool = Bintool.create(name)
+ result = btool.fetch_tool(method, col, skip_present)
+ status[result].append(name)
+ if result == FAIL:
+ if method == FETCH_ANY:
+ print('- failed to fetch with all methods')
+ else:
+ print(f"- method '{FETCH_NAMES[method]}' is not supported")
+
+ if len(name_list) > 1:
+ if skip_present:
+ show_status(col.GREEN, 'Already present', status[PRESENT])
+ show_status(col.GREEN, 'Tools fetched', status[FETCHED])
+ if status[FAIL]:
+ show_status(col.RED, 'Failures', status[FAIL])
+ return not status[FAIL]
+
+ def run_cmd_result(self, *args, binary=False, raise_on_error=True):
+ """Run the bintool using command-line arguments
+
+ Args:
+ args (list of str): Arguments to provide, in addition to the bintool
+ name
+ binary (bool): True to return output as bytes instead of str
+ raise_on_error (bool): True to raise a ValueError exception if the
+ tool returns a non-zero return code
+
+ Returns:
+ CommandResult: Resulting output from the bintool, or None if the
+ tool is not present
+ """
+ if self.name in self.missing_list:
+ return None
+ name = os.path.expanduser(self.name) # Expand paths containing ~
+ all_args = (name,) + args
+ env = tools.get_env_with_path()
+ tout.debug(f"bintool: {' '.join(all_args)}")
+ result = command.run_pipe(
+ [all_args], capture=True, capture_stderr=True, env=env,
+ raise_on_error=False, binary=binary)
+
+ if result.return_code:
+ # Return None if the tool was not found. In this case there is no
+ # output from the tool and it does not appear on the path. We still
+            # try to run it (as above) since run_pipe() allows faking the tool's
+ # output
+ if not any([result.stdout, result.stderr, tools.tool_find(name)]):
+ tout.info(f"bintool '{name}' not found")
+ return None
+ if raise_on_error:
+ tout.info(f"bintool '{name}' failed")
+ raise ValueError("Error %d running '%s': %s" %
+ (result.return_code, ' '.join(all_args),
+ result.stderr or result.stdout))
+ if result.stdout:
+ tout.debug(result.stdout)
+ if result.stderr:
+ tout.debug(result.stderr)
+ return result
+
+ def run_cmd(self, *args, binary=False):
+ """Run the bintool using command-line arguments
+
+ Args:
+ args (list of str): Arguments to provide, in addition to the bintool
+ name
+ binary (bool): True to return output as bytes instead of str
+
+ Returns:
+ str or bytes: Resulting stdout from the bintool
+ """
+ result = self.run_cmd_result(*args, binary=binary)
+ if result:
+ return result.stdout
+
+ @classmethod
+ def build_from_git(cls, git_repo, make_targets, bintool_path, flags=None):
+ """Build a bintool from a git repo
+
+ This clones the repo in a temporary directory, builds it with 'make',
+ then returns the filename of the resulting executable bintool
+
+ Args:
+ git_repo (str): URL of git repo
+ make_targets (list of str): List of targets to pass to 'make' to build
+ the tool
+ bintool_path (str): Relative path of the tool in the repo, after
+ build is complete
+ flags (list of str): Flags or variables to pass to make, or None
+
+ Returns:
+ tuple:
+ str: Filename of fetched file to copy to a suitable directory
+ str: Name of temp directory to remove, or None
+ or None on error
+ """
+ tmpdir = tempfile.mkdtemp(prefix='binmanf.')
+ print(f"- clone git repo '{git_repo}' to '{tmpdir}'")
+ tools.run('git', 'clone', '--depth', '1', git_repo, tmpdir)
+ for target in make_targets:
+ print(f"- build target '{target}'")
+ cmd = ['make', '-C', tmpdir, '-j', f'{multiprocessing.cpu_count()}',
+ target]
+ if flags:
+ cmd += flags
+ tools.run(*cmd)
+
+ fname = os.path.join(tmpdir, bintool_path)
+ if not os.path.exists(fname):
+ print(f"- File '{fname}' was not produced")
+ return None
+ return fname, tmpdir
+
+ @classmethod
+ def fetch_from_url(cls, url):
+ """Fetch a bintool from a URL
+
+ Args:
+ url (str): URL to fetch from
+
+ Returns:
+ tuple:
+ str: Filename of fetched file to copy to a suitable directory
+ str: Name of temp directory to remove, or None
+ """
+ fname, tmpdir = tools.download(url)
+ tools.run('chmod', 'a+x', fname)
+ return fname, tmpdir
+
+ @classmethod
+ def fetch_from_drive(cls, drive_id):
+ """Fetch a bintool from Google drive
+
+ Args:
+ drive_id (str): ID of file to fetch. For a URL of the form
+ 'https://drive.google.com/file/d/xxx/view?usp=sharing' the value
+ passed here should be 'xxx'
+
+ Returns:
+ tuple:
+ str: Filename of fetched file to copy to a suitable directory
+ str: Name of temp directory to remove, or None
+ """
+ url = f'https://drive.google.com/uc?export=download&id={drive_id}'
+ return cls.fetch_from_url(url)
+
+ @classmethod
+ def apt_install(cls, package):
+ """Install a bintool using the 'apt' tool
+
+        This requires use of sudo so may request a password
+
+ Args:
+ package (str): Name of package to install
+
+ Returns:
+ True, assuming it completes without error
+ """
+ args = ['sudo', 'apt', 'install', '-y', package]
+ print('- %s' % ' '.join(args))
+ tools.run(*args)
+ return True
+
+ @staticmethod
+ def WriteDocs(modules, test_missing=None):
+ """Write out documentation about the various bintools to stdout
+
+ Args:
+ modules: List of modules to include
+ test_missing: Used for testing. This is a module to report
+ as missing
+ """
+ print('''.. SPDX-License-Identifier: GPL-2.0+
+
+Binman bintool Documentation
+============================
+
+This file describes the bintools (binary tools) supported by binman. Bintools
+are binman's name for external executables that it runs to generate or process
+binaries. It is fairly easy to create new bintools. Just add a new file to the
+'btool' directory. You can use existing bintools as examples.
+
+
+''')
+ modules = sorted(modules)
+ missing = []
+ for name in modules:
+ module = Bintool.find_bintool_class(name)
+ docs = getattr(module, '__doc__')
+ if test_missing == name:
+ docs = None
+ if docs:
+ lines = docs.splitlines()
+ first_line = lines[0]
+ rest = [line[4:] for line in lines[1:]]
+ hdr = 'Bintool: %s: %s' % (name, first_line)
+ print(hdr)
+ print('-' * len(hdr))
+ print('\n'.join(rest))
+ print()
+ print()
+ else:
+ missing.append(name)
+
+ if missing:
+ raise ValueError('Documentation is missing for modules: %s' %
+ ', '.join(missing))
+
+ # pylint: disable=W0613
+ def fetch(self, method):
+ """Fetch handler for a bintool
+
+        This should be implemented by subclasses
+
+ Args:
+ method (FETCH_...): Method to use
+
+ Returns:
+ tuple:
+ str: Filename of fetched file to copy to a suitable directory
+ str: Name of temp directory to remove, or None
+ or True if the file was fetched and already installed
+ or None if no fetch() implementation is available
+
+ Raises:
+            ValueError: Fetching could not be completed
+ """
+ print(f"No method to fetch bintool '{self.name}'")
+ return False
+
+ def version(self):
+ """Version handler for a bintool
+
+ Returns:
+ str: Version string for this bintool
+ """
+ if self.version_regex is None:
+ return 'unknown'
+
+ result = self.run_cmd_result(self.version_args)
+ out = result.stdout.strip()
+ if not out:
+ out = result.stderr.strip()
+ if not out:
+ return 'unknown'
+
+ m_version = re.search(self.version_regex, out)
+ return m_version.group(1) if m_version else out
+
+
+class BintoolPacker(Bintool):
+ """Tool which compression / decompression entry contents
+
+ This is a bintools base class for compression / decompression packer
+
+ Properties:
+ name: Name of packer tool
+ compression: Compression type (COMPRESS_...), value of 'name' property
+ if none
+ compress_args: List of positional args provided to tool for compress,
+ ['--compress'] if none
+ decompress_args: List of positional args provided to tool for
+ decompress, ['--decompress'] if none
+        fetch_package: Name of the package to install using apt, value of 'name'
+            property if none
+        version_regex: Regular expression to extract the version from the tool's
+ version output, '(v[0-9.]+)' if none
+ """
+ def __init__(self, name, compression=None, compress_args=None,
+ decompress_args=None, fetch_package=None,
+ version_regex=r'(v[0-9.]+)', version_args='-V'):
+ desc = '%s compression' % (compression if compression else name)
+ super().__init__(name, desc, version_regex, version_args)
+ if compress_args is None:
+ compress_args = ['--compress']
+ self.compress_args = compress_args
+ if decompress_args is None:
+ decompress_args = ['--decompress']
+ self.decompress_args = decompress_args
+ if fetch_package is None:
+ fetch_package = name
+ self.fetch_package = fetch_package
+
+ def compress(self, indata):
+ """Compress data
+
+ Args:
+ indata (bytes): Data to compress
+
+ Returns:
+ bytes: Compressed data
+ """
+ with tempfile.NamedTemporaryFile(prefix='comp.tmp',
+ dir=tools.get_output_dir()) as tmp:
+ tools.write_file(tmp.name, indata)
+ args = self.compress_args + ['--stdout', tmp.name]
+ return self.run_cmd(*args, binary=True)
+
+ def decompress(self, indata):
+ """Decompress data
+
+ Args:
+ indata (bytes): Data to decompress
+
+ Returns:
+ bytes: Decompressed data
+ """
+ with tempfile.NamedTemporaryFile(prefix='decomp.tmp',
+ dir=tools.get_output_dir()) as inf:
+ tools.write_file(inf.name, indata)
+ args = self.decompress_args + ['--stdout', inf.name]
+ return self.run_cmd(*args, binary=True)
+
+ def fetch(self, method):
+ """Fetch handler
+
+        This installs the relevant package using the apt utility.
+
+ Args:
+ method (FETCH_...): Method to use
+
+ Returns:
+ True if the file was fetched and now installed, None if a method
+ other than FETCH_BIN was requested
+
+ Raises:
+            ValueError: Fetching could not be completed
+ """
+ if method != FETCH_BIN:
+ return None
+ return self.apt_install(self.fetch_package)
diff --git a/tools/binman/bintool_test.py b/tools/binman/bintool_test.py
new file mode 100644
index 00000000000..f9b16d4c73b
--- /dev/null
+++ b/tools/binman/bintool_test.py
@@ -0,0 +1,358 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2022 Google LLC
+# Written by Simon Glass <sjg@chromium.org>
+#
+
+"""Tests for the Bintool class"""
+
+import collections
+import os
+import shutil
+import tempfile
+import unittest
+import unittest.mock
+import urllib.error
+
+from binman import bintool
+from binman.bintool import Bintool
+
+from u_boot_pylib import command
+from u_boot_pylib import terminal
+from u_boot_pylib import test_util
+from u_boot_pylib import tools
+
+# pylint: disable=R0904
+class TestBintool(unittest.TestCase):
+ """Tests for the Bintool class"""
+ def setUp(self):
+ # Create a temporary directory for test files
+ self._indir = tempfile.mkdtemp(prefix='bintool.')
+ self.seq = None
+ self.count = None
+ self.fname = None
+ self.btools = None
+
+ def tearDown(self):
+ """Remove the temporary input directory and its contents"""
+ if self._indir:
+ shutil.rmtree(self._indir)
+ self._indir = None
+
+ def test_missing_btype(self):
+ """Test that unknown bintool types are detected"""
+ with self.assertRaises(ValueError) as exc:
+ Bintool.create('missing')
+ self.assertIn("No module named 'binman.btool.missing'",
+ str(exc.exception))
+
+ def test_fresh_bintool(self):
+ """Check that the _testing bintool is not cached"""
+ btest = Bintool.create('_testing')
+ btest.present = True
+ btest2 = Bintool.create('_testing')
+ self.assertFalse(btest2.present)
+
+ def test_version(self):
+ """Check handling of a tool being present or absent"""
+ btest = Bintool.create('_testing')
+ with test_util.capture_sys_output() as (stdout, _):
+ btest.show()
+ self.assertFalse(btest.is_present())
+ self.assertIn('-', stdout.getvalue())
+ btest.present = True
+ self.assertTrue(btest.is_present())
+ self.assertEqual('123', btest.version())
+ with test_util.capture_sys_output() as (stdout, _):
+ btest.show()
+ self.assertIn('123', stdout.getvalue())
+
+ def test_fetch_present(self):
+ """Test fetching of a tool"""
+ btest = Bintool.create('_testing')
+ btest.present = True
+ col = terminal.Color()
+ self.assertEqual(bintool.PRESENT,
+ btest.fetch_tool(bintool.FETCH_ANY, col, True))
+
+ @classmethod
+ def check_fetch_url(cls, fake_download, method):
+ """Check the output from fetching a tool
+
+ Args:
+ fake_download (function): Function to call instead of
+ tools.download()
+ method (bintool.FETCH_...: Fetch method to use
+
+ Returns:
+ str: Contents of stdout
+ """
+ btest = Bintool.create('_testing')
+ col = terminal.Color()
+ with unittest.mock.patch.object(tools, 'download',
+ side_effect=fake_download):
+ with test_util.capture_sys_output() as (stdout, _):
+ btest.fetch_tool(method, col, False)
+ return stdout.getvalue()
+
+ def test_fetch_url_err(self):
+ """Test an error while fetching a tool from a URL"""
+ def fail_download(url):
+ """Take the tools.download() function by raising an exception"""
+ raise urllib.error.URLError('my error')
+
+ stdout = self.check_fetch_url(fail_download, bintool.FETCH_ANY)
+ self.assertIn('my error', stdout)
+
+ def test_fetch_url_exception(self):
+ """Test an exception while fetching a tool from a URL"""
+ def cause_exc(url):
+ raise ValueError('exc error')
+
+ stdout = self.check_fetch_url(cause_exc, bintool.FETCH_ANY)
+ self.assertIn('exc error', stdout)
+
+ def test_fetch_method(self):
+ """Test fetching using a particular method"""
+ def fail_download(url):
+ """Take the tools.download() function by raising an exception"""
+ raise urllib.error.URLError('my error')
+
+ stdout = self.check_fetch_url(fail_download, bintool.FETCH_BIN)
+ self.assertIn('my error', stdout)
+
+ def test_fetch_pass_fail(self):
+ """Test fetching multiple tools with some passing and some failing"""
+ def handle_download(_):
+ """Take the tools.download() function by writing a file"""
+ if self.seq:
+ raise urllib.error.URLError('not found')
+ self.seq += 1
+ tools.write_file(fname, expected)
+ return fname, dirname
+
+ expected = b'this is a test'
+ dirname = os.path.join(self._indir, 'download_dir')
+ os.mkdir(dirname)
+ fname = os.path.join(dirname, 'downloaded')
+
+ # Rely on bintool to create this directory
+ destdir = os.path.join(self._indir, 'dest_dir')
+
+ dest_fname = os.path.join(destdir, '_testing')
+ self.seq = 0
+
+ with unittest.mock.patch.object(bintool.Bintool, 'tooldir', destdir):
+ with unittest.mock.patch.object(tools, 'download',
+ side_effect=handle_download):
+ with test_util.capture_sys_output() as (stdout, _):
+ Bintool.fetch_tools(bintool.FETCH_ANY, ['_testing'] * 2)
+ self.assertTrue(os.path.exists(dest_fname))
+ data = tools.read_file(dest_fname)
+ self.assertEqual(expected, data)
+
+ lines = stdout.getvalue().splitlines()
+ self.assertTrue(len(lines) > 2)
+ self.assertEqual('Tools fetched: 1: _testing', lines[-2])
+ self.assertEqual('Failures: 1: _testing', lines[-1])
+
+ def test_tool_list(self):
+ """Test listing available tools"""
+ self.assertGreater(len(Bintool.get_tool_list()), 3)
+
+ def check_fetch_all(self, method):
+ """Helper to check the operation of fetching all tools"""
+
+ # pylint: disable=W0613
+ def fake_fetch(method, col, skip_present):
+ """Fakes the Binutils.fetch() function
+
+ Returns FETCHED and FAIL on alternate calls
+ """
+ self.seq += 1
+ result = bintool.FETCHED if self.seq & 1 else bintool.FAIL
+ self.count[result] += 1
+ return result
+
+ self.seq = 0
+ self.count = collections.defaultdict(int)
+ with unittest.mock.patch.object(bintool.Bintool, 'fetch_tool',
+ side_effect=fake_fetch):
+ with test_util.capture_sys_output() as (stdout, _):
+ Bintool.fetch_tools(method, ['all'])
+ lines = stdout.getvalue().splitlines()
+ self.assertIn(f'{self.count[bintool.FETCHED]}: ', lines[-2])
+ self.assertIn(f'{self.count[bintool.FAIL]}: ', lines[-1])
+
+ def test_fetch_all(self):
+ """Test fetching all tools"""
+ self.check_fetch_all(bintool.FETCH_ANY)
+
+ def test_fetch_all_specific(self):
+ """Test fetching all tools with a specific method"""
+ self.check_fetch_all(bintool.FETCH_BIN)
+
+ def test_fetch_missing(self):
+ """Test fetching missing tools"""
+ # pylint: disable=W0613
+ def fake_fetch2(method, col, skip_present):
+ """Fakes the Binutils.fetch() function
+
+ Returns PRESENT only for the '_testing' bintool
+ """
+ btool = list(self.btools.values())[self.seq]
+ self.seq += 1
+ print('fetch', btool.name)
+ if btool.name == '_testing':
+ return bintool.PRESENT
+ return bintool.FETCHED
+
+ # Preload a list of tools to return when get_tool_list() and create()
+ # are called
+ all_tools = Bintool.get_tool_list(True)
+ self.btools = collections.OrderedDict()
+ for name in all_tools:
+ self.btools[name] = Bintool.create(name)
+ self.seq = 0
+ with unittest.mock.patch.object(bintool.Bintool, 'fetch_tool',
+ side_effect=fake_fetch2):
+ with unittest.mock.patch.object(bintool.Bintool,
+ 'get_tool_list',
+ side_effect=[all_tools]):
+ with unittest.mock.patch.object(bintool.Bintool, 'create',
+ side_effect=self.btools.values()):
+ with test_util.capture_sys_output() as (stdout, _):
+ Bintool.fetch_tools(bintool.FETCH_ANY, ['missing'])
+ lines = stdout.getvalue().splitlines()
+ num_tools = len(self.btools)
+ fetched = [line for line in lines if 'Tools fetched:' in line].pop()
+ present = [line for line in lines if 'Already present:' in line].pop()
+ self.assertIn(f'{num_tools - 1}: ', fetched)
+ self.assertIn('1: ', present)
+
+ def check_build_method(self, write_file):
+ """Check the output from fetching using the BUILD method
+
+ Args:
+ write_file (bool): True to write the output file when 'make' is
+ called
+
+ Returns:
+ tuple:
+ str: Filename of written file (or missing 'make' output)
+ str: Contents of stdout
+ """
+ def fake_run(*cmd):
+ if cmd[0] == 'make':
+ # See Bintool.build_from_git()
+ tmpdir = cmd[2]
+ self.fname = os.path.join(tmpdir, 'pathname')
+ if write_file:
+ tools.write_file(self.fname, b'hello')
+
+ btest = Bintool.create('_testing')
+ col = terminal.Color()
+ self.fname = None
+ with unittest.mock.patch.object(bintool.Bintool, 'tooldir',
+ self._indir):
+ with unittest.mock.patch.object(tools, 'run', side_effect=fake_run):
+ with test_util.capture_sys_output() as (stdout, _):
+ btest.fetch_tool(bintool.FETCH_BUILD, col, False)
+ fname = os.path.join(self._indir, '_testing')
+ return fname if write_file else self.fname, stdout.getvalue()
+
+ def test_build_method(self):
+ """Test fetching using the build method"""
+ fname, stdout = self.check_build_method(write_file=True)
+ self.assertTrue(os.path.exists(fname))
+ self.assertIn(f"writing to '{fname}", stdout)
+
+ def test_build_method_fail(self):
+ """Test fetching using the build method when no file is produced"""
+ fname, stdout = self.check_build_method(write_file=False)
+ self.assertFalse(os.path.exists(fname))
+ self.assertIn(f"File '{fname}' was not produced", stdout)
+
+ def test_install(self):
+ """Test fetching using the install method"""
+ btest = Bintool.create('_testing')
+ btest.install = True
+ col = terminal.Color()
+ with unittest.mock.patch.object(tools, 'run', return_value=None):
+ with test_util.capture_sys_output() as _:
+ result = btest.fetch_tool(bintool.FETCH_BIN, col, False)
+ self.assertEqual(bintool.FETCHED, result)
+
+ def test_no_fetch(self):
+ """Test fetching when there is no method"""
+ btest = Bintool.create('_testing')
+ btest.disable = True
+ col = terminal.Color()
+ with test_util.capture_sys_output() as _:
+ result = btest.fetch_tool(bintool.FETCH_BIN, col, False)
+ self.assertEqual(bintool.FAIL, result)
+
+ def test_all_bintools(self):
+ """Test that all bintools can handle all available fetch types"""
+ def handle_download(_):
+ """Take the tools.download() function by writing a file"""
+ tools.write_file(fname, expected)
+ return fname, dirname
+
+ def fake_run(*cmd):
+ if cmd[0] == 'make':
+ # See Bintool.build_from_git()
+ tmpdir = cmd[2]
+ self.fname = os.path.join(tmpdir, 'pathname')
+ tools.write_file(self.fname, b'hello')
+
+ expected = b'this is a test'
+ dirname = os.path.join(self._indir, 'download_dir')
+ os.mkdir(dirname)
+ fname = os.path.join(dirname, 'downloaded')
+
+ with unittest.mock.patch.object(tools, 'run', side_effect=fake_run):
+ with unittest.mock.patch.object(tools, 'download',
+ side_effect=handle_download):
+ with test_util.capture_sys_output() as _:
+ for name in Bintool.get_tool_list():
+ btool = Bintool.create(name)
+ for method in range(bintool.FETCH_COUNT):
+ result = btool.fetch(method)
+ self.assertTrue(result is not False)
+ if result is not True and result is not None:
+ result_fname, _ = result
+ self.assertTrue(os.path.exists(result_fname))
+ data = tools.read_file(result_fname)
+ self.assertEqual(expected, data)
+ os.remove(result_fname)
+
+ def test_all_bintool_versions(self):
+ """Test handling of bintool version when it cannot be run"""
+ all_tools = Bintool.get_tool_list()
+ for name in all_tools:
+ btool = Bintool.create(name)
+ with unittest.mock.patch.object(
+ btool, 'run_cmd_result', return_value=command.CommandResult()):
+ self.assertEqual('unknown', btool.version())
+
+    def test_force_missing(self):
+        """Test that a bintool can be forced to be treated as missing"""
+        btool = Bintool.create('_testing')
+ btool.present = True
+ self.assertTrue(btool.is_present())
+
+ btool.present = None
+ Bintool.set_missing_list(['_testing'])
+ self.assertFalse(btool.is_present())
+
+ def test_failed_command(self):
+ """Check that running a command that does not exist returns None"""
+ destdir = os.path.join(self._indir, 'dest_dir')
+ os.mkdir(destdir)
+ with unittest.mock.patch.object(bintool.Bintool, 'tooldir', destdir):
+ btool = Bintool.create('_testing')
+ result = btool.run_cmd_result('fred')
+ self.assertIsNone(result)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tools/binman/bintools.rst b/tools/binman/bintools.rst
new file mode 100644
index 00000000000..1336f4d0115
--- /dev/null
+++ b/tools/binman/bintools.rst
@@ -0,0 +1,218 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+Binman bintool Documentation
+============================
+
+This file describes the bintools (binary tools) supported by binman. Bintools
+are binman's name for external executables that it runs to generate or process
+binaries. It is fairly easy to create new bintools. Just add a new file to the
+'btool' directory. You can use existing bintools as examples.
+
+
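+A minimal sketch of what such a file might contain (illustrative only: the
+tool name `mytool` and class `Bintoolmytool` are hypothetical, following the
+`Bintool<module_name>` naming convention used when loading bintools)::
+
+    # binman/btool/mytool.py
+    from binman import bintool
+
+    class Bintoolmytool(bintool.Bintool):
+        """One-line description of the tool"""
+        def __init__(self, name):
+            super().__init__(name, 'My example tool',
+                             version_regex=r'mytool ([0-9.]+)')
+
+        def fetch(self, method):
+            # This sketch only supports binary installation via apt
+            if method != bintool.FETCH_BIN:
+                return None
+            return self.apt_install('mytool')
+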
+
+Bintool: bzip2: Compression/decompression using the bzip2 algorithm
+-------------------------------------------------------------------
+
+This bintool supports running `bzip2` to compress and decompress data, as
+used by binman.
+
+It is also possible to fetch the tool, which uses `apt` to install it.
+
+Documentation is available via::
+
+ man bzip2
+
+
+
+Bintool: cbfstool: Coreboot filesystem (CBFS) tool
+--------------------------------------------------
+
+This bintool supports creating new CBFS images and adding files to an
+existing image, i.e. the features needed by binman.
+
+It also supports fetching a binary cbfstool, since building it from source
+is fairly slow.
+
+Documentation about CBFS is at https://www.coreboot.org/CBFS
+
+
+
+Bintool: fiptool: Image generation for ARM Trusted Firmware
+-----------------------------------------------------------
+
+This bintool supports running `fiptool` with some basic parameters as
+needed by binman.
+
+It also supports building fiptool from source.
+
+fiptool provides a way to package firmware in an ARM Trusted Firmware
+Firmware Image Package (ATF FIP) format. It is used with Trusted Firmware A,
+for example.
+
+See `TF-A FIP tool documentation`_ for more information.
+
+.. _`TF-A FIP tool documentation`:
+ https://trustedfirmware-a.readthedocs.io/en/latest/getting_started/tools-build.html?highlight=fiptool#building-and-using-the-fip-tool
+
+
+
+Bintool: futility: Handles the 'futility' tool
+----------------------------------------------
+
+futility (flash utility) is a tool for working with Chromium OS flash
+images. This Bintool implements just the features used by Binman, related to
+GBB creation and firmware signing.
+
+A binary version of the tool can be fetched.
+
+See `Chromium OS vboot documentation`_ for more information.
+
+.. _`Chromium OS vboot documentation`:
+ https://chromium.googlesource.com/chromiumos/platform/vboot/+/refs/heads/main/_vboot_reference/README
+
+
+
+Bintool: gzip: Compression/decompression using the gzip algorithm
+-----------------------------------------------------------------
+
+This bintool supports running `gzip` to compress and decompress data, as
+used by binman.
+
+It is also possible to fetch the tool, which uses `apt` to install it.
+
+Documentation is available via::
+
+ man gzip
+
+
+
+Bintool: ifwitool: Handles the 'ifwitool' tool
+----------------------------------------------
+
+This bintool supports running `ifwitool` with some basic parameters as
+needed by binman. It includes creating a file from a FIT as well as adding,
+replacing, deleting and extracting subparts.
+
+The tool is built as part of U-Boot, but a binary version can be fetched if
+required.
+
+ifwitool provides a way to package firmware in an Intel Firmware Image
+(IFWI) file on some Intel SoCs, e.g. Apollo Lake.
+
+
+
+Bintool: lz4: Compression/decompression using the LZ4 algorithm
+---------------------------------------------------------------
+
+This bintool supports running `lz4` to compress and decompress data, as
+used by binman.
+
+It is also possible to fetch the tool, which uses `apt` to install it.
+
+Documentation is available via::
+
+ man lz4
+
+
+
+Bintool: lzma_alone: Compression/decompression using the LZMA algorithm
+-----------------------------------------------------------------------
+
+This bintool supports running `lzma_alone` to compress and decompress data,
+as used by binman.
+
+It is also possible to fetch the tool, which uses `apt` to install it.
+
+Documentation is available via::
+
+ man lzma_alone
+
+
+
+Bintool: lzop: Compression/decompression using the lzop algorithm
+-----------------------------------------------------------------
+
+This bintool supports running `lzop` to compress and decompress data, as
+used by binman.
+
+It is also possible to fetch the tool, which uses `apt` to install it.
+
+Documentation is available via::
+
+ man lzop
+
+
+
+Bintool: mkimage: Image generation for U-Boot
+---------------------------------------------
+
+This bintool supports running `mkimage` with some basic parameters as
+needed by binman.
+
+Normally binman uses the mkimage built by U-Boot. But when run outside the
+U-Boot build system, binman can use the version installed in your system.
+Support is provided for fetching this on Debian-like systems, using apt.
+
+
+
+Bintool: openssl: openssl tool
+------------------------------
+
+This bintool supports creating new openssl certificates.
+
+It also supports fetching a binary openssl
+
+Documentation about openssl is at https://www.openssl.org/
+
+
+
+Bintool: xz: Compression/decompression using the xz algorithm
+-------------------------------------------------------------
+
+This bintool supports running `xz` to compress and decompress data, as
+used by binman.
+
+It is also possible to fetch the tool, which uses `apt` to install it.
+
+Documentation is available via::
+
+ man xz
+
+
+
+Bintool: zstd: Compression/decompression using the zstd algorithm
+-----------------------------------------------------------------
+
+This bintool supports running `zstd` to compress and decompress data, as
+used by binman.
+
+It is also possible to fetch the tool, which uses `apt` to install it.
+
+Documentation is available via::
+
+ man zstd
+
+
+
+Bintool: fdt_add_pubkey: Add public key to device tree
+------------------------------------------------------
+
+This bintool supports running `fdt_add_pubkey` in order to add a public
+key coming from a certificate to a device-tree.
+
+Normally signing is done using `mkimage` in the context of `binman sign`.
+However, in this process the public key is not added to the stage before
+u-boot proper. Using `fdt_add_pubkey` the key can be injected into the SPL
+independently of `mkimage`.
+
+
+
+Bintool: bootgen: Sign ZynqMP FSBL image
+----------------------------------------
+
+This bintool supports running `bootgen` in order to sign an SPL for ZynqMP
+devices.
+
+The bintool automatically creates an appropriate input image file (.bif) for
+bootgen based on the passed arguments. The output is a bootable,
+authenticated `boot.bin` file.
diff --git a/tools/binman/btool/_testing.py b/tools/binman/btool/_testing.py
new file mode 100644
index 00000000000..4005e8a8a5d
--- /dev/null
+++ b/tools/binman/btool/_testing.py
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2022 Google LLC
+#
+"""Bintool used for testing
+
+This is not a real bintool, just one used for testing"""
+
+from binman import bintool
+
+# pylint: disable=C0103
+class Bintool_testing(bintool.Bintool):
+ """Bintool used for testing"""
+ def __init__(self, name):
+ super().__init__(name, 'testing')
+ self.present = False
+ self.install = False
+ self.disable = False
+
+ def is_present(self):
+ if self.present is None:
+ return super().is_present()
+ return self.present
+
+ def version(self):
+ return '123'
+
+ def fetch(self, method):
+ if self.disable:
+ return super().fetch(method)
+ if method == bintool.FETCH_BIN:
+ if self.install:
+ return self.apt_install('package')
+ return self.fetch_from_drive('junk')
+ if method == bintool.FETCH_BUILD:
+ return self.build_from_git('url', 'target', 'pathname')
+ return None
diff --git a/tools/binman/btool/bootgen.py b/tools/binman/btool/bootgen.py
new file mode 100644
index 00000000000..1bc9f0aa96f
--- /dev/null
+++ b/tools/binman/btool/bootgen.py
@@ -0,0 +1,137 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (C) 2023 Weidmüller Interface GmbH & Co. KG
+# Lukas Funke <lukas.funke@weidmueller.com>
+#
+"""Bintool implementation for bootgen
+
+bootgen allows creating bootable SPL for Zynq(MP)
+
+Documentation is available via:
+https://www.xilinx.com/support/documents/sw_manuals/xilinx2022_1/ug1283-bootgen-user-guide.pdf
+
+Source code is available at:
+https://github.com/Xilinx/bootgen
+
+"""
+
+from binman import bintool
+from u_boot_pylib import tools
+
+# pylint: disable=C0103
+class Bintoolbootgen(bintool.Bintool):
+ """Generate bootable fsbl image for zynq/zynqmp
+
+    This bintool supports running Xilinx "bootgen" in order
+    to generate a bootable, authenticated image from an SPL.
+
+ """
+ def __init__(self, name):
+ super().__init__(name, 'Xilinx Bootgen',
+ version_regex=r'^\*\*\*\*\*\* *Xilinx Bootgen *(.*)',
+ version_args='-help')
+
+ # pylint: disable=R0913
+ def sign(self, arch, spl_elf_fname, pmufw_elf_fname,
+ psk_fname, ssk_fname, fsbl_config, auth_params, keysrc_enc,
+ output_fname):
+ """Sign SPL elf file and bundle it with PMU firmware into an image
+
+        The method bundles the SPL together with a 'Platform Management Unit'
+ (PMU)[1] firmware into a single bootable image. The image in turn is
+ signed with the provided 'secondary secret key' (ssk), which in turn is
+ signed with the 'primary secret key' (psk). In order to verify the
+        authenticity of the ppk, its hash has to be fused into the device
+        itself.
+
+ In Xilinx terms the SPL is usually called 'FSBL'
+ (First Stage Boot Loader). The jobs of the SPL and the FSBL are mostly
+ the same: load bitstream, bootstrap u-boot.
+
+ Args:
+ arch (str): Xilinx SoC architecture. Currently only 'zynqmp' is
+ supported.
+ spl_elf_fname (str): Filename of SPL ELF file. The filename must end
+                with '.elf' in order for bootgen to recognize it as an ELF
+                file. Otherwise the start address field is misinterpreted.
+            pmufw_elf_fname (str): Filename of the PMU ELF firmware.
+ psk_fname (str): Filename of the primary secret key (psk). The psk
+ is a .pem file which holds the RSA private key used for signing
+ the secondary secret key.
+ ssk_fname (str): Filename of the secondary secret key. The ssk
+ is a .pem file which holds the RSA private key used for signing
+ the actual boot firmware.
+ fsbl_config (str): FSBL config options. A string list of fsbl config
+ options. Valid values according to [2] are:
+ "bh_auth_enable": Boot Header Authentication Enable: RSA
+ authentication of the bootimage is done
+ excluding the verification of PPK hash and SPK ID. This is
+ useful for debugging before bricking a device.
+ "auth_only": Boot image is only RSA signed. FSBL should not be
+ decrypted. See the
+ Zynq UltraScale+ Device Technical Reference Manual (UG1085)
+ for more information.
+ There are more options which relate to PUF (physical unclonable
+ functions). Please refer to Xilinx manuals for further info.
+ auth_params (str): Authentication parameter. A semicolon separated
+ list of authentication parameters. Valid values according to [3]
+ are:
+ "ppk_select=<0|1>" - Select which ppk to use
+ "spk_id=<32-bit spk id>" - Specifies which SPK can be
+ used or revoked, default is 0x0
+ "spk_select=<spk-efuse/user-efuse>" - To differentiate spk and
+ user efuses.
+ "auth_header" - To authenticate headers when no partition
+ is authenticated.
+ keysrc_enc (str): This specifies the Key source for encryption.
+ Valid values according to [3] are:
+ "bbram_red_key" - RED key stored in BBRAM
+ "efuse_red_key" - RED key stored in eFUSE
+ "efuse_gry_key" - Grey (Obfuscated) Key stored in eFUSE.
+ "bh_gry_key" - Grey (Obfuscated) Key stored in boot header
+ "bh_blk_key" - Black Key stored in boot header
+ "efuse_blk_key" - Black Key stored in eFUSE
+ "kup_key" - User Key
+
+ output_fname (str): Filename where bootgen should write the result
+
+ Returns:
+ str: Bootgen output from stdout
+
+ [1] https://xilinx-wiki.atlassian.net/wiki/spaces/A/pages/18841724/PMU+Firmware
+ [2] https://docs.xilinx.com/r/en-US/ug1283-bootgen-user-guide/fsbl_config
+ [3] https://docs.xilinx.com/r/en-US/ug1283-bootgen-user-guide/auth_params
+ [4] https://docs.xilinx.com/r/en-US/ug1283-bootgen-user-guide/keysrc_encryption
+ """
+
+ _fsbl_config = f"[fsbl_config] {fsbl_config}" if fsbl_config else ""
+ _auth_params = f"[auth_params] {auth_params}" if auth_params else ""
+ _keysrc_enc = f"[keysrc_encryption] {keysrc_enc}" if keysrc_enc else ""
+
+ bif_template = f"""u_boot_spl_aes_rsa: {{
+ [pskfile] {psk_fname}
+ [sskfile] {ssk_fname}
+ {_keysrc_enc}
+ {_fsbl_config}
+ {_auth_params}
+ [ bootloader,
+ authentication = rsa,
+ destination_cpu=a53-0] {spl_elf_fname}
+ [pmufw_image] {pmufw_elf_fname}
+ }}"""
+ args = ["-arch", arch]
+
+ bif_fname = tools.get_output_filename('bootgen-in.sign.bif')
+ tools.write_file(bif_fname, bif_template, False)
+ args += ["-image", bif_fname, '-w', '-o', output_fname]
+ return self.run_cmd(*args)
+
+ def fetch(self, method):
+ """Fetch bootgen from git"""
+ if method != bintool.FETCH_BUILD:
+ return None
+
+ result = self.build_from_git(
+ 'https://github.com/Xilinx/bootgen',
+ ['all'],
+ 'bootgen')
+ return result
diff --git a/tools/binman/btool/btool_gzip.py b/tools/binman/btool/btool_gzip.py
new file mode 100644
index 00000000000..0d75028120f
--- /dev/null
+++ b/tools/binman/btool/btool_gzip.py
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (C) 2022 Weidmüller Interface GmbH & Co. KG
+# Stefan Herbrechtsmeier <stefan.herbrechtsmeier@weidmueller.com>
+#
+"""Bintool implementation for gzip
+
+gzip allows compression and decompression of files.
+
+Documentation is available via::
+
+ man gzip
+"""
+
+from binman import bintool
+
+# pylint: disable=C0103
+class Bintoolgzip(bintool.BintoolPacker):
+ """Compression/decompression using the gzip algorithm
+
+ This bintool supports running `gzip` to compress and decompress data, as
+ used by binman.
+
+ It is also possible to fetch the tool, which uses `apt` to install it.
+
+ Documentation is available via::
+
+ man gzip
+ """
+ def __init__(self, name):
+ super().__init__(name, compress_args=[],
+ version_regex=r'gzip ([0-9.]+)')
diff --git a/tools/binman/btool/bzip2.py b/tools/binman/btool/bzip2.py
new file mode 100644
index 00000000000..c3897d63acb
--- /dev/null
+++ b/tools/binman/btool/bzip2.py
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (C) 2022 Weidmüller Interface GmbH & Co. KG
+# Stefan Herbrechtsmeier <stefan.herbrechtsmeier@weidmueller.com>
+#
+"""Bintool implementation for bzip2
+
+bzip2 allows compression and decompression of files.
+
+Documentation is available via::
+
+ man bzip2
+"""
+
+from binman import bintool
+
+# pylint: disable=C0103
+class Bintoolbzip2(bintool.BintoolPacker):
+ """Compression/decompression using the bzip2 algorithm
+
+ This bintool supports running `bzip2` to compress and decompress data, as
+ used by binman.
+
+ It is also possible to fetch the tool, which uses `apt` to install it.
+
+ Documentation is available via::
+
+ man bzip2
+ """
+ def __init__(self, name):
+        super().__init__(name, version_regex=r'bzip2.*Version ([0-9.]+)',
+                         version_args='--help')
diff --git a/tools/binman/btool/cbfstool.py b/tools/binman/btool/cbfstool.py
new file mode 100644
index 00000000000..29be2d8a2b5
--- /dev/null
+++ b/tools/binman/btool/cbfstool.py
@@ -0,0 +1,219 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2022 Google LLC
+#
+"""Bintool implementation for cbfstool
+
+cbfstool provides a number of features useful with Coreboot Filesystem binaries.
+
+Documentation is at https://www.coreboot.org/CBFS
+
+Source code is at https://github.com/coreboot/coreboot/blob/master/util/cbfstool/cbfstool.c
+
+Here is the help:
+
+cbfstool: Management utility for CBFS formatted ROM images
+
+USAGE:
+ cbfstool [-h]
+ cbfstool FILE COMMAND [-v] [PARAMETERS]...
+
+OPTIONs:
+ -H header_offset Do not search for header; use this offset*
+ -T Output top-aligned memory address
+ -u Accept short data; fill upward/from bottom
+ -d Accept short data; fill downward/from top
+ -F Force action
+ -g Generate position and alignment arguments
+ -U Unprocessed; don't decompress or make ELF
+ -v Provide verbose output
+ -h Display this help message
+
+COMMANDs:
+ add [-r image,regions] -f FILE -n NAME -t TYPE [-A hash] \
+ [-c compression] [-b base-address | -a alignment] \
+ [-p padding size] [-y|--xip if TYPE is FSP] \
+ [-j topswap-size] (Intel CPUs only) [--ibb]
+ Add a component
+ -j valid size: 0x10000 0x20000 0x40000 0x80000 0x100000
+ add-payload [-r image,regions] -f FILE -n NAME [-A hash] \
+ [-c compression] [-b base-address] \
+ (linux specific: [-C cmdline] [-I initrd])
+ Add a payload to the ROM
+ add-stage [-r image,regions] -f FILE -n NAME [-A hash] \
+ [-c compression] [-b base] [-S section-to-ignore] \
+ [-a alignment] [-y|--xip] [-P page-size] [--ibb]
+ Add a stage to the ROM
+ add-flat-binary [-r image,regions] -f FILE -n NAME \
+ [-A hash] -l load-address -e entry-point \
+ [-c compression] [-b base]
+ Add a 32bit flat mode binary
+ add-int [-r image,regions] -i INTEGER -n NAME [-b base]
+ Add a raw 64-bit integer value
+ add-master-header [-r image,regions] \
+ [-j topswap-size] (Intel CPUs only)
+ Add a legacy CBFS master header
+ remove [-r image,regions] -n NAME
+ Remove a component
+ compact -r image,regions
+ Defragment CBFS image.
+ copy -r image,regions -R source-region
+ Create a copy (duplicate) cbfs instance in fmap
+ create -m ARCH -s size [-b bootblock offset] \
+ [-o CBFS offset] [-H header offset] [-B bootblock]
+ Create a legacy ROM file with CBFS master header*
+ create -M flashmap [-r list,of,regions,containing,cbfses]
+ Create a new-style partitioned firmware image
+ locate [-r image,regions] -f FILE -n NAME [-P page-size] \
+ [-a align] [-T]
+ Find a place for a file of that size
+ layout [-w]
+ List mutable (or, with -w, readable) image regions
+ print [-r image,regions]
+ Show the contents of the ROM
+ extract [-r image,regions] [-m ARCH] -n NAME -f FILE [-U]
+ Extracts a file from ROM
+ write [-F] -r image,regions -f file [-u | -d] [-i int]
+ Write file into same-size [or larger] raw region
+ read [-r fmap-region] -f file
+ Extract raw region contents into binary file
+ truncate [-r fmap-region]
+ Truncate CBFS and print new size on stdout
+ expand [-r fmap-region]
+ Expand CBFS to span entire region
+OFFSETs:
+ Numbers accompanying -b, -H, and -o switches* may be provided
+ in two possible formats: if their value is greater than
+ 0x80000000, they are interpreted as a top-aligned x86 memory
+ address; otherwise, they are treated as an offset into flash.
+ARCHes:
+ arm64, arm, mips, ppc64, power8, riscv, x86, unknown
+TYPEs:
+ bootblock, cbfs header, stage, simple elf, fit, optionrom, bootsplash, raw,
+ vsa, mbi, microcode, fsp, mrc, cmos_default, cmos_layout, spd,
+ mrc_cache, mma, efi, struct, deleted, null
+
+* Note that these actions and switches are only valid when
+ working with legacy images whose structure is described
+ primarily by a CBFS master header. New-style images, in
+ contrast, exclusively make use of an FMAP to describe their
+ layout: this must minimally contain an 'FMAP' section
+ specifying the location of this FMAP itself and a 'COREBOOT'
+ section describing the primary CBFS. It should also be noted
+ that, when working with such images, the -F and -r switches
+ default to 'COREBOOT' for convenience, and both the -b switch to
+ CBFS operations and the output of the locate action become
+ relative to the selected CBFS region's lowest address.
+ The one exception to this rule is the top-aligned address,
+ which is always relative to the end of the entire image
+ rather than relative to the local region; this is true for
+ for both input (sufficiently large) and output (-T) data.
+
+
+Since binman has a native implementation of CBFS (see cbfs_util.py), we don't
+actually need this tool, except for sanity checks in the tests.
+"""
+
+from binman import bintool
+
+class Bintoolcbfstool(bintool.Bintool):
+ """Coreboot filesystem (CBFS) tool
+
+ This bintool supports creating new CBFS images and adding files to an
+ existing image, i.e. the features needed by binman.
+
+ It also supports fetching a binary cbfstool, since building it from source
+ is fairly slow.
+
+ Documentation about CBFS is at https://www.coreboot.org/CBFS
+ """
+ def __init__(self, name):
+ super().__init__(name, 'Manipulate CBFS files')
+
+ def create_new(self, cbfs_fname, size, arch='x86'):
+ """Create a new CBFS
+
+ Args:
+ cbfs_fname (str): Filename of CBFS to create
+ size (int): Size of CBFS in bytes
+ arch (str): Architecture for which this CBFS is intended
+
+ Returns:
+ str: Tool output
+ """
+ args = [cbfs_fname, 'create', '-s', f'{size:#x}', '-m', arch]
+ return self.run_cmd(*args)
+
+ # pylint: disable=R0913
+ def add_raw(self, cbfs_fname, name, fname, compress=None, base=None):
+ """Add a raw file to the CBFS
+
+ Args:
+ cbfs_fname (str): Filename of CBFS to create
+ name (str): Name to use inside the CBFS
+ fname (str): Filename of file to add
+ compress (str): Compression to use (cbfs_util.COMPRESS_NAMES) or
+ None for None
+ base (int): Address to place the file, or None for anywhere
+
+ Returns:
+ str: Tool output
+ """
+ args = [cbfs_fname,
+ 'add',
+ '-n', name,
+ '-t', 'raw',
+ '-f', fname,
+ '-c', compress or 'none']
+ if base:
+ args += ['-b', f'{base:#x}']
+ return self.run_cmd(*args)
+
+ def add_stage(self, cbfs_fname, name, fname):
+ """Add a stage file to the CBFS
+
+ Args:
+ cbfs_fname (str): Filename of CBFS to create
+ name (str): Name to use inside the CBFS
+ fname (str): Filename of file to add
+
+ Returns:
+ str: Tool output
+ """
+ args = [cbfs_fname,
+ 'add-stage',
+ '-n', name,
+ '-f', fname
+ ]
+ return self.run_cmd(*args)
+
+ def fail(self):
+ """Run cbfstool with invalid arguments to check it reports failure
+
+ This is really just a sanity check
+
+ Returns:
+ CommandResult: Result from running the bad command
+ """
+ args = ['missing-file', 'bad-command']
+ return self.run_cmd_result(*args)
+
+ def fetch(self, method):
+ """Fetch handler for cbfstool
+
+ This installs cbfstool by downloading from Google Drive.
+
+ Args:
+ method (FETCH_...): Method to use
+
+ Returns:
+ True if the file was fetched and now installed, None if a method
+ other than FETCH_BIN was requested
+
+ Raises:
+            ValueError: Fetching could not be completed
+ """
+ if method != bintool.FETCH_BIN:
+ return None
+ fname, tmpdir = self.fetch_from_drive(
+ '1IOnE0Qvy97d-0WOCwF64xBGpKSY2sMtJ')
+ return fname, tmpdir
diff --git a/tools/binman/btool/fdt_add_pubkey.py b/tools/binman/btool/fdt_add_pubkey.py
new file mode 100644
index 00000000000..a50774200c9
--- /dev/null
+++ b/tools/binman/btool/fdt_add_pubkey.py
@@ -0,0 +1,67 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (C) 2023 Weidmüller Interface GmbH & Co. KG
+# Lukas Funke <lukas.funke@weidmueller.com>
+#
+"""Bintool implementation for fdt_add_pubkey"""
+
+from binman import bintool
+
+class Bintoolfdt_add_pubkey(bintool.Bintool):
+ """Add public key to control dtb (spl or u-boot proper)
+
+ This bintool supports running `fdt_add_pubkey`.
+
+    Normally mkimage adds signature information to the control dtb. However,
+    binman images are built independently from each other, so the public key
+    has to be added separately from mkimage.
+ """
+ def __init__(self, name):
+        super().__init__(name, 'Add public key to device tree')
+
+ # pylint: disable=R0913
+ def run(self, input_fname, keydir, keyname, required, algo):
+ """Run fdt_add_pubkey
+
+ Args:
+ input_fname (str): dtb file to sign
+ keydir (str): Directory with public key. Optional parameter,
+ default value: '.' (current directory)
+ keyname (str): Public key name. Optional parameter,
+ default value: key
+ required (str): If present this indicates that the key must be
+ verified for the image / configuration to be considered valid.
+ algo (str): Cryptographic algorithm. Optional parameter,
+ default value: sha1,rsa2048
+ """
+ args = []
+ if algo:
+ args += ['-a', algo]
+ if keydir:
+ args += ['-k', keydir]
+ if keyname:
+ args += ['-n', keyname]
+ if required:
+ args += ['-r', required]
+
+ args += [ input_fname ]
+
+ return self.run_cmd(*args)
+
+ def fetch(self, method):
+ """Fetch handler for fdt_add_pubkey
+
+ This installs fdt_add_pubkey using the apt utility.
+
+ Args:
+ method (FETCH_...): Method to use
+
+ Returns:
+ True if the file was fetched and now installed, None if a method
+ other than FETCH_BIN was requested
+
+ Raises:
+            ValueError: Fetching could not be completed
+ """
+ if method != bintool.FETCH_BIN:
+ return None
+ return self.apt_install('u-boot-tools')
diff --git a/tools/binman/btool/fiptool.py b/tools/binman/btool/fiptool.py
new file mode 100644
index 00000000000..34002f54af9
--- /dev/null
+++ b/tools/binman/btool/fiptool.py
@@ -0,0 +1,114 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2022 Google LLC
+#
+"""Bintool implementation for fiptool
+
+fiptool provides a way to package firmware in an ARM Trusted Firmware Firmware
+Image Package (ATF FIP) format. It is used with Trusted Firmware A, for example.
+
+Documentation is at:
+https://trustedfirmware-a.readthedocs.io/en/latest/getting_started/tools-build.html?highlight=fiptool#building-and-using-the-fip-tool
+
+Source code is at:
+https://git.trustedfirmware.org/TF-A/trusted-firmware-a.git
+
+Here is the help:
+
+usage: fiptool [--verbose] <command> [<args>]
+Global options supported:
+ --verbose Enable verbose output for all commands.
+
+Commands supported:
+ info List images contained in FIP.
+ create Create a new FIP with the given images.
+ update Update an existing FIP with the given images.
+ unpack Unpack images from FIP.
+ remove Remove images from FIP.
+ version Show fiptool version.
+ help Show help for given command.
+
+"""
+
+from binman import bintool
+
+class Bintoolfiptool(bintool.Bintool):
+ """Image generation for ARM Trusted Firmware
+
+ This bintool supports running `fiptool` with some basic parameters as
+    needed by binman.
+
+    It also supports building fiptool from source.
+
+ fiptool provides a way to package firmware in an ARM Trusted Firmware
+ Firmware Image Package (ATF FIP) format. It is used with Trusted Firmware A,
+ for example.
+
+ See `TF-A FIP tool documentation`_ for more information.
+
+ .. _`TF-A FIP tool documentation`:
+ https://trustedfirmware-a.readthedocs.io/en/latest/getting_started/tools-build.html?highlight=fiptool#building-and-using-the-fip-tool
+ """
+ def __init__(self, name):
+ super().__init__(name, 'Manipulate ATF FIP files', r'^(.*)$', 'version')
+
+ def info(self, fname):
+ """Get info on a FIP image
+
+ Args:
+ fname (str): Filename to check
+
+ Returns:
+ str: Tool output
+ """
+ args = ['info', fname]
+ return self.run_cmd(*args)
+
+ # pylint: disable=R0913
+ def create_new(self, fname, align, plat_toc_flags, fwu, tb_fw, blob_uuid,
+ blob_file):
+ """Create a new FIP
+
+ Args:
+ fname (str): Filename to write to
+ align (int): Alignment to use for entries
+ plat_toc_flags (int): Flags to use for the TOC header
+ fwu (str): Filename for the fwu entry
+ tb_fw (str): Filename for the tb_fw entry
+ blob_uuid (str): UUID for the blob entry
+ blob_file (str): Filename for the blob entry
+
+ Returns:
+ str: Tool output
+ """
+ args = [
+ 'create',
+ '--align', f'{align:x}',
+ '--plat-toc-flags', f'{plat_toc_flags:#x}',
+ '--fwu', fwu,
+ '--tb-fw', tb_fw,
+ '--blob', f'uuid={blob_uuid},file={blob_file}',
+ fname]
+ return self.run_cmd(*args)
+
+ def create_bad(self):
+ """Run fiptool with invalid arguments"""
+ args = ['create', '--fred']
+ return self.run_cmd_result(*args)
+
+ def fetch(self, method):
+ """Fetch handler for fiptool
+
+ This builds the tool from source
+
+ Returns:
+ tuple:
+ str: Filename of fetched file to copy to a suitable directory
+ str: Name of temp directory to remove, or None
+ """
+ if method != bintool.FETCH_BUILD:
+ return None
+ result = self.build_from_git(
+ 'https://git.trustedfirmware.org/TF-A/trusted-firmware-a.git',
+ ['fiptool'],
+ 'tools/fiptool/fiptool')
+ return result
diff --git a/tools/binman/btool/futility.py b/tools/binman/btool/futility.py
new file mode 100644
index 00000000000..0d3980d071d
--- /dev/null
+++ b/tools/binman/btool/futility.py
@@ -0,0 +1,176 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2022 Google LLC
+#
+"""Bintool implementation for futility
+
+futility (flash utility) is a tool for working with Chromium OS flash images.
+This implements just the features used by Binman.
+
+Documentation is at:
+ https://chromium.googlesource.com/chromiumos/platform/vboot/+/refs/heads/main/_vboot_reference/README
+
+Source code:
+ https://chromium.googlesource.com/chromiumos/platform/vboot/+/refs/heads/master/_vboot_reference/futility
+
+Here is the help:
+Usage: futility [options] COMMAND [args...]
+
+This is the unified firmware utility, which will eventually replace
+most of the distinct verified boot tools formerly produced by the
+vboot_reference package.
+
+When symlinked under the name of one of those previous tools, it should
+fully implement the original behavior. It can also be invoked directly
+as futility, followed by the original name as the first argument.
+
+Global options:
+
+ --vb1 Use only vboot v1.0 binary formats
+ --vb21 Use only vboot v2.1 binary formats
+ --debug Be noisy about what's going on
+
+The following commands are built-in:
+
+ bdb Common boot flow utility
+ create Create a keypair from an RSA .pem file
+ dump_fmap Display FMAP contents from a firmware image
+ dump_kernel_config Prints the kernel command line
+ gbb Manipulate the Google Binary Block (GBB)
+ gbb_utility Legacy name for `gbb` command
+ help Show a bit of help (you're looking at it)
+ load_fmap Replace the contents of specified FMAP areas
+ pcr Simulate a TPM PCR extension operation
+ show Display the content of various binary components
+ sign Sign / resign various binary components
+ update Update system firmware
+ validate_rec_mrc Validates content of Recovery MRC cache
+ vbutil_firmware Verified boot firmware utility
+ vbutil_kernel Creates, signs, and verifies the kernel partition
+ vbutil_key Wraps RSA keys with vboot headers
+ vbutil_keyblock Creates, signs, and verifies a keyblock
+ verify Verify the signatures of various binary components
+ version Show the futility source revision and build date
+"""
+
+from binman import bintool
+
+class Bintoolfutility(bintool.Bintool):
+ """Handles the 'futility' tool
+
+ futility (flash utility) is a tool for working with Chromium OS flash
+ images. This Bintool implements just the features used by Binman, related to
+ GBB creation and firmware signing.
+
+ The tool can be built from source if it is not already available.
+
+ See `Chromium OS vboot documentation`_ for more information.
+
+ .. _`Chromium OS vboot documentation`:
+ https://chromium.googlesource.com/chromiumos/platform/vboot/+/refs/heads/main/_vboot_reference/README
+ """
+ def __init__(self, name):
+ super().__init__(name, 'Chromium OS firmware utility', r'^(.*)$', 'version')
+
+ def gbb_create(self, fname, sizes):
+ """Create a new Google Binary Block
+
+ Args:
+ fname (str): Filename to write to
+ sizes (list of int): Sizes of each region:
+ hwid_size, rootkey_size, bmpfv_size, recoverykey_size
+
+ Returns:
+ str: Tool output
+ """
+ args = [
+ 'gbb_utility',
+ '-c',
+ ','.join(['%#x' % size for size in sizes]),
+ fname
+ ]
+ return self.run_cmd(*args)
+
+ # pylint: disable=R0913
+ def gbb_set(self, fname, hwid, rootkey, recoverykey, flags, bmpfv):
+ """Set the parameters in a Google Binary Block
+
+ Args:
+ fname (str): Filename to update
+ hwid (str): Hardware ID to use
+ rootkey (str): Filename of root key, e.g. 'root_key.vbpubk'
+ recoverykey (str): Filename of recovery key,
+ e.g. 'recovery_key.vbpubk'
+ flags (int): GBB flags to use
+ bmpfv (str): Filename of firmware bitmaps (bmpblk file)
+
+ Returns:
+ str: Tool output
+ """
+ args = ['gbb_utility',
+ '-s',
+ f'--hwid={hwid}',
+ f'--rootkey={rootkey}',
+ f'--recoverykey={recoverykey}',
+ f'--flags={flags}',
+ f'--bmpfv={bmpfv}',
+ fname
+ ]
+ return self.run_cmd(*args)
+
+ def sign_firmware(self, vblock, keyblock, signprivate, version, firmware,
+ kernelkey, flags):
+ """Sign firmware to create a vblock file
+
+ Args:
+ vblock (str): Filename to write the vblock to
+ keyblock (str): Filename of keyblock file
+ signprivate (str): Filename of private key
+ version (int): Version number
+ firmware (str): Filename of firmware binary to sign
+ kernelkey (str): Filename of kernel key
+ flags (int): Preamble flags
+
+ Returns:
+ str: Tool output
+ """
+ args = [
+ 'vbutil_firmware',
+ '--vblock', vblock,
+ '--keyblock', keyblock,
+ '--signprivate', signprivate,
+ '--version', version,
+ '--fv', firmware,
+ '--kernelkey', kernelkey,
+ '--flags', flags
+ ]
+ return self.run_cmd(*args)
+
+ def fetch(self, method):
+ """Fetch handler for futility
+
+ This builds futility from source, using a mirror of the vboot_reference
+ repository.
+
+ Args:
+ method (FETCH_...): Method to use
+
+ Returns:
+ tuple:
+ str: Filename of fetched file to copy to a suitable directory
+ str: Name of temp directory to remove, or None
+ None is returned if a method other than FETCH_BUILD was requested
+
+ Raises:
+ ValueError: Fetching could not be completed
+ """
+ if method != bintool.FETCH_BUILD:
+ return None
+
+ # The Chromium OS repo is here:
+ # https://chromium.googlesource.com/chromiumos/platform/vboot_reference/
+ #
+ # Unfortunately this requires logging in and obtaining a line for the
+ # .gitcookies file. So use a mirror instead.
+ result = self.build_from_git(
+ 'https://github.com/sjg20/vboot_reference.git',
+ ['all'],
+ 'build/futility/futility',
+ flags=['USE_FLASHROM=0'])
+ return result
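A hypothetical usage sketch for the GBB helpers above; the sizes, hardware ID and key filenames are placeholders:

    from binman import bintool

    futility = bintool.Bintool.create('futility')
    futility.gbb_create('gbb.bin', [0x100, 0x1000, 0x1000, 0x1000])
    futility.gbb_set('gbb.bin', hwid='SAMPLE 1234', rootkey='root_key.vbpubk',
                     recoverykey='recovery_key.vbpubk', flags=1,
                     bmpfv='bmpblk.bin')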
diff --git a/tools/binman/btool/ifwitool.py b/tools/binman/btool/ifwitool.py
new file mode 100644
index 00000000000..96778fce87f
--- /dev/null
+++ b/tools/binman/btool/ifwitool.py
@@ -0,0 +1,166 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2022 Google LLC
+#
+"""Bintool implementation for ifwitool
+
+ifwitool provides a way to package firmware in an Intel Firmware Image (IFWI)
+file on some Intel SoCs, e.g. Apollo Lake.
+
+Documentation is not really available so far as I can tell
+
+Source code is at tools/ifwitool.c which is a cleaned-up version of
+https://github.com/coreboot/coreboot/blob/master/util/cbfstool/ifwitool.c
+
+Here is the help:
+
+ifwitool: Utility for IFWI manipulation
+
+USAGE:
+ /tmp/b/sandbox/tools/ifwitool [-h]
+ /tmp/b/sandbox/tools/ifwitool FILE COMMAND [PARAMETERS]
+
+COMMANDs:
+ add -f FILE -n NAME [-d -e ENTRY]
+ create -f FILE
+ delete -n NAME
+ extract -f FILE -n NAME [-d -e ENTRY]
+ print [-d]
+ replace -f FILE -n NAME [-d -e ENTRY]
+OPTIONs:
+ -f FILE : File to read/write/create/extract
+ -d : Perform directory operation
+ -e ENTRY: Name of directory entry to operate on
+ -v : Verbose level
+ -h : Help message
+ -n NAME : Name of sub-partition to operate on
+
+NAME should be one of:
+SMIP(SMIP)
+RBEP(CSE_RBE)
+FTPR(CSE_BUP)
+UCOD(Microcode)
+IBBP(Bootblock)
+S_BPDT(S-BPDT)
+OBBP(OEM boot block)
+NFTP(CSE_MAIN)
+ISHP(ISH)
+DLMP(CSE_IDLM)
+IFP_OVERRIDE(IFP_OVERRIDE)
+DEBUG_TOKENS(Debug Tokens)
+UFS_PHY(UFS Phy)
+UFS_GPP(UFS GPP)
+PMCP(PMC firmware)
+IUNP(IUNIT)
+NVM_CONFIG(NVM Config)
+UEP(UEP)
+UFS_RATE_B(UFS Rate B Config)
+"""
+
+from binman import bintool
+
+class Bintoolifwitool(bintool.Bintool):
+ """Handles the 'ifwitool' tool
+
+ This bintool supports running `ifwitool` with some basic parameters as
+ needed by binman. It includes creating a file from a FIT as well as adding,
+ replacing, deleting and extracting subparts.
+
+ The tool is built as part of U-Boot, but a binary version can be fetched if
+ required.
+
+ ifwitool provides a way to package firmware in an Intel Firmware Image
+ (IFWI) file on some Intel SoCs, e.g. Apollo Lake.
+ """
+ def __init__(self, name):
+ super().__init__(name, 'Manipulate Intel IFWI files')
+
+ def create_ifwi(self, intel_fit, ifwi_file):
+ """Create a new IFWI file, using an existing Intel FIT binary
+
+ Args:
+ intel_fit (str): Filename of existing Intel FIT file
+ ifwi_file (str): Output filename to write the new IFWI to
+
+ Returns:
+ str: Tool output
+ """
+ args = [intel_fit, 'create', '-f', ifwi_file]
+ return self.run_cmd(*args)
+
+ def delete_subpart(self, ifwi_file, subpart):
+ """Delete a subpart within the IFWI file
+
+ Args:
+ ifwi_file (str): IFWI filename to update
+ subpart (str): Name of subpart to delete, e.g. 'OBBP'
+
+ Returns:
+ str: Tool output
+ """
+ args = [ifwi_file, 'delete', '-n', subpart]
+ return self.run_cmd(*args)
+
+ # pylint: disable=R0913
+ def add_subpart(self, ifwi_file, subpart, entry_name, infile,
+ replace=False):
+ """Add or replace a subpart within the IFWI file
+
+ Args:
+ ifwi_file (str): IFWI filename to update
+ subpart (str): Name of subpart to add/replace
+ entry_name (str): Name of entry to add/replace
+ infile (str): Filename of the file to add/replace
+ replace (bool): True to replace the existing entry, False to add a
+ new one
+
+ Returns:
+ str: Tool output
+ """
+ args = [
+ ifwi_file,
+ 'replace' if replace else 'add',
+ '-n', subpart,
+ '-d', '-e', entry_name,
+ '-f', infile,
+ ]
+ return self.run_cmd(*args)
+
+ def extract(self, ifwi_file, subpart, entry_name, outfile):
+ """Extract a subpart from the IFWI file
+
+ Args:
+ ifwi_file (str): IFWI filename to extract from
+ subpart (str): Name of subpart to extract
+ entry_name (str): Name of entry to extract
+ outfile (str): Filename to write the extracted data to
+
+ Returns:
+ str: Tool output
+ """
+ args = [
+ ifwi_file,
+ 'extract',
+ '-n', subpart,
+ '-d', '-e', entry_name,
+ '-f', outfile,
+ ]
+ return self.run_cmd(*args)
+
+ def fetch(self, method):
+ """Fetch handler for ifwitool
+
+ This installs ifwitool using a binary download.
+
+ Args:
+ method (FETCH_...): Method to use
+
+ Returns:
+ tuple:
+ str: Filename of fetched file to copy to a suitable directory
+ str: Name of temp directory to remove, or None
+ None is returned if a method other than FETCH_BIN was requested
+
+ Raises:
+ ValueError: Fetching could not be completed
+ """
+ if method != bintool.FETCH_BIN:
+ return None
+ fname, tmpdir = self.fetch_from_drive(
+ '18JDghOxlt2Hcc5jv51O1t6uNVHQ0XKJS')
+ return fname, tmpdir
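A sketch of the expected call sequence, assuming 'fitimage.bin' is an existing Intel FIT binary; the subpart/entry names and filenames here are illustrative only:

    from binman import bintool

    ifwitool = bintool.Bintool.create('ifwitool')
    ifwitool.create_ifwi('fitimage.bin', 'image.ifwi')
    ifwitool.delete_subpart('image.ifwi', 'OBBP')
    ifwitool.add_subpart('image.ifwi', 'IBBP', 'IBBL', 'tpl.bin', replace=True)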
diff --git a/tools/binman/btool/lz4.py b/tools/binman/btool/lz4.py
new file mode 100644
index 00000000000..fd520d13a56
--- /dev/null
+++ b/tools/binman/btool/lz4.py
@@ -0,0 +1,128 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2022 Google LLC
+#
+"""Bintool implementation for lz4
+
+lz4 allows compression and decompression of files.
+
+Documentation is available via::
+
+ man lz4
+
+Here is the help:
+
+*** LZ4 command line interface 64-bits v1.9.3, by Yann Collet ***
+Usage :
+ lz4 [arg] [input] [output]
+
+input : a filename
+ with no FILE, or when FILE is - or stdin, read standard input
+Arguments :
+ -1 : Fast compression (default)
+ -9 : High compression
+ -d : decompression (default for .lz4 extension)
+ -z : force compression
+ -D FILE: use FILE as dictionary
+ -f : overwrite output without prompting
+ -k : preserve source files(s) (default)
+--rm : remove source file(s) after successful de/compression
+ -h/-H : display help/long help and exit
+
+Advanced arguments :
+ -V : display Version number and exit
+ -v : verbose mode
+ -q : suppress warnings; specify twice to suppress errors too
+ -c : force write to standard output, even if it is the console
+ -t : test compressed file integrity
+ -m : multiple input files (implies automatic output filenames)
+ -r : operate recursively on directories (sets also -m)
+ -l : compress using Legacy format (Linux kernel compression)
+ -B# : cut file into blocks of size # bytes [32+]
+ or predefined block size [4-7] (default: 7)
+ -BI : Block Independence (default)
+ -BD : Block dependency (improves compression ratio)
+ -BX : enable block checksum (default:disabled)
+--no-frame-crc : disable stream checksum (default:enabled)
+--content-size : compressed frame includes original size (default:not present)
+--list FILE : lists information about .lz4 files (useful for files compressed
+ with --content-size flag)
+--[no-]sparse : sparse mode (default:enabled on file, disabled on stdout)
+--favor-decSpeed: compressed files decompress faster, but are less compressed
+--fast[=#]: switch to ultra fast compression level (default: 1)
+--best : same as -12
+Benchmark arguments :
+ -b# : benchmark file(s), using # compression level (default : 1)
+ -e# : test all compression levels from -bX to # (default : 1)
+ -i# : minimum evaluation time in seconds (default : 3s)
+"""
+
+import re
+import tempfile
+
+from binman import bintool
+from u_boot_pylib import tools
+
+# pylint: disable=C0103
+class Bintoollz4(bintool.Bintool):
+ """Compression/decompression using the LZ4 algorithm
+
+ This bintool supports running `lz4` to compress and decompress data, as
+ used by binman.
+
+ It is also possible to fetch the tool, which uses `apt` to install it.
+
+ Documentation is available via::
+
+ man lz4
+ """
+ def __init__(self, name):
+ super().__init__(name, 'lz4 compression', r'.* (v[0-9.]*),.*')
+
+ def compress(self, indata):
+ """Compress data with lz4
+
+ Args:
+ indata (bytes): Data to compress
+
+ Returns:
+ bytes: Compressed data
+ """
+ with tempfile.NamedTemporaryFile(prefix='comp.tmp',
+ dir=tools.get_output_dir()) as tmp:
+ tools.write_file(tmp.name, indata)
+ args = ['--no-frame-crc', '-B4', '-5', '-c', tmp.name]
+ return self.run_cmd(*args, binary=True)
+
+ def decompress(self, indata):
+ """Decompress data with lz4
+
+ Args:
+ indata (bytes): Data to decompress
+
+ Returns:
+ bytes: Decompressed data
+ """
+ with tempfile.NamedTemporaryFile(prefix='decomp.tmp',
+ dir=tools.get_output_dir()) as inf:
+ tools.write_file(inf.name, indata)
+ args = ['-cd', inf.name]
+ return self.run_cmd(*args, binary=True)
+
+ def fetch(self, method):
+ """Fetch handler for lz4
+
+ This installs the lz4 package using the apt utility.
+
+ Args:
+ method (FETCH_...): Method to use
+
+ Returns:
+ True if the file was fetched and now installed, None if a method
+ other than FETCH_BIN was requested
+
+ Raises:
+ ValueError: Fetching could not be completed
+ """
+ if method != bintool.FETCH_BIN:
+ return None
+ return self.apt_install('lz4')
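A minimal round-trip sketch; it assumes binman's output directory has already been set up (e.g. with tools.prepare_output_dir(), as binman itself does), since compress() and decompress() write temporary files there:

    from binman import bintool
    from u_boot_pylib import tools

    tools.prepare_output_dir(None)        # use a temporary output directory
    lz4 = bintool.Bintool.create('lz4')
    comp = lz4.compress(b'hello world')
    assert lz4.decompress(comp) == b'hello world'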
diff --git a/tools/binman/btool/lzma_alone.py b/tools/binman/btool/lzma_alone.py
new file mode 100644
index 00000000000..1fda2f68c7b
--- /dev/null
+++ b/tools/binman/btool/lzma_alone.py
@@ -0,0 +1,126 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2022 Google LLC
+#
+"""Bintool implementation for lzma_alone
+
+lzma_alone allows compression and decompression of files, using an older version
+of lzma.
+
+Documentation is available via::
+
+ man lzma_alone
+
+Here is the help:
+
+LZMA 9.22 beta : Igor Pavlov : Public domain : 2011-04-18
+
+Usage: LZMA <e|d> inputFile outputFile [<switches>...]
+ e: encode file
+ d: decode file
+ b: Benchmark
+<Switches>
+ -a{N}: set compression mode - [0, 1], default: 1 (max)
+ -d{N}: set dictionary size - [12, 30], default: 23 (8MB)
+ -fb{N}: set number of fast bytes - [5, 273], default: 128
+ -mc{N}: set number of cycles for match finder
+ -lc{N}: set number of literal context bits - [0, 8], default: 3
+ -lp{N}: set number of literal pos bits - [0, 4], default: 0
+ -pb{N}: set number of pos bits - [0, 4], default: 2
+ -mf{MF_ID}: set Match Finder: [bt2, bt3, bt4, hc4], default: bt4
+ -mt{N}: set number of CPU threads
+ -eos: write End Of Stream marker
+ -si: read data from stdin
+ -so: write data to stdout
+"""
+
+import re
+import tempfile
+
+from binman import bintool
+from u_boot_pylib import tools
+
+# pylint: disable=C0103
+class Bintoollzma_alone(bintool.Bintool):
+ """Compression/decompression using the LZMA algorithm
+
+ This bintool supports running `lzma_alone` to compress and decompress data,
+ as used by binman.
+
+ It is also possible to fetch the tool, which uses `apt` to install it.
+
+ Documentation is available via::
+
+ man lzma_alone
+ """
+ def __init__(self, name):
+ super().__init__(name, 'lzma_alone compression')
+
+ def compress(self, indata):
+ """Compress data with lzma_alone
+
+ Args:
+ indata (bytes): Data to compress
+
+ Returns:
+ bytes: Compressed data
+ """
+ with tempfile.NamedTemporaryFile(prefix='comp.tmp',
+ dir=tools.get_output_dir()) as inf:
+ tools.write_file(inf.name, indata)
+ with tempfile.NamedTemporaryFile(prefix='compo.otmp',
+ dir=tools.get_output_dir()) as outf:
+ args = ['e', inf.name, outf.name, '-lc1', '-lp0', '-pb0', '-d8']
+ self.run_cmd(*args, binary=True)
+ return tools.read_file(outf.name)
+
+ def decompress(self, indata):
+ """Decompress data with lzma_alone
+
+ Args:
+ indata (bytes): Data to decompress
+
+ Returns:
+ bytes: Decompressed data
+ """
+ with tempfile.NamedTemporaryFile(prefix='decomp.tmp',
+ dir=tools.get_output_dir()) as inf:
+ tools.write_file(inf.name, indata)
+ with tempfile.NamedTemporaryFile(prefix='compo.otmp',
+ dir=tools.get_output_dir()) as outf:
+ args = ['d', inf.name, outf.name]
+ self.run_cmd(*args, binary=True)
+ return tools.read_file(outf.name, binary=True)
+
+ def fetch(self, method):
+ """Fetch handler for lzma_alone
+
+ This installs the lzma-alone package using the apt utility.
+
+ Args:
+ method (FETCH_...): Method to use
+
+ Returns:
+ True if the file was fetched and now installed, None if a method
+ other than FETCH_BIN was requested
+
+ Raises:
+ ValueError: Fetching could not be completed
+ """
+ if method != bintool.FETCH_BIN:
+ return None
+ return self.apt_install('lzma-alone')
+
+ def version(self):
+ """Version handler
+
+ Returns:
+ str: Version number of lzma_alone
+ """
+ out = self.run_cmd_result('', raise_on_error=False).stderr.strip()
+ lines = out.splitlines()
+ if not lines:
+ return super().version()
+ out = lines[0]
+ # e.g. LZMA 9.22 beta : Igor Pavlov : Public domain : 2011-04-18
+ m_version = re.match(r'LZMA ([^:]*).*', out)
+ return m_version.group(1).strip() if m_version else out
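For reference, the version() override above parses the banner line that lzma_alone prints when run with no arguments, e.g.:

    import re

    line = 'LZMA 9.22 beta : Igor Pavlov : Public domain : 2011-04-18'
    m_version = re.match(r'LZMA ([^:]*).*', line)
    print(m_version.group(1).strip())   # prints '9.22 beta'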
diff --git a/tools/binman/btool/lzop.py b/tools/binman/btool/lzop.py
new file mode 100644
index 00000000000..f6903b4db75
--- /dev/null
+++ b/tools/binman/btool/lzop.py
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (C) 2022 Weidmüller Interface GmbH & Co. KG
+# Stefan Herbrechtsmeier <stefan.herbrechtsmeier@weidmueller.com>
+#
+"""Bintool implementation for lzop
+
+lzop allows compression and decompression of files.
+
+Documentation is available via::
+
+ man lzop
+"""
+
+from binman import bintool
+
+# pylint: disable=C0103
+class Bintoollzop(bintool.BintoolPacker):
+ """Compression/decompression using the lzop algorithm
+
+ This bintool supports running `lzop` to compress and decompress data, as
+ used by binman.
+
+ It is also possible to fetch the tool, which uses `apt` to install it.
+
+ Documentation is available via::
+
+ man lzop
+ """
+ def __init__(self, name):
+ super().__init__(name, 'lzo', compress_args=[])
diff --git a/tools/binman/btool/mkeficapsule.py b/tools/binman/btool/mkeficapsule.py
new file mode 100644
index 00000000000..ef1da638df1
--- /dev/null
+++ b/tools/binman/btool/mkeficapsule.py
@@ -0,0 +1,127 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2023 Linaro Limited
+#
+"""Bintool implementation for mkeficapsule tool
+
+mkeficapsule is a tool used for generating EFI capsules.
+
+The following command-line options can be provided to the tool:
+
+Usage: mkeficapsule [options] <image blob> <output file>
+Options:
+ -g, --guid <guid string> guid for image blob type
+ -i, --index <index> update image index
+ -I, --instance <instance> update hardware instance
+ -v, --fw-version <version> firmware version
+ -p, --private-key <privkey file> private key file
+ -c, --certificate <cert file> signer's certificate file
+ -m, --monotonic-count <count> monotonic count
+ -d, --dump_sig dump signature (*.p7)
+ -A, --fw-accept firmware accept capsule, requires GUID, no image blob
+ -R, --fw-revert firmware revert capsule, takes no GUID, no image blob
+ -o, --capoemflag Capsule OEM Flag, an integer between 0x0000 and 0xffff
+ -h, --help print a help message
+"""
+
+from binman import bintool
+
+class Bintoolmkeficapsule(bintool.Bintool):
+ """Handles the 'mkeficapsule' tool
+
+ This bintool is used for generating EFI capsules. The capsule
+ generation parameters can be specified either on the command line or
+ through a config file.
+ """
+ def __init__(self, name):
+ super().__init__(name, 'mkeficapsule tool for generating capsules')
+
+ def generate_capsule(self, image_index, image_guid, hardware_instance,
+ payload, output_fname, priv_key, pub_key,
+ monotonic_count=0, version=0, oemflags=0):
+ """Generate a capsule through commandline-provided parameters
+
+ Args:
+ image_index (int): Unique number for identifying payload image
+ image_guid (str): GUID used for identifying the image
+ hardware_instance (int): Optional unique hardware instance of
+ a device in the system. 0 if not being used
+ payload (str): Path to the input payload image
+ output_fname (str): Path to the output capsule file
+ priv_key (str): Path to the private key
+ pub_key (str): Path to the public key
+ monotonic_count (int): Count used when signing an image
+ version (int): Image version (Optional)
+ oemflags (int): Optional 16 bit OEM flags
+
+ Returns:
+ str: Tool output
+ """
+ args = [
+ f'--index={image_index}',
+ f'--guid={image_guid}',
+ f'--instance={hardware_instance}'
+ ]
+
+ if version:
+ args += [f'--fw-version={version}']
+ if oemflags:
+ args += [f'--capoemflag={oemflags}']
+ if priv_key and pub_key:
+ args += [
+ f'--monotonic-count={monotonic_count}',
+ f'--private-key={priv_key}',
+ f'--certificate={pub_key}'
+ ]
+
+ args += [
+ payload,
+ output_fname
+ ]
+
+ return self.run_cmd(*args)
+
+ def generate_empty_capsule(self, image_guid, output_fname,
+ accept=True):
+ """Generate empty capsules for FWU A/B updates
+
+ Args:
+ image_guid (str): GUID used for identifying the image
+ in case of an accept capsule
+ output_fname (str): Path to the output capsule file
+ accept (bool): Generate an accept capsule,
+ else a revert capsule
+
+ Returns:
+ str: Tool output
+ """
+ if accept:
+ args = [
+ f'--guid={image_guid}',
+ '--fw-accept'
+ ]
+ else:
+ args = [ '--fw-revert' ]
+
+ args += [ output_fname ]
+
+ return self.run_cmd(*args)
+
+ def fetch(self, method):
+ """Fetch handler for mkeficapsule
+
+ This builds the tool from source
+
+ Returns:
+ tuple:
+ str: Filename of fetched file to copy to a suitable directory
+ str: Name of temp directory to remove, or None
+ """
+ if method != bintool.FETCH_BUILD:
+ return None
+
+ cmd = ['tools-only_defconfig', 'tools']
+ result = self.build_from_git(
+ 'https://source.denx.de/u-boot/u-boot.git',
+ cmd,
+ 'tools/mkeficapsule')
+ return result
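A usage sketch for a signed capsule; the GUID, key and filenames are placeholders:

    from binman import bintool

    mkeficapsule = bintool.Bintool.create('mkeficapsule')
    mkeficapsule.generate_capsule(
        image_index=1,
        image_guid='00112233-4455-6677-8899-aabbccddeeff',
        hardware_instance=0,
        payload='u-boot.bin',
        output_fname='capsule.bin',
        priv_key='signing.key',
        pub_key='signing.crt',
        monotonic_count=1)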
diff --git a/tools/binman/btool/mkimage.py b/tools/binman/btool/mkimage.py
new file mode 100644
index 00000000000..39a4c8c1432
--- /dev/null
+++ b/tools/binman/btool/mkimage.py
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2022 Google LLC
+#
+"""Bintool implementation for mkimage"""
+
+import re
+
+from binman import bintool
+
+class Bintoolmkimage(bintool.Bintool):
+ """Image generation for U-Boot
+
+ This bintool supports running `mkimage` with some basic parameters as
+ needed by binman.
+
+ Normally binman uses the mkimage built by U-Boot. But when run outside the
+ U-Boot build system, binman can use the version installed in your system.
+ Support is provided for fetching this on Debian-like systems, using apt.
+ """
+ def __init__(self, name):
+ super().__init__(name, 'Generate image for U-Boot', r'mkimage version (.*)')
+
+ # pylint: disable=R0913
+ def run(self, reset_timestamp=False, output_fname=None, external=False,
+ pad=None, align=None):
+ """Run mkimage
+
+ Args:
+ reset_timestamp: True to update the timestamp in the FIT
+ output_fname: Output filename to write to
+ external: True to create an 'external' FIT, where the binaries are
+ located outside the main data structure
+ pad: Bytes to use for padding the FIT devicetree output. This allows
+ other things to be easily added later, if required, such as
+ signatures
+ align: Bytes to use for alignment of the FIT and its external data
+ """
+ args = []
+ if external:
+ args.append('-E')
+ if pad:
+ args += ['-p', f'{pad:x}']
+ if align:
+ args += ['-B', f'{align:x}']
+ if reset_timestamp:
+ args.append('-t')
+ if output_fname:
+ args += ['-F', output_fname]
+ return self.run_cmd(*args)
+
+ def fetch(self, method):
+ """Fetch handler for mkimage
+
+ This installs mkimage using the apt utility.
+
+ Args:
+ method (FETCH_...): Method to use
+
+ Returns:
+ True if the file was fetched and now installed, None if a method
+ other than FETCH_BIN was requested
+
+ Raises:
+ ValueError: Fetching could not be completed
+ """
+ if method != bintool.FETCH_BIN:
+ return None
+ return self.apt_install('u-boot-tools')
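A sketch of how run() maps to mkimage flags; 'fit.fit' is a placeholder for an existing FIT whose timestamp should be refreshed:

    from binman import bintool

    mkimage = bintool.Bintool.create('mkimage')
    # Runs: mkimage -E -B 10 -t -F fit.fit   (-B takes a hex value)
    mkimage.run(reset_timestamp=True, output_fname='fit.fit', external=True,
                align=0x10)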
diff --git a/tools/binman/btool/openssl.py b/tools/binman/btool/openssl.py
new file mode 100644
index 00000000000..c6df64c5316
--- /dev/null
+++ b/tools/binman/btool/openssl.py
@@ -0,0 +1,359 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2022 Google LLC
+#
+"""Bintool implementation for openssl
+
+openssl provides a number of features useful for signing images
+
+Documentation is at https://www.openssl.org/docs/
+
+Source code is at https://www.openssl.org/
+"""
+
+import hashlib
+
+from binman import bintool
+from u_boot_pylib import tools
+
+
+VALID_SHAS = [256, 384, 512, 224]
+SHA_OIDS = {256:'2.16.840.1.101.3.4.2.1',
+ 384:'2.16.840.1.101.3.4.2.2',
+ 512:'2.16.840.1.101.3.4.2.3',
+ 224:'2.16.840.1.101.3.4.2.4'}
+
+class Bintoolopenssl(bintool.Bintool):
+ """openssl tool
+
+ This bintool supports creating new openssl certificates.
+
+ It also supports fetching a binary openssl
+
+ Documentation about openssl is at https://www.openssl.org/
+ """
+ def __init__(self, name):
+ super().__init__(
+ name, 'openssl cryptography toolkit',
+ version_regex=r'OpenSSL (.*) \(', version_args='version')
+
+ def x509_cert(self, cert_fname, input_fname, key_fname, cn, revision,
+ config_fname):
+ """Create a certificate
+
+ Args:
+ cert_fname (str): Filename of certificate to create
+ input_fname (str): Filename containing data to sign
+ key_fname (str): Filename of .pem file
+ cn (str): Common name
+ revision (int): Revision number
+ config_fname (str): Filename to write the OpenSSL config into
+
+ Returns:
+ str: Tool output
+ """
+ indata = tools.read_file(input_fname)
+ hashval = hashlib.sha512(indata).hexdigest()
+ with open(config_fname, 'w', encoding='utf-8') as outf:
+ print(f'''[ req ]
+distinguished_name = req_distinguished_name
+x509_extensions = v3_ca
+prompt = no
+dirstring_type = nobmp
+
+[ req_distinguished_name ]
+CN = {cert_fname}
+
+[ v3_ca ]
+basicConstraints = CA:true
+1.3.6.1.4.1.294.1.3 = ASN1:SEQUENCE:swrv
+1.3.6.1.4.1.294.1.34 = ASN1:SEQUENCE:sysfw_image_integrity
+
+[ swrv ]
+swrv = INTEGER:{revision}
+
+[ sysfw_image_integrity ]
+shaType = OID:2.16.840.1.101.3.4.2.3
+shaValue = FORMAT:HEX,OCT:{hashval}
+imageSize = INTEGER:{len(indata)}
+''', file=outf)
+ args = ['req', '-new', '-x509', '-key', key_fname, '-nodes',
+ '-outform', 'DER', '-out', cert_fname, '-config', config_fname,
+ '-sha512']
+ return self.run_cmd(*args)
+
+ def x509_cert_sysfw(self, cert_fname, input_fname, key_fname, sw_rev,
+ config_fname, req_dist_name_dict, firewall_cert_data):
+ """Create a certificate to be booted by system firmware
+
+ Args:
+ cert_fname (str): Filename of certificate to create
+ input_fname (str): Filename containing data to sign
+ key_fname (str): Filename of .pem file
+ sw_rev (int): Software revision
+ config_fname (str): Filename to write the OpenSSL config into
+ req_dist_name_dict (dict): Dictionary containing key-value pairs of
+ req_distinguished_name section extensions, must contain extensions for
+ C, ST, L, O, OU, CN and emailAddress
+ firewall_cert_data (dict):
+ - auth_in_place (int): The Priv ID for copying as the
+ specific host in firewall protected region
+ - num_firewalls (int): The number of firewalls in the
+ extended certificate
+ - certificate (str): Extended firewall certificate with
+ the information for the firewall configurations.
+
+ Returns:
+ str: Tool output
+ """
+ indata = tools.read_file(input_fname)
+ hashval = hashlib.sha512(indata).hexdigest()
+ with open(config_fname, 'w', encoding='utf-8') as outf:
+ print(f'''[ req ]
+distinguished_name = req_distinguished_name
+x509_extensions = v3_ca
+prompt = no
+dirstring_type = nobmp
+
+[ req_distinguished_name ]
+C = {req_dist_name_dict['C']}
+ST = {req_dist_name_dict['ST']}
+L = {req_dist_name_dict['L']}
+O = {req_dist_name_dict['O']}
+OU = {req_dist_name_dict['OU']}
+CN = {req_dist_name_dict['CN']}
+emailAddress = {req_dist_name_dict['emailAddress']}
+
+[ v3_ca ]
+basicConstraints = CA:true
+1.3.6.1.4.1.294.1.3 = ASN1:SEQUENCE:swrv
+1.3.6.1.4.1.294.1.34 = ASN1:SEQUENCE:sysfw_image_integrity
+1.3.6.1.4.1.294.1.35 = ASN1:SEQUENCE:sysfw_image_load
+1.3.6.1.4.1.294.1.37 = ASN1:SEQUENCE:firewall
+
+[ swrv ]
+swrv = INTEGER:{sw_rev}
+
+[ sysfw_image_integrity ]
+shaType = OID:2.16.840.1.101.3.4.2.3
+shaValue = FORMAT:HEX,OCT:{hashval}
+imageSize = INTEGER:{len(indata)}
+
+[ sysfw_image_load ]
+destAddr = FORMAT:HEX,OCT:00000000
+authInPlace = INTEGER:{hex(firewall_cert_data['auth_in_place'])}
+
+[ firewall ]
+numFirewallRegions = INTEGER:{firewall_cert_data['num_firewalls']}
+{firewall_cert_data['certificate']}
+''', file=outf)
+ args = ['req', '-new', '-x509', '-key', key_fname, '-nodes',
+ '-outform', 'DER', '-out', cert_fname, '-config', config_fname,
+ '-sha512']
+ return self.run_cmd(*args)
+
+ def x509_cert_rom(self, cert_fname, input_fname, key_fname, sw_rev,
+ config_fname, req_dist_name_dict, cert_type, bootcore,
+ bootcore_opts, load_addr, sha):
+ """Create a certificate
+
+ Args:
+ cert_fname (str): Filename of certificate to create
+ input_fname (str): Filename containing data to sign
+ key_fname (str): Filename of .pem file
+ sw_rev (int): Software revision
+ config_fname (str): Filename to write the OpenSSL config into
+ req_dist_name_dict (dict): Dictionary containing key-value pairs of
+ req_distinguished_name section extensions, must contain extensions for
+ C, ST, L, O, OU, CN and emailAddress
+ cert_type (int): Certification type
+ bootcore (int): Booting core
+ bootcore_opts(int): Booting core option, lockstep (0) or split (2) mode
+ load_addr (int): Load address of image
+ sha (int): Hash function
+
+ Returns:
+ str: Tool output
+ """
+ indata = tools.read_file(input_fname)
+ hashval = hashlib.sha512(indata).hexdigest()
+ with open(config_fname, 'w', encoding='utf-8') as outf:
+ print(f'''
+[ req ]
+ distinguished_name = req_distinguished_name
+ x509_extensions = v3_ca
+ prompt = no
+ dirstring_type = nobmp
+
+ [ req_distinguished_name ]
+C = {req_dist_name_dict['C']}
+ST = {req_dist_name_dict['ST']}
+L = {req_dist_name_dict['L']}
+O = {req_dist_name_dict['O']}
+OU = {req_dist_name_dict['OU']}
+CN = {req_dist_name_dict['CN']}
+emailAddress = {req_dist_name_dict['emailAddress']}
+
+ [ v3_ca ]
+ basicConstraints = CA:true
+ 1.3.6.1.4.1.294.1.1 = ASN1:SEQUENCE:boot_seq
+ 1.3.6.1.4.1.294.1.2 = ASN1:SEQUENCE:image_integrity
+ 1.3.6.1.4.1.294.1.3 = ASN1:SEQUENCE:swrv
+# 1.3.6.1.4.1.294.1.4 = ASN1:SEQUENCE:encryption
+ 1.3.6.1.4.1.294.1.8 = ASN1:SEQUENCE:debug
+
+ [ boot_seq ]
+ certType = INTEGER:{cert_type}
+ bootCore = INTEGER:{bootcore}
+ bootCoreOpts = INTEGER:{bootcore_opts}
+ destAddr = FORMAT:HEX,OCT:{load_addr:08x}
+ imageSize = INTEGER:{len(indata)}
+
+ [ image_integrity ]
+ shaType = OID:{SHA_OIDS[sha]}
+ shaValue = FORMAT:HEX,OCT:{hashval}
+
+ [ swrv ]
+ swrv = INTEGER:{sw_rev}
+
+# [ encryption ]
+# initalVector = FORMAT:HEX,OCT:TEST_IMAGE_ENC_IV
+# randomString = FORMAT:HEX,OCT:TEST_IMAGE_ENC_RS
+# iterationCnt = INTEGER:TEST_IMAGE_KEY_DERIVE_INDEX
+# salt = FORMAT:HEX,OCT:TEST_IMAGE_KEY_DERIVE_SALT
+
+ [ debug ]
+ debugUID = FORMAT:HEX,OCT:0000000000000000000000000000000000000000000000000000000000000000
+ debugType = INTEGER:4
+ coreDbgEn = INTEGER:0
+ coreDbgSecEn = INTEGER:0
+''', file=outf)
+ args = ['req', '-new', '-x509', '-key', key_fname, '-nodes',
+ '-outform', 'DER', '-out', cert_fname, '-config', config_fname,
+ '-sha512']
+ return self.run_cmd(*args)
+
+ def x509_cert_rom_combined(self, cert_fname, input_fname, key_fname, sw_rev,
+ config_fname, req_dist_name_dict, load_addr, sha, total_size, num_comps,
+ sysfw_inner_cert_ext_boot_sequence_string, dm_data_ext_boot_sequence_string,
+ imagesize_sbl, hashval_sbl, load_addr_sysfw, imagesize_sysfw,
+ hashval_sysfw, load_addr_sysfw_data, imagesize_sysfw_data,
+ hashval_sysfw_data, sysfw_inner_cert_ext_boot_block,
+ dm_data_ext_boot_block, bootcore_opts):
+ """Create a certificate
+
+ Args:
+ cert_fname (str): Filename of certificate to create
+ input_fname (str): Filename containing data to sign
+ key_fname (str): Filename of .pem file
+ sw_rev (int): Software revision
+ config_fname (str): Filename to write the OpenSSL config into
+ req_dist_name_dict (dict): Dictionary containing key-value pairs of
+ req_distinguished_name section extensions, must contain extensions for
+ C, ST, L, O, OU, CN and emailAddress
+ load_addr (int): Load address of image
+ sha (int): Hash function
+ bootcore_opts (int): Booting core option, lockstep (0) or split (2) mode
+
+ Returns:
+ str: Tool output
+ """
+ indata = tools.read_file(input_fname)
+ hashval = hashlib.sha512(indata).hexdigest()
+ sha_type = SHA_OIDS[sha]
+ with open(config_fname, 'w', encoding='utf-8') as outf:
+ print(f'''
+[ req ]
+distinguished_name = req_distinguished_name
+x509_extensions = v3_ca
+prompt = no
+dirstring_type = nobmp
+
+[ req_distinguished_name ]
+C = {req_dist_name_dict['C']}
+ST = {req_dist_name_dict['ST']}
+L = {req_dist_name_dict['L']}
+O = {req_dist_name_dict['O']}
+OU = {req_dist_name_dict['OU']}
+CN = {req_dist_name_dict['CN']}
+emailAddress = {req_dist_name_dict['emailAddress']}
+
+[ v3_ca ]
+basicConstraints = CA:true
+1.3.6.1.4.1.294.1.3=ASN1:SEQUENCE:swrv
+1.3.6.1.4.1.294.1.9=ASN1:SEQUENCE:ext_boot_info
+1.3.6.1.4.1.294.1.8=ASN1:SEQUENCE:debug
+
+[swrv]
+swrv=INTEGER:{sw_rev}
+
+[ext_boot_info]
+extImgSize=INTEGER:{total_size}
+numComp=INTEGER:{num_comps}
+sbl=SEQUENCE:sbl
+sysfw=SEQUENCE:sysfw
+sysfw_data=SEQUENCE:sysfw_data
+{sysfw_inner_cert_ext_boot_sequence_string}
+{dm_data_ext_boot_sequence_string}
+
+[sbl]
+compType = INTEGER:1
+bootCore = INTEGER:16
+compOpts = INTEGER:{bootcore_opts}
+destAddr = FORMAT:HEX,OCT:{load_addr:08x}
+compSize = INTEGER:{imagesize_sbl}
+shaType = OID:{sha_type}
+shaValue = FORMAT:HEX,OCT:{hashval_sbl}
+
+[sysfw]
+compType = INTEGER:2
+bootCore = INTEGER:0
+compOpts = INTEGER:0
+destAddr = FORMAT:HEX,OCT:{load_addr_sysfw:08x}
+compSize = INTEGER:{imagesize_sysfw}
+shaType = OID:{sha_type}
+shaValue = FORMAT:HEX,OCT:{hashval_sysfw}
+
+[sysfw_data]
+compType = INTEGER:18
+bootCore = INTEGER:0
+compOpts = INTEGER:0
+destAddr = FORMAT:HEX,OCT:{load_addr_sysfw_data:08x}
+compSize = INTEGER:{imagesize_sysfw_data}
+shaType = OID:{sha_type}
+shaValue = FORMAT:HEX,OCT:{hashval_sysfw_data}
+
+[ debug ]
+debugUID = FORMAT:HEX,OCT:0000000000000000000000000000000000000000000000000000000000000000
+debugType = INTEGER:4
+coreDbgEn = INTEGER:0
+coreDbgSecEn = INTEGER:0
+
+{sysfw_inner_cert_ext_boot_block}
+
+{dm_data_ext_boot_block}
+ ''', file=outf)
+ args = ['req', '-new', '-x509', '-key', key_fname, '-nodes',
+ '-outform', 'DER', '-out', cert_fname, '-config', config_fname,
+ '-sha512']
+ return self.run_cmd(*args)
+
+ def fetch(self, method):
+ """Fetch handler for openssl
+
+ This installs the openssl package using the apt utility.
+
+ Args:
+ method (FETCH_...): Method to use
+
+ Returns:
+ True if the file was fetched and now installed, None if a method
+ other than FETCH_BIN was requested
+
+ Raises:
+ ValueError: Fetching could not be completed
+ """
+ if method != bintool.FETCH_BIN:
+ return None
+ return self.apt_install('openssl')
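A sketch of creating a basic certificate with x509_cert(); the key must already exist and the filenames are placeholders. Note that the generated config uses cert_fname for the CN field, so the cn argument is effectively unused here:

    from binman import bintool

    openssl = bintool.Bintool.create('openssl')
    openssl.x509_cert(cert_fname='cert.der', input_fname='sysfw.bin',
                      key_fname='key.pem', cn='unused', revision=1,
                      config_fname='cert.cnf')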
diff --git a/tools/binman/btool/xz.py b/tools/binman/btool/xz.py
new file mode 100644
index 00000000000..e2b413d18bd
--- /dev/null
+++ b/tools/binman/btool/xz.py
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (C) 2022 Weidmüller Interface GmbH & Co. KG
+# Stefan Herbrechtsmeier <stefan.herbrechtsmeier@weidmueller.com>
+#
+"""Bintool implementation for xz
+
+xz allows compression and decompression of files.
+
+Documentation is available via::
+
+ man xz
+"""
+
+from binman import bintool
+
+# pylint: disable=C0103
+class Bintoolxz(bintool.BintoolPacker):
+ """Compression/decompression using the xz algorithm
+
+ This bintool supports running `xz` to compress and decompress data, as
+ used by binman.
+
+ It is also possible to fetch the tool, which uses `apt` to install it.
+
+ Documentation is available via::
+
+ man xz
+ """
+ def __init__(self, name):
+ super().__init__(name, fetch_package='xz-utils',
+ version_regex=r'xz \(XZ Utils\) ([0-9.]+)')
diff --git a/tools/binman/btool/zstd.py b/tools/binman/btool/zstd.py
new file mode 100644
index 00000000000..299bd371269
--- /dev/null
+++ b/tools/binman/btool/zstd.py
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (C) 2022 Weidmüller Interface GmbH & Co. KG
+# Stefan Herbrechtsmeier <stefan.herbrechtsmeier@weidmueller.com>
+#
+"""Bintool implementation for zstd
+
+zstd allows compression and decompression of files.
+
+Documentation is available via::
+
+ man zstd
+"""
+
+from binman import bintool
+
+# pylint: disable=C0103
+class Bintoolzstd(bintool.BintoolPacker):
+ """Compression/decompression using the zstd algorithm
+
+ This bintool supports running `zstd` to compress and decompress data, as
+ used by binman.
+
+ It is also possible to fetch the tool, which uses `apt` to install it.
+
+ Documentation is available via::
+
+ man zstd
+ """
+ def __init__(self, name):
+ super().__init__(name)
diff --git a/tools/binman/cbfs_util.py b/tools/binman/cbfs_util.py
new file mode 100644
index 00000000000..671cafa34c0
--- /dev/null
+++ b/tools/binman/cbfs_util.py
@@ -0,0 +1,885 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2019 Google LLC
+# Written by Simon Glass <sjg@chromium.org>
+
+"""Support for coreboot's CBFS format
+
+CBFS supports a header followed by a number of files, generally targeted at SPI
+flash.
+
+The format is somewhat defined by documentation in the coreboot tree although
+it is necessary to rely on the C structures and source code (mostly cbfstool)
+to fully understand it.
+
+Currently supported: raw and stage types with compression, padding empty areas
+ with empty files, fixed-offset files
+"""
+
+from collections import OrderedDict
+import io
+import struct
+import sys
+
+from binman import bintool
+from binman import elf
+from u_boot_pylib import command
+from u_boot_pylib import tools
+
+# Set to True to enable printing output while working
+DEBUG = False
+
+# Set to True to enable output from running cbfstool for debugging
+VERBOSE = False
+
+# The master header, at the start of the CBFS
+HEADER_FORMAT = '>IIIIIIII'
+HEADER_LEN = 0x20
+HEADER_MAGIC = 0x4f524243
+HEADER_VERSION1 = 0x31313131
+HEADER_VERSION2 = 0x31313132
+
+# The file header, at the start of each file in the CBFS
+FILE_HEADER_FORMAT = b'>8sIIII'
+FILE_HEADER_LEN = 0x18
+FILE_MAGIC = b'LARCHIVE'
+ATTRIBUTE_ALIGN = 4 # All attribute sizes must be divisible by this
+
+# A stage-header attribute containing information about 'stage' files
+# Yes this is correct: this header is in little-endian format
+ATTR_STAGE_FORMAT = '>IIQII'
+ATTR_STAGE_LEN = 0x18
+
+# An attribute describing the compression used in a file
+ATTR_COMPRESSION_FORMAT = '>IIII'
+ATTR_COMPRESSION_LEN = 0x10
+
+# Attribute tags
+FILE_ATTR_TAG_COMPRESSION = 0x42435a4c
+FILE_ATTR_TAG_HASH = 0x68736148
+FILE_ATTR_TAG_POSITION = 0x42435350 # PSCB
+FILE_ATTR_TAG_ALIGNMENT = 0x42434c41 # ALCB
+FILE_ATTR_TAG_PADDING = 0x47444150 # PDNG
+FILE_ATTR_TAG_STAGEHEADER = 0x53746748 # StgH
+
+# This is 'the size of bootblock reserved in firmware image (cbfs.txt)'
+# Not much more info is available, but we set it to 4, due to this comment in
+# cbfstool.c:
+# This causes 4 bytes to be left out at the end of the image, for two reasons:
+# 1. The cbfs master header pointer resides there
+# 2. Some cbfs implementations assume that an image that resides below 4GB has
+# a bootblock and get confused when the end of the image is at 4GB == 0.
+MIN_BOOTBLOCK_SIZE = 4
+
+# Files start aligned to this boundary in the CBFS
+ENTRY_ALIGN = 0x40
+
+# CBFSs must declare an architecture since much of the logic is designed with
+# x86 in mind. The effect of setting this value is not well documented, but in
+# general x86 is used and this makes use of a boot block and an image that ends
+# at the end of 32-bit address space.
+ARCHITECTURE_UNKNOWN = 0xffffffff
+ARCHITECTURE_X86 = 0x00000001
+ARCHITECTURE_ARM = 0x00000010
+ARCHITECTURE_AARCH64 = 0x0000aa64
+ARCHITECTURE_MIPS = 0x00000100
+ARCHITECTURE_RISCV = 0xc001d0de
+ARCHITECTURE_PPC64 = 0x407570ff
+
+ARCH_NAMES = {
+ ARCHITECTURE_UNKNOWN : 'unknown',
+ ARCHITECTURE_X86 : 'x86',
+ ARCHITECTURE_ARM : 'arm',
+ ARCHITECTURE_AARCH64 : 'arm64',
+ ARCHITECTURE_MIPS : 'mips',
+ ARCHITECTURE_RISCV : 'riscv',
+ ARCHITECTURE_PPC64 : 'ppc64',
+ }
+
+# File types. Only supported ones are included here
+TYPE_CBFSHEADER = 0x02 # Master header, HEADER_FORMAT
+TYPE_LEGACY_STAGE = 0x10 # Stage, holding an executable
+TYPE_STAGE = 0x11 # New-type stage with ATTR_STAGE_FORMAT
+TYPE_RAW = 0x50 # Raw file, possibly compressed
+TYPE_EMPTY = 0xffffffff # Empty data
+
+# Compression types
+COMPRESS_NONE, COMPRESS_LZMA, COMPRESS_LZ4 = range(3)
+
+COMPRESS_NAMES = {
+ COMPRESS_NONE : 'none',
+ COMPRESS_LZMA : 'lzma',
+ COMPRESS_LZ4 : 'lz4',
+ }
+
+def find_arch(find_name):
+ """Look up an architecture name
+
+ Args:
+ find_name: Architecture name to find
+
+ Returns:
+ ARCHITECTURE_... value or None if not found
+ """
+ for arch, name in ARCH_NAMES.items():
+ if name == find_name:
+ return arch
+ return None
+
+def find_compress(find_name):
+ """Look up a compression algorithm name
+
+ Args:
+ find_name: Compression algorithm name to find
+
+ Returns:
+ COMPRESS_... value or None if not found
+ """
+ for compress, name in COMPRESS_NAMES.items():
+ if name == find_name:
+ return compress
+ return None
+
+def compress_name(compress):
+ """Look up the name of a compression algorithm
+
+ Args:
+ compress: Compression algorithm number to find (COMPRESS_...)
+
+ Returns:
+ Compression algorithm name (string)
+
+ Raises:
+ KeyError if the algorithm number is invalid
+ """
+ return COMPRESS_NAMES[compress]
+
+def align_int(val, align):
+ """Align a value up to the given alignment
+
+ Args:
+ val: Integer value to align
+ align: Integer alignment value (e.g. 4 to align to 4-byte boundary)
+
+ Returns:
+ integer value aligned to the required boundary, rounding up if necessary
+ """
+ return int((val + align - 1) / align) * align
+
+def align_int_down(val, align):
+ """Align a value down to the given alignment
+
+ Args:
+ val: Integer value to align
+ align: Integer alignment value (e.g. 4 to align to 4-byte boundary)
+
+ Returns:
+ integer value aligned to the required boundary, rounding down if
+ necessary
+ """
+ return int(val / align) * align
+
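+# Worked example of the two helpers above (values chosen for illustration):
+#   align_int(0x123, 0x40) == 0x140       (rounded up to a 0x40 boundary)
+#   align_int_down(0x123, 0x40) == 0x100  (rounded down)
+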
+def _pack_string(instr):
+ """Pack a string to the required aligned size by adding padding
+
+ Args:
+ instr: String to process
+
+ Returns:
+ String with required padding (at least one 0x00 byte) at the end
+ """
+ val = tools.to_bytes(instr)
+ pad_len = align_int(len(val) + 1, ATTRIBUTE_ALIGN)
+ return val + tools.get_bytes(0, pad_len - len(val))
+
+
+class CbfsFile(object):
+ """Class to represent a single CBFS file
+
+ This is used to hold the information about a file, including its contents.
+ Use the get_data_and_offset() method to obtain the raw output for writing to
+ CBFS.
+
+ Properties:
+ name: Name of file
+ offset: Offset of file data from start of file header
+ cbfs_offset: Offset of file data in bytes from start of CBFS, or None to
+ place this file anywhere
+ data: Contents of file, uncompressed
+ orig_data: Original data added to the file, possibly compressed
+ data_len: Length of (possibly compressed) data in bytes
+ ftype: File type (TYPE_...)
+ compression: Compression type (COMPRESS_...)
+ memlen: Length of data in memory, i.e. the uncompressed length, None if
+ no compression algorithm is selected
+ load: Load address in memory if known, else None
+ entry: Entry address in memory if known, else None. This is where
+ execution starts after the file is loaded
+ base_address: Base address to use for 'stage' files
+ erase_byte: Erase byte to use for padding between the file header and
+ contents (used for empty files)
+ size: Size of the file in bytes (used for empty files)
+ """
+ def __init__(self, name, ftype, data, cbfs_offset, compress=COMPRESS_NONE):
+ self.name = name
+ self.offset = None
+ self.cbfs_offset = cbfs_offset
+ self.data = data
+ self.orig_data = data
+ self.ftype = ftype
+ self.compress = compress
+ self.memlen = None
+ self.load = None
+ self.entry = None
+ self.base_address = None
+ self.data_len = len(data)
+ self.erase_byte = None
+ self.size = None
+ if self.compress == COMPRESS_LZ4:
+ self.comp_bintool = bintool.Bintool.create('lz4')
+ elif self.compress == COMPRESS_LZMA:
+ self.comp_bintool = bintool.Bintool.create('lzma_alone')
+ else:
+ self.comp_bintool = None
+
+ def decompress(self):
+ """Handle decompressing data if necessary"""
+ indata = self.data
+ if self.comp_bintool:
+ data = self.comp_bintool.decompress(indata)
+ else:
+ data = indata
+ self.memlen = len(data)
+ self.data = data
+ self.data_len = len(indata)
+
+ @classmethod
+ def stage(cls, base_address, name, data, cbfs_offset):
+ """Create a new stage file
+
+ Args:
+ base_address: Int base address for memory-mapping of ELF file
+ name: String file name to put in CBFS (does not need to correspond
+ to the name that the file originally came from)
+ data: Contents of file
+ cbfs_offset: Offset of file data in bytes from start of CBFS, or
+ None to place this file anywhere
+
+ Returns:
+ CbfsFile object containing the file information
+ """
+ cfile = CbfsFile(name, TYPE_STAGE, data, cbfs_offset)
+ cfile.base_address = base_address
+ return cfile
+
+ @classmethod
+ def raw(cls, name, data, cbfs_offset, compress):
+ """Create a new raw file
+
+ Args:
+ name: String file name to put in CBFS (does not need to correspond
+ to the name that the file originally came from)
+ data: Contents of file
+ cbfs_offset: Offset of file data in bytes from start of CBFS, or
+ None to place this file anywhere
+ compress: Compression algorithm to use (COMPRESS_...)
+
+ Returns:
+ CbfsFile object containing the file information
+ """
+ return CbfsFile(name, TYPE_RAW, data, cbfs_offset, compress)
+
+ @classmethod
+ def empty(cls, space_to_use, erase_byte):
+ """Create a new empty file of a given size
+
+ Args:
+ space_to_use: Size of available space, which must be at least as
+ large as the alignment size for this CBFS
+ erase_byte: Byte to use for contents of file (repeated through the
+ whole file)
+
+ Returns:
+ CbfsFile object containing the file information
+ """
+ cfile = CbfsFile('', TYPE_EMPTY, b'', None)
+ cfile.size = space_to_use - FILE_HEADER_LEN - ATTRIBUTE_ALIGN
+ cfile.erase_byte = erase_byte
+ return cfile
+
+ def calc_start_offset(self):
+ """Check if this file needs to start at a particular offset in CBFS
+
+ Returns:
+ None if the file can be placed anywhere, or
+ the largest offset where the file could start (integer)
+ """
+ if self.cbfs_offset is None:
+ return None
+ return self.cbfs_offset - self.get_header_len()
+
+ def get_header_len(self):
+ """Get the length of headers required for a file
+
+ This is the minimum length required before the actual data for this file
+ could start. It might start later if there is padding.
+
+ Returns:
+ Total length of all non-data fields, in bytes
+ """
+ name = _pack_string(self.name)
+ hdr_len = len(name) + FILE_HEADER_LEN
+ if self.ftype == TYPE_STAGE:
+ hdr_len += ATTR_STAGE_LEN
+ elif self.ftype == TYPE_RAW:
+ if self.compress:
+ hdr_len += ATTR_COMPRESSION_LEN
+ elif self.ftype == TYPE_EMPTY:
+ pass
+ else:
+ raise ValueError('Unknown file type %#x\n' % self.ftype)
+ return hdr_len
+
+ def get_data_and_offset(self, offset=None, pad_byte=None):
+ """Obtain the contents of the file, in CBFS format and the offset of
+ the data within the file
+
+ Returns:
+ tuple:
+ bytes representing the contents of this file, packed and aligned
+ for directly inserting into the final CBFS output
+ offset to the file data from the start of the returned data.
+ """
+ name = _pack_string(self.name)
+ hdr_len = len(name) + FILE_HEADER_LEN
+ attr_pos = 0
+ content = b''
+ attr = b''
+ pad = b''
+ data = self.data
+ if self.ftype == TYPE_STAGE:
+ elf_data = elf.DecodeElf(data, self.base_address)
+ attr = struct.pack(ATTR_STAGE_FORMAT, FILE_ATTR_TAG_STAGEHEADER,
+ ATTR_STAGE_LEN, elf_data.load,
+ elf_data.entry - elf_data.load, elf_data.memsize)
+ data = elf_data.data
+ elif self.ftype == TYPE_RAW:
+ orig_data = data
+ if self.comp_bintool:
+ data = self.comp_bintool.compress(orig_data)
+ self.memlen = len(orig_data)
+ self.data_len = len(data)
+ if self.compress:
+ attr = struct.pack(ATTR_COMPRESSION_FORMAT,
+ FILE_ATTR_TAG_COMPRESSION,
+ ATTR_COMPRESSION_LEN, self.compress,
+ self.memlen)
+ elif self.ftype == TYPE_EMPTY:
+ data = tools.get_bytes(self.erase_byte, self.size)
+ else:
+ raise ValueError('Unknown type %#x when writing\n' % self.ftype)
+ if attr:
+ attr_pos = hdr_len
+ hdr_len += len(attr)
+ if self.cbfs_offset is not None:
+ pad_len = self.cbfs_offset - offset - hdr_len
+ if pad_len < 0: # pragma: no cover
+ # Test coverage of this is not available since this should never
+ # happen. It indicates that get_header_len() provided an
+ # incorrect value (too small) so that we decided that we could
+ # put this file at the requested place, but in fact a previous
+ # file extends far enough into the CBFS that this is not
+ # possible.
+ raise ValueError("Internal error: CBFS file '%s': Requested offset %#x but current output position is %#x" %
+ (self.name, self.cbfs_offset, offset))
+ pad = tools.get_bytes(pad_byte, pad_len)
+ if attr_pos:
+ attr_pos += pad_len
+ hdr_len += pad_len
+
+ # This is the offset of the start of the file's data
+ size = len(content) + len(data)
+ hdr = struct.pack(FILE_HEADER_FORMAT, FILE_MAGIC, size,
+ self.ftype, attr_pos, hdr_len)
+
+ # Do a sanity check of the get_header_len() function, to ensure that it
+ # stays in lockstep with this function
+ expected_len = self.get_header_len()
+ actual_len = len(hdr + name + attr)
+ if expected_len != actual_len: # pragma: no cover
+ # Test coverage of this is not available since this should never
+ # happen. It probably indicates that get_header_len() is broken.
+ raise ValueError("Internal error: CBFS file '%s': Expected headers of %#x bytes, got %#x" %
+ (self.name, expected_len, actual_len))
+ return hdr + name + pad + attr + content + data, hdr_len
+
+
+class CbfsWriter(object):
+ """Class to handle writing a Coreboot File System (CBFS)
+
+ Usage is something like:
+
+ cbw = CbfsWriter(size)
+ cbw.add_file_raw('u-boot', tools.read_file('u-boot.bin'))
+ ...
+ data, cbfs_offset = cbw.get_data_and_offset()
+
+ Attributes:
+ _master_name: Name of the file containing the master header
+ _size: Size of the filesystem, in bytes
+ _files: Ordered dict of files in the CBFS, each a CbfsFile, keyed by name
+ _arch: Architecture of the CBFS (ARCHITECTURE_...)
+ _bootblock_size: Size of the bootblock, typically at the end of the CBFS
+ _erase_byte: Byte to use for empty space in the CBFS
+ _align: Alignment to use for files, typically ENTRY_ALIGN
+ _base_address: Boot block offset in bytes from the start of CBFS.
+ Typically this is located at the top of the CBFS. It is 0 when there is
+ no boot block
+ _header_offset: Offset of master header in bytes from start of CBFS
+ _contents_offset: Offset of first file header
+ _hdr_at_start: True if the master header is at the start of the CBFS,
+ instead of the end as normal for x86
+ _add_fileheader: True to add a fileheader around the master header
+ """
+ def __init__(self, size, arch=ARCHITECTURE_X86):
+ """Set up a new CBFS
+
+ This sets up all properties to default values. Files can be added using
+ add_file_raw(), etc.
+
+ Args:
+ size: Size of CBFS in bytes
+ arch: Architecture to declare for CBFS
+ """
+ self._master_name = 'cbfs master header'
+ self._size = size
+ self._files = OrderedDict()
+ self._arch = arch
+ self._bootblock_size = 0
+ self._erase_byte = 0xff
+
+ # Small padding to align a file uses 0
+ self._small_pad_byte = 0
+ self._align = ENTRY_ALIGN
+ self._add_fileheader = False
+ if self._arch == ARCHITECTURE_X86:
+ # Allow 4 bytes for the header pointer. That holds the
+ # two's complement negative offset of the master header in bytes
+ # measured from one byte past the end of the CBFS
+ self._base_address = self._size - max(self._bootblock_size,
+ MIN_BOOTBLOCK_SIZE)
+ self._header_offset = self._base_address - HEADER_LEN
+ self._contents_offset = 0
+ self._hdr_at_start = False
+ else:
+ # For non-x86, different rules apply
+ self._base_address = 0
+ self._header_offset = align_int(self._base_address +
+ self._bootblock_size, 4)
+ self._contents_offset = align_int(self._header_offset +
+ FILE_HEADER_LEN +
+ self._bootblock_size, self._align)
+ self._hdr_at_start = True
+
+ def _skip_to(self, fd, offset, pad_byte):
+ """Write out pad bytes until a given offset
+
+ Args:
+ fd: File object to write to
+ offset: Offset to write to
+ pad_byte: Byte value to use for padding
+ """
+ if fd.tell() > offset:
+ raise ValueError('No space for data before offset %#x (current offset %#x)' %
+ (offset, fd.tell()))
+ fd.write(tools.get_bytes(pad_byte, offset - fd.tell()))
+
+ def _pad_to(self, fd, offset, pad_byte):
+ """Write out pad bytes and/or an empty file until a given offset
+
+ Args:
+ fd: File object to write to
+ offset: Offset to write to
+ pad_byte: Byte value to use for padding
+ """
+ self._align_to(fd, self._align, pad_byte)
+ upto = fd.tell()
+ if upto > offset:
+ raise ValueError('No space for data before pad offset %#x (current offset %#x)' %
+ (offset, upto))
+ todo = align_int_down(offset - upto, self._align)
+ if todo:
+ cbf = CbfsFile.empty(todo, self._erase_byte)
+ fd.write(cbf.get_data_and_offset()[0])
+ self._skip_to(fd, offset, pad_byte)
+
+ def _align_to(self, fd, align, pad_byte):
+ """Write out pad bytes until a given alignment is reached
+
+ This only aligns if the resulting output would not reach the end of the
+ CBFS, since we want to leave the last 4 bytes for the master-header
+ pointer.
+
+ Args:
+ fd: File object to write to
+ align: Alignment to require (e.g. 4 means pad to next 4-byte
+ boundary)
+ pad_byte: Byte value to use for padding
+ """
+ offset = align_int(fd.tell(), align)
+ if offset < self._size:
+ self._skip_to(fd, offset, pad_byte)
+
+ def add_file_stage(self, name, data, cbfs_offset=None):
+ """Add a new stage file to the CBFS
+
+ Args:
+ name: String file name to put in CBFS (does not need to correspond
+ to the name that the file originally came from)
+ data: Contents of file
+ cbfs_offset: Offset of this file's data within the CBFS, in bytes,
+ or None to place this file anywhere
+
+ Returns:
+ CbfsFile object created
+ """
+ cfile = CbfsFile.stage(self._base_address, name, data, cbfs_offset)
+ self._files[name] = cfile
+ return cfile
+
+ def add_file_raw(self, name, data, cbfs_offset=None,
+ compress=COMPRESS_NONE):
+ """Create a new raw file
+
+ Args:
+ name: String file name to put in CBFS (does not need to correspond
+ to the name that the file originally came from)
+ data: Contents of file
+ cbfs_offset: Offset of this file's data within the CBFS, in bytes,
+ or None to place this file anywhere
+ compress: Compression algorithm to use (COMPRESS_...)
+
+ Returns:
+ CbfsFile object created
+ """
+ cfile = CbfsFile.raw(name, data, cbfs_offset, compress)
+ self._files[name] = cfile
+ return cfile
+
+ def _write_header(self, fd, add_fileheader):
+ """Write out the master header to a CBFS
+
+ Args:
+ fd: File object
+ add_fileheader: True to place the master header in a file header
+ record
+ """
+ if fd.tell() > self._header_offset:
+ raise ValueError('No space for header at offset %#x (current offset %#x)' %
+ (self._header_offset, fd.tell()))
+ if not add_fileheader:
+ self._pad_to(fd, self._header_offset, self._erase_byte)
+ hdr = struct.pack(HEADER_FORMAT, HEADER_MAGIC, HEADER_VERSION2,
+ self._size, self._bootblock_size, self._align,
+ self._contents_offset, self._arch, 0xffffffff)
+ if add_fileheader:
+ name = _pack_string(self._master_name)
+ fd.write(struct.pack(FILE_HEADER_FORMAT, FILE_MAGIC, len(hdr),
+ TYPE_CBFSHEADER, 0,
+ FILE_HEADER_LEN + len(name)))
+ fd.write(name)
+ self._header_offset = fd.tell()
+ fd.write(hdr)
+ self._align_to(fd, self._align, self._erase_byte)
+ else:
+ fd.write(hdr)
+
+ def get_data(self):
+ """Obtain the full contents of the CBFS
+
+ This builds the CBFS with headers and all required files.
+
+ Returns:
+ 'bytes' type containing the data
+ """
+ fd = io.BytesIO()
+
+ # The header can go at the start in some cases
+ if self._hdr_at_start:
+ self._write_header(fd, add_fileheader=self._add_fileheader)
+ self._skip_to(fd, self._contents_offset, self._erase_byte)
+
+ # Write out each file
+ for cbf in self._files.values():
+ # Place the file at its requested place, if any
+ offset = cbf.calc_start_offset()
+ if offset is not None:
+ self._pad_to(fd, align_int_down(offset, self._align),
+ self._erase_byte)
+ pos = fd.tell()
+ data, data_offset = cbf.get_data_and_offset(pos,
+ self._small_pad_byte)
+ fd.write(data)
+ self._align_to(fd, self._align, self._erase_byte)
+ cbf.calced_cbfs_offset = pos + data_offset
+ if not self._hdr_at_start:
+ self._write_header(fd, add_fileheader=self._add_fileheader)
+
+ # Pad to the end and write a pointer to the CBFS master header
+ self._pad_to(fd, self._base_address or self._size - 4, self._erase_byte)
+ rel_offset = self._header_offset - self._size
+ fd.write(struct.pack('<I', rel_offset & 0xffffffff))
+
+ return fd.getvalue()
+
+
+class CbfsReader(object):
+ """Class to handle reading a Coreboot File System (CBFS)
+
+ Usage is something like:
+ cbfs = cbfs_util.CbfsReader(data)
+ cfile = cbfs.files['u-boot']
+ self.WriteFile('u-boot.bin', cfile.data)
+
+ Attributes:
+        files: OrderedDict of CbfsFile objects, keyed by file name
+        align: Alignment to use for files, typically ENTRY_ALIGN
+ stage_base_address: Base address to use when mapping ELF files into the
+ CBFS for TYPE_STAGE files. If this is larger than the code address
+ of the ELF file, then data at the start of the ELF file will not
+            appear in the CBFS. Currently there are no tests for this
+            behaviour, as documentation is sparse
+ magic: Integer magic number from master header (HEADER_MAGIC)
+ version: Version number of CBFS (HEADER_VERSION2)
+ rom_size: Size of CBFS
+ boot_block_size: Size of boot block
+ cbfs_offset: Offset of the first file in bytes from start of CBFS
+ arch: Architecture of CBFS file (ARCHITECTURE_...)
+ """
+ def __init__(self, data, read=True):
+ self.align = ENTRY_ALIGN
+ self.arch = None
+ self.boot_block_size = None
+ self.cbfs_offset = None
+ self.files = OrderedDict()
+ self.magic = None
+ self.rom_size = None
+ self.stage_base_address = 0
+ self.version = None
+ self.data = data
+ if read:
+ self.read()
+
+ def read(self):
+ """Read all the files in the CBFS and add them to self.files"""
+ with io.BytesIO(self.data) as fd:
+ # First, get the master header
+ if not self._find_and_read_header(fd, len(self.data)):
+ raise ValueError('Cannot find master header')
+ fd.seek(self.cbfs_offset)
+
+ # Now read in the files one at a time
+ while True:
+ cfile = self._read_next_file(fd)
+ if cfile:
+ self.files[cfile.name] = cfile
+ elif cfile is False:
+ break
+
+ def _find_and_read_header(self, fd, size):
+ """Find and read the master header in the CBFS
+
+ This looks at the pointer word at the very end of the CBFS. This is an
+ offset to the header relative to the size of the CBFS, which is assumed
+ to be known. Note that the offset is in *little endian* format.
+
+ Args:
+ fd: File to read from
+ size: Size of file
+
+ Returns:
+ True if header was found, False if not
+ """
+ orig_pos = fd.tell()
+ fd.seek(size - 4)
+ rel_offset, = struct.unpack('<I', fd.read(4))
+ pos = (size + rel_offset) & 0xffffffff
+ fd.seek(pos)
+ found = self._read_header(fd)
+ if not found:
+ print('Relative offset seems wrong, scanning whole image')
+ for pos in range(0, size - HEADER_LEN, 4):
+ fd.seek(pos)
+ found = self._read_header(fd)
+ if found:
+ break
+ fd.seek(orig_pos)
+ return found
+
+ def _read_next_file(self, fd):
+ """Read the next file from a CBFS
+
+ Args:
+ fd: File to read from
+
+ Returns:
+ CbfsFile object, if found
+ None if no object found, but data was parsed (e.g. TYPE_CBFSHEADER)
+ False if at end of CBFS and reading should stop
+ """
+ file_pos = fd.tell()
+ data = fd.read(FILE_HEADER_LEN)
+ if len(data) < FILE_HEADER_LEN:
+ print('File header at %#x ran out of data' % file_pos)
+ return False
+ magic, size, ftype, attr, offset = struct.unpack(FILE_HEADER_FORMAT,
+ data)
+ if magic != FILE_MAGIC:
+ return False
+ pos = fd.tell()
+ name = self._read_string(fd)
+ if name is None:
+ print('String at %#x ran out of data' % pos)
+ return False
+
+ if DEBUG:
+ print('name', name)
+
+ # If there are attribute headers present, read those
+ attrs = self._read_attr(fd, file_pos, attr, offset)
+ if attrs is None:
+ return False
+
+ # Create the correct CbfsFile object depending on the type
+ cfile = None
+ cbfs_offset = file_pos + offset
+ fd.seek(cbfs_offset, io.SEEK_SET)
+ if DEBUG:
+ print(f'ftype {ftype:x}')
+ if ftype == TYPE_CBFSHEADER:
+ self._read_header(fd)
+ elif ftype == TYPE_STAGE:
+ cfile = CbfsFile.stage(self.stage_base_address, name, b'',
+ cbfs_offset)
+ cfile.load, entry_offset, cfile.memlen = attrs
+ cfile.entry = cfile.load + entry_offset
+ cfile.data = fd.read(cfile.memlen)
+ cfile.data_len = cfile.memlen
+ elif ftype == TYPE_RAW:
+ data = fd.read(size)
+ cfile = CbfsFile.raw(name, data, cbfs_offset, attrs)
+ cfile.decompress()
+ if DEBUG:
+ print('data', data)
+ elif ftype == TYPE_EMPTY:
+ # Just read the data and discard it, since it is only padding
+ fd.read(size)
+ cfile = CbfsFile('', TYPE_EMPTY, b'', cbfs_offset)
+ else:
+ raise ValueError('Unknown type %#x when reading\n' % ftype)
+ if cfile:
+ cfile.offset = offset
+
+ # Move past the padding to the start of a possible next file. If we are
+ # already at an alignment boundary, then there is no padding.
+ pad = (self.align - fd.tell() % self.align) % self.align
+ fd.seek(pad, io.SEEK_CUR)
+ return cfile
+
+ @classmethod
+ def _read_attr(cls, fd, file_pos, attr, offset):
+ """Read attributes from the file
+
+        CBFS files can have attributes, which hold information that does not
+        fit in the file header. The only attributes currently supported are
+        the compression type, the stage header and the unused tag.
+
+ Args:
+ fd: File to read from
+ file_pos: Position of file in fd
+ attr: Offset of attributes, 0 if none
+ offset: Offset of file data (used to indicate the end of the
+ attributes)
+
+ Returns:
+            Either:
+                Compression to use for the file (COMPRESS_...)
+                tuple containing stage info:
+                    load address
+                    entry offset
+                    memory size
+                None if the attribute data could not be read
+ """
+ attrs = None
+ if not attr:
+ return COMPRESS_NONE
+ attr_size = offset - attr
+ fd.seek(file_pos + attr, io.SEEK_SET)
+ while attr_size:
+ pos = fd.tell()
+ hdr = fd.read(8)
+ if len(hdr) < 8:
+ print('Attribute tag at %x ran out of data' % pos)
+ return None
+ atag, alen = struct.unpack(">II", hdr)
+ data = hdr + fd.read(alen - 8)
+ if atag == FILE_ATTR_TAG_COMPRESSION:
+ # We don't currently use this information
+ atag, alen, compress, _decomp_size = struct.unpack(
+ ATTR_COMPRESSION_FORMAT, data)
+ attrs = compress
+ elif atag == FILE_ATTR_TAG_STAGEHEADER:
+ atag, alen, load, entry_offset, memsize = struct.unpack(
+ ATTR_STAGE_FORMAT, data)
+ attrs = (load, entry_offset, memsize)
+ else:
+ print('Unknown attribute tag %x' % atag)
+ attr_size -= len(data)
+ return attrs
+
+ def _read_header(self, fd):
+ """Read the master header
+
+ Reads the header and stores the information obtained into the member
+ variables.
+
+ Args:
+ fd: File to read from
+
+ Returns:
+ True if header was read OK, False if it is truncated or has the
+ wrong magic or version
+ """
+ pos = fd.tell()
+ data = fd.read(HEADER_LEN)
+ if len(data) < HEADER_LEN:
+ print('Header at %x ran out of data' % pos)
+ return False
+ (self.magic, self.version, self.rom_size, self.boot_block_size,
+ self.align, self.cbfs_offset, self.arch, _) = struct.unpack(
+ HEADER_FORMAT, data)
+ return self.magic == HEADER_MAGIC and (
+ self.version == HEADER_VERSION1 or
+ self.version == HEADER_VERSION2)
+
+ @classmethod
+ def _read_string(cls, fd):
+ """Read a string from a file
+
+ This reads a string and aligns the data to the next alignment boundary.
+ The string must be nul-terminated
+
+ Args:
+ fd: File to read from
+
+ Returns:
+ string read ('str' type) encoded to UTF-8, or None if we ran out of
+ data
+ """
+ val = b''
+ while True:
+ data = fd.read(ATTRIBUTE_ALIGN)
+ if len(data) < ATTRIBUTE_ALIGN:
+ return None
+ pos = data.find(b'\0')
+ if pos == -1:
+ val += data
+ else:
+ val += data[:pos]
+ break
+ return val.decode('utf-8')
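As a rough illustration of how the writer and reader classes above fit together, here is a minimal round-trip sketch based only on the APIs shown in this file (CbfsWriter.add_file_raw(), CbfsWriter.get_data() and CbfsReader.files); the size and file contents are arbitrary:

    from binman import cbfs_util
    from binman.cbfs_util import CbfsWriter

    # Build a small CBFS containing one raw, uncompressed file
    cbw = CbfsWriter(0xb0)
    cbw.add_file_raw('u-boot', b'1234')
    data = cbw.get_data()

    # Read the image back and check the file contents
    cbr = cbfs_util.CbfsReader(data)
    print(cbr.files['u-boot'].data)   # b'1234'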
diff --git a/tools/binman/cbfs_util_test.py b/tools/binman/cbfs_util_test.py
new file mode 100755
index 00000000000..4c415b7ce94
--- /dev/null
+++ b/tools/binman/cbfs_util_test.py
@@ -0,0 +1,607 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2019 Google LLC
+# Written by Simon Glass <sjg@chromium.org>
+
+"""Tests for cbfs_util
+
+These create and read various CBFSs and compare the results with expected
+values and with cbfstool
+"""
+
+import io
+import os
+import shutil
+import struct
+import tempfile
+import unittest
+
+from binman import bintool
+from binman import cbfs_util
+from binman.cbfs_util import CbfsWriter
+from binman import elf
+from u_boot_pylib import test_util
+from u_boot_pylib import tools
+
+U_BOOT_DATA = b'1234'
+U_BOOT_DTB_DATA = b'udtb'
+COMPRESS_DATA = b'compress xxxxxxxxxxxxxxxxxxxxxx data'
+
+
+class TestCbfs(unittest.TestCase):
+ """Test of cbfs_util classes"""
+ #pylint: disable=W0212
+ @classmethod
+ def setUpClass(cls):
+ # Create a temporary directory for test files
+ cls._indir = tempfile.mkdtemp(prefix='cbfs_util.')
+ tools.set_input_dirs([cls._indir])
+
+ # Set up some useful data files
+ TestCbfs._make_input_file('u-boot.bin', U_BOOT_DATA)
+ TestCbfs._make_input_file('u-boot.dtb', U_BOOT_DTB_DATA)
+ TestCbfs._make_input_file('compress', COMPRESS_DATA)
+
+ # Set up a temporary output directory, used by the tools library when
+ # compressing files
+ tools.prepare_output_dir(None)
+
+ cls.cbfstool = bintool.Bintool.create('cbfstool')
+ cls.have_cbfstool = cls.cbfstool.is_present()
+
+ lz4 = bintool.Bintool.create('lz4')
+ cls.have_lz4 = lz4.is_present()
+
+ @classmethod
+ def tearDownClass(cls):
+ """Remove the temporary input directory and its contents"""
+ if cls._indir:
+ shutil.rmtree(cls._indir)
+ cls._indir = None
+ tools.finalise_output_dir()
+
+ @classmethod
+ def _make_input_file(cls, fname, contents):
+ """Create a new test input file, creating directories as needed
+
+ Args:
+ fname: Filename to create
+ contents: File contents to write in to the file
+ Returns:
+ Full pathname of file created
+ """
+ pathname = os.path.join(cls._indir, fname)
+ tools.write_file(pathname, contents)
+ return pathname
+
+ def _check_hdr(self, data, size, offset=0, arch=cbfs_util.ARCHITECTURE_X86):
+ """Check that the CBFS has the expected header
+
+ Args:
+ data: Data to check
+ size: Expected ROM size
+ offset: Expected offset to first CBFS file
+ arch: Expected architecture
+
+ Returns:
+ CbfsReader object containing the CBFS
+ """
+ cbfs = cbfs_util.CbfsReader(data)
+ self.assertEqual(cbfs_util.HEADER_MAGIC, cbfs.magic)
+ self.assertEqual(cbfs_util.HEADER_VERSION2, cbfs.version)
+ self.assertEqual(size, cbfs.rom_size)
+ self.assertEqual(0, cbfs.boot_block_size)
+ self.assertEqual(cbfs_util.ENTRY_ALIGN, cbfs.align)
+ self.assertEqual(offset, cbfs.cbfs_offset)
+ self.assertEqual(arch, cbfs.arch)
+ return cbfs
+
+ def _check_uboot(self, cbfs, ftype=cbfs_util.TYPE_RAW, offset=0x20,
+ data=U_BOOT_DATA, cbfs_offset=None):
+ """Check that the U-Boot file is as expected
+
+ Args:
+ cbfs: CbfsReader object to check
+ ftype: Expected file type
+ offset: Expected offset of file
+ data: Expected data in file
+ cbfs_offset: Expected CBFS offset for file's data
+
+ Returns:
+ CbfsFile object containing the file
+ """
+ self.assertIn('u-boot', cbfs.files)
+ cfile = cbfs.files['u-boot']
+ self.assertEqual('u-boot', cfile.name)
+ self.assertEqual(offset, cfile.offset)
+ if cbfs_offset is not None:
+ self.assertEqual(cbfs_offset, cfile.cbfs_offset)
+ self.assertEqual(data, cfile.data)
+ self.assertEqual(ftype, cfile.ftype)
+ self.assertEqual(cbfs_util.COMPRESS_NONE, cfile.compress)
+ self.assertEqual(len(data), cfile.memlen)
+ return cfile
+
+ def _check_dtb(self, cbfs, offset=0x24, data=U_BOOT_DTB_DATA,
+ cbfs_offset=None):
+ """Check that the U-Boot dtb file is as expected
+
+ Args:
+ cbfs: CbfsReader object to check
+ offset: Expected offset of file
+ data: Expected data in file
+ cbfs_offset: Expected CBFS offset for file's data
+ """
+ self.assertIn('u-boot-dtb', cbfs.files)
+ cfile = cbfs.files['u-boot-dtb']
+ self.assertEqual('u-boot-dtb', cfile.name)
+ self.assertEqual(offset, cfile.offset)
+ if cbfs_offset is not None:
+ self.assertEqual(cbfs_offset, cfile.cbfs_offset)
+ self.assertEqual(U_BOOT_DTB_DATA, cfile.data)
+ self.assertEqual(cbfs_util.TYPE_RAW, cfile.ftype)
+ self.assertEqual(cbfs_util.COMPRESS_NONE, cfile.compress)
+ self.assertEqual(len(U_BOOT_DTB_DATA), cfile.memlen)
+
+ def _check_raw(self, data, size, offset=0, arch=cbfs_util.ARCHITECTURE_X86):
+ """Check that two raw files are added as expected
+
+ Args:
+ data: Data to check
+ size: Expected ROM size
+ offset: Expected offset to first CBFS file
+ arch: Expected architecture
+ """
+ cbfs = self._check_hdr(data, size, offset=offset, arch=arch)
+ self._check_uboot(cbfs)
+ self._check_dtb(cbfs)
+
+ def _get_expected_cbfs(self, size, arch='x86', compress=None, base=None):
+ """Get the file created by cbfstool for a particular scenario
+
+ Args:
+ size: Size of the CBFS in bytes
+ arch: Architecture of the CBFS, as a string
+ compress: Compression to use, e.g. cbfs_util.COMPRESS_LZMA
+ base: Base address of file, or None to put it anywhere
+
+ Returns:
+ Resulting CBFS file, or None if cbfstool is not available
+ """
+ if not self.have_cbfstool or not self.have_lz4:
+ return None
+ cbfs_fname = os.path.join(self._indir, 'test.cbfs')
+ self.cbfstool.create_new(cbfs_fname, size, arch)
+ if base:
+ base = [(1 << 32) - size + b for b in base]
+ self.cbfstool.add_raw(
+ cbfs_fname, 'u-boot',
+ tools.get_input_filename(compress and 'compress' or 'u-boot.bin'),
+ compress[0] if compress else None,
+ base[0] if base else None)
+ self.cbfstool.add_raw(
+ cbfs_fname, 'u-boot-dtb',
+ tools.get_input_filename(compress and 'compress' or 'u-boot.dtb'),
+ compress[1] if compress else None,
+ base[1] if base else None)
+ return cbfs_fname
+
+ def _compare_expected_cbfs(self, data, cbfstool_fname):
+ """Compare against what cbfstool creates
+
+ This compares what binman creates with what cbfstool creates for what
+        is purportedly the same thing.
+
+ Args:
+ data: CBFS created by binman
+ cbfstool_fname: CBFS created by cbfstool
+ """
+ if not self.have_cbfstool or not self.have_lz4:
+ return
+ expect = tools.read_file(cbfstool_fname)
+ if expect != data:
+ tools.write_file('/tmp/expect', expect)
+ tools.write_file('/tmp/actual', data)
+ print('diff -y <(xxd -g1 /tmp/expect) <(xxd -g1 /tmp/actual) | colordiff')
+ self.fail('cbfstool produced a different result')
+
+ def test_cbfs_functions(self):
+ """Test global functions of cbfs_util"""
+ self.assertEqual(cbfs_util.ARCHITECTURE_X86, cbfs_util.find_arch('x86'))
+ self.assertIsNone(cbfs_util.find_arch('bad-arch'))
+
+ self.assertEqual(cbfs_util.COMPRESS_LZMA, cbfs_util.find_compress('lzma'))
+ self.assertIsNone(cbfs_util.find_compress('bad-comp'))
+
+ def test_cbfstool_failure(self):
+ """Test failure to run cbfstool"""
+ if not self.have_cbfstool:
+ self.skipTest('No cbfstool available')
+ with self.assertRaises(ValueError) as exc:
+ out = self.cbfstool.fail()
+ self.assertIn('cbfstool missing-file bad-command', str(exc.exception))
+
+ def test_cbfs_raw(self):
+ """Test base handling of a Coreboot Filesystem (CBFS)"""
+ size = 0xb0
+ cbw = CbfsWriter(size)
+ cbw.add_file_raw('u-boot', U_BOOT_DATA)
+ cbw.add_file_raw('u-boot-dtb', U_BOOT_DTB_DATA)
+ data = cbw.get_data()
+ self._check_raw(data, size)
+ cbfs_fname = self._get_expected_cbfs(size=size)
+ self._compare_expected_cbfs(data, cbfs_fname)
+
+ def test_cbfs_invalid_file_type(self):
+        """Check handling of an invalid file type when outputting a CBFS"""
+ size = 0xb0
+ cbw = CbfsWriter(size)
+ cfile = cbw.add_file_raw('u-boot', U_BOOT_DATA)
+
+ # Change the type manually before generating the CBFS, and make sure
+ # that the generator complains
+ cfile.ftype = 0xff
+ with self.assertRaises(ValueError) as e:
+ cbw.get_data()
+ self.assertIn('Unknown type 0xff when writing', str(e.exception))
+
+ def test_cbfs_invalid_file_type_on_read(self):
+ """Check handling of an invalid file type when reading the CBFS"""
+ size = 0xb0
+ cbw = CbfsWriter(size)
+ cbw.add_file_raw('u-boot', U_BOOT_DATA)
+
+ data = cbw.get_data()
+
+ # Read in the first file header
+ cbr = cbfs_util.CbfsReader(data, read=False)
+ with io.BytesIO(data) as fd:
+ self.assertTrue(cbr._find_and_read_header(fd, len(data)))
+ pos = fd.tell()
+ hdr_data = fd.read(cbfs_util.FILE_HEADER_LEN)
+ magic, size, ftype, attr, offset = struct.unpack(
+ cbfs_util.FILE_HEADER_FORMAT, hdr_data)
+
+ # Create a new CBFS with a change to the file type
+ ftype = 0xff
+ newdata = data[:pos]
+ newdata += struct.pack(cbfs_util.FILE_HEADER_FORMAT, magic, size, ftype,
+ attr, offset)
+ newdata += data[pos + cbfs_util.FILE_HEADER_LEN:]
+
+ # Read in this CBFS and make sure that the reader complains
+ with self.assertRaises(ValueError) as e:
+ cbfs_util.CbfsReader(newdata)
+ self.assertIn('Unknown type 0xff when reading', str(e.exception))
+
+ def test_cbfs_no_space(self):
+ """Check handling of running out of space in the CBFS"""
+ size = 0x60
+ cbw = CbfsWriter(size)
+ cbw.add_file_raw('u-boot', U_BOOT_DATA)
+ with self.assertRaises(ValueError) as e:
+ cbw.get_data()
+ self.assertIn('No space for header', str(e.exception))
+
+ def test_cbfs_no_space_skip(self):
+ """Check handling of running out of space in CBFS with file header"""
+ size = 0x5c
+ cbw = CbfsWriter(size, arch=cbfs_util.ARCHITECTURE_PPC64)
+ cbw._add_fileheader = True
+ cbw.add_file_raw('u-boot', U_BOOT_DATA)
+ with self.assertRaises(ValueError) as e:
+ cbw.get_data()
+ self.assertIn('No space for data before offset', str(e.exception))
+
+ def test_cbfs_no_space_pad(self):
+ """Check handling of running out of space in CBFS with file header"""
+ size = 0x70
+ cbw = CbfsWriter(size)
+ cbw._add_fileheader = True
+ cbw.add_file_raw('u-boot', U_BOOT_DATA)
+ with self.assertRaises(ValueError) as e:
+ cbw.get_data()
+ self.assertIn('No space for data before pad offset', str(e.exception))
+
+ def test_cbfs_bad_header_ptr(self):
+ """Check handling of a bad master-header pointer"""
+ size = 0x70
+ cbw = CbfsWriter(size)
+ cbw.add_file_raw('u-boot', U_BOOT_DATA)
+ data = cbw.get_data()
+
+ # Add one to the pointer to make it invalid
+ newdata = data[:-4] + struct.pack('<I', cbw._header_offset + 1)
+
+ # We should still be able to find the master header by searching
+ with test_util.capture_sys_output() as (stdout, _stderr):
+ cbfs = cbfs_util.CbfsReader(newdata)
+ self.assertIn('Relative offset seems wrong', stdout.getvalue())
+ self.assertIn('u-boot', cbfs.files)
+ self.assertEqual(size, cbfs.rom_size)
+
+ def test_cbfs_bad_header(self):
+ """Check handling of a bad master header"""
+ size = 0x70
+ cbw = CbfsWriter(size)
+ cbw.add_file_raw('u-boot', U_BOOT_DATA)
+ data = cbw.get_data()
+
+ # Drop most of the header and try reading the modified CBFS
+ newdata = data[:cbw._header_offset + 4]
+
+ with test_util.capture_sys_output() as (stdout, _stderr):
+ with self.assertRaises(ValueError) as e:
+ cbfs_util.CbfsReader(newdata)
+ self.assertIn('Relative offset seems wrong', stdout.getvalue())
+ self.assertIn('Cannot find master header', str(e.exception))
+
+ def test_cbfs_bad_file_header(self):
+ """Check handling of a bad file header"""
+ size = 0x70
+ cbw = CbfsWriter(size)
+ cbw.add_file_raw('u-boot', U_BOOT_DATA)
+ data = cbw.get_data()
+
+ # Read in the CBFS master header (only), then stop
+ cbr = cbfs_util.CbfsReader(data, read=False)
+ with io.BytesIO(data) as fd:
+ self.assertTrue(cbr._find_and_read_header(fd, len(data)))
+ pos = fd.tell()
+
+        # Remove all but 4 bytes of the file header, and try to read the file
+ newdata = data[:pos + 4]
+ with test_util.capture_sys_output() as (stdout, _stderr):
+ with io.BytesIO(newdata) as fd:
+ fd.seek(pos)
+ self.assertEqual(False, cbr._read_next_file(fd))
+ self.assertIn('File header at 0x0 ran out of data', stdout.getvalue())
+
+ def test_cbfs_bad_file_string(self):
+ """Check handling of an incomplete filename string"""
+ size = 0x70
+ cbw = CbfsWriter(size)
+ cbw.add_file_raw('16-characters xx', U_BOOT_DATA)
+ data = cbw.get_data()
+
+ # Read in the CBFS master header (only), then stop
+ cbr = cbfs_util.CbfsReader(data, read=False)
+ with io.BytesIO(data) as fd:
+ self.assertTrue(cbr._find_and_read_header(fd, len(data)))
+ pos = fd.tell()
+
+ # Create a new CBFS with only the first 16 bytes of the file name, then
+ # try to read the file
+ newdata = data[:pos + cbfs_util.FILE_HEADER_LEN + 16]
+ with test_util.capture_sys_output() as (stdout, _stderr):
+ with io.BytesIO(newdata) as fd:
+ fd.seek(pos)
+ self.assertEqual(False, cbr._read_next_file(fd))
+ self.assertIn('String at %#x ran out of data' %
+ cbfs_util.FILE_HEADER_LEN, stdout.getvalue())
+
+ def test_cbfs_debug(self):
+ """Check debug output"""
+ size = 0x70
+ cbw = CbfsWriter(size)
+ cbw.add_file_raw('u-boot', U_BOOT_DATA)
+ data = cbw.get_data()
+
+ try:
+ cbfs_util.DEBUG = True
+ with test_util.capture_sys_output() as (stdout, _stderr):
+ cbfs_util.CbfsReader(data)
+ self.assertEqual('name u-boot\nftype 50\ndata %s\n' % U_BOOT_DATA,
+ stdout.getvalue())
+ finally:
+ cbfs_util.DEBUG = False
+
+ def test_cbfs_bad_attribute(self):
+ """Check handling of bad attribute tag"""
+ if not self.have_lz4:
+ self.skipTest('lz4 --no-frame-crc not available')
+ size = 0x140
+ cbw = CbfsWriter(size)
+ cbw.add_file_raw('u-boot', COMPRESS_DATA, None,
+ compress=cbfs_util.COMPRESS_LZ4)
+ data = cbw.get_data()
+
+ # Search the CBFS for the expected compression tag
+ with io.BytesIO(data) as fd:
+ while True:
+ pos = fd.tell()
+ tag, = struct.unpack('>I', fd.read(4))
+ if tag == cbfs_util.FILE_ATTR_TAG_COMPRESSION:
+ break
+
+ # Create a new CBFS with the tag changed to something invalid
+ newdata = data[:pos] + struct.pack('>I', 0x123) + data[pos + 4:]
+ with test_util.capture_sys_output() as (stdout, _stderr):
+ cbfs_util.CbfsReader(newdata)
+ self.assertEqual('Unknown attribute tag 123\n', stdout.getvalue())
+
+ def test_cbfs_missing_attribute(self):
+ """Check handling of an incomplete attribute tag"""
+ if not self.have_lz4:
+ self.skipTest('lz4 --no-frame-crc not available')
+ size = 0x140
+ cbw = CbfsWriter(size)
+ cbw.add_file_raw('u-boot', COMPRESS_DATA, None,
+ compress=cbfs_util.COMPRESS_LZ4)
+ data = cbw.get_data()
+
+ # Read in the CBFS master header (only), then stop
+ cbr = cbfs_util.CbfsReader(data, read=False)
+ with io.BytesIO(data) as fd:
+ self.assertTrue(cbr._find_and_read_header(fd, len(data)))
+ pos = fd.tell()
+
+ # Create a new CBFS with only the first 4 bytes of the compression tag,
+ # then try to read the file. Note that the tag gets pushed out 4 bytes
+ tag_pos = (4 + pos + cbfs_util.FILE_HEADER_LEN +
+ cbfs_util.ATTRIBUTE_ALIGN)
+ newdata = data[:tag_pos + 4]
+ with test_util.capture_sys_output() as (stdout, _stderr):
+ with io.BytesIO(newdata) as fd:
+ fd.seek(pos)
+ self.assertEqual(False, cbr._read_next_file(fd))
+ self.assertIn('Attribute tag at %x ran out of data' % tag_pos,
+ stdout.getvalue())
+
+ def test_cbfs_file_master_header(self):
+ """Check handling of a file containing a master header"""
+ size = 0x100
+ cbw = CbfsWriter(size)
+ cbw._add_fileheader = True
+ cbw.add_file_raw('u-boot', U_BOOT_DATA)
+ data = cbw.get_data()
+
+ cbr = cbfs_util.CbfsReader(data)
+ self.assertIn('u-boot', cbr.files)
+ self.assertEqual(size, cbr.rom_size)
+
+ def test_cbfs_arch(self):
+ """Test on non-x86 architecture"""
+ size = 0x100
+ cbw = CbfsWriter(size, arch=cbfs_util.ARCHITECTURE_PPC64)
+ cbw.add_file_raw('u-boot', U_BOOT_DATA)
+ cbw.add_file_raw('u-boot-dtb', U_BOOT_DTB_DATA)
+ data = cbw.get_data()
+ self._check_raw(data, size, offset=0x40,
+ arch=cbfs_util.ARCHITECTURE_PPC64)
+
+ # Compare against what cbfstool creates
+ cbfs_fname = self._get_expected_cbfs(size=size, arch='ppc64')
+ self._compare_expected_cbfs(data, cbfs_fname)
+
+ def test_cbfs_stage(self):
+ """Tests handling of a CBFS stage"""
+ if not elf.ELF_TOOLS:
+ self.skipTest('Python elftools not available')
+ elf_fname = os.path.join(self._indir, 'cbfs-stage.elf')
+ elf.MakeElf(elf_fname, U_BOOT_DATA, U_BOOT_DTB_DATA)
+
+ size = 0xb0
+ cbw = CbfsWriter(size)
+ cbw.add_file_stage('u-boot', tools.read_file(elf_fname))
+
+ data = cbw.get_data()
+ cbfs = self._check_hdr(data, size)
+ load = 0xfef20000
+ entry = load + 2
+
+ cfile = self._check_uboot(cbfs, cbfs_util.TYPE_STAGE, offset=0x38,
+ data=U_BOOT_DATA + U_BOOT_DTB_DATA)
+
+ self.assertEqual(entry, cfile.entry)
+ self.assertEqual(load, cfile.load)
+ self.assertEqual(len(U_BOOT_DATA) + len(U_BOOT_DTB_DATA),
+ cfile.data_len)
+
+ # Compare against what cbfstool creates
+ if self.have_cbfstool:
+ cbfs_fname = os.path.join(self._indir, 'test.cbfs')
+ self.cbfstool.create_new(cbfs_fname, size)
+ self.cbfstool.add_stage(cbfs_fname, 'u-boot', elf_fname)
+ self._compare_expected_cbfs(data, cbfs_fname)
+
+ def test_cbfs_raw_compress(self):
+ """Test base handling of compressing raw files"""
+ if not self.have_lz4:
+ self.skipTest('lz4 --no-frame-crc not available')
+ size = 0x140
+ cbw = CbfsWriter(size)
+ cbw.add_file_raw('u-boot', COMPRESS_DATA, None,
+ compress=cbfs_util.COMPRESS_LZ4)
+ cbw.add_file_raw('u-boot-dtb', COMPRESS_DATA, None,
+ compress=cbfs_util.COMPRESS_LZMA)
+ data = cbw.get_data()
+
+ cbfs = self._check_hdr(data, size)
+ self.assertIn('u-boot', cbfs.files)
+ cfile = cbfs.files['u-boot']
+ self.assertEqual(cfile.name, 'u-boot')
+ self.assertEqual(cfile.offset, 0x30)
+ self.assertEqual(cfile.data, COMPRESS_DATA)
+ self.assertEqual(cfile.ftype, cbfs_util.TYPE_RAW)
+ self.assertEqual(cfile.compress, cbfs_util.COMPRESS_LZ4)
+ self.assertEqual(cfile.memlen, len(COMPRESS_DATA))
+
+ self.assertIn('u-boot-dtb', cbfs.files)
+ cfile = cbfs.files['u-boot-dtb']
+ self.assertEqual(cfile.name, 'u-boot-dtb')
+ self.assertEqual(cfile.offset, 0x34)
+ self.assertEqual(cfile.data, COMPRESS_DATA)
+ self.assertEqual(cfile.ftype, cbfs_util.TYPE_RAW)
+ self.assertEqual(cfile.compress, cbfs_util.COMPRESS_LZMA)
+ self.assertEqual(cfile.memlen, len(COMPRESS_DATA))
+
+ cbfs_fname = self._get_expected_cbfs(size=size, compress=['lz4', 'lzma'])
+ self._compare_expected_cbfs(data, cbfs_fname)
+
+ def test_cbfs_raw_space(self):
+ """Test files with unused space in the CBFS"""
+ size = 0xf0
+ cbw = CbfsWriter(size)
+ cbw.add_file_raw('u-boot', U_BOOT_DATA)
+ cbw.add_file_raw('u-boot-dtb', U_BOOT_DTB_DATA)
+ data = cbw.get_data()
+ self._check_raw(data, size)
+ cbfs_fname = self._get_expected_cbfs(size=size)
+ self._compare_expected_cbfs(data, cbfs_fname)
+
+ def test_cbfs_offset(self):
+ """Test a CBFS with files at particular offsets"""
+ size = 0x200
+ cbw = CbfsWriter(size)
+ cbw.add_file_raw('u-boot', U_BOOT_DATA, 0x40)
+ cbw.add_file_raw('u-boot-dtb', U_BOOT_DTB_DATA, 0x140)
+
+ data = cbw.get_data()
+ cbfs = self._check_hdr(data, size)
+ self._check_uboot(cbfs, ftype=cbfs_util.TYPE_RAW, offset=0x40,
+ cbfs_offset=0x40)
+ self._check_dtb(cbfs, offset=0x40, cbfs_offset=0x140)
+
+ cbfs_fname = self._get_expected_cbfs(size=size, base=(0x40, 0x140))
+ self._compare_expected_cbfs(data, cbfs_fname)
+
+ def test_cbfs_invalid_file_type_header(self):
+ """Check handling of an invalid file type when outputting a header"""
+ size = 0xb0
+ cbw = CbfsWriter(size)
+ cfile = cbw.add_file_raw('u-boot', U_BOOT_DATA, 0)
+
+ # Change the type manually before generating the CBFS, and make sure
+ # that the generator complains
+ cfile.ftype = 0xff
+ with self.assertRaises(ValueError) as e:
+ cbw.get_data()
+ self.assertIn('Unknown file type 0xff', str(e.exception))
+
+ def test_cbfs_offset_conflict(self):
+ """Test a CBFS with files that want to overlap"""
+ size = 0x200
+ cbw = CbfsWriter(size)
+ cbw.add_file_raw('u-boot', U_BOOT_DATA, 0x40)
+ cbw.add_file_raw('u-boot-dtb', U_BOOT_DTB_DATA, 0x80)
+
+ with self.assertRaises(ValueError) as e:
+ cbw.get_data()
+ self.assertIn('No space for data before pad offset', str(e.exception))
+
+ def test_cbfs_check_offset(self):
+ """Test that we can discover the offset of a file after writing it"""
+ size = 0xb0
+ cbw = CbfsWriter(size)
+ cbw.add_file_raw('u-boot', U_BOOT_DATA)
+ cbw.add_file_raw('u-boot-dtb', U_BOOT_DTB_DATA)
+ data = cbw.get_data()
+
+ cbfs = cbfs_util.CbfsReader(data)
+ self.assertEqual(0x20, cbfs.files['u-boot'].cbfs_offset)
+ self.assertEqual(0x64, cbfs.files['u-boot-dtb'].cbfs_offset)
+
+
+if __name__ == '__main__':
+ unittest.main()
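Since the module ends with unittest.main(), these tests can also be loaded and run programmatically; a minimal sketch, assuming tools/binman and its dependencies (such as u_boot_pylib) are importable on sys.path:

    import unittest

    from binman import cbfs_util_test

    # Run just the CBFS tests, with verbose output
    suite = unittest.TestLoader().loadTestsFromTestCase(cbfs_util_test.TestCbfs)
    unittest.TextTestRunner(verbosity=2).run(suite)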
diff --git a/tools/binman/cmdline.py b/tools/binman/cmdline.py
new file mode 100644
index 00000000000..9632ec115e5
--- /dev/null
+++ b/tools/binman/cmdline.py
@@ -0,0 +1,214 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+
+"""Command-line parser for binman"""
+
+import argparse
+from argparse import ArgumentParser
+import os
+import pathlib
+
+from binman import state
+
+BINMAN_DIR = pathlib.Path(__file__).parent
+HAS_TESTS = (BINMAN_DIR / "ftest.py").exists()
+
+def make_extract_parser(subparsers):
+ """make_extract_parser: Make a subparser for the 'extract' command
+
+ Args:
+ subparsers (ArgumentParser): parser to add this one to
+ """
+ extract_parser = subparsers.add_parser('extract',
+ help='Extract files from an image')
+ extract_parser.add_argument('-F', '--format', type=str,
+ help='Select an alternative format for extracted data')
+ extract_parser.add_argument('-i', '--image', type=str, required=True,
+ help='Image filename to extract')
+ extract_parser.add_argument('-f', '--filename', type=str,
+ help='Output filename to write to')
+ extract_parser.add_argument('-O', '--outdir', type=str, default='',
+ help='Path to directory to use for output files')
+ extract_parser.add_argument('paths', type=str, nargs='*',
+ help='Paths within file to extract (wildcard)')
+ extract_parser.add_argument('-U', '--uncompressed', action='store_true',
+ help='Output raw uncompressed data for compressed entries')
+
+
+#pylint: disable=R0903
+class BinmanVersion(argparse.Action):
+ """Handles the -V option to binman
+
+ This reads the version information from a file called 'version' in the same
+ directory as this file.
+
+ If not present it assumes this is running from the U-Boot tree and collects
+ the version from the Makefile.
+
+ The format of the version information is three VAR = VALUE lines, for
+ example:
+
+ VERSION = 2022
+ PATCHLEVEL = 01
+ EXTRAVERSION = -rc2
+ """
+ def __init__(self, nargs=0, **kwargs):
+ super().__init__(nargs=nargs, **kwargs)
+
+ def __call__(self, parser, namespace, values, option_string=None):
+ parser._print_message(f'Binman {state.GetVersion()}\n')
+ parser.exit()
+
+
+def ParseArgs(argv):
+ """Parse the binman command-line arguments
+
+ Args:
+ argv (list of str): List of string arguments
+
+ Returns:
+        argparse.Namespace: parsed arguments, providing access to the options
+            (e.g. args.debug) and to the selected subcommand (args.cmd)
+ """
+ def _AddPreserve(pars):
+ pars.add_argument('-O', '--outdir', type=str,
+ action='store', help='Path to directory to use for intermediate '
+ 'and output files')
+        pars.add_argument('-p', '--preserve', action='store_true',
+ help='Preserve temporary output directory even if option -O is not '
+ 'given')
+
+ if '-H' in argv:
+ argv.append('build')
+
+    epilog = '''Binman creates and manipulates images for a board from a set of binaries. Binman is
+controlled by a description in the board device tree.'''
+
+ parser = ArgumentParser(epilog=epilog)
+ parser.add_argument('-B', '--build-dir', type=str, default='b',
+ help='Directory containing the build output')
+ parser.add_argument('-D', '--debug', action='store_true',
+        help='Enable debugging (provides a full traceback on error)')
+ parser.add_argument('-H', '--full-help', action='store_true',
+ default=False, help='Display the README file')
+ parser.add_argument('--tooldir', type=str,
+ default=os.path.join(os.path.expanduser('~/.binman-tools')),
+ help='Set the directory to store tools')
+ parser.add_argument('--toolpath', type=str, action='append',
+ help='Add a path to the list of directories containing tools')
+ parser.add_argument('-T', '--threads', type=int,
+ default=None, help='Number of threads to use (0=single-thread)')
+ parser.add_argument('--test-section-timeout', action='store_true',
+ help='Use a zero timeout for section multi-threading (for testing)')
+ parser.add_argument('-v', '--verbosity', default=1,
+ type=int, help='Control verbosity: 0=silent, 1=warnings, 2=notices, '
+ '3=info, 4=detail, 5=debug')
+ parser.add_argument('-V', '--version', nargs=0, action=BinmanVersion)
+
+ subparsers = parser.add_subparsers(dest='cmd')
+ subparsers.required = True
+
+ build_parser = subparsers.add_parser('build', help='Build firmware image')
+ build_parser.add_argument('-a', '--entry-arg', type=str, action='append',
+ help='Set argument value arg=value')
+ build_parser.add_argument('-b', '--board', type=str,
+ help='Board name to build')
+ build_parser.add_argument('-d', '--dt', type=str,
+ help='Configuration file (.dtb) to use')
+ build_parser.add_argument('--fake-dtb', action='store_true',
+ help='Use fake device tree contents (for testing only)')
+ build_parser.add_argument('--fake-ext-blobs', action='store_true',
+ help='Create fake ext blobs with dummy content (for testing only)')
+ build_parser.add_argument('--force-missing-bintools', type=str,
+ help='Comma-separated list of bintools to consider missing (for testing)')
+ build_parser.add_argument('-i', '--image', type=str, action='append',
+ help='Image filename to build (if not specified, build all)')
+ build_parser.add_argument('-I', '--indir', action='append',
+ help='Add a path to the list of directories to use for input files')
+ build_parser.add_argument('-m', '--map', action='store_true',
+ default=False, help='Output a map file for each image')
+ build_parser.add_argument('-M', '--allow-missing', action='store_true',
+ default=False, help='Allow external blobs and bintools to be missing')
+ build_parser.add_argument('-n', '--no-expanded', action='store_true',
+ help="Don't use 'expanded' versions of entries where available; "
+ "normally 'u-boot' becomes 'u-boot-expanded', for example")
+ _AddPreserve(build_parser)
+ build_parser.add_argument('-u', '--update-fdt', action='store_true',
+ default=False, help='Update the binman node with offset/size info')
+ build_parser.add_argument('--update-fdt-in-elf', type=str,
+ help='Update an ELF file with the output dtb: infile,outfile,begin_sym,end_sym')
+ build_parser.add_argument(
+ '-W', '--ignore-missing', action='store_true', default=False,
+ help='Return success even if there are missing blobs/bintools (requires -M)')
+
+ subparsers.add_parser(
+ 'bintool-docs', help='Write out bintool documentation (see bintool.rst)')
+
+ subparsers.add_parser(
+ 'entry-docs', help='Write out entry documentation (see entries.rst)')
+
+ list_parser = subparsers.add_parser('ls', help='List files in an image')
+ list_parser.add_argument('-i', '--image', type=str, required=True,
+ help='Image filename to list')
+ list_parser.add_argument('paths', type=str, nargs='*',
+ help='Paths within file to list (wildcard)')
+
+ make_extract_parser(subparsers)
+
+ replace_parser = subparsers.add_parser('replace',
+ help='Replace entries in an image')
+ replace_parser.add_argument('-C', '--compressed', action='store_true',
+ help='Input data is already compressed if needed for the entry')
+ replace_parser.add_argument('-i', '--image', type=str, required=True,
+ help='Image filename to update')
+ replace_parser.add_argument('-f', '--filename', type=str,
+ help='Input filename to read from')
+ replace_parser.add_argument('-F', '--fix-size', action='store_true',
+ help="Don't allow entries to be resized")
+ replace_parser.add_argument('-I', '--indir', type=str, default='',
+ help='Path to directory to use for input files')
+ replace_parser.add_argument('-m', '--map', action='store_true',
+ default=False, help='Output a map file for the updated image')
+ _AddPreserve(replace_parser)
+ replace_parser.add_argument('paths', type=str, nargs='*',
+ help='Paths within file to replace (wildcard)')
+
+ sign_parser = subparsers.add_parser('sign',
+ help='Sign entries in image')
+ sign_parser.add_argument('-a', '--algo', type=str, required=True,
+ help='Hash algorithm e.g. sha256,rsa4096')
+ sign_parser.add_argument('-f', '--file', type=str, required=False,
+ help='Input filename to sign')
+ sign_parser.add_argument('-i', '--image', type=str, required=True,
+ help='Image filename to update')
+ sign_parser.add_argument('-k', '--key', type=str, required=True,
+ help='Private key file for signing')
+ sign_parser.add_argument('paths', type=str, nargs='*',
+ help='Paths within file to sign (wildcard)')
+
+ if HAS_TESTS:
+ test_parser = subparsers.add_parser('test', help='Run tests')
+ test_parser.add_argument('-P', '--processes', type=int,
+ help='set number of processes to use for running tests')
+ test_parser.add_argument('-T', '--test-coverage', action='store_true',
+ default=False, help='run tests and check for 100%% coverage')
+ test_parser.add_argument(
+ '-X', '--test-preserve-dirs', action='store_true',
+ help='Preserve and display test-created input directories; also '
+ 'preserve the output directory if a single test is run (pass '
+        'test name at the end of the command line)')
+ test_parser.add_argument('tests', nargs='*',
+ help='Test names to run (omit for all)')
+
+ tool_parser = subparsers.add_parser('tool', help='Check bintools')
+ tool_parser.add_argument('-l', '--list', action='store_true',
+ help='List all known bintools')
+ tool_parser.add_argument(
+ '-f', '--fetch', action='store_true',
+ help='fetch a bintool from a known location (or: all/missing)')
+ tool_parser.add_argument('bintools', type=str, nargs='*')
+
+ return parser.parse_args(argv)
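For reference, a small sketch of how this parser might be exercised directly; the subcommand and option names come from the definitions above, while the filenames are placeholders:

    from binman import cmdline

    # Global options go before the subcommand; 'build' options follow it
    args = cmdline.ParseArgs(
        ['-v', '2', 'build', '-d', 'u-boot.dtb', '-O', 'out', '-m'])
    print(args.cmd)   # 'build'
    print(args.dt)    # 'u-boot.dtb'
    print(args.map)   # True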
diff --git a/tools/binman/control.py b/tools/binman/control.py
new file mode 100644
index 00000000000..2f00279232b
--- /dev/null
+++ b/tools/binman/control.py
@@ -0,0 +1,880 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Creates binary images from input files controlled by a description
+#
+
+from collections import OrderedDict
+import glob
+try:
+ import importlib.resources
+except ImportError: # pragma: no cover
+ # for Python 3.6
+ import importlib_resources
+import os
+import pkg_resources
+import re
+
+import sys
+
+from binman import bintool
+from binman import cbfs_util
+from binman import elf
+from binman import entry
+from dtoc import fdt_util
+from u_boot_pylib import command
+from u_boot_pylib import tools
+from u_boot_pylib import tout
+
+# These are imported if needed since they import libfdt
+state = None
+Image = None
+
+# List of images we plan to create
+# Make this global so that it can be referenced from tests
+images = OrderedDict()
+
+# Help text for each type of missing blob, dict:
+# key: Value of the entry's 'missing-msg' or entry name
+# value: Text for the help
+missing_blob_help = {}
+
+def _ReadImageDesc(binman_node, use_expanded):
+ """Read the image descriptions from the /binman node
+
+ This normally produces a single Image object called 'image'. But if
+ multiple images are present, they will all be returned.
+
+ Args:
+ binman_node: Node object of the /binman node
+        use_expanded: True to use expanded versions of entries, if available
+ Returns:
+ OrderedDict of Image objects, each of which describes an image
+ """
+ # For Image()
+ # pylint: disable=E1102
+ images = OrderedDict()
+ if 'multiple-images' in binman_node.props:
+ for node in binman_node.subnodes:
+ if not node.name.startswith('template'):
+ images[node.name] = Image(node.name, node,
+ use_expanded=use_expanded)
+ else:
+ images['image'] = Image('image', binman_node, use_expanded=use_expanded)
+ return images
+
+def _FindBinmanNode(dtb):
+ """Find the 'binman' node in the device tree
+
+ Args:
+ dtb: Fdt object to scan
+ Returns:
+ Node object of /binman node, or None if not found
+ """
+ for node in dtb.GetRoot().subnodes:
+ if node.name == 'binman':
+ return node
+ return None
+
+def _ReadMissingBlobHelp():
+ """Read the missing-blob-help file
+
+    This file contains help messages explaining what to do when external blobs
+ are missing.
+
+ Returns:
+ Dict:
+ key: Message tag (str)
+ value: Message text (str)
+ """
+
+ def _FinishTag(tag, msg, result):
+ if tag:
+ result[tag] = msg.rstrip()
+ tag = None
+ msg = ''
+ return tag, msg
+
+ my_data = pkg_resources.resource_string(__name__, 'missing-blob-help')
+ re_tag = re.compile('^([-a-z0-9]+):$')
+ result = {}
+ tag = None
+ msg = ''
+ for line in my_data.decode('utf-8').splitlines():
+ if not line.startswith('#'):
+ m_tag = re_tag.match(line)
+ if m_tag:
+ _, msg = _FinishTag(tag, msg, result)
+ tag = m_tag.group(1)
+ elif tag:
+ msg += line + '\n'
+ _FinishTag(tag, msg, result)
+ return result
+
+def _ShowBlobHelp(level, path, text, fname):
+ tout.do_output(level, '%s (%s):' % (path, fname))
+ for line in text.splitlines():
+ tout.do_output(level, ' %s' % line)
+ tout.do_output(level, '')
+
+def _ShowHelpForMissingBlobs(level, missing_list):
+ """Show help for each missing blob to help the user take action
+
+ Args:
+        level: Severity level to use for output (e.g. tout.ERROR, tout.WARNING)
+        missing_list: List of Entry objects to show help for
+ """
+ global missing_blob_help
+
+ if not missing_blob_help:
+ missing_blob_help = _ReadMissingBlobHelp()
+
+ for entry in missing_list:
+ tags = entry.GetHelpTags()
+
+ # Show the first match help message
+ shown_help = False
+ for tag in tags:
+ if tag in missing_blob_help:
+ _ShowBlobHelp(level, entry._node.path, missing_blob_help[tag],
+ entry.GetDefaultFilename())
+ shown_help = True
+ break
+ # Or a generic help message
+ if not shown_help:
+ _ShowBlobHelp(level, entry._node.path, "Missing blob",
+ entry.GetDefaultFilename())
+
+def GetEntryModules(include_testing=True):
+ """Get a set of entry class implementations
+
+    Args:
+        include_testing: True to include the '_testing' entry type
+
+    Returns:
+        Set of entry module names, one for each etype/*.py file
+ """
+ glob_list = pkg_resources.resource_listdir(__name__, 'etype')
+ glob_list = [fname for fname in glob_list if fname.endswith('.py')]
+ return set([os.path.splitext(os.path.basename(item))[0]
+ for item in glob_list
+ if include_testing or '_testing' not in item])
+
+def WriteEntryDocs(modules, test_missing=None):
+ """Write out documentation for all entries
+
+ Args:
+ modules: List of Module objects to get docs for
+ test_missing: Used for testing only, to force an entry's documentation
+ to show as missing even if it is present. Should be set to None in
+ normal use.
+ """
+ from binman.entry import Entry
+ Entry.WriteDocs(modules, test_missing)
+
+
+def write_bintool_docs(modules, test_missing=None):
+ """Write out documentation for all bintools
+
+ Args:
+ modules: List of Module objects to get docs for
+ test_missing: Used for testing only, to force an entry's documentation
+ to show as missing even if it is present. Should be set to None in
+ normal use.
+ """
+ bintool.Bintool.WriteDocs(modules, test_missing)
+
+
+def ListEntries(image_fname, entry_paths):
+ """List the entries in an image
+
+ This decodes the supplied image and displays a table of entries from that
+ image, preceded by a header.
+
+ Args:
+ image_fname: Image filename to process
+ entry_paths: List of wildcarded paths (e.g. ['*dtb*', 'u-boot*',
+ 'section/u-boot'])
+ """
+ image = Image.FromFile(image_fname)
+
+ entries, lines, widths = image.GetListEntries(entry_paths)
+
+ num_columns = len(widths)
+ for linenum, line in enumerate(lines):
+ if linenum == 1:
+ # Print header line
+ print('-' * (sum(widths) + num_columns * 2))
+ out = ''
+ for i, item in enumerate(line):
+ width = -widths[i]
+ if item.startswith('>'):
+ width = -width
+ item = item[1:]
+ txt = '%*s ' % (width, item)
+ out += txt
+ print(out.rstrip())
+
+
+def ReadEntry(image_fname, entry_path, decomp=True):
+ """Extract an entry from an image
+
+ This extracts the data from a particular entry in an image
+
+ Args:
+ image_fname: Image filename to process
+ entry_path: Path to entry to extract
+        decomp: True to return uncompressed data if the data is compressed,
+            False to return the raw data
+
+ Returns:
+ data extracted from the entry
+ """
+ global Image
+ from binman.image import Image
+
+ image = Image.FromFile(image_fname)
+ image.CollectBintools()
+ entry = image.FindEntryPath(entry_path)
+ return entry.ReadData(decomp)
+
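As an illustrative sketch of driving this helper from Python rather than via the 'extract' subcommand: the image filename and entry path below are placeholders, and the image is assumed to have been built by binman so that Image.FromFile() can parse it; prepare_output_dir() gives any decompression step somewhere to write temporary files:

    from binman import control
    from u_boot_pylib import tools

    tools.prepare_output_dir(None)   # temporary dir, used for any decompression
    data = control.ReadEntry('image.bin', 'section/u-boot')
    tools.write_file('u-boot.bin', data)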
+
+def ShowAltFormats(image):
+ """Show alternative formats available for entries in the image
+
+ This shows a list of formats available.
+
+ Args:
+ image (Image): Image to check
+ """
+ alt_formats = {}
+ image.CheckAltFormats(alt_formats)
+ print('%-10s %-20s %s' % ('Flag (-F)', 'Entry type', 'Description'))
+ for name, val in alt_formats.items():
+ entry, helptext = val
+ print('%-10s %-20s %s' % (name, entry.etype, helptext))
+
+
+def ExtractEntries(image_fname, output_fname, outdir, entry_paths,
+ decomp=True, alt_format=None):
+ """Extract the data from one or more entries and write it to files
+
+ Args:
+ image_fname: Image filename to process
+ output_fname: Single output filename to use if extracting one file, None
+ otherwise
+ outdir: Output directory to use (for any number of files), else None
+ entry_paths: List of entry paths to extract
+        decomp: True to decompress the entry data
+        alt_format: Alternative format to use for the data, 'list' to show
+            the available alternative formats, or None for the default
+
+ Returns:
+ List of EntryInfo records that were written
+ """
+ image = Image.FromFile(image_fname)
+ image.CollectBintools()
+
+ if alt_format == 'list':
+ ShowAltFormats(image)
+ return
+
+ # Output an entry to a single file, as a special case
+ if output_fname:
+ if not entry_paths:
+ raise ValueError('Must specify an entry path to write with -f')
+ if len(entry_paths) != 1:
+ raise ValueError('Must specify exactly one entry path to write with -f')
+ entry = image.FindEntryPath(entry_paths[0])
+ data = entry.ReadData(decomp, alt_format)
+ tools.write_file(output_fname, data)
+ tout.notice("Wrote %#x bytes to file '%s'" % (len(data), output_fname))
+ return
+
+ # Otherwise we will output to a path given by the entry path of each entry.
+ # This means that entries will appear in subdirectories if they are part of
+ # a sub-section.
+ einfos = image.GetListEntries(entry_paths)[0]
+ tout.notice('%d entries match and will be written' % len(einfos))
+ for einfo in einfos:
+ entry = einfo.entry
+ data = entry.ReadData(decomp, alt_format)
+ path = entry.GetPath()[1:]
+ fname = os.path.join(outdir, path)
+
+ # If this entry has children, create a directory for it and put its
+ # data in a file called 'root' in that directory
+ if entry.GetEntries():
+ if fname and not os.path.exists(fname):
+ os.makedirs(fname)
+ fname = os.path.join(fname, 'root')
+ tout.notice("Write entry '%s' size %x to '%s'" %
+ (entry.GetPath(), len(data), fname))
+ tools.write_file(fname, data)
+ return einfos
+
+
+def BeforeReplace(image, allow_resize):
+ """Handle getting an image ready for replacing entries in it
+
+ Args:
+        image: Image to prepare
+        allow_resize: True to allow entries to change size (drops the old
+            offset/size values so the image can be re-packed)
+ """
+ state.PrepareFromLoadedData(image)
+ image.CollectBintools()
+ image.LoadData(decomp=False)
+
+ # If repacking, drop the old offset/size values except for the original
+ # ones, so we are only left with the constraints.
+ if image.allow_repack and allow_resize:
+ image.ResetForPack()
+
+
+def ReplaceOneEntry(image, entry, data, do_compress, allow_resize):
+    """Handle replacing a single entry in an image
+
+ Args:
+ image: Image to update
+ entry: Entry to write
+ data: Data to replace with
+ do_compress: True to compress the data if needed, False if data is
+ already compressed so should be used as is
+ allow_resize: True to allow entries to change size (this does a re-pack
+ of the entries), False to raise an exception
+ """
+ if not entry.WriteData(data, do_compress):
+ if not image.allow_repack:
+ entry.Raise('Entry data size does not match, but allow-repack is not present for this image')
+ if not allow_resize:
+ entry.Raise('Entry data size does not match, but resize is disabled')
+
+
+def AfterReplace(image, allow_resize, write_map):
+    """Handle writing out an image after replacing entries in it
+
+ Args:
+ image: Image to write
+ allow_resize: True to allow entries to change size (this does a re-pack
+ of the entries), False to raise an exception
+ write_map: True to write a map file
+ """
+ tout.info('Processing image')
+ ProcessImage(image, update_fdt=True, write_map=write_map,
+ get_contents=False, allow_resize=allow_resize)
+
+
+def WriteEntryToImage(image, entry, data, do_compress=True, allow_resize=True,
+ write_map=False):
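+    """Replace the data for a single entry in an image and write it out
+
+    This prepares the image, writes the new data into the entry and then
+    re-packs and writes out the updated image.
+
+    Args:
+        image: Image to update
+        entry: Entry to write
+        data: Data to replace with
+        do_compress: True to compress the data if needed, False if data is
+            already compressed so should be used as is
+        allow_resize: True to allow entries to change size (this does a
+            re-pack of the entries), False to raise an exception
+        write_map: True to write a map file
+    """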
+ BeforeReplace(image, allow_resize)
+ tout.info('Writing data to %s' % entry.GetPath())
+ ReplaceOneEntry(image, entry, data, do_compress, allow_resize)
+ AfterReplace(image, allow_resize=allow_resize, write_map=write_map)
+
+
+def WriteEntry(image_fname, entry_path, data, do_compress=True,
+ allow_resize=True, write_map=False):
+ """Replace an entry in an image
+
+    This replaces the data in a particular entry in an image. The size of the
+ new data must match the size of the old data unless allow_resize is True.
+
+ Args:
+ image_fname: Image filename to process
+ entry_path: Path to entry to extract
+ data: Data to replace with
+ do_compress: True to compress the data if needed, False if data is
+ already compressed so should be used as is
+ allow_resize: True to allow entries to change size (this does a re-pack
+ of the entries), False to raise an exception
+ write_map: True to write a map file
+
+ Returns:
+ Image object that was updated
+ """
+ tout.info("Write entry '%s', file '%s'" % (entry_path, image_fname))
+ image = Image.FromFile(image_fname)
+ image.CollectBintools()
+ entry = image.FindEntryPath(entry_path)
+ WriteEntryToImage(image, entry, data, do_compress=do_compress,
+ allow_resize=allow_resize, write_map=write_map)
+
+ return image
+
+
+def ReplaceEntries(image_fname, input_fname, indir, entry_paths,
+ do_compress=True, allow_resize=True, write_map=False):
+ """Replace the data from one or more entries from input files
+
+ Args:
+ image_fname: Image filename to process
+ input_fname: Single input filename to use if replacing one file, None
+ otherwise
+ indir: Input directory to use (for any number of files), else None
+ entry_paths: List of entry paths to replace
+ do_compress: True if the input data is uncompressed and may need to be
+ compressed if the entry requires it, False if the data is already
+ compressed.
+        allow_resize: True to allow entries to change size (this does a
+            re-pack of the entries), False to raise an exception
+        write_map: True to write a map file
+
+ Returns:
+ List of EntryInfo records that were written
+ """
+ image_fname = os.path.abspath(image_fname)
+ image = Image.FromFile(image_fname)
+
+ image.mark_build_done()
+
+ # Replace an entry from a single file, as a special case
+ if input_fname:
+ if not entry_paths:
+ raise ValueError('Must specify an entry path to read with -f')
+ if len(entry_paths) != 1:
+ raise ValueError('Must specify exactly one entry path to write with -f')
+ entry = image.FindEntryPath(entry_paths[0])
+ data = tools.read_file(input_fname)
+ tout.notice("Read %#x bytes from file '%s'" % (len(data), input_fname))
+ WriteEntryToImage(image, entry, data, do_compress=do_compress,
+ allow_resize=allow_resize, write_map=write_map)
+ return
+
+ # Otherwise we will input from a path given by the entry path of each entry.
+ # This means that files must appear in subdirectories if they are part of
+ # a sub-section.
+ einfos = image.GetListEntries(entry_paths)[0]
+ tout.notice("Replacing %d matching entries in image '%s'" %
+ (len(einfos), image_fname))
+
+ BeforeReplace(image, allow_resize)
+
+ for einfo in einfos:
+ entry = einfo.entry
+ if entry.GetEntries():
+ tout.info("Skipping section entry '%s'" % entry.GetPath())
+ continue
+
+ path = entry.GetPath()[1:]
+ fname = os.path.join(indir, path)
+
+ if os.path.exists(fname):
+ tout.notice("Write entry '%s' from file '%s'" %
+ (entry.GetPath(), fname))
+ data = tools.read_file(fname)
+ ReplaceOneEntry(image, entry, data, do_compress, allow_resize)
+ else:
+ tout.warning("Skipping entry '%s' from missing file '%s'" %
+ (entry.GetPath(), fname))
+
+ AfterReplace(image, allow_resize=allow_resize, write_map=write_map)
+ return image
+
+def SignEntries(image_fname, input_fname, privatekey_fname, algo, entry_paths,
+ write_map=False):
+ """Sign and replace the data from one or more entries from input files
+
+ Args:
+ image_fname: Image filename to process
+ input_fname: Single input filename to use if replacing one file, None
+ otherwise
+ algo: Hashing algorithm
+ entry_paths: List of entry paths to sign
+ privatekey_fname: Private key filename
+ write_map (bool): True to write the map file
+ """
+ image_fname = os.path.abspath(image_fname)
+ image = Image.FromFile(image_fname)
+
+ image.mark_build_done()
+
+ BeforeReplace(image, allow_resize=True)
+
+ for entry_path in entry_paths:
+ entry = image.FindEntryPath(entry_path)
+ entry.UpdateSignatures(privatekey_fname, algo, input_fname)
+
+ AfterReplace(image, allow_resize=True, write_map=write_map)
+
+def _ProcessTemplates(parent):
+ """Handle any templates in the binman description
+
+ Args:
+ parent: Binman node to process (typically /binman)
+
+ Returns:
+ bool: True if any templates were processed
+
+    Search through each target node looking for those with an 'insert-template'
+ property. Use that as a list of references to template nodes to use to
+ adjust the target node.
+
+ Processing involves copying each subnode of the template node into the
+ target node.
+
+ This is done recursively, so templates can be at any level of the binman
+ image, e.g. inside a section.
+
+    See 'Templates' in the Binman documentation for details.
+ """
+ found = False
+ for node in parent.subnodes:
+ tmpl = fdt_util.GetPhandleList(node, 'insert-template')
+ if tmpl:
+ node.copy_subnodes_from_phandles(tmpl)
+ found = True
+
+ found |= _ProcessTemplates(node)
+ return found
+
+def _RemoveTemplates(parent):
+ """Remove any templates in the binman description
+ """
+ for node in parent.subnodes:
+ if node.name.startswith('template'):
+ node.Delete()
+
+def PrepareImagesAndDtbs(dtb_fname, select_images, update_fdt, use_expanded):
+ """Prepare the images to be processed and select the device tree
+
+ This function:
+ - reads in the device tree
+ - finds and scans the binman node to create all entries
+ - selects which images to build
+    - Updates the device trees with placeholder properties for offset,
+ image-pos, etc.
+
+ Args:
+ dtb_fname: Filename of the device tree file to use (.dts or .dtb)
+        select_images: List of images to output, or None for all
+        update_fdt: True to update the FDT with entry offsets, etc.
+ use_expanded: True to use expanded versions of entries, if available.
+ So if 'u-boot' is called for, we use 'u-boot-expanded' instead. This
+ is needed if update_fdt is True (although tests may disable it)
+
+ Returns:
+ OrderedDict of images:
+ key: Image name (str)
+ value: Image object
+ """
+ # Import these here in case libfdt.py is not available, in which case
+ # the above help option still works.
+ from dtoc import fdt
+ from dtoc import fdt_util
+ global images
+
+ # Get the device tree ready by compiling it and copying the compiled
+    # output into a file in our output directory. Then scan it for use
+ # in binman.
+ dtb_fname = fdt_util.EnsureCompiled(dtb_fname)
+ fname = tools.get_output_filename('u-boot.dtb.out')
+ tools.write_file(fname, tools.read_file(dtb_fname))
+ dtb = fdt.FdtScan(fname)
+
+ node = _FindBinmanNode(dtb)
+ if not node:
+ raise ValueError("Device tree '%s' does not have a 'binman' "
+ "node" % dtb_fname)
+
+ if _ProcessTemplates(node):
+ dtb.Sync(True)
+ fname = tools.get_output_filename('u-boot.dtb.tmpl1')
+ tools.write_file(fname, dtb.GetContents())
+
+ _RemoveTemplates(node)
+ dtb.Sync(True)
+
+ # Rescan the dtb to pick up the new phandles
+ dtb.Scan()
+ node = _FindBinmanNode(dtb)
+ fname = tools.get_output_filename('u-boot.dtb.tmpl2')
+ tools.write_file(fname, dtb.GetContents())
+
+ images = _ReadImageDesc(node, use_expanded)
+
+ if select_images:
+ skip = []
+ new_images = OrderedDict()
+ for name, image in images.items():
+ if name in select_images:
+ new_images[name] = image
+ else:
+ skip.append(name)
+ images = new_images
+ tout.notice('Skipping images: %s' % ', '.join(skip))
+
+ state.Prepare(images, dtb)
+
+ # Prepare the device tree by making sure that any missing
+ # properties are added (e.g. 'pos' and 'size'). The values of these
+ # may not be correct yet, but we add placeholders so that the
+ # size of the device tree is correct. Later, in
+ # SetCalculatedProperties() we will insert the correct values
+ # without changing the device-tree size, thus ensuring that our
+ # entry offsets remain the same.
+ for image in images.values():
+ image.gen_entries()
+ image.CollectBintools()
+ if update_fdt:
+ image.AddMissingProperties(True)
+ image.ProcessFdt(dtb)
+
+ for dtb_item in state.GetAllFdts():
+ dtb_item.Sync(auto_resize=True)
+ dtb_item.Pack()
+ dtb_item.Flush()
+ return images
+
+
+def ProcessImage(image, update_fdt, write_map, get_contents=True,
+ allow_resize=True, allow_missing=False,
+ allow_fake_blobs=False):
+    """Perform all steps for this image, including checking and writing it.
+
+ This means that errors found with a later image will be reported after
+ earlier images are already completed and written, but that does not seem
+ important.
+
+ Args:
+ image: Image to process
+        update_fdt: True to update the FDT with entry offsets, etc.
+ write_map: True to write a map file
+ get_contents: True to get the image contents from files, etc., False if
+ the contents is already present
+ allow_resize: True to allow entries to change size (this does a re-pack
+ of the entries), False to raise an exception
+ allow_missing: Allow blob_ext objects to be missing
+ allow_fake_blobs: Allow blob_ext objects to be faked with dummy files
+
+ Returns:
+ True if one or more external blobs are missing or faked,
+ False if all are present
+ """
+ if get_contents:
+ image.SetAllowMissing(allow_missing)
+ image.SetAllowFakeBlob(allow_fake_blobs)
+ image.GetEntryContents()
+ image.drop_absent()
+ image.GetEntryOffsets()
+
+ # We need to pack the entries to figure out where everything
+ # should be placed. This sets the offset/size of each entry.
+ # However, after packing we call ProcessEntryContents() which
+ # may result in an entry changing size. In that case we need to
+ # do another pass. Since the device tree often contains the
+ # final offset/size information we try to make space for this in
+    # AddMissingProperties() above. However, if the device tree is
+ # compressed we cannot know this compressed size in advance,
+ # since changing an offset from 0x100 to 0x104 (for example) can
+ # alter the compressed size of the device tree. So we need a
+ # third pass for this.
+ passes = 5
+ for pack_pass in range(passes):
+ try:
+ image.PackEntries()
+ except Exception as e:
+ if write_map:
+ fname = image.WriteMap()
+ print("Wrote map file '%s' to show errors" % fname)
+ raise
+ image.SetImagePos()
+ if update_fdt:
+ image.SetCalculatedProperties()
+ for dtb_item in state.GetAllFdts():
+ dtb_item.Sync()
+ dtb_item.Flush()
+ image.WriteSymbols()
+ sizes_ok = image.ProcessEntryContents()
+ if sizes_ok:
+ break
+ image.ResetForPack()
+ tout.info('Pack completed after %d pass(es)' % (pack_pass + 1))
+ if not sizes_ok:
+ image.Raise('Entries changed size after packing (tried %s passes)' %
+ passes)
+
+ image.BuildImage()
+ if write_map:
+ image.WriteMap()
+
+ missing_list = []
+ image.CheckMissing(missing_list)
+ if missing_list:
+ tout.error("Image '%s' is missing external blobs and is non-functional: %s\n" %
+ (image.name, ' '.join([e.name for e in missing_list])))
+ _ShowHelpForMissingBlobs(tout.ERROR, missing_list)
+
+ faked_list = []
+ image.CheckFakedBlobs(faked_list)
+ if faked_list:
+ tout.warning(
+ "Image '%s' has faked external blobs and is non-functional: %s\n" %
+ (image.name, ' '.join([os.path.basename(e.GetDefaultFilename())
+ for e in faked_list])))
+
+ optional_list = []
+ image.CheckOptional(optional_list)
+ if optional_list:
+ tout.warning(
+ "Image '%s' is missing optional external blobs but is still functional: %s\n" %
+ (image.name, ' '.join([e.name for e in optional_list])))
+ _ShowHelpForMissingBlobs(tout.WARNING, optional_list)
+
+ missing_bintool_list = []
+ image.check_missing_bintools(missing_bintool_list)
+ if missing_bintool_list:
+ tout.warning(
+ "Image '%s' has missing bintools and is non-functional: %s\n" %
+ (image.name, ' '.join([os.path.basename(bintool.name)
+ for bintool in missing_bintool_list])))
+ return any([missing_list, faked_list, missing_bintool_list])
+
+
+def Binman(args):
+ """The main control code for binman
+
+ This assumes that help and test options have already been dealt with. It
+ deals with the core task of building images.
+
+ Args:
+ args: Command line arguments Namespace object
+ """
+ global Image
+ global state
+
+ if args.full_help:
+ with importlib.resources.path('binman', 'README.rst') as readme:
+ tools.print_full_help(str(readme))
+ return 0
+
+ # Put these here so that we can import this module without libfdt
+ from binman.image import Image
+ from binman import state
+
+ tool_paths = []
+ if args.toolpath:
+ tool_paths += args.toolpath
+ if args.tooldir:
+ tool_paths.append(args.tooldir)
+ tools.set_tool_paths(tool_paths or None)
+ bintool.Bintool.set_tool_dir(args.tooldir)
+
+ if args.cmd in ['ls', 'extract', 'replace', 'tool', 'sign']:
+ try:
+ tout.init(args.verbosity)
+ if args.cmd == 'replace':
+ tools.prepare_output_dir(args.outdir, args.preserve)
+ else:
+ tools.prepare_output_dir(None)
+ if args.cmd == 'ls':
+ ListEntries(args.image, args.paths)
+
+ if args.cmd == 'extract':
+ ExtractEntries(args.image, args.filename, args.outdir, args.paths,
+ not args.uncompressed, args.format)
+
+ if args.cmd == 'replace':
+ ReplaceEntries(args.image, args.filename, args.indir, args.paths,
+ do_compress=not args.compressed,
+ allow_resize=not args.fix_size, write_map=args.map)
+
+ if args.cmd == 'sign':
+ SignEntries(args.image, args.file, args.key, args.algo, args.paths)
+
+ if args.cmd == 'tool':
+ if args.list:
+ bintool.Bintool.list_all()
+ elif args.fetch:
+ if not args.bintools:
+ raise ValueError(
+ "Please specify bintools to fetch or 'all' or 'missing'")
+ bintool.Bintool.fetch_tools(bintool.FETCH_ANY,
+ args.bintools)
+ else:
+ raise ValueError("Invalid arguments to 'tool' subcommand")
+ except:
+ raise
+ finally:
+ tools.finalise_output_dir()
+ return 0
+
+ elf_params = None
+ if args.update_fdt_in_elf:
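+        # The value has the form 'infile,outfile,begin_sym,end_sym'; the symbol
+        # names in this example are purely illustrative:
+        #
+        #   u-boot,u-boot.out,__dtb_begin,__dtb_end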
+ elf_params = args.update_fdt_in_elf.split(',')
+ if len(elf_params) != 4:
+ raise ValueError('Invalid args %s to --update-fdt-in-elf: expected infile,outfile,begin_sym,end_sym' %
+ elf_params)
+
+ # Try to figure out which device tree contains our image description
+ if args.dt:
+ dtb_fname = args.dt
+ else:
+ board = args.board
+ if not board:
+ raise ValueError('Must provide a board to process (use -b <board>)')
+ board_pathname = os.path.join(args.build_dir, board)
+ dtb_fname = os.path.join(board_pathname, 'u-boot.dtb')
+ if not args.indir:
+ args.indir = ['.']
+ args.indir.append(board_pathname)
+
+ try:
+ tout.init(args.verbosity)
+ elf.debug = args.debug
+ cbfs_util.VERBOSE = args.verbosity > 2
+ state.use_fake_dtb = args.fake_dtb
+
+ # Normally we replace the 'u-boot' etype with 'u-boot-expanded', etc.
+ # When running tests this can be disabled using this flag. When not
+ # updating the FDT in image, it is not needed by binman, but we use it
+ # for consistency, so that the images look the same to U-Boot at
+ # runtime.
+ use_expanded = not args.no_expanded
+ try:
+ tools.set_input_dirs(args.indir)
+ tools.prepare_output_dir(args.outdir, args.preserve)
+ state.SetEntryArgs(args.entry_arg)
+ state.SetThreads(args.threads)
+
+ images = PrepareImagesAndDtbs(dtb_fname, args.image,
+ args.update_fdt, use_expanded)
+
+ if args.test_section_timeout:
+ # Set the first image to timeout, used in testThreadTimeout()
+ images[list(images.keys())[0]].test_section_timeout = True
+ invalid = False
+ bintool.Bintool.set_missing_list(
+ args.force_missing_bintools.split(',') if
+ args.force_missing_bintools else None)
+
+ # Create the directory here instead of Entry.check_fake_fname()
+ # since that is called from a threaded context so different threads
+ # may race to create the directory
+ if args.fake_ext_blobs:
+ entry.Entry.create_fake_dir()
+
+ for image in images.values():
+ invalid |= ProcessImage(image, args.update_fdt, args.map,
+ allow_missing=args.allow_missing,
+ allow_fake_blobs=args.fake_ext_blobs)
+
+ # Write the updated FDTs to our output files
+ for dtb_item in state.GetAllFdts():
+ tools.write_file(dtb_item._fname, dtb_item.GetContents())
+
+ if elf_params:
+ data = state.GetFdtForEtype('u-boot-dtb').GetContents()
+ elf.UpdateFile(*elf_params, data)
+
+ bintool.Bintool.set_missing_list(None)
+
+ # This can only be True if -M is provided, since otherwise binman
+ # would have raised an error already
+ if invalid:
+ msg = 'Some images are invalid'
+ if args.ignore_missing:
+ tout.warning(msg)
+ else:
+ tout.error(msg)
+ return 103
+
+            # Use this to debug the time taken to pack the image
+ #state.TimingShow()
+ finally:
+ tools.finalise_output_dir()
+ finally:
+ tout.uninit()
+
+ return 0
diff --git a/tools/binman/elf.py b/tools/binman/elf.py
new file mode 100644
index 00000000000..2ecc95f7eb8
--- /dev/null
+++ b/tools/binman/elf.py
@@ -0,0 +1,573 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Handle various things related to ELF images
+#
+
+from collections import namedtuple, OrderedDict
+import io
+import os
+import re
+import shutil
+import struct
+import tempfile
+
+from u_boot_pylib import command
+from u_boot_pylib import tools
+from u_boot_pylib import tout
+
+ELF_TOOLS = True
+try:
+ from elftools.elf.elffile import ELFFile
+ from elftools.elf.elffile import ELFError
+ from elftools.elf.sections import SymbolTableSection
+except: # pragma: no cover
+ ELF_TOOLS = False
+
+# BSYM in little endian, keep in sync with include/binman_sym.h
+BINMAN_SYM_MAGIC_VALUE = 0x4d595342
+
+# Information about an ELF symbol:
+# section (str): Name of the section containing this symbol
+# address (int): Address of the symbol (its value)
+# size (int): Size of the symbol in bytes
+# weak (bool): True if the symbol is weak
+# offset (int or None): Offset of the symbol's data in the ELF file, or None if
+# not known
+Symbol = namedtuple('Symbol', ['section', 'address', 'size', 'weak', 'offset'])
+
+# Information about an ELF file:
+# data: Extracted program contents of ELF file (this would be loaded by an
+#    ELF loader when reading this file)
+# load: Load address of code
+# entry: Entry address of code
+# memsize: Number of bytes in memory occupied by loading this ELF file
+ElfInfo = namedtuple('ElfInfo', ['data', 'load', 'entry', 'memsize'])
+
+
+def GetSymbols(fname, patterns):
+ """Get the symbols from an ELF file
+
+ Args:
+ fname: Filename of the ELF file to read
+ patterns: List of regex patterns to search for, each a string
+
+ Returns:
+ None, if the file does not exist, or Dict:
+ key: Name of symbol
+            value: Symbol object for the symbol
+ """
+ stdout = tools.run('objdump', '-t', fname)
+ lines = stdout.splitlines()
+ if patterns:
+ re_syms = re.compile('|'.join(patterns))
+ else:
+ re_syms = None
+ syms = {}
+ syms_started = False
+ for line in lines:
+ if not line or not syms_started:
+ if 'SYMBOL TABLE' in line:
+ syms_started = True
+ line = None # Otherwise code coverage complains about 'continue'
+ continue
+ if re_syms and not re_syms.search(line):
+ continue
+
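+        # A symbol-table line from objdump looks something like this
+        # (illustrative):
+        #
+        #   0000000000000010 g     O .rodata  0000000000000008 _dt_ucode_base_size
+        #
+        # i.e. value, a 7-character flags field, then section, size and name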
+ space_pos = line.find(' ')
+ value, rest = line[:space_pos], line[space_pos + 1:]
+ flags = rest[:7]
+ parts = rest[7:].split()
+ section, size = parts[:2]
+ if len(parts) > 2:
+ name = parts[2] if parts[2] != '.hidden' else parts[3]
+ syms[name] = Symbol(section, int(value, 16), int(size, 16),
+ flags[1] == 'w', None)
+
+ # Sort dict by address
+ return OrderedDict(sorted(syms.items(), key=lambda x: x[1].address))
+
+def _GetFileOffset(elf, addr):
+ """Get the file offset for an address
+
+ Args:
+ elf (ELFFile): ELF file to check
+ addr (int): Address to search for
+
+    Returns:
+ int: Offset of that address in the ELF file, or None if not valid
+ """
+ for seg in elf.iter_segments():
+ seg_end = seg['p_vaddr'] + seg['p_filesz']
+ if seg.header['p_type'] == 'PT_LOAD':
+ if addr >= seg['p_vaddr'] and addr < seg_end:
+ return addr - seg['p_vaddr'] + seg['p_offset']
+
+def GetFileOffset(fname, addr):
+ """Get the file offset for an address
+
+ Args:
+ fname (str): Filename of ELF file to check
+ addr (int): Address to search for
+
+    Returns:
+ int: Offset of that address in the ELF file, or None if not valid
+ """
+ if not ELF_TOOLS:
+ raise ValueError("Python: No module named 'elftools'")
+ with open(fname, 'rb') as fd:
+ elf = ELFFile(fd)
+ return _GetFileOffset(elf, addr)
+
+def GetSymbolFromAddress(fname, addr):
+ """Get the symbol at a particular address
+
+ Args:
+ fname (str): Filename of ELF file to check
+ addr (int): Address to search for
+
+ Returns:
+ str: Symbol name, or None if no symbol at that address
+ """
+ if not ELF_TOOLS:
+ raise ValueError("Python: No module named 'elftools'")
+ with open(fname, 'rb') as fd:
+ elf = ELFFile(fd)
+ syms = GetSymbols(fname, None)
+ for name, sym in syms.items():
+ if sym.address == addr:
+ return name
+
+def GetSymbolFileOffset(fname, patterns):
+    """Get symbols from an ELF file, along with their file offsets
+
+ Args:
+ fname: Filename of the ELF file to read
+ patterns: List of regex patterns to search for, each a string
+
+ Returns:
+        Dict:
+            key: Name of symbol
+            value: Symbol object, with the offset field set to its file offset
+ """
+ if not ELF_TOOLS:
+ raise ValueError("Python: No module named 'elftools'")
+
+ syms = {}
+ with open(fname, 'rb') as fd:
+ elf = ELFFile(fd)
+
+ re_syms = re.compile('|'.join(patterns))
+ for section in elf.iter_sections():
+ if isinstance(section, SymbolTableSection):
+ for symbol in section.iter_symbols():
+ if not re_syms or re_syms.search(symbol.name):
+ addr = symbol.entry['st_value']
+ syms[symbol.name] = Symbol(
+ section.name, addr, symbol.entry['st_size'],
+ symbol.entry['st_info']['bind'] == 'STB_WEAK',
+ _GetFileOffset(elf, addr))
+
+ # Sort dict by address
+ return OrderedDict(sorted(syms.items(), key=lambda x: x[1].address))
+
+def GetSymbolAddress(fname, sym_name):
+ """Get a value of a symbol from an ELF file
+
+ Args:
+ fname: Filename of the ELF file to read
+        sym_name: Name of the symbol to look up
+
+ Returns:
+ Symbol value (as an integer) or None if not found
+ """
+ syms = GetSymbols(fname, [sym_name])
+ sym = syms.get(sym_name)
+ if not sym:
+ return None
+ return sym.address
+
+def GetPackString(sym, msg):
+ """Get the struct.pack/unpack string to use with a given symbol
+
+ Args:
+ sym (Symbol): Symbol to check. Only the size member is checked
+        msg (str): String which indicates the entry being processed, used for
+ errors
+
+ Returns:
+        str: struct format string to use, e.g. '<I'
+
+ Raises:
+ ValueError: Symbol has an unexpected size
+ """
+ if sym.size == 4:
+ return '<I'
+ elif sym.size == 8:
+ return '<Q'
+ else:
+ raise ValueError('%s has size %d: only 4 and 8 are supported' %
+ (msg, sym.size))
+
+def GetSymbolOffset(elf_fname, sym_name, base_sym=None):
+    """Read the offset of a symbol relative to a base symbol
+
+ This is useful for obtaining the value of a single symbol relative to the
+ base of a binary blob.
+
+ Args:
+ elf_fname: Filename of the ELF file to read
+ sym_name (str): Name of symbol to read
+        base_sym (str): Base symbol to use to calculate the offset (or None to
+            use '__image_copy_start')
+
+ Returns:
+ int: Offset of the symbol relative to the base symbol
+ """
+ if not base_sym:
+ base_sym = '__image_copy_start'
+ fname = tools.get_input_filename(elf_fname)
+ syms = GetSymbols(fname, [base_sym, sym_name])
+ base = syms[base_sym].address
+ val = syms[sym_name].address
+ return val - base
+
+def LookupAndWriteSymbols(elf_fname, entry, section, is_elf=False,
+ base_sym=None):
+ """Replace all symbols in an entry with their correct values
+
+    The entry contents are updated so that values for referenced symbols will
+    be visible at run time. This is done by finding the symbols' offsets in the
+ entry (using the ELF file) and replacing them with values from binman's data
+ structures.
+
+ Args:
+ elf_fname: Filename of ELF image containing the symbol information for
+ entry
+ entry: Entry to process
+        section: Section which can be used to look up symbol values
+        is_elf: True if the entry contents are an ELF file, in which case
+            symbol file offsets within the file are used as write positions
+        base_sym: Base symbol marking the start of the image (defaults to
+            '__image_copy_start')
+
+ Returns:
+ int: Number of symbols written
+ """
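+    # Binman symbols in the ELF file are named '_binman_<entry>_prop_<prop>'
+    # (e.g. '_binman_u_boot_any_prop_offset'; see include/binman_sym.h), plus
+    # the special marker symbol '_binman_sym_magic'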
+ if not base_sym:
+ base_sym = '__image_copy_start'
+ fname = tools.get_input_filename(elf_fname)
+ syms = GetSymbols(fname, ['image', 'binman'])
+ if is_elf:
+ if not ELF_TOOLS:
+ msg = ("Section '%s': entry '%s'" %
+ (section.GetPath(), entry.GetPath()))
+ raise ValueError(f'{msg}: Cannot write symbols to an ELF file without Python elftools')
+ new_syms = {}
+ with open(fname, 'rb') as fd:
+ elf = ELFFile(fd)
+ for name, sym in syms.items():
+ offset = _GetFileOffset(elf, sym.address)
+ new_syms[name] = Symbol(sym.section, sym.address, sym.size,
+ sym.weak, offset)
+ syms = new_syms
+
+ if not syms:
+ tout.debug('LookupAndWriteSymbols: no syms')
+ return 0
+ base = syms.get(base_sym)
+ if not base and not is_elf:
+ tout.debug('LookupAndWriteSymbols: no base')
+ return 0
+ base_addr = 0 if is_elf else base.address
+ count = 0
+ for name, sym in syms.items():
+ if name.startswith('_binman'):
+ msg = ("Section '%s': Symbol '%s'\n in entry '%s'" %
+ (section.GetPath(), name, entry.GetPath()))
+ if is_elf:
+ # For ELF files, use the file offset
+ offset = sym.offset
+ else:
+ # For blobs use the offset of the symbol, calculated by
+ # subtracting the base address which by definition is at the
+ # start
+ offset = sym.address - base.address
+ if offset < 0 or offset + sym.size > entry.contents_size:
+ raise ValueError('%s has offset %x (size %x) but the contents '
+ 'size is %x' % (entry.GetPath(), offset,
+ sym.size,
+ entry.contents_size))
+ pack_string = GetPackString(sym, msg)
+ if name == '_binman_sym_magic':
+ value = BINMAN_SYM_MAGIC_VALUE
+ else:
+ # Look up the symbol in our entry tables.
+ value = section.GetImage().LookupImageSymbol(name, sym.weak,
+ msg, base_addr)
+ if value is None:
+ value = -1
+ pack_string = pack_string.lower()
+ value_bytes = struct.pack(pack_string, value)
+ tout.debug('%s:\n insert %s, offset %x, value %x, length %d' %
+ (msg, name, offset, value, len(value_bytes)))
+ entry.data = (entry.data[:offset] + value_bytes +
+ entry.data[offset + sym.size:])
+ count += 1
+ if count:
+ tout.detail(
+ f"Section '{section.GetPath()}': entry '{entry.GetPath()}' : {count} symbols")
+ return count
+
+def GetSymbolValue(sym, data, msg):
+ """Get the value of a symbol
+
+ This can only be used on symbols with an integer value.
+
+ Args:
+ sym (Symbol): Symbol to check
+        data (bytes): Data for the ELF file - the symbol data appears at offset
+            sym.offset
+        msg (str): String which indicates the entry being processed, used for
+ errors
+
+ Returns:
+ int: Value of the symbol
+
+ Raises:
+ ValueError: Symbol has an unexpected size
+ """
+ pack_string = GetPackString(sym, msg)
+ value = struct.unpack(pack_string, data[sym.offset:sym.offset + sym.size])
+ return value[0]
+
+def MakeElf(elf_fname, text, data):
+ """Make an elf file with the given data in a single section
+
+    The output file has several sections, including '.text' and '.data',
+ containing the info provided in arguments.
+
+ Args:
+ elf_fname: Output filename
+ text: Text (code) to put in the file's .text section
+ data: Data to put in the file's .data section
+ """
+ outdir = tempfile.mkdtemp(prefix='binman.elf.')
+ s_file = os.path.join(outdir, 'elf.S')
+
+    # Split the text into two parts so that we can make the entry point two
+ # bytes after the start of the text section
+ text_bytes1 = ['\t.byte\t%#x' % byte for byte in text[:2]]
+ text_bytes2 = ['\t.byte\t%#x' % byte for byte in text[2:]]
+ data_bytes = ['\t.byte\t%#x' % byte for byte in data]
+ with open(s_file, 'w') as fd:
+        print('''/* Auto-generated assembly file to produce an ELF file for testing */
+
+.section .text
+.code32
+.globl _start
+.type _start, @function
+%s
+_start:
+%s
+.ident "comment"
+
+.comm fred,8,4
+
+.section .empty
+.globl _empty
+_empty:
+.byte 1
+
+.globl ernie
+.data
+.type ernie, @object
+.size ernie, 4
+ernie:
+%s
+''' % ('\n'.join(text_bytes1), '\n'.join(text_bytes2), '\n'.join(data_bytes)),
+ file=fd)
+ lds_file = os.path.join(outdir, 'elf.lds')
+
+ # Use a linker script to set the alignment and text address.
+ with open(lds_file, 'w') as fd:
+ print('''/* Auto-generated linker script to produce an ELF file for testing */
+
+PHDRS
+{
+ text PT_LOAD ;
+ data PT_LOAD ;
+ empty PT_LOAD FLAGS ( 6 ) ;
+ note PT_NOTE ;
+}
+
+SECTIONS
+{
+ . = 0xfef20000;
+ ENTRY(_start)
+ .text . : SUBALIGN(0)
+ {
+ *(.text)
+ } :text
+ .data : {
+ *(.data)
+ } :data
+ _bss_start = .;
+ .empty : {
+ *(.empty)
+ } :empty
+ /DISCARD/ : {
+ *(.note.gnu.property)
+ }
+ .note : {
+ *(.comment)
+ } :note
+ .bss _bss_start (OVERLAY) : {
+ *(.bss)
+ }
+}
+''', file=fd)
+ # -static: Avoid requiring any shared libraries
+ # -nostdlib: Don't link with C library
+ # -Wl,--build-id=none: Don't generate a build ID, so that we just get the
+ # text section at the start
+ # -m32: Build for 32-bit x86
+ # -T...: Specifies the link script, which sets the start address
+ cc, args = tools.get_target_compile_tool('cc')
+ args += ['-static', '-nostdlib', '-Wl,--build-id=none', '-m32', '-T',
+ lds_file, '-o', elf_fname, s_file]
+ stdout = command.output(cc, *args)
+ shutil.rmtree(outdir)
+
+def DecodeElf(data, location):
+ """Decode an ELF file and return information about it
+
+ Args:
+ data: Data from ELF file
+ location: Start address of data to return
+
+ Returns:
+ ElfInfo object containing information about the decoded ELF file
+ """
+ if not ELF_TOOLS:
+ raise ValueError("Python: No module named 'elftools'")
+ file_size = len(data)
+ with io.BytesIO(data) as fd:
+ elf = ELFFile(fd)
+ data_start = 0xffffffff
+ data_end = 0
+ mem_end = 0
+ virt_to_phys = 0
+
+ for i in range(elf.num_segments()):
+ segment = elf.get_segment(i)
+ if segment['p_type'] != 'PT_LOAD' or not segment['p_memsz']:
+ skipped = 1 # To make code-coverage see this line
+ continue
+ start = segment['p_paddr']
+ mend = start + segment['p_memsz']
+ rend = start + segment['p_filesz']
+ data_start = min(data_start, start)
+ data_end = max(data_end, rend)
+ mem_end = max(mem_end, mend)
+ if not virt_to_phys:
+ virt_to_phys = segment['p_paddr'] - segment['p_vaddr']
+
+ output = bytearray(data_end - data_start)
+ for i in range(elf.num_segments()):
+ segment = elf.get_segment(i)
+ if segment['p_type'] != 'PT_LOAD' or not segment['p_memsz']:
+ skipped = 1 # To make code-coverage see this line
+ continue
+ start = segment['p_paddr']
+ offset = 0
+ if start < location:
+ offset = location - start
+ start = location
+ # A legal ELF file can have a program header with non-zero length
+ # but zero-length file size and a non-zero offset which, added
+ # together, are greater than input->size (i.e. the total file size).
+ # So we need to not even test in the case that p_filesz is zero.
+ # Note: All of this code is commented out since we don't have a test
+ # case for it.
+ size = segment['p_filesz']
+ #if not size:
+ #continue
+ #end = segment['p_offset'] + segment['p_filesz']
+ #if end > file_size:
+ #raise ValueError('Underflow copying out the segment. File has %#x bytes left, segment end is %#x\n',
+ #file_size, end)
+ output[start - data_start:start - data_start + size] = (
+ segment.data()[offset:])
+ return ElfInfo(output, data_start, elf.header['e_entry'] + virt_to_phys,
+ mem_end - data_start)
+
+def UpdateFile(infile, outfile, start_sym, end_sym, insert):
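+    """Write data into an ELF file between two symbols
+
+    A copy of infile is written to outfile, with the bytes between the file
+    offsets of start_sym and end_sym replaced by the given data, padded to the
+    full size of the region with zero bytes.
+
+    Args:
+        infile (str): Filename of the input ELF file
+        outfile (str): Filename to write the updated file to
+        start_sym (str): Name of the symbol marking the start of the region
+        end_sym (str): Name of the symbol marking the end of the region
+        insert (bytes): Data to write into the region
+
+    Raises:
+        ValueError: The symbols cannot be found or the data is too large for
+            the region
+    """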
+ tout.notice("Creating file '%s' with data length %#x (%d) between symbols '%s' and '%s'" %
+ (outfile, len(insert), len(insert), start_sym, end_sym))
+ syms = GetSymbolFileOffset(infile, [start_sym, end_sym])
+ if len(syms) != 2:
+ raise ValueError("Expected two symbols '%s' and '%s': got %d: %s" %
+ (start_sym, end_sym, len(syms),
+ ','.join(syms.keys())))
+
+ size = syms[end_sym].offset - syms[start_sym].offset
+ if len(insert) > size:
+ raise ValueError("Not enough space in '%s' for data length %#x (%d); size is %#x (%d)" %
+ (infile, len(insert), len(insert), size, size))
+
+ data = tools.read_file(infile)
+ newdata = data[:syms[start_sym].offset]
+ newdata += insert + tools.get_bytes(0, size - len(insert))
+ newdata += data[syms[end_sym].offset:]
+ tools.write_file(outfile, newdata)
+ tout.info('Written to offset %#x' % syms[start_sym].offset)
+
+def read_loadable_segments(data):
+ """Read segments from an ELF file
+
+ Args:
+ data (bytes): Contents of file
+
+ Returns:
+ tuple:
+ list of segments, each:
+ int: Segment number (0 = first)
+ int: Start address of segment in memory
+ bytes: Contents of segment
+ int: entry address for image
+
+ Raises:
+ ValueError: elftools is not available
+ """
+ if not ELF_TOOLS:
+ raise ValueError("Python: No module named 'elftools'")
+ with io.BytesIO(data) as inf:
+ try:
+ elf = ELFFile(inf)
+ except ELFError as err:
+ raise ValueError(err)
+ entry = elf.header['e_entry']
+ segments = []
+ for i in range(elf.num_segments()):
+ segment = elf.get_segment(i)
+ if segment['p_type'] != 'PT_LOAD' or not segment['p_memsz']:
+ skipped = 1 # To make code-coverage see this line
+ continue
+ start = segment['p_offset']
+ rend = start + segment['p_filesz']
+ segments.append((i, segment['p_paddr'], data[start:rend]))
+ return segments, entry
+
+def is_valid(data):
+ """Check if some binary data is a valid ELF file
+
+ Args:
+ data (bytes): Bytes to check
+
+ Returns:
+ bool: True if a valid Elf file, False if not
+ """
+ try:
+ DecodeElf(data, 0)
+ return True
+ except ELFError:
+ return False
diff --git a/tools/binman/elf_test.py b/tools/binman/elf_test.py
new file mode 100644
index 00000000000..b64134123c1
--- /dev/null
+++ b/tools/binman/elf_test.py
@@ -0,0 +1,393 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2017 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Test for the elf module
+
+import os
+import shutil
+import struct
+import sys
+import tempfile
+import unittest
+
+from binman import elf
+from u_boot_pylib import command
+from u_boot_pylib import test_util
+from u_boot_pylib import tools
+from u_boot_pylib import tout
+
+binman_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
+
+
+class FakeEntry:
+    """A fake Entry object, used for testing
+
+ This supports an entry with a given size.
+ """
+ def __init__(self, contents_size):
+ self.contents_size = contents_size
+ self.data = tools.get_bytes(ord('a'), contents_size)
+
+ def GetPath(self):
+ return 'entry_path'
+
+
+class FakeSection:
+ """A fake Section object, used for testing
+
+ This has the minimum feature set needed to support testing elf functions.
+    A LookupImageSymbol() function is provided which returns a fake value for
+    any symbol requested.
+ """
+ def __init__(self, sym_value=1):
+ self.sym_value = sym_value
+
+ def GetPath(self):
+ return 'section_path'
+
+ def LookupImageSymbol(self, name, weak, msg, base_addr):
+ """Fake implementation which returns the same value for all symbols"""
+ return self.sym_value
+
+ def GetImage(self):
+ return self
+
+def BuildElfTestFiles(target_dir):
+ """Build ELF files used for testing in binman
+
+ This compiles and links the test files into the specified directory. It uses
+ the Makefile and source files in the binman test/ directory.
+
+ Args:
+ target_dir: Directory to put the files into
+ """
+ if not os.path.exists(target_dir):
+ os.mkdir(target_dir)
+ testdir = os.path.join(binman_dir, 'test')
+
+    # If binman is invoked from the main U-Boot Makefile the -r and -R
+ # flags are set in MAKEFLAGS. This prevents this Makefile from working
+ # correctly. So drop any make flags here.
+ if 'MAKEFLAGS' in os.environ:
+ del os.environ['MAKEFLAGS']
+ try:
+ tools.run('make', '-C', target_dir, '-f',
+ os.path.join(testdir, 'Makefile'), 'SRC=%s/' % testdir)
+ except ValueError as e:
+ # The test system seems to suppress this in a strange way
+ print(e)
+
+
+class TestElf(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ cls._indir = tempfile.mkdtemp(prefix='elf.')
+ tools.set_input_dirs(['.'])
+ BuildElfTestFiles(cls._indir)
+
+ @classmethod
+ def tearDownClass(cls):
+ if cls._indir:
+ shutil.rmtree(cls._indir)
+
+ @classmethod
+ def ElfTestFile(cls, fname):
+ return os.path.join(cls._indir, fname)
+
+ def testAllSymbols(self):
+ """Test that we can obtain a symbol from the ELF file"""
+ fname = self.ElfTestFile('u_boot_ucode_ptr')
+ syms = elf.GetSymbols(fname, [])
+ self.assertIn('_dt_ucode_base_size', syms)
+
+ def testRegexSymbols(self):
+        """Test that we can obtain a symbol from the ELF file by regex"""
+ fname = self.ElfTestFile('u_boot_ucode_ptr')
+ syms = elf.GetSymbols(fname, ['ucode'])
+ self.assertIn('_dt_ucode_base_size', syms)
+ syms = elf.GetSymbols(fname, ['missing'])
+ self.assertNotIn('_dt_ucode_base_size', syms)
+ syms = elf.GetSymbols(fname, ['missing', 'ucode'])
+ self.assertIn('_dt_ucode_base_size', syms)
+
+ def testMissingFile(self):
+ """Test that a missing file is detected"""
+ entry = FakeEntry(10)
+ section = FakeSection()
+ with self.assertRaises(ValueError) as e:
+ elf.LookupAndWriteSymbols('missing-file', entry, section)
+ self.assertIn("Filename 'missing-file' not found in input path",
+ str(e.exception))
+
+ def testOutsideFile(self):
+ """Test a symbol which extends outside the entry area is detected"""
+ if not elf.ELF_TOOLS:
+ self.skipTest('Python elftools not available')
+ entry = FakeEntry(10)
+ section = FakeSection()
+ elf_fname = self.ElfTestFile('u_boot_binman_syms')
+ with self.assertRaises(ValueError) as e:
+ elf.LookupAndWriteSymbols(elf_fname, entry, section)
+ self.assertIn('entry_path has offset 8 (size 8) but the contents size '
+ 'is a', str(e.exception))
+
+ def testMissingImageStart(self):
+ """Test that we detect a missing __image_copy_start symbol
+
+ This is needed to mark the start of the image. Without it we cannot
+ locate the offset of a binman symbol within the image.
+ """
+ entry = FakeEntry(10)
+ section = FakeSection()
+ elf_fname = self.ElfTestFile('u_boot_binman_syms_bad')
+ count = elf.LookupAndWriteSymbols(elf_fname, entry, section)
+ self.assertEqual(0, count)
+
+ def testBadSymbolSize(self):
+        """Test that an attempt to use an 8-bit symbol is detected
+
+ Only 32 and 64 bits are supported, since we need to store an offset
+ into the image.
+ """
+ if not elf.ELF_TOOLS:
+ self.skipTest('Python elftools not available')
+ entry = FakeEntry(10)
+ section = FakeSection()
+        elf_fname = self.ElfTestFile('u_boot_binman_syms_size')
+ with self.assertRaises(ValueError) as e:
+ elf.LookupAndWriteSymbols(elf_fname, entry, section)
+ self.assertIn('has size 1: only 4 and 8 are supported',
+ str(e.exception))
+
+ def testNoValue(self):
+ """Test the case where we have no value for the symbol
+
+        This should produce -1 values for the symbols which follow the magic
+        value, with the symbol data taking up the first 24 bytes of the image.
+ """
+ if not elf.ELF_TOOLS:
+ self.skipTest('Python elftools not available')
+ entry = FakeEntry(28)
+ section = FakeSection(sym_value=None)
+ elf_fname = self.ElfTestFile('u_boot_binman_syms')
+ count = elf.LookupAndWriteSymbols(elf_fname, entry, section)
+ self.assertEqual(5, count)
+ expected = (struct.pack('<L', elf.BINMAN_SYM_MAGIC_VALUE) +
+ tools.get_bytes(255, 20) +
+ tools.get_bytes(ord('a'), 4))
+ self.assertEqual(expected, entry.data)
+
+ def testDebug(self):
+        """Check that enabling debug in the elf module produces debug output"""
+ if not elf.ELF_TOOLS:
+ self.skipTest('Python elftools not available')
+ try:
+ tout.init(tout.DEBUG)
+ entry = FakeEntry(24)
+ section = FakeSection()
+ elf_fname = self.ElfTestFile('u_boot_binman_syms')
+ with test_util.capture_sys_output() as (stdout, stderr):
+ elf.LookupAndWriteSymbols(elf_fname, entry, section)
+ self.assertTrue(len(stdout.getvalue()) > 0)
+ finally:
+ tout.init(tout.WARNING)
+
+ def testMakeElf(self):
+ """Test for the MakeElf function"""
+ outdir = tempfile.mkdtemp(prefix='elf.')
+ expected_text = b'1234'
+ expected_data = b'wxyz'
+ elf_fname = os.path.join(outdir, 'elf')
+ bin_fname = os.path.join(outdir, 'bin')
+
+        # Make an Elf file and then convert it to a flat binary file. This
+ # should produce the original data.
+ elf.MakeElf(elf_fname, expected_text, expected_data)
+ objcopy, args = tools.get_target_compile_tool('objcopy')
+ args += ['-O', 'binary', elf_fname, bin_fname]
+ stdout = command.output(objcopy, *args)
+ with open(bin_fname, 'rb') as fd:
+ data = fd.read()
+ self.assertEqual(expected_text + expected_data, data)
+ shutil.rmtree(outdir)
+
+ def testDecodeElf(self):
+        """Test for the DecodeElf function"""
+ if not elf.ELF_TOOLS:
+ self.skipTest('Python elftools not available')
+ outdir = tempfile.mkdtemp(prefix='elf.')
+ expected_text = b'1234'
+ expected_data = b'wxyz'
+ elf_fname = os.path.join(outdir, 'elf')
+ elf.MakeElf(elf_fname, expected_text, expected_data)
+ data = tools.read_file(elf_fname)
+
+ load = 0xfef20000
+ entry = load + 2
+ expected = expected_text + expected_data
+ self.assertEqual(elf.ElfInfo(expected, load, entry, len(expected)),
+ elf.DecodeElf(data, 0))
+ self.assertEqual(elf.ElfInfo(b'\0\0' + expected[2:],
+ load, entry, len(expected)),
+ elf.DecodeElf(data, load + 2))
+ shutil.rmtree(outdir)
+
+ def testEmbedData(self):
+ """Test for the GetSymbolFileOffset() function"""
+ if not elf.ELF_TOOLS:
+ self.skipTest('Python elftools not available')
+
+ fname = self.ElfTestFile('embed_data')
+ offset = elf.GetSymbolFileOffset(fname, ['embed_start', 'embed_end'])
+ start = offset['embed_start'].offset
+ end = offset['embed_end'].offset
+ data = tools.read_file(fname)
+ embed_data = data[start:end]
+ expect = struct.pack('<IIIII', 2, 3, 0x1234, 0x5678, 0)
+ self.assertEqual(expect, embed_data)
+
+ def testEmbedFail(self):
+ """Test calling GetSymbolFileOffset() without elftools"""
+ old_val = elf.ELF_TOOLS
+ try:
+ elf.ELF_TOOLS = False
+ fname = self.ElfTestFile('embed_data')
+ with self.assertRaises(ValueError) as e:
+ elf.GetSymbolFileOffset(fname, ['embed_start', 'embed_end'])
+ with self.assertRaises(ValueError) as e:
+ elf.DecodeElf(tools.read_file(fname), 0xdeadbeef)
+ with self.assertRaises(ValueError) as e:
+ elf.GetFileOffset(fname, 0xdeadbeef)
+ with self.assertRaises(ValueError) as e:
+ elf.GetSymbolFromAddress(fname, 0xdeadbeef)
+ with self.assertRaises(ValueError) as e:
+ entry = FakeEntry(10)
+ section = FakeSection()
+ elf.LookupAndWriteSymbols(fname, entry, section, True)
+
+ self.assertIn(
+ "Section 'section_path': entry 'entry_path': Cannot write symbols to an ELF file without Python elftools",
+ str(e.exception))
+ finally:
+ elf.ELF_TOOLS = old_val
+
+ def testEmbedDataNoSym(self):
+ """Test for GetSymbolFileOffset() getting no symbols"""
+ if not elf.ELF_TOOLS:
+ self.skipTest('Python elftools not available')
+
+ fname = self.ElfTestFile('embed_data')
+ offset = elf.GetSymbolFileOffset(fname, ['missing_sym'])
+ self.assertEqual({}, offset)
+
+ def test_read_loadable_segments(self):
+ """Test for read_loadable_segments()"""
+ if not elf.ELF_TOOLS:
+ self.skipTest('Python elftools not available')
+ fname = self.ElfTestFile('embed_data')
+ segments, entry = elf.read_loadable_segments(tools.read_file(fname))
+
+ def test_read_segments_fail(self):
+ """Test for read_loadable_segments() without elftools"""
+ old_val = elf.ELF_TOOLS
+ try:
+ elf.ELF_TOOLS = False
+ fname = self.ElfTestFile('embed_data')
+ with self.assertRaises(ValueError) as e:
+ elf.read_loadable_segments(tools.read_file(fname))
+ self.assertIn("Python: No module named 'elftools'",
+ str(e.exception))
+ finally:
+ elf.ELF_TOOLS = old_val
+
+ def test_read_segments_bad_data(self):
+ """Test for read_loadable_segments() with an invalid ELF file"""
+ if not elf.ELF_TOOLS:
+ self.skipTest('Python elftools not available')
+ fname = self.ElfTestFile('embed_data')
+ with self.assertRaises(ValueError) as e:
+ elf.read_loadable_segments(tools.get_bytes(100, 100))
+ self.assertIn('Magic number does not match', str(e.exception))
+
+ def test_get_file_offset(self):
+ """Test GetFileOffset() gives the correct file offset for a symbol"""
+ if not elf.ELF_TOOLS:
+ self.skipTest('Python elftools not available')
+ fname = self.ElfTestFile('embed_data')
+ syms = elf.GetSymbols(fname, ['embed'])
+ addr = syms['embed'].address
+ offset = elf.GetFileOffset(fname, addr)
+ data = tools.read_file(fname)
+
+ # Just use the first 4 bytes and assume it is little endian
+ embed_data = data[offset:offset + 4]
+ embed_value = struct.unpack('<I', embed_data)[0]
+ self.assertEqual(0x1234, embed_value)
+
+ def test_get_file_offset_fail(self):
+ """Test calling GetFileOffset() without elftools"""
+ old_val = elf.ELF_TOOLS
+ try:
+ elf.ELF_TOOLS = False
+ fname = self.ElfTestFile('embed_data')
+ with self.assertRaises(ValueError) as e:
+ elf.GetFileOffset(fname, 0)
+ self.assertIn("Python: No module named 'elftools'",
+ str(e.exception))
+ finally:
+ elf.ELF_TOOLS = old_val
+
+ def test_get_symbol_from_address(self):
+ """Test GetSymbolFromAddress()"""
+ if not elf.ELF_TOOLS:
+ self.skipTest('Python elftools not available')
+ fname = self.ElfTestFile('elf_sections')
+ sym_name = 'calculate'
+ syms = elf.GetSymbols(fname, [sym_name])
+ addr = syms[sym_name].address
+ sym = elf.GetSymbolFromAddress(fname, addr)
+ self.assertEqual(sym_name, sym)
+
+ def test_get_symbol_from_address_fail(self):
+ """Test calling GetSymbolFromAddress() without elftools"""
+ old_val = elf.ELF_TOOLS
+ try:
+ elf.ELF_TOOLS = False
+ fname = self.ElfTestFile('embed_data')
+ with self.assertRaises(ValueError) as e:
+ elf.GetSymbolFromAddress(fname, 0x1000)
+ self.assertIn("Python: No module named 'elftools'",
+ str(e.exception))
+ finally:
+ elf.ELF_TOOLS = old_val
+
+ def test_is_valid(self):
+ """Test is_valid()"""
+ self.assertEqual(False, elf.is_valid(b''))
+ self.assertEqual(False, elf.is_valid(b'1234'))
+
+ fname = self.ElfTestFile('elf_sections')
+ data = tools.read_file(fname)
+ self.assertEqual(True, elf.is_valid(data))
+ self.assertEqual(False, elf.is_valid(data[4:]))
+
+ def test_get_symbol_offset(self):
+ fname = self.ElfTestFile('embed_data')
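+        """Test GetSymbolOffset() of a symbol relative to a base symbol"""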
+ syms = elf.GetSymbols(fname, ['embed_start', 'embed'])
+ expected = syms['embed'].address - syms['embed_start'].address
+ val = elf.GetSymbolOffset(fname, 'embed', 'embed_start')
+ self.assertEqual(expected, val)
+
+ with self.assertRaises(KeyError) as e:
+ elf.GetSymbolOffset(fname, 'embed')
+ self.assertIn('__image_copy_start', str(e.exception))
+
+ def test_get_symbol_address(self):
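+        """Test that GetSymbolAddress() returns the address of a symbol"""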
+ fname = self.ElfTestFile('embed_data')
+ addr = elf.GetSymbolAddress(fname, 'region_size')
+ self.assertEqual(0, addr)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/tools/binman/entries.rst b/tools/binman/entries.rst
new file mode 100644
index 00000000000..254afe76074
--- /dev/null
+++ b/tools/binman/entries.rst
@@ -0,0 +1,2867 @@
+Binman Entry Documentation
+==========================
+
+This file describes the entry types supported by binman. These entry types can
+be placed in an image one by one to build up a final firmware image. It is
+fairly easy to create new entry types. Just add a new file to the 'etype'
+directory. You can use the existing entries as examples.
+
+Note that some entries are subclasses of others, using and extending their
+features to produce new behaviours.
+
+
+
+.. _etype_atf_bl31:
+
+Entry: atf-bl31: ARM Trusted Firmware (ATF) BL31 blob
+-----------------------------------------------------
+
+Properties / Entry arguments:
+ - atf-bl31-path: Filename of file to read into entry. This is typically
+ called bl31.bin or bl31.elf
+
+This entry holds the run-time firmware, typically started by U-Boot SPL.
+See the U-Boot README for your architecture or board for how to use it. See
+https://github.com/ARM-software/arm-trusted-firmware for more information
+about ATF.
+
+
+
+.. _etype_atf_fip:
+
+Entry: atf-fip: ARM Trusted Firmware's Firmware Image Package (FIP)
+-------------------------------------------------------------------
+
+A FIP_ provides a way to group binaries in a firmware image, used by ARM's
+Trusted Firmware A (TF-A) code. It is a simple format consisting of a
+table of contents with information about the type, offset and size of the
+binaries in the FIP. It is quite similar to FMAP, with the major difference
+that it uses UUIDs to indicate the type of each entry.
+
+Note: It is recommended to always add an fdtmap to every image, as well as
+any FIPs so that binman and other tools can access the entire image
+correctly.
+
+The UUIDs correspond to useful names in `fiptool`, provided by ATF to
+operate on FIPs. Binman uses these names to make it easier to understand
+what is going on, although it is possible to provide a UUID if needed.
+
+The contents of the FIP are defined by subnodes of the atf-fip entry, e.g.::
+
+ atf-fip {
+ soc-fw {
+ filename = "bl31.bin";
+ };
+
+ scp-fwu-cfg {
+ filename = "bl2u.bin";
+ };
+
+ u-boot {
+ fip-type = "nt-fw";
+ };
+ };
+
+This describes a FIP with three entries: soc-fw, scp-fwu-cfg and nt-fw.
+You can use normal (non-external) binaries like U-Boot simply by adding a
+FIP type, with the `fip-type` property, as above.
+
+Since FIP exists to bring blobs together, Binman assumes that all FIP
+entries are external binaries. If a binary may not exist, you can use the
+`--allow-missing` flag to Binman, in which case the image is still created,
+even though it will not actually work.
+
+The size of the FIP depends on the size of the binaries. There is currently
+no way to specify a fixed size. If the `atf-fip` node has a `size` entry,
+this affects the space taken up by the `atf-fip` entry, but the FIP itself
+does not expand to use that space.
+
+Some other FIP features are available with Binman. The header and the
+entries have 64-bit flag words. These flags do not seem to be defined
+anywhere, but you can use `fip-hdr-flags` and `fip-flags` to set the values
+of the header and entries respectively.
+
+FIP entries can be aligned to a particular power-of-two boundary. Use
+fip-align for this.
+
+Binman only understands the entry types that are included in its
+implementation. It is possible to specify a 16-byte UUID instead, using the
+fip-uuid property. In this case Binman doesn't know what its type is, so
+it just uses the UUID. See the `u-boot` node in this example::
+
+ binman {
+ atf-fip {
+ fip-hdr-flags = /bits/ 64 <0x123>;
+ fip-align = <16>;
+ soc-fw {
+ fip-flags = /bits/ 64 <0x456>;
+ filename = "bl31.bin";
+ };
+
+ scp-fwu-cfg {
+ filename = "bl2u.bin";
+ };
+
+ u-boot {
+ fip-uuid = [fc 65 13 92 4a 5b 11 ec
+ 94 35 ff 2d 1c fc 79 9c];
+ };
+ };
+ fdtmap {
+ };
+ };
+
+Binman allows reading and updating FIP entries after the image is created,
+provided that an fdtmap is present too. Updates which change the size of a
+FIP entry will cause it to be expanded or contracted as needed.
+
+Properties for top-level atf-fip node
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+fip-hdr-flags (64 bits)
+ Sets the flags for the FIP header.
+
+Properties for subnodes
+~~~~~~~~~~~~~~~~~~~~~~~
+
+fip-type (str)
+ FIP type to use for this entry. This is needed if the entry
+    name is not a valid type. Valid types are defined in `fip_util.py`.
+ The FIP type defines the UUID that is used (they map 1:1).
+
+fip-uuid (16 bytes)
+ If there is no FIP-type name defined, or it is not supported by Binman,
+ this property sets the UUID. It should be a 16-byte value, following the
+ hex digits of the UUID.
+
+fip-flags (64 bits)
+ Set the flags for a FIP entry. Use in one of the subnodes of the
+    `atf-fip` entry.
+
+fip-align
+    Set the alignment for a FIP entry. FIP entries can be aligned to a
+ particular power-of-two boundary. The default is 1.
+
+Adding new FIP-entry types
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When new FIP entries are defined by TF-A they appear in the
+`TF-A source tree`_. You can use `fip_util.py` to update Binman to support
+new types, then `send a patch`_ to the U-Boot mailing list. There are two
+source files that the tool examines:
+
+- `include/tools_share/firmware_image_package.h` has the UUIDs
+- `tools/fiptool/tbbr_config.c` has the name and description for each UUID
+
+To run the tool::
+
+ $ tools/binman/fip_util.py -s /path/to/arm-trusted-firmware
+ Warning: UUID 'UUID_NON_TRUSTED_WORLD_KEY_CERT' is not mentioned in tbbr_config.c file
+ Existing code in 'tools/binman/fip_util.py' is up-to-date
+
+If it shows there is an update, it writes a new version of `fip_util.py`
+to `fip_util.py.out`. You can change the output file using the `-i` flag.
+If you have a problem, use `-D` to enable traceback debugging.
+
+FIP commentary
+~~~~~~~~~~~~~~
+
+As a side effect of use of UUIDs, FIP does not support multiple
+entries of the same type, such as might be used to store fonts or graphics
+icons, for example. For verified boot it could be used for each part of the
+image (e.g. separate FIPs for A and B) but cannot describe the whole
+firmware image. As with FMAP there is no hierarchy defined, although FMAP
+works around this by having 'section' areas which encompass others. A
+similar workaround would be possible with FIP but is not currently defined.
+
+It is recommended to always add an fdtmap to every image, as well as any
+FIPs so that binman and other tools can access the entire image correctly.
+
+.. _FIP: https://trustedfirmware-a.readthedocs.io/en/latest/design/firmware-design.html#firmware-image-package-fip
+.. _`TF-A source tree`: https://git.trustedfirmware.org/TF-A/trusted-firmware-a.git
+.. _`send a patch`: https://www.denx.de/wiki/U-Boot/Patches
+
+
+
+.. _etype_blob:
+
+Entry: blob: Arbitrary binary blob
+----------------------------------
+
+Note: This should not be used by itself. It is normally used as a parent
+class by other entry types.
+
+Properties / Entry arguments:
+ - filename: Filename of file to read into entry
+ - compress: Compression algorithm to use:
+ none: No compression
+ lz4: Use lz4 compression (via 'lz4' command-line utility)
+
+This entry reads data from a file and places it in the entry. The
+default filename is often specified by the subclass. See for
+example the 'u-boot' entry which provides the filename 'u-boot.bin'.
+
+If compression is enabled, an extra 'uncomp-size' property is written to
+the node (if enabled with -u) which provides the uncompressed size of the
+data.
+
+
+
+.. _etype_blob_dtb:
+
+Entry: blob-dtb: A blob that holds a device tree
+------------------------------------------------
+
+This is a blob containing a device tree. The contents of the blob are
+obtained from the list of available device-tree files, managed by the
+'state' module.
+
+Additional attributes:
+ prepend: Header used (e.g. 'length')
+
+
+
+.. _etype_blob_ext:
+
+Entry: blob-ext: Externally built binary blob
+---------------------------------------------
+
+Note: This should not be used by itself. It is normally used as a parent
+class by other entry types.
+
+If the file providing this blob is missing, binman can optionally ignore it
+and produce a broken image with a warning.
+
+See 'blob' for Properties / Entry arguments.
+
+
+
+.. _etype_blob_ext_list:
+
+Entry: blob-ext-list: List of externally built binary blobs
+-----------------------------------------------------------
+
+This is like blob-ext except that a number of blobs can be provided,
+typically with some sort of relationship, e.g. all are DDC parameters.
+
+If any of the external files needed by this list is missing, binman can
+optionally ignore it and produce a broken image with a warning.
+
+Args:
+ filenames: List of filenames to read and include
+
+
+
+.. _etype_blob_named_by_arg:
+
+Entry: blob-named-by-arg: A blob entry which gets its filename property from its subclass
+-----------------------------------------------------------------------------------------
+
+Properties / Entry arguments:
+ - <xxx>-path: Filename containing the contents of this entry (optional,
+ defaults to None)
+
+where <xxx> is the blob_fname argument to the constructor.
+
+This entry cannot be used directly. Instead, it is used as a parent class
+for another entry, which defines blob_fname. This parameter is used to
+set the entry-arg or property containing the filename. The entry-arg or
+property is in turn used to set the actual filename.
+
+See cros_ec_rw for an example of this.
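+
+As an illustration, a minimal subclass might look something like this (a
+sketch only; see the real `cros_ec_rw.py` etype for the actual code)::
+
+    class Entry_cros_ec_rw(Entry_blob_named_by_arg):
+        """A blob entry which contains a Chromium OS EC read-write image"""
+        def __init__(self, section, etype, node):
+            # Passing 'cros-ec-rw' means that the filename comes from the
+            # 'cros-ec-rw-path' entry argument
+            super().__init__(section, etype, node, 'cros-ec-rw')
+            self.external = True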
+
+
+
+.. _etype_blob_phase:
+
+Entry: blob-phase: Section that holds a phase binary
+----------------------------------------------------
+
+This is a base class that should not normally be used directly. It is used
+when converting a 'u-boot' entry automatically into a 'u-boot-expanded'
+entry; similarly for SPL.
+
+
+
+.. _etype_cbfs:
+
+Entry: cbfs: Coreboot Filesystem (CBFS)
+---------------------------------------
+
+A CBFS provides a way to group files together. It has a simple directory
+structure and allows the position of individual files to be set, since it is
+designed to support execute-in-place in an x86 SPI-flash device. Where XIP
+is not used, it supports compression and storing ELF files.
+
+CBFS is used by coreboot as its way of organising SPI-flash contents.
+
+The contents of the CBFS are defined by subnodes of the cbfs entry, e.g.::
+
+ cbfs {
+ size = <0x100000>;
+ u-boot {
+ cbfs-type = "raw";
+ };
+ u-boot-dtb {
+ cbfs-type = "raw";
+ };
+ };
+
+This creates a 1MB CBFS with two files in it: u-boot.bin and u-boot.dtb.
+Note that the size is required since binman does not support calculating it.
+The contents of each entry is just what binman would normally provide if it
+were not a CBFS node. A blob type can be used to import arbitrary files as
+with the second subnode below::
+
+ cbfs {
+ size = <0x100000>;
+ u-boot {
+ cbfs-name = "BOOT";
+ cbfs-type = "raw";
+ };
+
+ dtb {
+ type = "blob";
+ filename = "u-boot.dtb";
+ cbfs-type = "raw";
+ cbfs-compress = "lz4";
+ cbfs-offset = <0x100000>;
+ };
+ };
+
+This creates a CBFS 1MB in size with u-boot.bin (named "BOOT") and
+u-boot.dtb (named "dtb"), the latter compressed with the lz4 algorithm.
+
+
+Properties supported in the top-level CBFS node:
+
+cbfs-arch:
+ Defaults to "x86", but you can specify the architecture if needed.
+
+
+Properties supported in the CBFS entry subnodes:
+
+cbfs-name:
+ This is the name of the file created in CBFS. It defaults to the entry
+ name (which is the node name), but you can override it with this
+ property.
+
+cbfs-type:
+ This is the CBFS file type. The following are supported:
+
+ raw:
+ This is a 'raw' file, although compression is supported. It can be
+ used to store any file in CBFS.
+
+ stage:
+ This is an ELF file that has been loaded (i.e. mapped to memory), so
+ appears in the CBFS as a flat binary. The input file must be an ELF
+ image, for example this puts "u-boot" (the ELF image) into a 'stage'
+ entry::
+
+ cbfs {
+ size = <0x100000>;
+ u-boot-elf {
+ cbfs-name = "BOOT";
+ cbfs-type = "stage";
+ };
+ };
+
+ You can use your own ELF file with something like::
+
+ cbfs {
+ size = <0x100000>;
+ something {
+ type = "blob";
+ filename = "cbfs-stage.elf";
+ cbfs-type = "stage";
+ };
+ };
+
+ As mentioned, the file is converted to a flat binary, so it is
+ equivalent to adding "u-boot.bin", for example, but with the load and
+ start addresses specified by the ELF. At present there is no option
+ to add a flat binary with a load/start address, similar to the
+ 'add-flat-binary' option in cbfstool.
+
+cbfs-offset:
+ This is the offset of the file's data within the CBFS. It is used to
+ specify where the file should be placed in cases where a fixed position
+ is needed. Typical uses are for code which is not relocatable and must
+ execute in-place from a particular address. This works because SPI flash
+ is generally mapped into memory on x86 devices. The file header is
+    placed before this offset so that the start of the data lines up exactly
+    with the chosen offset. If this property is not provided, then the file is
+ placed in the next available spot.
+
+The current implementation supports only a subset of CBFS features. It does
+not support other file types (e.g. payload), adding multiple files (like the
+'files' entry with a pattern supported by binman), putting files at a
+particular offset in the CBFS and a few other things.
+
+Of course binman can create images containing multiple CBFSs, simply by
+defining these in the binman config::
+
+
+ binman {
+ size = <0x800000>;
+ cbfs {
+ offset = <0x100000>;
+ size = <0x100000>;
+ u-boot {
+ cbfs-type = "raw";
+ };
+ u-boot-dtb {
+ cbfs-type = "raw";
+ };
+ };
+
+ cbfs2 {
+ offset = <0x700000>;
+ size = <0x100000>;
+ u-boot {
+ cbfs-type = "raw";
+ };
+ u-boot-dtb {
+ cbfs-type = "raw";
+ };
+ image {
+ type = "blob";
+ filename = "image.jpg";
+ };
+ };
+ };
+
+This creates an 8MB image with two CBFSs, one at offset 1MB, one at 7MB,
+both of size 1MB.
+
+
+
+.. _etype_collection:
+
+Entry: collection: An entry which contains a collection of other entries
+------------------------------------------------------------------------
+
+Properties / Entry arguments:
+ - content: List of phandles to entries to include
+
+This allows reusing the contents of other entries. The contents of the
+listed entries are combined to form this entry. This serves as a useful
+base class for entry types which need to process data from elsewhere in
+the image, not necessarily child entries.
+
+The entries can generally be anywhere in the same image, even if they are in
+a different section from this entry.
+
+
+
+.. _etype_cros_ec_rw:
+
+Entry: cros-ec-rw: A blob entry which contains a Chromium OS read-write EC image
+--------------------------------------------------------------------------------
+
+Properties / Entry arguments:
+ - cros-ec-rw-path: Filename containing the EC image
+
+This entry holds a Chromium OS EC (embedded controller) image, for use in
+updating the EC on startup via software sync.
+
+
+
+.. _etype_efi_capsule:
+
+Entry: capsule: Entry for generating EFI Capsule files
+------------------------------------------------------
+
+The parameters needed for generation of the capsules can be provided
+as properties in the entry.
+
+Properties / Entry arguments:
+ - image-index: Unique number for identifying corresponding
+ payload image. Number between 1 and descriptor count, i.e.
+ the total number of firmware images that can be updated. Mandatory
+ property.
+ - image-guid: Image GUID which will be used for identifying the
+ updatable image on the board. Mandatory property.
+ - hardware-instance: Optional number for identifying unique
+ hardware instance of a device in the system. Default value of 0
+ for images where value is not to be used.
+ - fw-version: Value of image version that can be put on the capsule
+ through the Firmware Management Protocol(FMP) header.
+ - monotonic-count: Count used when signing an image.
+ - private-key: Path to PEM formatted .key private key file. Mandatory
+ property for generating signed capsules.
+ - public-key-cert: Path to PEM formatted .crt public key certificate
+ file. Mandatory property for generating signed capsules.
+    - oem-flags: OEM flags to be passed through the capsule header.
+
+ Since this is a subclass of Entry_section, all properties of the parent
+ class also apply here. Except for the properties stated as mandatory, the
+ rest of the properties are optional.
+
+For more details on the description of the capsule format, and the capsule
+update functionality, refer to Section 8.5 and Chapter 23 in the `UEFI
+specification`_.
+
+The capsule parameters like image index and image GUID are passed as
+properties in the entry. The payload to be used in the capsule is to be
+provided as a subnode of the capsule entry.
+
+A typical capsule entry node would then look something like this::
+
+ capsule {
+ type = "efi-capsule";
+ image-index = <0x1>;
+ /* Image GUID for testing capsule update */
+ image-guid = SANDBOX_UBOOT_IMAGE_GUID;
+ hardware-instance = <0x0>;
+ private-key = "path/to/the/private/key";
+ public-key-cert = "path/to/the/public-key-cert";
+ oem-flags = <0x8000>;
+
+ u-boot {
+ };
+ };
+
+In the above example, the capsule payload is the U-Boot image. The
+capsule entry would read the contents of the payload and put them
+into the capsule. Any external file can also be specified as the
+payload using the blob-ext subnode.
+
+.. _`UEFI specification`: https://uefi.org/sites/default/files/resources/UEFI_Spec_2_10_Aug29.pdf
+
+
+
+.. _etype_efi_empty_capsule:
+
+Entry: efi-empty-capsule: Entry for generating EFI Empty Capsule files
+----------------------------------------------------------------------
+
+The parameters needed for generation of the empty capsules can
+be provided as properties in the entry.
+
+Properties / Entry arguments:
+ - image-guid: Image GUID which will be used for identifying the
+ updatable image on the board. Mandatory for accept capsule.
+    - capsule-type: String to indicate the type of capsule to generate. Valid
+ values are 'accept' and 'revert'.
+
+For more details on the description of the capsule format, and the capsule
+update functionality, refer to Section 8.5 and Chapter 23 in the `UEFI
+specification`_. For more information on the empty capsule, refer to
+sections 2.3.2 and 2.3.3 in the `Dependable Boot specification`_.
+
+A typical accept empty capsule entry node would then look something
+like this::
+
+ empty-capsule {
+ type = "efi-empty-capsule";
+ /* GUID of the image being accepted */
+ image-type-id = SANDBOX_UBOOT_IMAGE_GUID;
+ capsule-type = "accept";
+ };
+
+A typical revert empty capsule entry node would then look something
+like this::
+
+ empty-capsule {
+ type = "efi-empty-capsule";
+ capsule-type = "revert";
+ };
+
+The empty capsules do not have any input payload image.
+
+.. _`UEFI specification`: https://uefi.org/sites/default/files/resources/UEFI_Spec_2_10_Aug29.pdf
+.. _`Dependable Boot specification`: https://git.codelinaro.org/linaro/dependable-boot/mbfw/uploads/6f7ddfe3be24e18d4319e108a758d02e/mbfw.pdf
+
+
+
+.. _etype_encrypted:
+
+Entry: encrypted: Externally built encrypted binary blob
+--------------------------------------------------------
+
+This entry provides the functionality to include information about how to
+decrypt an encrypted binary. This information is added to the
+resulting device tree by adding a new cipher node in the entry's parent
+node (i.e. the binary).
+
+The key that must be used to decrypt the binary is either directly embedded
+in the device tree or indirectly by specifying a key source. The key source
+can be used as an id of a key that is stored in an external device.
+
+Using an embedded key
+~~~~~~~~~~~~~~~~~~~~~
+
+This is an example using an embedded key::
+
+ blob-ext {
+ filename = "encrypted-blob.bin";
+ };
+
+ encrypted {
+ algo = "aes256-gcm";
+ iv-filename = "encrypted-blob.bin.iv";
+ key-filename = "encrypted-blob.bin.key";
+ };
+
+This entry generates the following device tree structure from the example
+above::
+
+ data = [...]
+ cipher {
+ algo = "aes256-gcm";
+ key = <0x...>;
+ iv = <0x...>;
+ };
+
+The data property is generated by the blob-ext etype; the cipher node and
+its contents are generated by this etype.
+
+Using an external key
+~~~~~~~~~~~~~~~~~~~~~
+
+Instead of embedding the key itself into the device tree, it is also
+possible to address an externally stored key by specifying a 'key-source'
+instead of the 'key'::
+
+ blob-ext {
+ filename = "encrypted-blob.bin";
+ };
+
+ encrypted {
+ algo = "aes256-gcm";
+ iv-filename = "encrypted-blob.bin.iv";
+ key-source = "external-key-id";
+ };
+
+This entry generates the following device tree structure from the example
+above::
+
+ data = [...]
+ cipher {
+ algo = "aes256-gcm";
+ key-source = "external-key-id";
+ iv = <0x...>;
+ };
+
+Properties
+~~~~~~~~~~
+
+Properties / Entry arguments:
+ - algo: The encryption algorithm. Currently no algorithm is supported
+ out-of-the-box. Certain algorithms will be added in future
+ patches.
+ - iv-filename: The name of the file containing the initialization
+ vector (in short iv). See
+ https://en.wikipedia.org/wiki/Initialization_vector
+ - key-filename: The name of the file containing the key. Either
+ key-filename or key-source must be provided.
+ - key-source: The key that should be used. Either key-filename or
+ key-source must be provided.
+
+
+
+.. _etype_fdtmap:
+
+Entry: fdtmap: An entry which contains an FDT map
+-------------------------------------------------
+
+Properties / Entry arguments:
+ None
+
+An FDT map is just a header followed by an FDT containing a list of all the
+entries in the image. The root node corresponds to the image node in the
+original FDT, and an image-name property indicates the image name in that
+original tree.
+
+The header is the string _FDTMAP_ followed by 8 unused bytes.
+
+When used, this entry will be populated with an FDT map which reflects the
+entries in the current image. Hierarchy is preserved, and all offsets and
+sizes are included.
+
+Note that the -u option must be provided to ensure that binman updates the
+FDT with the position of each entry.
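+
+As a hedged sketch, a simple image description that includes an fdtmap
+might look like this (the u-boot entry is just an example payload)::
+
+    binman {
+        u-boot {
+        };
+        fdtmap {
+        };
+    };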
+
+Example output for a simple image with U-Boot and an FDT map::
+
+ / {
+ image-name = "binman";
+ size = <0x00000112>;
+ image-pos = <0x00000000>;
+ offset = <0x00000000>;
+ u-boot {
+ size = <0x00000004>;
+ image-pos = <0x00000000>;
+ offset = <0x00000000>;
+ };
+ fdtmap {
+ size = <0x0000010e>;
+ image-pos = <0x00000004>;
+ offset = <0x00000004>;
+ };
+ };
+
+If allow-repack is used then 'orig-offset' and 'orig-size' properties are
+added as necessary. See the binman README.
+
+When extracting files, an alternative 'fdt' format is available for fdtmaps.
+Use `binman extract -F fdt ...` to use this. It will export a devicetree,
+without the fdtmap header, so it can be viewed with `fdtdump`.
+
+
+
+.. _etype_files:
+
+Entry: files: A set of files arranged in a section
+--------------------------------------------------
+
+Properties / Entry arguments:
+ - pattern: Filename pattern to match the files to include
+ - files-compress: Compression algorithm to use:
+ none: No compression
+ lz4: Use lz4 compression (via 'lz4' command-line utility)
+ - files-align: Align each file to the given alignment
+
+This entry reads a number of files and places each in a separate sub-entry
+within this entry. To access these you need to enable device-tree updates
+at run-time so you can obtain the file positions.
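+
+As a hedged sketch (the pattern and alignment values are illustrative
+only), a files entry might look like this::
+
+    files {
+        /* illustrative pattern and alignment */
+        pattern = "*.dat";
+        files-compress = "none";
+        files-align = <4>;
+    };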
+
+
+
+.. _etype_fill:
+
+Entry: fill: An entry which is filled to a particular byte value
+----------------------------------------------------------------
+
+Properties / Entry arguments:
+ - fill-byte: Byte to use to fill the entry
+
+Note that the size property must be set since otherwise this entry does not
+know how large it should be.
+
+You can often achieve the same effect using the pad-byte property of the
+overall image, in that the space between entries will then be padded with
+that byte. But this entry is sometimes useful for explicitly setting the
+byte value of a region.
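+
+As a hedged sketch (the size and byte value are illustrative only), a fill
+entry might look like this::
+
+    fill {
+        /* illustrative size and fill value */
+        size = <0x1000>;
+        fill-byte = [ff];
+    };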
+
+
+
+.. _etype_fit:
+
+Entry: fit: Flat Image Tree (FIT)
+---------------------------------
+
+This calls mkimage to create a FIT (U-Boot Flat Image Tree) based on the
+input provided.
+
+Nodes for the FIT should be written out in the binman configuration just as
+they would be in a file passed to mkimage.
+
+For example, this creates an image containing a FIT with U-Boot SPL::
+
+ binman {
+ fit {
+ description = "Test FIT";
+ fit,fdt-list = "of-list";
+
+ images {
+ kernel@1 {
+ description = "SPL";
+ os = "u-boot";
+ type = "rkspi";
+ arch = "arm";
+ compression = "none";
+ load = <0>;
+ entry = <0>;
+
+ u-boot-spl {
+ };
+ };
+ };
+ };
+ };
+
+More complex setups can be created, with generated nodes, as described
+below.
+
+Properties (in the 'fit' node itself)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Special properties have a `fit,` prefix, indicating that they should be
+processed but not included in the final FIT.
+
+The top-level 'fit' node supports the following special properties:
+
+ fit,external-offset
+ Indicates that the contents of the FIT are external and provides the
+ external offset. This is passed to mkimage via the -E and -p flags.
+
+ fit,align
+ Indicates what alignment to use for the FIT and its external data,
+ and provides the alignment to use. This is passed to mkimage via
+ the -B flag.
+
+ fit,fdt-list
+ Indicates the entry argument which provides the list of device tree
+ files for the gen-fdt-nodes operation (as below). This is often
+ `of-list` meaning that `-a of-list="dtb1 dtb2..."` should be passed
+ to binman.
+
+ fit,fdt-list-val
+ As an alternative to fit,fdt-list the list of device tree files
+ can be provided in this property as a string list, e.g.::
+
+ fit,fdt-list-val = "dtb1", "dtb2";
+
+Substitutions
+~~~~~~~~~~~~~
+
+Node names and property values support a basic string-substitution feature.
+Available substitutions for '@' nodes (and property values) are:
+
+SEQ:
+ Sequence number of the generated fdt (1, 2, ...)
+NAME:
+ Name of the dtb as provided (i.e. without adding '.dtb')
+
+The `default` property, if present, will be automatically set to the name
+of the configuration whose devicetree matches the `default-dt` entry
+argument, e.g. with `-a default-dt=sun50i-a64-pine64-lts`.
+
+Available substitutions for property values in these nodes are:
+
+DEFAULT-SEQ:
+ Sequence number of the default fdt, as provided by the 'default-dt'
+ entry argument
+
+Available operations
+~~~~~~~~~~~~~~~~~~~~
+
+You can add an operation to an '@' node to indicate which operation is
+required::
+
+ @fdt-SEQ {
+ fit,operation = "gen-fdt-nodes";
+ ...
+ };
+
+Available operations are:
+
+gen-fdt-nodes
+ Generate FDT nodes as above. This is the default if there is no
+ `fit,operation` property.
+
+split-elf
+ Split an ELF file into a separate node for each segment.
+
+Generating nodes from an FDT list (gen-fdt-nodes)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+U-Boot supports creating fdt and config nodes automatically. To do this,
+pass an `of-list` property (e.g. `-a of-list=file1 file2`). This tells
+binman that you want to generate nodes for two files: `file1.dtb` and
+`file2.dtb`. The `fit,fdt-list` property (see above) indicates that
+`of-list` should be used. If the property is missing you will get an error.
+
+Then add a 'generator node', a node with a name starting with '@'::
+
+ images {
+ @fdt-SEQ {
+ description = "fdt-NAME";
+ type = "flat_dt";
+ compression = "none";
+ };
+ };
+
+This tells binman to create nodes `fdt-1` and `fdt-2` for each of your two
+files. All the properties you specify will be included in the node. This
+node acts like a template to generate the nodes. The generator node itself
+does not appear in the output - it is replaced with what binman generates.
+A 'data' property is created with the contents of the FDT file.
+
+You can create config nodes in a similar way::
+
+ configurations {
+ default = "@config-DEFAULT-SEQ";
+ @config-SEQ {
+ description = "NAME";
+ firmware = "atf";
+ loadables = "uboot";
+ fdt = "fdt-SEQ";
+ };
+ };
+
+This tells binman to create nodes `config-1` and `config-2`, i.e. a config
+for each of your two files.
+
+Note that if no devicetree files are provided (with '-a of-list' as above)
+then no nodes will be generated.
+
+Generating nodes from an ELF file (split-elf)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The split-elf operation splits an ELF file into a separate node for each
+segment, using the generator node as a template. The following special
+properties are available:
+
+ fit,load
+ Generates a `load = <...>` property with the load address of the
+ segment
+
+ fit,entry
+ Generates an `entry = <...>` property with the entry address of the
+ ELF. This is only produced for the first entry
+
+ fit,data
+ Generates a `data = <...>` property with the contents of the segment
+
+ fit,firmware
+ Generates a `firmware = <...>` property. Provides a list of possible
+ nodes to be used as the `firmware` property value. The first valid
+ node is picked as the firmware. Any remaining valid nodes are
+ prepended to the `loadables` property generated by `fit,loadables`
+
+ fit,loadables
+ Generates a `loadables = <...>` property with a list of the generated
+ nodes (including all nodes if this operation is used multiple times)
+
+
+Here is an example showing ATF, TEE and a device tree all combined::
+
+ fit {
+ description = "test-desc";
+ #address-cells = <1>;
+ fit,fdt-list = "of-list";
+
+ images {
+ u-boot {
+ description = "U-Boot (64-bit)";
+ type = "standalone";
+ os = "U-Boot";
+ arch = "arm64";
+ compression = "none";
+ load = <CONFIG_TEXT_BASE>;
+ u-boot-nodtb {
+ };
+ };
+ @fdt-SEQ {
+ description = "fdt-NAME.dtb";
+ type = "flat_dt";
+ compression = "none";
+ };
+ @atf-SEQ {
+ fit,operation = "split-elf";
+ description = "ARM Trusted Firmware";
+ type = "firmware";
+ arch = "arm64";
+ os = "arm-trusted-firmware";
+ compression = "none";
+ fit,load;
+ fit,entry;
+ fit,data;
+
+ atf-bl31 {
+ };
+ hash {
+ algo = "sha256";
+ };
+ };
+
+ @tee-SEQ {
+ fit,operation = "split-elf";
+ description = "TEE";
+ type = "tee";
+ arch = "arm64";
+ os = "tee";
+ compression = "none";
+ fit,load;
+ fit,entry;
+ fit,data;
+
+ tee-os {
+ };
+ hash {
+ algo = "sha256";
+ };
+ };
+ };
+
+ configurations {
+ default = "@config-DEFAULT-SEQ";
+ @config-SEQ {
+ description = "conf-NAME.dtb";
+ fdt = "fdt-SEQ";
+ fit,firmware = "atf-1", "u-boot";
+ fit,loadables;
+ };
+ };
+ };
+
+If ATF-BL31 is available, this generates a node for each segment in the
+ELF file, for example::
+
+ images {
+ atf-1 {
+ data = <...contents of first segment...>;
+ data-offset = <0x00000000>;
+ entry = <0x00040000>;
+ load = <0x00040000>;
+ compression = "none";
+ os = "arm-trusted-firmware";
+ arch = "arm64";
+ type = "firmware";
+ description = "ARM Trusted Firmware";
+ hash {
+ algo = "sha256";
+ value = <...hash of first segment...>;
+ };
+ };
+ atf-2 {
+ data = <...contents of second segment...>;
+ load = <0xff3b0000>;
+ compression = "none";
+ os = "arm-trusted-firmware";
+ arch = "arm64";
+ type = "firmware";
+ description = "ARM Trusted Firmware";
+ hash {
+ algo = "sha256";
+ value = <...hash of second segment...>;
+ };
+ };
+ };
+
+The same applies for OP-TEE if that is available.
+
+If each binary is not available, the relevant template node (@atf-SEQ or
+@tee-SEQ) is removed from the output.
+
+This also generates a `config-xxx` node for each device tree in `of-list`.
+Note that the U-Boot build system uses `-a of-list=$(CONFIG_OF_LIST)`
+so you can use `CONFIG_OF_LIST` to define that list. In this example it is
+set up for `firefly-rk3399` with a single device tree and the default set
+with `-a default-dt=$(CONFIG_DEFAULT_DEVICE_TREE)`, so the resulting output
+is::
+
+ configurations {
+ default = "config-1";
+ config-1 {
+ loadables = "u-boot", "atf-2", "atf-3", "tee-1", "tee-2";
+ description = "rk3399-firefly.dtb";
+ fdt = "fdt-1";
+ firmware = "atf-1";
+ };
+ };
+
+U-Boot SPL can then load the firmware (ATF) and all the loadables (U-Boot
+proper, ATF and TEE), then proceed with the boot.
+
+
+
+.. _etype_fmap:
+
+Entry: fmap: An entry which contains an Fmap section
+----------------------------------------------------
+
+Properties / Entry arguments:
+ None
+
+FMAP is a simple format used by flashrom, an open-source utility for
+reading and writing the SPI flash, typically on x86 CPUs. The format
+provides flashrom with a list of areas, so it knows what is in the flash.
+It can then read or write just a single area, instead of the whole flash.
+
+The format is defined by the flashrom project, in the file lib/fmap.h -
+see www.flashrom.org/Flashrom for more information.
+
+When used, this entry will be populated with an FMAP which reflects the
+entries in the current image. Note that any hierarchy is squashed, since
+FMAP does not support this. Each section is represented as an area appearing
+before its contents, so that it is possible to reconstruct the hierarchy
+from the FMAP by using the offset information. This convention does not
+seem to be documented, but is used in Chromium OS.
+
+To mark an area as preserved, use the normal 'preserved' flag in the entry.
+This will result in the corresponding FMAP area having the
+FMAP_AREA_PRESERVE flag. This flag does not automatically propagate down to
+child entries.
+
+CBFS entries appear as a single entry, i.e. the sub-entries are ignored.
+
+
+
+.. _etype_gbb:
+
+Entry: gbb: An entry which contains a Chromium OS Google Binary Block
+---------------------------------------------------------------------
+
+Properties / Entry arguments:
+ - hardware-id: Hardware ID to use for this build (a string)
+ - keydir: Directory containing the public keys to use
+ - bmpblk: Filename containing images used by recovery
+
+Chromium OS uses a GBB to store various pieces of information, in particular
+the root and recovery keys that are used to verify the boot process. Some
+more details are here:
+
+ https://www.chromium.org/chromium-os/firmware-porting-guide/2-concepts
+
+but note that the page dates from 2013 so is quite out of date. See
+README.chromium for how to obtain the required keys and tools.
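+
+As a hedged sketch (the size, hardware ID, key directory and filename are
+illustrative only), a GBB entry might look like this::
+
+    gbb {
+        /* illustrative values */
+        size = <0x2180>;
+        hardware-id = "SAMPLE";
+        keydir = "devkeys";
+        bmpblk = "bmpblk.bin";
+    };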
+
+
+
+.. _etype_image_header:
+
+Entry: image-header: An entry which contains a pointer to the FDT map
+---------------------------------------------------------------------
+
+Properties / Entry arguments:
+ location: Location of header ("start" or "end" of image). This is
+ optional. If omitted then the entry must have an offset property.
+
+This adds an 8-byte entry to the start or end of the image, pointing to the
+location of the FDT map. The format is a magic number followed by an offset
+from the start or end of the image, in two's complement format.
+
+This entry must be in the top-level part of the image.
+
+NOTE: If the location is at the start/end, you will probably need to specify
+sort-by-offset for the image, unless you actually put the image header
+first/last in the entry list.
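+
+As a hedged sketch, an image with the header placed last (pointing back at
+the fdtmap) might look like this::
+
+    binman {
+        u-boot {
+        };
+        fdtmap {
+        };
+        image-header {
+            /* placed last, so sort-by-offset is not needed */
+            location = "end";
+        };
+    };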
+
+
+
+.. _etype_intel_cmc:
+
+Entry: intel-cmc: Intel Chipset Micro Code (CMC) file
+-----------------------------------------------------
+
+Properties / Entry arguments:
+ - filename: Filename of file to read into entry
+
+This file contains microcode for some devices in a special format. An
+example filename is 'Microcode/C0_22211.BIN'.
+
+See README.x86 for information about x86 binary blobs.
+
+
+
+.. _etype_intel_descriptor:
+
+Entry: intel-descriptor: Intel flash descriptor block (4KB)
+-----------------------------------------------------------
+
+Properties / Entry arguments:
+ filename: Filename of file containing the descriptor. This is typically
+ a 4KB binary file, sometimes called 'descriptor.bin'
+
+This entry is placed at the start of flash and provides information about
+the SPI flash regions. In particular it provides the base address and
+size of the ME (Management Engine) region, allowing us to place the ME
+binary in the right place.
+
+With this entry in your image, the position of the 'intel-me' entry will be
+fixed in the image, which avoids you needing to specify an offset for that
+region. This is useful, because it is not possible to change the position
+of the ME region without updating the descriptor.
+
+See README.x86 for information about x86 binary blobs.
+
+
+
+.. _etype_intel_fit:
+
+Entry: intel-fit: Intel Firmware Image Table (FIT)
+--------------------------------------------------
+
+This entry contains a dummy FIT as required by recent Intel CPUs. The FIT
+contains information about the firmware and microcode available in the
+image.
+
+At present binman only supports a basic FIT with no microcode.
+
+
+
+.. _etype_intel_fit_ptr:
+
+Entry: intel-fit-ptr: Intel Firmware Image Table (FIT) pointer
+--------------------------------------------------------------
+
+This entry contains a pointer to the FIT. It is required to be at address
+0xffffffc0 in the image.
+
+
+
+.. _etype_intel_fsp:
+
+Entry: intel-fsp: Intel Firmware Support Package (FSP) file
+-----------------------------------------------------------
+
+Properties / Entry arguments:
+ - filename: Filename of file to read into entry
+
+This file contains binary blobs which are used on some devices to make the
+platform work. U-Boot executes this code since it is not possible to set up
+the hardware using U-Boot open-source code. Documentation is typically not
+available in sufficient detail to allow this.
+
+An example filename is 'FSP/QUEENSBAY_FSP_GOLD_001_20-DECEMBER-2013.fd'
+
+See README.x86 for information about x86 binary blobs.
+
+
+
+.. _etype_intel_fsp_m:
+
+Entry: intel-fsp-m: Intel Firmware Support Package (FSP) memory init
+--------------------------------------------------------------------
+
+Properties / Entry arguments:
+ - filename: Filename of file to read into entry
+
+This file contains a binary blob which is used on some devices to set up
+SDRAM. U-Boot executes this code in SPL so that it can make full use of
+memory. Documentation is typically not available in sufficient detail to
+allow U-Boot to do this itself.
+
+An example filename is 'fsp_m.bin'
+
+See README.x86 for information about x86 binary blobs.
+
+
+
+.. _etype_intel_fsp_s:
+
+Entry: intel-fsp-s: Intel Firmware Support Package (FSP) silicon init
+---------------------------------------------------------------------
+
+Properties / Entry arguments:
+ - filename: Filename of file to read into entry
+
+This file contains a binary blob which is used on some devices to set up
+the silicon. U-Boot executes this code in U-Boot proper after SDRAM is
+running, so that it can make full use of memory. Documentation is typically
+not available in sufficient detail to allow U-Boot to do this itself.
+
+An example filename is 'fsp_s.bin'
+
+See README.x86 for information about x86 binary blobs.
+
+
+
+.. _etype_intel_fsp_t:
+
+Entry: intel-fsp-t: Intel Firmware Support Package (FSP) temp ram init
+----------------------------------------------------------------------
+
+Properties / Entry arguments:
+ - filename: Filename of file to read into entry
+
+This file contains a binary blob which is used on some devices to set up
+temporary memory (Cache-as-RAM or CAR). U-Boot executes this code in TPL so
+that it has access to memory for its stack and initial storage.
+
+An example filename is 'fsp_t.bin'
+
+See README.x86 for information about x86 binary blobs.
+
+
+
+.. _etype_intel_ifwi:
+
+Entry: intel-ifwi: Intel Integrated Firmware Image (IFWI) file
+--------------------------------------------------------------
+
+Properties / Entry arguments:
+ - filename: Filename of file to read into entry. This is either the
+ IFWI file itself, or a file that can be converted into one using a
+ tool
+ - convert-fit: If present this indicates that the ifwitool should be
+ used to convert the provided file into a IFWI.
+
+This file contains code and data used by the SoC that is required to make
+it work. It includes U-Boot TPL, microcode, things related to the CSE
+(Converged Security Engine, the microcontroller that loads all the firmware)
+and other items beyond the wit of man.
+
+A typical filename is 'ifwi.bin' for an IFWI file, or 'fitimage.bin' for a
+file that will be converted to an IFWI.
+
+The position of this entry is generally set by the intel-descriptor entry.
+
+The contents of the IFWI are specified by the subnodes of the IFWI node.
+Each subnode describes an entry which is placed into the IFWI with a given
+sub-partition (and optional entry name).
+
+Properties for subnodes:
+ - ifwi-subpart: sub-partition to put this entry into, e.g. "IBBP"
+ - ifwi-entry: entry name to use, e.g. "IBBL"
+ - ifwi-replace: if present, indicates that the item should be replaced
+ in the IFWI. Otherwise it is added.
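+
+As a hedged sketch (the filename, sub-partition and entry names are
+illustrative only), these subnode properties might be used like this::
+
+    intel-ifwi {
+        filename = "fitimage.bin";
+        convert-fit;
+
+        u-boot-tpl {
+            /* illustrative sub-partition and entry names */
+            ifwi-subpart = "IBBP";
+            ifwi-entry = "IBBL";
+            ifwi-replace;
+        };
+    };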
+
+See README.x86 for information about x86 binary blobs.
+
+
+
+.. _etype_intel_me:
+
+Entry: intel-me: Intel Management Engine (ME) file
+--------------------------------------------------
+
+Properties / Entry arguments:
+ - filename: Filename of file to read into entry
+
+This file contains code used by the SoC that is required to make it work.
+The Management Engine is like a background task that runs things that are
+not clearly documented, but may include keyboard, display and network
+access. For platforms that use ME it is not possible to disable it. U-Boot
+does not directly execute code in the ME binary.
+
+A typical filename is 'me.bin'.
+
+The position of this entry is generally set by the intel-descriptor entry.
+
+See README.x86 for information about x86 binary blobs.
+
+
+
+.. _etype_intel_mrc:
+
+Entry: intel-mrc: Intel Memory Reference Code (MRC) file
+--------------------------------------------------------
+
+Properties / Entry arguments:
+ - filename: Filename of file to read into entry
+
+This file contains code for setting up the SDRAM on some Intel systems. This
+is executed by U-Boot when needed early during startup. A typical filename
+is 'mrc.bin'.
+
+See README.x86 for information about x86 binary blobs.
+
+
+
+.. _etype_intel_refcode:
+
+Entry: intel-refcode: Intel Reference Code file
+-----------------------------------------------
+
+Properties / Entry arguments:
+ - filename: Filename of file to read into entry
+
+This file contains code for setting up the platform on some Intel systems.
+This is executed by U-Boot when needed early during startup. A typical
+filename is 'refcode.bin'.
+
+See README.x86 for information about x86 binary blobs.
+
+
+
+.. _etype_intel_vbt:
+
+Entry: intel-vbt: Intel Video BIOS Table (VBT) file
+---------------------------------------------------
+
+Properties / Entry arguments:
+ - filename: Filename of file to read into entry
+
+This file contains code that sets up the integrated graphics subsystem on
+some Intel SoCs. U-Boot executes this when the display is started up.
+
+See README.x86 for information about Intel binary blobs.
+
+
+
+.. _etype_intel_vga:
+
+Entry: intel-vga: Intel Video Graphics Adaptor (VGA) file
+---------------------------------------------------------
+
+Properties / Entry arguments:
+ - filename: Filename of file to read into entry
+
+This file contains code that sets up the integrated graphics subsystem on
+some Intel SoCs. U-Boot executes this when the display is started up.
+
+This is similar to the VBT file but in a different format.
+
+See README.x86 for information about Intel binary blobs.
+
+
+
+.. _etype_mkimage:
+
+Entry: mkimage: Binary produced by mkimage
+------------------------------------------
+
+Properties / Entry arguments:
+ - args: Arguments to pass to mkimage
+ - data-to-imagename: Indicates that the -d data should be passed in as
+ the image name also (-n)
+ - multiple-data-files: boolean to tell binman to pass all files as
+ datafiles to mkimage instead of creating a temporary file containing
+ the concatenation of the datafiles
+ - filename: filename of output binary generated by mkimage
+
+The data passed to mkimage via the -d flag is collected from subnodes of the
+mkimage node, e.g.::
+
+ mkimage {
+ filename = "imximage.bin";
+ args = "-n test -T imximage";
+
+ u-boot-spl {
+ };
+ };
+
+This calls mkimage to create an imximage with `u-boot-spl.bin` as the data
+file, with mkimage being called like this::
+
+ mkimage -d <data_file> -n test -T imximage <output_file>
+
+The output from mkimage then becomes part of the image produced by
+binman, and is also written into the `imximage.bin` file. If you need to put
+multiple things in the data file, you can use a section, or just multiple
+subnodes like this::
+
+ mkimage {
+ args = "-n test -T imximage";
+
+ u-boot-spl {
+ };
+
+ u-boot-tpl {
+ };
+ };
+
+Note that binman places the contents (here SPL and TPL) into a single file
+and passes that to mkimage using the -d option.
+
+To pass all datafiles untouched to mkimage::
+
+ mkimage {
+ args = "-n rk3399 -T rkspi";
+ multiple-data-files;
+
+ u-boot-tpl {
+ };
+
+ u-boot-spl {
+ };
+ };
+
+This calls mkimage to create a Rockchip RK3399-specific first-stage
+bootloader, made up of TPL and SPL. Since this first-stage bootloader
+requires the TPL and SPL to be aligned in a particular way, along with
+some other adjustments handled by mkimage directly, binman is told not to
+concatenate the datafiles before passing them to mkimage.
+
+To use CONFIG options in the arguments, use a string list instead, as in
+this example which also produces four arguments::
+
+ mkimage {
+ args = "-n", CONFIG_SYS_SOC, "-T imximage";
+
+ u-boot-spl {
+ };
+ };
+
+If you need to pass the input data in with the -n argument as well, then use
+the 'data-to-imagename' property::
+
+ mkimage {
+ args = "-T imximage";
+ data-to-imagename;
+
+ u-boot-spl {
+ };
+ };
+
+That will pass the data to mkimage both as the data file (with -d) and as
+the image name (with -n). In both cases, a filename is passed as the
+argument, with the actual data being in that file.
+
+If you need to pass different data in with -n, then use an `imagename` subnode::
+
+ mkimage {
+ args = "-T imximage";
+
+ imagename {
+ blob {
+ filename = "spl/u-boot-spl.cfgout";
+ };
+ };
+
+ u-boot-spl {
+ };
+ };
+
+This will pass in u-boot-spl as the input data and the .cfgout file as the
+-n data.
+
+
+
+.. _etype_null:
+
+Entry: null: An entry which has no contents of its own
+------------------------------------------------------
+
+Note that the size property must be set since otherwise this entry does not
+know how large it should be.
+
+The contents are set by the containing section, e.g. the section's pad
+byte.
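+
+As a hedged sketch, reserving a 16-byte gap which ends up filled with the
+section's pad byte (the size is illustrative only)::
+
+    null {
+        /* illustrative size */
+        size = <16>;
+    };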
+
+
+
+.. _etype_opensbi:
+
+Entry: opensbi: RISC-V OpenSBI fw_dynamic blob
+----------------------------------------------
+
+Properties / Entry arguments:
+ - opensbi-path: Filename of file to read into entry. This is typically
+ called fw_dynamic.bin
+
+This entry holds the run-time firmware, typically started by U-Boot SPL.
+See the U-Boot README for your architecture or board for how to use it. See
+https://github.com/riscv/opensbi for more information about OpenSBI.
+
+
+
+.. _etype_powerpc_mpc85xx_bootpg_resetvec:
+
+Entry: powerpc-mpc85xx-bootpg-resetvec: PowerPC mpc85xx bootpg + resetvec code for U-Boot
+-----------------------------------------------------------------------------------------
+
+Properties / Entry arguments:
+ - filename: Filename of u-boot-br.bin (default 'u-boot-br.bin')
+
+This entry is valid for PowerPC mpc85xx cpus. This entry holds
+'bootpg + resetvec' code for PowerPC mpc85xx CPUs which needs to be
+placed at offset 'RESET_VECTOR_ADDRESS - 0xffc'.
+
+
+
+.. _etype_pre_load:
+
+Entry: pre-load: Pre load image header
+--------------------------------------
+
+Properties / Entry arguments:
+ - pre-load-key-path: Path of the directory that store key (provided by
+ the environment variable PRE_LOAD_KEY_PATH)
+ - content: List of phandles to entries to sign
+ - algo-name: Hash and signature algo to use for the signature
+ - padding-name: Name of the padding (pkcs-1.5 or pss)
+ - key-name: Filename of the private key to sign
+ - header-size: Total size of the header
+ - version: Version of the header
+
+This entry creates a pre-load header that contains a global
+image signature.
+
+For example, this creates an image with a pre-load header and a binary::
+
+ binman {
+ image2 {
+ filename = "sandbox.bin";
+
+ pre-load {
+ content = <&image>;
+ algo-name = "sha256,rsa2048";
+ padding-name = "pss";
+ key-name = "private.pem";
+ header-size = <4096>;
+ version = <1>;
+ };
+
+ image: blob-ext {
+ filename = "sandbox.itb";
+ };
+ };
+ };
+
+
+
+.. _etype_rockchip_tpl:
+
+Entry: rockchip-tpl: Rockchip TPL binary
+----------------------------------------
+
+Properties / Entry arguments:
+ - rockchip-tpl-path: Filename of file to read into the entry,
+ typically <soc>_ddr_<version>.bin
+
+This entry holds an external TPL binary used by some Rockchip SoCs
+instead of normal U-Boot TPL, typically to initialize DRAM.
+
+
+
+.. _etype_scp:
+
+Entry: scp: System Control Processor (SCP) firmware blob
+--------------------------------------------------------
+
+Properties / Entry arguments:
+ - scp-path: Filename of file to read into the entry, typically scp.bin
+
+This entry holds firmware for an external platform-specific coprocessor.
+
+
+
+.. _etype_section:
+
+Entry: section: Entry that contains other entries
+-------------------------------------------------
+
+A section is an entry which can contain other entries, thus allowing
+hierarchical images to be created. See 'Sections and hierarchical images'
+in the binman README for more information.
+
+The base implementation simply joins the various entries together, using
+various rules about alignment, etc.
+
+Subclassing
+~~~~~~~~~~~
+
+This class can be subclassed to support other file formats which hold
+multiple entries, such as CBFS. To do this, override the following
+functions. The documentation here describes what your function should do.
+For example code, see etypes which subclass `Entry_section`, or `cbfs.py`
+for a more involved example::
+
+ $ grep -l \(Entry_section tools/binman/etype/*.py
+
+ReadNode()
+ Call `super().ReadNode()`, then read any special properties for the
+ section. Then call `self.ReadEntries()` to read the entries.
+
+ Binman calls this at the start when reading the image description.
+
+ReadEntries()
+ Read in the subnodes of the section. This may involve creating entries
+ of a particular etype automatically, as well as reading any special
+ properties in the entries. For each entry, entry.ReadNode() should be
+ called, to read the basic entry properties. The properties should be
+ added to `self._entries[]`, in the correct order, with a suitable name.
+
+ Binman calls this at the start when reading the image description.
+
+BuildSectionData(required)
+ Create the custom file format that you want and return it as bytes.
+ This likely sets up a file header, then loops through the entries,
+ adding them to the file. For each entry, call `entry.GetData()` to
+ obtain the data. If that returns None, and `required` is False, then
+ this method must give up and return None. But if `required` is True then
+ it should assume that all data is valid.
+
+ Binman calls this when packing the image, to find out the size of
+ everything. It is called again at the end when building the final image.
+
+SetImagePos(image_pos):
+ Call `super().SetImagePos(image_pos)`, then set the `image_pos` values
+ for each of the entries. This should use the custom file format to find
+ the `start offset` (and `image_pos`) of each entry. If the file format
+ uses compression in such a way that there is no offset available (other
+ than reading the whole file and decompressing it), then the offsets for
+ affected entries can remain unset (`None`). The size should also be set
+ if possible.
+
+ Binman calls this after the image has been packed, to update the
+ location that all the entries ended up at.
+
+ReadChildData(child, decomp, alt_format):
+ The default version of this may be good enough, if you are able to
+ implement SetImagePos() correctly. But that is a bit of a bypass, so
+ you can override this method to read from your custom file format. It
+ should read the entire entry containing the custom file using
+ `super().ReadData(True)`, then parse the file to get the data for the
+ given child, then return that data.
+
+ If your file format supports compression, the `decomp` argument tells
+ you whether to return the compressed data (`decomp` is False) or to
+ uncompress it first, then return the uncompressed data (`decomp` is
+ True). This is used by the `binman extract -U` option.
+
+ If your entry supports alternative formats, the alt_format provides the
+ alternative format that the user has selected. Your function should
+ return data in that format. This is used by the 'binman extract -l'
+ option.
+
+ Binman calls this when reading in an image, in order to populate all the
+ entries with the data from that image (`binman ls`).
+
+WriteChildData(child):
+ Binman calls this after `child.data` is updated, to inform the custom
+ file format about this, in case it needs to do updates.
+
+ The default version of this does nothing and probably needs to be
+ overridden for the 'binman replace' command to work. Your version should
+ use `child.data` to update the data for that child in the custom file
+ format.
+
+ Binman calls this when updating an image that has been read in and in
+ Binman calls this when updating an image that has been read in, in
+ particular to update the data for a particular entry (`binman replace`).
+Properties / Entry arguments
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+See :ref:`develop/package/binman:Image description format` for more
+information.
+
+align-default
+ Default alignment for this section, if no alignment is given in the
+ entry
+
+pad-byte
+ Pad byte to use when padding
+
+sort-by-offset
+ True if entries should be sorted by offset, False if they must be
+ in-order in the device tree description
+
+end-at-4gb
+ Used to build an x86 ROM which ends at 4GB (2^32)
+
+name-prefix
+ Adds a prefix to the name of every entry in the section when writing out
+ the map
+
+skip-at-start
+ Number of bytes before the first entry starts. These effectively adjust
+ the starting offset of entries. For example, if this is 16, then the
+ first entry would start at 16. An entry with offset = 20 would in fact
+ be written at offset 4 in the image file, since the first 16 bytes are
+ skipped when writing.
+
+filename
+ filename to write the unpadded section contents to within the output
+ directory (None to skip this).
+
+Since a section is also an entry, it inherits all the properties of entries
+too.
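+
+As a hedged sketch (the prefix, alignment and pad byte are illustrative
+only), a section using a few of these properties might look like this::
+
+    section {
+        /* illustrative values */
+        name-prefix = "ro-";
+        align-default = <4>;
+        pad-byte = <0xff>;
+
+        u-boot {
+        };
+        u-boot-dtb {
+        };
+    };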
+
+Note that the `allow_missing` member controls whether this section permits
+external blobs to be missing their contents. The option will produce an
+image but of course it will not work. It is useful to make sure that
+Continuous Integration systems can build without the binaries being
+available. This is set by the `SetAllowMissing()` method, if
+`--allow-missing` is passed to binman.
+
+
+
+.. _etype_tee_os:
+
+Entry: tee-os: Entry containing an OP-TEE Trusted OS (TEE) blob
+---------------------------------------------------------------
+
+Properties / Entry arguments:
+ - tee-os-path: Filename of file to read into entry. This is typically
+ called tee.bin or tee.elf
+
+This entry holds the run-time firmware, typically started by U-Boot SPL.
+See the U-Boot README for your architecture or board for how to use it. See
+https://github.com/OP-TEE/optee_os for more information about OP-TEE.
+
+Note that if the file is in ELF format, it must go in a FIT. In that case,
+this entry will mark itself as absent, providing the data only through the
+read_elf_segments() method.
+
+Marking this entry as absent means that if it is used in the wrong context
+it can be automatically dropped. Thus it is possible to add an OP-TEE entry
+like this::
+
+ binman {
+ tee-os {
+ };
+ };
+
+and pass either an ELF or plain binary in with -a tee-os-path <filename>
+and have binman do the right thing:
+
+ - include the entry if tee.bin is provided and it does NOT have the v1
+ header
+ - drop it otherwise
+
+When used within a FIT, we can do::
+
+ binman {
+ fit {
+ tee-os {
+ };
+ };
+ };
+
+which will split the ELF into separate nodes for each segment, if an ELF
+file is provided (see :ref:`etype_fit`), or produce a single node if the
+OP-TEE binary v1 format is provided (see optee_doc_).
+
+.. _optee_doc: https://optee.readthedocs.io/en/latest/architecture/core.html#partitioning-of-the-binary
+
+
+
+.. _etype_text:
+
+Entry: text: An entry which contains text
+-----------------------------------------
+
+The text can be provided either in the node itself or by a command-line
+argument. There is a level of indirection to allow multiple text strings
+and sharing of text.
+
+Properties / Entry arguments:
+ text-label: The value of this string indicates the property / entry-arg
+ that contains the string to place in the entry
+ <xxx> (actual name is the value of text-label): contains the string to
+ place in the entry.
+ <text>: The text to place in the entry (overrides the above mechanism).
+ This is useful when the text is constant.
+
+Example node::
+
+ text {
+ size = <50>;
+ text-label = "message";
+ };
+
+You can then use::
+
+ binman -amessage="this is my message"
+
+and binman will insert that string into the entry.
+
+It is also possible to put the string directly in the node::
+
+ text {
+ size = <8>;
+ text-label = "message";
+ message = "a message directly in the node";
+ };
+
+or just::
+
+ text {
+ size = <8>;
+ text = "some text directly in the node";
+ };
+
+The text is not itself nul-terminated. This can be achieved, if required,
+by setting the size of the entry to something larger than the text.
+
+
+
+.. _etype_ti_board_config:
+
+Entry: ti-board-config: An entry containing a TI schema validated board config binary
+-------------------------------------------------------------------------------------
+
+This etype supports generation of two kinds of board configuration
+binaries: singular board config binary as well as combined board config
+binary.
+
+Properties / Entry arguments:
+ - config-file: File containing board configuration data in YAML
+ - schema-file: File containing board configuration YAML schema against
+ which the config file is validated
+
+Output files:
+ - board config binary: File containing board configuration binary
+
+The above parameters are used only when the generated binary is
+intended to be a single board configuration binary. Example::
+
+ my-ti-board-config {
+ ti-board-config {
+ config = "board-config.yaml";
+ schema = "schema.yaml";
+ };
+ };
+
+To generate a combined board configuration binary, we pack the
+needed individual binaries into a ti-board-config binary. In this case,
+the available supported subnode names are board-cfg, pm-cfg, sec-cfg and
+rm-cfg. The final binary is prepended with a header containing details about
+the included board config binaries. Example::
+
+ my-combined-ti-board-config {
+ ti-board-config {
+ board-cfg {
+ config = "board-cfg.yaml";
+ schema = "schema.yaml";
+ };
+ sec-cfg {
+ config = "sec-cfg.yaml";
+ schema = "schema.yaml";
+ };
+ };
+ };
+
+
+
+.. _etype_ti_dm:
+
+Entry: ti-dm: TI Device Manager (DM) blob
+-----------------------------------------
+
+Properties / Entry arguments:
+ - ti-dm-path: Filename of file to read into the entry, typically ti-dm.bin
+
+This entry holds the device manager responsible for resource and power management
+in K3 devices. See https://software-dl.ti.com/tisci/esd/latest/ for more information
+about TI DM.
+
+
+
+.. _etype_ti_secure:
+
+Entry: ti-secure: Entry containing a TI x509 certificate binary
+---------------------------------------------------------------
+
+Properties / Entry arguments:
+ - content: List of phandles to entries to sign
+ - keyfile: Filename of file containing key to sign binary with
+ - sha: Hash function to be used for signing
+
+Output files:
+ - input.<unique_name> - input file passed to openssl
+ - config.<unique_name> - input file generated for openssl (which is
+ used as the config file)
+ - cert.<unique_name> - output file generated by openssl (which is
+ used as the entry contents)
+
+openssl signs the provided data, using the TI templated config file and
+writes the signature in this entry. This allows verification that the
+data is genuine.
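+
+As a hedged sketch (the key filename, hash size and payload filename are
+illustrative only), a ti-secure entry might look like this::
+
+    ti-secure {
+        content = <&unsecure_binary>;
+        /* illustrative key filename and hash size */
+        keyfile = "custMpk.pem";
+        sha = <384>;
+    };
+
+    unsecure_binary: blob-ext {
+        filename = "unsecure_binary.bin";
+    };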
+
+
+
+.. _etype_ti_secure_rom:
+
+Entry: ti-secure-rom: Entry containing a TI x509 certificate binary for images booted by ROM
+--------------------------------------------------------------------------------------------
+
+Properties / Entry arguments:
+ - keyfile: Filename of file containing key to sign binary with
+ - combined: boolean if device follows combined boot flow
+ - countersign: boolean if device contains countersigned system firmware
+ - load: load address of SPL
+ - sw-rev: software revision
+ - sha: Hash function to be used for signing
+ - core: core on which bootloader runs, valid cores are 'secure' and 'public'
+ - content: phandle of SPL in case of legacy bootflow or phandles of component binaries
+ in case of combined bootflow
+ - core-opts (optional): lockstep (0) or split (2) mode set to 0 by default
+
+The following properties are only for generating a combined bootflow binary:
+ - sysfw-inner-cert: boolean if binary contains sysfw inner certificate
+ - dm-data: boolean if binary contains dm-data binary
+ - content-sbl: phandle of SPL binary
+ - content-sysfw: phandle of sysfw binary
+ - content-sysfw-data: phandle of sysfw-data or tifs-data binary
+ - content-sysfw-inner-cert (optional): phandle of sysfw inner certificate binary
+ - content-dm-data (optional): phandle of dm-data binary
+ - load-sysfw: load address of sysfw binary
+ - load-sysfw-data: load address of sysfw-data or tifs-data binary
+ - load-sysfw-inner-cert (optional): load address of sysfw inner certificate binary
+ - load-dm-data (optional): load address of dm-data binary
+
+Output files:
+ - input.<unique_name> - input file passed to openssl
+ - config.<unique_name> - input file generated for openssl (which is
+ used as the config file)
+ - cert.<unique_name> - output file generated by openssl (which is
+ used as the entry contents)
+
+openssl signs the provided data, using the TI templated config file and
+writes the signature in this entry. This allows verification that the
+data is genuine.
+
+
+
+.. _etype_u_boot:
+
+Entry: u-boot: U-Boot flat binary
+---------------------------------
+
+Properties / Entry arguments:
+ - filename: Filename of u-boot.bin (default 'u-boot.bin')
+
+This is the U-Boot binary, containing relocation information to allow it
+to relocate itself at runtime. The binary typically includes a device tree
+blob at the end of it.
+
+U-Boot can access binman symbols at runtime. See :ref:`binman_fdt`.
+
+Note that this entry is automatically replaced with u-boot-expanded unless
+--no-expanded is used or the node has a 'no-expanded' property.
+
+
+
+.. _etype_u_boot_dtb:
+
+Entry: u-boot-dtb: U-Boot device tree
+-------------------------------------
+
+Properties / Entry arguments:
+ - filename: Filename of u-boot.dtb (default 'u-boot.dtb')
+
+This is the U-Boot device tree, containing configuration information for
+U-Boot. U-Boot needs this to know what devices are present and which drivers
+to activate.
+
+Note: This is mostly an internal entry type, used by others. This allows
+binman to know which entries contain a device tree.
+
+
+
+.. _etype_u_boot_dtb_with_ucode:
+
+Entry: u-boot-dtb-with-ucode: A U-Boot device tree file, with the microcode removed
+-----------------------------------------------------------------------------------
+
+Properties / Entry arguments:
+ - filename: Filename of u-boot.dtb (default 'u-boot.dtb')
+
+See Entry_u_boot_ucode for full details of the three entries involved in
+this process. This entry provides the U-Boot device-tree file, which
+contains the microcode. If the microcode is not being collated into one
+place then the offset and size of the microcode is recorded by this entry,
+for use by u-boot-with-ucode_ptr. If it is being collated, then this
+entry deletes the microcode from the device tree (to save space) and makes
+it available to u-boot-ucode.
+
+
+
+.. _etype_u_boot_elf:
+
+Entry: u-boot-elf: U-Boot ELF image
+-----------------------------------
+
+Properties / Entry arguments:
+ - filename: Filename of u-boot (default 'u-boot')
+
+This is the U-Boot ELF image. It does not include a device tree but can be
+relocated to any address for execution.
+
+
+
+.. _etype_u_boot_env:
+
+Entry: u-boot-env: An entry which contains a U-Boot environment
+---------------------------------------------------------------
+
+Properties / Entry arguments:
+ - filename: File containing the environment text, with each line in the
+ form var=value
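+
+As a hedged sketch (the filename and size are illustrative only), assuming
+an environment text file with one var=value pair per line::
+
+    u-boot-env {
+        /* illustrative filename and size */
+        filename = "environment.txt";
+        size = <0x4000>;
+    };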
+
+
+
+.. _etype_u_boot_expanded:
+
+Entry: u-boot-expanded: U-Boot flat binary broken out into its component parts
+------------------------------------------------------------------------------
+
+This is a section containing the U-Boot binary and a devicetree. Using this
+entry type automatically creates this section, with the following entries
+in it:
+
+ u-boot-nodtb
+ u-boot-dtb
+
+Having the devicetree separate allows binman to update it in the final
+image, so that the entries' positions are provided to the running U-Boot.
+
+
+
+.. _etype_u_boot_img:
+
+Entry: u-boot-img: U-Boot legacy image
+--------------------------------------
+
+Properties / Entry arguments:
+ - filename: Filename of u-boot.img (default 'u-boot.img')
+
+This is the U-Boot binary as a packaged image, in legacy format. It has a
+header which allows it to be loaded at the correct address for execution.
+
+You should use FIT (Flat Image Tree) instead of the legacy image for new
+applications.
+
+
+
+.. _etype_u_boot_nodtb:
+
+Entry: u-boot-nodtb: U-Boot flat binary without device tree appended
+--------------------------------------------------------------------
+
+Properties / Entry arguments:
+ - filename: Filename to include (default 'u-boot-nodtb.bin')
+
+This is the U-Boot binary, containing relocation information to allow it
+to relocate itself at runtime. It does not include a device tree blob at
+the end of it so normally cannot work without it. You can add a u-boot-dtb
+entry after this one, or use a u-boot entry instead, which normally expands
+to a section containing u-boot-nodtb and u-boot-dtb.
+
+
+
+.. _etype_u_boot_spl:
+
+Entry: u-boot-spl: U-Boot SPL binary
+------------------------------------
+
+Properties / Entry arguments:
+ - filename: Filename of u-boot-spl.bin (default 'spl/u-boot-spl.bin')
+
+This is the U-Boot SPL (Secondary Program Loader) binary. This is a small
+binary which loads before U-Boot proper, typically into on-chip SRAM. It is
+responsible for locating, loading and jumping to U-Boot. Note that SPL is
+not relocatable so must be loaded to the correct address in SRAM, or written
+to run from the correct address if direct flash execution is possible (e.g.
+on x86 devices).
+
+SPL can access binman symbols at runtime. See :ref:`binman_fdt` in the
+binman README for more information.
+
+The ELF file 'spl/u-boot-spl' must also be available for this to work, since
+binman uses that to look up symbols to write into the SPL binary.
+
+Note that this entry is automatically replaced with u-boot-spl-expanded
+unless --no-expanded is used or the node has a 'no-expanded' property.
+
+
+
+.. _etype_u_boot_spl_bss_pad:
+
+Entry: u-boot-spl-bss-pad: U-Boot SPL binary padded with a BSS region
+---------------------------------------------------------------------
+
+Properties / Entry arguments:
+ None
+
+This holds the padding added after the SPL binary to cover the BSS (Block
+Started by Symbol) region. This region holds the various variables used by
+SPL. It is set to 0 by SPL when it starts up. If you want to append data to
+the SPL image (such as a device tree file), you must pad out the BSS region
+to avoid the data overlapping with U-Boot variables. This entry is useful in
+that case. It automatically pads out the entry size to cover both the code,
+data and BSS.
+
+The contents of this entry will be a certain number of zero bytes,
+determined by __bss_size.
+
+The ELF file 'spl/u-boot-spl' must also be available for this to work, since
+binman uses that to look up the BSS address.
+
+
+
+.. _etype_u_boot_spl_dtb:
+
+Entry: u-boot-spl-dtb: U-Boot SPL device tree
+---------------------------------------------
+
+Properties / Entry arguments:
+ - filename: Filename of u-boot.dtb (default 'spl/u-boot-spl.dtb')
+
+This is the SPL device tree, containing configuration information for
+SPL. SPL needs this to know what devices are present and which drivers
+to activate.
+
+
+
+.. _etype_u_boot_spl_elf:
+
+Entry: u-boot-spl-elf: U-Boot SPL ELF image
+-------------------------------------------
+
+Properties / Entry arguments:
+ - filename: Filename of SPL u-boot (default 'spl/u-boot-spl')
+
+This is the U-Boot SPL ELF image. It does not include a device tree but can
+be relocated to any address for execution.
+
+
+
+.. _etype_u_boot_spl_expanded:
+
+Entry: u-boot-spl-expanded: U-Boot SPL flat binary broken out into its component parts
+--------------------------------------------------------------------------------------
+
+Properties / Entry arguments:
+ - spl-dtb: Controls whether this entry is selected (set to 'y' or '1' to
+ select)
+
+This is a section containing the U-Boot binary, BSS padding if needed and a
+devicetree. Using this entry type automatically creates this section, with
+the following entries in it:
+
+ u-boot-spl-nodtb
+ u-boot-spl-bss-pad
+ u-boot-dtb
+
+Having the devicetree separate allows binman to update it in the final
+image, so that the entries' positions are provided to the running U-Boot.
+
+This entry is selected based on the value of the 'spl-dtb' entryarg. If
+this is non-empty (and not 'n' or '0') then this expanded entry is selected.
+
+
+
+.. _etype_u_boot_spl_nodtb:
+
+Entry: u-boot-spl-nodtb: SPL binary without device tree appended
+----------------------------------------------------------------
+
+Properties / Entry arguments:
+ - filename: Filename to include (default 'spl/u-boot-spl-nodtb.bin')
+
+This is the U-Boot SPL binary. It does not include a device tree blob at
+the end, so it may not be able to work on its own if SPL needs a device
+tree to operate on your platform. You can add a u-boot-spl-dtb entry after
+this one, or use a u-boot-spl entry instead, which normally expands to a
+section containing u-boot-spl-nodtb, u-boot-spl-bss-pad and
+u-boot-spl-dtb.
+
+SPL can access binman symbols at runtime. See :ref:`binman_fdt` in the
+binman README for more information.
+
+The ELF file 'spl/u-boot-spl' must also be available for this to work, since
+binman uses that to look up symbols to write into the SPL binary.
+
+
+
+.. _etype_u_boot_spl_pubkey_dtb:
+
+Entry: u-boot-spl-pubkey-dtb: U-Boot SPL device tree including public key
+-------------------------------------------------------------------------
+
+Properties / Entry arguments:
+ - key-name-hint: Public key name without extension (.crt).
+ Default is determined by underlying
+ bintool (fdt_add_pubkey), usually 'key'.
+ - algo: (Optional) Algorithm used for signing. Default is determined by
+ underlying bintool (fdt_add_pubkey), usually 'sha1,rsa2048'
+ - required: (Optional) If present this indicates that the key must be
+ verified for the image / configuration to be
+ considered valid
+
+The following example shows an image containing an SPL which
+is packed together with the dtb. Binman will add a signature
+node to the dtb.
+
+Example node::
+
+ image {
+ ...
+ spl {
+ filename = "spl.bin";
+
+ u-boot-spl-nodtb {
+ };
+ u-boot-spl-pubkey-dtb {
+ algo = "sha384,rsa4096";
+ required = "conf";
+ key-name-hint = "dev";
+ };
+ };
+ ...
+ };
+
+
+
+.. _etype_u_boot_spl_with_ucode_ptr:
+
+Entry: u-boot-spl-with-ucode-ptr: U-Boot SPL with embedded microcode pointer
+----------------------------------------------------------------------------
+
+This is used when SPL must set up the microcode for U-Boot.
+
+See Entry_u_boot_ucode for full details of the entries involved in this
+process.
+
+
+
+.. _etype_u_boot_tpl:
+
+Entry: u-boot-tpl: U-Boot TPL binary
+------------------------------------
+
+Properties / Entry arguments:
+ - filename: Filename of u-boot-tpl.bin (default 'tpl/u-boot-tpl.bin')
+
+This is the U-Boot TPL (Tertiary Program Loader) binary. This is a small
+binary which loads before SPL, typically into on-chip SRAM. It is
+responsible for locating, loading and jumping to SPL, the next-stage
+loader. Note that SPL is not relocatable so must be loaded to the correct
+address in SRAM, or written to run from the correct address if direct
+flash execution is possible (e.g. on x86 devices).
+
+TPL can access binman symbols at runtime. See :ref:`binman_fdt` in the
+binman README for more information.
+
+The ELF file 'tpl/u-boot-tpl' must also be available for this to work, since
+binman uses that to look up symbols to write into the TPL binary.
+
+Note that this entry is automatically replaced with u-boot-tpl-expanded
+unless --no-expanded is used or the node has a 'no-expanded' property.
+
+
+
+.. _etype_u_boot_tpl_bss_pad:
+
+Entry: u-boot-tpl-bss-pad: U-Boot TPL binary padded with a BSS region
+---------------------------------------------------------------------
+
+Properties / Entry arguments:
+ None
+
+This holds the padding added after the TPL binary to cover the BSS (Block
+Started by Symbol) region. This region holds the various variables used by
+TPL. It is set to 0 by TPL when it starts up. If you want to append data to
+the TPL image (such as a device tree file), you must pad out the BSS region
+to avoid the data overlapping with U-Boot variables. This entry is useful in
+that case. It automatically pads out the entry size to cover the code,
+data and BSS.
+
+The contents of this entry will be a certain number of zero bytes,
+determined by __bss_size.
+
+The ELF file 'tpl/u-boot-tpl' must also be available for this to work, since
+binman uses that to look up the BSS address.
+
+
+
+.. _etype_u_boot_tpl_dtb:
+
+Entry: u-boot-tpl-dtb: U-Boot TPL device tree
+---------------------------------------------
+
+Properties / Entry arguments:
+ - filename: Filename of u-boot.dtb (default 'tpl/u-boot-tpl.dtb')
+
+This is the TPL device tree, containing configuration information for
+TPL. TPL needs this to know what devices are present and which drivers
+to activate.
+
+
+
+.. _etype_u_boot_tpl_dtb_with_ucode:
+
+Entry: u-boot-tpl-dtb-with-ucode: U-Boot TPL with embedded microcode pointer
+----------------------------------------------------------------------------
+
+This is used when TPL must set up the microcode for U-Boot.
+
+See Entry_u_boot_ucode for full details of the entries involved in this
+process.
+
+
+
+.. _etype_u_boot_tpl_elf:
+
+Entry: u-boot-tpl-elf: U-Boot TPL ELF image
+-------------------------------------------
+
+Properties / Entry arguments:
+ - filename: Filename of TPL u-boot (default 'tpl/u-boot-tpl')
+
+This is the U-Boot TPL ELF image. It does not include a device tree but can
+be relocated to any address for execution.
+
+
+
+.. _etype_u_boot_tpl_expanded:
+
+Entry: u-boot-tpl-expanded: U-Boot TPL flat binary broken out into its component parts
+--------------------------------------------------------------------------------------
+
+Properties / Entry arguments:
+ - tpl-dtb: Controls whether this entry is selected (set to 'y' or '1' to
+ select)
+
+This is a section containing the U-Boot TPL binary, BSS padding if needed
+and a devicetree. Using this entry type automatically creates this section,
+with the following entries in it:
+
+    u-boot-tpl-nodtb
+    u-boot-tpl-bss-pad
+    u-boot-tpl-dtb
+
+Having the devicetree separate allows binman to update it in the final
+image, so that the entries' positions are provided to the running U-Boot.
+
+This entry is selected based on the value of the 'tpl-dtb' entryarg. If
+this is non-empty (and not 'n' or '0') then this expanded entry is selected.
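+
+For example (a hypothetical invocation, other options omitted), the entryarg
+can be passed on the binman command line::
+
+    binman build ... -a tpl-dtb=1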
+
+
+
+.. _etype_u_boot_tpl_nodtb:
+
+Entry: u-boot-tpl-nodtb: TPL binary without device tree appended
+----------------------------------------------------------------
+
+Properties / Entry arguments:
+ - filename: Filename to include (default 'tpl/u-boot-tpl-nodtb.bin')
+
+This is the U-Boot TPL binary. It does not include a device tree blob at
+the end of it, so it may not work on its own if TPL needs a device tree to
+operate on your platform. You can add a u-boot-tpl-dtb entry after this
+one, or use a u-boot-tpl entry instead, which normally expands to a section
+containing u-boot-tpl-nodtb, u-boot-tpl-bss-pad and u-boot-tpl-dtb.
+
+TPL can access binman symbols at runtime. See :ref:`binman_fdt` for more
+information.
+
+The ELF file 'tpl/u-boot-tpl' must also be available for this to work, since
+binman uses that to look up symbols to write into the TPL binary.
+
+
+
+.. _etype_u_boot_tpl_with_ucode_ptr:
+
+Entry: u-boot-tpl-with-ucode-ptr: U-Boot TPL with embedded microcode pointer
+----------------------------------------------------------------------------
+
+See Entry_u_boot_ucode for full details of the entries involved in this
+process.
+
+
+
+.. _etype_u_boot_ucode:
+
+Entry: u-boot-ucode: U-Boot microcode block
+-------------------------------------------
+
+Properties / Entry arguments:
+ None
+
+The contents of this entry are filled in automatically by other entries
+which must also be in the image.
+
+U-Boot on x86 needs a single block of microcode. This is collected from
+the various microcode update nodes in the device tree. It is also unable
+to read the microcode from the device tree on platforms that use FSP
+(Firmware Support Package) binaries, because the API requires that the
+microcode is supplied before there is any SRAM available to use (i.e.
+the FSP sets up the SRAM / cache-as-RAM but does so in the call that
+requires the microcode!). To keep things simple, all x86 platforms handle
+microcode the same way in U-Boot (even non-FSP platforms): a table is placed
+at _dt_ucode_base_size containing the base address and
+size of the microcode. This is either passed to the FSP (for FSP
+platforms), or used to set up the microcode (for non-FSP platforms).
+This all happens in the build system since it is the only way to get
+the microcode into a single blob and accessible without SRAM.
+
+There are two cases to handle. If there is only one microcode blob in
+the device tree, then the ucode pointer is set to point to that. This
+entry (u-boot-ucode) is empty. If there is more than one update, then
+this entry holds the concatenation of all updates, and the device tree
+entry (u-boot-dtb-with-ucode) is updated to remove the microcode. This
+last step ensures that the microcode appears in one contiguous
+block in the image and is not unnecessarily duplicated in the device
+tree. It is referred to as 'collation' here.
+
+Entry types that have a part to play in handling microcode:
+
+ Entry_u_boot_with_ucode_ptr:
+ Contains u-boot-nodtb.bin (i.e. U-Boot without the device tree).
+ It updates it with the address and size of the microcode so that
+ U-Boot can find it early on start-up.
+ Entry_u_boot_dtb_with_ucode:
+ Contains u-boot.dtb. It stores the microcode in a
+ 'self.ucode_data' property, which is then read by this class to
+ obtain the microcode if needed. If collation is performed, it
+ removes the microcode from the device tree.
+ Entry_u_boot_ucode:
+ This class. If collation is enabled it reads the microcode from
+ the Entry_u_boot_dtb_with_ucode entry, and uses it as the
+ contents of this entry.
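+
+As a rough sketch (node layout simplified), the entry types above appear
+together in an x86 image definition like this::
+
+    image {
+        u-boot-with-ucode-ptr {
+        };
+        u-boot-dtb-with-ucode {
+        };
+        u-boot-ucode {
+        };
+    };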
+
+
+
+.. _etype_u_boot_vpl:
+
+Entry: u-boot-vpl: U-Boot VPL binary
+------------------------------------
+
+Properties / Entry arguments:
+ - filename: Filename of u-boot-vpl.bin (default 'vpl/u-boot-vpl.bin')
+
+This is the U-Boot VPL (Verifying Program Loader) binary. This is a small
+binary which loads before SPL, typically into on-chip SRAM. It is
+responsible for locating, loading and jumping to SPL, the next-stage
+loader. Note that VPL is not relocatable so must be loaded to the correct
+address in SRAM, or written to run from the correct address if direct
+flash execution is possible (e.g. on x86 devices).
+
+VPL can access binman symbols at runtime. See :ref:`binman_fdt` for more
+information.
+
+The ELF file 'vpl/u-boot-vpl' must also be available for this to work, since
+binman uses that to look up symbols to write into the VPL binary.
+
+
+
+.. _etype_u_boot_vpl_bss_pad:
+
+Entry: u-boot-vpl-bss-pad: U-Boot VPL binary padded with a BSS region
+---------------------------------------------------------------------
+
+Properties / Entry arguments:
+ None
+
+This holds the padding added after the VPL binary to cover the BSS (Block
+Started by Symbol) region. This region holds the various variables used by
+VPL. It is set to 0 by VPL when it starts up. If you want to append data to
+the VPL image (such as a device tree file), you must pad out the BSS region
+to avoid the data overlapping with U-Boot variables. This entry is useful in
+that case. It automatically pads out the entry size to cover the code,
+data and BSS.
+
+The contents of this entry will be a certain number of zero bytes,
+determined by __bss_size.
+
+The ELF file 'vpl/u-boot-vpl' must also be available for this to work, since
+binman uses that to look up the BSS address.
+
+
+
+.. _etype_u_boot_vpl_dtb:
+
+Entry: u-boot-vpl-dtb: U-Boot VPL device tree
+---------------------------------------------
+
+Properties / Entry arguments:
+ - filename: Filename of u-boot.dtb (default 'vpl/u-boot-vpl.dtb')
+
+This is the VPL device tree, containing configuration information for
+VPL. VPL needs this to know what devices are present and which drivers
+to activate.
+
+
+
+.. _etype_u_boot_vpl_elf:
+
+Entry: u-boot-vpl-elf: U-Boot VPL ELF image
+-------------------------------------------
+
+Properties / Entry arguments:
+ - filename: Filename of VPL u-boot (default 'vpl/u-boot-vpl')
+
+This is the U-Boot VPL ELF image. It does not include a device tree but can
+be relocated to any address for execution.
+
+
+
+.. _etype_u_boot_vpl_expanded:
+
+Entry: u-boot-vpl-expanded: U-Boot VPL flat binary broken out into its component parts
+--------------------------------------------------------------------------------------
+
+Properties / Entry arguments:
+ - vpl-dtb: Controls whether this entry is selected (set to 'y' or '1' to
+ select)
+
+This is a section containing the U-Boot VPL binary, BSS padding if needed
+and a devicetree. Using this entry type automatically creates this section,
+with the following entries in it:
+
+    u-boot-vpl-nodtb
+    u-boot-vpl-bss-pad
+    u-boot-vpl-dtb
+
+Having the devicetree separate allows binman to update it in the final
+image, so that the entries' positions are provided to the running U-Boot.
+
+This entry is selected based on the value of the 'vpl-dtb' entryarg. If
+this is non-empty (and not 'n' or '0') then this expanded entry is selected.
+
+
+
+.. _etype_u_boot_vpl_nodtb:
+
+Entry: u-boot-vpl-nodtb: VPL binary without device tree appended
+----------------------------------------------------------------
+
+Properties / Entry arguments:
+ - filename: Filename to include (default 'vpl/u-boot-vpl-nodtb.bin')
+
+This is the U-Boot VPL binary. It does not include a device tree blob at
+the end of it, so it may not work on its own if VPL needs a device tree to
+operate on your platform. You can add a u-boot-vpl-dtb entry after this
+one, or use a u-boot-vpl entry instead, which normally expands to a section
+containing u-boot-vpl-nodtb, u-boot-vpl-bss-pad and u-boot-vpl-dtb.
+
+VPL can access binman symbols at runtime. See :ref:`binman_fdt`.
+
+The ELF file 'vpl/u-boot-vpl' must also be available for this to work, since
+binman uses that to look up symbols to write into the VPL binary.
+
+
+
+.. _etype_u_boot_with_ucode_ptr:
+
+Entry: u-boot-with-ucode-ptr: U-Boot with embedded microcode pointer
+--------------------------------------------------------------------
+
+Properties / Entry arguments:
+ - filename: Filename of u-boot-nodtb.bin (default 'u-boot-nodtb.bin')
+ - optional-ucode: boolean property to make microcode optional. If the
+ u-boot.bin image does not include microcode, no error will
+ be generated.
+
+See Entry_u_boot_ucode for full details of the three entries involved in
+this process. This entry updates U-Boot with the offset and size of the
+microcode, to allow early x86 boot code to find it without doing anything
+complicated. Otherwise it is the same as the u-boot entry.
+
+
+
+.. _etype_vblock:
+
+Entry: vblock: An entry which contains a Chromium OS verified boot block
+------------------------------------------------------------------------
+
+Properties / Entry arguments:
+ - content: List of phandles to entries to sign
+ - keydir: Directory containing the public keys to use
+ - keyblock: Name of the key file to use (inside keydir)
+    - signprivate: Name of private key file to use (inside keydir)
+ - version: Version number of the vblock (typically 1)
+ - kernelkey: Name of the kernel key to use (inside keydir)
+ - preamble-flags: Value of the vboot preamble flags (typically 0)
+
+Output files:
+ - input.<unique_name> - input file passed to futility
+ - vblock.<unique_name> - output file generated by futility (which is
+ used as the entry contents)
+
+Chromium OS signs the read-write firmware and kernel, writing the signature
+in this block. This allows U-Boot to verify that the next firmware stage
+and kernel are genuine.
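+
+A rough example node (the phandle labels and key filenames are illustrative)::
+
+    vblock {
+        content = <&u_boot &u_boot_dtb>;
+        keydir = "devkeys";
+        keyblock = "firmware.keyblock";
+        signprivate = "firmware_data_key.vbprivk";
+        version = <1>;
+        kernelkey = "kernel_subkey.vbpubk";
+        preamble-flags = <0>;
+    };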
+
+
+
+.. _etype_x509_cert:
+
+Entry: x509-cert: An entry which contains an X509 certificate
+-------------------------------------------------------------
+
+Properties / Entry arguments:
+ - content: List of phandles to entries to sign
+
+Output files:
+ - input.<unique_name> - input file passed to openssl
+ - cert.<unique_name> - output file generated by openssl (which is
+ used as the entry contents)
+
+openssl signs the provided data, writing the signature in this entry. This
+allows verification that the data is genuine.
+
+
+
+.. _etype_x86_reset16:
+
+Entry: x86-reset16: x86 16-bit reset code for U-Boot
+----------------------------------------------------
+
+Properties / Entry arguments:
+ - filename: Filename of u-boot-x86-reset16.bin (default
+ 'u-boot-x86-reset16.bin')
+
+x86 CPUs start up in 16-bit mode, even if they are 32-bit CPUs. This code
+must be placed at a particular address. This entry holds that code. It is
+typically placed at offset CONFIG_RESET_VEC_LOC. The code is responsible
+for jumping to the x86-start16 code, which continues execution.
+
+For 64-bit U-Boot, the 'x86-reset16-spl' entry type is used instead.
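+
+As a rough sketch (the offset shown is made up and normally comes from
+CONFIG_RESET_VEC_LOC), the entry might appear near the end of the image::
+
+    x86-reset16 {
+        offset = <0xfffff0>;
+    };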
+
+
+
+.. _etype_x86_reset16_spl:
+
+Entry: x86-reset16-spl: x86 16-bit reset code for U-Boot
+--------------------------------------------------------
+
+Properties / Entry arguments:
+ - filename: Filename of u-boot-x86-reset16.bin (default
+ 'u-boot-x86-reset16.bin')
+
+x86 CPUs start up in 16-bit mode, even if they are 32-bit CPUs. This code
+must be placed at a particular address. This entry holds that code. It is
+typically placed at offset CONFIG_RESET_VEC_LOC. The code is responsible
+for jumping to the x86-start16 code, which continues execution.
+
+For 32-bit U-Boot, the 'x86-reset16' entry type is used instead.
+
+
+
+.. _etype_x86_reset16_tpl:
+
+Entry: x86-reset16-tpl: x86 16-bit reset code for U-Boot
+--------------------------------------------------------
+
+Properties / Entry arguments:
+ - filename: Filename of u-boot-x86-reset16.bin (default
+ 'u-boot-x86-reset16.bin')
+
+x86 CPUs start up in 16-bit mode, even if they are 32-bit CPUs. This code
+must be placed at a particular address. This entry holds that code. It is
+typically placed at offset CONFIG_RESET_VEC_LOC. The code is responsible
+for jumping to the x86-start16 code, which continues execution.
+
+If TPL is not being used, the 'x86-reset16-spl' or 'x86-reset16' entry types
+may be used instead.
+
+
+
+.. _etype_x86_start16:
+
+Entry: x86-start16: x86 16-bit start-up code for U-Boot
+-------------------------------------------------------
+
+Properties / Entry arguments:
+ - filename: Filename of u-boot-x86-start16.bin (default
+ 'u-boot-x86-start16.bin')
+
+x86 CPUs start up in 16-bit mode, even if they are 32-bit CPUs. This code
+must be placed in the top 64KB of the ROM. The reset code jumps to it. This
+entry holds that code. It is typically placed at offset
+CONFIG_SYS_X86_START16. The code is responsible for changing to 32-bit mode
+and jumping to U-Boot's entry point, which requires 32-bit mode (for 32-bit
+U-Boot).
+
+For 64-bit U-Boot, the 'x86-start16-spl' entry type is used instead.
+
+
+
+.. _etype_x86_start16_spl:
+
+Entry: x86-start16-spl: x86 16-bit start-up code for SPL
+--------------------------------------------------------
+
+Properties / Entry arguments:
+ - filename: Filename of spl/u-boot-x86-start16-spl.bin (default
+ 'spl/u-boot-x86-start16-spl.bin')
+
+x86 CPUs start up in 16-bit mode, even if they are 32-bit CPUs. This code
+must be placed in the top 64KB of the ROM. The reset code jumps to it. This
+entry holds that code. It is typically placed at offset
+CONFIG_SYS_X86_START16. The code is responsible for changing to 32-bit mode
+and jumping to U-Boot's entry point, which requires 32-bit mode (for 32-bit
+U-Boot).
+
+For 32-bit U-Boot, the 'x86-start16' entry type is used instead.
+
+
+
+.. _etype_x86_start16_tpl:
+
+Entry: x86-start16-tpl: x86 16-bit start-up code for TPL
+--------------------------------------------------------
+
+Properties / Entry arguments:
+ - filename: Filename of tpl/u-boot-x86-start16-tpl.bin (default
+ 'tpl/u-boot-x86-start16-tpl.bin')
+
+x86 CPUs start up in 16-bit mode, even if they are 32-bit CPUs. This code
+must be placed in the top 64KB of the ROM. The reset code jumps to it. This
+entry holds that code. It is typically placed at offset
+CONFIG_SYS_X86_START16. The code is responsible for changing to 32-bit mode
+and jumping to U-Boot's entry point, which requires 32-bit mode (for 32-bit
+U-Boot).
+
+If TPL is not being used, the 'x86-start16-spl' or 'x86-start16' entry types
+may be used instead.
+
+
+
+.. _etype_xilinx_bootgen:
+
+Entry: xilinx-bootgen: Signed SPL boot image for Xilinx ZynqMP devices
+----------------------------------------------------------------------
+
+Properties / Entry arguments:
+ - auth-params: (Optional) Authentication parameters passed to bootgen
+ - fsbl-config: (Optional) FSBL parameters passed to bootgen
+ - keysrc-enc: (Optional) Key source when using decryption engine
+ - pmufw-filename: Filename of PMU firmware. Default: pmu-firmware.elf
+    - psk-key-name-hint: Name of primary secret key to use for signing the
+      secondary public key. Format: .pem file
+    - ssk-key-name-hint: Name of secondary secret key to use for signing
+      the boot image. Format: .pem file
+
+This etype is used to create a boot image for Xilinx ZynqMP devices.
+
+Information for signed images:
+
+In AMD/Xilinx SoCs, two pairs of public and secret keys are used
+- primary and secondary. The function of the primary public/secret key pair
+is to authenticate the secondary public/secret key pair.
+The function of the secondary key is to sign/verify the boot image. [1]
+
+AMD/Xilinx uses the following terms for private/public keys [1]:
+
+ PSK = Primary Secret Key (Used to sign Secondary Public Key)
+ PPK = Primary Public Key (Used to verify Secondary Public Key)
+ SSK = Secondary Secret Key (Used to sign the boot image/partitions)
+    SPK = Secondary Public Key (Used to verify the boot image/partitions)
+
+The following example builds a signed boot image. The fuses for the primary
+public key (PPK) hash should be programmed together with the RSA_EN flag.
+
+Example node::
+
+ spl {
+ filename = "boot.signed.bin";
+
+ xilinx-bootgen {
+ psk-key-name-hint = "psk0";
+ ssk-key-name-hint = "ssk0";
+ auth-params = "ppk_select=0", "spk_id=0x00000000";
+
+ u-boot-spl-nodtb {
+ };
+ u-boot-spl-pubkey-dtb {
+ algo = "sha384,rsa4096";
+ required = "conf";
+ key-name-hint = "dev";
+ };
+ };
+ };
+
+For testing purposes, e.g. if RSA_EN should not be fused, the
+"bh_auth_enable" flag can be added to the fsbl-config field. This skips
+verification of the PPK fuses and boots the image even if the PPK hash is
+invalid.
+
+Example node::
+
+    xilinx-bootgen {
+        psk-key-name-hint = "psk0";
+        ssk-key-name-hint = "ssk0";
+        ...
+        fsbl-config = "bh_auth_enable";
+        ...
+    };
+
+[1] https://docs.xilinx.com/r/en-US/ug1283-bootgen-user-guide/Using-Authentication
+
+
+
+
diff --git a/tools/binman/entry.py b/tools/binman/entry.py
new file mode 100644
index 00000000000..42e0b7b9145
--- /dev/null
+++ b/tools/binman/entry.py
@@ -0,0 +1,1384 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+#
+# Base class for all entries
+#
+
+from collections import namedtuple
+import importlib
+import os
+import pathlib
+import sys
+import time
+
+from binman import bintool
+from binman import elf
+from dtoc import fdt_util
+from u_boot_pylib import tools
+from u_boot_pylib.tools import to_hex, to_hex_size
+from u_boot_pylib import tout
+
+modules = {}
+
+# This is imported if needed
+state = None
+
+# An argument which can be passed to entries on the command line, in lieu of
+# device-tree properties.
+EntryArg = namedtuple('EntryArg', ['name', 'datatype'])
+
+# Information about an entry for use when displaying summaries
+EntryInfo = namedtuple('EntryInfo', ['indent', 'name', 'etype', 'size',
+ 'image_pos', 'uncomp_size', 'offset',
+ 'entry'])
+
+class Entry(object):
+ """An Entry in the section
+
+ An entry corresponds to a single node in the device-tree description
+ of the section. Each entry ends up being a part of the final section.
+ Entries can be placed either right next to each other, or with padding
+ between them. The type of the entry determines the data that is in it.
+
+ This class is not used by itself. All entry objects are subclasses of
+ Entry.
+
+ Attributes:
+ section: Section object containing this entry
+ node: The node that created this entry
+ offset: Offset of entry within the section, None if not known yet (in
+ which case it will be calculated by Pack())
+ size: Entry size in bytes, None if not known
+ min_size: Minimum entry size in bytes
+ pre_reset_size: size as it was before ResetForPack(). This allows us to
+ keep track of the size we started with and detect size changes
+ uncomp_size: Size of uncompressed data in bytes, if the entry is
+ compressed, else None
+ contents_size: Size of contents in bytes, 0 by default
+ align: Entry start offset alignment relative to the start of the
+ containing section, or None
+ align_size: Entry size alignment, or None
+ align_end: Entry end offset alignment relative to the start of the
+ containing section, or None
+ pad_before: Number of pad bytes before the contents when it is placed
+ in the containing section, 0 if none. The pad bytes become part of
+ the entry.
+ pad_after: Number of pad bytes after the contents when it is placed in
+ the containing section, 0 if none. The pad bytes become part of
+ the entry.
+ data: Contents of entry (string of bytes). This does not include
+ padding created by pad_before or pad_after. If the entry is
+ compressed, this contains the compressed data.
+ uncomp_data: Original uncompressed data, if this entry is compressed,
+ else None
+        compress: Compression algorithm used (e.g. 'lz4'), 'none' if none
+ orig_offset: Original offset value read from node
+ orig_size: Original size value read from node
+ missing: True if this entry is missing its contents. Note that if it is
+ optional, this entry will not appear in the list generated by
+ entry.CheckMissing() since it is considered OK for it to be missing.
+ allow_missing: Allow children of this entry to be missing (used by
+ subclasses such as Entry_section)
+ allow_fake: Allow creating a dummy fake file if the blob file is not
+ available. This is mainly used for testing.
+ external: True if this entry contains an external binary blob
+ bintools: Bintools used by this entry (only populated for Image)
+ missing_bintools: List of missing bintools for this entry
+ update_hash: True if this entry's "hash" subnode should be
+ updated with a hash of the entry contents
+ comp_bintool: Bintools used for compress and decompress data
+ fake_fname: Fake filename, if one was created, else None
+ required_props (dict of str): Properties which must be present. This can
+ be added to by subclasses
+ elf_fname (str): Filename of the ELF file, if this entry holds an ELF
+ file, or is a binary file produced from an ELF file
+ auto_write_symbols (bool): True to write ELF symbols into this entry's
+ contents
+ absent (bool): True if this entry is absent. This can be controlled by
+ the entry itself, allowing it to vanish in certain circumstances.
+ An absent entry is removed during processing so that it does not
+ appear in the map
+ optional (bool): True if this entry contains an optional external blob
+ overlap (bool): True if this entry overlaps with others
+ preserve (bool): True if this entry should be preserved when updating
+ firmware. This means that it will not be changed by the update.
+ This is just a signal: enforcement of this is up to the updater.
+ This flag does not automatically propagate down to child entries.
+ build_done (bool): Indicates that the entry data has been built and does
+ not need to be done again. This is only used with 'binman replace',
+ to stop sections from being rebuilt if their entries have not been
+ replaced
+ """
+ fake_dir = None
+
+ def __init__(self, section, etype, node, name_prefix='',
+ auto_write_symbols=False):
+ # Put this here to allow entry-docs and help to work without libfdt
+ global state
+ from binman import state
+
+ self.section = section
+ self.etype = etype
+ self._node = node
+ self.name = node and (name_prefix + node.name) or 'none'
+ self.offset = None
+ self.size = None
+ self.min_size = 0
+ self.pre_reset_size = None
+ self.uncomp_size = None
+ self.data = None
+ self.uncomp_data = None
+ self.contents_size = 0
+ self.align = None
+ self.align_size = None
+ self.align_end = None
+ self.pad_before = 0
+ self.pad_after = 0
+ self.offset_unset = False
+ self.image_pos = None
+ self.extend_size = False
+ self.compress = 'none'
+ self.missing = False
+ self.faked = False
+ self.external = False
+ self.allow_missing = False
+ self.allow_fake = False
+ self.bintools = {}
+ self.missing_bintools = []
+ self.update_hash = True
+ self.fake_fname = None
+ self.required_props = []
+ self.comp_bintool = None
+ self.elf_fname = None
+ self.auto_write_symbols = auto_write_symbols
+ self.absent = False
+ self.optional = False
+ self.overlap = False
+ self.elf_base_sym = None
+ self.offset_from_elf = None
+ self.preserve = False
+ self.build_done = False
+ self.no_write_symbols = False
+
+ @staticmethod
+ def FindEntryClass(etype, expanded):
+ """Look up the entry class for a node.
+
+ Args:
+ etype: Entry type to use
+ expanded: Use the expanded version of etype
+
+ Returns:
+ The entry class object if found, else None if not found and expanded
+ is True, else a tuple:
+ module name that could not be found
+ exception received
+ """
+ # Convert something like 'u-boot@0' to 'u_boot' since we are only
+ # interested in the type.
+ module_name = etype.replace('-', '_')
+
+ if '@' in module_name:
+ module_name = module_name.split('@')[0]
+ if expanded:
+ module_name += '_expanded'
+ module = modules.get(module_name)
+
+ # Also allow entry-type modules to be brought in from the etype directory.
+
+ # Import the module if we have not already done so.
+ if not module:
+ try:
+ module = importlib.import_module('binman.etype.' + module_name)
+ except ImportError as e:
+ if expanded:
+ return None
+ return module_name, e
+ modules[module_name] = module
+
+ # Look up the expected class name
+ return getattr(module, 'Entry_%s' % module_name)
+
+ @staticmethod
+ def Lookup(node_path, etype, expanded, missing_etype=False):
+ """Look up the entry class for a node.
+
+ Args:
+            node_path (str): Path name of Node object containing information
+ about the entry to create (used for errors)
+ etype (str): Entry type to use
+ expanded (bool): Use the expanded version of etype
+ missing_etype (bool): True to default to a blob etype if the
+ requested etype is not found
+
+ Returns:
+ The entry class object if found, else None if not found and expanded
+ is True
+
+ Raise:
+ ValueError if expanded is False and the class is not found
+ """
+ # Convert something like 'u-boot@0' to 'u_boot' since we are only
+ # interested in the type.
+ cls = Entry.FindEntryClass(etype, expanded)
+ if cls is None:
+ return None
+ elif isinstance(cls, tuple):
+ if missing_etype:
+ cls = Entry.FindEntryClass('blob', False)
+ if isinstance(cls, tuple): # This should not fail
+ module_name, e = cls
+ raise ValueError(
+                    "Unknown entry type '%s' in node '%s' (expected etype/%s.py, error '%s')" %
+ (etype, node_path, module_name, e))
+ return cls
+
+ @staticmethod
+ def Create(section, node, etype=None, expanded=False, missing_etype=False):
+ """Create a new entry for a node.
+
+ Args:
+ section (entry_Section): Section object containing this node
+ node (Node): Node object containing information about the entry to
+ create
+ etype (str): Entry type to use, or None to work it out (used for
+ tests)
+ expanded (bool): Use the expanded version of etype
+ missing_etype (bool): True to default to a blob etype if the
+ requested etype is not found
+
+ Returns:
+ A new Entry object of the correct type (a subclass of Entry)
+ """
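+        # For example, a node named 'u-boot' with no 'type' property normally
+        # creates an Entry_u_boot object, or Entry_u_boot_expanded when
+        # expansion applies and the node has no 'no-expanded' property.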
+ if not etype:
+ etype = fdt_util.GetString(node, 'type', node.name)
+ obj = Entry.Lookup(node.path, etype, expanded, missing_etype)
+ if obj and expanded:
+ # Check whether to use the expanded entry
+ new_etype = etype + '-expanded'
+ can_expand = not fdt_util.GetBool(node, 'no-expanded')
+ if can_expand and obj.UseExpanded(node, etype, new_etype):
+ etype = new_etype
+ else:
+ obj = None
+ if not obj:
+ obj = Entry.Lookup(node.path, etype, False, missing_etype)
+
+ # Call its constructor to get the object we want.
+ return obj(section, etype, node)
+
+ def ReadNode(self):
+ """Read entry information from the node
+
+ This must be called as the first thing after the Entry is created.
+
+ This reads all the fields we recognise from the node, ready for use.
+ """
+ self.ensure_props()
+ if 'pos' in self._node.props:
+ self.Raise("Please use 'offset' instead of 'pos'")
+ if 'expand-size' in self._node.props:
+ self.Raise("Please use 'extend-size' instead of 'expand-size'")
+ self.offset = fdt_util.GetInt(self._node, 'offset')
+ self.size = fdt_util.GetInt(self._node, 'size')
+ self.min_size = fdt_util.GetInt(self._node, 'min-size', 0)
+ self.orig_offset = fdt_util.GetInt(self._node, 'orig-offset')
+ self.orig_size = fdt_util.GetInt(self._node, 'orig-size')
+ if self.GetImage().copy_to_orig:
+ self.orig_offset = self.offset
+ self.orig_size = self.size
+
+ # These should not be set in input files, but are set in an FDT map,
+ # which is also read by this code.
+ self.image_pos = fdt_util.GetInt(self._node, 'image-pos')
+ self.uncomp_size = fdt_util.GetInt(self._node, 'uncomp-size')
+
+ self.align = fdt_util.GetInt(self._node, 'align')
+ if tools.not_power_of_two(self.align):
+ raise ValueError("Node '%s': Alignment %s must be a power of two" %
+ (self._node.path, self.align))
+ if self.section and self.align is None:
+ self.align = self.section.align_default
+ self.pad_before = fdt_util.GetInt(self._node, 'pad-before', 0)
+ self.pad_after = fdt_util.GetInt(self._node, 'pad-after', 0)
+ self.align_size = fdt_util.GetInt(self._node, 'align-size')
+ if tools.not_power_of_two(self.align_size):
+ self.Raise("Alignment size %s must be a power of two" %
+ self.align_size)
+ self.align_end = fdt_util.GetInt(self._node, 'align-end')
+ self.offset_unset = fdt_util.GetBool(self._node, 'offset-unset')
+ self.extend_size = fdt_util.GetBool(self._node, 'extend-size')
+ self.missing_msg = fdt_util.GetString(self._node, 'missing-msg')
+ self.optional = fdt_util.GetBool(self._node, 'optional')
+ self.overlap = fdt_util.GetBool(self._node, 'overlap')
+ if self.overlap:
+ self.required_props += ['offset', 'size']
+
+ # This is only supported by blobs and sections at present
+ self.compress = fdt_util.GetString(self._node, 'compress', 'none')
+ self.offset_from_elf = fdt_util.GetPhandleNameOffset(self._node,
+ 'offset-from-elf')
+
+ self.preserve = fdt_util.GetBool(self._node, 'preserve')
+ self.no_write_symbols = fdt_util.GetBool(self._node, 'no-write-symbols')
+
+ def GetDefaultFilename(self):
+ return None
+
+ def GetFdts(self):
+ """Get the device trees used by this entry
+
+ Returns:
+ Empty dict, if this entry is not a .dtb, otherwise:
+ Dict:
+ key: Filename from this entry (without the path)
+ value: Tuple:
+ Entry object for this dtb
+ Filename of file containing this dtb
+ """
+ return {}
+
+ def gen_entries(self):
+ """Allow entries to generate other entries
+
+ Some entries generate subnodes automatically, from which sub-entries
+ are then created. This method allows those to be added to the binman
+ definition for the current image. An entry which implements this method
+ should call state.AddSubnode() to add a subnode and can add properties
+ with state.AddString(), etc.
+
+ An example is 'files', which produces a section containing a list of
+ files.
+ """
+ pass
+
+ def AddMissingProperties(self, have_image_pos):
+ """Add new properties to the device tree as needed for this entry
+
+ Args:
+ have_image_pos: True if this entry has an image position. This can
+ be False if its parent section is compressed, since compression
+ groups all entries together into a compressed block of data,
+ obscuring the start of each individual child entry
+ """
+ for prop in ['offset', 'size']:
+ if not prop in self._node.props:
+ state.AddZeroProp(self._node, prop)
+ if have_image_pos and 'image-pos' not in self._node.props:
+ state.AddZeroProp(self._node, 'image-pos')
+ if self.GetImage().allow_repack:
+ if self.orig_offset is not None:
+ state.AddZeroProp(self._node, 'orig-offset', True)
+ if self.orig_size is not None:
+ state.AddZeroProp(self._node, 'orig-size', True)
+
+ if self.compress != 'none':
+ state.AddZeroProp(self._node, 'uncomp-size')
+
+ if self.update_hash:
+ err = state.CheckAddHashProp(self._node)
+ if err:
+ self.Raise(err)
+
+ def SetCalculatedProperties(self):
+ """Set the value of device-tree properties calculated by binman"""
+ state.SetInt(self._node, 'offset', self.offset)
+ state.SetInt(self._node, 'size', self.size)
+ base = self.section.GetRootSkipAtStart() if self.section else 0
+ if self.image_pos is not None:
+ state.SetInt(self._node, 'image-pos', self.image_pos - base)
+ if self.GetImage().allow_repack:
+ if self.orig_offset is not None:
+ state.SetInt(self._node, 'orig-offset', self.orig_offset, True)
+ if self.orig_size is not None:
+ state.SetInt(self._node, 'orig-size', self.orig_size, True)
+ if self.uncomp_size is not None:
+ state.SetInt(self._node, 'uncomp-size', self.uncomp_size)
+
+ if self.update_hash:
+ state.CheckSetHashValue(self._node, self.GetData)
+
+ def ProcessFdt(self, fdt):
+ """Allow entries to adjust the device tree
+
+ Some entries need to adjust the device tree for their purposes. This
+ may involve adding or deleting properties.
+
+ Returns:
+ True if processing is complete
+ False if processing could not be completed due to a dependency.
+ This will cause the entry to be retried after others have been
+ called
+ """
+ return True
+
+ def SetPrefix(self, prefix):
+ """Set the name prefix for a node
+
+ Args:
+ prefix: Prefix to set, or '' to not use a prefix
+ """
+ if prefix:
+ self.name = prefix + self.name
+
+ def SetContents(self, data):
+ """Set the contents of an entry
+
+ This sets both the data and content_size properties
+
+ Args:
+ data: Data to set to the contents (bytes)
+ """
+ self.data = data
+ self.contents_size = len(self.data)
+
+ def ProcessContentsUpdate(self, data):
+ """Update the contents of an entry, after the size is fixed
+
+ This checks that the new data is the same size as the old. If the size
+ has changed, this triggers a re-run of the packing algorithm.
+
+ Args:
+ data: Data to set to the contents (bytes)
+
+ Raises:
+ ValueError if the new data size is not the same as the old
+ """
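+        # Decide whether a size change is acceptable: if the state allows
+        # expansion/contraction and the size has changed, report size_ok as
+        # False so that the caller re-runs packing; otherwise the data must
+        # fit in the existing contents_size (smaller data is zero-padded,
+        # larger data raises an error).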
+ size_ok = True
+ new_size = len(data)
+ if state.AllowEntryExpansion() and new_size > self.contents_size:
+ # self.data will indicate the new size needed
+ size_ok = False
+ elif state.AllowEntryContraction() and new_size < self.contents_size:
+ size_ok = False
+
+ # If not allowed to change, try to deal with it or give up
+ if size_ok:
+ if new_size > self.contents_size:
+ self.Raise('Cannot update entry size from %d to %d' %
+ (self.contents_size, new_size))
+
+ # Don't let the data shrink. Pad it if necessary
+ if size_ok and new_size < self.contents_size:
+ data += tools.get_bytes(0, self.contents_size - new_size)
+
+ if not size_ok:
+ tout.debug("Entry '%s' size change from %s to %s" % (
+ self._node.path, to_hex(self.contents_size),
+ to_hex(new_size)))
+ self.SetContents(data)
+ return size_ok
+
+ def ObtainContents(self, skip_entry=None, fake_size=0):
+ """Figure out the contents of an entry.
+
+ For missing blobs (where allow-missing is enabled), the contents are set
+ to b'' and self.missing is set to True.
+
+ Args:
+ skip_entry (Entry): Entry to skip when obtaining section contents
+ fake_size (int): Size of fake file to create if needed
+
+ Returns:
+ True if the contents were found, False if another call is needed
+ after the other entries are processed, None if there is no contents
+ """
+ # No contents by default: subclasses can implement this
+ return True
+
+ def ResetForPack(self):
+ """Reset offset/size fields so that packing can be done again"""
+ self.Detail('ResetForPack: offset %s->%s, size %s->%s' %
+ (to_hex(self.offset), to_hex(self.orig_offset),
+ to_hex(self.size), to_hex(self.orig_size)))
+ self.pre_reset_size = self.size
+ self.offset = self.orig_offset
+ self.size = self.orig_size
+
+ def Pack(self, offset):
+ """Figure out how to pack the entry into the section
+
+ Most of the time the entries are not fully specified. There may be
+ an alignment but no size. In that case we take the size from the
+ contents of the entry.
+
+ If an entry has no hard-coded offset, it will be placed at @offset.
+
+ Once this function is complete, both the offset and size of the
+        entry will be known.
+
+ Args:
+            offset: Current section offset pointer
+
+ Returns:
+ New section offset pointer (after this entry)
+ """
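+        # Worked example with illustrative values: align = 0x10,
+        # pad_before = 4, contents_size = 0x24, no fixed size. An incoming
+        # offset of 0xc is aligned up to 0x10, the entry needs
+        # 4 + 0x24 = 0x28 bytes, so the returned offset is 0x10 + 0x28 = 0x38.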
+ self.Detail('Packing: offset=%s, size=%s, content_size=%x' %
+ (to_hex(self.offset), to_hex(self.size),
+ self.contents_size))
+ if self.offset is None:
+ if self.offset_unset:
+ self.Raise('No offset set with offset-unset: should another '
+ 'entry provide this correct offset?')
+ elif self.offset_from_elf:
+ self.offset = self.lookup_offset()
+ else:
+ self.offset = tools.align(offset, self.align)
+ needed = self.pad_before + self.contents_size + self.pad_after
+ needed = max(needed, self.min_size)
+ needed = tools.align(needed, self.align_size)
+ size = self.size
+ if not size:
+ size = needed
+ new_offset = self.offset + size
+ aligned_offset = tools.align(new_offset, self.align_end)
+ if aligned_offset != new_offset:
+ size = aligned_offset - self.offset
+ new_offset = aligned_offset
+
+ if not self.size:
+ self.size = size
+
+ if self.size < needed:
+ self.Raise("Entry contents size is %#x (%d) but entry size is "
+ "%#x (%d)" % (needed, needed, self.size, self.size))
+        # Check that the alignment is correct. It could be wrong if the
+        # offset or size values were provided (i.e. not calculated) but
+        # conflict with the provided alignment values
+ if self.size != tools.align(self.size, self.align_size):
+ self.Raise("Size %#x (%d) does not match align-size %#x (%d)" %
+ (self.size, self.size, self.align_size, self.align_size))
+ if self.offset != tools.align(self.offset, self.align):
+ self.Raise("Offset %#x (%d) does not match align %#x (%d)" %
+ (self.offset, self.offset, self.align, self.align))
+ self.Detail(' - packed: offset=%#x, size=%#x, content_size=%#x, next_offset=%x' %
+ (self.offset, self.size, self.contents_size, new_offset))
+
+ return new_offset
+
+ def Raise(self, msg):
+ """Convenience function to raise an error referencing a node"""
+ raise ValueError("Node '%s': %s" % (self._node.path, msg))
+
+ def Info(self, msg):
+ """Convenience function to log info referencing a node"""
+ tag = "Info '%s'" % self._node.path
+ tout.detail('%30s: %s' % (tag, msg))
+
+ def Detail(self, msg):
+ """Convenience function to log detail referencing a node"""
+ tag = "Node '%s'" % self._node.path
+ tout.detail('%30s: %s' % (tag, msg))
+
+ def GetEntryArgsOrProps(self, props, required=False):
+ """Return the values of a set of properties
+
+ Args:
+ props: List of EntryArg objects
+
+ Raises:
+ ValueError if a property is not found
+ """
+ values = []
+ missing = []
+ for prop in props:
+ python_prop = prop.name.replace('-', '_')
+ if hasattr(self, python_prop):
+ value = getattr(self, python_prop)
+ else:
+ value = None
+ if value is None:
+ value = self.GetArg(prop.name, prop.datatype)
+ if value is None and required:
+ missing.append(prop.name)
+ values.append(value)
+ if missing:
+ self.GetImage().MissingArgs(self, missing)
+ return values
+
+ def GetPath(self):
+ """Get the path of a node
+
+ Returns:
+ Full path of the node for this entry
+ """
+ return self._node.path
+
+ def GetData(self, required=True):
+ """Get the contents of an entry
+
+ Args:
+ required: True if the data must be present, False if it is OK to
+ return None
+
+ Returns:
+ bytes content of the entry, excluding any padding. If the entry is
+ compressed, the compressed data is returned. If the entry data
+ is not yet available, False can be returned. If the entry data
+ is null, then None is returned.
+ """
+ self.Detail('GetData: size %s' % to_hex_size(self.data))
+ return self.data
+
+ def GetPaddedData(self, data=None):
+ """Get the data for an entry including any padding
+
+ Gets the entry data and uses its section's pad-byte value to add padding
+ before and after as defined by the pad-before and pad-after properties.
+
+ This does not consider alignment.
+
+ Returns:
+ Contents of the entry along with any pad bytes before and
+ after it (bytes)
+ """
+ if data is None:
+ data = self.GetData()
+ return self.section.GetPaddedDataForEntry(self, data)
+
+ def GetOffsets(self):
+ """Get the offsets for siblings
+
+ Some entry types can contain information about the position or size of
+ other entries. An example of this is the Intel Flash Descriptor, which
+ knows where the Intel Management Engine section should go.
+
+ If this entry knows about the position of other entries, it can specify
+ this by returning values here
+
+ Returns:
+ Dict:
+ key: Entry type
+ value: List containing position and size of the given entry
+ type. Either can be None if not known
+ """
+ return {}
+
+ def SetOffsetSize(self, offset, size):
+ """Set the offset and/or size of an entry
+
+ Args:
+ offset: New offset, or None to leave alone
+ size: New size, or None to leave alone
+ """
+ if offset is not None:
+ self.offset = offset
+ if size is not None:
+ self.size = size
+
+ def SetImagePos(self, image_pos):
+ """Set the position in the image
+
+ Args:
+ image_pos: Position of this entry in the image
+ """
+ self.image_pos = image_pos + self.offset
+
+ def ProcessContents(self):
+ """Do any post-packing updates of entry contents
+
+ This function should call ProcessContentsUpdate() to update the entry
+ contents, if necessary, returning its return value here.
+
+ Args:
+ data: Data to set to the contents (bytes)
+
+ Returns:
+ True if the new data size is OK, False if expansion is needed
+
+ Raises:
+ ValueError if the new data size is not the same as the old and
+ state.AllowEntryExpansion() is False
+ """
+ return True
+
+ def WriteSymbols(self, section):
+ """Write symbol values into binary files for access at run time
+
+ Args:
+ section: Section containing the entry
+ """
+ if self.auto_write_symbols and not self.no_write_symbols:
+ # Check if we are writing symbols into an ELF file
+ is_elf = self.GetDefaultFilename() == self.elf_fname
+ elf.LookupAndWriteSymbols(self.elf_fname, self, section.GetImage(),
+ is_elf, self.elf_base_sym)
+
+ def CheckEntries(self):
+ """Check that the entry offsets are correct
+
+ This is used for entries which have extra offset requirements (other
+ than having to be fully inside their section). Sub-classes can implement
+ this function and raise if there is a problem.
+ """
+ pass
+
+ @staticmethod
+ def GetStr(value):
+ if value is None:
+ return '<none> '
+ return '%08x' % value
+
+ @staticmethod
+ def WriteMapLine(fd, indent, name, offset, size, image_pos):
+ print('%s %s%s %s %s' % (Entry.GetStr(image_pos), ' ' * indent,
+ Entry.GetStr(offset), Entry.GetStr(size),
+ name), file=fd)
+
+ def WriteMap(self, fd, indent):
+ """Write a map of the entry to a .map file
+
+ Args:
+ fd: File to write the map to
+            indent: Current indent level of map (0=none, 1=one level, etc.)
+ """
+ self.WriteMapLine(fd, indent, self.name, self.offset, self.size,
+ self.image_pos)
+
+ # pylint: disable=assignment-from-none
+ def GetEntries(self):
+ """Return a list of entries contained by this entry
+
+ Returns:
+ List of entries, or None if none. A normal entry has no entries
+ within it so will return None
+ """
+ return None
+
+ def FindEntryByNode(self, find_node):
+ """Find a node in an entry, searching all subentries
+
+ This does a recursive search.
+
+ Args:
+ find_node (fdt.Node): Node to find
+
+ Returns:
+ Entry: entry, if found, else None
+ """
+ entries = self.GetEntries()
+ if entries:
+ for entry in entries.values():
+ if entry._node == find_node:
+ return entry
+ found = entry.FindEntryByNode(find_node)
+ if found:
+ return found
+
+ return None
+
+ def GetArg(self, name, datatype=str):
+ """Get the value of an entry argument or device-tree-node property
+
+ Some node properties can be provided as arguments to binman. First check
+ the entry arguments, and fall back to the device tree if not found
+
+ Args:
+ name: Argument name
+ datatype: Data type (str or int)
+
+ Returns:
+ Value of argument as a string or int, or None if no value
+
+ Raises:
+            ValueError if the argument cannot be converted to int
+ """
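+        # For example, GetArg('tpl-dtb', str) returns the value of a 'tpl-dtb'
+        # entry argument if one was passed to binman, otherwise the value of
+        # a 'tpl-dtb' property in this entry's node (or None if neither is
+        # present).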
+ value = state.GetEntryArg(name)
+ if value is not None:
+ if datatype == int:
+ try:
+ value = int(value)
+ except ValueError:
+ self.Raise("Cannot convert entry arg '%s' (value '%s') to integer" %
+ (name, value))
+ elif datatype == str:
+ pass
+ else:
+ raise ValueError("GetArg() internal error: Unknown data type '%s'" %
+ datatype)
+ else:
+ value = fdt_util.GetDatatype(self._node, name, datatype)
+ return value
+
+ @staticmethod
+ def WriteDocs(modules, test_missing=None):
+ """Write out documentation about the various entry types to stdout
+
+ Args:
+ modules: List of modules to include
+ test_missing: Used for testing. This is a module to report
+ as missing
+ """
+ print('''Binman Entry Documentation
+===========================
+
+This file describes the entry types supported by binman. These entry types can
+be placed in an image one by one to build up a final firmware image. It is
+fairly easy to create new entry types. Just add a new file to the 'etype'
+directory. You can use the existing entries as examples.
+
+Note that some entries are subclasses of others, using and extending their
+features to produce new behaviours.
+
+
+''')
+ modules = sorted(modules)
+
+ # Don't show the test entry
+ if '_testing' in modules:
+ modules.remove('_testing')
+ missing = []
+ for name in modules:
+ module = Entry.Lookup('WriteDocs', name, False)
+ docs = getattr(module, '__doc__')
+ if test_missing == name:
+ docs = None
+ if docs:
+ lines = docs.splitlines()
+ first_line = lines[0]
+ rest = [line[4:] for line in lines[1:]]
+ hdr = 'Entry: %s: %s' % (name.replace('_', '-'), first_line)
+
+ # Create a reference for use by rST docs
+ ref_name = f'etype_{module.__name__[6:]}'.lower()
+ print('.. _%s:' % ref_name)
+ print()
+ print(hdr)
+ print('-' * len(hdr))
+ print('\n'.join(rest))
+ print()
+ print()
+ else:
+ missing.append(name)
+
+ if missing:
+ raise ValueError('Documentation is missing for modules: %s' %
+ ', '.join(missing))
+
+ def GetUniqueName(self):
+ """Get a unique name for a node
+
+ Returns:
+ String containing a unique name for a node, consisting of the name
+ of all ancestors (starting from within the 'binman' node) separated
+            by a dot ('.'). This can be useful for generating unique filenames
+ in the output directory.
+ """
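+        # For example, an entry 'u-boot' inside a section 'ro-section' that
+        # sits directly under the binman node gets the unique name
+        # 'ro-section.u-boot'.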
+ name = self.name
+ node = self._node
+ while node.parent:
+ node = node.parent
+ if node.name in ('binman', '/'):
+ break
+ name = '%s.%s' % (node.name, name)
+ return name
+
+ def extend_to_limit(self, limit):
+ """Extend an entry so that it ends at the given offset limit"""
+ if self.offset + self.size < limit:
+ self.size = limit - self.offset
+ # Request the contents again, since changing the size requires that
+ # the data grows. This should not fail, but check it to be sure.
+ if not self.ObtainContents():
+ self.Raise('Cannot obtain contents when expanding entry')
+
+ def HasSibling(self, name):
+ """Check if there is a sibling of a given name
+
+ Returns:
+            True if there is an entry with this name in the same section,
+ else False
+ """
+ return name in self.section.GetEntries()
+
+ def GetSiblingImagePos(self, name):
+ """Return the image position of the given sibling
+
+ Returns:
+ Image position of sibling, or None if the sibling has no position,
+ or False if there is no such sibling
+ """
+ if not self.HasSibling(name):
+ return False
+ return self.section.GetEntries()[name].image_pos
+
+ @staticmethod
+ def AddEntryInfo(entries, indent, name, etype, size, image_pos,
+ uncomp_size, offset, entry):
+ """Add a new entry to the entries list
+
+ Args:
+ entries: List (of EntryInfo objects) to add to
+ indent: Current indent level to add to list
+ name: Entry name (string)
+ etype: Entry type (string)
+ size: Entry size in bytes (int)
+ image_pos: Position within image in bytes (int)
+ uncomp_size: Uncompressed size if the entry uses compression, else
+ None
+ offset: Entry offset within parent in bytes (int)
+ entry: Entry object
+ """
+ entries.append(EntryInfo(indent, name, etype, size, image_pos,
+ uncomp_size, offset, entry))
+
+ def ListEntries(self, entries, indent):
+ """Add files in this entry to the list of entries
+
+ This can be overridden by subclasses which need different behaviour.
+
+ Args:
+ entries: List (of EntryInfo objects) to add to
+ indent: Current indent level to add to list
+ """
+ self.AddEntryInfo(entries, indent, self.name, self.etype, self.size,
+ self.image_pos, self.uncomp_size, self.offset, self)
+
+ def ReadData(self, decomp=True, alt_format=None):
+ """Read the data for an entry from the image
+
+ This is used when the image has been read in and we want to extract the
+ data for a particular entry from that image.
+
+ Args:
+ decomp: True to decompress any compressed data before returning it;
+ False to return the raw, uncompressed data
+
+ Returns:
+ Entry data (bytes)
+ """
+ # Use True here so that we get an uncompressed section to work from,
+ # although compressed sections are currently not supported
+ tout.debug("ReadChildData section '%s', entry '%s'" %
+ (self.section.GetPath(), self.GetPath()))
+ data = self.section.ReadChildData(self, decomp, alt_format)
+ return data
+
+ def ReadChildData(self, child, decomp=True, alt_format=None):
+ """Read the data for a particular child entry
+
+ This reads data from the parent and extracts the piece that relates to
+ the given child.
+
+ Args:
+ child (Entry): Child entry to read data for (must be valid)
+ decomp (bool): True to decompress any compressed data before
+ returning it; False to return the raw, uncompressed data
+ alt_format (str): Alternative format to read in, or None
+
+ Returns:
+ Data for the child (bytes)
+ """
+ pass
+
+ def LoadData(self, decomp=True):
+ data = self.ReadData(decomp)
+ self.contents_size = len(data)
+ self.ProcessContentsUpdate(data)
+ self.Detail('Loaded data size %x' % len(data))
+
+ def GetAltFormat(self, data, alt_format):
+        """Read the data for an entry in an alternative format
+
+        Supported formats are listed in the documentation for each entry. An
+        example is the fdtmap entry type.
+
+ Args:
+ data (bytes): Data to convert (this should have been produced by the
+ entry)
+ alt_format (str): Format to use
+
+ """
+ pass
+
+ def GetImage(self):
+ """Get the image containing this entry
+
+ Returns:
+ Image object containing this entry
+ """
+ return self.section.GetImage()
+
+ def WriteData(self, data, decomp=True):
+ """Write the data to an entry in the image
+
+ This is used when the image has been read in and we want to replace the
+ data for a particular entry in that image.
+
+ The image must be re-packed and written out afterwards.
+
+ Args:
+ data: Data to replace it with
+ decomp: True to compress the data if needed, False if data is
+ already compressed so should be used as is
+
+ Returns:
+ True if the data did not result in a resize of this entry, False if
+ the entry must be resized
+ """
+ if self.size is not None:
+ self.contents_size = self.size
+ else:
+ self.contents_size = self.pre_reset_size
+ ok = self.ProcessContentsUpdate(data)
+ self.build_done = False
+ self.Detail('WriteData: size=%x, ok=%s' % (len(data), ok))
+ section_ok = self.section.WriteChildData(self)
+ return ok and section_ok
+
+ def WriteChildData(self, child):
+ """Handle writing the data in a child entry
+
+ This should be called on the child's parent section after the child's
+ data has been updated. It should update any data structures needed to
+ validate that the update is successful.
+
+ This base-class implementation does nothing, since the base Entry object
+ does not have any children.
+
+ Args:
+ child: Child Entry that was written
+
+ Returns:
+ True if the section could be updated successfully, False if the
+ data is such that the section could not update
+ """
+ self.build_done = False
+ entry = self.section
+
+ # Now we must rebuild all sections above this one
+ while entry and entry != entry.section:
+            entry.build_done = False
+ entry = entry.section
+
+ return True
+
+ def GetSiblingOrder(self):
+        """Get the relative order of an entry among its siblings
+
+ Returns:
+ 'start' if this entry is first among siblings, 'end' if last,
+ otherwise None
+ """
+ entries = list(self.section.GetEntries().values())
+ if entries:
+ if self == entries[0]:
+ return 'start'
+ elif self == entries[-1]:
+ return 'end'
+ return 'middle'
+
+ def SetAllowMissing(self, allow_missing):
+ """Set whether a section allows missing external blobs
+
+ Args:
+ allow_missing: True if allowed, False if not allowed
+ """
+ # This is meaningless for anything other than sections
+ pass
+
+ def SetAllowFakeBlob(self, allow_fake):
+ """Set whether a section allows to create a fake blob
+
+ Args:
+ allow_fake: True if allowed, False if not allowed
+ """
+ self.allow_fake = allow_fake
+
+ def CheckMissing(self, missing_list):
+ """Check if the entry has missing external blobs
+
+ If there are missing (non-optional) blobs, the entries are added to the
+ list
+
+ Args:
+ missing_list: List of Entry objects to be added to
+ """
+ if self.missing and not self.optional:
+ missing_list.append(self)
+
+ def check_fake_fname(self, fname, size=0):
+ """If the file is missing and the entry allows fake blobs, fake it
+
+ Sets self.faked to True if faked
+
+ Args:
+ fname (str): Filename to check
+ size (int): Size of fake file to create
+
+ Returns:
+ tuple:
+ fname (str): Filename of faked file
+ bool: True if the blob was faked, False if not
+ """
+ if self.allow_fake and not pathlib.Path(fname).is_file():
+ if not self.fake_fname:
+ outfname = os.path.join(self.fake_dir, os.path.basename(fname))
+ with open(outfname, "wb") as out:
+ out.truncate(size)
+ tout.info(f"Entry '{self._node.path}': Faked blob '{outfname}'")
+ self.fake_fname = outfname
+ self.faked = True
+ return self.fake_fname, True
+ return fname, False
+
+ def CheckFakedBlobs(self, faked_blobs_list):
+ """Check if any entries in this section have faked external blobs
+
+ If there are faked blobs, the entries are added to the list
+
+ Args:
+ faked_blobs_list: List of Entry objects to be added to
+ """
+ # This is meaningless for anything other than blobs
+ pass
+
+ def CheckOptional(self, optional_list):
+ """Check if the entry has missing but optional external blobs
+
+ If there are missing (optional) blobs, the entries are added to the list
+
+ Args:
+ optional_list (list): List of Entry objects to be added to
+ """
+ if self.missing and self.optional:
+ optional_list.append(self)
+
+ def GetAllowMissing(self):
+ """Get whether a section allows missing external blobs
+
+ Returns:
+ True if allowed, False if not allowed
+ """
+ return self.allow_missing
+
+ def record_missing_bintool(self, bintool):
+ """Record a missing bintool that was needed to produce this entry
+
+ Args:
+ bintool (Bintool): Bintool that was missing
+ """
+ if bintool not in self.missing_bintools:
+ self.missing_bintools.append(bintool)
+
+ def check_missing_bintools(self, missing_list):
+ """Check if any entries in this section have missing bintools
+
+ If there are missing bintools, these are added to the list
+
+ Args:
+ missing_list: List of Bintool objects to be added to
+ """
+ for bintool in self.missing_bintools:
+ if bintool not in missing_list:
+ missing_list.append(bintool)
+
+ def GetHelpTags(self):
+        """Get the tags used for missing-blob help
+
+ Returns:
+ list of possible tags, most desirable first
+ """
+ return list(filter(None, [self.missing_msg, self.name, self.etype]))
+
+ def CompressData(self, indata):
+ """Compress data according to the entry's compression method
+
+ Args:
+ indata: Data to compress
+
+ Returns:
+ Compressed data
+ """
+ self.uncomp_data = indata
+ if self.compress != 'none':
+ self.uncomp_size = len(indata)
+ if self.comp_bintool.is_present():
+ data = self.comp_bintool.compress(indata)
+ else:
+ self.record_missing_bintool(self.comp_bintool)
+ data = tools.get_bytes(0, 1024)
+ else:
+ data = indata
+ return data
+
+ def DecompressData(self, indata):
+ """Decompress data according to the entry's compression method
+
+ Args:
+ indata: Data to decompress
+
+ Returns:
+ Decompressed data
+ """
+ if self.compress != 'none':
+ if self.comp_bintool.is_present():
+ data = self.comp_bintool.decompress(indata)
+ self.uncomp_size = len(data)
+ else:
+ self.record_missing_bintool(self.comp_bintool)
+ data = tools.get_bytes(0, 1024)
+ else:
+ data = indata
+ self.uncomp_data = data
+ return data
+
+ @classmethod
+ def UseExpanded(cls, node, etype, new_etype):
+ """Check whether to use an expanded entry type
+
+ This is called by Entry.Create() when it finds an expanded version of
+ an entry type (e.g. 'u-boot-expanded'). If this method returns True then
+ it will be used (e.g. in place of 'u-boot'). If it returns False, it is
+ ignored.
+
+ Args:
+ node: Node object containing information about the entry to
+ create
+ etype: Original entry type being used
+ new_etype: New entry type proposed
+
+ Returns:
+ True to use this entry type, False to use the original one
+ """
+ tout.info("Node '%s': etype '%s': %s selected" %
+ (node.path, etype, new_etype))
+ return True
+
+ def CheckAltFormats(self, alt_formats):
+ """Add any alternative formats supported by this entry type
+
+ Args:
+ alt_formats (dict): Dict to add alt_formats to:
+ key: Name of alt format
+ value: Help text
+ """
+ pass
+
+ def AddBintools(self, btools):
+ """Add the bintools used by this entry type
+
+ Args:
+ btools (dict of Bintool): Dict of bintools to add to, keyed by name
+
+ Raises:
+ ValueError if compression algorithm is not supported
+ """
+ algo = self.compress
+ if algo != 'none':
+ algos = ['bzip2', 'gzip', 'lz4', 'lzma', 'lzo', 'xz', 'zstd']
+ if algo not in algos:
+ raise ValueError("Unknown algorithm '%s'" % algo)
+ names = {'lzma': 'lzma_alone', 'lzo': 'lzop'}
+ name = names.get(self.compress, self.compress)
+ self.comp_bintool = self.AddBintool(btools, name)
+
+ @classmethod
+ def AddBintool(self, tools, name):
+ """Add a new bintool to the tools used by this etype
+
+ Args:
+ tools (dict of Bintool): Dict of bintools to add the new bintool to
+ name: Name of the tool
+ """
+ btool = bintool.Bintool.create(name)
+ tools[name] = btool
+ return btool
+
+ def SetUpdateHash(self, update_hash):
+ """Set whether this entry's "hash" subnode should be updated
+
+ Args:
+ update_hash: True if hash should be updated, False if not
+ """
+ self.update_hash = update_hash
+
+ def collect_contents_to_file(self, entries, prefix, fake_size=0):
+ """Put the contents of a list of entries into a file
+
+ Args:
+ entries (list of Entry): Entries to collect
+ prefix (str): Filename prefix of file to write to
+ fake_size (int): Size of fake file to create if needed
+
+ If any entry does not have contents yet, this function returns False
+ for the data.
+
+ Returns:
+ Tuple:
+ bytes: Concatenated data from all the entries (or None)
+ str: Filename of file written (or None if no data)
+ str: Unique portion of filename (or None if no data)
+ """
+ data = b''
+ for entry in entries:
+ data += entry.GetData()
+ uniq = self.GetUniqueName()
+ fname = tools.get_output_filename(f'{prefix}.{uniq}')
+ tools.write_file(fname, data)
+ return data, fname, uniq
+
+ @classmethod
+ def create_fake_dir(cls):
+ """Create the directory for fake files"""
+ cls.fake_dir = tools.get_output_filename('binman-fake')
+ if not os.path.exists(cls.fake_dir):
+ os.mkdir(cls.fake_dir)
+ tout.notice(f"Fake-blob dir is '{cls.fake_dir}'")
+
+ def ensure_props(self):
+ """Raise an exception if properties are missing
+
+ Args:
+ prop_list (list of str): List of properties to check for
+
+ Raises:
+ ValueError: Any property is missing
+ """
+ not_present = []
+ for prop in self.required_props:
+ if not prop in self._node.props:
+ not_present.append(prop)
+ if not_present:
+ self.Raise(f"'{self.etype}' entry is missing properties: {' '.join(not_present)}")
+
+ def mark_absent(self, msg):
+ """Mark this entry as absent, giving the reason why
+
+ Args:
+ msg (str): Reason why the entry is absent
+ """
+ tout.info("Entry '%s' marked absent: %s" % (self._node.path, msg))
+ self.absent = True
+
+ def read_elf_segments(self):
+ """Read segments from an entry that can generate an ELF file
+
+ Returns:
+ tuple:
+ list of segments, each:
+ int: Segment number (0 = first)
+ int: Start address of segment in memory
+ bytes: Contents of segment
+ int: entry address of ELF file
+ """
+ return None
+
+ def lookup_offset(self):
+ """Look up this entry's offset using a symbol in another entry's ELF
+
+ Uses the (node, symbol, offset) tuple in self.offset_from_elf to read
+ the symbol's value from the referenced entry's ELF file and add the
+ offset to it
+
+ Returns:
+ int: Offset to use for this entry
+ """
+ node, sym_name, offset = self.offset_from_elf
+ entry = self.section.FindEntryByNode(node)
+ if not entry:
+ self.Raise("Cannot find entry for node '%s'" % node.name)
+ if not entry.elf_fname:
+ entry.Raise("Need elf-fname property '%s'" % node.name)
+ val = elf.GetSymbolOffset(entry.elf_fname, sym_name,
+ entry.elf_base_sym)
+ return val + offset
+
+ def mark_build_done(self):
+ """Mark an entry as already built"""
+ self.build_done = True
+ entries = self.GetEntries()
+ if entries:
+ for entry in entries.values():
+ entry.mark_build_done()
+
+ def UpdateSignatures(self, privatekey_fname, algo, input_fname):
+ self.Raise('Updating signatures is not supported with this entry type')
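The compression plumbing above (CompressData(), DecompressData() and AddBintools()) amounts to mapping the entry's 'compress' property to a bintool name, with placeholder bytes substituted when that tool is not installed. A minimal sketch of the algorithm-to-bintool mapping, using the same lists as AddBintools() above::

    # Sketch of how a compression algorithm selects a bintool name
    ALGOS = ['bzip2', 'gzip', 'lz4', 'lzma', 'lzo', 'xz', 'zstd']
    NAMES = {'lzma': 'lzma_alone', 'lzo': 'lzop'}

    def bintool_name_for(algo):
        """Return the bintool name for a compression algorithm, or None"""
        if algo == 'none':
            return None
        if algo not in ALGOS:
            raise ValueError("Unknown algorithm '%s'" % algo)
        return NAMES.get(algo, algo)

    assert bintool_name_for('lzma') == 'lzma_alone'
    assert bintool_name_for('gzip') == 'gzip'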
diff --git a/tools/binman/entry_test.py b/tools/binman/entry_test.py
new file mode 100644
index 00000000000..ac6582cf86a
--- /dev/null
+++ b/tools/binman/entry_test.py
@@ -0,0 +1,138 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Test for the Entry class
+
+import collections
+import importlib
+import os
+import sys
+import unittest
+
+from binman import entry
+from binman.etype.blob import Entry_blob
+from dtoc import fdt
+from dtoc import fdt_util
+from u_boot_pylib import tools
+
+class TestEntry(unittest.TestCase):
+ def setUp(self):
+ tools.prepare_output_dir(None)
+
+ def tearDown(self):
+ tools.finalise_output_dir()
+
+ def GetNode(self):
+ binman_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
+ fname = fdt_util.EnsureCompiled(
+ os.path.join(binman_dir, 'test/005_simple.dts'))
+ dtb = fdt.FdtScan(fname)
+ return dtb.GetNode('/binman/u-boot')
+
+ def _ReloadEntry(self):
+ global entry
+ if entry:
+ importlib.reload(entry)
+ else:
+ from binman import entry
+
+ def testEntryContents(self):
+ """Test the Entry bass class"""
+ from binman import entry
+ base_entry = entry.Entry(None, None, None)
+ self.assertEqual(True, base_entry.ObtainContents())
+
+ def testUnknownEntry(self):
+ """Test that unknown entry types are detected"""
+ Node = collections.namedtuple('Node', ['name', 'path'])
+ node = Node('invalid-name', 'invalid-path')
+ with self.assertRaises(ValueError) as e:
+ entry.Entry.Create(None, node, node.name)
+ self.assertIn("Unknown entry type 'invalid-name' in node "
+ "'invalid-path'", str(e.exception))
+
+ def testUniqueName(self):
+ """Test Entry.GetUniqueName"""
+ Node = collections.namedtuple('Node', ['name', 'parent'])
+ base_node = Node('root', None)
+ base_entry = entry.Entry(None, None, base_node)
+ self.assertEqual('root', base_entry.GetUniqueName())
+ sub_node = Node('subnode', base_node)
+ sub_entry = entry.Entry(None, None, sub_node)
+ self.assertEqual('root.subnode', sub_entry.GetUniqueName())
+
+ def testGetDefaultFilename(self):
+ """Trivial test for this base class function"""
+ base_entry = entry.Entry(None, None, None)
+ self.assertIsNone(base_entry.GetDefaultFilename())
+
+ def testBlobFdt(self):
+ """Test the GetFdtEtype() method of the blob-dtb entries"""
+ base = entry.Entry.Create(None, self.GetNode(), 'blob-dtb')
+ self.assertIsNone(base.GetFdtEtype())
+
+ dtb = entry.Entry.Create(None, self.GetNode(), 'u-boot-dtb')
+ self.assertEqual('u-boot-dtb', dtb.GetFdtEtype())
+
+ def testWriteChildData(self):
+ """Test the WriteChildData() method of the base class"""
+ base = entry.Entry.Create(None, self.GetNode(), 'blob-dtb')
+ self.assertTrue(base.WriteChildData(base))
+
+ def testReadChildData(self):
+ """Test the ReadChildData() method of the base class"""
+ base = entry.Entry.Create(None, self.GetNode(), 'blob-dtb')
+ self.assertIsNone(base.ReadChildData(base))
+
+ def testExpandedEntry(self):
+ """Test use of an expanded entry when available"""
+ base = entry.Entry.Create(None, self.GetNode())
+ self.assertEqual('u-boot', base.etype)
+
+ expanded = entry.Entry.Create(None, self.GetNode(), expanded=True)
+ self.assertEqual('u-boot-expanded', expanded.etype)
+
+ with self.assertRaises(ValueError) as e:
+ entry.Entry.Create(None, self.GetNode(), 'missing', expanded=True)
+ self.assertIn("Unknown entry type 'missing' in node '/binman/u-boot'",
+ str(e.exception))
+
+ def testMissingEtype(self):
+ """Test use of a blob etype when the requested one is not available"""
+ ent = entry.Entry.Create(None, self.GetNode(), 'missing',
+ missing_etype=True)
+ self.assertTrue(isinstance(ent, Entry_blob))
+ self.assertEqual('missing', ent.etype)
+
+ def testDecompressData(self):
+ """Test the DecompressData() method of the base class"""
+ base = entry.Entry.Create(None, self.GetNode(), 'blob-dtb')
+ base.compress = 'lz4'
+ bintools = {}
+ base.comp_bintool = base.AddBintool(bintools, '_testing')
+ self.assertEqual(tools.get_bytes(0, 1024), base.CompressData(b'abc'))
+ self.assertEqual(tools.get_bytes(0, 1024), base.DecompressData(b'abc'))
+
+ def testLookupOffset(self):
+ """Test the lookup_offset() method of the base class"""
+ def MyFindEntryByNode(node):
+ return self.found
+
+ base = entry.Entry.Create(None, self.GetNode(), 'blob-dtb')
+ base.FindEntryByNode = MyFindEntryByNode
+ base.section = base
+ self.found = None
+ base.offset_from_elf = [self.GetNode(), 'start', 0]
+ with self.assertRaises(ValueError) as e:
+ base.lookup_offset()
+ self.assertIn("Cannot find entry for node 'u-boot'", str(e.exception))
+
+ self.found = base
+ with self.assertRaises(ValueError) as e:
+ base.lookup_offset()
+ self.assertIn("Need elf-fname property 'u-boot'", str(e.exception))
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tools/binman/etype/_testing.py b/tools/binman/etype/_testing.py
new file mode 100644
index 00000000000..e092d98ce15
--- /dev/null
+++ b/tools/binman/etype/_testing.py
@@ -0,0 +1,168 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for testing purposes. Not used in real images.
+#
+
+from collections import OrderedDict
+
+from binman.entry import Entry, EntryArg
+from dtoc import fdt_util
+from u_boot_pylib import tools
+
+
+class Entry__testing(Entry):
+ """A fake entry used for testing
+
+ This entry should not be used in normal images. It is a special entry with
+ strange features used for testing.
+
+ Properties / Entry arguments
+ test-str-fdt: Test string, normally in the node
+ test-int-fdt: Test integer, normally in the node
+ test-str-arg: Test string, normally in the entry arguments
+ test-int-arg: Test integer, normally in the entry arguments
+
+ The entry has a single 'a' byte as its contents. Operation is controlled by
+ a number of properties in the node, as follows:
+
+ Properties:
+ return-invalid-entry: Return an invalid entry from GetOffsets()
+ return-unknown-contents: Refuse to provide any contents (to cause a
+ failure)
+ bad-update-contents: Return a larger size in ProcessContents
+ bad-shrink-contents: Return a smaller size in ProcessContents
+ never-complete-process-fdt: Refuse to process the FDT (to cause a
+ failure)
+ require-args: Require that all used args are present (generating an
+ error if not)
+ force-bad-datatype: Force a call to GetEntryArgsOrProps() with a bad
+ data type (generating an error)
+ require-bintool-for-contents: Raise an error if the specified
+ bintool isn't usable in ObtainContents()
+ require-bintool-for-pack: Raise an error if the specified
+ bintool isn't usable in Pack()
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+
+ def ReadNode(self):
+ super().ReadNode()
+ self.return_invalid_entry = fdt_util.GetBool(self._node,
+ 'return-invalid-entry')
+ self.return_unknown_contents = fdt_util.GetBool(self._node,
+ 'return-unknown-contents')
+ self.bad_update_contents = fdt_util.GetBool(self._node,
+ 'bad-update-contents')
+ self.bad_shrink_contents = fdt_util.GetBool(self._node,
+ 'bad-shrink-contents')
+ self.return_contents_once = fdt_util.GetBool(self._node,
+ 'return-contents-once')
+ self.bad_update_contents_twice = fdt_util.GetBool(self._node,
+ 'bad-update-contents-twice')
+ self.return_contents_later = fdt_util.GetBool(self._node,
+ 'return-contents-later')
+ self.set_to_absent = fdt_util.GetBool(self._node, 'set-to-absent')
+
+ # Set to True when the entry is ready to process the FDT.
+ self.process_fdt_ready = False
+ self.never_complete_process_fdt = fdt_util.GetBool(self._node,
+ 'never-complete-process-fdt')
+ self.require_args = fdt_util.GetBool(self._node, 'require-args')
+
+ # This should be picked up by GetEntryArgsOrProps()
+ self.test_existing_prop = 'existing'
+ self.force_bad_datatype = fdt_util.GetBool(self._node,
+ 'force-bad-datatype')
+ (self.test_str_fdt, self.test_str_arg, self.test_int_fdt,
+ self.test_int_arg, existing) = self.GetEntryArgsOrProps([
+ EntryArg('test-str-fdt', str),
+ EntryArg('test-str-arg', str),
+ EntryArg('test-int-fdt', int),
+ EntryArg('test-int-arg', int),
+ EntryArg('test-existing-prop', str)], self.require_args)
+ if self.force_bad_datatype:
+ self.GetEntryArgsOrProps([EntryArg('test-bad-datatype-arg', bool)])
+ self.return_contents = True
+ self.contents = b'aa'
+
+ # Set to the required bintool when collecting bintools.
+ self.bintool_for_contents = None
+ self.require_bintool_for_contents = fdt_util.GetString(self._node,
+ 'require-bintool-for-contents')
+ if self.require_bintool_for_contents == '':
+ self.require_bintool_for_contents = '_testing'
+
+ self.bintool_for_pack = None
+ self.require_bintool_for_pack = fdt_util.GetString(self._node,
+ 'require-bintool-for-pack')
+ if self.require_bintool_for_pack == '':
+ self.require_bintool_for_pack = '_testing'
+
+ def Pack(self, offset):
+ """Figure out how to pack the entry into the section"""
+ if self.require_bintool_for_pack:
+ if self.bintool_for_pack is None:
+ self.Raise("Required bintool unusable in Pack()")
+ return super().Pack(offset)
+
+ def ObtainContents(self, fake_size=0):
+ if self.return_unknown_contents or not self.return_contents:
+ return False
+ if self.return_contents_later:
+ self.return_contents_later = False
+ return False
+ self.data = self.contents
+ self.contents_size = len(self.data)
+ if self.return_contents_once:
+ self.return_contents = False
+ if self.require_bintool_for_contents:
+ if self.bintool_for_contents is None:
+ self.Raise("Required bintool unusable in ObtainContents()")
+ if self.set_to_absent:
+ self.mark_absent('for testing purposes')
+ return True
+
+ def GetOffsets(self):
+ if self.return_invalid_entry:
+ return {'invalid-entry': [1, 2]}
+ return {}
+
+ def ProcessContents(self):
+ data = self.contents
+ if self.bad_update_contents:
+ # Request to update the contents with something larger, to cause a
+ # failure.
+ if self.bad_update_contents_twice:
+ data = self.data + b'a'
+ else:
+ data = b'aaa'
+ return self.ProcessContentsUpdate(data)
+ if self.bad_shrink_contents:
+ # Request to update the contents with something smaller, to cause a
+ # failure.
+ data = b'a'
+ return self.ProcessContentsUpdate(data)
+ return True
+
+ def ProcessFdt(self, fdt):
+ """Force reprocessing the first time"""
+ ready = self.process_fdt_ready
+ if not self.never_complete_process_fdt:
+ self.process_fdt_ready = True
+ return ready
+
+ def AddBintools(self, btools):
+ """Add the bintools used by this entry type"""
+ if self.require_bintool_for_contents is not None:
+ self.bintool_for_contents = self.AddBintool(btools,
+ self.require_bintool_for_contents)
+ if self.require_bintool_for_pack is not None:
+ self.bintool_for_pack = self.AddBintool(btools,
+ self.require_bintool_for_pack)
diff --git a/tools/binman/etype/atf_bl31.py b/tools/binman/etype/atf_bl31.py
new file mode 100644
index 00000000000..2041da416c9
--- /dev/null
+++ b/tools/binman/etype/atf_bl31.py
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2020 Google LLC
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for ARM Trusted Firmware binary blob
+#
+
+from binman.etype.blob_named_by_arg import Entry_blob_named_by_arg
+
+class Entry_atf_bl31(Entry_blob_named_by_arg):
+ """ARM Trusted Firmware (ATF) BL31 blob
+
+ Properties / Entry arguments:
+ - atf-bl31-path: Filename of file to read into entry. This is typically
+ called bl31.bin or bl31.elf
+
+ This entry holds the run-time firmware, typically started by U-Boot SPL.
+ See the U-Boot README for your architecture or board for how to use it. See
+ https://github.com/ARM-software/arm-trusted-firmware for more information
+ about ATF.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node, 'atf-bl31')
+ self.external = True
diff --git a/tools/binman/etype/atf_fip.py b/tools/binman/etype/atf_fip.py
new file mode 100644
index 00000000000..73a3f85b9f4
--- /dev/null
+++ b/tools/binman/etype/atf_fip.py
@@ -0,0 +1,273 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2019 Google LLC
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for the ARM Trusted Firmware's Firmware Image Package (FIP)
+# format
+
+from collections import OrderedDict
+
+from binman.entry import Entry
+from binman.etype.section import Entry_section
+from binman.fip_util import FIP_TYPES, FipReader, FipWriter, UUID_LEN
+from dtoc import fdt_util
+from u_boot_pylib import tools
+
+class Entry_atf_fip(Entry_section):
+ """ARM Trusted Firmware's Firmware Image Package (FIP)
+
+ A FIP_ provides a way to group binaries in a firmware image, used by ARM's
+ Trusted Firmware A (TF-A) code. It is a simple format consisting of a
+ table of contents with information about the type, offset and size of the
+ binaries in the FIP. It is quite similar to FMAP, with the major difference
+ that it uses UUIDs to indicate the type of each entry.
+
+ Note: It is recommended to always add an fdtmap to every image, as well as
+ any FIPs so that binman and other tools can access the entire image
+ correctly.
+
+ The UUIDs correspond to useful names in `fiptool`, provided by ATF to
+ operate on FIPs. Binman uses these names to make it easier to understand
+ what is going on, although it is possible to provide a UUID if needed.
+
+ The contents of the FIP are defined by subnodes of the atf-fip entry, e.g.::
+
+ atf-fip {
+ soc-fw {
+ filename = "bl31.bin";
+ };
+
+ scp-fwu-cfg {
+ filename = "bl2u.bin";
+ };
+
+ u-boot {
+ fip-type = "nt-fw";
+ };
+ };
+
+ This describes a FIP with three entries: soc-fw, scp-fwu-cfg and nt-fw.
+ You can use normal (non-external) binaries like U-Boot simply by adding a
+ FIP type, with the `fip-type` property, as above.
+
+ Since FIP exists to bring blobs together, Binman assumes that all FIP
+ entries are external binaries. If a binary may not exist, you can use the
+ `--allow-missing` flag to Binman, in which case the image is still created,
+ even though it will not actually work.
+
+ The size of the FIP depends on the size of the binaries. There is currently
+ no way to specify a fixed size. If the `atf-fip` node has a `size` entry,
+ this affects the space taken up by the `atf-fip` entry, but the FIP itself
+ does not expand to use that space.
+
+ Some other FIP features are available with Binman. The header and the
+ entries have 64-bit flag words. The flag values do not seem to be defined
+ anywhere, but you can use `fip-hdr-flags` and `fip-flags` to set the values
+ of the header and entries respectively.
+
+ FIP entries can be aligned to a particular power-of-two boundary. Use
+ fip-align for this.
+
+ Binman only understands the entry types that are included in its
+ implementation. It is possible to specify a 16-byte UUID instead, using the
+ fip-uuid property. In this case Binman doesn't know what its type is, so
+ just uses the UUID. See the `u-boot` node in this example::
+
+ binman {
+ atf-fip {
+ fip-hdr-flags = /bits/ 64 <0x123>;
+ fip-align = <16>;
+ soc-fw {
+ fip-flags = /bits/ 64 <0x456>;
+ filename = "bl31.bin";
+ };
+
+ scp-fwu-cfg {
+ filename = "bl2u.bin";
+ };
+
+ u-boot {
+ fip-uuid = [fc 65 13 92 4a 5b 11 ec
+ 94 35 ff 2d 1c fc 79 9c];
+ };
+ };
+ fdtmap {
+ };
+ };
+
+ Binman allows reading and updating FIP entries after the image is created,
+ provided that an fdtmap is present too. Updates which change the size of a
+ FIP entry will cause it to be expanded or contracted as needed.
+
+ Properties for top-level atf-fip node
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ fip-hdr-flags (64 bits)
+ Sets the flags for the FIP header.
+
+ Properties for subnodes
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ fip-type (str)
+ FIP type to use for this entry. This is needed if the entry
+ name is not a valid type. Valid types are defined in `fip_util.py`.
+ The FIP type defines the UUID that is used (they map 1:1).
+
+ fip-uuid (16 bytes)
+ If there is no FIP-type name defined, or it is not supported by Binman,
+ this property sets the UUID. It should be a 16-byte value, following the
+ hex digits of the UUID.
+
+ fip-flags (64 bits)
+ Set the flags for a FIP entry. Use in one of the subnodes of the
+ `atf-fip` entry.
+
+ fip-align
+ Set the alignment for a FIP entry. FIP entries can be aligned to a
+ particular power-of-two boundary. The default is 1.
+
+ Adding new FIP-entry types
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ When new FIP entries are defined by TF-A they appear in the
+ `TF-A source tree`_. You can use `fip_util.py` to update Binman to support
+ new types, then `send a patch`_ to the U-Boot mailing list. There are two
+ source files that the tool examines:
+
+ - `include/tools_share/firmware_image_package.h` has the UUIDs
+ - `tools/fiptool/tbbr_config.c` has the name and description for each UUID
+
+ To run the tool::
+
+ $ tools/binman/fip_util.py -s /path/to/arm-trusted-firmware
+ Warning: UUID 'UUID_NON_TRUSTED_WORLD_KEY_CERT' is not mentioned in tbbr_config.c file
+ Existing code in 'tools/binman/fip_util.py' is up-to-date
+
+ If it shows there is an update, it writes a new version of `fip_util.py`
+ to `fip_util.py.out`. You can change the output file using the `-i` flag.
+ If you have a problem, use `-D` to enable traceback debugging.
+
+ FIP commentary
+ ~~~~~~~~~~~~~~
+
+ As a side effect of use of UUIDs, FIP does not support multiple
+ entries of the same type, such as might be used to store fonts or graphics
+ icons, for example. For verified boot it could be used for each part of the
+ image (e.g. separate FIPs for A and B) but cannot describe the whole
+ firmware image. As with FMAP there is no hierarchy defined, although FMAP
+ works around this by having 'section' areas which encompass others. A
+ similar workaround would be possible with FIP but is not currently defined.
+
+ It is recommended to always add an fdtmap to every image, as well as any
+ FIPs so that binman and other tools can access the entire image correctly.
+
+ .. _FIP: https://trustedfirmware-a.readthedocs.io/en/latest/design/firmware-design.html#firmware-image-package-fip
+ .. _`TF-A source tree`: https://git.trustedfirmware.org/TF-A/trusted-firmware-a.git
+ .. _`send a patch`: https://www.denx.de/wiki/U-Boot/Patches
+ """
+ def __init__(self, section, etype, node):
+ # Put this here to allow entry-docs and help to work without libfdt
+ global state
+ from binman import state
+
+ super().__init__(section, etype, node)
+ self.align_default = None
+ self._entries = OrderedDict()
+ self.reader = None
+
+ def ReadNode(self):
+ """Read properties from the atf-fip node"""
+ super().ReadNode()
+ self._pad_byte = fdt_util.GetInt(self._node, 'pad-byte', 0)
+ self._fip_flags = fdt_util.GetInt64(self._node, 'fip-hdr-flags', 0)
+ self._fip_align = fdt_util.GetInt(self._node, 'fip-align', 1)
+ if tools.not_power_of_two(self._fip_align):
+ raise ValueError("Node '%s': FIP alignment %s must be a power of two" %
+ (self._node.path, self._fip_align))
+ self.ReadEntries()
+
+ def ReadEntries(self):
+ """Read the subnodes to find out what should go in this FIP"""
+ for node in self._node.subnodes:
+ fip_type = None
+ etype = None
+ if node.name in FIP_TYPES:
+ fip_type = node.name
+ etype = 'blob-ext'
+
+ entry = Entry.Create(self, node, etype)
+ entry._fip_uuid = fdt_util.GetBytes(node, 'fip-uuid', UUID_LEN)
+ if not fip_type and not entry._fip_uuid:
+ fip_type = fdt_util.GetString(node, 'fip-type')
+ if not fip_type:
+ self.Raise("Must provide a fip-type (node name '%s' is not a known FIP type)" %
+ node.name)
+
+ entry._fip_type = fip_type
+ entry._fip_flags = fdt_util.GetInt64(node, 'fip-flags', 0)
+ entry.ReadNode()
+ entry._fip_name = node.name
+ self._entries[entry._fip_name] = entry
+
+ def BuildSectionData(self, required):
+ """Override this function to create a custom format for the entries
+
+ Arguments:
+ required (bool): True if the data must be valid, False if it may
+ be missing (entry.GetData() returns None)
+
+ Returns:
+ bytes: Data obtained, or None if not available
+ """
+ fip = FipWriter(self._fip_flags, self._fip_align)
+ for entry in self._entries.values():
+ # First get the input data and put it in an entry. If not available,
+ # try later.
+ entry_data = entry.GetData(required)
+ if not required and entry_data is None:
+ return None
+ fent = fip.add_entry(entry._fip_type or entry._fip_uuid, entry_data,
+ entry._fip_flags)
+ if fent:
+ entry._fip_entry = fent
+ data = fip.get_data()
+ return data
+
+ def SetImagePos(self, image_pos):
+ """Override this function to set all the entry properties from FIP
+
+ We can only do this once image_pos is known
+
+ Args:
+ image_pos: Position of this entry in the image
+ """
+ super().SetImagePos(image_pos)
+
+ # Now update the entries with info from the FIP entries
+ for entry in self._entries.values():
+ fent = entry._fip_entry
+ entry.size = fent.size
+ entry.offset = fent.offset
+ entry.image_pos = self.image_pos + entry.offset
+
+ def ReadChildData(self, child, decomp=True, alt_format=None):
+ if not self.reader:
+ self.fip_data = super().ReadData(True)
+ self.reader = FipReader(self.fip_data)
+ reader = self.reader
+
+ # It is tricky to obtain the data from a FIP entry since it is indexed
+ # by its UUID.
+ fent = reader.get_entry(child._fip_type or child._fip_uuid)
+ return fent.data
+
+ # Note:
+ # It is also possible to extract it using the offsets directly, but this
+ # seems less FIP-friendly:
+ # return self.fip_data[child.offset:child.offset + child.size]
+
+ def WriteChildData(self, child):
+ # Recreate the data structure, leaving the data for this child alone,
+ # so that child.data is used to pack into the FIP.
+ self.ObtainContents(skip_entry=child)
+ return super().WriteChildData(child)
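For orientation, BuildSectionData() above drives the fip_util API directly. A minimal sketch of the same calls in isolation, assuming the FipWriter interface as used in this file; the filenames are hypothetical::

    from binman.fip_util import FipWriter
    from u_boot_pylib import tools

    # Header flags and alignment correspond to fip-hdr-flags and fip-align
    fip = FipWriter(0, 16)
    fip.add_entry('soc-fw', tools.read_file('bl31.bin'), 0)
    fip.add_entry('nt-fw', tools.read_file('u-boot.bin'), 0)
    tools.write_file('fip.bin', fip.get_data())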
diff --git a/tools/binman/etype/blob.py b/tools/binman/etype/blob.py
new file mode 100644
index 00000000000..064fae50365
--- /dev/null
+++ b/tools/binman/etype/blob.py
@@ -0,0 +1,108 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for blobs, which are binary objects read from files
+#
+
+from binman.entry import Entry
+from binman import state
+from dtoc import fdt_util
+from u_boot_pylib import tools
+from u_boot_pylib import tout
+
+class Entry_blob(Entry):
+ """Arbitrary binary blob
+
+ Note: This should not be used by itself. It is normally used as a parent
+ class by other entry types.
+
+ Properties / Entry arguments:
+ - filename: Filename of file to read into entry
+ - compress: Compression algorithm to use:
+ none: No compression
+ lz4: Use lz4 compression (via 'lz4' command-line utility)
+
+ This entry reads data from a file and places it in the entry. The
+ default filename is often specified by the subclass. See for
+ example the 'u-boot' entry which provides the filename 'u-boot.bin'.
+
+ If compression is enabled, an extra 'uncomp-size' property is written to
+ the node (if enabled with -u) which provides the uncompressed size of the
+ data.
+ """
+ def __init__(self, section, etype, node, auto_write_symbols=False):
+ super().__init__(section, etype, node,
+ auto_write_symbols=auto_write_symbols)
+ self._filename = fdt_util.GetString(self._node, 'filename', self.etype)
+ self.elf_fname = fdt_util.GetString(self._node, 'elf-filename',
+ self.elf_fname)
+ self.elf_base_sym = fdt_util.GetString(self._node, 'elf-base-sym')
+ if not self.auto_write_symbols:
+ if fdt_util.GetBool(self._node, 'write-symbols'):
+ self.auto_write_symbols = True
+
+ def ObtainContents(self, fake_size=0):
+ self._filename = self.GetDefaultFilename()
+ self._pathname = tools.get_input_filename(self._filename,
+ self.external and (self.optional or self.section.GetAllowMissing()))
+ # Allow the file to be missing
+ if not self._pathname:
+ self._pathname, faked = self.check_fake_fname(self._filename,
+ fake_size)
+ self.missing = True
+ if not faked:
+ self.SetContents(b'')
+ return True
+
+ self.ReadBlobContents()
+ return True
+
+ def ReadFileContents(self, pathname):
+ """Read blob contents into memory
+
+ This function compresses the data before returning if needed.
+
+ We assume the data is small enough to fit into memory. If this
+ is used for a large filesystem image that might not be true.
+ In that case, Image.BuildImage() could be adjusted to use a
+ new Entry method which can read in chunks. Then we could copy
+ the data in chunks and avoid reading it all at once. For now
+ this seems like an unnecessary complication.
+
+ Args:
+ pathname (str): Pathname to read from
+
+ Returns:
+ bytes: Data read
+ """
+ state.TimingStart('read')
+ indata = tools.read_file(pathname)
+ state.TimingAccum('read')
+ state.TimingStart('compress')
+ data = self.CompressData(indata)
+ state.TimingAccum('compress')
+ return data
+
+ def ReadBlobContents(self):
+ data = self.ReadFileContents(self._pathname)
+ self.SetContents(data)
+ return True
+
+ def GetDefaultFilename(self):
+ return self._filename
+
+ def ProcessContents(self):
+ # The blob may have changed due to WriteSymbols()
+ return self.ProcessContentsUpdate(self.data)
+
+ def CheckFakedBlobs(self, faked_blobs_list):
+ """Check if any entries in this section have faked external blobs
+
+ If there are faked blobs, the entries are added to the list
+
+ Args:
+ faked_blobs_list: List of Entry objects to be added to
+ """
+ if self.faked:
+ faked_blobs_list.append(self)
diff --git a/tools/binman/etype/blob_dtb.py b/tools/binman/etype/blob_dtb.py
new file mode 100644
index 00000000000..d543de9f759
--- /dev/null
+++ b/tools/binman/etype/blob_dtb.py
@@ -0,0 +1,91 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for U-Boot device tree files
+#
+
+from binman.entry import Entry
+from binman.etype.blob import Entry_blob
+from dtoc import fdt_util
+import struct
+
+# This is imported if needed
+state = None
+
+class Entry_blob_dtb(Entry_blob):
+ """A blob that holds a device tree
+
+ This is a blob containing a device tree. The contents of the blob are
+ obtained from the list of available device-tree files, managed by the
+ 'state' module.
+
+ Additional attributes:
+ prepend: Header used (e.g. 'length')
+ """
+ def __init__(self, section, etype, node):
+ # Put this here to allow entry-docs and help to work without libfdt
+ global state
+ from binman import state
+
+ super().__init__(section, etype, node)
+ self.prepend = None
+
+ def ReadNode(self):
+ super().ReadNode()
+ self.prepend = fdt_util.GetString(self._node, 'prepend')
+ if self.prepend and self.prepend not in ['length']:
+ self.Raise("Invalid prepend in '%s': '%s'" %
+ (self._node.name, self.prepend))
+
+ def ObtainContents(self, fake_size=0):
+ """Get the device-tree from the list held by the 'state' module"""
+ self._filename = self.GetDefaultFilename()
+ self._pathname, _ = state.GetFdtContents(self.GetFdtEtype())
+ return super().ReadBlobContents()
+
+ def ProcessContents(self):
+ """Re-read the DTB contents so that we get any calculated properties"""
+ _, indata = state.GetFdtContents(self.GetFdtEtype())
+
+ if self.compress == 'zstd' and self.prepend != 'length':
+ self.Raise('The zstd compression requires a length header')
+
+ data = self.CompressData(indata)
+ return self.ProcessContentsUpdate(data)
+
+ def GetFdtEtype(self):
+ """Get the entry type of this device tree
+
+ This can be 'u-boot-dtb', 'u-boot-spl-dtb' or 'u-boot-tpl-dtb'
+
+ Returns:
+ Entry type if any, e.g. 'u-boot-dtb'
+ """
+ return None
+
+ def GetFdts(self):
+ fname = self.GetDefaultFilename()
+ return {self.GetFdtEtype(): [self, fname]}
+
+ def WriteData(self, data, decomp=True):
+ ok = super().WriteData(data, decomp)
+
+ # Update the state module, since it has the authoritative record of the
+ # device trees used. If we don't do this, then state.GetFdtContents()
+ # will still return the old contents
+ state.UpdateFdtContents(self.GetFdtEtype(), data)
+ return ok
+
+ def CompressData(self, indata):
+ data = super().CompressData(indata)
+ if self.prepend == 'length':
+ hdr = struct.pack('<I', len(data))
+ data = hdr + data
+ return data
+
+ def DecompressData(self, indata):
+ if self.prepend == 'length':
+ data_len = struct.unpack('<I', indata[:4])[0]
+ indata = indata[4:4 + data_len]
+ data = super().DecompressData(indata)
+ return data
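The 'prepend = "length"' handling above frames the (possibly compressed) payload with a 4-byte little-endian size, which DecompressData() strips again. A standalone sketch of that framing::

    import struct

    def add_length_header(payload):
        # Prepend a 4-byte little-endian length, as CompressData() does
        return struct.pack('<I', len(payload)) + payload

    def strip_length_header(framed):
        # Recover the payload, as DecompressData() does
        data_len = struct.unpack('<I', framed[:4])[0]
        return framed[4:4 + data_len]

    framed = add_length_header(b'compressed-bytes')
    assert strip_length_header(framed) == b'compressed-bytes'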
diff --git a/tools/binman/etype/blob_ext.py b/tools/binman/etype/blob_ext.py
new file mode 100644
index 00000000000..ca265307380
--- /dev/null
+++ b/tools/binman/etype/blob_ext.py
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for external blobs, not built by U-Boot
+#
+
+import os
+
+from binman.etype.blob import Entry_blob
+from dtoc import fdt_util
+from u_boot_pylib import tools
+from u_boot_pylib import tout
+
+class Entry_blob_ext(Entry_blob):
+ """Externally built binary blob
+
+ Note: This should not be used by itself. It is normally used as a parent
+ class by other entry types.
+
+ If the file providing this blob is missing, binman can optionally ignore it
+ and produce a broken image with a warning.
+
+ See 'blob' for Properties / Entry arguments.
+ """
+ def __init__(self, section, etype, node):
+ Entry_blob.__init__(self, section, etype, node)
+ self.external = True
diff --git a/tools/binman/etype/blob_ext_list.py b/tools/binman/etype/blob_ext_list.py
new file mode 100644
index 00000000000..1bfcf6733a7
--- /dev/null
+++ b/tools/binman/etype/blob_ext_list.py
@@ -0,0 +1,59 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for a list of external blobs, not built by U-Boot
+#
+
+import os
+
+from binman.etype.blob import Entry_blob
+from dtoc import fdt_util
+from u_boot_pylib import tools
+from u_boot_pylib import tout
+
+class Entry_blob_ext_list(Entry_blob):
+ """List of externally built binary blobs
+
+ This is like blob-ext except that a number of blobs can be provided,
+ typically with some sort of relationship, e.g. all are DDR parameters.
+
+ If any of the external files needed by this list is missing, binman can
+ optionally ignore it and produce a broken image with a warning.
+
+ Args:
+ filenames: List of filenames to read and include
+ """
+ def __init__(self, section, etype, node):
+ Entry_blob.__init__(self, section, etype, node)
+ self.external = True
+
+ def ReadNode(self):
+ super().ReadNode()
+ self._filenames = fdt_util.GetStringList(self._node, 'filenames')
+ self._pathnames = []
+
+ def ObtainContents(self):
+ missing = False
+ pathnames = []
+ for fname in self._filenames:
+ fname, _ = self.check_fake_fname(fname)
+ pathname = tools.get_input_filename(
+ fname, self.external and self.section.GetAllowMissing())
+ # Allow the file to be missing
+ if not pathname:
+ missing = True
+ pathnames.append(pathname)
+ self._pathnames = pathnames
+
+ if missing:
+ self.SetContents(b'')
+ self.missing = True
+ return True
+
+ data = bytearray()
+ for pathname in pathnames:
+ data += self.ReadFileContents(pathname)
+
+ self.SetContents(data)
+ return True
diff --git a/tools/binman/etype/blob_named_by_arg.py b/tools/binman/etype/blob_named_by_arg.py
new file mode 100644
index 00000000000..7c486b2dc91
--- /dev/null
+++ b/tools/binman/etype/blob_named_by_arg.py
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for a blob where the filename comes from a property in the
+# node or an entry argument. The property is called '<blob_fname>-path' where
+# <blob_fname> is provided by the subclass using this entry type.
+
+from collections import OrderedDict
+
+from binman.etype.blob import Entry_blob
+from binman.entry import EntryArg
+
+
+class Entry_blob_named_by_arg(Entry_blob):
+ """A blob entry which gets its filename property from its subclass
+
+ Properties / Entry arguments:
+ - <xxx>-path: Filename containing the contents of this entry (optional,
+ defaults to None)
+
+ where <xxx> is the blob_fname argument to the constructor.
+
+ This entry cannot be used directly. Instead, it is used as a parent class
+ for another entry, which defines blob_fname. This parameter is used to
+ set the entry-arg or property containing the filename. The entry-arg or
+ property is in turn used to set the actual filename.
+
+ See cros_ec_rw for an example of this.
+ """
+ def __init__(self, section, etype, node, blob_fname, required=False):
+ super().__init__(section, etype, node)
+ filename, = self.GetEntryArgsOrProps(
+ [EntryArg('%s-path' % blob_fname, str)], required=required)
+ if filename:
+ self._filename = filename
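The atf-bl31 entry above and the cros-ec-rw entry below show how little a subclass needs. A minimal hypothetical subclass, using a made-up 'my-blob-path' entry argument::

    from binman.etype.blob_named_by_arg import Entry_blob_named_by_arg

    class Entry_my_blob(Entry_blob_named_by_arg):
        """Example blob whose filename comes from the 'my-blob-path' arg"""
        def __init__(self, section, etype, node):
            super().__init__(section, etype, node, 'my-blob', required=True)
            self.external = True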
diff --git a/tools/binman/etype/blob_phase.py b/tools/binman/etype/blob_phase.py
new file mode 100644
index 00000000000..951d9934050
--- /dev/null
+++ b/tools/binman/etype/blob_phase.py
@@ -0,0 +1,59 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2021 Google LLC
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type base class for U-Boot or SPL binary with devicetree
+#
+
+from binman.etype.section import Entry_section
+
+# This is imported if needed
+state = None
+
+class Entry_blob_phase(Entry_section):
+ """Section that holds a phase binary
+
+ This is a base class that should not normally be used directly. It is used
+ when converting a 'u-boot' entry automatically into a 'u-boot-expanded'
+ entry; similarly for SPL.
+ """
+ def __init__(self, section, etype, node, root_fname, dtb_file, bss_pad):
+ """Set up a new blob for a phase
+
+ This holds an executable for a U-Boot phase, optional BSS padding and
+ a devicetree
+
+ Args:
+ section: entry_Section object for this entry's parent
+ etype: Type of object
+ node: Node defining this entry
+ root_fname: Root filename for the binary ('u-boot',
+ 'spl/u-boot-spl', etc.)
+ dtb_file: Name of devicetree file ('u-boot.dtb', u-boot-spl.dtb',
+ etc.)
+ bss_pad: True to add BSS padding before the devicetree
+ """
+ # Put this here to allow entry-docs and help to work without libfdt
+ global state
+ from binman import state
+
+ super().__init__(section, etype, node)
+ self.root_fname = root_fname
+ self.dtb_file = dtb_file
+ self.bss_pad = bss_pad
+
+ def gen_entries(self):
+ """Create the subnodes"""
+ names = [self.root_fname + '-nodtb', self.root_fname + '-dtb']
+ if self.bss_pad:
+ names.insert(1, self.root_fname + '-bss-pad')
+ for name in names:
+ subnode = state.AddSubnode(self._node, name)
+
+ # Read entries again, now that we have some
+ self.ReadEntries()
+
+ # Propagate the no-write-symbols property
+ if self.no_write_symbols:
+ for entry in self._entries.values():
+ entry.no_write_symbols = True
diff --git a/tools/binman/etype/cbfs.py b/tools/binman/etype/cbfs.py
new file mode 100644
index 00000000000..575aa624f6c
--- /dev/null
+++ b/tools/binman/etype/cbfs.py
@@ -0,0 +1,303 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2019 Google LLC
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for a Coreboot Filesystem (CBFS)
+#
+
+from collections import OrderedDict
+
+from binman import cbfs_util
+from binman.cbfs_util import CbfsWriter
+from binman.entry import Entry
+from dtoc import fdt_util
+
+# This is imported if needed
+state = None
+
+class Entry_cbfs(Entry):
+ """Coreboot Filesystem (CBFS)
+
+ A CBFS provides a way to group files together. It has a simple directory
+ structure and allows the position of individual files to be set, since it is
+ designed to support execute-in-place in an x86 SPI-flash device. Where XIP
+ is not used, it supports compression and storing ELF files.
+
+ CBFS is used by coreboot as its way of organising SPI-flash contents.
+
+ The contents of the CBFS are defined by subnodes of the cbfs entry, e.g.::
+
+ cbfs {
+ size = <0x100000>;
+ u-boot {
+ cbfs-type = "raw";
+ };
+ u-boot-dtb {
+ cbfs-type = "raw";
+ };
+ };
+
+ This creates a CBFS 1MB in size with two files in it: u-boot.bin and u-boot.dtb.
+ Note that the size is required since binman does not support calculating it.
+ The contents of each entry is just what binman would normally provide if it
+ were not a CBFS node. A blob type can be used to import arbitrary files as
+ with the second subnode below::
+
+ cbfs {
+ size = <0x100000>;
+ u-boot {
+ cbfs-name = "BOOT";
+ cbfs-type = "raw";
+ };
+
+ dtb {
+ type = "blob";
+ filename = "u-boot.dtb";
+ cbfs-type = "raw";
+ cbfs-compress = "lz4";
+ cbfs-offset = <0x100000>;
+ };
+ };
+
+ This creates a CBFS 1MB in size with u-boot.bin (named "BOOT") and
+ u-boot.dtb (named "dtb" and compressed with the lz4 algorithm).
+
+
+ Properties supported in the top-level CBFS node:
+
+ cbfs-arch:
+ Defaults to "x86", but you can specify the architecture if needed.
+
+
+ Properties supported in the CBFS entry subnodes:
+
+ cbfs-name:
+ This is the name of the file created in CBFS. It defaults to the entry
+ name (which is the node name), but you can override it with this
+ property.
+
+ cbfs-type:
+ This is the CBFS file type. The following are supported:
+
+ raw:
+ This is a 'raw' file, although compression is supported. It can be
+ used to store any file in CBFS.
+
+ stage:
+ This is an ELF file that has been loaded (i.e. mapped to memory), so
+ appears in the CBFS as a flat binary. The input file must be an ELF
+ image, for example this puts "u-boot" (the ELF image) into a 'stage'
+ entry::
+
+ cbfs {
+ size = <0x100000>;
+ u-boot-elf {
+ cbfs-name = "BOOT";
+ cbfs-type = "stage";
+ };
+ };
+
+ You can use your own ELF file with something like::
+
+ cbfs {
+ size = <0x100000>;
+ something {
+ type = "blob";
+ filename = "cbfs-stage.elf";
+ cbfs-type = "stage";
+ };
+ };
+
+ As mentioned, the file is converted to a flat binary, so it is
+ equivalent to adding "u-boot.bin", for example, but with the load and
+ start addresses specified by the ELF. At present there is no option
+ to add a flat binary with a load/start address, similar to the
+ 'add-flat-binary' option in cbfstool.
+
+ cbfs-offset:
+ This is the offset of the file's data within the CBFS. It is used to
+ specify where the file should be placed in cases where a fixed position
+ is needed. Typical uses are for code which is not relocatable and must
+ execute in-place from a particular address. This works because SPI flash
+ is generally mapped into memory on x86 devices. The file header is
+ placed before this offset so that the data start lines up exactly with
+ the chosen offset. If this property is not provided, then the file is
+ placed in the next available spot.
+
+ The current implementation supports only a subset of CBFS features. It does
+ not support other file types (e.g. payload), adding multiple files (like the
+ 'files' entry with a pattern supported by binman), putting files at a
+ particular offset in the CBFS and a few other things.
+
+ Of course binman can create images containing multiple CBFSs, simply by
+ defining these in the binman config::
+
+
+ binman {
+ size = <0x800000>;
+ cbfs {
+ offset = <0x100000>;
+ size = <0x100000>;
+ u-boot {
+ cbfs-type = "raw";
+ };
+ u-boot-dtb {
+ cbfs-type = "raw";
+ };
+ };
+
+ cbfs2 {
+ offset = <0x700000>;
+ size = <0x100000>;
+ u-boot {
+ cbfs-type = "raw";
+ };
+ u-boot-dtb {
+ cbfs-type = "raw";
+ };
+ image {
+ type = "blob";
+ filename = "image.jpg";
+ };
+ };
+ };
+
+ This creates an 8MB image with two CBFSs, one at offset 1MB, one at 7MB,
+ both of size 1MB.
+ """
+ def __init__(self, section, etype, node):
+ # Put this here to allow entry-docs and help to work without libfdt
+ global state
+ from binman import state
+
+ super().__init__(section, etype, node)
+ self.align_default = None
+ self._entries = OrderedDict()
+ self.reader = None
+
+ def ReadNode(self):
+ """Read properties from the atf-fip node"""
+ super().ReadNode()
+ self._cbfs_arg = fdt_util.GetString(self._node, 'cbfs-arch', 'x86')
+ self.ReadEntries()
+
+ def ReadEntries(self):
+ """Read the subnodes to find out what should go in this CBFS"""
+ for node in self._node.subnodes:
+ entry = Entry.Create(self, node)
+ entry.ReadNode()
+ entry._cbfs_name = fdt_util.GetString(node, 'cbfs-name', entry.name)
+ entry._type = fdt_util.GetString(node, 'cbfs-type')
+ compress = fdt_util.GetString(node, 'cbfs-compress', 'none')
+ entry._cbfs_offset = fdt_util.GetInt(node, 'cbfs-offset')
+ entry._cbfs_compress = cbfs_util.find_compress(compress)
+ if entry._cbfs_compress is None:
+ self.Raise("Invalid compression in '%s': '%s'" %
+ (node.name, compress))
+ self._entries[entry._cbfs_name] = entry
+
+ def ObtainCfile(self, cbfs, entry):
+ # First get the input data and put it in a file. If not available,
+ # try later.
+ data = entry.GetData()
+ cfile = None
+ if entry._type == 'raw':
+ cfile = cbfs.add_file_raw(entry._cbfs_name, data,
+ entry._cbfs_offset,
+ entry._cbfs_compress)
+ elif entry._type == 'stage':
+ cfile = cbfs.add_file_stage(entry._cbfs_name, data,
+ entry._cbfs_offset)
+ else:
+ entry.Raise("Unknown cbfs-type '%s' (use 'raw', 'stage')" %
+ entry._type)
+ return cfile
+
+ def ObtainContents(self, skip_entry=None):
+ arch = cbfs_util.find_arch(self._cbfs_arg)
+ if arch is None:
+ self.Raise("Invalid architecture '%s'" % self._cbfs_arg)
+ if self.size is None:
+ self.Raise("'cbfs' entry must have a size property")
+ cbfs = CbfsWriter(self.size, arch)
+ for entry in self._entries.values():
+ if entry != skip_entry and not entry.ObtainContents():
+ return False
+ cfile = self.ObtainCfile(cbfs, entry)
+ if cfile:
+ entry._cbfs_file = cfile
+ data = cbfs.get_data()
+ self.SetContents(data)
+ return True
+
+ def SetImagePos(self, image_pos):
+ """Override this function to set all the entry properties from CBFS
+
+ We can only do this once image_pos is known
+
+ Args:
+ image_pos: Position of this entry in the image
+ """
+ super().SetImagePos(image_pos)
+
+ # Now update the entries with info from the CBFS entries
+ for entry in self._entries.values():
+ cfile = entry._cbfs_file
+ entry.size = cfile.data_len
+ entry.offset = cfile.calced_cbfs_offset
+ entry.image_pos = self.image_pos + entry.offset
+ if entry._cbfs_compress:
+ entry.uncomp_size = cfile.memlen
+
+ def AddMissingProperties(self, have_image_pos):
+ super().AddMissingProperties(have_image_pos)
+ for entry in self._entries.values():
+ entry.AddMissingProperties(have_image_pos)
+ if entry._cbfs_compress:
+ state.AddZeroProp(entry._node, 'uncomp-size')
+ # Store the 'compress' property, since we don't look at
+ # 'cbfs-compress' in Entry.ReadData()
+ state.AddString(entry._node, 'compress',
+ cbfs_util.compress_name(entry._cbfs_compress))
+
+ def SetCalculatedProperties(self):
+ """Set the value of device-tree properties calculated by binman"""
+ super().SetCalculatedProperties()
+ for entry in self._entries.values():
+ state.SetInt(entry._node, 'offset', entry.offset)
+ state.SetInt(entry._node, 'size', entry.size)
+ state.SetInt(entry._node, 'image-pos', entry.image_pos)
+ if entry.uncomp_size is not None:
+ state.SetInt(entry._node, 'uncomp-size', entry.uncomp_size)
+
+ def ListEntries(self, entries, indent):
+ """Override this method to list all files in the section"""
+ super().ListEntries(entries, indent)
+ for entry in self._entries.values():
+ entry.ListEntries(entries, indent + 1)
+
+ def GetEntries(self):
+ return self._entries
+
+ def ReadData(self, decomp=True, alt_format=None):
+ data = super().ReadData(True, alt_format)
+ return data
+
+ def ReadChildData(self, child, decomp=True, alt_format=None):
+ if not self.reader:
+ data = super().ReadData(True, alt_format)
+ self.reader = cbfs_util.CbfsReader(data)
+ reader = self.reader
+ cfile = reader.files.get(child.name)
+ return cfile.data if decomp else cfile.orig_data
+
+ def WriteChildData(self, child):
+ # Recreate the data structure, leaving the data for this child alone,
+ # so that child.data is used to pack into the CBFS.
+ self.ObtainContents(skip_entry=child)
+ return super().WriteChildData(child)
+
+ def AddBintools(self, btools):
+ super().AddBintools(btools)
+ for entry in self._entries.values():
+ entry.AddBintools(btools)
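For orientation, ObtainContents() and ObtainCfile() above drive the cbfs_util API directly. A minimal sketch of the same calls in isolation, assuming the CbfsWriter interface as used in this file; the filenames are hypothetical::

    from binman import cbfs_util
    from binman.cbfs_util import CbfsWriter
    from u_boot_pylib import tools

    arch = cbfs_util.find_arch('x86')
    cbfs = CbfsWriter(0x100000, arch)      # 1MB CBFS
    cbfs.add_file_raw('BOOT', tools.read_file('u-boot.bin'), None,
                      cbfs_util.find_compress('none'))
    tools.write_file('rom.cbfs', cbfs.get_data())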
diff --git a/tools/binman/etype/collection.py b/tools/binman/etype/collection.py
new file mode 100644
index 00000000000..c532aafe3e7
--- /dev/null
+++ b/tools/binman/etype/collection.py
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2021 Google LLC
+# Written by Simon Glass <sjg@chromium.org>
+#
+
+# Support for a collection of entries from other parts of an image
+
+from collections import OrderedDict
+import os
+
+from binman.entry import Entry
+from dtoc import fdt_util
+
+class Entry_collection(Entry):
+ """An entry which contains a collection of other entries
+
+ Properties / Entry arguments:
+ - content: List of phandles to entries to include
+
+ This allows reusing the contents of other entries. The contents of the
+ listed entries are combined to form this entry. This serves as a useful
+ base class for entry types which need to process data from elsewhere in
+ the image, not necessarily child entries.
+
+ The entries can generally be anywhere in the same image, even if they are in
+ a different section from this entry.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+ self.content = fdt_util.GetPhandleList(self._node, 'content')
+ if not self.content:
+ self.Raise("Collection must have a 'content' property")
+
+ def GetContents(self, required):
+ """Get the contents of this entry
+
+ Args:
+ required: True if the data must be present, False if it is OK to
+ return None
+
+ Returns:
+ bytes content of the entry
+ """
+ # Join up all the data
+ self.Info('Getting contents, required=%s' % required)
+ data = bytearray()
+ for entry_phandle in self.content:
+ entry_data = self.section.GetContentsByPhandle(entry_phandle, self,
+ required)
+ if not required and entry_data is None:
+ self.Info('Contents not available yet')
+ # Data not available yet
+ return None
+ data += entry_data
+
+ self.Info('Returning contents size %x' % len(data))
+
+ return data
+
+ def ObtainContents(self):
+ data = self.GetContents(False)
+ if data is None:
+ return False
+ self.SetContents(data)
+ return True
+
+ def ProcessContents(self):
+ # The blob may have changed due to WriteSymbols()
+ data = self.GetContents(True)
+ return self.ProcessContentsUpdate(data)
diff --git a/tools/binman/etype/cros_ec_rw.py b/tools/binman/etype/cros_ec_rw.py
new file mode 100644
index 00000000000..bf676b2d1a7
--- /dev/null
+++ b/tools/binman/etype/cros_ec_rw.py
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for a Chromium OS EC image (read-write section)
+#
+
+from binman.etype.blob_named_by_arg import Entry_blob_named_by_arg
+
+class Entry_cros_ec_rw(Entry_blob_named_by_arg):
+ """A blob entry which contains a Chromium OS read-write EC image
+
+ Properties / Entry arguments:
+ - cros-ec-rw-path: Filename containing the EC image
+
+ This entry holds a Chromium OS EC (embedded controller) image, for use in
+ updating the EC on startup via software sync.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node, 'cros-ec-rw', required=True)
+ self.external = True
diff --git a/tools/binman/etype/efi_capsule.py b/tools/binman/etype/efi_capsule.py
new file mode 100644
index 00000000000..e3203717822
--- /dev/null
+++ b/tools/binman/etype/efi_capsule.py
@@ -0,0 +1,155 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2023 Linaro Limited
+#
+# Entry-type module for producing a EFI capsule
+#
+
+import os
+
+from binman.entry import Entry
+from binman.etype.section import Entry_section
+from dtoc import fdt_util
+from u_boot_pylib import tools
+
+def get_binman_test_guid(type_str):
+ """Get the test image GUID for binman
+
+ Based on the string passed to the function, return
+ the corresponding GUID.
+
+ Args:
+ type_str: Key value of the type of GUID to look for
+
+ Returns:
+ The actual GUID value (str)
+ """
+ TYPE_TO_GUID = {
+ 'binman-test' : '09d7cf52-0720-4710-91d1-08469b7fe9c8'
+ }
+
+ return TYPE_TO_GUID[type_str]
+
+class Entry_efi_capsule(Entry_section):
+ """Generate EFI capsules
+
+ The parameters needed for generation of the capsules can
+ be provided as properties in the entry.
+
+ Properties / Entry arguments:
+ - image-index: Unique number for identifying corresponding
+ payload image. Number between 1 and descriptor count, i.e.
+ the total number of firmware images that can be updated. Mandatory
+ property.
+ - image-guid: Image GUID which will be used for identifying the
+ updatable image on the board. Mandatory property.
+ - hardware-instance: Optional number for identifying unique
+ hardware instance of a device in the system. Default value of 0
+ for images where value is not to be used.
+ - fw-version: Value of image version that can be put on the capsule
+ through the Firmware Management Protocol (FMP) header.
+ - monotonic-count: Count used when signing an image.
+ - private-key: Path to PEM formatted .key private key file. Mandatory
+ property for generating signed capsules.
+ - public-key-cert: Path to PEM formatted .crt public key certificate
+ file. Mandatory property for generating signed capsules.
+ - oem-flags: OEM flags to be passed through the capsule header.
+
+ Since this is a subclass of Entry_section, all properties of the parent
+ class also apply here. Except for the properties stated as mandatory, the
+ rest of the properties are optional.
+
+ For more details on the description of the capsule format, and the capsule
+ update functionality, refer to Section 8.5 and Chapter 23 in the `UEFI
+ specification`_.
+
+ The capsule parameters like image index and image GUID are passed as
+ properties in the entry. The payload to be used in the capsule is to be
+ provided as a subnode of the capsule entry.
+
+ A typical capsule entry node would then look something like this::
+
+ capsule {
+ type = "efi-capsule";
+ image-index = <0x1>;
+ /* Image GUID for testing capsule update */
+ image-guid = SANDBOX_UBOOT_IMAGE_GUID;
+ hardware-instance = <0x0>;
+ private-key = "path/to/the/private/key";
+ public-key-cert = "path/to/the/public-key-cert";
+ oem-flags = <0x8000>;
+
+ u-boot {
+ };
+ };
+
+ In the above example, the capsule payload is the U-Boot image. The
+ capsule entry would read the contents of the payload and put them
+ into the capsule. Any external file can also be specified as the
+ payload using the blob-ext subnode.
+
+ .. _`UEFI specification`: https://uefi.org/sites/default/files/resources/UEFI_Spec_2_10_Aug29.pdf
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+ self.required_props = ['image-index', 'image-guid']
+ self.image_index = 0
+ self.image_guid = ''
+ self.hardware_instance = 0
+ self.monotonic_count = 0
+ self.fw_version = 0
+ self.oem_flags = 0
+ self.private_key = ''
+ self.public_key_cert = ''
+ self.auth = 0
+
+ def ReadNode(self):
+ super().ReadNode()
+
+ self.image_index = fdt_util.GetInt(self._node, 'image-index')
+ self.image_guid = fdt_util.GetString(self._node, 'image-guid')
+ self.fw_version = fdt_util.GetInt(self._node, 'fw-version')
+ self.hardware_instance = fdt_util.GetInt(self._node, 'hardware-instance')
+ self.monotonic_count = fdt_util.GetInt(self._node, 'monotonic-count')
+ self.oem_flags = fdt_util.GetInt(self._node, 'oem-flags')
+
+ self.private_key = fdt_util.GetString(self._node, 'private-key')
+ self.public_key_cert = fdt_util.GetString(self._node, 'public-key-cert')
+ if ((self.private_key and not self.public_key_cert) or (self.public_key_cert and not self.private_key)):
+ self.Raise('Both private key and public key certificate need to be provided')
+ elif not (self.private_key and self.public_key_cert):
+ self.auth = 0
+ else:
+ self.auth = 1
+
+ def BuildSectionData(self, required):
+ private_key = ''
+ public_key_cert = ''
+ if self.auth:
+ if not os.path.isabs(self.private_key):
+ private_key = tools.get_input_filename(self.private_key)
+ if not os.path.isabs(self.public_key_cert):
+ public_key_cert = tools.get_input_filename(self.public_key_cert)
+ data, payload, uniq = self.collect_contents_to_file(
+ self._entries.values(), 'capsule_in')
+ outfile = self._filename if self._filename else 'capsule.%s' % uniq
+ capsule_fname = tools.get_output_filename(outfile)
+ guid = self.image_guid
+ if self.image_guid == "binman-test":
+ guid = get_binman_test_guid('binman-test')
+
+ ret = self.mkeficapsule.generate_capsule(self.image_index,
+ guid,
+ self.hardware_instance,
+ payload,
+ capsule_fname,
+ private_key,
+ public_key_cert,
+ self.monotonic_count,
+ self.fw_version,
+ self.oem_flags)
+ if ret is not None:
+ os.remove(payload)
+ return tools.read_file(capsule_fname)
+
+ def AddBintools(self, btools):
+ self.mkeficapsule = self.AddBintool(btools, 'mkeficapsule')
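BuildSectionData() above hands all parameters to the mkeficapsule bintool. A sketch of that call in isolation, following the argument order used above; the values are hypothetical and the empty key/certificate paths produce an unsigned capsule::

    from binman import bintool

    mkeficapsule = bintool.Bintool.create('mkeficapsule')
    mkeficapsule.generate_capsule(
        1,                                       # image-index
        '09d7cf52-0720-4710-91d1-08469b7fe9c8',  # image-guid (binman test GUID)
        0,                                       # hardware-instance
        'capsule_in.payload',                    # input payload (hypothetical)
        'capsule.bin',                           # output capsule file
        '',                                      # private-key (unsigned)
        '',                                      # public-key-cert
        0,                                       # monotonic-count
        0,                                       # fw-version
        0)                                       # oem-flags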
diff --git a/tools/binman/etype/efi_empty_capsule.py b/tools/binman/etype/efi_empty_capsule.py
new file mode 100644
index 00000000000..064bf9a77f0
--- /dev/null
+++ b/tools/binman/etype/efi_empty_capsule.py
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2023 Linaro Limited
+#
+# Entry-type module for producing an empty EFI capsule
+#
+
+import os
+
+from binman.entry import Entry
+from binman.etype.efi_capsule import get_binman_test_guid
+from binman.etype.section import Entry_section
+from dtoc import fdt_util
+from u_boot_pylib import tools
+
+class Entry_efi_empty_capsule(Entry_section):
+ """Generate EFI empty capsules
+
+ The parameters needed for generation of the empty capsules can
+ be provided as properties in the entry.
+
+ Properties / Entry arguments:
+ - image-guid: Image GUID which will be used for identifying the
+ updatable image on the board. Mandatory for accept capsule.
+ - capsule-type: String to indicate type of capsule to generate. Valid
+ values are 'accept' and 'revert'.
+
+ For more details on the capsule format and the capsule update
+ functionality, refer to Section 8.5 and Chapter 23 of the `UEFI
+ specification`_. For more information on empty capsules, refer to
+ Sections 2.3.2 and 2.3.3 of the `Dependable Boot specification`_.
+
+ A typical accept empty capsule entry node would then look something like this::
+
+ empty-capsule {
+ type = "efi-empty-capsule";
+ /* GUID of image being accepted */
+ image-guid = SANDBOX_UBOOT_IMAGE_GUID;
+ capsule-type = "accept";
+ };
+
+ A typical revert empty capsule entry node would then look something like this::
+
+ empty-capsule {
+ type = "efi-empty-capsule";
+ capsule-type = "revert";
+ };
+
+ The empty capsules do not have any input payload image.
+
+ .. _`UEFI specification`: https://uefi.org/sites/default/files/resources/UEFI_Spec_2_10_Aug29.pdf
+ .. _`Dependable Boot specification`: https://git.codelinaro.org/linaro/dependable-boot/mbfw/uploads/6f7ddfe3be24e18d4319e108a758d02e/mbfw.pdf
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+ self.required_props = ['capsule-type']
+ self.accept = 0
+ self.revert = 0
+
+ def ReadNode(self):
+ super().ReadNode()
+
+ self.image_guid = fdt_util.GetString(self._node, 'image-guid')
+ self.capsule_type = fdt_util.GetString(self._node, 'capsule-type')
+
+ if self.capsule_type != 'accept' and self.capsule_type != 'revert':
+ self.Raise('capsule-type should be either \'accept\' or \'revert\'')
+
+ if self.capsule_type == 'accept' and not self.image_guid:
+ self.Raise('Image GUID needed for generating accept capsule')
+
+ def BuildSectionData(self, required):
+ uniq = self.GetUniqueName()
+ outfile = self._filename if self._filename else 'capsule.%s' % uniq
+ capsule_fname = tools.get_output_filename(outfile)
+ accept = True if self.capsule_type == 'accept' else False
+ guid = self.image_guid
+ if self.image_guid == "binman-test":
+ guid = get_binman_test_guid('binman-test')
+
+ ret = self.mkeficapsule.generate_empty_capsule(guid, capsule_fname,
+ accept)
+ if ret is not None:
+ return tools.read_file(capsule_fname)
+
+ def AddBintools(self, btools):
+ self.mkeficapsule = self.AddBintool(btools, 'mkeficapsule')
diff --git a/tools/binman/etype/encrypted.py b/tools/binman/etype/encrypted.py
new file mode 100644
index 00000000000..53d0e76bab7
--- /dev/null
+++ b/tools/binman/etype/encrypted.py
@@ -0,0 +1,138 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2023 Weidmüller Interface GmbH & Co. KG
+# Written by Christian Taedcke <christian.taedcke@weidmueller.com>
+#
+# Entry-type module for cipher information of encrypted blobs/binaries
+#
+
+from binman.etype.collection import Entry
+from dtoc import fdt_util
+from u_boot_pylib import tools
+
+# This is imported if needed
+state = None
+
+
+class Entry_encrypted(Entry):
+ """Externally built encrypted binary blob
+
+ This entry provides the functionality to include information about how to
+ decrypt an encrypted binary. This information is added to the
+ resulting device tree by adding a new cipher node in the entry's parent
+ node (i.e. the binary).
+
+ The key that must be used to decrypt the binary is either embedded directly
+ in the device tree or referenced indirectly by specifying a key source. The
+ key source can be used as an id of a key that is stored in an external device.
+
+ Using an embedded key
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ This is an example using an embedded key::
+
+ blob-ext {
+ filename = "encrypted-blob.bin";
+ };
+
+ encrypted {
+ algo = "aes256-gcm";
+ iv-filename = "encrypted-blob.bin.iv";
+ key-filename = "encrypted-blob.bin.key";
+ };
+
+ This entry generates the following device tree structure from the example
+ above::
+
+ data = [...]
+ cipher {
+ algo = "aes256-gcm";
+ key = <0x...>;
+ iv = <0x...>;
+ };
+
+ The data property is generated by the blob-ext etype; the cipher node and
+ its contents are generated by this etype.
+
+ Using an external key
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Instead of embedding the key itself into the device tree, it is also
+ possible to address an externally stored key by specifying a 'key-source'
+ instead of the 'key'::
+
+ blob-ext {
+ filename = "encrypted-blob.bin";
+ };
+
+ encrypted {
+ algo = "aes256-gcm";
+ iv-filename = "encrypted-blob.bin.iv";
+ key-source = "external-key-id";
+ };
+
+ This entry generates the following device tree structure from the example
+ above::
+
+ data = [...]
+ cipher {
+ algo = "aes256-gcm";
+ key-source = "external-key-id";
+ iv = <0x...>;
+ };
+
+ Properties
+ ~~~~~~~~~~
+
+ Properties / Entry arguments:
+ - algo: The encryption algorithm. Currently no algorithm is supported
+ out-of-the-box. Certain algorithms will be added in future
+ patches.
+ - iv-filename: The name of the file containing the initialization
+ vector (IV for short). See
+ https://en.wikipedia.org/wiki/Initialization_vector
+ - key-filename: The name of the file containing the key. Either
+ key-filename or key-source must be provided.
+ - key-source: The key that should be used. Either key-filename or
+ key-source must be provided.
+ """
+
+ def __init__(self, section, etype, node):
+ # Put this here to allow entry-docs and help to work without libfdt
+ global state
+ from binman import state
+
+ super().__init__(section, etype, node)
+ self.required_props = ['algo', 'iv-filename']
+ self._algo = None
+ self._iv_filename = None
+ self._key_name_hint = None
+ self._key_filename = None
+
+ def ReadNode(self):
+ super().ReadNode()
+
+ self._algo = fdt_util.GetString(self._node, 'algo')
+ self._iv_filename = fdt_util.GetString(self._node, 'iv-filename')
+ self._key_filename = fdt_util.GetString(self._node, 'key-filename')
+ self._key_source = fdt_util.GetString(self._node, 'key-source')
+
+ if self._key_filename is None and self._key_source is None:
+ self.Raise("Provide either 'key-filename' or 'key-source'")
+
+ def gen_entries(self):
+ super().gen_entries()
+
+ iv_filename = tools.get_input_filename(self._iv_filename)
+ iv = tools.read_file(iv_filename, binary=True)
+
+ cipher_node = state.AddSubnode(self._node.parent, "cipher")
+ cipher_node.AddString("algo", self._algo)
+ cipher_node.AddData("iv", iv)
+
+ if self._key_filename:
+ key_filename = tools.get_input_filename(self._key_filename)
+ key = tools.read_file(key_filename, binary=True)
+ cipher_node.AddData("key", key)
+
+ if self._key_source:
+ cipher_node.AddString("key-source", self._key_source)
diff --git a/tools/binman/etype/fdtmap.py b/tools/binman/etype/fdtmap.py
new file mode 100644
index 00000000000..f1f6217940f
--- /dev/null
+++ b/tools/binman/etype/fdtmap.py
@@ -0,0 +1,166 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+
+"""# Entry-type module for a full map of the firmware image
+
+This handles putting an FDT into the image with just the information about the
+image.
+"""
+
+from binman.entry import Entry
+from u_boot_pylib import tools
+from u_boot_pylib import tout
+
+FDTMAP_MAGIC = b'_FDTMAP_'
+FDTMAP_HDR_LEN = 16
+
+# These are imported if needed
+Fdt = None
+libfdt = None
+state = None
+
+def LocateFdtmap(data):
+ """Search an image for an fdt map
+
+ Args:
+ data: Data to search
+
+ Returns:
+ Position of fdt map in data, or None if not found. Note that the
+ position returned is of the FDT header, i.e. before the FDT data
+ """
+ hdr_pos = data.find(FDTMAP_MAGIC)
+ size = len(data)
+ if hdr_pos != -1:
+ hdr = data[hdr_pos:hdr_pos + FDTMAP_HDR_LEN]
+ if len(hdr) == FDTMAP_HDR_LEN:
+ return hdr_pos
+ return None
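+
+ # A usage sketch (illustrative only; 'image.bin' is a made-up filename):
+ #
+ # data = tools.read_file('image.bin')
+ # pos = LocateFdtmap(data)
+ # if pos is not None:
+ # fdt_data = data[pos + FDTMAP_HDR_LEN:]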
+
+class Entry_fdtmap(Entry):
+ """An entry which contains an FDT map
+
+ Properties / Entry arguments:
+ None
+
+ An FDT map is just a header followed by an FDT containing a list of all the
+ entries in the image. The root node corresponds to the image node in the
+ original FDT, and an image-name property indicates the image name in that
+ original tree.
+
+ The header is the string _FDTMAP_ followed by 8 unused bytes.
+
+ When used, this entry will be populated with an FDT map which reflects the
+ entries in the current image. Hierarchy is preserved, and all offsets and
+ sizes are included.
+
+ Note that the -u option must be provided to ensure that binman updates the
+ FDT with the position of each entry.
+
+ Example output for a simple image with U-Boot and an FDT map::
+
+ / {
+ image-name = "binman";
+ size = <0x00000112>;
+ image-pos = <0x00000000>;
+ offset = <0x00000000>;
+ u-boot {
+ size = <0x00000004>;
+ image-pos = <0x00000000>;
+ offset = <0x00000000>;
+ };
+ fdtmap {
+ size = <0x0000010e>;
+ image-pos = <0x00000004>;
+ offset = <0x00000004>;
+ };
+ };
+
+ If allow-repack is used then 'orig-offset' and 'orig-size' properties are
+ added as necessary. See the binman README.
+
+ When extracting files, an alternative 'fdt' format is available for fdtmaps.
+ Use `binman extract -F fdt ...` to use this. It will export a devicetree,
+ without the fdtmap header, so it can be viewed with `fdtdump`.
+ """
+ def __init__(self, section, etype, node):
+ # Put these here to allow entry-docs and help to work without libfdt
+ global libfdt
+ global state
+ global Fdt
+
+ import libfdt
+ from binman import state
+ from dtoc.fdt import Fdt
+
+ super().__init__(section, etype, node)
+ self.alt_formats = ['fdt']
+
+ def CheckAltFormats(self, alt_formats):
+ alt_formats['fdt'] = self, 'Extract the devicetree blob from the fdtmap'
+
+ def _GetFdtmap(self):
+ """Build an FDT map from the entries in the current image
+
+ Returns:
+ FDT map binary data
+ """
+ def _AddNode(node):
+ """Add a node to the FDT map"""
+ for pname, prop in node.props.items():
+ fsw.property(pname, prop.bytes)
+ for subnode in node.subnodes:
+ with fsw.add_node(subnode.name):
+ _AddNode(subnode)
+
+ data = state.GetFdtContents('fdtmap')[1]
+ # If an fdtmap FDT already exists, use it as the fdtmap for this
+ # image; otherwise build one from the main FDT below.
+ if data is None:
+ # Get the FDT data into an Fdt object
+ data = state.GetFdtContents()[1]
+ infdt = Fdt.FromData(data)
+ infdt.Scan()
+
+ # Find the node for the image containing the Fdt-map entry
+ path = self.section.GetPath()
+ self.Detail("Fdtmap: Using section '%s' (path '%s')" %
+ (self.section.name, path))
+ node = infdt.GetNode(path)
+ if not node:
+ self.Raise("Internal error: Cannot locate node for path '%s'" %
+ path)
+
+ # Build a new tree with all nodes and properties starting from that
+ # node
+ fsw = libfdt.FdtSw()
+ fsw.finish_reservemap()
+ with fsw.add_node(''):
+ fsw.property_string('image-node', node.name)
+ _AddNode(node)
+ fdt = fsw.as_fdt()
+
+ # Pack this new FDT and return its contents
+ fdt.pack()
+ outfdt = Fdt.FromData(fdt.as_bytearray())
+ data = outfdt.GetContents()
+ data = FDTMAP_MAGIC + tools.get_bytes(0, 8) + data
+ return data
+
+ def ObtainContents(self):
+ """Obtain a placeholder for the fdt-map contents"""
+ self.SetContents(self._GetFdtmap())
+ return True
+
+ def ProcessContents(self):
+ """Write an updated version of the FDT map to this entry
+
+ This is necessary since new data may have been written back to it during
+ processing, e.g. the image-pos properties.
+ """
+ return self.ProcessContentsUpdate(self._GetFdtmap())
+
+ def GetAltFormat(self, data, alt_format):
+ if alt_format == 'fdt':
+ return data[FDTMAP_HDR_LEN:]
diff --git a/tools/binman/etype/files.py b/tools/binman/etype/files.py
new file mode 100644
index 00000000000..c8757eafab1
--- /dev/null
+++ b/tools/binman/etype/files.py
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for a set of files which are placed in individual
+# sub-entries
+#
+
+import glob
+import os
+
+from binman.etype.section import Entry_section
+from dtoc import fdt_util
+from u_boot_pylib import tools
+
+# This is imported if needed
+state = None
+
+class Entry_files(Entry_section):
+ """A set of files arranged in a section
+
+ Properties / Entry arguments:
+ - pattern: Filename pattern to match the files to include
+ - files-compress: Compression algorithm to use:
+ none: No compression
+ lz4: Use lz4 compression (via 'lz4' command-line utility)
+ - files-align: Align each file to the given alignment
+
+ This entry reads a number of files and places each in a separate sub-entry
+ within this entry. To access these you need to enable device-tree updates
+ at run-time so you can obtain the file positions.
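+
+ For example, a node collecting all .dat files from the input directories
+ might look like this (a sketch; the pattern and alignment are illustrative)::
+
+ files {
+ pattern = "*.dat";
+ files-compress = "none";
+ files-align = <4>;
+ };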
+ """
+ def __init__(self, section, etype, node):
+ # Put this here to allow entry-docs and help to work without libfdt
+ global state
+ from binman import state
+
+ super().__init__(section, etype, node)
+
+ def ReadNode(self):
+ super().ReadNode()
+ self._pattern = fdt_util.GetString(self._node, 'pattern')
+ if not self._pattern:
+ self.Raise("Missing 'pattern' property")
+ self._files_compress = fdt_util.GetString(self._node, 'files-compress',
+ 'none')
+ self._files_align = fdt_util.GetInt(self._node, 'files-align')
+ self._require_matches = fdt_util.GetBool(self._node,
+ 'require-matches')
+
+ def gen_entries(self):
+ files = tools.get_input_filename_glob(self._pattern)
+ if self._require_matches and not files:
+ self.Raise("Pattern '%s' matched no files" % self._pattern)
+ for fname in files:
+ if not os.path.isfile(fname):
+ continue
+ name = os.path.basename(fname)
+ subnode = self._node.FindNode(name)
+ if not subnode:
+ subnode = state.AddSubnode(self._node, name)
+ state.AddString(subnode, 'type', 'blob')
+ state.AddString(subnode, 'filename', fname)
+ state.AddString(subnode, 'compress', self._files_compress)
+ if self._files_align:
+ state.AddInt(subnode, 'align', self._files_align)
+
+ # Read entries again, now that we have some
+ self.ReadEntries()
diff --git a/tools/binman/etype/fill.py b/tools/binman/etype/fill.py
new file mode 100644
index 00000000000..7c93d4e2689
--- /dev/null
+++ b/tools/binman/etype/fill.py
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+
+from binman.entry import Entry
+from dtoc import fdt_util
+from u_boot_pylib import tools
+
+class Entry_fill(Entry):
+ """An entry which is filled to a particular byte value
+
+ Properties / Entry arguments:
+ - fill-byte: Byte to use to fill the entry
+
+ Note that the size property must be set since otherwise this entry does not
+ know how large it should be.
+
+ You can often achieve the same effect using the pad-byte property of the
+ overall image, in that the space between entries will then be padded with
+ that byte. But this entry is sometimes useful for explicitly setting the
+ byte value of a region.
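+
+ For example (a sketch; the size and byte value are arbitrary)::
+
+ fill {
+ size = <0x1000>;
+ fill-byte = [ff];
+ };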
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+ self.required_props = ['size']
+
+ def ReadNode(self):
+ super().ReadNode()
+ self.fill_value = fdt_util.GetByte(self._node, 'fill-byte', 0)
+
+ def ObtainContents(self):
+ self.SetContents(tools.get_bytes(self.fill_value, self.size))
+ return True
diff --git a/tools/binman/etype/fit.py b/tools/binman/etype/fit.py
new file mode 100644
index 00000000000..2c14b15b03c
--- /dev/null
+++ b/tools/binman/etype/fit.py
@@ -0,0 +1,869 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+
+"""Entry-type module for producing a FIT"""
+
+import libfdt
+
+from binman.entry import Entry, EntryArg
+from binman.etype.section import Entry_section
+from binman import elf
+from dtoc import fdt_util
+from dtoc.fdt import Fdt
+from u_boot_pylib import tools
+
+# Supported operations, with the fit,operation property
+OP_GEN_FDT_NODES, OP_SPLIT_ELF = range(2)
+OPERATIONS = {
+ 'gen-fdt-nodes': OP_GEN_FDT_NODES,
+ 'split-elf': OP_SPLIT_ELF,
+ }
+
+class Entry_fit(Entry_section):
+
+ """Flat Image Tree (FIT)
+
+ This calls mkimage to create a FIT (U-Boot Flat Image Tree) based on the
+ input provided.
+
+ Nodes for the FIT should be written out in the binman configuration just as
+ they would be in a file passed to mkimage.
+
+ For example, this creates an image containing a FIT with U-Boot SPL::
+
+ binman {
+ fit {
+ description = "Test FIT";
+ fit,fdt-list = "of-list";
+
+ images {
+ kernel@1 {
+ description = "SPL";
+ os = "u-boot";
+ type = "rkspi";
+ arch = "arm";
+ compression = "none";
+ load = <0>;
+ entry = <0>;
+
+ u-boot-spl {
+ };
+ };
+ };
+ };
+ };
+
+ More complex setups can be created, with generated nodes, as described
+ below.
+
+ Properties (in the 'fit' node itself)
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Special properties have a `fit,` prefix, indicating that they should be
+ processed but not included in the final FIT.
+
+ The top-level 'fit' node supports the following special properties:
+
+ fit,external-offset
+ Indicates that the contents of the FIT are external and provides the
+ external offset. This is passed to mkimage via the -E and -p flags.
+
+ fit,align
+ Indicates what alignment to use for the FIT and its external data,
+ and provides the alignment to use. This is passed to mkimage via
+ the -B flag.
+
+ fit,fdt-list
+ Indicates the entry argument which provides the list of device tree
+ files for the gen-fdt-nodes operation (as below). This is often
+ `of-list` meaning that `-a of-list="dtb1 dtb2..."` should be passed
+ to binman.
+
+ fit,fdt-list-val
+ As an alternative to fit,fdt-list the list of device tree files
+ can be provided in this property as a string list, e.g.::
+
+ fit,fdt-list-val = "dtb1", "dtb2";
+
+ Substitutions
+ ~~~~~~~~~~~~~
+
+ Node names and property values support a basic string-substitution feature.
+ Available substitutions for '@' nodes (and property values) are:
+
+ SEQ:
+ Sequence number of the generated fdt (1, 2, ...)
+ NAME:
+ Name of the dtb as provided (i.e. without adding '.dtb')
+
+ The `default` property, if present, will be automatically set to the name
+ of the configuration whose devicetree matches the `default-dt` entry
+ argument, e.g. with `-a default-dt=sun50i-a64-pine64-lts`.
+
+ Available substitutions for property values in these nodes are:
+
+ DEFAULT-SEQ:
+ Sequence number of the default fdt, as provided by the 'default-dt'
+ entry argument
+
+ Available operations
+ ~~~~~~~~~~~~~~~~~~~~
+
+ You can add an operation to an '@' node to indicate which operation is
+ required::
+
+ @fdt-SEQ {
+ fit,operation = "gen-fdt-nodes";
+ ...
+ };
+
+ Available operations are:
+
+ gen-fdt-nodes
+ Generate FDT nodes as above. This is the default if there is no
+ `fit,operation` property.
+
+ split-elf
+ Split an ELF file into a separate node for each segment.
+
+ Generating nodes from an FDT list (gen-fdt-nodes)
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ U-Boot supports creating fdt and config nodes automatically. To do this,
+ pass an `of-list` entry argument (e.g. `-a of-list=file1 file2`). This tells
+ binman that you want to generate nodes for two files: `file1.dtb` and
+ `file2.dtb`. The `fit,fdt-list` property (see above) indicates that
+ `of-list` should be used. If the property is missing you will get an error.
+
+ Then add a 'generator node', a node with a name starting with '@'::
+
+ images {
+ @fdt-SEQ {
+ description = "fdt-NAME";
+ type = "flat_dt";
+ compression = "none";
+ };
+ };
+
+ This tells binman to create nodes `fdt-1` and `fdt-2` for each of your two
+ files. All the properties you specify will be included in the node. This
+ node acts like a template to generate the nodes. The generator node itself
+ does not appear in the output - it is replaced with what binman generates.
+ A 'data' property is created with the contents of the FDT file.
+
+ You can create config nodes in a similar way::
+
+ configurations {
+ default = "@config-DEFAULT-SEQ";
+ @config-SEQ {
+ description = "NAME";
+ firmware = "atf";
+ loadables = "uboot";
+ fdt = "fdt-SEQ";
+ };
+ };
+
+ This tells binman to create nodes `config-1` and `config-2`, i.e. a config
+ for each of your two files.
+
+ Note that if no devicetree files are provided (with '-a of-list' as above)
+ then no nodes will be generated.
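+
+ As an illustration, with `-a of-list=file1 file2` the template above would
+ expand to something like this (a sketch; the data contents are elided)::
+
+ images {
+ fdt-1 {
+ description = "fdt-file1";
+ type = "flat_dt";
+ compression = "none";
+ data = <...contents of file1.dtb...>;
+ };
+ fdt-2 {
+ description = "fdt-file2";
+ type = "flat_dt";
+ compression = "none";
+ data = <...contents of file2.dtb...>;
+ };
+ };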
+
+ Generating nodes from an ELF file (split-elf)
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ This uses the node as a template to generate multiple nodes. The following
+ special properties are available:
+
+ split-elf
+ Split an ELF file into a separate node for each segment. This uses the
+ node as a template to generate multiple nodes. The following special
+ properties are available:
+
+ fit,load
+ Generates a `load = <...>` property with the load address of the
+ segment
+
+ fit,entry
+ Generates an `entry = <...>` property with the entry address of the
+ ELF. This is only produced for the first entry
+
+ fit,data
+ Generates a `data = <...>` property with the contents of the segment
+
+ fit,firmware
+ Generates a `firmware = <...>` property. Provides a list of possible
+ nodes to be used as the `firmware` property value. The first valid
+ node is picked as the firmware. Any remaining valid nodes are
+ prepended to the `loadables` property generated by `fit,loadables`
+
+ fit,loadables
+ Generates a `loadables = <...>` property with a list of the generated
+ nodes (including all nodes if this operation is used multiple times)
+
+
+ Here is an example showing ATF, TEE and a device tree all combined::
+
+ fit {
+ description = "test-desc";
+ #address-cells = <1>;
+ fit,fdt-list = "of-list";
+
+ images {
+ u-boot {
+ description = "U-Boot (64-bit)";
+ type = "standalone";
+ os = "U-Boot";
+ arch = "arm64";
+ compression = "none";
+ load = <CONFIG_TEXT_BASE>;
+ u-boot-nodtb {
+ };
+ };
+ @fdt-SEQ {
+ description = "fdt-NAME.dtb";
+ type = "flat_dt";
+ compression = "none";
+ };
+ @atf-SEQ {
+ fit,operation = "split-elf";
+ description = "ARM Trusted Firmware";
+ type = "firmware";
+ arch = "arm64";
+ os = "arm-trusted-firmware";
+ compression = "none";
+ fit,load;
+ fit,entry;
+ fit,data;
+
+ atf-bl31 {
+ };
+ hash {
+ algo = "sha256";
+ };
+ };
+
+ @tee-SEQ {
+ fit,operation = "split-elf";
+ description = "TEE";
+ type = "tee";
+ arch = "arm64";
+ os = "tee";
+ compression = "none";
+ fit,load;
+ fit,entry;
+ fit,data;
+
+ tee-os {
+ };
+ hash {
+ algo = "sha256";
+ };
+ };
+ };
+
+ configurations {
+ default = "@config-DEFAULT-SEQ";
+ @config-SEQ {
+ description = "conf-NAME.dtb";
+ fdt = "fdt-SEQ";
+ fit,firmware = "atf-1", "u-boot";
+ fit,loadables;
+ };
+ };
+ };
+
+ If ATF-BL31 is available, this generates a node for each segment in the
+ ELF file, for example::
+
+ images {
+ atf-1 {
+ data = <...contents of first segment...>;
+ data-offset = <0x00000000>;
+ entry = <0x00040000>;
+ load = <0x00040000>;
+ compression = "none";
+ os = "arm-trusted-firmware";
+ arch = "arm64";
+ type = "firmware";
+ description = "ARM Trusted Firmware";
+ hash {
+ algo = "sha256";
+ value = <...hash of first segment...>;
+ };
+ };
+ atf-2 {
+ data = <...contents of second segment...>;
+ load = <0xff3b0000>;
+ compression = "none";
+ os = "arm-trusted-firmware";
+ arch = "arm64";
+ type = "firmware";
+ description = "ARM Trusted Firmware";
+ hash {
+ algo = "sha256";
+ value = <...hash of second segment...>;
+ };
+ };
+ };
+
+ The same applies for OP-TEE if that is available.
+
+ If a binary is not available, the relevant template node (@atf-SEQ or
+ @tee-SEQ) is removed from the output.
+
+ This also generates a `config-xxx` node for each device tree in `of-list`.
+ Note that the U-Boot build system uses `-a of-list=$(CONFIG_OF_LIST)`
+ so you can use `CONFIG_OF_LIST` to define that list. In this example it is
+ set up for `firefly-rk3399` with a single device tree and the default set
+ with `-a default-dt=$(CONFIG_DEFAULT_DEVICE_TREE)`, so the resulting output
+ is::
+
+ configurations {
+ default = "config-1";
+ config-1 {
+ loadables = "u-boot", "atf-2", "atf-3", "tee-1", "tee-2";
+ description = "rk3399-firefly.dtb";
+ fdt = "fdt-1";
+ firmware = "atf-1";
+ };
+ };
+
+ U-Boot SPL can then load the firmware (ATF) and all the loadables (U-Boot
+ proper, ATF and TEE), then proceed with the boot.
+ """
+ def __init__(self, section, etype, node):
+ """
+ Members:
+ _fit: FIT file being built
+ _entries: dict from Entry_section:
+ key: relative path to entry Node (from the base of the FIT)
+ value: Entry_section object comprising the contents of this
+ node
+ _priv_entries: Internal copy of _entries which includes 'generator'
+ entries which are used to create the FIT, but should not be
+ processed as real entries. This is set up once we have the
+ entries
+ _loadables: List of generated split-elf nodes, each a node name
+ """
+ super().__init__(section, etype, node)
+ self._fit = None
+ self._fit_props = {}
+ self._fdts = None
+ self.mkimage = None
+ self._priv_entries = {}
+ self._loadables = []
+
+ def ReadNode(self):
+ super().ReadNode()
+ for pname, prop in self._node.props.items():
+ if pname.startswith('fit,'):
+ self._fit_props[pname] = prop
+ self._fit_list_prop = self._fit_props.get('fit,fdt-list')
+ if self._fit_list_prop:
+ fdts, = self.GetEntryArgsOrProps(
+ [EntryArg(self._fit_list_prop.value, str)])
+ if fdts is not None:
+ self._fdts = fdts.split()
+ else:
+ self._fdts = fdt_util.GetStringList(self._node, 'fit,fdt-list-val')
+
+ self._fit_default_dt = self.GetEntryArgsOrProps([EntryArg('default-dt',
+ str)])[0]
+
+ def _get_operation(self, base_node, node):
+ """Get the operation referenced by a subnode
+
+ Args:
+ node (Node): Subnode (of the FIT) to check
+
+ Returns:
+ int: Operation to perform
+
+ Raises:
+ ValueError: Invalid operation name
+ """
+ oper_name = node.props.get('fit,operation')
+ if not oper_name:
+ return OP_GEN_FDT_NODES
+ oper = OPERATIONS.get(oper_name.value)
+ if oper is None:
+ self._raise_subnode(node, f"Unknown operation '{oper_name.value}'")
+ return oper
+
+ def ReadEntries(self):
+ def _add_entries(base_node, depth, node):
+ """Add entries for any nodes that need them
+
+ Args:
+ base_node: Base Node of the FIT (with 'description' property)
+ depth: Current node depth (0 is the base 'fit' node)
+ node: Current node to process
+
+ Here we only need to provide binman entries which are used to define
+ the 'data' for each image. We create an entry_Section for each.
+ """
+ rel_path = node.path[len(base_node.path):]
+ in_images = rel_path.startswith('/images')
+ has_images = depth == 2 and in_images
+ if has_images:
+ # This node is a FIT subimage node (e.g. "/images/kernel")
+ # containing content nodes. We collect the subimage nodes and
+ # section entries for them here to merge the content subnodes
+ # together and put the merged contents in the subimage node's
+ # 'data' property later.
+ entry = Entry.Create(self, node, etype='section')
+ entry.ReadNode()
+ # The hash subnodes here are for mkimage, not binman.
+ entry.SetUpdateHash(False)
+ image_name = rel_path[len('/images/'):]
+ self._entries[image_name] = entry
+
+ for subnode in node.subnodes:
+ _add_entries(base_node, depth + 1, subnode)
+
+ _add_entries(self._node, 0, self._node)
+
+ # Keep a copy of all entries, including generator entries, since those
+ # are removed from self._entries later.
+ self._priv_entries = dict(self._entries)
+
+ def BuildSectionData(self, required):
+ """Build FIT entry contents
+
+ This adds the 'data' properties to the input ITB (Image-tree Binary)
+ then runs mkimage to process it.
+
+ Args:
+ required (bool): True if the data must be present, False if it is OK
+ to return None
+
+ Returns:
+ bytes: Contents of the section
+ """
+ data = self._build_input()
+ uniq = self.GetUniqueName()
+ input_fname = tools.get_output_filename(f'{uniq}.itb')
+ output_fname = tools.get_output_filename(f'{uniq}.fit')
+ tools.write_file(input_fname, data)
+ tools.write_file(output_fname, data)
+
+ args = {}
+ ext_offset = self._fit_props.get('fit,external-offset')
+ if ext_offset is not None:
+ args = {
+ 'external': True,
+ 'pad': fdt_util.fdt32_to_cpu(ext_offset.value)
+ }
+ align = self._fit_props.get('fit,align')
+ if align is not None:
+ args.update({'align': fdt_util.fdt32_to_cpu(align.value)})
+ if self.mkimage.run(reset_timestamp=True, output_fname=output_fname,
+ **args) is None:
+ if not self.GetAllowMissing():
+ self.Raise("Missing tool: 'mkimage'")
+ # Bintool is missing; just use empty data as the output
+ self.record_missing_bintool(self.mkimage)
+ return tools.get_bytes(0, 1024)
+
+ return tools.read_file(output_fname)
+
+ def _raise_subnode(self, node, msg):
+ """Raise an error with a paticular FIT subnode
+
+ Args:
+ node (Node): FIT subnode containing the error
+ msg (str): Message to report
+
+ Raises:
+ ValueError, as requested
+ """
+ rel_path = node.path[len(self._node.path) + 1:]
+ self.Raise(f"subnode '{rel_path}': {msg}")
+
+ def _build_input(self):
+ """Finish the FIT by adding the 'data' properties to it
+
+ Returns:
+ bytes: New fdt contents
+ """
+ def _process_prop(pname, prop):
+ """Process special properties
+
+ Handles properties with generated values. At present the only
+ supported property is 'default', i.e. the default device tree in
+ the configurations node.
+
+ Args:
+ pname (str): Name of property
+ prop (Prop): Property to process
+ """
+ if pname == 'default':
+ val = prop.value
+ # Handle the 'default' property
+ if val.startswith('@'):
+ if not self._fdts:
+ return
+ if not self._fit_default_dt:
+ self.Raise("Generated 'default' node requires default-dt entry argument")
+ if self._fit_default_dt not in self._fdts:
+ self.Raise(
+ f"default-dt entry argument '{self._fit_default_dt}' "
+ f"not found in fdt list: {', '.join(self._fdts)}")
+ seq = self._fdts.index(self._fit_default_dt)
+ val = val[1:].replace('DEFAULT-SEQ', str(seq + 1))
+ fsw.property_string(pname, val)
+ return
+ elif pname.startswith('fit,'):
+ # Ignore these, which are commands for binman to process
+ return
+ elif pname in ['offset', 'size', 'image-pos']:
+ # Don't add binman's calculated properties
+ return
+ fsw.property(pname, prop.bytes)
+
+ def _process_firmware_prop(node):
+ """Process optional fit,firmware property
+
+ Picks the first valid entry for use as the firmware; the remaining
+ valid entries are prepended to the loadables
+
+ Args:
+ node (Node): Generator node to process
+
+ Returns:
+ firmware (str): Firmware or None
+ result (list): List of remaining loadables
+ """
+ val = fdt_util.GetStringList(node, 'fit,firmware')
+ if val is None:
+ return None, self._loadables
+ valid_entries = list(self._loadables)
+ for name, entry in self.GetEntries().items():
+ missing = []
+ entry.CheckMissing(missing)
+ entry.CheckOptional(missing)
+ if not missing:
+ valid_entries.append(name)
+ firmware = None
+ result = []
+ for name in val:
+ if name in valid_entries:
+ if not firmware:
+ firmware = name
+ elif name not in result:
+ result.append(name)
+ for name in self._loadables:
+ if name != firmware and name not in result:
+ result.append(name)
+ return firmware, result
+
+ def _gen_fdt_nodes(base_node, node, depth, in_images):
+ """Generate FDT nodes
+
+ This creates one node for each member of self._fdts using the
+ provided template. If a property value contains 'NAME' it is
+ replaced with the filename of the FDT. If a property value contains
+ SEQ it is replaced with the node sequence number, where 1 is the
+ first.
+
+ Args:
+ node (Node): Generator node to process
+ depth: Current node depth (0 is the base 'fit' node)
+ in_images: True if this is inside the 'images' node, so that
+ 'data' properties should be generated
+ """
+ if self._fdts:
+ firmware, fit_loadables = _process_firmware_prop(node)
+ # Generate nodes for each FDT
+ for seq, fdt_fname in enumerate(self._fdts):
+ node_name = node.name[1:].replace('SEQ', str(seq + 1))
+ fname = tools.get_input_filename(fdt_fname + '.dtb')
+ with fsw.add_node(node_name):
+ for pname, prop in node.props.items():
+ if pname == 'fit,firmware':
+ if firmware:
+ fsw.property_string('firmware', firmware)
+ elif pname == 'fit,loadables':
+ val = '\0'.join(fit_loadables) + '\0'
+ fsw.property('loadables', val.encode('utf-8'))
+ elif pname == 'fit,operation':
+ pass
+ elif pname.startswith('fit,'):
+ self._raise_subnode(
+ node, f"Unknown directive '{pname}'")
+ else:
+ val = prop.bytes.replace(
+ b'NAME', tools.to_bytes(fdt_fname))
+ val = val.replace(
+ b'SEQ', tools.to_bytes(str(seq + 1)))
+ fsw.property(pname, val)
+
+ # Add data for 'images' nodes (but not 'config')
+ if depth == 1 and in_images:
+ fsw.property('data', tools.read_file(fname))
+
+ for subnode in node.subnodes:
+ with fsw.add_node(subnode.name):
+ _add_node(node, depth + 1, subnode)
+ else:
+ if self._fdts is None:
+ if self._fit_list_prop:
+ self.Raise('Generator node requires '
+ f"'{self._fit_list_prop.value}' entry argument")
+ else:
+ self.Raise("Generator node requires 'fit,fdt-list' property")
+
+ def _gen_split_elf(base_node, node, depth, segments, entry_addr):
+ """Add nodes for the ELF file, one per group of contiguous segments
+
+ Args:
+ base_node (Node): Template node from the binman definition
+ node (Node): Node to replace (in the FIT being built)
+ depth: Current node depth (0 is the base 'fit' node)
+ segments (list): list of segments, each:
+ int: Segment number (0 = first)
+ int: Start address of segment in memory
+ bytes: Contents of segment
+ entry_addr (int): entry address of ELF file
+ """
+ for (seq, start, data) in segments:
+ node_name = node.name[1:].replace('SEQ', str(seq + 1))
+ with fsw.add_node(node_name):
+ loadables.append(node_name)
+ for pname, prop in node.props.items():
+ if not pname.startswith('fit,'):
+ fsw.property(pname, prop.bytes)
+ elif pname == 'fit,load':
+ fsw.property_u32('load', start)
+ elif pname == 'fit,entry':
+ if seq == 0:
+ fsw.property_u32('entry', entry_addr)
+ elif pname == 'fit,data':
+ fsw.property('data', bytes(data))
+ elif pname != 'fit,operation':
+ self._raise_subnode(
+ node, f"Unknown directive '{pname}'")
+
+ for subnode in node.subnodes:
+ with fsw.add_node(subnode.name):
+ _add_node(node, depth + 1, subnode)
+
+ def _gen_node(base_node, node, depth, in_images, entry):
+ """Generate nodes from a template
+
+ This creates one or more nodes depending on the fit,operation being
+ used.
+
+ For OP_GEN_FDT_NODES it creates one node for each member of
+ self._fdts using the provided template. If a property value contains
+ 'NAME' it is replaced with the filename of the FDT. If a property
+ value contains SEQ it is replaced with the node sequence number,
+ where 1 is the first.
+
+ For OP_SPLIT_ELF it emits one node for each section in the ELF file.
+ If the file is missing, nothing is generated.
+
+ Args:
+ base_node (Node): Base Node of the FIT (with 'description'
+ property)
+ node (Node): Generator node to process
+ depth (int): Current node depth (0 is the base 'fit' node)
+ in_images (bool): True if this is inside the 'images' node, so
+ that 'data' properties should be generated
+ entry (entry_Section): Entry for the section containing the
+ contents of this node
+ """
+ oper = self._get_operation(base_node, node)
+ if oper == OP_GEN_FDT_NODES:
+ _gen_fdt_nodes(base_node, node, depth, in_images)
+ elif oper == OP_SPLIT_ELF:
+ # Entry_section.ObtainContents() either returns True or
+ # raises an exception.
+ data = None
+ missing_opt_list = []
+ entry.ObtainContents()
+ entry.Pack(0)
+ entry.CheckMissing(missing_opt_list)
+ entry.CheckOptional(missing_opt_list)
+
+ # If any pieces are missing, skip this. The missing entries will
+ # show an error
+ if not missing_opt_list:
+ segs = entry.read_elf_segments()
+ if segs:
+ segments, entry_addr = segs
+ else:
+ elf_data = entry.GetData()
+ try:
+ segments, entry_addr = (
+ elf.read_loadable_segments(elf_data))
+ except ValueError as exc:
+ self._raise_subnode(
+ node, f'Failed to read ELF file: {str(exc)}')
+
+ _gen_split_elf(base_node, node, depth, segments, entry_addr)
+
+ def _add_node(base_node, depth, node):
+ """Add nodes to the output FIT
+
+ Args:
+ base_node (Node): Base Node of the FIT (with 'description'
+ property)
+ depth (int): Current node depth (0 is the base 'fit' node)
+ node (Node): Current node to process
+
+ There are two cases to deal with:
+ - hash and signature nodes which become part of the FIT
+ - binman entries which are used to define the 'data' for each
+ image, so don't appear in the FIT
+ """
+ # Copy over all the relevant properties
+ for pname, prop in node.props.items():
+ _process_prop(pname, prop)
+
+ rel_path = node.path[len(base_node.path):]
+ in_images = rel_path.startswith('/images')
+
+ has_images = depth == 2 and in_images
+ if has_images:
+ image_name = rel_path[len('/images/'):]
+ entry = self._priv_entries[image_name]
+ data = entry.GetData()
+ fsw.property('data', bytes(data))
+
+ for subnode in node.subnodes:
+ subnode_path = f'{rel_path}/{subnode.name}'
+ if has_images and not self.IsSpecialSubnode(subnode):
+ # This subnode is a content node not meant to appear in
+ # the FIT (e.g. "/images/kernel/u-boot"), so don't call
+ # fsw.add_node() or _add_node() for it.
+ pass
+ elif self.GetImage().generate and subnode.name.startswith('@'):
+ entry = self._priv_entries.get(subnode.name)
+ _gen_node(base_node, subnode, depth, in_images, entry)
+ # This is a generator (template) entry, so remove it from
+ # the list of entries used by PackEntries(), etc. Otherwise
+ # it will appear in the binman output
+ to_remove.append(subnode.name)
+ else:
+ with fsw.add_node(subnode.name):
+ _add_node(base_node, depth + 1, subnode)
+
+ # Build a new tree with all nodes and properties starting from the
+ # entry node
+ fsw = libfdt.FdtSw()
+ fsw.INC_SIZE = 65536
+ fsw.finish_reservemap()
+ to_remove = []
+ loadables = []
+ with fsw.add_node(''):
+ _add_node(self._node, 0, self._node)
+ self._loadables = loadables
+ fdt = fsw.as_fdt()
+
+ # Remove generator entries from the main list
+ for path in to_remove:
+ if path in self._entries:
+ del self._entries[path]
+
+ # Pack this new FDT and scan it so we can add the data later
+ fdt.pack()
+ data = fdt.as_bytearray()
+ return data
+
+ def SetImagePos(self, image_pos):
+ """Set the position in the image
+
+ This sets each subentry's offsets, sizes and positions-in-image
+ according to where they ended up in the packed FIT file.
+
+ Args:
+ image_pos (int): Position of this entry in the image
+ """
+ if self.build_done:
+ return
+ super().SetImagePos(image_pos)
+
+ # If mkimage is missing we'll have empty data,
+ # which will cause a FDT_ERR_BADMAGIC error
+ if self.mkimage in self.missing_bintools:
+ return
+
+ fdt = Fdt.FromData(self.GetData())
+ fdt.Scan()
+
+ for image_name, section in self._entries.items():
+ path = f"/images/{image_name}"
+ node = fdt.GetNode(path)
+
+ data_prop = node.props.get("data")
+ data_pos = fdt_util.GetInt(node, "data-position")
+ data_offset = fdt_util.GetInt(node, "data-offset")
+ data_size = fdt_util.GetInt(node, "data-size")
+
+ # Contents are inside the FIT
+ if data_prop is not None:
+ # GetOffset() returns offset of a fdt_property struct,
+ # which has 3 fdt32_t members before the actual data.
+ offset = data_prop.GetOffset() + 12
+ size = len(data_prop.bytes)
+
+ # External offset from the base of the FIT
+ elif data_pos is not None:
+ offset = data_pos
+ size = data_size
+
+ # External offset from the end of the FIT, not used in binman
+ elif data_offset is not None: # pragma: no cover
+ offset = fdt.GetFdtObj().totalsize() + data_offset
+ size = data_size
+
+ # This should never happen
+ else: # pragma: no cover
+ self.Raise(f'{path}: missing data properties')
+
+ section.SetOffsetSize(offset, size)
+ section.SetImagePos(self.image_pos)
+
+ def AddBintools(self, btools):
+ super().AddBintools(btools)
+ self.mkimage = self.AddBintool(btools, 'mkimage')
+
+ def CheckMissing(self, missing_list):
+ # We must use our private entry list for this since generator nodes
+ # which are removed from self._entries will otherwise not show up as
+ # missing
+ for entry in self._priv_entries.values():
+ entry.CheckMissing(missing_list)
+
+ def CheckOptional(self, optional_list):
+ # We must use our private entry list for this since generator nodes
+ # which are removed from self._entries will otherwise not show up as
+ # optional
+ for entry in self._priv_entries.values():
+ entry.CheckOptional(optional_list)
+
+ def CheckEntries(self):
+ pass
+
+ def UpdateSignatures(self, privatekey_fname, algo, input_fname):
+ uniq = self.GetUniqueName()
+ args = [ '-G', privatekey_fname, '-r', '-o', algo, '-F' ]
+ if input_fname:
+ fname = input_fname
+ else:
+ fname = tools.get_output_filename('%s.fit' % uniq)
+ tools.write_file(fname, self.GetData())
+ args.append(fname)
+
+ if self.mkimage.run_cmd(*args) is None:
+ self.Raise("Missing tool: 'mkimage'")
+
+ data = tools.read_file(fname)
+ self.WriteData(data)
diff --git a/tools/binman/etype/fmap.py b/tools/binman/etype/fmap.py
new file mode 100644
index 00000000000..3669d91a0bc
--- /dev/null
+++ b/tools/binman/etype/fmap.py
@@ -0,0 +1,96 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for a Flash map, as used by the flashrom SPI flash tool
+#
+
+from binman.entry import Entry
+from binman import fmap_util
+from u_boot_pylib import tools
+from u_boot_pylib.tools import to_hex_size
+from u_boot_pylib import tout
+
+
+class Entry_fmap(Entry):
+ """An entry which contains an Fmap section
+
+ Properties / Entry arguments:
+ None
+
+ FMAP is a simple format used by flashrom, an open-source utility for
+ reading and writing the SPI flash, typically on x86 CPUs. The format
+ provides flashrom with a list of areas, so it knows what is in the flash.
+ It can then read or write just a single area, instead of the whole flash.
+
+ The format is defined by the flashrom project, in the file lib/fmap.h -
+ see www.flashrom.org/Flashrom for more information.
+
+ When used, this entry will be populated with an FMAP which reflects the
+ entries in the current image. Note that any hierarchy is squashed, since
+ FMAP does not support this. Sections are represented as an area appearing
+ before its contents, so that it is possible to reconstruct the hierarchy
+ from the FMAP by using the offset information. This convention does not
+ seem to be documented, but is used in Chromium OS.
+
+ To mark an area as preserved, use the normal 'preserved' flag in the entry.
+ This will result in the corresponding FMAP area having the
+ FMAP_AREA_PRESERVE flag. This flag does not automatically propagate down to
+ child entries.
+
+ CBFS entries appear as a single entry, i.e. the sub-entries are ignored.
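+
+ A minimal usage sketch, with the FMAP describing all entries in the image::
+
+ binman {
+ u-boot {
+ };
+ fmap {
+ };
+ };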
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+
+ def _GetFmap(self):
+ """Build an FMAP from the entries in the current image
+
+ Returns:
+ FMAP binary data
+ """
+ def _AddEntries(areas, entry):
+ entries = entry.GetEntries()
+ tout.debug("fmap: Add entry '%s' type '%s' (%s subentries)" %
+ (entry.GetPath(), entry.etype, to_hex_size(entries)))
+
+ # Collect any flag (separate lines to ensure code coverage)
+ flags = 0
+ if entry.preserve:
+ flags = fmap_util.FMAP_AREA_PRESERVE
+
+ if entries and entry.etype != 'cbfs':
+ # Create an area for the section, which encompasses all entries
+ # within it
+ if entry.image_pos is None:
+ pos = 0
+ else:
+ pos = entry.image_pos - entry.GetRootSkipAtStart()
+
+ # Drop @ symbols in name
+ name = entry.name.replace('@', '')
+ areas.append(
+ fmap_util.FmapArea(pos, entry.size or 0, name, flags))
+ for subentry in entries.values():
+ _AddEntries(areas, subentry)
+ else:
+ pos = entry.image_pos
+ if pos is not None:
+ pos -= entry.section.GetRootSkipAtStart()
+ areas.append(fmap_util.FmapArea(pos or 0, entry.size or 0,
+ entry.name, flags))
+
+ entries = self.GetImage().GetEntries()
+ areas = []
+ for entry in entries.values():
+ _AddEntries(areas, entry)
+ return fmap_util.EncodeFmap(self.section.GetImageSize() or 0, self.name,
+ areas)
+
+ def ObtainContents(self):
+ """Obtain a placeholder for the fmap contents"""
+ self.SetContents(self._GetFmap())
+ return True
+
+ def ProcessContents(self):
+ return self.ProcessContentsUpdate(self._GetFmap())
diff --git a/tools/binman/etype/gbb.py b/tools/binman/etype/gbb.py
new file mode 100644
index 00000000000..cca18af6e2f
--- /dev/null
+++ b/tools/binman/etype/gbb.py
@@ -0,0 +1,104 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+
+# Support for a Chromium OS Google Binary Block, used to record read-only
+# information mostly used by firmware.
+
+from collections import OrderedDict
+
+from u_boot_pylib import command
+from binman.entry import Entry, EntryArg
+
+from dtoc import fdt_util
+from u_boot_pylib import tools
+
+# Build GBB flags.
+# (src/platform/vboot_reference/firmware/include/gbb_header.h)
+gbb_flag_properties = {
+ 'dev-screen-short-delay': 0x1,
+ 'load-option-roms': 0x2,
+ 'enable-alternate-os': 0x4,
+ 'force-dev-switch-on': 0x8,
+ 'force-dev-boot-usb': 0x10,
+ 'disable-fw-rollback-check': 0x20,
+ 'enter-triggers-tonorm': 0x40,
+ 'force-dev-boot-legacy': 0x80,
+ 'faft-key-override': 0x100,
+ 'disable-ec-software-sync': 0x200,
+ 'default-dev-boot-legacy': 0x400,
+ 'disable-pd-software-sync': 0x800,
+ 'disable-lid-shutdown': 0x1000,
+ 'force-dev-boot-fastboot-full-cap': 0x2000,
+ 'enable-serial': 0x4000,
+ 'disable-dwmp': 0x8000,
+}
+
+
+class Entry_gbb(Entry):
+ """An entry which contains a Chromium OS Google Binary Block
+
+ Properties / Entry arguments:
+ - hardware-id: Hardware ID to use for this build (a string)
+ - keydir: Directory containing the public keys to use
+ - bmpblk: Filename containing images used by recovery
+
+ Chromium OS uses a GBB to store various pieces of information, in particular
+ the root and recovery keys that are used to verify the boot process. Some
+ more details are here:
+
+ https://www.chromium.org/chromium-os/firmware-porting-guide/2-concepts
+
+ but note that the page dates from 2013 so is quite out of date. See
+ README.chromium for how to obtain the required keys and tools.
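+
+ A sketch of a possible node (the size and flag shown are illustrative; the
+ available flag names are listed in gbb_flag_properties in this module)::
+
+ gbb {
+ size = <0x2180>;
+ flags {
+ dev-screen-short-delay;
+ };
+ };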
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+ self.hardware_id, self.keydir, self.bmpblk = self.GetEntryArgsOrProps(
+ [EntryArg('hardware-id', str),
+ EntryArg('keydir', str),
+ EntryArg('bmpblk', str)])
+
+ # Read in the GBB flags from the config
+ self.gbb_flags = 0
+ flags_node = node.FindNode('flags')
+ if flags_node:
+ for flag, value in gbb_flag_properties.items():
+ if fdt_util.GetBool(flags_node, flag):
+ self.gbb_flags |= value
+
+ def ObtainContents(self):
+ gbb = 'gbb.bin'
+ fname = tools.get_output_filename(gbb)
+ if not self.size:
+ self.Raise('GBB must have a fixed size')
+ gbb_size = self.size
+ bmpfv_size = gbb_size - 0x2180
+ if bmpfv_size < 0:
+ self.Raise('GBB is too small (minimum 0x2180 bytes)')
+ keydir = tools.get_input_filename(self.keydir)
+
+ stdout = self.futility.gbb_create(
+ fname, [0x100, 0x1000, bmpfv_size, 0x1000])
+ if stdout is not None:
+ stdout = self.futility.gbb_set(
+ fname,
+ hwid=self.hardware_id,
+ rootkey='%s/root_key.vbpubk' % keydir,
+ recoverykey='%s/recovery_key.vbpubk' % keydir,
+ flags=self.gbb_flags,
+ bmpfv=tools.get_input_filename(self.bmpblk))
+
+ if stdout is not None:
+ self.SetContents(tools.read_file(fname))
+ else:
+ # Bintool is missing; just use the required amount of zero data
+ self.record_missing_bintool(self.futility)
+ self.SetContents(tools.get_bytes(0, gbb_size))
+
+ return True
+
+ def AddBintools(self, btools):
+ super().AddBintools(btools)
+ self.futility = self.AddBintool(btools, 'futility')
diff --git a/tools/binman/etype/image_header.py b/tools/binman/etype/image_header.py
new file mode 100644
index 00000000000..24011884958
--- /dev/null
+++ b/tools/binman/etype/image_header.py
@@ -0,0 +1,112 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+
+"""Entry-type module for an image header which points to the FDT map
+
+This creates an 8-byte entry with a magic number and the offset of the FDT map
+(which is another entry in the image), relative to the start or end of the
+image.
+"""
+
+import struct
+
+from binman.entry import Entry
+from dtoc import fdt_util
+
+IMAGE_HEADER_MAGIC = b'BinM'
+IMAGE_HEADER_LEN = 8
+
+def LocateHeaderOffset(data):
+ """Search an image for an image header
+
+ Args:
+ data: Data to search
+
+ Returns:
+ Offset of image header in the image, or None if not found
+ """
+ hdr_pos = data.find(IMAGE_HEADER_MAGIC)
+ if hdr_pos != -1:
+ size = len(data)
+ hdr = data[hdr_pos:hdr_pos + IMAGE_HEADER_LEN]
+ if len(hdr) == IMAGE_HEADER_LEN:
+ offset = struct.unpack('<I', hdr[4:])[0]
+ if hdr_pos == len(data) - IMAGE_HEADER_LEN:
+ pos = size + offset - (1 << 32)
+ else:
+ pos = offset
+ return pos
+ return None
+
+class Entry_image_header(Entry):
+ """An entry which contains a pointer to the FDT map
+
+ Properties / Entry arguments:
+ location: Location of header ("start" or "end" of image). This is
+ optional. If omitted then the entry must have an offset property.
+
+ This adds an 8-byte entry to the start or end of the image, pointing to the
+ location of the FDT map. The format is a magic number followed by an offset
+ from the start or end of the image, in two's complement format.
+
+ This entry must be in the top-level part of the image.
+
+ NOTE: If the location is at the start/end, you will probably need to specify
+ sort-by-offset for the image, unless you actually put the image header
+ first/last in the entry list.
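+
+ A minimal sketch of an image using this entry, together with the 'fdtmap'
+ entry it requires as a sibling (here the header is last in the entry list,
+ so sort-by-offset is not needed)::
+
+ binman {
+ u-boot {
+ };
+ fdtmap {
+ };
+ image-header {
+ location = "end";
+ };
+ };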
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+ self.location = fdt_util.GetString(self._node, 'location')
+
+ def _GetHeader(self):
+ image_pos = self.GetSiblingImagePos('fdtmap')
+ if image_pos == False:
+ self.Raise("'image_header' section must have an 'fdtmap' sibling")
+ elif image_pos is None:
+ # This will be available when called from ProcessContents(), but not
+ # when called from ObtainContents()
+ offset = 0xffffffff
+ else:
+ image_size = self.section.GetImageSize() or 0
+ base = (0 if self.location != 'end' else image_size)
+ offset = (image_pos - base) & 0xffffffff
+ data = IMAGE_HEADER_MAGIC + struct.pack('<I', offset)
+ return data
+
+ def ObtainContents(self):
+ """Obtain a placeholder for the header contents"""
+ self.SetContents(self._GetHeader())
+ return True
+
+ def Pack(self, offset):
+ """Special pack method to set the offset to start/end of image"""
+ if not self.offset:
+ if self.location not in ['start', 'end']:
+ self.Raise("Invalid location '%s', expected 'start' or 'end'" %
+ self.location)
+ order = self.GetSiblingOrder()
+ if self.location != order and not self.section.GetSort():
+ self.Raise("Invalid sibling order '%s' for image-header: Must be at '%s' to match location" %
+ (order, self.location))
+ if self.location != 'end':
+ offset = 0
+ else:
+ image_size = self.section.GetImageSize()
+ if image_size is None:
+ # We don't know the image size, but this must be the last entry,
+ # so we can assume it goes at the end and keep the offset passed in
+ offset = offset
+ else:
+ offset = image_size - IMAGE_HEADER_LEN
+ offset += self.section.GetStartOffset()
+ return super().Pack(offset)
+
+ def ProcessContents(self):
+ """Write an updated version of the FDT map to this entry
+
+ This is necessary since image_pos is not available when ObtainContents()
+ is called, since by then the entries have not been packed in the image.
+ """
+ return self.ProcessContentsUpdate(self._GetHeader())
diff --git a/tools/binman/etype/intel_cmc.py b/tools/binman/etype/intel_cmc.py
new file mode 100644
index 00000000000..494d43c9cf9
--- /dev/null
+++ b/tools/binman/etype/intel_cmc.py
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for Intel Chip Microcode binary blob
+#
+
+from binman.etype.blob_ext import Entry_blob_ext
+
+class Entry_intel_cmc(Entry_blob_ext):
+ """Intel Chipset Micro Code (CMC) file
+
+ Properties / Entry arguments:
+ - filename: Filename of file to read into entry
+
+ This file contains microcode for some devices in a special format. An
+ example filename is 'Microcode/C0_22211.BIN'.
+
+ See README.x86 for information about x86 binary blobs.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
diff --git a/tools/binman/etype/intel_descriptor.py b/tools/binman/etype/intel_descriptor.py
new file mode 100644
index 00000000000..7fe88a9ec1a
--- /dev/null
+++ b/tools/binman/etype/intel_descriptor.py
@@ -0,0 +1,80 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for Intel flash descriptor
+#
+
+import struct
+
+from binman.entry import Entry
+from binman.etype.blob_ext import Entry_blob_ext
+
+FD_SIGNATURE = struct.pack('<L', 0x0ff0a55a)
+MAX_REGIONS = 5
+
+# Region numbers supported by the Intel firmware format
+(REGION_DESCRIPTOR, REGION_BIOS, REGION_ME, REGION_GBE,
+ REGION_PDATA) = range(5)
+
+class Region:
+ def __init__(self, data, frba, region_num):
+ pos = frba + region_num * 4
+ val = struct.unpack('<L', data[pos:pos + 4])[0]
+ self.base = (val & 0xfff) << 12
+ self.limit = ((val & 0x0fff0000) >> 4) | 0xfff
+ self.size = self.limit - self.base + 1
+
+class Entry_intel_descriptor(Entry_blob_ext):
+ """Intel flash descriptor block (4KB)
+
+ Properties / Entry arguments:
+ filename: Filename of file containing the descriptor. This is typically
+ a 4KB binary file, sometimes called 'descriptor.bin'
+
+ This entry is placed at the start of flash and provides information about
+ the SPI flash regions. In particular it provides the base address and
+ size of the ME (Management Engine) region, allowing us to place the ME
+ binary in the right place.
+
+ With this entry in your image, the position of the 'intel-me' entry will be
+ fixed in the image, so you do not need to specify an offset for that
+ region. This is useful, because it is not possible to change the position
+ of the ME region without updating the descriptor.
+
+ See README.x86 for information about x86 binary blobs.
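+
+ A sketch of typical usage, with the ME region position taken from the
+ descriptor (the filenames are illustrative)::
+
+ intel-descriptor {
+ filename = "descriptor.bin";
+ };
+
+ intel-me {
+ filename = "me.bin";
+ };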
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+ self._regions = []
+
+ def Pack(self, offset):
+ """Put this entry at the start of the image"""
+ if self.offset is None:
+ offset = self.section.GetStartOffset()
+ return super().Pack(offset)
+
+ def GetOffsets(self):
+ info = {}
+ if self.missing:
+ # Return zero offsets so that these entries get placed somewhere
+ if self.HasSibling('intel-me'):
+ info['intel-me'] = [0, None]
+ return info
+ offset = self.data.find(FD_SIGNATURE)
+ if offset == -1:
+ self.Raise('Cannot find Intel Flash Descriptor (FD) signature')
+ flvalsig, flmap0, flmap1, flmap2 = struct.unpack('<LLLL',
+ self.data[offset:offset + 16])
+ frba = ((flmap0 >> 16) & 0xff) << 4
+ for i in range(MAX_REGIONS):
+ self._regions.append(Region(self.data, frba, i))
+
+ # Set the offset for ME (Management Engine) and IFWI (Integrated
+ # Firmware Image), for now, since the others are not used.
+ if self.HasSibling('intel-me'):
+ info['intel-me'] = [self._regions[REGION_ME].base,
+ self._regions[REGION_ME].size]
+ if self.HasSibling('intel-ifwi'):
+ info['intel-ifwi'] = [self._regions[REGION_BIOS].base, None]
+ return info
diff --git a/tools/binman/etype/intel_fit.py b/tools/binman/etype/intel_fit.py
new file mode 100644
index 00000000000..f1a10c55a67
--- /dev/null
+++ b/tools/binman/etype/intel_fit.py
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for Intel Firmware Image Table
+#
+
+import struct
+
+from binman.etype.blob_ext import Entry_blob_ext
+
+class Entry_intel_fit(Entry_blob_ext):
+ """Intel Firmware Image Table (FIT)
+
+ This entry contains a dummy FIT as required by recent Intel CPUs. The FIT
+ contains information about the firmware and microcode available in the
+ image.
+
+ At present binman only supports a basic FIT with no microcode.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+
+ def ReadNode(self):
+ """Force 16-byte alignment as required by FIT pointer"""
+ super().ReadNode()
+ self.align = 16
+
+ def ObtainContents(self):
+ data = struct.pack('<8sIHBB', b'_FIT_ ', 1, 0x100, 0x80, 0x7d)
+ self.SetContents(data)
+ return True
diff --git a/tools/binman/etype/intel_fit_ptr.py b/tools/binman/etype/intel_fit_ptr.py
new file mode 100644
index 00000000000..01f082281c5
--- /dev/null
+++ b/tools/binman/etype/intel_fit_ptr.py
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for a pointer to an Intel Firmware Image Table
+#
+
+import struct
+
+from binman.etype.blob_ext import Entry_blob_ext
+
+class Entry_intel_fit_ptr(Entry_blob_ext):
+ """Intel Firmware Image Table (FIT) pointer
+
+ This entry contains a pointer to the FIT. It is required to be at address
+ 0xffffffc0 in the image.
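+
+    Since this entry requires an 'intel-fit' sibling, the two are normally
+    declared together, for example::
+
+        intel-fit {
+        };
+
+        intel-fit-ptr {
+        };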
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+ if self.HasSibling('intel-fit') is False:
+ self.Raise("'intel-fit-ptr' section must have an 'intel-fit' sibling")
+
+ def _GetContents(self):
+ fit_pos = self.GetSiblingImagePos('intel-fit')
+ return struct.pack('<II', fit_pos or 0, 0)
+
+ def ObtainContents(self):
+ self.SetContents(self._GetContents())
+ return True
+
+ def ProcessContents(self):
+ """Write an updated version of the FIT pointer to this entry
+
+        This is necessary since image_pos is not available when ObtainContents()
+        is called, as the entries have not yet been packed into the image.
+ """
+ return self.ProcessContentsUpdate(self._GetContents())
+
+ def Pack(self, offset):
+ """Special pack method to set the offset to the right place"""
+ return super().Pack(0xffffffc0)
diff --git a/tools/binman/etype/intel_fsp.py b/tools/binman/etype/intel_fsp.py
new file mode 100644
index 00000000000..326cb7d09b3
--- /dev/null
+++ b/tools/binman/etype/intel_fsp.py
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for Intel Firmware Support Package binary blob
+#
+
+from binman.etype.blob_ext import Entry_blob_ext
+
+class Entry_intel_fsp(Entry_blob_ext):
+ """Intel Firmware Support Package (FSP) file
+
+ Properties / Entry arguments:
+ - filename: Filename of file to read into entry
+
+ This file contains binary blobs which are used on some devices to make the
+ platform work. U-Boot executes this code since it is not possible to set up
+ the hardware using U-Boot open-source code. Documentation is typically not
+ available in sufficient detail to allow this.
+
+ An example filename is 'FSP/QUEENSBAY_FSP_GOLD_001_20-DECEMBER-2013.fd'
+
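+    In an image description this might appear as, for instance::
+
+        intel-fsp {
+            filename = "FSP/QUEENSBAY_FSP_GOLD_001_20-DECEMBER-2013.fd";
+        };
+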
+ See README.x86 for information about x86 binary blobs.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
diff --git a/tools/binman/etype/intel_fsp_m.py b/tools/binman/etype/intel_fsp_m.py
new file mode 100644
index 00000000000..9bcac790ed9
--- /dev/null
+++ b/tools/binman/etype/intel_fsp_m.py
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2019 Google LLC
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for Intel Firmware Support Package binary blob (M section)
+#
+
+from binman.etype.blob_ext import Entry_blob_ext
+
+class Entry_intel_fsp_m(Entry_blob_ext):
+ """Intel Firmware Support Package (FSP) memory init
+
+ Properties / Entry arguments:
+ - filename: Filename of file to read into entry
+
+ This file contains a binary blob which is used on some devices to set up
+ SDRAM. U-Boot executes this code in SPL so that it can make full use of
+ memory. Documentation is typically not available in sufficient detail to
+    allow U-Boot to do this itself.
+
+ An example filename is 'fsp_m.bin'
+
+ See README.x86 for information about x86 binary blobs.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
diff --git a/tools/binman/etype/intel_fsp_s.py b/tools/binman/etype/intel_fsp_s.py
new file mode 100644
index 00000000000..1d5046d452b
--- /dev/null
+++ b/tools/binman/etype/intel_fsp_s.py
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2019 Google LLC
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for Intel Firmware Support Package binary blob (S section)
+#
+
+from binman.etype.blob_ext import Entry_blob_ext
+
+class Entry_intel_fsp_s(Entry_blob_ext):
+ """Intel Firmware Support Package (FSP) silicon init
+
+ Properties / Entry arguments:
+ - filename: Filename of file to read into entry
+
+ This file contains a binary blob which is used on some devices to set up
+ the silicon. U-Boot executes this code in U-Boot proper after SDRAM is
+ running, so that it can make full use of memory. Documentation is typically
+    not available in sufficient detail to allow U-Boot to do this itself.
+
+ An example filename is 'fsp_s.bin'
+
+ See README.x86 for information about x86 binary blobs.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
diff --git a/tools/binman/etype/intel_fsp_t.py b/tools/binman/etype/intel_fsp_t.py
new file mode 100644
index 00000000000..80d95cc6f9c
--- /dev/null
+++ b/tools/binman/etype/intel_fsp_t.py
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2019 Google LLC
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for Intel Firmware Support Package binary blob (T section)
+#
+
+from binman.etype.blob_ext import Entry_blob_ext
+
+class Entry_intel_fsp_t(Entry_blob_ext):
+ """Intel Firmware Support Package (FSP) temp ram init
+
+ Properties / Entry arguments:
+ - filename: Filename of file to read into entry
+
+ This file contains a binary blob which is used on some devices to set up
+ temporary memory (Cache-as-RAM or CAR). U-Boot executes this code in TPL so
+ that it has access to memory for its stack and initial storage.
+
+ An example filename is 'fsp_t.bin'
+
+ See README.x86 for information about x86 binary blobs.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
diff --git a/tools/binman/etype/intel_ifwi.py b/tools/binman/etype/intel_ifwi.py
new file mode 100644
index 00000000000..6513b97c3e5
--- /dev/null
+++ b/tools/binman/etype/intel_ifwi.py
@@ -0,0 +1,148 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for Intel Integrated Firmware Image (IFWI)
+#
+
+from collections import OrderedDict
+
+from binman.entry import Entry
+from binman.etype.blob_ext import Entry_blob_ext
+from dtoc import fdt_util
+from u_boot_pylib import tools
+
+class Entry_intel_ifwi(Entry_blob_ext):
+ """Intel Integrated Firmware Image (IFWI) file
+
+ Properties / Entry arguments:
+ - filename: Filename of file to read into entry. This is either the
+ IFWI file itself, or a file that can be converted into one using a
+ tool
+ - convert-fit: If present this indicates that the ifwitool should be
+ used to convert the provided file into a IFWI.
+
+ This file contains code and data used by the SoC that is required to make
+ it work. It includes U-Boot TPL, microcode, things related to the CSE
+ (Converged Security Engine, the microcontroller that loads all the firmware)
+ and other items beyond the wit of man.
+
+ A typical filename is 'ifwi.bin' for an IFWI file, or 'fitimage.bin' for a
+ file that will be converted to an IFWI.
+
+ The position of this entry is generally set by the intel-descriptor entry.
+
+ The contents of the IFWI are specified by the subnodes of the IFWI node.
+    Each subnode describes an entry which is placed into the IFWI with a given
+ sub-partition (and optional entry name).
+
+ Properties for subnodes:
+        - ifwi-subpart: sub-partition to put this entry into, e.g. "IBBP"
+        - ifwi-entry: entry name to use, e.g. "IBBL"
+ - ifwi-replace: if present, indicates that the item should be replaced
+ in the IFWI. Otherwise it is added.
+
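+    For example, the following (illustrative) node converts a fitimage to an
+    IFWI and replaces the IBBL entry of the IBBP sub-partition with U-Boot TPL::
+
+        intel-ifwi {
+            filename = "fitimage.bin";
+            convert-fit;
+
+            u-boot-tpl {
+                ifwi-replace;
+                ifwi-subpart = "IBBP";
+                ifwi-entry = "IBBL";
+            };
+        };
+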
+ See README.x86 for information about x86 binary blobs.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+ self._convert_fit = fdt_util.GetBool(self._node, 'convert-fit')
+ self._ifwi_entries = OrderedDict()
+
+ def ReadNode(self):
+ self.ReadEntries()
+ super().ReadNode()
+
+ def _BuildIfwi(self):
+ """Build the contents of the IFWI and write it to the 'data' property"""
+ # Create the IFWI file if needed
+ if self._convert_fit:
+ inname = self._pathname
+ outname = tools.get_output_filename('ifwi.bin')
+ if self.ifwitool.create_ifwi(inname, outname) is None:
+ # Bintool is missing; just create a zeroed ifwi.bin
+ self.record_missing_bintool(self.ifwitool)
+ self.SetContents(tools.get_bytes(0, 1024))
+
+ self._filename = 'ifwi.bin'
+ self._pathname = outname
+ else:
+ # Provide a different code path here to ensure we have test coverage
+ outname = self._pathname
+
+ # Delete OBBP if it is there, then add the required new items
+ if self.ifwitool.delete_subpart(outname, 'OBBP') is None:
+ # Bintool is missing; just use zero data
+ self.record_missing_bintool(self.ifwitool)
+ self.SetContents(tools.get_bytes(0, 1024))
+ return True
+
+ for entry in self._ifwi_entries.values():
+ # First get the input data and put it in a file
+ data = entry.GetPaddedData()
+ uniq = self.GetUniqueName()
+ input_fname = tools.get_output_filename('input.%s' % uniq)
+ tools.write_file(input_fname, data)
+
+ # At this point we know that ifwitool is present, so we don't need
+ # to check for None here
+ self.ifwitool.add_subpart(
+ outname, entry._ifwi_subpart, entry._ifwi_entry_name,
+ input_fname, entry._ifwi_replace)
+
+ self.ReadBlobContents()
+ return True
+
+ def ObtainContents(self):
+ """Get the contents for the IFWI
+
+ Unfortunately we cannot create anything from scratch here, as Intel has
+ tools which create precursor binaries with lots of data and settings,
+ and these are not incorporated into binman.
+
+ The first step is to get a file in the IFWI format. This is either
+ supplied directly or is extracted from a fitimage using the 'create'
+ subcommand.
+
+ After that we delete the OBBP sub-partition and add each of the files
+        that we want in the IFWI file, one for each sub-entry of the IFWI node.
+ """
+ self._pathname = tools.get_input_filename(self._filename,
+ self.section.GetAllowMissing())
+ # Allow the file to be missing
+ if not self._pathname:
+ self.SetContents(b'')
+ self.missing = True
+ return True
+ for entry in self._ifwi_entries.values():
+ if not entry.ObtainContents():
+ return False
+ return self._BuildIfwi()
+
+ def ProcessContents(self):
+ if self.missing:
+ return True
+ orig_data = self.data
+ self._BuildIfwi()
+ same = orig_data == self.data
+ return same
+
+ def ReadEntries(self):
+ """Read the subnodes to find out what should go in this IFWI"""
+ for node in self._node.subnodes:
+ entry = Entry.Create(self.section, node)
+ entry.ReadNode()
+ entry._ifwi_replace = fdt_util.GetBool(node, 'ifwi-replace')
+ entry._ifwi_subpart = fdt_util.GetString(node, 'ifwi-subpart')
+ entry._ifwi_entry_name = fdt_util.GetString(node, 'ifwi-entry')
+ self._ifwi_entries[entry._ifwi_subpart] = entry
+
+ def WriteSymbols(self, section):
+ """Write symbol values into binary files for access at run time"""
+ if not self.missing:
+ for entry in self._ifwi_entries.values():
+ entry.WriteSymbols(self)
+
+ def AddBintools(self, btools):
+ super().AddBintools(btools)
+ self.ifwitool = self.AddBintool(btools, 'ifwitool')
diff --git a/tools/binman/etype/intel_me.py b/tools/binman/etype/intel_me.py
new file mode 100644
index 00000000000..b93ebabdc9e
--- /dev/null
+++ b/tools/binman/etype/intel_me.py
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for Intel Management Engine binary blob
+#
+
+from binman.etype.blob_ext import Entry_blob_ext
+
+class Entry_intel_me(Entry_blob_ext):
+ """Intel Management Engine (ME) file
+
+ Properties / Entry arguments:
+ - filename: Filename of file to read into entry
+
+ This file contains code used by the SoC that is required to make it work.
+ The Management Engine is like a background task that runs things that are
+ not clearly documented, but may include keyboard, display and network
+    access. For platforms that use the ME it is not possible to disable it.
+    U-Boot does not directly execute code in the ME binary.
+
+ A typical filename is 'me.bin'.
+
+ The position of this entry is generally set by the intel-descriptor entry.
+
+ See README.x86 for information about x86 binary blobs.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
diff --git a/tools/binman/etype/intel_mrc.py b/tools/binman/etype/intel_mrc.py
new file mode 100644
index 00000000000..bb8b26ff686
--- /dev/null
+++ b/tools/binman/etype/intel_mrc.py
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for Intel Memory Reference Code binary blob
+#
+
+from binman.etype.blob_ext import Entry_blob_ext
+
+class Entry_intel_mrc(Entry_blob_ext):
+ """Intel Memory Reference Code (MRC) file
+
+ Properties / Entry arguments:
+ - filename: Filename of file to read into entry
+
+ This file contains code for setting up the SDRAM on some Intel systems. This
+ is executed by U-Boot when needed early during startup. A typical filename
+ is 'mrc.bin'.
+
+ See README.x86 for information about x86 binary blobs.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+
+ def GetDefaultFilename(self):
+ return 'mrc.bin'
diff --git a/tools/binman/etype/intel_refcode.py b/tools/binman/etype/intel_refcode.py
new file mode 100644
index 00000000000..9112730a9a4
--- /dev/null
+++ b/tools/binman/etype/intel_refcode.py
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for Intel Reference Code binary blob
+#
+
+from binman.etype.blob_ext import Entry_blob_ext
+
+class Entry_intel_refcode(Entry_blob_ext):
+ """Intel Reference Code file
+
+ Properties / Entry arguments:
+ - filename: Filename of file to read into entry
+
+ This file contains code for setting up the platform on some Intel systems.
+ This is executed by U-Boot when needed early during startup. A typical
+ filename is 'refcode.bin'.
+
+ See README.x86 for information about x86 binary blobs.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+
+ def GetDefaultFilename(self):
+ return 'refcode.bin'
diff --git a/tools/binman/etype/intel_vbt.py b/tools/binman/etype/intel_vbt.py
new file mode 100644
index 00000000000..8afd576600c
--- /dev/null
+++ b/tools/binman/etype/intel_vbt.py
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (C) 2017, Bin Meng <bmeng.cn@gmail.com>
+#
+# Entry-type module for Intel Video BIOS Table binary blob
+#
+
+from binman.etype.blob_ext import Entry_blob_ext
+
+class Entry_intel_vbt(Entry_blob_ext):
+ """Intel Video BIOS Table (VBT) file
+
+ Properties / Entry arguments:
+ - filename: Filename of file to read into entry
+
+ This file contains code that sets up the integrated graphics subsystem on
+ some Intel SoCs. U-Boot executes this when the display is started up.
+
+ See README.x86 for information about Intel binary blobs.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
diff --git a/tools/binman/etype/intel_vga.py b/tools/binman/etype/intel_vga.py
new file mode 100644
index 00000000000..51e6465f0d0
--- /dev/null
+++ b/tools/binman/etype/intel_vga.py
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for x86 VGA ROM binary blob
+#
+
+from binman.etype.blob_ext import Entry_blob_ext
+
+class Entry_intel_vga(Entry_blob_ext):
+ """Intel Video Graphics Adaptor (VGA) file
+
+ Properties / Entry arguments:
+ - filename: Filename of file to read into entry
+
+ This file contains code that sets up the integrated graphics subsystem on
+ some Intel SoCs. U-Boot executes this when the display is started up.
+
+ This is similar to the VBT file but in a different format.
+
+ See README.x86 for information about Intel binary blobs.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
diff --git a/tools/binman/etype/mkimage.py b/tools/binman/etype/mkimage.py
new file mode 100644
index 00000000000..6ae5d0c8a4f
--- /dev/null
+++ b/tools/binman/etype/mkimage.py
@@ -0,0 +1,256 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for producing an image using mkimage
+#
+
+from collections import OrderedDict
+
+from binman.entry import Entry
+from binman.etype.section import Entry_section
+from dtoc import fdt_util
+from u_boot_pylib import tools
+
+class Entry_mkimage(Entry_section):
+ """Binary produced by mkimage
+
+ Properties / Entry arguments:
+ - args: Arguments to pass
+ - data-to-imagename: Indicates that the -d data should be passed in as
+ the image name also (-n)
+ - multiple-data-files: boolean to tell binman to pass all files as
+          datafiles to mkimage instead of creating a temporary file with the
+          concatenated contents of the datafiles
+ - filename: filename of output binary generated by mkimage
+
+ The data passed to mkimage via the -d flag is collected from subnodes of the
+ mkimage node, e.g.::
+
+ mkimage {
+ filename = "imximage.bin";
+ args = "-n test -T imximage";
+
+ u-boot-spl {
+ };
+ };
+
+ This calls mkimage to create an imximage with `u-boot-spl.bin` as the data
+ file, with mkimage being called like this::
+
+ mkimage -d <data_file> -n test -T imximage <output_file>
+
+ The output from mkimage then becomes part of the image produced by
+    binman, but is also written into the `imximage.bin` file. If you need to put
+ multiple things in the data file, you can use a section, or just multiple
+ subnodes like this::
+
+ mkimage {
+ args = "-n test -T imximage";
+
+ u-boot-spl {
+ };
+
+ u-boot-tpl {
+ };
+ };
+
+ Note that binman places the contents (here SPL and TPL) into a single file
+ and passes that to mkimage using the -d option.
+
+ To pass all datafiles untouched to mkimage::
+
+ mkimage {
+ args = "-n rk3399 -T rkspi";
+ multiple-data-files;
+
+ u-boot-tpl {
+ };
+
+ u-boot-spl {
+ };
+ };
+
+ This calls mkimage to create a Rockchip RK3399-specific first stage
+    bootloader, made of TPL+SPL. Since this first-stage bootloader requires the
+    TPL and SPL to be aligned, along with some other adjustments that mkimage
+    handles directly, binman is told not to concatenate the datafiles before
+    passing them to mkimage.
+
+ To use CONFIG options in the arguments, use a string list instead, as in
+ this example which also produces four arguments::
+
+ mkimage {
+ args = "-n", CONFIG_SYS_SOC, "-T imximage";
+
+ u-boot-spl {
+ };
+ };
+
+ If you need to pass the input data in with the -n argument as well, then use
+ the 'data-to-imagename' property::
+
+ mkimage {
+ args = "-T imximage";
+ data-to-imagename;
+
+ u-boot-spl {
+ };
+ };
+
+ That will pass the data to mkimage both as the data file (with -d) and as
+ the image name (with -n). In both cases, a filename is passed as the
+ argument, with the actual data being in that file.
+
+    If you need to pass different data in with -n, then use an `imagename`
+    subnode::
+
+ mkimage {
+ args = "-T imximage";
+
+ imagename {
+ blob {
+ filename = "spl/u-boot-spl.cfgout"
+ };
+ };
+
+ u-boot-spl {
+ };
+ };
+
+ This will pass in u-boot-spl as the input data and the .cfgout file as the
+ -n data.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+ self._imagename = None
+ self._multiple_data_files = False
+
+ def ReadNode(self):
+ super().ReadNode()
+ self._multiple_data_files = fdt_util.GetBool(self._node,
+ 'multiple-data-files')
+ self._args = fdt_util.GetArgs(self._node, 'args')
+ self._data_to_imagename = fdt_util.GetBool(self._node,
+ 'data-to-imagename')
+ if self._data_to_imagename and self._node.FindNode('imagename'):
+ self.Raise('Cannot use both imagename node and data-to-imagename')
+
+ def ReadEntries(self):
+ """Read the subnodes to find out what should go in this image"""
+ for node in self._node.subnodes:
+ if self.IsSpecialSubnode(node):
+ continue
+ entry = Entry.Create(self, node,
+ expanded=self.GetImage().use_expanded,
+ missing_etype=self.GetImage().missing_etype)
+ entry.ReadNode()
+ entry.SetPrefix(self._name_prefix)
+ if entry.name == 'imagename':
+ self._imagename = entry
+ else:
+ self._entries[entry.name] = entry
+
+ def BuildSectionData(self, required):
+ """Build mkimage entry contents
+
+ Runs mkimage to build the entry contents
+
+ Args:
+ required (bool): True if the data must be present, False if it is OK
+ to return None
+
+ Returns:
+ bytes: Contents of the section
+ """
+ # Use a non-zero size for any fake files to keep mkimage happy
+ # Note that testMkimageImagename() relies on this 'mkimage' parameter
+ fake_size = 1024
+ if self._multiple_data_files:
+ fnames = []
+ uniq = self.GetUniqueName()
+ for entry in self._entries.values():
+ # Put the contents in a temporary file
+ ename = f'mkimage-in-{uniq}-{entry.name}'
+ fname = tools.get_output_filename(ename)
+ data = entry.GetData(required)
+ tools.write_file(fname, data)
+ fnames.append(fname)
+ input_fname = ":".join(fnames)
+ data = b''
+ else:
+ data, input_fname, uniq = self.collect_contents_to_file(
+ self._entries.values(), 'mkimage', fake_size)
+ if self._imagename:
+ image_data, imagename_fname, _ = self.collect_contents_to_file(
+ [self._imagename], 'mkimage-n', 1024)
+ outfile = self._filename if self._filename else 'mkimage-out.%s' % uniq
+ output_fname = tools.get_output_filename(outfile)
+
+ missing_list = []
+ self.CheckMissing(missing_list)
+ self.missing = bool(missing_list)
+ if self.missing:
+ return b''
+
+ args = ['-d', input_fname]
+ if self._data_to_imagename:
+ args += ['-n', input_fname]
+ elif self._imagename:
+ args += ['-n', imagename_fname]
+ args += self._args + [output_fname]
+ if self.mkimage.run_cmd(*args) is not None:
+ return tools.read_file(output_fname)
+ else:
+ # Bintool is missing; just use the input data as the output
+ self.record_missing_bintool(self.mkimage)
+ return data
+
+ def GetEntries(self):
+ # Make a copy so we don't change the original
+ entries = OrderedDict(self._entries)
+ if self._imagename:
+ entries['imagename'] = self._imagename
+ return entries
+
+ def AddBintools(self, btools):
+ super().AddBintools(btools)
+ self.mkimage = self.AddBintool(btools, 'mkimage')
+
+ def CheckEntries(self):
+ pass
+
+ def ProcessContents(self):
+ # The blob may have changed due to WriteSymbols()
+ ok = super().ProcessContents()
+ data = self.BuildSectionData(True)
+ ok2 = self.ProcessContentsUpdate(data)
+ return ok and ok2
+
+ def SetImagePos(self, image_pos):
+ """Set the position in the image
+
+ This sets each subentry's offsets, sizes and positions-in-image
+ according to where they ended up in the packed mkimage file.
+
+ NOTE: This assumes a legacy mkimage and assumes that the images are
+ written to the output in order. SoC-specific mkimage handling may not
+ conform to this, in which case these values may be wrong.
+
+ Args:
+ image_pos (int): Position of this entry in the image
+ """
+        # The mkimage header consists of 0x40 bytes, followed by a table of
+ # offsets for each file
+ upto = 0x40
+
+ # Skip the 0-terminated list of offsets (assume a single image)
+ upto += 4 + 4
+ for entry in self.GetEntries().values():
+ entry.SetOffsetSize(upto, None)
+
+ # Give up if any entries lack a size
+ if entry.size is None:
+ return
+ upto += entry.size
+
+ super().SetImagePos(image_pos)
diff --git a/tools/binman/etype/null.py b/tools/binman/etype/null.py
new file mode 100644
index 00000000000..263fb5244df
--- /dev/null
+++ b/tools/binman/etype/null.py
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2023 Google LLC
+# Written by Simon Glass <sjg@chromium.org>
+#
+
+from binman.entry import Entry
+from dtoc import fdt_util
+from u_boot_pylib import tools
+
+class Entry_null(Entry):
+ """An entry which has no contents of its own
+
+ Note that the size property must be set since otherwise this entry does not
+ know how large it should be.
+
+ The contents are set by the containing section, e.g. the section's pad
+ byte.
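+
+    For example, to reserve 16 bytes which will be filled with the section's
+    pad byte::
+
+        null {
+            size = <16>;
+        };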
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+ self.required_props = ['size']
+
+ def ObtainContents(self):
+ # null contents
+ return None
diff --git a/tools/binman/etype/nxp_imx8mimage.py b/tools/binman/etype/nxp_imx8mimage.py
new file mode 100644
index 00000000000..3585120b79b
--- /dev/null
+++ b/tools/binman/etype/nxp_imx8mimage.py
@@ -0,0 +1,74 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2023-2024 Marek Vasut <marex@denx.de>
+# Written with much help from Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for generating the i.MX8M mkimage -T imx8mimage
+# configuration file and invocation of mkimage -T imx8mimage on the
+# configuration file and input data.
+#
+
+from collections import OrderedDict
+
+from binman.entry import Entry
+from binman.etype.mkimage import Entry_mkimage
+from binman.etype.section import Entry_section
+from binman import elf
+from dtoc import fdt_util
+from u_boot_pylib import tools
+
+class Entry_nxp_imx8mimage(Entry_mkimage):
+ """NXP i.MX8M imx8mimage .cfg file generator and mkimage invoker
+
+ Properties / Entry arguments:
+ - nxp,boot-from - device to boot from (e.g. 'sd')
+ - nxp,loader-address - loader address (SPL text base)
+ - nxp,rom-version - BootROM version ('2' for i.MX8M Nano and Plus)
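+
+    A minimal node wrapping SPL for an i.MX8M Nano/Plus board might look like
+    this (illustrative; CONFIG_SPL_TEXT_BASE assumes the image description is
+    preprocessed with the board configuration)::
+
+        nxp-imx8mimage {
+            nxp,boot-from = "sd";
+            nxp,rom-version = <2>;
+            nxp,loader-address = <CONFIG_SPL_TEXT_BASE>;
+
+            u-boot-spl {
+            };
+        };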
+ """
+
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+ self.required_props = ['nxp,boot-from', 'nxp,rom-version', 'nxp,loader-address']
+
+ def ReadNode(self):
+ super().ReadNode()
+ self.boot_from = fdt_util.GetString(self._node, 'nxp,boot-from')
+ self.loader_address = fdt_util.GetInt(self._node, 'nxp,loader-address')
+ self.rom_version = fdt_util.GetInt(self._node, 'nxp,rom-version')
+ self.ReadEntries()
+
+ def BuildSectionData(self, required):
+ data, input_fname, uniq = self.collect_contents_to_file(
+ self._entries.values(), 'input')
+ # Generate mkimage configuration file similar to imx8mimage.cfg
+ # and pass it to mkimage to generate SPL image for us here.
+ cfg_fname = tools.get_output_filename('nxp.imx8mimage.cfg.%s' % uniq)
+ with open(cfg_fname, 'w') as outf:
+ print('ROM_VERSION v%d' % self.rom_version, file=outf)
+ print('BOOT_FROM %s' % self.boot_from, file=outf)
+ print('LOADER %s %#x' % (input_fname, self.loader_address), file=outf)
+
+ output_fname = tools.get_output_filename(f'cfg-out.{uniq}')
+ args = ['-d', input_fname, '-n', cfg_fname, '-T', 'imx8mimage',
+ output_fname]
+ if self.mkimage.run_cmd(*args) is not None:
+ return tools.read_file(output_fname)
+ else:
+ # Bintool is missing; just use the input data as the output
+ self.record_missing_bintool(self.mkimage)
+ return data
+
+ def SetImagePos(self, image_pos):
+ # Customized SoC specific SetImagePos which skips the mkimage etype
+ # implementation and removes the 0x48 offset introduced there. That
+        # offset is only used for uImage/fitImage, which is not the case
+        # here.
+ upto = 0x00
+ for entry in super().GetEntries().values():
+ entry.SetOffsetSize(upto, None)
+
+ # Give up if any entries lack a size
+ if entry.size is None:
+ return
+ upto += entry.size
+
+ Entry_section.SetImagePos(self, image_pos)
diff --git a/tools/binman/etype/opensbi.py b/tools/binman/etype/opensbi.py
new file mode 100644
index 00000000000..74d473d535a
--- /dev/null
+++ b/tools/binman/etype/opensbi.py
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Copyright (C) 2021, Bin Meng <bmeng.cn@gmail.com>
+#
+# Entry-type module for RISC-V OpenSBI binary blob
+#
+
+from binman.etype.blob_named_by_arg import Entry_blob_named_by_arg
+
+class Entry_opensbi(Entry_blob_named_by_arg):
+ """RISC-V OpenSBI fw_dynamic blob
+
+ Properties / Entry arguments:
+ - opensbi-path: Filename of file to read into entry. This is typically
+ called fw_dynamic.bin
+
+ This entry holds the run-time firmware, typically started by U-Boot SPL.
+ See the U-Boot README for your architecture or board for how to use it. See
+ https://github.com/riscv/opensbi for more information about OpenSBI.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node, 'opensbi')
+ self.external = True
diff --git a/tools/binman/etype/powerpc_mpc85xx_bootpg_resetvec.py b/tools/binman/etype/powerpc_mpc85xx_bootpg_resetvec.py
new file mode 100644
index 00000000000..3a92fa399fb
--- /dev/null
+++ b/tools/binman/etype/powerpc_mpc85xx_bootpg_resetvec.py
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2018 NXP
+#
+# Entry-type module for the PowerPC mpc85xx bootpg and resetvec code for U-Boot
+#
+
+from binman.etype.blob import Entry_blob
+
+class Entry_powerpc_mpc85xx_bootpg_resetvec(Entry_blob):
+ """PowerPC mpc85xx bootpg + resetvec code for U-Boot
+
+ Properties / Entry arguments:
+ - filename: Filename of u-boot-br.bin (default 'u-boot-br.bin')
+
+    This entry is valid for PowerPC mpc85xx CPUs. It holds the
+    'bootpg + resetvec' code, which needs to be placed at offset
+    'RESET_VECTOR_ADDRESS - 0xffc'.
+ """
+
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+
+ def GetDefaultFilename(self):
+ return 'u-boot-br.bin'
diff --git a/tools/binman/etype/pre_load.py b/tools/binman/etype/pre_load.py
new file mode 100644
index 00000000000..2e4c72359ff
--- /dev/null
+++ b/tools/binman/etype/pre_load.py
@@ -0,0 +1,163 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2022 Softathome
+# Written by Philippe Reynes <philippe.reynes@softathome.com>
+#
+# Entry-type for the global header
+#
+
+import os
+import struct
+from dtoc import fdt_util
+from u_boot_pylib import tools
+
+from binman.entry import Entry
+from binman.etype.collection import Entry_collection
+from binman.entry import EntryArg
+
+from Cryptodome.Hash import SHA256, SHA384, SHA512
+from Cryptodome.PublicKey import RSA
+from Cryptodome.Signature import pkcs1_15
+from Cryptodome.Signature import pss
+
+PRE_LOAD_MAGIC = b'UBSH'
+
+RSAS = {
+ 'rsa1024': 1024 / 8,
+ 'rsa2048': 2048 / 8,
+ 'rsa4096': 4096 / 8
+}
+
+SHAS = {
+ 'sha256': SHA256,
+ 'sha384': SHA384,
+ 'sha512': SHA512
+}
+
+class Entry_pre_load(Entry_collection):
+ """Pre load image header
+
+ Properties / Entry arguments:
+        - pre-load-key-path: Path of the directory that stores the key
+          (provided by the environment variable PRE_LOAD_KEY_PATH)
+ - content: List of phandles to entries to sign
+ - algo-name: Hash and signature algo to use for the signature
+ - padding-name: Name of the padding (pkcs-1.5 or pss)
+ - key-name: Filename of the private key to sign
+ - header-size: Total size of the header
+ - version: Version of the header
+
+ This entry creates a pre-load header that contains a global
+ image signature.
+
+ For example, this creates an image with a pre-load header and a binary::
+
+ binman {
+ image2 {
+ filename = "sandbox.bin";
+
+ pre-load {
+ content = <&image>;
+ algo-name = "sha256,rsa2048";
+ padding-name = "pss";
+ key-name = "private.pem";
+ header-size = <4096>;
+ version = <1>;
+ };
+
+ image: blob-ext {
+ filename = "sandbox.itb";
+ };
+ };
+ };
+ """
+
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+ self.algo_name = fdt_util.GetString(self._node, 'algo-name')
+ self.padding_name = fdt_util.GetString(self._node, 'padding-name')
+ self.key_name = fdt_util.GetString(self._node, 'key-name')
+ self.header_size = fdt_util.GetInt(self._node, 'header-size')
+ self.version = fdt_util.GetInt(self._node, 'version')
+
+ def ReadNode(self):
+ super().ReadNode()
+ self.key_path, = self.GetEntryArgsOrProps(
+ [EntryArg('pre-load-key-path', str)])
+ if self.key_path is None:
+ self.key_path = ''
+
+ def _CreateHeader(self):
+ """Create a pre load header"""
+ hash_name, sign_name = self.algo_name.split(',')
+ padding_name = self.padding_name
+ key_name = os.path.join(self.key_path, self.key_name)
+
+ # Check hash and signature name/type
+ if hash_name not in SHAS:
+ self.Raise(hash_name + " is not supported")
+ if sign_name not in RSAS:
+ self.Raise(sign_name + " is not supported")
+
+ # Read the key
+ key = RSA.import_key(tools.read_file(key_name))
+
+ # Check if the key has the expected size
+ if key.size_in_bytes() != RSAS[sign_name]:
+ self.Raise("The key " + self.key_name + " don't have the expected size")
+
+ # Compute the hash
+ hash_image = SHAS[hash_name].new()
+ hash_image.update(self.image)
+
+ # Compute the signature
+ if padding_name is None:
+ padding_name = "pkcs-1.5"
+ if padding_name == "pss":
+ salt_len = key.size_in_bytes() - hash_image.digest_size - 2
+ padding = pss
+ padding_args = {'salt_bytes': salt_len}
+ elif padding_name == "pkcs-1.5":
+ padding = pkcs1_15
+ padding_args = {}
+ else:
+ self.Raise(padding_name + " is not supported")
+
+ sig = padding.new(key, **padding_args).sign(hash_image)
+
+ hash_sig = SHA256.new()
+ hash_sig.update(sig)
+
+ version = self.version
+ header_size = self.header_size
+ image_size = len(self.image)
+ ofs_img_sig = 64 + len(sig)
+ flags = 0
+ reserved0 = 0
+ reserved1 = 0
+
+ first_header = struct.pack('>4sIIIIIII32s', PRE_LOAD_MAGIC,
+ version, header_size, image_size,
+ ofs_img_sig, flags, reserved0,
+ reserved1, hash_sig.digest())
+
+ hash_first_header = SHAS[hash_name].new()
+ hash_first_header.update(first_header)
+ sig_first_header = padding.new(key, **padding_args).sign(hash_first_header)
+
+ data = first_header + sig_first_header + sig
+ pad = bytearray(self.header_size - len(data))
+
+ return data + pad
+
+ def ObtainContents(self):
+ """Obtain a placeholder for the header contents"""
+        # Wait until the image is available
+ self.image = self.GetContents(False)
+ if self.image is None:
+ return False
+ self.SetContents(self._CreateHeader())
+ return True
+
+ def ProcessContents(self):
+ data = self._CreateHeader()
+ return self.ProcessContentsUpdate(data)
diff --git a/tools/binman/etype/rockchip_tpl.py b/tools/binman/etype/rockchip_tpl.py
new file mode 100644
index 00000000000..74f58ba8570
--- /dev/null
+++ b/tools/binman/etype/rockchip_tpl.py
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Entry-type module for Rockchip TPL binary
+#
+
+from binman.etype.blob_named_by_arg import Entry_blob_named_by_arg
+
+class Entry_rockchip_tpl(Entry_blob_named_by_arg):
+ """Rockchip TPL binary
+
+ Properties / Entry arguments:
+ - rockchip-tpl-path: Filename of file to read into the entry,
+ typically <soc>_ddr_<version>.bin
+
+ This entry holds an external TPL binary used by some Rockchip SoCs
+ instead of normal U-Boot TPL, typically to initialize DRAM.
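+
+    The node itself typically needs no properties, for example::
+
+        rockchip-tpl {
+        };
+
+    with the actual filename supplied through the 'rockchip-tpl-path' entry
+    argument (for example via binman's -a/--entry-arg option or from the build
+    system).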
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node, 'rockchip-tpl')
+ self.external = True
diff --git a/tools/binman/etype/scp.py b/tools/binman/etype/scp.py
new file mode 100644
index 00000000000..a9bee3ce8bc
--- /dev/null
+++ b/tools/binman/etype/scp.py
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2020 Samuel Holland <samuel@sholland.org>
+#
+# Entry-type module for System Control Processor (SCP) firmware blob
+#
+
+from binman.etype.blob_named_by_arg import Entry_blob_named_by_arg
+
+class Entry_scp(Entry_blob_named_by_arg):
+ """System Control Processor (SCP) firmware blob
+
+ Properties / Entry arguments:
+ - scp-path: Filename of file to read into the entry, typically scp.bin
+
+ This entry holds firmware for an external platform-specific coprocessor.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node, 'scp')
+ self.external = True
diff --git a/tools/binman/etype/section.py b/tools/binman/etype/section.py
new file mode 100644
index 00000000000..30c1041c7e8
--- /dev/null
+++ b/tools/binman/etype/section.py
@@ -0,0 +1,1048 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+
+"""Entry-type module for sections (groups of entries)
+
+Sections are entries which can contain other entries. This allows hierarchical
+images to be created.
+"""
+
+from collections import OrderedDict
+import concurrent.futures
+import re
+import sys
+
+from binman.entry import Entry
+from binman import state
+from dtoc import fdt_util
+from u_boot_pylib import tools
+from u_boot_pylib import tout
+from u_boot_pylib.tools import to_hex_size
+
+
+class Entry_section(Entry):
+ """Entry that contains other entries
+
+ A section is an entry which can contain other entries, thus allowing
+ hierarchical images to be created. See 'Sections and hierarchical images'
+ in the binman README for more information.
+
+ The base implementation simply joins the various entries together, using
+ various rules about alignment, etc.
+
+ Subclassing
+ ~~~~~~~~~~~
+
+ This class can be subclassed to support other file formats which hold
+ multiple entries, such as CBFS. To do this, override the following
+    functions. The documentation here describes what your function should do;
+    a rough sketch of a subclass follows the list below. For example code, see
+    etypes which subclass `Entry_section`, or `cbfs.py` for a more involved
+    example::
+
+ $ grep -l \\(Entry_section tools/binman/etype/*.py
+
+ ReadNode()
+ Call `super().ReadNode()`, then read any special properties for the
+ section. Then call `self.ReadEntries()` to read the entries.
+
+ Binman calls this at the start when reading the image description.
+
+ ReadEntries()
+ Read in the subnodes of the section. This may involve creating entries
+ of a particular etype automatically, as well as reading any special
+ properties in the entries. For each entry, entry.ReadNode() should be
+ called, to read the basic entry properties. The properties should be
+ added to `self._entries[]`, in the correct order, with a suitable name.
+
+ Binman calls this at the start when reading the image description.
+
+ BuildSectionData(required)
+ Create the custom file format that you want and return it as bytes.
+ This likely sets up a file header, then loops through the entries,
+ adding them to the file. For each entry, call `entry.GetData()` to
+ obtain the data. If that returns None, and `required` is False, then
+ this method must give up and return None. But if `required` is True then
+ it should assume that all data is valid.
+
+ Binman calls this when packing the image, to find out the size of
+ everything. It is called again at the end when building the final image.
+
+ SetImagePos(image_pos):
+ Call `super().SetImagePos(image_pos)`, then set the `image_pos` values
+ for each of the entries. This should use the custom file format to find
+ the `start offset` (and `image_pos`) of each entry. If the file format
+ uses compression in such a way that there is no offset available (other
+ than reading the whole file and decompressing it), then the offsets for
+ affected entries can remain unset (`None`). The size should also be set
+ if possible.
+
+ Binman calls this after the image has been packed, to update the
+ location that all the entries ended up at.
+
+ ReadChildData(child, decomp, alt_format):
+ The default version of this may be good enough, if you are able to
+ implement SetImagePos() correctly. But that is a bit of a bypass, so
+ you can override this method to read from your custom file format. It
+ should read the entire entry containing the custom file using
+ `super().ReadData(True)`, then parse the file to get the data for the
+ given child, then return that data.
+
+ If your file format supports compression, the `decomp` argument tells
+ you whether to return the compressed data (`decomp` is False) or to
+ uncompress it first, then return the uncompressed data (`decomp` is
+ True). This is used by the `binman extract -U` option.
+
+ If your entry supports alternative formats, the alt_format provides the
+ alternative format that the user has selected. Your function should
+ return data in that format. This is used by the 'binman extract -l'
+ option.
+
+ Binman calls this when reading in an image, in order to populate all the
+ entries with the data from that image (`binman ls`).
+
+ WriteChildData(child):
+ Binman calls this after `child.data` is updated, to inform the custom
+ file format about this, in case it needs to do updates.
+
+ The default version of this does nothing and probably needs to be
+ overridden for the 'binman replace' command to work. Your version should
+ use `child.data` to update the data for that child in the custom file
+ format.
+
+ Binman calls this when updating an image that has been read in and in
+ particular to update the data for a particular entry (`binman replace`)
+
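+    As a rough sketch (the 4-byte 'MYHD' header is purely illustrative), a
+    subclass for a format with a simple header might override just
+    BuildSectionData()::
+
+        class Entry_myformat(Entry_section):
+            def BuildSectionData(self, required):
+                # Illustrative header followed by each entry's contents
+                data = bytearray(b'MYHD')
+                for entry in self._entries.values():
+                    entry_data = entry.GetData(required)
+                    if entry_data is None and not required:
+                        return None
+                    data += entry_data
+                return bytes(data)
+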
+ Properties / Entry arguments
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ See :ref:`develop/package/binman:Image description format` for more
+ information.
+
+ align-default
+ Default alignment for this section, if no alignment is given in the
+ entry
+
+ pad-byte
+ Pad byte to use when padding
+
+ sort-by-offset
+ True if entries should be sorted by offset, False if they must be
+ in-order in the device tree description
+
+ end-at-4gb
+ Used to build an x86 ROM which ends at 4GB (2^32)
+
+ name-prefix
+ Adds a prefix to the name of every entry in the section when writing out
+ the map
+
+ skip-at-start
+ Number of bytes before the first entry starts. These effectively adjust
+ the starting offset of entries. For example, if this is 16, then the
+ first entry would start at 16. An entry with offset = 20 would in fact
+ be written at offset 4 in the image file, since the first 16 bytes are
+ skipped when writing.
+
+ filename
+ filename to write the unpadded section contents to within the output
+ directory (None to skip this).
+
+    Since a section is also an entry, it inherits all the properties of entries
+ too.
+
+ Note that the `allow_missing` member controls whether this section permits
+    external blobs to be missing their contents. If so, binman still produces an
+    image, but of course it will not be functional. It is useful to make sure that
+ Continuous Integration systems can build without the binaries being
+ available. This is set by the `SetAllowMissing()` method, if
+ `--allow-missing` is passed to binman.
+ """
+ def __init__(self, section, etype, node, test=False):
+ if not test:
+ super().__init__(section, etype, node)
+ self._entries = OrderedDict()
+ self._pad_byte = 0
+ self._sort = False
+ self._skip_at_start = None
+ self._end_4gb = False
+ self._ignore_missing = False
+ self._filename = None
+ self.align_default = 0
+
+ def IsSpecialSubnode(self, node):
+ """Check if a node is a special one used by the section itself
+
+ Some nodes are used for hashing / signatures and do not add entries to
+ the actual section.
+
+ Returns:
+ bool: True if the node is a special one, else False
+ """
+ start_list = ('cipher', 'hash', 'signature', 'template')
+ return any(node.name.startswith(name) for name in start_list)
+
+ def ReadNode(self):
+ """Read properties from the section node"""
+ super().ReadNode()
+ self._pad_byte = fdt_util.GetInt(self._node, 'pad-byte', 0)
+ self._sort = fdt_util.GetBool(self._node, 'sort-by-offset')
+ self._end_4gb = fdt_util.GetBool(self._node, 'end-at-4gb')
+ self._skip_at_start = fdt_util.GetInt(self._node, 'skip-at-start')
+ if self._end_4gb:
+ if not self.size:
+ self.Raise("Section size must be provided when using end-at-4gb")
+ if self._skip_at_start is not None:
+ self.Raise("Provide either 'end-at-4gb' or 'skip-at-start'")
+ else:
+ self._skip_at_start = 0x100000000 - self.size
+ else:
+ if self._skip_at_start is None:
+ self._skip_at_start = 0
+ self._name_prefix = fdt_util.GetString(self._node, 'name-prefix')
+ self.align_default = fdt_util.GetInt(self._node, 'align-default', 0)
+ self._filename = fdt_util.GetString(self._node, 'filename',
+ self._filename)
+
+ self.ReadEntries()
+
+ def ReadEntries(self):
+ for node in self._node.subnodes:
+ if self.IsSpecialSubnode(node):
+ continue
+ entry = Entry.Create(self, node,
+ expanded=self.GetImage().use_expanded,
+ missing_etype=self.GetImage().missing_etype)
+ entry.ReadNode()
+ entry.SetPrefix(self._name_prefix)
+ self._entries[node.name] = entry
+
+ def _Raise(self, msg):
+ """Raises an error for this section
+
+ Args:
+ msg (str): Error message to use in the raise string
+ Raises:
+ ValueError: always
+ """
+ raise ValueError("Section '%s': %s" % (self._node.path, msg))
+
+ def GetFdts(self):
+ fdts = {}
+ for entry in self._entries.values():
+ fdts.update(entry.GetFdts())
+ return fdts
+
+ def ProcessFdt(self, fdt):
+ """Allow entries to adjust the device tree
+
+ Some entries need to adjust the device tree for their purposes. This
+ may involve adding or deleting properties.
+ """
+ todo = self._entries.values()
+ for passnum in range(3):
+ next_todo = []
+ for entry in todo:
+ if not entry.ProcessFdt(fdt):
+ next_todo.append(entry)
+ todo = next_todo
+ if not todo:
+ break
+ if todo:
+ self.Raise('Internal error: Could not complete processing of Fdt: remaining %s' %
+ todo)
+ return True
+
+ def gen_entries(self):
+ super().gen_entries()
+ for entry in self._entries.values():
+ entry.gen_entries()
+
+ def AddMissingProperties(self, have_image_pos):
+ """Add new properties to the device tree as needed for this entry"""
+ super().AddMissingProperties(have_image_pos)
+ if self.compress != 'none':
+ have_image_pos = False
+ for entry in self._entries.values():
+ entry.AddMissingProperties(have_image_pos)
+
+ def ObtainContents(self, fake_size=0, skip_entry=None):
+ return self.GetEntryContents(skip_entry=skip_entry)
+
+ def GetPaddedDataForEntry(self, entry, entry_data):
+ """Get the data for an entry including any padding
+
+ Gets the entry data and uses the section pad-byte value to add padding
+ before and after as defined by the pad-before and pad-after properties.
+ This does not consider alignment.
+
+ Args:
+ entry: Entry to check
+            entry_data: Data for the entry, False if it is null
+
+ Returns:
+ Contents of the entry along with any pad bytes before and
+ after it (bytes)
+ """
+ pad_byte = (entry._pad_byte if isinstance(entry, Entry_section)
+ else self._pad_byte)
+
+ data = bytearray()
+ # Handle padding before the entry
+ if entry.pad_before:
+ data += tools.get_bytes(self._pad_byte, entry.pad_before)
+
+ # Add in the actual entry data
+ data += entry_data
+
+ # Handle padding after the entry
+ if entry.pad_after:
+ data += tools.get_bytes(self._pad_byte, entry.pad_after)
+
+ if entry.size:
+ data += tools.get_bytes(pad_byte, entry.size - len(data))
+
+ self.Detail('GetPaddedDataForEntry: size %s' % to_hex_size(self.data))
+
+ return data
+
+ def BuildSectionData(self, required):
+ """Build the contents of a section
+
+ This places all entries at the right place, dealing with padding before
+ and after entries. It does not do padding for the section itself (the
+ pad-before and pad-after properties in the section items) since that is
+ handled by the parent section.
+
+ This should be overridden by subclasses which want to build their own
+ data structure for the section.
+
+        Missing entries will have been given empty (or fake) data, so are
+ processed normally here.
+
+ Args:
+ required: True if the data must be present, False if it is OK to
+ return None
+
+ Returns:
+ Contents of the section (bytes), None if not available
+ """
+ section_data = bytearray()
+
+ for entry in self._entries.values():
+ entry_data = entry.GetData(required)
+
+ # This can happen when this section is referenced from a collection
+ # earlier in the image description. See testCollectionSection().
+ if not required and entry_data is None:
+ return None
+
+ entry_data_final = entry_data
+ if entry_data is None:
+ pad_byte = (entry._pad_byte if isinstance(entry, Entry_section)
+ else self._pad_byte)
+                entry_data_final = tools.get_bytes(pad_byte, entry.size)
+
+ data = self.GetPaddedDataForEntry(entry, entry_data_final)
+ # Handle empty space before the entry
+ pad = (entry.offset or 0) - self._skip_at_start - len(section_data)
+ if pad > 0:
+ section_data += tools.get_bytes(self._pad_byte, pad)
+
+ # Add in the actual entry data
+ if entry.overlap:
+ end_offset = entry.offset + entry.size
+ if end_offset > len(section_data):
+ entry.Raise("Offset %#x (%d) ending at %#x (%d) must overlap with existing entries" %
+ (entry.offset, entry.offset, end_offset,
+ end_offset))
+            # Don't write anything for null entries
+ if entry_data is not None:
+ section_data = (section_data[:entry.offset] + data +
+ section_data[entry.offset + entry.size:])
+ else:
+ section_data += data
+
+ self.Detail('GetData: %d entries, total size %#x' %
+ (len(self._entries), len(section_data)))
+ return self.CompressData(section_data)
+
+ def GetPaddedData(self, data=None):
+ """Get the data for a section including any padding
+
+ Gets the section data and uses the parent section's pad-byte value to
+ add padding before and after as defined by the pad-before and pad-after
+ properties. If this is a top-level section (i.e. an image), this is the
+ same as GetData(), since padding is not supported.
+
+ This does not consider alignment.
+
+ Returns:
+ Contents of the section along with any pad bytes before and
+ after it (bytes)
+ """
+ section = self.section or self
+ if data is None:
+ data = self.GetData()
+ return section.GetPaddedDataForEntry(self, data)
+
+ def GetData(self, required=True):
+ """Get the contents of an entry
+
+ This builds the contents of the section, stores this as the contents of
+ the section and returns it. If the section has a filename, the data is
+ written there also.
+
+ Args:
+ required: True if the data must be present, False if it is OK to
+ return None
+
+ Returns:
+            bytes content of the section, made up of all of its subentries.
+ This excludes any padding. If the section is compressed, the
+ compressed data is returned
+ """
+ if not self.build_done:
+ data = self.BuildSectionData(required)
+ if data is None:
+ return None
+ self.SetContents(data)
+ else:
+ data = self.data
+ if self._filename:
+ tools.write_file(tools.get_output_filename(self._filename), data)
+ return data
+
+ def GetOffsets(self):
+ """Handle entries that want to set the offset/size of other entries
+
+ This calls each entry's GetOffsets() method. If it returns a list
+ of entries to update, it updates them.
+ """
+ self.GetEntryOffsets()
+ return {}
+
+ def ResetForPack(self):
+ """Reset offset/size fields so that packing can be done again"""
+ super().ResetForPack()
+ for entry in self._entries.values():
+ entry.ResetForPack()
+
+ def Pack(self, offset):
+ """Pack all entries into the section"""
+ self._PackEntries()
+ if self._sort:
+ self._SortEntries()
+ self._extend_entries()
+
+ if self.build_done:
+ self.size = None
+ else:
+ data = self.BuildSectionData(True)
+ self.SetContents(data)
+
+ self.CheckSize()
+
+ offset = super().Pack(offset)
+ self.CheckEntries()
+ return offset
+
+ def _PackEntries(self):
+ """Pack all entries into the section"""
+ offset = self._skip_at_start
+ for entry in self._entries.values():
+ offset = entry.Pack(offset)
+ return offset
+
+ def _extend_entries(self):
+ """Extend any entries that are permitted to"""
+ exp_entry = None
+ for entry in self._entries.values():
+ if exp_entry:
+ exp_entry.extend_to_limit(entry.offset)
+ exp_entry = None
+ if entry.extend_size:
+ exp_entry = entry
+ if exp_entry:
+ exp_entry.extend_to_limit(self.size)
+
+ def _SortEntries(self):
+ """Sort entries by offset"""
+ entries = sorted(self._entries.values(), key=lambda entry: entry.offset)
+ self._entries.clear()
+ for entry in entries:
+ self._entries[entry._node.name] = entry
+
+ def CheckEntries(self):
+ """Check that entries do not overlap or extend outside the section"""
+ max_size = self.size if self.uncomp_size is None else self.uncomp_size
+
+ offset = 0
+ prev_name = 'None'
+ for entry in self._entries.values():
+ entry.CheckEntries()
+ if (entry.offset < self._skip_at_start or
+ entry.offset + entry.size > self._skip_at_start +
+ max_size):
+ entry.Raise('Offset %#x (%d) size %#x (%d) is outside the '
+ "section '%s' starting at %#x (%d) "
+ 'of size %#x (%d)' %
+ (entry.offset, entry.offset, entry.size, entry.size,
+ self._node.path, self._skip_at_start,
+ self._skip_at_start, max_size, max_size))
+ if not entry.overlap:
+ if entry.offset < offset and entry.size:
+ entry.Raise("Offset %#x (%d) overlaps with previous entry '%s' ending at %#x (%d)" %
+ (entry.offset, entry.offset, prev_name, offset,
+ offset))
+ offset = entry.offset + entry.size
+ prev_name = entry.GetPath()
+
+ def WriteSymbols(self, section):
+ """Write symbol values into binary files for access at run time"""
+ for entry in self._entries.values():
+ entry.WriteSymbols(self)
+
+ def SetCalculatedProperties(self):
+ super().SetCalculatedProperties()
+ for entry in self._entries.values():
+ entry.SetCalculatedProperties()
+
+ def SetImagePos(self, image_pos):
+ super().SetImagePos(image_pos)
+ if self.compress == 'none':
+ for entry in self._entries.values():
+ entry.SetImagePos(image_pos + self.offset)
+
+ def ProcessContents(self):
+ sizes_ok_base = super(Entry_section, self).ProcessContents()
+ sizes_ok = True
+ for entry in self._entries.values():
+ if not entry.ProcessContents():
+ sizes_ok = False
+ return sizes_ok and sizes_ok_base
+
+ def WriteMap(self, fd, indent):
+ """Write a map of the section to a .map file
+
+ Args:
+ fd: File to write the map to
+ """
+ Entry.WriteMapLine(fd, indent, self.name, self.offset or 0,
+ self.size, self.image_pos)
+ for entry in self._entries.values():
+ entry.WriteMap(fd, indent + 1)
+
+ def GetEntries(self):
+ return self._entries
+
+ def GetContentsByPhandle(self, phandle, source_entry, required):
+ """Get the data contents of an entry specified by a phandle
+
+        This uses a phandle to look up a node and find the entry
+ associated with it. Then it returns the contents of that entry.
+
+ The node must be a direct subnode of this section.
+
+ Args:
+ phandle: Phandle to look up (integer)
+ source_entry: Entry containing that phandle (used for error
+ reporting)
+ required: True if the data must be present, False if it is OK to
+ return None
+
+ Returns:
+            data from the associated entry (as bytes), or None if not found
+ """
+ node = self._node.GetFdt().LookupPhandle(phandle)
+ if not node:
+ source_entry.Raise("Cannot find node for phandle %d" % phandle)
+ entry = self.FindEntryByNode(node)
+ if not entry:
+ source_entry.Raise("Cannot find entry for node '%s'" % node.name)
+ return entry.GetData(required)
+
+ def LookupEntry(self, entries, sym_name, msg):
+ """Look up the entry for an ENF symbol
+
+ Args:
+ entries (dict): entries to search:
+ key: entry name
+ value: Entry object
+ sym_name: Symbol name in the ELF file to look up in the format
+ _binman_<entry>_prop_<property> where <entry> is the name of
+ the entry and <property> is the property to find (e.g.
+ _binman_u_boot_prop_offset). As a special case, you can append
+ _any to <entry> to have it search for any matching entry. E.g.
+ _binman_u_boot_any_prop_offset will match entries called u-boot,
+ u-boot-img and u-boot-nodtb)
+ msg: Message to display if an error occurs
+
+ Returns:
+ tuple:
+ Entry: entry object that was found
+ str: name used to search for entries (uses '-' instead of the
+ '_' used by the symbol name)
+ str: property name the symbol refers to, e.g. 'image_pos'
+
+ Raises:
+            ValueError: the symbol name cannot be decoded, e.g. does not have
+ a '_binman_' prefix
+ """
+ m = re.match(r'^_binman_(\w+)_prop_(\w+)$', sym_name)
+ if not m:
+ raise ValueError("%s: Symbol '%s' has invalid format" %
+ (msg, sym_name))
+ entry_name, prop_name = m.groups()
+ entry_name = entry_name.replace('_', '-')
+ entry = entries.get(entry_name)
+ if not entry:
+ if entry_name.endswith('-any'):
+ root = entry_name[:-4]
+ for name in entries:
+ if name.startswith(root):
+ rest = name[len(root):]
+ if rest in ['', '-elf', '-img', '-nodtb']:
+ entry = entries[name]
+ return entry, entry_name, prop_name
+
+ def LookupSymbol(self, sym_name, optional, msg, base_addr, entries=None):
+ """Look up a symbol in an ELF file
+
+ Looks up a symbol in an ELF file. Only entry types which come from an
+ ELF image can be used by this function.
+
+ At present the only entry properties supported are:
+ offset
+ image_pos - 'base_addr' is added if this is not an end-at-4gb image
+ size
+
+ Args:
+ sym_name: Symbol name in the ELF file to look up in the format
+ _binman_<entry>_prop_<property> where <entry> is the name of
+ the entry and <property> is the property to find (e.g.
+ _binman_u_boot_prop_offset). As a special case, you can append
+ _any to <entry> to have it search for any matching entry. E.g.
+ _binman_u_boot_any_prop_offset will match entries called u-boot,
+ u-boot-img and u-boot-nodtb)
+ optional: True if the symbol is optional. If False this function
+ will raise if the symbol is not found
+ msg: Message to display if an error occurs
+ base_addr: Base address of image. This is added to the returned
+ image_pos in most cases so that the returned position indicates
+                where the targeted entry/binary has actually been loaded. But
+ if end-at-4gb is used, this is not done, since the binary is
+ already assumed to be linked to the ROM position and using
+                execute-in-place (XIP).
+            entries: Dict of entries to search, keyed by entry name, or None
+                to use this section's own entries
+
+ Returns:
+ Value that should be assigned to that symbol, or None if it was
+ optional and not found
+
+ Raises:
+ ValueError if the symbol is invalid or not found, or references a
+ property which is not supported
+ """
+ if not entries:
+ entries = self._entries
+ entry, entry_name, prop_name = self.LookupEntry(entries, sym_name, msg)
+ if not entry:
+ err = ("%s: Entry '%s' not found in list (%s)" %
+ (msg, entry_name, ','.join(entries.keys())))
+ if optional:
+ print('Warning: %s' % err, file=sys.stderr)
+ return None
+ raise ValueError(err)
+ if prop_name == 'offset':
+ return entry.offset
+ elif prop_name == 'image_pos':
+ value = entry.image_pos
+ if not self.GetImage()._end_4gb:
+ value += base_addr
+ return value
+        elif prop_name == 'size':
+ return entry.size
+ else:
+ raise ValueError("%s: No such property '%s'" % (msg, prop_name))
+
+ def GetRootSkipAtStart(self):
+ """Get the skip-at-start value for the top-level section
+
+        This is used to find out the starting offset for the root section that
+        contains this section. If this is a top-level section then it returns
+        the skip-at-start offset for this section.
+
+        This is used to get the absolute position of the section within the
+        image.
+
+ Returns:
+ Integer skip-at-start value for the root section containing this
+ section
+ """
+ if self.section:
+ return self.section.GetRootSkipAtStart()
+ return self._skip_at_start
+
+ def GetStartOffset(self):
+ """Get the start offset for this section
+
+ Returns:
+ The first available offset in this section (typically 0)
+ """
+ return self._skip_at_start
+
+ def GetImageSize(self):
+ """Get the size of the image containing this section
+
+ Returns:
+ Image size as an integer number of bytes, which may be None if the
+ image size is dynamic and its sections have not yet been packed
+ """
+ return self.GetImage().size
+
+ def FindEntryType(self, etype):
+ """Find an entry type in the section
+
+ Args:
+ etype: Entry type to find
+ Returns:
+ entry matching that type, or None if not found
+ """
+ for entry in self._entries.values():
+ if entry.etype == etype:
+ return entry
+ return None
+
+ def GetEntryContents(self, skip_entry=None):
+ """Call ObtainContents() for each entry in the section
+
+ The overall goal of this function is to read in any available data in
+ this entry and any subentries. This includes reading in blobs, setting
+ up objects which have predefined contents, etc.
+
+ Since entry types which contain entries call ObtainContents() on all
+ those entries too, the result is that ObtainContents() is called
+ recursively for the whole tree below this one.
+
+        Entries with subentries are generally not *themselves* processed here,
+ i.e. their ObtainContents() implementation simply obtains contents of
+ their subentries, skipping their own contents. For example, the
+ implementation here (for entry_Section) does not attempt to pack the
+ entries into a final result. That is handled later.
+
+ Generally, calling this results in SetContents() being called for each
+        entry, so that the 'data' and 'contents_size' properties are set, and
+        subsequent calls to GetData() will return valid data.
+
+ Where 'allow_missing' is set, this can result in the 'missing' property
+ being set to True if there is no data. This is handled by setting the
+ data to b''. This function will still return success. Future calls to
+ GetData() for this entry will return b'', or in the case where the data
+ is faked, GetData() will return that fake data.
+
+ Args:
+ skip_entry: (single) Entry to skip, or None to process all entries
+
+ Note that this may set entry.absent to True if the entry is not
+ actually needed
+ """
+ def _CheckDone(entry):
+ if entry != skip_entry:
+ if entry.ObtainContents() is False:
+ next_todo.append(entry)
+ return entry
+
+ todo = self.GetEntries().values()
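+        # Entries may depend on the contents of other entries, so allow a few
+        # passes: anything whose ObtainContents() returns False is retried on
+        # the next pass, up to three passes in total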
+ for passnum in range(3):
+ threads = state.GetThreads()
+ next_todo = []
+
+ if threads == 0:
+ for entry in todo:
+ _CheckDone(entry)
+ else:
+ with concurrent.futures.ThreadPoolExecutor(
+ max_workers=threads) as executor:
+ future_to_data = {
+ entry: executor.submit(_CheckDone, entry)
+ for entry in todo}
+ timeout = 60
+ if self.GetImage().test_section_timeout:
+ timeout = 0
+ done, not_done = concurrent.futures.wait(
+ future_to_data.values(), timeout=timeout)
+ # Make sure we check the result, so any exceptions are
+ # generated. Check the results in entry order, since tests
+ # may expect earlier entries to fail first.
+ for entry in todo:
+ job = future_to_data[entry]
+ job.result()
+ if not_done:
+ self.Raise('Timed out obtaining contents')
+
+ todo = next_todo
+ if not todo:
+ break
+
+ if todo:
+ self.Raise('Internal error: Could not complete processing of contents: remaining %s' %
+ todo)
+ return True
+
+ def drop_absent(self):
+ """Drop entries which are absent"""
+ self._entries = {n: e for n, e in self._entries.items() if not e.absent}
+
+ def _SetEntryOffsetSize(self, name, offset, size):
+ """Set the offset and size of an entry
+
+ Args:
+ name: Entry name to update
+ offset: New offset, or None to leave alone
+ size: New size, or None to leave alone
+ """
+ entry = self._entries.get(name)
+ if not entry:
+ self._Raise("Unable to set offset/size for unknown entry '%s'" %
+ name)
+ entry.SetOffsetSize(self._skip_at_start + offset if offset is not None
+ else None, size)
+
+ def GetEntryOffsets(self):
+ """Handle entries that want to set the offset/size of other entries
+
+ This calls each entry's GetOffsets() method. If it returns a list
+ of entries to update, it updates them.
+ """
+ for entry in self._entries.values():
+ offset_dict = entry.GetOffsets()
+ for name, info in offset_dict.items():
+ self._SetEntryOffsetSize(name, *info)
+
+ def CheckSize(self):
+ contents_size = len(self.data)
+
+ size = self.size
+ if not size:
+ data = self.GetPaddedData(self.data)
+ size = len(data)
+ size = tools.align(size, self.align_size)
+
+ if self.size and contents_size > self.size:
+ self._Raise("contents size %#x (%d) exceeds section size %#x (%d)" %
+ (contents_size, contents_size, self.size, self.size))
+ if not self.size:
+ self.size = size
+ if self.size != tools.align(self.size, self.align_size):
+ self._Raise("Size %#x (%d) does not match align-size %#x (%d)" %
+ (self.size, self.size, self.align_size,
+ self.align_size))
+ return size
+
+ def ListEntries(self, entries, indent):
+ """List the files in the section"""
+ Entry.AddEntryInfo(entries, indent, self.name, self.etype, self.size,
+ self.image_pos, None, self.offset, self)
+ for entry in self._entries.values():
+ entry.ListEntries(entries, indent + 1)
+
+ def LoadData(self, decomp=True):
+ for entry in self._entries.values():
+ entry.LoadData(decomp)
+ data = self.ReadData(decomp)
+ self.contents_size = len(data)
+ self.ProcessContentsUpdate(data)
+ self.Detail('Loaded data')
+
+ def GetImage(self):
+ """Get the image containing this section
+
+ Note that a top-level section is actually an Image, so this function may
+ return self.
+
+ Returns:
+ Image object containing this section
+ """
+ if not self.section:
+ return self
+ return self.section.GetImage()
+
+ def GetSort(self):
+ """Check if the entries in this section will be sorted
+
+ Returns:
+ True if to be sorted, False if entries will be left in the order
+ they appear in the device tree
+ """
+ return self._sort
+
+ def ReadData(self, decomp=True, alt_format=None):
+ tout.info("ReadData path='%s'" % self.GetPath())
+ parent_data = self.section.ReadData(True, alt_format)
+ offset = self.offset - self.section._skip_at_start
+ data = parent_data[offset:offset + self.size]
+ tout.info(
+ '%s: Reading data from offset %#x-%#x (real %#x), size %#x, got %#x' %
+ (self.GetPath(), self.offset, self.offset + self.size, offset,
+ self.size, len(data)))
+ return data
+
+ def ReadChildData(self, child, decomp=True, alt_format=None):
+ tout.debug(f"ReadChildData for child '{child.GetPath()}'")
+ parent_data = self.ReadData(True, alt_format)
+ offset = child.offset - self._skip_at_start
+ tout.debug("Extract for child '%s': offset %#x, skip_at_start %#x, result %#x" %
+ (child.GetPath(), child.offset, self._skip_at_start, offset))
+ data = parent_data[offset:offset + child.size]
+ if decomp:
+ indata = data
+ data = child.DecompressData(indata)
+ if child.uncomp_size:
+ tout.info("%s: Decompressing data size %#x with algo '%s' to data size %#x" %
+ (child.GetPath(), len(indata), child.compress,
+ len(data)))
+ if alt_format:
+ new_data = child.GetAltFormat(data, alt_format)
+ if new_data is not None:
+ data = new_data
+ return data
+
+ def WriteData(self, data, decomp=True):
+ ok = super().WriteData(data, decomp)
+
+ # The section contents are now fixed and cannot be rebuilt from the
+ # containing entries.
+ self.mark_build_done()
+ return ok
+
+ def WriteChildData(self, child):
+ return super().WriteChildData(child)
+
+ def SetAllowMissing(self, allow_missing):
+ """Set whether a section allows missing external blobs
+
+ Args:
+ allow_missing: True if allowed, False if not allowed
+ """
+ self.allow_missing = allow_missing
+ for entry in self.GetEntries().values():
+ entry.SetAllowMissing(allow_missing)
+
+ def SetAllowFakeBlob(self, allow_fake):
+ """Set whether a section allows to create a fake blob
+
+ Args:
+ allow_fake: True if allowed, False if not allowed
+ """
+ super().SetAllowFakeBlob(allow_fake)
+ for entry in self.GetEntries().values():
+ entry.SetAllowFakeBlob(allow_fake)
+
+ def CheckMissing(self, missing_list):
+ """Check if any entries in this section have missing external blobs
+
+ If there are missing (non-optional) blobs, the entries are added to the
+ list
+
+ Args:
+ missing_list: List of Entry objects to be added to
+ """
+ for entry in self.GetEntries().values():
+ entry.CheckMissing(missing_list)
+
+ def CheckFakedBlobs(self, faked_blobs_list):
+ """Check if any entries in this section have faked external blobs
+
+ If there are faked blobs, the entries are added to the list
+
+ Args:
+ faked_blobs_list: List of Entry objects to be added to
+ """
+ for entry in self.GetEntries().values():
+ entry.CheckFakedBlobs(faked_blobs_list)
+
+ def CheckOptional(self, optional_list):
+ """Check the section for missing but optional external blobs
+
+ If there are missing (optional) blobs, the entries are added to the list
+
+ Args:
+ optional_list (list): List of Entry objects to be added to
+ """
+ for entry in self.GetEntries().values():
+ entry.CheckOptional(optional_list)
+
+ def check_missing_bintools(self, missing_list):
+ """Check if any entries in this section have missing bintools
+
+ If there are missing bintools, these are added to the list
+
+ Args:
+ missing_list: List of Bintool objects to be added to
+ """
+ super().check_missing_bintools(missing_list)
+ for entry in self.GetEntries().values():
+ entry.check_missing_bintools(missing_list)
+
+ def _CollectEntries(self, entries, entries_by_name, add_entry):
+ """Collect all the entries in an section
+
+ This builds up a dict of entries in this section and all subsections.
+ Entries are indexed by path and by name.
+
+        Since all paths are unique, entries will not have any conflicts. However,
+        entries_by_name may have conflicts if two entries have the same name
+ (e.g. with different parent sections). In this case, an entry at a
+ higher level in the hierarchy will win over a lower-level entry.
+
+ Args:
+ entries: dict to put entries:
+ key: entry path
+ value: Entry object
+ entries_by_name: dict to put entries
+ key: entry name
+ value: Entry object
+ add_entry: Entry to add
+ """
+ entries[add_entry.GetPath()] = add_entry
+ to_add = add_entry.GetEntries()
+ if to_add:
+ for entry in to_add.values():
+ entries[entry.GetPath()] = entry
+ for entry in to_add.values():
+ self._CollectEntries(entries, entries_by_name, entry)
+ entries_by_name[add_entry.name] = add_entry
+
+ def MissingArgs(self, entry, missing):
+ """Report a missing argument, if enabled
+
+ For entries which require arguments, this reports an error if some are
+ missing. If missing entries are being ignored (e.g. because we read the
+ entry from an image rather than creating it), this function does
+ nothing.
+
+ Args:
+ entry (Entry): Entry to raise the error on
+ missing (list of str): List of missing properties / entry args, each
+ a string
+ """
+ if not self._ignore_missing:
+ missing = ', '.join(missing)
+ entry.Raise(f'Missing required properties/entry args: {missing}')
+
+ def CheckAltFormats(self, alt_formats):
+ for entry in self.GetEntries().values():
+ entry.CheckAltFormats(alt_formats)
+
+ def AddBintools(self, btools):
+ super().AddBintools(btools)
+ for entry in self.GetEntries().values():
+ entry.AddBintools(btools)
+
+ def read_elf_segments(self):
+ entries = self.GetEntries()
+
+ # If the section only has one entry, see if it can provide ELF segments
+ if len(entries) == 1:
+ for entry in entries.values():
+ return entry.read_elf_segments()
+ return None
diff --git a/tools/binman/etype/tee_os.py b/tools/binman/etype/tee_os.py
new file mode 100644
index 00000000000..5529727e833
--- /dev/null
+++ b/tools/binman/etype/tee_os.py
@@ -0,0 +1,96 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (C) 2022 Texas Instruments Incorporated - https://www.ti.com/
+#
+# Entry-type module for OP-TEE Trusted OS firmware blob
+#
+
+import struct
+
+from binman.etype.blob_named_by_arg import Entry_blob_named_by_arg
+from binman import elf
+
+class Entry_tee_os(Entry_blob_named_by_arg):
+ """Entry containing an OP-TEE Trusted OS (TEE) blob
+
+ Properties / Entry arguments:
+ - tee-os-path: Filename of file to read into entry. This is typically
+ called tee.bin or tee.elf
+
+ This entry holds the run-time firmware, typically started by U-Boot SPL.
+ See the U-Boot README for your architecture or board for how to use it. See
+ https://github.com/OP-TEE/optee_os for more information about OP-TEE.
+
+ Note that if the file is in ELF format, it must go in a FIT. In that case,
+ this entry will mark itself as absent, providing the data only through the
+ read_elf_segments() method.
+
+    Marking this entry as absent means that if it is used in the wrong context
+ it can be automatically dropped. Thus it is possible to add an OP-TEE entry
+ like this::
+
+ binman {
+ tee-os {
+ };
+ };
+
+ and pass either an ELF or plain binary in with -a tee-os-path <filename>
+ and have binman do the right thing:
+
+ - include the entry if tee.bin is provided and it does NOT have the v1
+ header
+ - drop it otherwise
+
+ When used within a FIT, we can do::
+
+ binman {
+ fit {
+ tee-os {
+ };
+ };
+ };
+
+ which will split the ELF into separate nodes for each segment, if an ELF
+ file is provided (see :ref:`etype_fit`), or produce a single node if the
+    OP-TEE binary v1 format is provided (see optee_doc_).
+
+ .. _optee_doc: https://optee.readthedocs.io/en/latest/architecture/core.html#partitioning-of-the-binary
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node, 'tee-os')
+ self.external = True
+
+ @staticmethod
+ def is_optee_bin_v1(data):
+ return len(data) >= 8 and data[0:5] == b'OPTE\x01'
+
+ def ObtainContents(self, fake_size=0):
+ result = super().ObtainContents(fake_size)
+ if not self.missing:
+ # If using the flat binary (without the OP-TEE header), then it is
+            # just included as a blob. But if it is an ELF or uses the v1
+ # binary header, then the FIT implementation will call
+ # read_elf_segments() to get the segment information
+ if elf.is_valid(self.data):
+ self.mark_absent('uses Elf format which must be in a FIT')
+ elif self.is_optee_bin_v1(self.data):
+ # The FIT implementation will call read_elf_segments() to get
+ # the segment information
+ self.mark_absent('uses v1 format which must be in a FIT')
+ return result
+
+ def read_elf_segments(self):
+ data = self.GetData()
+ if self.is_optee_bin_v1(data):
+ # OP-TEE v1 format (tee.bin)
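+            # Sketch of the assumed v1 header layout, based on the unpack
+            # below: the 'OPTE' magic and version byte come first, then at
+            # offset 0x8 five 32-bit little-endian words (init size, start/load
+            # address hi and lo, an unused word, paged size), with the payload
+            # starting at offset 0x1c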
+ init_sz, start_hi, start_lo, _, paged_sz = (
+ struct.unpack_from('<5I', data, 0x8))
+ if paged_sz != 0:
+ self.Raise("OP-TEE paged mode not supported")
+ e_entry = (start_hi << 32) + start_lo
+ p_addr = e_entry
+ p_data = data[0x1c:]
+ if len(p_data) != init_sz:
+ self.Raise("Invalid OP-TEE file: size mismatch (expected %#x, have %#x)" %
+ (init_sz, len(p_data)))
+ return [[0, p_addr, p_data]], e_entry
+ return None
diff --git a/tools/binman/etype/text.py b/tools/binman/etype/text.py
new file mode 100644
index 00000000000..e4deb4abacc
--- /dev/null
+++ b/tools/binman/etype/text.py
@@ -0,0 +1,78 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+
+from collections import OrderedDict
+
+from binman.entry import Entry, EntryArg
+from dtoc import fdt_util
+from u_boot_pylib import tools
+
+
+class Entry_text(Entry):
+ """An entry which contains text
+
+ The text can be provided either in the node itself or by a command-line
+ argument. There is a level of indirection to allow multiple text strings
+ and sharing of text.
+
+ Properties / Entry arguments:
+ text-label: The value of this string indicates the property / entry-arg
+ that contains the string to place in the entry
+ <xxx> (actual name is the value of text-label): contains the string to
+ place in the entry.
+ <text>: The text to place in the entry (overrides the above mechanism).
+ This is useful when the text is constant.
+
+ Example node::
+
+ text {
+ size = <50>;
+ text-label = "message";
+ };
+
+    You can then use::
+
+ binman -amessage="this is my message"
+
+ and binman will insert that string into the entry.
+
+ It is also possible to put the string directly in the node::
+
+ text {
+ size = <8>;
+ text-label = "message";
+ message = "a message directly in the node"
+ };
+
+ or just::
+
+ text {
+ size = <8>;
+ text = "some text directly in the node"
+ };
+
+ The text is not itself nul-terminated. This can be achieved, if required,
+ by setting the size of the entry to something larger than the text.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+ value = fdt_util.GetString(self._node, 'text')
+ if value:
+ value = tools.to_bytes(value)
+ else:
+ label, = self.GetEntryArgsOrProps([EntryArg('text-label', str)])
+ self.text_label = label
+ if self.text_label:
+ value, = self.GetEntryArgsOrProps([EntryArg(self.text_label,
+ str)])
+ value = tools.to_bytes(value) if value is not None else value
+ self.value = value
+
+ def ObtainContents(self):
+ if not self.value:
+ self.Raise("No value provided for text label '%s'" %
+ self.text_label)
+ self.SetContents(self.value)
+ return True
diff --git a/tools/binman/etype/ti_board_config.py b/tools/binman/etype/ti_board_config.py
new file mode 100644
index 00000000000..c10d66edcb1
--- /dev/null
+++ b/tools/binman/etype/ti_board_config.py
@@ -0,0 +1,264 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2022-2023 Texas Instruments Incorporated - https://www.ti.com/
+# Written by Neha Malcom Francis <n-francis@ti.com>
+#
+# Entry-type module for generating schema validated TI board
+# configuration binary
+#
+
+import os
+import struct
+import yaml
+import yamllint
+
+from collections import OrderedDict
+from jsonschema import validate
+from shutil import copyfileobj
+
+from binman.entry import Entry
+from binman.etype.section import Entry_section
+from dtoc import fdt_util
+from u_boot_pylib import tools
+from yamllint import config
+
+BOARDCFG = 0xB
+BOARDCFG_SEC = 0xD
+BOARDCFG_PM = 0xE
+BOARDCFG_RM = 0xC
+BOARDCFG_NUM_ELEMS = 4
+
+class Entry_ti_board_config(Entry_section):
+ """An entry containing a TI schema validated board config binary
+
+ This etype supports generation of two kinds of board configuration
+ binaries: singular board config binary as well as combined board config
+ binary.
+
+ Properties / Entry arguments:
+        - config: File containing board configuration data in YAML
+        - schema: File containing board configuration YAML schema against
+          which the config file is validated
+
+ Output files:
+ - board config binary: File containing board configuration binary
+
+    The above parameters are used only when the generated binary is
+ intended to be a single board configuration binary. Example::
+
+ my-ti-board-config {
+ ti-board-config {
+ config = "board-config.yaml";
+ schema = "schema.yaml";
+ };
+ };
+
+ To generate a combined board configuration binary, we pack the
+ needed individual binaries into a ti-board-config binary. In this case,
+ the available supported subnode names are board-cfg, pm-cfg, sec-cfg and
+ rm-cfg. The final binary is prepended with a header containing details about
+ the included board config binaries. Example::
+
+ my-combined-ti-board-config {
+ ti-board-config {
+ board-cfg {
+ config = "board-cfg.yaml";
+ schema = "schema.yaml";
+ };
+ sec-cfg {
+ config = "sec-cfg.yaml";
+ schema = "schema.yaml";
+ };
+            };
+        };
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+ self._config = None
+ self._schema = None
+ self._entries = OrderedDict()
+ self._num_elems = BOARDCFG_NUM_ELEMS
+ self._fmt = '<HHHBB'
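+        # Descriptor layout used for each board config in the combined image
+        # (see _add_boardcfg): type (u16), offset (u16), size (u16),
+        # devgrp (u8) and a final byte packed as zero (presumably reserved)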
+ self._index = 0
+ self._binary_offset = 0
+ self._sw_rev = 1
+ self._devgrp = 0
+
+ def ReadNode(self):
+ super().ReadNode()
+ self._config = fdt_util.GetString(self._node, 'config')
+ self._schema = fdt_util.GetString(self._node, 'schema')
+ # Depending on whether config file is present in node, we determine
+ # whether it is a combined board config binary or not
+ if self._config is None:
+ self.ReadEntries()
+ else:
+ self._config_file = tools.get_input_filename(self._config)
+ self._schema_file = tools.get_input_filename(self._schema)
+
+ def ReadEntries(self):
+ """Read the subnodes to find out what should go in this image
+ """
+ for node in self._node.subnodes:
+ if 'type' not in node.props:
+ entry = Entry.Create(self, node, 'ti-board-config')
+ entry.ReadNode()
+ cfg_data = entry.BuildSectionData(True)
+ entry._cfg_data = cfg_data
+ self._entries[entry.name] = entry
+ self._num_elems = len(self._node.subnodes)
+
+ def _convert_to_byte_chunk(self, val, data_type):
+ """Convert value into byte array
+
+ Args:
+ val: value to convert into byte array
+ data_type: data type used in schema, supported data types are u8,
+ u16 and u32
+
+ Returns:
+ array of bytes representing value
+ """
+ size = 0
+ if (data_type == '#/definitions/u8'):
+ size = 1
+ elif (data_type == '#/definitions/u16'):
+ size = 2
+ else:
+ size = 4
+ if type(val) == int:
+ br = val.to_bytes(size, byteorder='little')
+ return br
+
+ def _compile_yaml(self, schema_yaml, file_yaml):
+ """Convert YAML file into byte array based on YAML schema
+
+ Args:
+ schema_yaml: file containing YAML schema
+ file_yaml: file containing config to compile
+
+ Returns:
+            array of bytes representing the YAML config, encoded per the schema
+ """
+ br = bytearray()
+ for key, node in file_yaml.items():
+ node_schema = schema_yaml['properties'][key]
+ node_type = node_schema.get('type')
+            if 'type' not in node_schema:
+ br += self._convert_to_byte_chunk(node,
+ node_schema.get('$ref'))
+ elif node_type == 'object':
+ br += self._compile_yaml(node_schema, node)
+ elif node_type == 'array':
+ for item in node:
+ if not isinstance(item, dict):
+ br += self._convert_to_byte_chunk(
+ item, schema_yaml['properties'][key]['items']['$ref'])
+ else:
+ br += self._compile_yaml(node_schema.get('items'), item)
+ return br
+
+ def _generate_binaries(self):
+ """Generate config binary artifacts from the loaded YAML configuration file
+
+ Returns:
+ byte array containing config binary artifacts
+ or None if generation fails
+ """
+ cfg_binary = bytearray()
+ for key, node in self.file_yaml.items():
+ node_schema = self.schema_yaml['properties'][key]
+ br = self._compile_yaml(node_schema, node)
+ cfg_binary += br
+ return cfg_binary
+
+ def _add_boardcfg(self, bcfgtype, bcfgdata):
+ """Add board config to combined board config binary
+
+ Args:
+ bcfgtype (int): board config type
+ bcfgdata (byte array): board config data
+ """
+ size = len(bcfgdata)
+ desc = struct.pack(self._fmt, bcfgtype,
+ self._binary_offset, size, self._devgrp, 0)
+ with open(self.descfile, 'ab+') as desc_fh:
+ desc_fh.write(desc)
+ with open(self.bcfgfile, 'ab+') as bcfg_fh:
+ bcfg_fh.write(bcfgdata)
+ self._binary_offset += size
+ self._index += 1
+
+ def _finalize(self):
+ """Generate final combined board config binary
+
+ Returns:
+ byte array containing combined board config data
+ or None if unable to generate
+ """
+ with open(self.descfile, 'rb') as desc_fh:
+ with open(self.bcfgfile, 'rb') as bcfg_fh:
+ with open(self.fh_file, 'ab+') as fh:
+ copyfileobj(desc_fh, fh)
+ copyfileobj(bcfg_fh, fh)
+ data = tools.read_file(self.fh_file)
+ return data
+
+ def BuildSectionData(self, required):
+ if self._config is None:
+ self._binary_offset = 0
+ uniq = self.GetUniqueName()
+ self.fh_file = tools.get_output_filename('fh.%s' % uniq)
+ self.descfile = tools.get_output_filename('desc.%s' % uniq)
+ self.bcfgfile = tools.get_output_filename('bcfg.%s' % uniq)
+
+ # when binman runs again make sure we start clean
+ if os.path.exists(self.fh_file):
+ os.remove(self.fh_file)
+ if os.path.exists(self.descfile):
+ os.remove(self.descfile)
+ if os.path.exists(self.bcfgfile):
+ os.remove(self.bcfgfile)
+
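+            # Combined-image layout (as built below): a 2-byte header of
+            # num_elems (u8) and sw_rev (u8), then one descriptor per board
+            # config, then the concatenated config binaries themselves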
+ with open(self.fh_file, 'wb') as f:
+ t_bytes = f.write(struct.pack(
+ '<BB', self._num_elems, self._sw_rev))
+ self._binary_offset += t_bytes
+ self._binary_offset += self._num_elems * struct.calcsize(self._fmt)
+
+ if 'board-cfg' in self._entries:
+ self._add_boardcfg(BOARDCFG, self._entries['board-cfg']._cfg_data)
+
+ if 'sec-cfg' in self._entries:
+ self._add_boardcfg(BOARDCFG_SEC, self._entries['sec-cfg']._cfg_data)
+
+ if 'pm-cfg' in self._entries:
+ self._add_boardcfg(BOARDCFG_PM, self._entries['pm-cfg']._cfg_data)
+
+ if 'rm-cfg' in self._entries:
+ self._add_boardcfg(BOARDCFG_RM, self._entries['rm-cfg']._cfg_data)
+
+ data = self._finalize()
+ return data
+
+ else:
+ with open(self._config_file, 'r') as f:
+ self.file_yaml = yaml.safe_load(f)
+ with open(self._schema_file, 'r') as sch:
+ self.schema_yaml = yaml.safe_load(sch)
+
+ yaml_config = config.YamlLintConfig("extends: default")
+ for p in yamllint.linter.run(open(self._config_file, "r"), yaml_config):
+ self.Raise(f"Yamllint error: Line {p.line} in {self._config_file}: {p.rule}")
+ try:
+ validate(self.file_yaml, self.schema_yaml)
+ except Exception as e:
+ self.Raise(f"Schema validation error: {e}")
+
+ data = self._generate_binaries()
+ return data
+
+ def SetImagePos(self, image_pos):
+ Entry.SetImagePos(self, image_pos)
+
+ def CheckEntries(self):
+ Entry.CheckEntries(self)
diff --git a/tools/binman/etype/ti_dm.py b/tools/binman/etype/ti_dm.py
new file mode 100644
index 00000000000..0faa0bf0ca7
--- /dev/null
+++ b/tools/binman/etype/ti_dm.py
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com/
+# Written by Neha Malcom Francis <n-francis@ti.com>
+#
+# Entry-type module for TI Device Manager (DM)
+#
+
+from binman.etype.blob_named_by_arg import Entry_blob_named_by_arg
+
+class Entry_ti_dm(Entry_blob_named_by_arg):
+ """TI Device Manager (DM) blob
+
+ Properties / Entry arguments:
+ - ti-dm-path: Filename of file to read into the entry, typically ti-dm.bin
+
+ This entry holds the device manager responsible for resource and power management
+ in K3 devices. See https://software-dl.ti.com/tisci/esd/latest/ for more information
+ about TI DM.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node, 'ti-dm')
+ self.external = True
diff --git a/tools/binman/etype/ti_secure.py b/tools/binman/etype/ti_secure.py
new file mode 100644
index 00000000000..704dcf8a381
--- /dev/null
+++ b/tools/binman/etype/ti_secure.py
@@ -0,0 +1,173 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2022-2023 Texas Instruments Incorporated - https://www.ti.com/
+# Written by Neha Malcom Francis <n-francis@ti.com>
+#
+
+# Support for generation of TI secured binary blobs
+
+from binman.entry import EntryArg
+from binman.etype.x509_cert import Entry_x509_cert
+from dataclasses import dataclass
+
+from dtoc import fdt_util
+
+@dataclass
+class Firewall():
+ id: int
+ region: int
+    control: int
+    permissions: list
+    start_address: int
+    end_address: int
+
+ def ensure_props(self, etype, name):
+ missing_props = []
+ for key, val in self.__dict__.items():
+ if val is None:
+ missing_props += [key]
+
+ if len(missing_props):
+ etype.Raise(f"Subnode '{name}' is missing properties: {','.join(missing_props)}")
+
+ def get_certificate(self) -> str:
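+        # Emit an openssl config fragment for this firewall. As a sketch, a
+        # firewall with id=257, region=0 produces lines such as
+        # 'firewallID2570 = INTEGER:257' and 'region2570 = INTEGER:0',
+        # followed by the control, permission and address fields below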
+ unique_identifier = f"{self.id}{self.region}"
+ cert = f"""
+firewallID{unique_identifier} = INTEGER:{self.id}
+region{unique_identifier} = INTEGER:{self.region}
+control{unique_identifier} = INTEGER:{hex(self.control)}
+nPermissionRegs{unique_identifier} = INTEGER:{len(self.permissions)}
+"""
+ for index, permission in enumerate(self.permissions):
+ cert += f"""permissions{unique_identifier}{index} = INTEGER:{hex(permission)}
+"""
+ cert += f"""startAddress{unique_identifier} = FORMAT:HEX,OCT:{self.start_address:02x}
+endAddress{unique_identifier} = FORMAT:HEX,OCT:{self.end_address:02x}
+"""
+ return cert
+
+class Entry_ti_secure(Entry_x509_cert):
+ """Entry containing a TI x509 certificate binary
+
+ Properties / Entry arguments:
+ - content: List of phandles to entries to sign
+ - keyfile: Filename of file containing key to sign binary with
+ - sha: Hash function to be used for signing
+ - auth-in-place: This is an integer field that contains two pieces
+          of information:
+            Lower Byte - Remains 0x02 as per our use case
+              (0x02: Move the authenticated binary back to the header)
+ Upper Byte - The Host ID of the core owning the firewall
+
+ Output files:
+ - input.<unique_name> - input file passed to openssl
+ - config.<unique_name> - input file generated for openssl (which is
+ used as the config file)
+ - cert.<unique_name> - output file generated by openssl (which is
+ used as the entry contents)
+
+    Depending on the auth-in-place information in the inputs, we read the
+    firewall nodes that describe the firewall configurations that TIFS will
+    apply after reading the certificate.
+
+    The syntax of the firewall nodes is as follows::
+
+ firewall-257-0 {
+ id = <257>; /* The ID of the firewall being configured */
+ region = <0>; /* Region number to configure */
+
+ control = /* The control register */
+ <(FWCTRL_EN | FWCTRL_LOCK | FWCTRL_BG | FWCTRL_CACHE)>;
+
+ permissions = /* The permission registers */
+ <((FWPRIVID_ALL << FWPRIVID_SHIFT) |
+ FWPERM_SECURE_PRIV_RWCD |
+ FWPERM_SECURE_USER_RWCD |
+ FWPERM_NON_SECURE_PRIV_RWCD |
+ FWPERM_NON_SECURE_USER_RWCD)>;
+
+ /* More defines can be found in k3-security.h */
+
+ start_address = /* The Start Address of the firewall */
+ <0x0 0x0>;
+ end_address = /* The End Address of the firewall */
+ <0xff 0xffffffff>;
+ };
+
+
+ openssl signs the provided data, using the TI templated config file and
+ writes the signature in this entry. This allows verification that the
+ data is genuine.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+ self.openssl = None
+ self.firewall_cert_data: dict = {
+ 'auth_in_place': 0x02,
+ 'num_firewalls': 0,
+ 'certificate': '',
+ }
+
+ def ReadNode(self):
+ super().ReadNode()
+ self.key_fname = self.GetEntryArgsOrProps([
+ EntryArg('keyfile', str)], required=True)[0]
+ auth_in_place = fdt_util.GetInt(self._node, 'auth-in-place')
+ if auth_in_place:
+ self.firewall_cert_data['auth_in_place'] = auth_in_place
+ self.ReadFirewallNode()
+ self.sha = fdt_util.GetInt(self._node, 'sha', 512)
+ self.req_dist_name = {'C': 'US',
+ 'ST': 'TX',
+ 'L': 'Dallas',
+ 'O': 'Texas Instruments Incorporated',
+ 'OU': 'Processors',
+ 'CN': 'TI Support',
+ 'emailAddress': 'support@ti.com'}
+
+ def ReadFirewallNode(self):
+ self.firewall_cert_data['certificate'] = ""
+ self.firewall_cert_data['num_firewalls'] = 0
+ for node in self._node.subnodes:
+ if 'firewall' in node.name:
+ firewall = Firewall(
+ fdt_util.GetInt(node, 'id'),
+ fdt_util.GetInt(node, 'region'),
+ fdt_util.GetInt(node, 'control'),
+ fdt_util.GetPhandleList(node, 'permissions'),
+ fdt_util.GetInt64(node, 'start_address'),
+ fdt_util.GetInt64(node, 'end_address'),
+ )
+ firewall.ensure_props(self, node.name)
+ self.firewall_cert_data['num_firewalls'] += 1
+ self.firewall_cert_data['certificate'] += firewall.get_certificate()
+
+ def GetCertificate(self, required):
+ """Get the contents of this entry
+
+ Args:
+ required: True if the data must be present, False if it is OK to
+ return None
+
+ Returns:
+ bytes content of the entry, which is the certificate binary for the
+ provided data
+ """
+ return super().GetCertificate(required=required, type='sysfw')
+
+ def ObtainContents(self):
+ data = self.data
+ if data is None:
+ data = self.GetCertificate(False)
+ if data is None:
+ return False
+ self.SetContents(data)
+ return True
+
+ def ProcessContents(self):
+ # The blob may have changed due to WriteSymbols()
+ data = self.data
+ return self.ProcessContentsUpdate(data)
+
+ def AddBintools(self, btools):
+ super().AddBintools(btools)
+ self.openssl = self.AddBintool(btools, 'openssl')
diff --git a/tools/binman/etype/ti_secure_rom.py b/tools/binman/etype/ti_secure_rom.py
new file mode 100644
index 00000000000..f6fc3f90f84
--- /dev/null
+++ b/tools/binman/etype/ti_secure_rom.py
@@ -0,0 +1,256 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2022-2023 Texas Instruments Incorporated - https://www.ti.com/
+# Written by Neha Malcom Francis <n-francis@ti.com>
+#
+
+# Support for generation of TI secured bootloaders booted by ROM
+
+from binman.entry import EntryArg
+from binman.etype.x509_cert import Entry_x509_cert
+
+import hashlib
+
+from dtoc import fdt_util
+from u_boot_pylib import tools
+
+VALID_SHAS = [256, 384, 512, 224]
+SHA_OIDS = {256:'2.16.840.1.101.3.4.2.1',
+ 384:'2.16.840.1.101.3.4.2.2',
+ 512:'2.16.840.1.101.3.4.2.3',
+ 224:'2.16.840.1.101.3.4.2.4'}
+
+class Entry_ti_secure_rom(Entry_x509_cert):
+ """Entry containing a TI x509 certificate binary for images booted by ROM
+
+ Properties / Entry arguments:
+ - keyfile: Filename of file containing key to sign binary with
+ - combined: boolean if device follows combined boot flow
+ - countersign: boolean if device contains countersigned system firmware
+ - load: load address of SPL
+ - sw-rev: software revision
+ - sha: Hash function to be used for signing
+ - core: core on which bootloader runs, valid cores are 'secure' and 'public'
+ - content: phandle of SPL in case of legacy bootflow or phandles of component binaries
+ in case of combined bootflow
+        - core-opts (optional): lockstep (0) or split (2) mode; set to 0 by default
+
+ The following properties are only for generating a combined bootflow binary:
+ - sysfw-inner-cert: boolean if binary contains sysfw inner certificate
+ - dm-data: boolean if binary contains dm-data binary
+ - content-sbl: phandle of SPL binary
+ - content-sysfw: phandle of sysfw binary
+ - content-sysfw-data: phandle of sysfw-data or tifs-data binary
+ - content-sysfw-inner-cert (optional): phandle of sysfw inner certificate binary
+ - content-dm-data (optional): phandle of dm-data binary
+ - load-sysfw: load address of sysfw binary
+ - load-sysfw-data: load address of sysfw-data or tifs-data binary
+ - load-sysfw-inner-cert (optional): load address of sysfw inner certificate binary
+ - load-dm-data (optional): load address of dm-data binary
+
+ Output files:
+ - input.<unique_name> - input file passed to openssl
+ - config.<unique_name> - input file generated for openssl (which is
+ used as the config file)
+ - cert.<unique_name> - output file generated by openssl (which is
+ used as the entry contents)
+
+ openssl signs the provided data, using the TI templated config file and
+ writes the signature in this entry. This allows verification that the
+ data is genuine.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+ self.openssl = None
+
+ def ReadNode(self):
+ super().ReadNode()
+ self.combined = fdt_util.GetBool(self._node, 'combined', False)
+ self.countersign = fdt_util.GetBool(self._node, 'countersign', False)
+ self.load_addr = fdt_util.GetInt(self._node, 'load', 0x00000000)
+ self.sw_rev = fdt_util.GetInt(self._node, 'sw-rev', 1)
+ self.sha = fdt_util.GetInt(self._node, 'sha', 512)
+ self.core = fdt_util.GetString(self._node, 'core', 'secure')
+ self.bootcore_opts = fdt_util.GetInt(self._node, 'core-opts')
+ self.key_fname = self.GetEntryArgsOrProps([
+ EntryArg('keyfile', str)], required=True)[0]
+ if self.combined:
+ self.sysfw_inner_cert = fdt_util.GetBool(self._node, 'sysfw-inner-cert', False)
+ self.load_addr_sysfw = fdt_util.GetInt(self._node, 'load-sysfw', 0x00000000)
+ self.load_addr_sysfw_data = fdt_util.GetInt(self._node, 'load-sysfw-data', 0x00000000)
+ self.dm_data = fdt_util.GetBool(self._node, 'dm-data', False)
+ if self.dm_data:
+ self.load_addr_dm_data = fdt_util.GetInt(self._node, 'load-dm-data', 0x00000000)
+ self.req_dist_name = {'C': 'US',
+ 'ST': 'TX',
+ 'L': 'Dallas',
+ 'O': 'Texas Instruments Incorporated',
+ 'OU': 'Processors',
+ 'CN': 'TI Support',
+ 'emailAddress': 'support@ti.com'}
+
+ def NonCombinedGetCertificate(self, required):
+ """Generate certificate for legacy boot flow
+
+ Args:
+ required: True if the data must be present, False if it is OK to
+ return None
+
+ Returns:
+ bytes content of the entry, which is the certificate binary for the
+ provided data
+ """
+ if self.bootcore_opts is None:
+ self.bootcore_opts = 0
+
+ if self.core == 'secure':
+ if self.countersign:
+ self.cert_type = 3
+ else:
+ self.cert_type = 2
+ self.bootcore = 0
+ else:
+ self.cert_type = 1
+ self.bootcore = 16
+
+ return super().GetCertificate(required=required, type='rom')
+
+ def CombinedGetCertificate(self, required):
+ """Generate certificate for combined boot flow
+
+ Args:
+ required: True if the data must be present, False if it is OK to
+ return None
+
+ Returns:
+ bytes content of the entry, which is the certificate binary for the
+ provided data
+ """
+ uniq = self.GetUniqueName()
+
+ self.num_comps = 3
+ self.sha_type = SHA_OIDS[self.sha]
+
+ if self.bootcore_opts is None:
+ self.bootcore_opts = 0
+
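+        # For each component (SPL, sysfw, sysfw data, plus the optional inner
+        # cert and DM data) the pattern below is the same: resolve the content
+        # phandle, write it to a temporary input file, and record its SHA-512
+        # hash and size for use by the rom-combined certificate template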
+ # sbl
+ self.content = fdt_util.GetPhandleList(self._node, 'content-sbl')
+ input_data_sbl = self.GetContents(required)
+ if input_data_sbl is None:
+ return None
+
+ input_fname_sbl = tools.get_output_filename('input.%s' % uniq)
+ tools.write_file(input_fname_sbl, input_data_sbl)
+
+ indata_sbl = tools.read_file(input_fname_sbl)
+ self.hashval_sbl = hashlib.sha512(indata_sbl).hexdigest()
+ self.imagesize_sbl = len(indata_sbl)
+
+ # sysfw
+ self.content = fdt_util.GetPhandleList(self._node, 'content-sysfw')
+ input_data_sysfw = self.GetContents(required)
+
+ input_fname_sysfw = tools.get_output_filename('input.%s' % uniq)
+ tools.write_file(input_fname_sysfw, input_data_sysfw)
+
+ indata_sysfw = tools.read_file(input_fname_sysfw)
+ self.hashval_sysfw = hashlib.sha512(indata_sysfw).hexdigest()
+ self.imagesize_sysfw = len(indata_sysfw)
+
+ # sysfw data
+ self.content = fdt_util.GetPhandleList(self._node, 'content-sysfw-data')
+ input_data_sysfw_data = self.GetContents(required)
+
+ input_fname_sysfw_data = tools.get_output_filename('input.%s' % uniq)
+ tools.write_file(input_fname_sysfw_data, input_data_sysfw_data)
+
+ indata_sysfw_data = tools.read_file(input_fname_sysfw_data)
+ self.hashval_sysfw_data = hashlib.sha512(indata_sysfw_data).hexdigest()
+ self.imagesize_sysfw_data = len(indata_sysfw_data)
+
+ # sysfw inner cert
+ self.sysfw_inner_cert_ext_boot_block = ""
+ self.sysfw_inner_cert_ext_boot_sequence_string = ""
+ imagesize_sysfw_inner_cert = 0
+ if self.sysfw_inner_cert:
+ self.content = fdt_util.GetPhandleList(self._node, 'content-sysfw-inner-cert')
+ input_data_sysfw_inner_cert = self.GetContents(required)
+
+ input_fname_sysfw_inner_cert = tools.get_output_filename('input.%s' % uniq)
+ tools.write_file(input_fname_sysfw_inner_cert, input_data_sysfw_inner_cert)
+
+ indata_sysfw_inner_cert = tools.read_file(input_fname_sysfw_inner_cert)
+ hashval_sysfw_inner_cert = hashlib.sha512(indata_sysfw_inner_cert).hexdigest()
+ imagesize_sysfw_inner_cert = len(indata_sysfw_inner_cert)
+ self.num_comps += 1
+ self.sysfw_inner_cert_ext_boot_sequence_string = "sysfw_inner_cert=SEQUENCE:sysfw_inner_cert"
+ self.sysfw_inner_cert_ext_boot_block = f"""[sysfw_inner_cert]
+compType = INTEGER:3
+bootCore = INTEGER:0
+compOpts = INTEGER:0
+destAddr = FORMAT:HEX,OCT:00000000
+compSize = INTEGER:{imagesize_sysfw_inner_cert}
+shaType = OID:{self.sha_type}
+shaValue = FORMAT:HEX,OCT:{hashval_sysfw_inner_cert}"""
+
+ # dm data
+ self.dm_data_ext_boot_sequence_string = ""
+ self.dm_data_ext_boot_block = ""
+ imagesize_dm_data = 0
+ if self.dm_data:
+ self.content = fdt_util.GetPhandleList(self._node, 'content-dm-data')
+ input_data_dm_data = self.GetContents(required)
+
+ input_fname_dm_data = tools.get_output_filename('input.%s' % uniq)
+ tools.write_file(input_fname_dm_data, input_data_dm_data)
+
+ indata_dm_data = tools.read_file(input_fname_dm_data)
+ hashval_dm_data = hashlib.sha512(indata_dm_data).hexdigest()
+ imagesize_dm_data = len(indata_dm_data)
+ self.num_comps += 1
+ self.dm_data_ext_boot_sequence_string = "dm_data=SEQUENCE:dm_data"
+ self.dm_data_ext_boot_block = f"""[dm_data]
+compType = INTEGER:17
+bootCore = INTEGER:16
+compOpts = INTEGER:0
+destAddr = FORMAT:HEX,OCT:{self.load_addr_dm_data:08x}
+compSize = INTEGER:{imagesize_dm_data}
+shaType = OID:{self.sha_type}
+shaValue = FORMAT:HEX,OCT:{hashval_dm_data}"""
+
+ self.total_size = self.imagesize_sbl + self.imagesize_sysfw + self.imagesize_sysfw_data + imagesize_sysfw_inner_cert + imagesize_dm_data
+ return super().GetCertificate(required=required, type='rom-combined')
+
+ def GetCertificate(self, required):
+ """Get the contents of this entry
+
+ Args:
+ required: True if the data must be present, False if it is OK to
+ return None
+
+ Returns:
+ bytes content of the entry, which is the certificate binary for the
+ provided data
+ """
+ if self.combined:
+ return self.CombinedGetCertificate(required)
+ else:
+ return self.NonCombinedGetCertificate(required)
+
+ def ObtainContents(self):
+ data = self.data
+ if data is None:
+ data = self.GetCertificate(False)
+ if data is None:
+ return False
+ self.SetContents(data)
+ return True
+
+ def ProcessContents(self):
+ # The blob may have changed due to WriteSymbols()
+ data = self.data
+ return self.ProcessContentsUpdate(data)
+
+ def AddBintools(self, btools):
+ super().AddBintools(btools)
+ self.openssl = self.AddBintool(btools, 'openssl')
diff --git a/tools/binman/etype/u_boot.py b/tools/binman/etype/u_boot.py
new file mode 100644
index 00000000000..d5639eef2e4
--- /dev/null
+++ b/tools/binman/etype/u_boot.py
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for the expanded U-Boot binary
+#
+
+from binman.entry import Entry
+from binman.etype.blob import Entry_blob
+
+class Entry_u_boot(Entry_blob):
+ """U-Boot flat binary
+
+ Properties / Entry arguments:
+ - filename: Filename of u-boot.bin (default 'u-boot.bin')
+
+ This is the U-Boot binary, containing relocation information to allow it
+ to relocate itself at runtime. The binary typically includes a device tree
+ blob at the end of it.
+
+ U-Boot can access binman symbols at runtime. See :ref:`binman_fdt`.
+
+ Note that this entry is automatically replaced with u-boot-expanded unless
+ --no-expanded is used or the node has a 'no-expanded' property.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+
+ def GetDefaultFilename(self):
+ return 'u-boot.bin'
diff --git a/tools/binman/etype/u_boot_dtb.py b/tools/binman/etype/u_boot_dtb.py
new file mode 100644
index 00000000000..65e71291d27
--- /dev/null
+++ b/tools/binman/etype/u_boot_dtb.py
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for U-Boot device tree
+#
+
+from binman.entry import Entry
+from binman.etype.blob_dtb import Entry_blob_dtb
+
+class Entry_u_boot_dtb(Entry_blob_dtb):
+ """U-Boot device tree
+
+ Properties / Entry arguments:
+ - filename: Filename of u-boot.dtb (default 'u-boot.dtb')
+
+ This is the U-Boot device tree, containing configuration information for
+ U-Boot. U-Boot needs this to know what devices are present and which drivers
+ to activate.
+
+ Note: This is mostly an internal entry type, used by others. This allows
+ binman to know which entries contain a device tree.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+
+ def GetDefaultFilename(self):
+ return 'u-boot.dtb'
+
+ def GetFdtEtype(self):
+ return 'u-boot-dtb'
diff --git a/tools/binman/etype/u_boot_dtb_with_ucode.py b/tools/binman/etype/u_boot_dtb_with_ucode.py
new file mode 100644
index 00000000000..f7225cecc16
--- /dev/null
+++ b/tools/binman/etype/u_boot_dtb_with_ucode.py
@@ -0,0 +1,95 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for U-Boot device tree with the microcode removed
+#
+
+from binman.entry import Entry
+from binman.etype.blob_dtb import Entry_blob_dtb
+from u_boot_pylib import tools
+
+# This is imported if needed
+state = None
+
+class Entry_u_boot_dtb_with_ucode(Entry_blob_dtb):
+ """A U-Boot device tree file, with the microcode removed
+
+ Properties / Entry arguments:
+ - filename: Filename of u-boot.dtb (default 'u-boot.dtb')
+
+ See Entry_u_boot_ucode for full details of the three entries involved in
+ this process. This entry provides the U-Boot device-tree file, which
+ contains the microcode. If the microcode is not being collated into one
+ place then the offset and size of the microcode is recorded by this entry,
+ for use by u-boot-with-ucode_ptr. If it is being collated, then this
+ entry deletes the microcode from the device tree (to save space) and makes
+ it available to u-boot-ucode.
+ """
+ def __init__(self, section, etype, node):
+ # Put this here to allow entry-docs and help to work without libfdt
+ global state
+ from binman import state
+
+ super().__init__(section, etype, node)
+ self.ucode_data = b''
+ self.collate = False
+ self.ucode_offset = None
+ self.ucode_size = None
+ self.ucode = None
+ self.ready = False
+
+ def GetDefaultFilename(self):
+ return 'u-boot.dtb'
+
+ def GetFdtEtype(self):
+ return 'u-boot-dtb'
+
+ def ProcessFdt(self, fdt):
+ # So the module can be loaded without it
+ from dtoc import fdt
+
+ # If the section does not need microcode, there is nothing to do
+ ucode_dest_entry = self.section.FindEntryType(
+ 'u-boot-spl-with-ucode-ptr')
+ if not ucode_dest_entry or not ucode_dest_entry.target_offset:
+ ucode_dest_entry = self.section.FindEntryType(
+ 'u-boot-tpl-with-ucode-ptr')
+ if not ucode_dest_entry or not ucode_dest_entry.target_offset:
+ ucode_dest_entry = self.section.FindEntryType(
+ 'u-boot-with-ucode-ptr')
+ if not ucode_dest_entry or not ucode_dest_entry.target_offset:
+ return True
+
+ # Remove the microcode
+ etype = self.GetFdtEtype()
+ fdt = state.GetFdtForEtype(etype)
+ self.ucode = fdt.GetNode('/microcode')
+ if not self.ucode:
+ raise self.Raise("No /microcode node found in '%s'" % etype)
+
+ # There's no need to collate it (move all microcode into one place)
+ # if we only have one chunk of microcode.
+ self.collate = len(self.ucode.subnodes) > 1
+ for node in self.ucode.subnodes:
+ data_prop = node.props.get('data')
+ if data_prop:
+ self.ucode_data += data_prop.bytes
+ if self.collate:
+ node.DeleteProp('data')
+ return True
+
+ def ObtainContents(self):
+ # Call the base class just in case it does something important.
+ super().ObtainContents()
+ if self.ucode and not self.collate:
+ for node in self.ucode.subnodes:
+ data_prop = node.props.get('data')
+ if data_prop:
+ # Find the offset in the device tree of the ucode data
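+                    # (the 12-byte offset skips the FDT property header of
+                    # tag, length and name-offset words preceding the data)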
+ self.ucode_offset = data_prop.GetOffset() + 12
+ self.ucode_size = len(data_prop.bytes)
+ self.ready = True
+ else:
+ self.ready = True
+ return self.ready
diff --git a/tools/binman/etype/u_boot_elf.py b/tools/binman/etype/u_boot_elf.py
new file mode 100644
index 00000000000..f4d86aa176a
--- /dev/null
+++ b/tools/binman/etype/u_boot_elf.py
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for U-Boot ELF image
+#
+
+from binman.entry import Entry
+from binman.etype.blob import Entry_blob
+
+from dtoc import fdt_util
+from u_boot_pylib import tools
+
+class Entry_u_boot_elf(Entry_blob):
+ """U-Boot ELF image
+
+ Properties / Entry arguments:
+ - filename: Filename of u-boot (default 'u-boot')
+
+ This is the U-Boot ELF image. It does not include a device tree but can be
+ relocated to any address for execution.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+ self._strip = fdt_util.GetBool(self._node, 'strip')
+
+ def ReadBlobContents(self):
+ if self._strip:
+ uniq = self.GetUniqueName()
+ out_fname = tools.get_output_filename('%s.stripped' % uniq)
+ tools.write_file(out_fname, tools.read_file(self._pathname))
+ tools.run('strip', out_fname)
+ self._pathname = out_fname
+ super().ReadBlobContents()
+ return True
+
+ def GetDefaultFilename(self):
+ return 'u-boot'
diff --git a/tools/binman/etype/u_boot_env.py b/tools/binman/etype/u_boot_env.py
new file mode 100644
index 00000000000..c027e93d42c
--- /dev/null
+++ b/tools/binman/etype/u_boot_env.py
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+
+import struct
+import zlib
+
+from binman.etype.blob import Entry_blob
+from dtoc import fdt_util
+from u_boot_pylib import tools
+
+class Entry_u_boot_env(Entry_blob):
+ """An entry which contains a U-Boot environment
+
+ Properties / Entry arguments:
+ - filename: File containing the environment text, with each line in the
+ form var=value
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+
+ def ReadNode(self):
+ super().ReadNode()
+ if self.size is None:
+ self.Raise("'u-boot-env' entry must have a size property")
+ self.fill_value = fdt_util.GetByte(self._node, 'fill-byte', 0)
+
+ def ReadBlobContents(self):
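+        # Layout built below: a 4-byte little-endian CRC32 of the payload, a
+        # single 0x01 byte (apparently the redundant-environment flag), the
+        # NUL-terminated 'var=value' strings, an extra NUL, then fill bytes
+        # padding the whole entry out to 'size'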
+ indata = tools.read_file(self._pathname)
+ data = b''
+ for line in indata.splitlines():
+ data += line + b'\0'
+        data += b'\0'
+ pad = self.size - len(data) - 5
+ if pad < 0:
+ self.Raise("'u-boot-env' entry too small to hold data (need %#x more bytes)" % -pad)
+ data += tools.get_bytes(self.fill_value, pad)
+ crc = zlib.crc32(data)
+ buf = struct.pack('<I', crc) + b'\x01' + data
+ self.SetContents(buf)
+ return True
diff --git a/tools/binman/etype/u_boot_expanded.py b/tools/binman/etype/u_boot_expanded.py
new file mode 100644
index 00000000000..8797824c9f0
--- /dev/null
+++ b/tools/binman/etype/u_boot_expanded.py
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2021 Google LLC
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for U-Boot binary
+#
+
+from binman.etype.blob_phase import Entry_blob_phase
+
+class Entry_u_boot_expanded(Entry_blob_phase):
+ """U-Boot flat binary broken out into its component parts
+
+ This is a section containing the U-Boot binary and a devicetree. Using this
+ entry type automatically creates this section, with the following entries
+ in it:
+
+ u-boot-nodtb
+ u-boot-dtb
+
+ Having the devicetree separate allows binman to update it in the final
+ image, so that the entries positions are provided to the running U-Boot.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node, 'u-boot', 'u-boot-dtb', False)
diff --git a/tools/binman/etype/u_boot_img.py b/tools/binman/etype/u_boot_img.py
new file mode 100644
index 00000000000..8a739d8edb6
--- /dev/null
+++ b/tools/binman/etype/u_boot_img.py
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for U-Boot binary
+#
+
+from binman.entry import Entry
+from binman.etype.blob import Entry_blob
+
+class Entry_u_boot_img(Entry_blob):
+ """U-Boot legacy image
+
+ Properties / Entry arguments:
+ - filename: Filename of u-boot.img (default 'u-boot.img')
+
+ This is the U-Boot binary as a packaged image, in legacy format. It has a
+ header which allows it to be loaded at the correct address for execution.
+
+ You should use FIT (Flat Image Tree) instead of the legacy image for new
+ applications.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+
+ def GetDefaultFilename(self):
+ return 'u-boot.img'
diff --git a/tools/binman/etype/u_boot_nodtb.py b/tools/binman/etype/u_boot_nodtb.py
new file mode 100644
index 00000000000..347ba7dc697
--- /dev/null
+++ b/tools/binman/etype/u_boot_nodtb.py
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for 'u-boot-nodtb.bin'
+#
+
+from binman.entry import Entry
+from binman.etype.blob import Entry_blob
+
+class Entry_u_boot_nodtb(Entry_blob):
+ """U-Boot flat binary without device tree appended
+
+ Properties / Entry arguments:
+ - filename: Filename to include (default 'u-boot-nodtb.bin')
+
+    This is the U-Boot binary, containing relocation information to allow it
+    to relocate itself at runtime. It does not include a device tree blob at
+    the end of it, so it normally cannot work without one. You can add a
+    u-boot-dtb entry after this one, or use a u-boot entry instead, which
+    normally expands to a section containing u-boot-nodtb and u-boot-dtb.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+
+ def GetDefaultFilename(self):
+ return 'u-boot-nodtb.bin'
diff --git a/tools/binman/etype/u_boot_spl.py b/tools/binman/etype/u_boot_spl.py
new file mode 100644
index 00000000000..7f710c857db
--- /dev/null
+++ b/tools/binman/etype/u_boot_spl.py
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for spl/u-boot-spl.bin
+#
+
+from binman.entry import Entry
+from binman.etype.blob import Entry_blob
+
+class Entry_u_boot_spl(Entry_blob):
+ """U-Boot SPL binary
+
+ Properties / Entry arguments:
+ - filename: Filename of u-boot-spl.bin (default 'spl/u-boot-spl.bin')
+
+ This is the U-Boot SPL (Secondary Program Loader) binary. This is a small
+ binary which loads before U-Boot proper, typically into on-chip SRAM. It is
+ responsible for locating, loading and jumping to U-Boot. Note that SPL is
+ not relocatable so must be loaded to the correct address in SRAM, or written
+ to run from the correct address if direct flash execution is possible (e.g.
+ on x86 devices).
+
+    SPL can access binman symbols at runtime. See :ref:`binman_fdt` in the
+    binman README for more information.
+
+ The ELF file 'spl/u-boot-spl' must also be available for this to work, since
+ binman uses that to look up symbols to write into the SPL binary.
+
+ Note that this entry is automatically replaced with u-boot-spl-expanded
+ unless --no-expanded is used or the node has a 'no-expanded' property.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node, auto_write_symbols=True)
+ self.elf_fname = 'spl/u-boot-spl'
+
+ def GetDefaultFilename(self):
+ return 'spl/u-boot-spl.bin'
diff --git a/tools/binman/etype/u_boot_spl_bss_pad.py b/tools/binman/etype/u_boot_spl_bss_pad.py
new file mode 100644
index 00000000000..4af4045d370
--- /dev/null
+++ b/tools/binman/etype/u_boot_spl_bss_pad.py
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for BSS padding for spl/u-boot-spl.bin. This padding
+# can be added after the SPL binary to ensure that anything concatenated
+# to it will appear to SPL to be at the end of BSS rather than the start.
+#
+
+from binman import elf
+from binman.entry import Entry
+from binman.etype.blob import Entry_blob
+from u_boot_pylib import tools
+
+class Entry_u_boot_spl_bss_pad(Entry_blob):
+ """U-Boot SPL binary padded with a BSS region
+
+ Properties / Entry arguments:
+ None
+
+ This holds the padding added after the SPL binary to cover the BSS (Block
+ Started by Symbol) region. This region holds the various variables used by
+ SPL. It is set to 0 by SPL when it starts up. If you want to append data to
+ the SPL image (such as a device tree file), you must pad out the BSS region
+ to avoid the data overlapping with U-Boot variables. This entry is useful in
+    that case. It automatically pads out the entry size to cover the code,
+    data and BSS.
+
+    The contents of this entry are a certain number of zero bytes, determined
+    by the __bss_size symbol.
+
+ The ELF file 'spl/u-boot-spl' must also be available for this to work, since
+ binman uses that to look up the BSS address.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+
+ def ObtainContents(self):
+ fname = tools.get_input_filename('spl/u-boot-spl')
+ bss_size = elf.GetSymbolAddress(fname, '__bss_size')
+ if bss_size is None:
+ self.Raise('Expected __bss_size symbol in spl/u-boot-spl')
+ self.SetContents(tools.get_bytes(0, bss_size))
+ return True
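
ObtainContents() above relies on binman's elf module to read the value of the
__bss_size symbol from the SPL ELF file. A rough standalone equivalent using
pyelftools (an assumption for illustration; binman itself uses its own
elf.GetSymbolAddress() helper)::

    from elftools.elf.elffile import ELFFile  # pyelftools

    def get_symbol_value(elf_path, name):
        """Return a symbol's st_value, or None if the symbol is absent"""
        with open(elf_path, 'rb') as fd:
            symtab = ELFFile(fd).get_section_by_name('.symtab')
            if not symtab:
                return None
            syms = symtab.get_symbol_by_name(name)
            return syms[0]['st_value'] if syms else None

    # __bss_size is a symbol whose value is the BSS size in bytes, so the
    # pad contents are simply that many zero bytes
    bss_size = get_symbol_value('spl/u-boot-spl', '__bss_size') or 0
    pad = bytes(bss_size)
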
diff --git a/tools/binman/etype/u_boot_spl_dtb.py b/tools/binman/etype/u_boot_spl_dtb.py
new file mode 100644
index 00000000000..eefc4a44aab
--- /dev/null
+++ b/tools/binman/etype/u_boot_spl_dtb.py
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for U-Boot device tree in SPL (Secondary Program Loader)
+#
+
+from binman.entry import Entry
+from binman.etype.blob_dtb import Entry_blob_dtb
+
+class Entry_u_boot_spl_dtb(Entry_blob_dtb):
+ """U-Boot SPL device tree
+
+ Properties / Entry arguments:
+        - filename: Filename of the SPL devicetree (default 'spl/u-boot-spl.dtb')
+
+ This is the SPL device tree, containing configuration information for
+ SPL. SPL needs this to know what devices are present and which drivers
+ to activate.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+
+ def GetDefaultFilename(self):
+ return 'spl/u-boot-spl.dtb'
+
+ def GetFdtEtype(self):
+ return 'u-boot-spl-dtb'
diff --git a/tools/binman/etype/u_boot_spl_elf.py b/tools/binman/etype/u_boot_spl_elf.py
new file mode 100644
index 00000000000..7b7b4e01495
--- /dev/null
+++ b/tools/binman/etype/u_boot_spl_elf.py
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for U-Boot SPL ELF image
+#
+
+from binman.entry import Entry
+from binman.etype.blob import Entry_blob
+
+class Entry_u_boot_spl_elf(Entry_blob):
+ """U-Boot SPL ELF image
+
+ Properties / Entry arguments:
+ - filename: Filename of SPL u-boot (default 'spl/u-boot-spl')
+
+ This is the U-Boot SPL ELF image. It does not include a device tree but can
+ be relocated to any address for execution.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node, auto_write_symbols=True)
+ self.elf_fname = 'spl/u-boot-spl'
+
+ def GetDefaultFilename(self):
+ return 'spl/u-boot-spl'
diff --git a/tools/binman/etype/u_boot_spl_expanded.py b/tools/binman/etype/u_boot_spl_expanded.py
new file mode 100644
index 00000000000..fcd0dd19ac4
--- /dev/null
+++ b/tools/binman/etype/u_boot_spl_expanded.py
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2021 Google LLC
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for expanded U-Boot SPL binary
+#
+
+from u_boot_pylib import tout
+
+from binman import state
+from binman.etype.blob_phase import Entry_blob_phase
+
+class Entry_u_boot_spl_expanded(Entry_blob_phase):
+ """U-Boot SPL flat binary broken out into its component parts
+
+ Properties / Entry arguments:
+ - spl-dtb: Controls whether this entry is selected (set to 'y' or '1' to
+ select)
+
+    This is a section containing the U-Boot SPL binary, BSS padding if needed
+    and a devicetree. Using this entry type automatically creates this section,
+    with the following entries in it:
+
+        u-boot-spl-nodtb
+        u-boot-spl-bss-pad
+        u-boot-spl-dtb
+
+ Having the devicetree separate allows binman to update it in the final
+    image, so that the entries' positions are provided to the running U-Boot.
+
+ This entry is selected based on the value of the 'spl-dtb' entryarg. If
+ this is non-empty (and not 'n' or '0') then this expanded entry is selected.
+ """
+ def __init__(self, section, etype, node):
+ bss_pad = state.GetEntryArgBool('spl-bss-pad')
+ super().__init__(section, etype, node, 'u-boot-spl', 'u-boot-spl-dtb',
+ bss_pad)
+
+ @classmethod
+ def UseExpanded(cls, node, etype, new_etype):
+ val = state.GetEntryArgBool('spl-dtb')
+ tout.do_output(tout.INFO if val else tout.DETAIL,
+ "Node '%s': etype '%s': %s %sselected" %
+ (node.path, etype, new_etype, '' if val else 'not '))
+ return val
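
The selection rule described in the docstring treats the 'spl-dtb' entryarg as
a loose boolean: empty, 'n' and '0' mean "do not expand", while anything else
selects the expanded entry. A small illustrative helper mirroring that
behaviour (a sketch, not binman's own state.GetEntryArgBool())::

    def entry_arg_bool(value):
        """Empty string, 'n' and '0' are false; any other value is true"""
        return bool(value) and value not in ('n', '0')

    assert entry_arg_bool('y') and entry_arg_bool('1')
    assert not entry_arg_bool('') and not entry_arg_bool('n')
    assert not entry_arg_bool('0')
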
diff --git a/tools/binman/etype/u_boot_spl_nodtb.py b/tools/binman/etype/u_boot_spl_nodtb.py
new file mode 100644
index 00000000000..e7ec329c902
--- /dev/null
+++ b/tools/binman/etype/u_boot_spl_nodtb.py
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for 'u-boot-spl-nodtb.bin'
+#
+
+from binman.entry import Entry
+from binman.etype.blob import Entry_blob
+
+class Entry_u_boot_spl_nodtb(Entry_blob):
+ """SPL binary without device tree appended
+
+ Properties / Entry arguments:
+ - filename: Filename to include (default 'spl/u-boot-spl-nodtb.bin')
+
+    This is the U-Boot SPL binary. It does not include a device tree blob at
+    the end of it, so it may not be able to work on its own if SPL needs
+    a device tree to operate on your platform. You can add a u-boot-spl-dtb
+    entry after this one, or use a u-boot-spl entry instead, which normally
+    expands to a section containing u-boot-spl-nodtb, u-boot-spl-bss-pad and
+    u-boot-spl-dtb.
+
+    SPL can access binman symbols at runtime. See :ref:`binman_fdt` in the
+    binman README for more information.
+
+ The ELF file 'spl/u-boot-spl' must also be available for this to work, since
+ binman uses that to look up symbols to write into the SPL binary.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node, auto_write_symbols=True)
+ self.elf_fname = 'spl/u-boot-spl'
+
+ def GetDefaultFilename(self):
+ return 'spl/u-boot-spl-nodtb.bin'
diff --git a/tools/binman/etype/u_boot_spl_pubkey_dtb.py b/tools/binman/etype/u_boot_spl_pubkey_dtb.py
new file mode 100644
index 00000000000..cb196061de2
--- /dev/null
+++ b/tools/binman/etype/u_boot_spl_pubkey_dtb.py
@@ -0,0 +1,112 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2023 Weidmueller GmbH
+# Written by Lukas Funke <lukas.funke@weidmueller.com>
+#
+# Entry-type module for 'u-boot-spl-pubkey.dtb'
+#
+
+import tempfile
+import os
+
+from binman.etype.blob_dtb import Entry_blob_dtb
+
+from dtoc import fdt_util
+
+from u_boot_pylib import tools
+
+# This is imported if needed
+state = None
+
+# pylint: disable=C0103
+class Entry_u_boot_spl_pubkey_dtb(Entry_blob_dtb):
+ """U-Boot SPL device tree including public key
+
+ Properties / Entry arguments:
+ - key-name-hint: Public key name without extension (.crt).
+ Default is determined by underlying
+ bintool (fdt_add_pubkey), usually 'key'.
+ - algo: (Optional) Algorithm used for signing. Default is determined by
+ underlying bintool (fdt_add_pubkey), usually 'sha1,rsa2048'
+ - required: (Optional) If present this indicates that the key must be
+ verified for the image / configuration to be
+ considered valid
+
+ The following example shows an image containing an SPL which
+ is packed together with the dtb. Binman will add a signature
+ node to the dtb.
+
+ Example node::
+
+ image {
+ ...
+ spl {
+ filename = "spl.bin"
+
+ u-boot-spl-nodtb {
+ };
+ u-boot-spl-pubkey-dtb {
+ algo = "sha384,rsa4096";
+ required = "conf";
+ key-name-hint = "dev";
+ };
+ };
+ ...
+ }
+ """
+
+ def __init__(self, section, etype, node):
+ # Put this here to allow entry-docs and help to work without libfdt
+ global state
+ from binman import state
+
+ super().__init__(section, etype, node)
+ self.required_props = ['key-name-hint']
+ self.fdt_add_pubkey = None
+ self._algo = fdt_util.GetString(self._node, 'algo')
+ self._required = fdt_util.GetString(self._node, 'required')
+ self._key_name_hint = fdt_util.GetString(self._node, 'key-name-hint')
+
+ def ObtainContents(self, fake_size=0):
+ """Add public key to SPL dtb
+
+        Add the public key identified by 'key-name-hint' to the
+        'signature' node in the SPL dtb
+
+ This is equivalent to the '-K' option of 'mkimage'
+
+ Args:
+ fake_size (int): unused
+ """
+
+ # We don't pass fake_size upwards because this is currently
+ # not supported by the blob type
+ super().ObtainContents()
+
+ with tempfile.NamedTemporaryFile(prefix=os.path.basename(
+ self.GetFdtEtype()),
+ dir=tools.get_output_dir())\
+ as pubkey_tdb:
+ tools.write_file(pubkey_tdb.name, self.GetData())
+ keyname = tools.get_input_filename(self._key_name_hint + ".crt")
+ self.fdt_add_pubkey.run(pubkey_tdb.name,
+ os.path.dirname(keyname),
+ self._key_name_hint,
+ self._required, self._algo)
+ dtb = tools.read_file(pubkey_tdb.name)
+ self.SetContents(dtb)
+ state.UpdateFdtContents(self.GetFdtEtype(), dtb)
+
+ return True
+
+ # pylint: disable=R0201,C0116
+ def GetDefaultFilename(self):
+ return 'spl/u-boot-spl-pubkey.dtb'
+
+ # pylint: disable=R0201,C0116
+ def GetFdtEtype(self):
+ return 'u-boot-spl-dtb'
+
+ # pylint: disable=R0201,C0116
+ def AddBintools(self, btools):
+ super().AddBintools(btools)
+ self.fdt_add_pubkey = self.AddBintool(btools, 'fdt_add_pubkey')
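
ObtainContents() above follows a common binman pattern: write the current dtb
to a temporary file, let an external tool modify it in place, then read the
result back as the new entry contents. A simplified sketch of that dance, with
run_fdt_add_pubkey standing in for the fdt_add_pubkey bintool wrapper (a
hypothetical callable, not the real bintool API)::

    import os
    import tempfile

    def add_pubkey(dtb_data, key_dir, key_name, run_fdt_add_pubkey):
        """Return dtb_data with a signature node added by the external tool"""
        with tempfile.NamedTemporaryFile(suffix='.dtb', delete=False) as tmp:
            tmp.write(dtb_data)            # write the current SPL dtb
            path = tmp.name
        try:
            run_fdt_add_pubkey(path, key_dir, key_name)  # edits file in place
            with open(path, 'rb') as fd:
                return fd.read()           # dtb now carries the public key
        finally:
            os.unlink(path)
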
diff --git a/tools/binman/etype/u_boot_spl_with_ucode_ptr.py b/tools/binman/etype/u_boot_spl_with_ucode_ptr.py
new file mode 100644
index 00000000000..18b99b00f4a
--- /dev/null
+++ b/tools/binman/etype/u_boot_spl_with_ucode_ptr.py
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for an SPL binary with an embedded microcode pointer
+#
+
+import struct
+
+from binman.etype.u_boot_with_ucode_ptr import Entry_u_boot_with_ucode_ptr
+
+class Entry_u_boot_spl_with_ucode_ptr(Entry_u_boot_with_ucode_ptr):
+ """U-Boot SPL with embedded microcode pointer
+
+ This is used when SPL must set up the microcode for U-Boot.
+
+ See Entry_u_boot_ucode for full details of the entries involved in this
+ process.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node, auto_write_symbols=True)
+ self.elf_fname = 'spl/u-boot-spl'
+
+ def GetDefaultFilename(self):
+ return 'spl/u-boot-spl-nodtb.bin'
diff --git a/tools/binman/etype/u_boot_tpl.py b/tools/binman/etype/u_boot_tpl.py
new file mode 100644
index 00000000000..397b9f89531
--- /dev/null
+++ b/tools/binman/etype/u_boot_tpl.py
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for tpl/u-boot-tpl.bin
+#
+
+from binman.entry import Entry
+from binman.etype.blob import Entry_blob
+
+class Entry_u_boot_tpl(Entry_blob):
+ """U-Boot TPL binary
+
+ Properties / Entry arguments:
+ - filename: Filename of u-boot-tpl.bin (default 'tpl/u-boot-tpl.bin')
+
+ This is the U-Boot TPL (Tertiary Program Loader) binary. This is a small
+ binary which loads before SPL, typically into on-chip SRAM. It is
+ responsible for locating, loading and jumping to SPL, the next-stage
+    loader. Note that TPL is not relocatable so must be loaded to the correct
+    address in SRAM, or written to run from the correct address if direct
+    flash execution is possible (e.g. on x86 devices).
+
+    TPL can access binman symbols at runtime. See :ref:`binman_fdt` in the
+    binman README for more information.
+
+ The ELF file 'tpl/u-boot-tpl' must also be available for this to work, since
+ binman uses that to look up symbols to write into the TPL binary.
+
+ Note that this entry is automatically replaced with u-boot-tpl-expanded
+ unless --no-expanded is used or the node has a 'no-expanded' property.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node, auto_write_symbols=True)
+ self.elf_fname = 'tpl/u-boot-tpl'
+
+ def GetDefaultFilename(self):
+ return 'tpl/u-boot-tpl.bin'
diff --git a/tools/binman/etype/u_boot_tpl_bss_pad.py b/tools/binman/etype/u_boot_tpl_bss_pad.py
new file mode 100644
index 00000000000..46d2cd58f7e
--- /dev/null
+++ b/tools/binman/etype/u_boot_tpl_bss_pad.py
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2021 Google LLC
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for BSS padding for tpl/u-boot-tpl.bin. This padding
+# can be added after the TPL binary to ensure that anything concatenated
+# to it will appear to TPL to be at the end of BSS rather than the start.
+#
+
+from binman import elf
+from binman.entry import Entry
+from binman.etype.blob import Entry_blob
+from u_boot_pylib import tools
+
+class Entry_u_boot_tpl_bss_pad(Entry_blob):
+ """U-Boot TPL binary padded with a BSS region
+
+ Properties / Entry arguments:
+ None
+
+ This holds the padding added after the TPL binary to cover the BSS (Block
+ Started by Symbol) region. This region holds the various variables used by
+ TPL. It is set to 0 by TPL when it starts up. If you want to append data to
+ the TPL image (such as a device tree file), you must pad out the BSS region
+ to avoid the data overlapping with U-Boot variables. This entry is useful in
+    that case. It automatically pads out the entry size to cover the code,
+    data and BSS.
+
+    The contents of this entry are a certain number of zero bytes, determined
+    by the __bss_size symbol.
+
+ The ELF file 'tpl/u-boot-tpl' must also be available for this to work, since
+ binman uses that to look up the BSS address.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+
+ def ObtainContents(self):
+ fname = tools.get_input_filename('tpl/u-boot-tpl')
+ bss_size = elf.GetSymbolAddress(fname, '__bss_size')
+ if bss_size is None:
+ self.Raise('Expected __bss_size symbol in tpl/u-boot-tpl')
+ self.SetContents(tools.get_bytes(0, bss_size))
+ return True
diff --git a/tools/binman/etype/u_boot_tpl_dtb.py b/tools/binman/etype/u_boot_tpl_dtb.py
new file mode 100644
index 00000000000..2ff1d7ced1a
--- /dev/null
+++ b/tools/binman/etype/u_boot_tpl_dtb.py
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for U-Boot device tree in TPL (Tertiary Program Loader)
+#
+
+from binman.entry import Entry
+from binman.etype.blob_dtb import Entry_blob_dtb
+
+class Entry_u_boot_tpl_dtb(Entry_blob_dtb):
+ """U-Boot TPL device tree
+
+ Properties / Entry arguments:
+        - filename: Filename of the TPL devicetree (default 'tpl/u-boot-tpl.dtb')
+
+ This is the TPL device tree, containing configuration information for
+ TPL. TPL needs this to know what devices are present and which drivers
+ to activate.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+
+ def GetDefaultFilename(self):
+ return 'tpl/u-boot-tpl.dtb'
+
+ def GetFdtEtype(self):
+ return 'u-boot-tpl-dtb'
diff --git a/tools/binman/etype/u_boot_tpl_dtb_with_ucode.py b/tools/binman/etype/u_boot_tpl_dtb_with_ucode.py
new file mode 100644
index 00000000000..066f18dfef2
--- /dev/null
+++ b/tools/binman/etype/u_boot_tpl_dtb_with_ucode.py
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for U-Boot device tree with the microcode removed
+#
+
+from binman.etype.u_boot_dtb_with_ucode import Entry_u_boot_dtb_with_ucode
+
+class Entry_u_boot_tpl_dtb_with_ucode(Entry_u_boot_dtb_with_ucode):
+    """U-Boot TPL device tree with the microcode removed
+
+ This is used when TPL must set up the microcode for U-Boot.
+
+ See Entry_u_boot_ucode for full details of the entries involved in this
+ process.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+
+ def GetDefaultFilename(self):
+ return 'tpl/u-boot-tpl.dtb'
+
+ def GetFdtEtype(self):
+ return 'u-boot-tpl-dtb'
diff --git a/tools/binman/etype/u_boot_tpl_elf.py b/tools/binman/etype/u_boot_tpl_elf.py
new file mode 100644
index 00000000000..fd100019b39
--- /dev/null
+++ b/tools/binman/etype/u_boot_tpl_elf.py
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for U-Boot TPL ELF image
+#
+
+from binman.entry import Entry
+from binman.etype.blob import Entry_blob
+
+class Entry_u_boot_tpl_elf(Entry_blob):
+ """U-Boot TPL ELF image
+
+ Properties / Entry arguments:
+ - filename: Filename of TPL u-boot (default 'tpl/u-boot-tpl')
+
+ This is the U-Boot TPL ELF image. It does not include a device tree but can
+ be relocated to any address for execution.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node, auto_write_symbols=True)
+ self.elf_fname = 'tpl/u-boot-tpl'
+
+ def GetDefaultFilename(self):
+ return 'tpl/u-boot-tpl'
diff --git a/tools/binman/etype/u_boot_tpl_expanded.py b/tools/binman/etype/u_boot_tpl_expanded.py
new file mode 100644
index 00000000000..58db4f37556
--- /dev/null
+++ b/tools/binman/etype/u_boot_tpl_expanded.py
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2021 Google LLC
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for expanded U-Boot TPL binary
+#
+
+from u_boot_pylib import tout
+
+from binman import state
+from binman.etype.blob_phase import Entry_blob_phase
+
+class Entry_u_boot_tpl_expanded(Entry_blob_phase):
+ """U-Boot TPL flat binary broken out into its component parts
+
+ Properties / Entry arguments:
+ - tpl-dtb: Controls whether this entry is selected (set to 'y' or '1' to
+ select)
+
+    This is a section containing the U-Boot TPL binary, BSS padding if needed
+    and a devicetree. Using this entry type automatically creates this section,
+    with the following entries in it:
+
+        u-boot-tpl-nodtb
+        u-boot-tpl-bss-pad
+        u-boot-tpl-dtb
+
+ Having the devicetree separate allows binman to update it in the final
+    image, so that the entries' positions are provided to the running U-Boot.
+
+ This entry is selected based on the value of the 'tpl-dtb' entryarg. If
+ this is non-empty (and not 'n' or '0') then this expanded entry is selected.
+ """
+ def __init__(self, section, etype, node):
+ bss_pad = state.GetEntryArgBool('tpl-bss-pad')
+ super().__init__(section, etype, node, 'u-boot-tpl', 'u-boot-tpl-dtb',
+ bss_pad)
+
+ @classmethod
+ def UseExpanded(cls, node, etype, new_etype):
+ val = state.GetEntryArgBool('tpl-dtb')
+ tout.do_output(tout.INFO if val else tout.DETAIL,
+ "Node '%s': etype '%s': %s %sselected" %
+ (node.path, etype, new_etype, '' if val else 'not '))
+ return val
diff --git a/tools/binman/etype/u_boot_tpl_nodtb.py b/tools/binman/etype/u_boot_tpl_nodtb.py
new file mode 100644
index 00000000000..9bb2b5dda30
--- /dev/null
+++ b/tools/binman/etype/u_boot_tpl_nodtb.py
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2021 Google LLC
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for 'u-boot-tpl-nodtb.bin'
+#
+
+from binman.entry import Entry
+from binman.etype.blob import Entry_blob
+
+class Entry_u_boot_tpl_nodtb(Entry_blob):
+ """TPL binary without device tree appended
+
+ Properties / Entry arguments:
+ - filename: Filename to include (default 'tpl/u-boot-tpl-nodtb.bin')
+
+    This is the U-Boot TPL binary. It does not include a device tree blob at
+    the end of it, so it may not be able to work on its own if TPL needs
+    a device tree to operate on your platform. You can add a u-boot-tpl-dtb
+    entry after this one, or use a u-boot-tpl entry instead, which normally
+    expands to a section containing u-boot-tpl-nodtb, u-boot-tpl-bss-pad and
+    u-boot-tpl-dtb.
+
+    TPL can access binman symbols at runtime. See :ref:`binman_fdt` in the
+    binman README for more information.
+
+ The ELF file 'tpl/u-boot-tpl' must also be available for this to work, since
+ binman uses that to look up symbols to write into the TPL binary.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node, auto_write_symbols=True)
+ self.elf_fname = 'tpl/u-boot-tpl'
+
+ def GetDefaultFilename(self):
+ return 'tpl/u-boot-tpl-nodtb.bin'
diff --git a/tools/binman/etype/u_boot_tpl_with_ucode_ptr.py b/tools/binman/etype/u_boot_tpl_with_ucode_ptr.py
new file mode 100644
index 00000000000..f8cc22011ce
--- /dev/null
+++ b/tools/binman/etype/u_boot_tpl_with_ucode_ptr.py
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for a TPL binary with an embedded microcode pointer
+#
+
+import struct
+
+from binman.entry import Entry
+from binman.etype.blob import Entry_blob
+from binman.etype.u_boot_with_ucode_ptr import Entry_u_boot_with_ucode_ptr
+from u_boot_pylib import command
+from u_boot_pylib import tools
+
+class Entry_u_boot_tpl_with_ucode_ptr(Entry_u_boot_with_ucode_ptr):
+ """U-Boot TPL with embedded microcode pointer
+
+ See Entry_u_boot_ucode for full details of the entries involved in this
+ process.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node, auto_write_symbols=True)
+ self.elf_fname = 'tpl/u-boot-tpl'
+
+ def GetDefaultFilename(self):
+ return 'tpl/u-boot-tpl-nodtb.bin'
diff --git a/tools/binman/etype/u_boot_ucode.py b/tools/binman/etype/u_boot_ucode.py
new file mode 100644
index 00000000000..97ed7d7eb14
--- /dev/null
+++ b/tools/binman/etype/u_boot_ucode.py
@@ -0,0 +1,100 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for a U-Boot binary with an embedded microcode pointer
+#
+
+from binman.entry import Entry
+from binman.etype.blob import Entry_blob
+from u_boot_pylib import tools
+
+class Entry_u_boot_ucode(Entry_blob):
+ """U-Boot microcode block
+
+ Properties / Entry arguments:
+ None
+
+ The contents of this entry are filled in automatically by other entries
+ which must also be in the image.
+
+ U-Boot on x86 needs a single block of microcode. This is collected from
+ the various microcode update nodes in the device tree. It is also unable
+ to read the microcode from the device tree on platforms that use FSP
+ (Firmware Support Package) binaries, because the API requires that the
+ microcode is supplied before there is any SRAM available to use (i.e.
+ the FSP sets up the SRAM / cache-as-RAM but does so in the call that
+ requires the microcode!). To keep things simple, all x86 platforms handle
+ microcode the same way in U-Boot (even non-FSP platforms). This is that
+ a table is placed at _dt_ucode_base_size containing the base address and
+ size of the microcode. This is either passed to the FSP (for FSP
+ platforms), or used to set up the microcode (for non-FSP platforms).
+ This all happens in the build system since it is the only way to get
+ the microcode into a single blob and accessible without SRAM.
+
+ There are two cases to handle. If there is only one microcode blob in
+    the device tree, then the ucode pointer is set to point to that. This
+ entry (u-boot-ucode) is empty. If there is more than one update, then
+ this entry holds the concatenation of all updates, and the device tree
+ entry (u-boot-dtb-with-ucode) is updated to remove the microcode. This
+    last step ensures that the microcode appears in one contiguous
+ block in the image and is not unnecessarily duplicated in the device
+ tree. It is referred to as 'collation' here.
+
+ Entry types that have a part to play in handling microcode:
+
+ Entry_u_boot_with_ucode_ptr:
+ Contains u-boot-nodtb.bin (i.e. U-Boot without the device tree).
+ It updates it with the address and size of the microcode so that
+ U-Boot can find it early on start-up.
+ Entry_u_boot_dtb_with_ucode:
+ Contains u-boot.dtb. It stores the microcode in a
+ 'self.ucode_data' property, which is then read by this class to
+ obtain the microcode if needed. If collation is performed, it
+ removes the microcode from the device tree.
+ Entry_u_boot_ucode:
+ This class. If collation is enabled it reads the microcode from
+ the Entry_u_boot_dtb_with_ucode entry, and uses it as the
+ contents of this entry.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+
+ def ObtainContents(self):
+ # If the section does not need microcode, there is nothing to do
+ found = False
+ for suffix in ['', '-spl', '-tpl']:
+ name = 'u-boot%s-with-ucode-ptr' % suffix
+ entry = self.section.FindEntryType(name)
+ if entry and entry.target_offset:
+ found = True
+ if not found:
+ self.data = b''
+ return True
+ # Get the microcode from the device tree entry. If it is not available
+ # yet, return False so we will be called later. If the section simply
+ # doesn't exist, then we may as well return True, since we are going to
+ # get an error anyway.
+ for suffix in ['', '-spl', '-tpl']:
+ name = 'u-boot%s-dtb-with-ucode' % suffix
+ fdt_entry = self.section.FindEntryType(name)
+ if fdt_entry:
+ break
+ if not fdt_entry:
+ self.data = b''
+ return True
+ if not fdt_entry.ready:
+ return False
+
+ if not fdt_entry.collate:
+ # This binary can be empty
+ self.data = b''
+ return True
+
+ # Write it out to a file
+ self._pathname = tools.get_output_filename('u-boot-ucode.bin')
+ tools.write_file(self._pathname, fdt_entry.ucode_data)
+
+ self.ReadBlobContents()
+
+ return True
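
ObtainContents() above has three possible outcomes: no microcode is needed
(empty contents), the device tree has not been processed yet (try again
later), or collation is enabled and the concatenated microcode becomes the
entry contents. A condensed sketch of that decision logic, using hypothetical
stand-in objects for the related entries::

    def ucode_contents(ptr_entries, dtb_entry):
        """Return entry data, or None to mean 'not ready yet, call again'"""
        # No entry wants a microcode pointer: nothing to collect
        if not any(e.target_offset for e in ptr_entries):
            return b''
        # No -dtb-with-ucode entry at all: an error will surface elsewhere
        if dtb_entry is None:
            return b''
        if not dtb_entry.ready:
            return None                 # wait until the dtb has been parsed
        if not dtb_entry.collate:
            return b''                  # microcode stays in the devicetree
        return dtb_entry.ucode_data     # concatenated microcode blocks
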
diff --git a/tools/binman/etype/u_boot_vpl.py b/tools/binman/etype/u_boot_vpl.py
new file mode 100644
index 00000000000..31d7e8374e2
--- /dev/null
+++ b/tools/binman/etype/u_boot_vpl.py
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for vpl/u-boot-vpl.bin
+#
+
+from binman.entry import Entry
+from binman.etype.blob import Entry_blob
+
+class Entry_u_boot_vpl(Entry_blob):
+ """U-Boot VPL binary
+
+ Properties / Entry arguments:
+ - filename: Filename of u-boot-vpl.bin (default 'vpl/u-boot-vpl.bin')
+
+ This is the U-Boot VPL (Verifying Program Loader) binary. This is a small
+ binary which loads before SPL, typically into on-chip SRAM. It is
+ responsible for locating, loading and jumping to SPL, the next-stage
+ loader. Note that VPL is not relocatable so must be loaded to the correct
+ address in SRAM, or written to run from the correct address if direct
+ flash execution is possible (e.g. on x86 devices).
+
+    VPL can access binman symbols at runtime. See :ref:`binman_fdt` in the
+    binman README for more information.
+
+ The ELF file 'vpl/u-boot-vpl' must also be available for this to work, since
+ binman uses that to look up symbols to write into the VPL binary.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node, auto_write_symbols=True)
+ self.elf_fname = 'vpl/u-boot-vpl'
+
+ def GetDefaultFilename(self):
+ return 'vpl/u-boot-vpl.bin'
diff --git a/tools/binman/etype/u_boot_vpl_bss_pad.py b/tools/binman/etype/u_boot_vpl_bss_pad.py
new file mode 100644
index 00000000000..12b286a7198
--- /dev/null
+++ b/tools/binman/etype/u_boot_vpl_bss_pad.py
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2021 Google LLC
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for BSS padding for vpl/u-boot-vpl.bin. This padding
+# can be added after the VPL binary to ensure that anything concatenated
+# to it will appear to VPL to be at the end of BSS rather than the start.
+#
+
+from binman import elf
+from binman.entry import Entry
+from binman.etype.blob import Entry_blob
+from u_boot_pylib import tools
+
+class Entry_u_boot_vpl_bss_pad(Entry_blob):
+ """U-Boot VPL binary padded with a BSS region
+
+ Properties / Entry arguments:
+ None
+
+ This holds the padding added after the VPL binary to cover the BSS (Block
+ Started by Symbol) region. This region holds the various variables used by
+ VPL. It is set to 0 by VPL when it starts up. If you want to append data to
+ the VPL image (such as a device tree file), you must pad out the BSS region
+ to avoid the data overlapping with U-Boot variables. This entry is useful in
+    that case. It automatically pads out the entry size to cover the code,
+    data and BSS.
+
+    The contents of this entry are a certain number of zero bytes, determined
+    by the __bss_size symbol.
+
+ The ELF file 'vpl/u-boot-vpl' must also be available for this to work, since
+ binman uses that to look up the BSS address.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+
+ def ObtainContents(self):
+ fname = tools.get_input_filename('vpl/u-boot-vpl')
+ bss_size = elf.GetSymbolAddress(fname, '__bss_size')
+ if bss_size is None:
+ self.Raise('Expected __bss_size symbol in vpl/u-boot-vpl')
+ self.SetContents(tools.get_bytes(0, bss_size))
+ return True
diff --git a/tools/binman/etype/u_boot_vpl_dtb.py b/tools/binman/etype/u_boot_vpl_dtb.py
new file mode 100644
index 00000000000..f6253bf2431
--- /dev/null
+++ b/tools/binman/etype/u_boot_vpl_dtb.py
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for U-Boot device tree in VPL (Verifying Program Loader)
+#
+
+from binman.entry import Entry
+from binman.etype.blob_dtb import Entry_blob_dtb
+
+class Entry_u_boot_vpl_dtb(Entry_blob_dtb):
+ """U-Boot VPL device tree
+
+ Properties / Entry arguments:
+        - filename: Filename of the VPL devicetree (default 'vpl/u-boot-vpl.dtb')
+
+ This is the VPL device tree, containing configuration information for
+ VPL. VPL needs this to know what devices are present and which drivers
+ to activate.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+
+ def GetDefaultFilename(self):
+ return 'vpl/u-boot-vpl.dtb'
+
+ def GetFdtEtype(self):
+ return 'u-boot-vpl-dtb'
diff --git a/tools/binman/etype/u_boot_vpl_elf.py b/tools/binman/etype/u_boot_vpl_elf.py
new file mode 100644
index 00000000000..2c686790194
--- /dev/null
+++ b/tools/binman/etype/u_boot_vpl_elf.py
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2022 Google LLC
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for U-Boot VPL ELF image
+#
+
+from binman.entry import Entry
+from binman.etype.blob import Entry_blob
+
+class Entry_u_boot_vpl_elf(Entry_blob):
+ """U-Boot VPL ELF image
+
+ Properties / Entry arguments:
+ - filename: Filename of VPL u-boot (default 'vpl/u-boot-vpl')
+
+ This is the U-Boot VPL ELF image. It does not include a device tree but can
+ be relocated to any address for execution.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node, auto_write_symbols=True)
+ self.elf_fname = 'vpl/u-boot-vpl'
+
+ def GetDefaultFilename(self):
+ return 'vpl/u-boot-vpl'
diff --git a/tools/binman/etype/u_boot_vpl_expanded.py b/tools/binman/etype/u_boot_vpl_expanded.py
new file mode 100644
index 00000000000..deff5a3f8c2
--- /dev/null
+++ b/tools/binman/etype/u_boot_vpl_expanded.py
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2021 Google LLC
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for expanded U-Boot VPL binary
+#
+
+from u_boot_pylib import tout
+
+from binman import state
+from binman.etype.blob_phase import Entry_blob_phase
+
+class Entry_u_boot_vpl_expanded(Entry_blob_phase):
+ """U-Boot VPL flat binary broken out into its component parts
+
+ Properties / Entry arguments:
+ - vpl-dtb: Controls whether this entry is selected (set to 'y' or '1' to
+ select)
+
+    This is a section containing the U-Boot VPL binary, BSS padding if needed
+    and a devicetree. Using this entry type automatically creates this section,
+    with the following entries in it:
+
+        u-boot-vpl-nodtb
+        u-boot-vpl-bss-pad
+        u-boot-vpl-dtb
+
+ Having the devicetree separate allows binman to update it in the final
+    image, so that the entries' positions are provided to the running U-Boot.
+
+ This entry is selected based on the value of the 'vpl-dtb' entryarg. If
+ this is non-empty (and not 'n' or '0') then this expanded entry is selected.
+ """
+ def __init__(self, section, etype, node):
+ bss_pad = state.GetEntryArgBool('vpl-bss-pad')
+ super().__init__(section, etype, node, 'u-boot-vpl', 'u-boot-vpl-dtb',
+ bss_pad)
+
+ @classmethod
+ def UseExpanded(cls, node, etype, new_etype):
+ val = state.GetEntryArgBool('vpl-dtb')
+ tout.do_output(tout.INFO if val else tout.DETAIL,
+ "Node '%s': etype '%s': %s %sselected" %
+ (node.path, etype, new_etype, '' if val else 'not '))
+ return val
diff --git a/tools/binman/etype/u_boot_vpl_nodtb.py b/tools/binman/etype/u_boot_vpl_nodtb.py
new file mode 100644
index 00000000000..64c2767488d
--- /dev/null
+++ b/tools/binman/etype/u_boot_vpl_nodtb.py
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for 'u-boot-vpl-nodtb.bin'
+#
+
+from binman.entry import Entry
+from binman.etype.blob import Entry_blob
+
+class Entry_u_boot_vpl_nodtb(Entry_blob):
+ """VPL binary without device tree appended
+
+ Properties / Entry arguments:
+ - filename: Filename to include (default 'vpl/u-boot-vpl-nodtb.bin')
+
+    This is the U-Boot VPL binary. It does not include a device tree blob at
+    the end of it, so it may not be able to work on its own if VPL needs
+    a device tree to operate on your platform. You can add a u-boot-vpl-dtb
+    entry after this one, or use a u-boot-vpl entry instead, which normally
+    expands to a section containing u-boot-vpl-nodtb, u-boot-vpl-bss-pad and
+    u-boot-vpl-dtb.
+
+ VPL can access binman symbols at runtime. See :ref:`binman_fdt`.
+
+ The ELF file 'vpl/u-boot-vpl' must also be available for this to work, since
+ binman uses that to look up symbols to write into the VPL binary.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node, auto_write_symbols=True)
+ self.elf_fname = 'vpl/u-boot-vpl'
+
+ def GetDefaultFilename(self):
+ return 'vpl/u-boot-vpl-nodtb.bin'
diff --git a/tools/binman/etype/u_boot_with_ucode_ptr.py b/tools/binman/etype/u_boot_with_ucode_ptr.py
new file mode 100644
index 00000000000..aab27ac8ee7
--- /dev/null
+++ b/tools/binman/etype/u_boot_with_ucode_ptr.py
@@ -0,0 +1,96 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for a U-Boot binary with an embedded microcode pointer
+#
+
+import struct
+
+from binman import elf
+from binman.entry import Entry
+from binman.etype.blob import Entry_blob
+from dtoc import fdt_util
+from u_boot_pylib import tools
+from u_boot_pylib import command
+
+class Entry_u_boot_with_ucode_ptr(Entry_blob):
+ """U-Boot with embedded microcode pointer
+
+ Properties / Entry arguments:
+ - filename: Filename of u-boot-nodtb.bin (default 'u-boot-nodtb.bin')
+ - optional-ucode: boolean property to make microcode optional. If the
+ u-boot.bin image does not include microcode, no error will
+ be generated.
+
+ See Entry_u_boot_ucode for full details of the three entries involved in
+ this process. This entry updates U-Boot with the offset and size of the
+ microcode, to allow early x86 boot code to find it without doing anything
+ complicated. Otherwise it is the same as the u-boot entry.
+ """
+ def __init__(self, section, etype, node, auto_write_symbols=False):
+ super().__init__(section, etype, node, auto_write_symbols)
+ self.elf_fname = 'u-boot'
+ self.target_offset = None
+
+ def GetDefaultFilename(self):
+ return 'u-boot-nodtb.bin'
+
+ def ProcessFdt(self, fdt):
+ # Figure out where to put the microcode pointer
+ fname = tools.get_input_filename(self.elf_fname)
+ sym = elf.GetSymbolAddress(fname, '_dt_ucode_base_size')
+ if sym:
+ self.target_offset = sym
+ elif not fdt_util.GetBool(self._node, 'optional-ucode'):
+ self.Raise('Cannot locate _dt_ucode_base_size symbol in u-boot')
+ return True
+
+ def ProcessContents(self):
+ # If the image does not need microcode, there is nothing to do
+ if not self.target_offset:
+ return True
+
+ # Get the offset of the microcode
+ ucode_entry = self.section.FindEntryType('u-boot-ucode')
+ if not ucode_entry:
+ self.Raise('Cannot find microcode region u-boot-ucode')
+
+ # Check the target pos is in the section. If it is not, then U-Boot is
+ # being linked incorrectly, or is being placed at the wrong offset
+ # in the section.
+ #
+ # The section must be set up so that U-Boot is placed at the
+ # flash address to which it is linked. For example, if
+ # CONFIG_TEXT_BASE is 0xfff00000, and the ROM is 8MB, then
+ # the U-Boot region must start at offset 7MB in the section. In this
+ # case the ROM starts at 0xff800000, so the offset of the first
+ # entry in the section corresponds to that.
+ if (self.target_offset < self.image_pos or
+ self.target_offset >= self.image_pos + self.size):
+ self.Raise('Microcode pointer _dt_ucode_base_size at %08x is outside the section ranging from %08x to %08x' %
+ (self.target_offset, self.image_pos,
+ self.image_pos + self.size))
+
+ # Get the microcode, either from u-boot-ucode or u-boot-dtb-with-ucode.
+ # If we have left the microcode in the device tree, then it will be
+ # in the latter. If we extracted the microcode from the device tree
+ # and collated it in one place, it will be in the former.
+ if ucode_entry.size:
+ offset, size = ucode_entry.offset, ucode_entry.size
+ else:
+ dtb_entry = self.section.FindEntryType('u-boot-dtb-with-ucode')
+ if not dtb_entry:
+ dtb_entry = self.section.FindEntryType(
+ 'u-boot-tpl-dtb-with-ucode')
+ if not dtb_entry:
+ self.Raise('Cannot find microcode region u-boot-dtb-with-ucode')
+ offset = dtb_entry.offset + dtb_entry.ucode_offset
+ size = dtb_entry.ucode_size
+
+ # Write the microcode offset and size into the entry
+ offset_and_size = struct.pack('<2L', offset, size)
+ self.target_offset -= self.image_pos
+ return self.ProcessContentsUpdate(self.data[:self.target_offset] +
+ offset_and_size +
+ self.data[self.target_offset + 8:])
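
The patching done in ProcessContents() boils down to overwriting an 8-byte
record at the position of the _dt_ucode_base_size symbol with two
little-endian 32-bit values: the microcode base and its size. A self-contained
sketch of just that step::

    import struct

    def patch_ucode_ptr(data, sym_image_pos, entry_image_pos, base, size):
        """Overwrite the 8-byte _dt_ucode_base_size record inside 'data'

        sym_image_pos is the absolute image position of the symbol;
        entry_image_pos is where this U-Boot entry starts in the image.
        """
        target = sym_image_pos - entry_image_pos
        record = struct.pack('<2L', base, size)   # base address, then size
        return data[:target] + record + data[target + 8:]
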
diff --git a/tools/binman/etype/vblock.py b/tools/binman/etype/vblock.py
new file mode 100644
index 00000000000..4adb9a4e9bf
--- /dev/null
+++ b/tools/binman/etype/vblock.py
@@ -0,0 +1,102 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+
+# Support for a Chromium OS verified boot block, used to sign a read-write
+# section of the image.
+
+from collections import OrderedDict
+import os
+
+from binman.entry import EntryArg
+from binman.etype.collection import Entry_collection
+
+from dtoc import fdt_util
+from u_boot_pylib import tools
+
+class Entry_vblock(Entry_collection):
+ """An entry which contains a Chromium OS verified boot block
+
+ Properties / Entry arguments:
+ - content: List of phandles to entries to sign
+ - keydir: Directory containing the public keys to use
+ - keyblock: Name of the key file to use (inside keydir)
+        - signprivate: Name of private key file to use (inside keydir)
+ - version: Version number of the vblock (typically 1)
+ - kernelkey: Name of the kernel key to use (inside keydir)
+ - preamble-flags: Value of the vboot preamble flags (typically 0)
+
+ Output files:
+ - input.<unique_name> - input file passed to futility
+ - vblock.<unique_name> - output file generated by futility (which is
+ used as the entry contents)
+
+ Chromium OS signs the read-write firmware and kernel, writing the signature
+ in this block. This allows U-Boot to verify that the next firmware stage
+ and kernel are genuine.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+ self.futility = None
+ (self.keydir, self.keyblock, self.signprivate, self.version,
+ self.kernelkey, self.preamble_flags) = self.GetEntryArgsOrProps([
+ EntryArg('keydir', str),
+ EntryArg('keyblock', str),
+ EntryArg('signprivate', str),
+ EntryArg('version', int),
+ EntryArg('kernelkey', str),
+ EntryArg('preamble-flags', int)])
+
+ def GetVblock(self, required):
+ """Get the contents of this entry
+
+ Args:
+ required: True if the data must be present, False if it is OK to
+ return None
+
+ Returns:
+ bytes content of the entry, which is the signed vblock for the
+ provided data
+ """
+ # Join up the data files to be signed
+ input_data = self.GetContents(required)
+ if input_data is None:
+ return None
+
+ uniq = self.GetUniqueName()
+ output_fname = tools.get_output_filename('vblock.%s' % uniq)
+ input_fname = tools.get_output_filename('input.%s' % uniq)
+ tools.write_file(input_fname, input_data)
+ prefix = self.keydir + '/'
+ stdout = self.futility.sign_firmware(
+ vblock=output_fname,
+ keyblock=prefix + self.keyblock,
+ signprivate=prefix + self.signprivate,
+ version=f'{self.version:d}',
+ firmware=input_fname,
+ kernelkey=prefix + self.kernelkey,
+ flags=f'{self.preamble_flags}')
+ if stdout is not None:
+ data = tools.read_file(output_fname)
+ else:
+ # Bintool is missing; just use 4KB of zero data
+ self.record_missing_bintool(self.futility)
+ data = tools.get_bytes(0, 4096)
+ return data
+
+ def ObtainContents(self):
+ data = self.GetVblock(False)
+ if data is None:
+ return False
+ self.SetContents(data)
+ return True
+
+ def ProcessContents(self):
+ # The blob may have changed due to WriteSymbols()
+ data = self.GetVblock(True)
+ return self.ProcessContentsUpdate(data)
+
+ def AddBintools(self, btools):
+ super().AddBintools(btools)
+ self.futility = self.AddBintool(btools, 'futility')
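
GetVblock() also shows binman's graceful fallback when a bintool is not
installed: the missing tool is recorded (so a warning can be printed at the
end of the build) and placeholder zero bytes keep the image layout intact. A
generic sketch of that pattern (hypothetical helper names)::

    def run_or_placeholder(run_tool, output_path, record_missing, size=4096):
        """Return the tool's output file, or zero-filled placeholder data"""
        if run_tool() is not None:        # tool ran and produced output
            with open(output_path, 'rb') as fd:
                return fd.read()
        record_missing()                  # note the missing bintool
        return bytes(size)                # placeholder keeps sizes stable
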
diff --git a/tools/binman/etype/x509_cert.py b/tools/binman/etype/x509_cert.py
new file mode 100644
index 00000000000..29630d1b86c
--- /dev/null
+++ b/tools/binman/etype/x509_cert.py
@@ -0,0 +1,166 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2023 Google LLC
+# Written by Simon Glass <sjg@chromium.org>
+#
+
+# Support for an X509 certificate, used to sign a set of entries
+
+from collections import OrderedDict
+import os
+
+from binman.entry import EntryArg
+from binman.etype.collection import Entry_collection
+
+from dtoc import fdt_util
+from u_boot_pylib import tools
+
+class Entry_x509_cert(Entry_collection):
+ """An entry which contains an X509 certificate
+
+ Properties / Entry arguments:
+ - content: List of phandles to entries to sign
+
+ Output files:
+ - input.<unique_name> - input file passed to openssl
+ - cert.<unique_name> - output file generated by openssl (which is
+ used as the entry contents)
+
+ openssl signs the provided data, writing the signature in this entry. This
+ allows verification that the data is genuine
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+ self.openssl = None
+ self.req_dist_name = None
+ self.cert_type = None
+ self.bootcore = None
+ self.bootcore_opts = None
+ self.load_addr = None
+ self.sha = None
+ self.total_size = None
+ self.num_comps = None
+ self.sysfw_inner_cert_ext_boot_sequence_string = None
+ self.dm_data_ext_boot_sequence_string = None
+ self.imagesize_sbl = None
+ self.hashval_sbl = None
+ self.load_addr_sysfw = None
+ self.imagesize_sysfw = None
+ self.hashval_sysfw = None
+ self.load_addr_sysfw_data = None
+ self.imagesize_sysfw_data = None
+ self.hashval_sysfw_data = None
+ self.sysfw_inner_cert_ext_boot_block = None
+ self.dm_data_ext_boot_block = None
+ self.firewall_cert_data = None
+
+ def ReadNode(self):
+ super().ReadNode()
+ self._cert_ca = fdt_util.GetString(self._node, 'cert-ca')
+ self._cert_rev = fdt_util.GetInt(self._node, 'cert-revision-int', 0)
+ self.key_fname = self.GetEntryArgsOrProps([
+ EntryArg('keyfile', str)], required=True)[0]
+ self.sw_rev = fdt_util.GetInt(self._node, 'sw-rev', 1)
+
+ def GetCertificate(self, required, type='generic'):
+ """Get the contents of this entry
+
+ Args:
+ required: True if the data must be present, False if it is OK to
+ return None
+            type: Type of X509 certificate to generate; currently supported
+                types are 'generic', 'sysfw', 'rom' and 'rom-combined'
+
+ Returns:
+            bytes content of the entry, which is the signed certificate for
+            the provided data
+ """
+ # Join up the data files to be signed
+ input_data = self.GetContents(required)
+ if input_data is None:
+ return None
+
+ uniq = self.GetUniqueName()
+ output_fname = tools.get_output_filename('cert.%s' % uniq)
+ input_fname = tools.get_output_filename('input.%s' % uniq)
+ config_fname = tools.get_output_filename('config.%s' % uniq)
+ tools.write_file(input_fname, input_data)
+ if type == 'generic':
+ stdout = self.openssl.x509_cert(
+ cert_fname=output_fname,
+ input_fname=input_fname,
+ key_fname=self.key_fname,
+ cn=self._cert_ca,
+ revision=self._cert_rev,
+ config_fname=config_fname)
+ elif type == 'sysfw':
+ stdout = self.openssl.x509_cert_sysfw(
+ cert_fname=output_fname,
+ input_fname=input_fname,
+ key_fname=self.key_fname,
+ config_fname=config_fname,
+ sw_rev=self.sw_rev,
+ req_dist_name_dict=self.req_dist_name,
+ firewall_cert_data=self.firewall_cert_data)
+ elif type == 'rom':
+ stdout = self.openssl.x509_cert_rom(
+ cert_fname=output_fname,
+ input_fname=input_fname,
+ key_fname=self.key_fname,
+ config_fname=config_fname,
+ sw_rev=self.sw_rev,
+ req_dist_name_dict=self.req_dist_name,
+ cert_type=self.cert_type,
+ bootcore=self.bootcore,
+ bootcore_opts=self.bootcore_opts,
+ load_addr=self.load_addr,
+ sha=self.sha
+ )
+ elif type == 'rom-combined':
+ stdout = self.openssl.x509_cert_rom_combined(
+ cert_fname=output_fname,
+ input_fname=input_fname,
+ key_fname=self.key_fname,
+ config_fname=config_fname,
+ sw_rev=self.sw_rev,
+ req_dist_name_dict=self.req_dist_name,
+ load_addr=self.load_addr,
+ sha=self.sha,
+ total_size=self.total_size,
+ num_comps=self.num_comps,
+ sysfw_inner_cert_ext_boot_sequence_string=self.sysfw_inner_cert_ext_boot_sequence_string,
+ dm_data_ext_boot_sequence_string=self.dm_data_ext_boot_sequence_string,
+ imagesize_sbl=self.imagesize_sbl,
+ hashval_sbl=self.hashval_sbl,
+ load_addr_sysfw=self.load_addr_sysfw,
+ imagesize_sysfw=self.imagesize_sysfw,
+ hashval_sysfw=self.hashval_sysfw,
+ load_addr_sysfw_data=self.load_addr_sysfw_data,
+ imagesize_sysfw_data=self.imagesize_sysfw_data,
+ hashval_sysfw_data=self.hashval_sysfw_data,
+ sysfw_inner_cert_ext_boot_block=self.sysfw_inner_cert_ext_boot_block,
+ dm_data_ext_boot_block=self.dm_data_ext_boot_block,
+ bootcore_opts=self.bootcore_opts
+ )
+ if stdout is not None:
+ data = tools.read_file(output_fname)
+ else:
+ # Bintool is missing; just use 4KB of zero data
+ self.record_missing_bintool(self.openssl)
+ data = tools.get_bytes(0, 4096)
+ return data
+
+ def ObtainContents(self):
+ data = self.GetCertificate(False)
+ if data is None:
+ return False
+ self.SetContents(data)
+ return True
+
+ def ProcessContents(self):
+ # The blob may have changed due to WriteSymbols()
+ data = self.GetCertificate(True)
+ return self.ProcessContentsUpdate(data)
+
+ def AddBintools(self, btools):
+ super().AddBintools(btools)
+ self.openssl = self.AddBintool(btools, 'openssl')
diff --git a/tools/binman/etype/x86_reset16.py b/tools/binman/etype/x86_reset16.py
new file mode 100644
index 00000000000..5d49f16e21c
--- /dev/null
+++ b/tools/binman/etype/x86_reset16.py
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for the 16-bit x86 reset code for U-Boot
+#
+
+from binman.entry import Entry
+from binman.etype.blob import Entry_blob
+
+class Entry_x86_reset16(Entry_blob):
+ """x86 16-bit reset code for U-Boot
+
+ Properties / Entry arguments:
+ - filename: Filename of u-boot-x86-reset16.bin (default
+ 'u-boot-x86-reset16.bin')
+
+ x86 CPUs start up in 16-bit mode, even if they are 32-bit CPUs. This code
+ must be placed at a particular address. This entry holds that code. It is
+ typically placed at offset CONFIG_RESET_VEC_LOC. The code is responsible
+ for jumping to the x86-start16 code, which continues execution.
+
+ For 64-bit U-Boot, the 'x86_reset16_spl' entry type is used instead.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+
+ def GetDefaultFilename(self):
+ return 'u-boot-x86-reset16.bin'
diff --git a/tools/binman/etype/x86_reset16_spl.py b/tools/binman/etype/x86_reset16_spl.py
new file mode 100644
index 00000000000..775b90699ba
--- /dev/null
+++ b/tools/binman/etype/x86_reset16_spl.py
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for the 16-bit x86 reset code for U-Boot
+#
+
+from binman.entry import Entry
+from binman.etype.blob import Entry_blob
+
+class Entry_x86_reset16_spl(Entry_blob):
+ """x86 16-bit reset code for U-Boot
+
+ Properties / Entry arguments:
+        - filename: Filename of spl/u-boot-x86-reset16-spl.bin (default
+            'spl/u-boot-x86-reset16-spl.bin')
+
+ x86 CPUs start up in 16-bit mode, even if they are 32-bit CPUs. This code
+ must be placed at a particular address. This entry holds that code. It is
+ typically placed at offset CONFIG_RESET_VEC_LOC. The code is responsible
+ for jumping to the x86-start16 code, which continues execution.
+
+    For 32-bit U-Boot, the 'x86-reset16' entry type is used instead.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+
+ def GetDefaultFilename(self):
+ return 'spl/u-boot-x86-reset16-spl.bin'
diff --git a/tools/binman/etype/x86_reset16_tpl.py b/tools/binman/etype/x86_reset16_tpl.py
new file mode 100644
index 00000000000..52d3f4869ae
--- /dev/null
+++ b/tools/binman/etype/x86_reset16_tpl.py
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for the 16-bit x86 reset code for U-Boot
+#
+
+from binman.entry import Entry
+from binman.etype.blob import Entry_blob
+
+class Entry_x86_reset16_tpl(Entry_blob):
+ """x86 16-bit reset code for U-Boot
+
+ Properties / Entry arguments:
+        - filename: Filename of tpl/u-boot-x86-reset16-tpl.bin (default
+            'tpl/u-boot-x86-reset16-tpl.bin')
+
+ x86 CPUs start up in 16-bit mode, even if they are 32-bit CPUs. This code
+ must be placed at a particular address. This entry holds that code. It is
+ typically placed at offset CONFIG_RESET_VEC_LOC. The code is responsible
+ for jumping to the x86-start16 code, which continues execution.
+
+    If TPL is not being used, the 'x86-reset16-spl' or 'x86-reset16' entry
+    types may be used instead.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+
+ def GetDefaultFilename(self):
+ return 'tpl/u-boot-x86-reset16-tpl.bin'
diff --git a/tools/binman/etype/x86_start16.py b/tools/binman/etype/x86_start16.py
new file mode 100644
index 00000000000..18fdd95d370
--- /dev/null
+++ b/tools/binman/etype/x86_start16.py
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for the 16-bit x86 start-up code for U-Boot
+#
+
+from binman.entry import Entry
+from binman.etype.blob import Entry_blob
+
+class Entry_x86_start16(Entry_blob):
+ """x86 16-bit start-up code for U-Boot
+
+ Properties / Entry arguments:
+ - filename: Filename of u-boot-x86-start16.bin (default
+ 'u-boot-x86-start16.bin')
+
+ x86 CPUs start up in 16-bit mode, even if they are 32-bit CPUs. This code
+ must be placed in the top 64KB of the ROM. The reset code jumps to it. This
+ entry holds that code. It is typically placed at offset
+ CONFIG_SYS_X86_START16. The code is responsible for changing to 32-bit mode
+ and jumping to U-Boot's entry point, which requires 32-bit mode (for 32-bit
+ U-Boot).
+
+ For 64-bit U-Boot, the 'x86_start16_spl' entry type is used instead.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+
+ def GetDefaultFilename(self):
+ return 'u-boot-x86-start16.bin'
diff --git a/tools/binman/etype/x86_start16_spl.py b/tools/binman/etype/x86_start16_spl.py
new file mode 100644
index 00000000000..ac8e90f2e0c
--- /dev/null
+++ b/tools/binman/etype/x86_start16_spl.py
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for the 16-bit x86 start-up code for U-Boot SPL
+#
+
+from binman.entry import Entry
+from binman.etype.blob import Entry_blob
+
+class Entry_x86_start16_spl(Entry_blob):
+ """x86 16-bit start-up code for SPL
+
+ Properties / Entry arguments:
+ - filename: Filename of spl/u-boot-x86-start16-spl.bin (default
+ 'spl/u-boot-x86-start16-spl.bin')
+
+ x86 CPUs start up in 16-bit mode, even if they are 32-bit CPUs. This code
+ must be placed in the top 64KB of the ROM. The reset code jumps to it. This
+ entry holds that code. It is typically placed at offset
+ CONFIG_SYS_X86_START16. The code is responsible for changing to 32-bit mode
+ and jumping to U-Boot's entry point, which requires 32-bit mode (for 32-bit
+ U-Boot).
+
+ For 32-bit U-Boot, the 'x86-start16' entry type is used instead.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+
+ def GetDefaultFilename(self):
+ return 'spl/u-boot-x86-start16-spl.bin'
diff --git a/tools/binman/etype/x86_start16_tpl.py b/tools/binman/etype/x86_start16_tpl.py
new file mode 100644
index 00000000000..72d4608bb73
--- /dev/null
+++ b/tools/binman/etype/x86_start16_tpl.py
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for the 16-bit x86 start-up code for U-Boot TPL
+#
+
+from binman.entry import Entry
+from binman.etype.blob import Entry_blob
+
+class Entry_x86_start16_tpl(Entry_blob):
+ """x86 16-bit start-up code for TPL
+
+ Properties / Entry arguments:
+ - filename: Filename of tpl/u-boot-x86-start16-tpl.bin (default
+ 'tpl/u-boot-x86-start16-tpl.bin')
+
+ x86 CPUs start up in 16-bit mode, even if they are 32-bit CPUs. This code
+ must be placed in the top 64KB of the ROM. The reset code jumps to it. This
+ entry holds that code. It is typically placed at offset
+ CONFIG_SYS_X86_START16. The code is responsible for changing to 32-bit mode
+ and jumping to U-Boot's entry point, which requires 32-bit mode (for 32-bit
+ U-Boot).
+
+    If TPL is not being used, the 'x86-start16-spl' or 'x86-start16' entry types
+ may be used instead.
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+
+ def GetDefaultFilename(self):
+ return 'tpl/u-boot-x86-start16-tpl.bin'
diff --git a/tools/binman/etype/xilinx_bootgen.py b/tools/binman/etype/xilinx_bootgen.py
new file mode 100644
index 00000000000..70a4b2e2429
--- /dev/null
+++ b/tools/binman/etype/xilinx_bootgen.py
@@ -0,0 +1,225 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2023 Weidmueller GmbH
+# Written by Lukas Funke <lukas.funke@weidmueller.com>
+#
+# Entry-type module for Zynq(MP) boot images (boot.bin)
+#
+
+import tempfile
+
+from collections import OrderedDict
+
+from binman import elf
+from binman.etype.section import Entry_section
+
+from dtoc import fdt_util
+
+from u_boot_pylib import tools
+from u_boot_pylib import command
+
+# pylint: disable=C0103
+class Entry_xilinx_bootgen(Entry_section):
+ """Signed SPL boot image for Xilinx ZynqMP devices
+
+ Properties / Entry arguments:
+ - auth-params: (Optional) Authentication parameters passed to bootgen
+ - fsbl-config: (Optional) FSBL parameters passed to bootgen
+ - keysrc-enc: (Optional) Key source when using decryption engine
+ - pmufw-filename: Filename of PMU firmware. Default: pmu-firmware.elf
+        - psk-key-name-hint: Name of primary secret key to use for signing the
+              secondary public key. Format: .pem file
+        - ssk-key-name-hint: Name of secondary secret key to use for signing
+              the boot image. Format: .pem file
+
+    This entry type is used to create a boot image for Xilinx ZynqMP
+    devices.
+
+ Information for signed images:
+
+ In AMD/Xilinx SoCs, two pairs of public and secret keys are used
+ - primary and secondary. The function of the primary public/secret key pair
+ is to authenticate the secondary public/secret key pair.
+ The function of the secondary key is to sign/verify the boot image. [1]
+
+ AMD/Xilinx uses the following terms for private/public keys [1]:
+
+ PSK = Primary Secret Key (Used to sign Secondary Public Key)
+ PPK = Primary Public Key (Used to verify Secondary Public Key)
+ SSK = Secondary Secret Key (Used to sign the boot image/partitions)
+    SPK = Secondary Public Key (Used to verify the boot image/partitions)
+
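+    The PSK and SSK referenced by psk-key-name-hint and ssk-key-name-hint are
+    RSA private keys in PEM format, looked up in the binman input directories.
+    For development they can be generated with openssl, for example (the key
+    names here are just examples)::
+
+        openssl genrsa -out psk0.pem 4096
+        openssl genrsa -out ssk0.pem 4096
+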
+    The following example builds a signed boot image. The hash of the primary
+    public key (PPK) should be programmed into the device fuses, together with
+    the RSA_EN flag.
+
+ Example node::
+
+ spl {
+ filename = "boot.signed.bin";
+
+ xilinx-bootgen {
+ psk-key-name-hint = "psk0";
+ ssk-key-name-hint = "ssk0";
+ auth-params = "ppk_select=0", "spk_id=0x00000000";
+
+ u-boot-spl-nodtb {
+ };
+ u-boot-spl-pubkey-dtb {
+ algo = "sha384,rsa4096";
+ required = "conf";
+ key-name-hint = "dev";
+ };
+ };
+ };
+
+    For testing purposes, e.g. if the RSA_EN fuses should not be programmed,
+    the "bh_auth_enable" flag can be added to the fsbl-config field. This
+    skips verification of the PPK fuses and boots the image even if the PPK
+    hash is invalid.
+
+ Example node::
+
+ xilinx-bootgen {
+ psk-key-name-hint = "psk0";
+            ssk-key-name-hint = "ssk0";
+ ...
+ fsbl-config = "bh_auth_enable";
+ ...
+ };
+
+ [1] https://docs.xilinx.com/r/en-US/ug1283-bootgen-user-guide/Using-Authentication
+
+ """
+ def __init__(self, section, etype, node):
+ super().__init__(section, etype, node)
+ self._auth_params = None
+ self._entries = OrderedDict()
+ self._filename = None
+ self._fsbl_config = None
+ self._keysrc_enc = None
+ self._pmufw_filename = None
+ self._psk_key_name_hint = None
+ self._ssk_key_name_hint = None
+ self.align_default = None
+ self.bootgen = None
+ self.required_props = ['pmufw-filename',
+ 'psk-key-name-hint',
+ 'ssk-key-name-hint']
+
+ def ReadNode(self):
+ """Read properties from the xilinx-bootgen node"""
+ super().ReadNode()
+ self._auth_params = fdt_util.GetStringList(self._node,
+ 'auth-params')
+ self._filename = fdt_util.GetString(self._node, 'filename')
+ self._fsbl_config = fdt_util.GetStringList(self._node,
+ 'fsbl-config')
+ self._keysrc_enc = fdt_util.GetString(self._node,
+ 'keysrc-enc')
+ self._pmufw_filename = fdt_util.GetString(self._node, 'pmufw-filename')
+ self._psk_key_name_hint = fdt_util.GetString(self._node,
+ 'psk-key-name-hint')
+ self._ssk_key_name_hint = fdt_util.GetString(self._node,
+ 'ssk-key-name-hint')
+ self.ReadEntries()
+
+ @classmethod
+ def _ToElf(cls, data, output_fname):
+ """Convert SPL object file to bootable ELF file
+
+ Args:
+ data (bytearray): u-boot-spl-nodtb + u-boot-spl-pubkey-dtb obj file
+ data
+ output_fname (str): Filename of converted FSBL ELF file
+ """
+ platform_elfflags = {"aarch64":
+ ["-B", "aarch64", "-O", "elf64-littleaarch64"],
+ # amd64 support makes no sense for the target
+ # platform, but we include it here to enable
+ # testing on hosts
+ "x86_64":
+ ["-B", "i386", "-O", "elf64-x86-64"]
+ }
+
+ gcc, args = tools.get_target_compile_tool('cc')
+ args += ['-dumpmachine']
+ stdout = command.output(gcc, *args)
+ # split target machine triplet (arch, vendor, os)
+ arch, _, _ = stdout.split('-')
+
+ spl_elf = elf.DecodeElf(tools.read_file(
+ tools.get_input_filename('spl/u-boot-spl')), 0)
+
+ # Obj file to swap data and text section (rename-section)
+ with tempfile.NamedTemporaryFile(prefix="u-boot-spl-pubkey-",
+ suffix=".o.tmp",
+ dir=tools.get_output_dir())\
+ as tmp_obj:
+ input_objcopy_fname = tmp_obj.name
+ # Align packed content to 4 byte boundary
+ pad = bytearray(tools.align(len(data), 4) - len(data))
+ tools.write_file(input_objcopy_fname, data + pad)
+ # Final output elf file which contains a valid start address
+ with tempfile.NamedTemporaryFile(prefix="u-boot-spl-pubkey-elf-",
+ suffix=".o.tmp",
+ dir=tools.get_output_dir())\
+ as tmp_elf_obj:
+ input_ld_fname = tmp_elf_obj.name
+ objcopy, args = tools.get_target_compile_tool('objcopy')
+ args += ["--rename-section", ".data=.text",
+ "-I", "binary"]
+ args += platform_elfflags[arch]
+ args += [input_objcopy_fname, input_ld_fname]
+ command.run(objcopy, *args)
+
+ ld, args = tools.get_target_compile_tool('ld')
+ args += [input_ld_fname, '-o', output_fname,
+ "--defsym", f"_start={hex(spl_elf.entry)}",
+ "-Ttext", hex(spl_elf.entry)]
+ command.run(ld, *args)
+
+ def BuildSectionData(self, required):
+ """Pack node content, and create bootable, signed ZynqMP boot image
+
+ The method collects the content of this node (usually SPL + dtb) and
+ converts them to an ELF file. The ELF file is passed to the
+ Xilinx bootgen tool which packs the SPL ELF file together with
+ Platform Management Unit (PMU) firmware into a bootable image
+ for ZynqMP devices. The image is signed within this step.
+
+ The result is a bootable, signed SPL image for Xilinx ZynqMP devices.
+ """
+ data = super().BuildSectionData(required)
+ bootbin_fname = self._filename if self._filename else \
+ tools.get_output_filename(
+ f'boot.{self.GetUniqueName()}.bin')
+
+ pmufw_elf_fname = tools.get_input_filename(self._pmufw_filename)
+ psk_fname = tools.get_input_filename(self._psk_key_name_hint + ".pem")
+ ssk_fname = tools.get_input_filename(self._ssk_key_name_hint + ".pem")
+ fsbl_config = ";".join(self._fsbl_config) if self._fsbl_config else None
+ auth_params = ";".join(self._auth_params) if self._auth_params else None
+
+ spl_elf_fname = tools.get_output_filename('u-boot-spl-pubkey.dtb.elf')
+
+        # We need to convert the node content (see above) into an ELF
+        # file so that it can be processed by bootgen.
+ self._ToElf(bytearray(data), spl_elf_fname)
+
+ # Call Bootgen in order to sign the SPL
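+        # The bintool wraps the external 'bootgen' utility: it writes a .bif
+        # description and runs something roughly like
+        #   bootgen -arch zynqmp -image <generated.bif> -w -o <boot.bin>
+        # (the exact arguments are an assumption here; the bootgen bintool
+        # owns the real command line).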
+ if self.bootgen.sign('zynqmp', spl_elf_fname, pmufw_elf_fname,
+ psk_fname, ssk_fname, fsbl_config,
+ auth_params, self._keysrc_enc, bootbin_fname) is None:
+ # Bintool is missing; just use empty data as the output
+ self.record_missing_bintool(self.bootgen)
+ data = tools.get_bytes(0, 1024)
+ else:
+ data = tools.read_file(bootbin_fname)
+
+ self.SetContents(data)
+
+ return data
+
+ # pylint: disable=C0116
+ def AddBintools(self, btools):
+ super().AddBintools(btools)
+ self.bootgen = self.AddBintool(btools, 'bootgen')
diff --git a/tools/binman/fdt_test.py b/tools/binman/fdt_test.py
new file mode 100644
index 00000000000..7ef87295463
--- /dev/null
+++ b/tools/binman/fdt_test.py
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Test for the fdt modules
+
+import os
+import sys
+import tempfile
+import unittest
+
+from dtoc import fdt
+from dtoc import fdt_util
+from dtoc.fdt import FdtScan
+from u_boot_pylib import tools
+
+class TestFdt(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        cls._binman_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
+        cls._indir = tempfile.mkdtemp(prefix='binmant.')
+        tools.prepare_output_dir(cls._indir, True)
+
+    @classmethod
+    def tearDownClass(cls):
+        tools._finalise_for_test()
+
+ def TestFile(self, fname):
+ return os.path.join(self._binman_dir, 'test', fname)
+
+ def GetCompiled(self, fname):
+ return fdt_util.EnsureCompiled(self.TestFile(fname))
+
+ def _DeleteProp(self, dt):
+ node = dt.GetNode('/microcode/update@0')
+ node.DeleteProp('data')
+
+ def testFdtNormal(self):
+ fname = self.GetCompiled('034_x86_ucode.dts')
+ dt = FdtScan(fname)
+ self._DeleteProp(dt)
+
+ def testFdtNormalProp(self):
+ fname = self.GetCompiled('045_prop_test.dts')
+ dt = FdtScan(fname)
+ node = dt.GetNode('/binman/intel-me')
+        self.assertEqual('intel-me', node.name)
+        val = fdt_util.GetString(node, 'filename')
+        self.assertEqual(str, type(val))
+        self.assertEqual('me.bin', val)
+
+        prop = node.props['intval']
+        self.assertEqual(fdt.Type.INT, prop.type)
+        self.assertEqual(3, fdt_util.GetInt(node, 'intval'))
+
+        prop = node.props['intarray']
+        self.assertEqual(fdt.Type.INT, prop.type)
+        self.assertEqual(list, type(prop.value))
+        self.assertEqual(2, len(prop.value))
+        self.assertEqual([5, 6],
+                         [fdt_util.fdt32_to_cpu(val) for val in prop.value])
+
+        prop = node.props['byteval']
+        self.assertEqual(fdt.Type.BYTE, prop.type)
+        self.assertEqual(chr(8), prop.value)
+
+        prop = node.props['bytearray']
+        self.assertEqual(fdt.Type.BYTE, prop.type)
+        self.assertEqual(list, type(prop.value))
+        self.assertEqual(str, type(prop.value[0]))
+        self.assertEqual(3, len(prop.value))
+        self.assertEqual([chr(1), '#', '4'], prop.value)
+
+        prop = node.props['longbytearray']
+        self.assertEqual(fdt.Type.INT, prop.type)
+        self.assertEqual(0x090a0b0c, fdt_util.GetInt(node, 'longbytearray'))
+
+        prop = node.props['stringval']
+        self.assertEqual(fdt.Type.STRING, prop.type)
+        self.assertEqual('message2', fdt_util.GetString(node, 'stringval'))
+
+        prop = node.props['stringarray']
+        self.assertEqual(fdt.Type.STRING, prop.type)
+        self.assertEqual(list, type(prop.value))
+        self.assertEqual(3, len(prop.value))
+        self.assertEqual(['another', 'multi-word', 'message'], prop.value)
diff --git a/tools/binman/fip_util.py b/tools/binman/fip_util.py
new file mode 100755
index 00000000000..b5caab2d37a
--- /dev/null
+++ b/tools/binman/fip_util.py
@@ -0,0 +1,627 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2021 Google LLC
+# Written by Simon Glass <sjg@chromium.org>
+
+"""Support for ARM's Firmware Image Package (FIP) format
+
+FIP is a format similar to FMAP[1] but with fewer features and an obscure UUID
+instead of the region name.
+
+It consists of a header and a table of entries, each pointing to a place in the
+firmware image where something can be found.
+
+[1] https://chromium.googlesource.com/chromiumos/third_party/flashmap/+/refs/heads/master/lib/fmap.h
+
+If ATF is updated, run this program to update the FIP_TYPE_LIST below.
+
+ARM Trusted Firmware is available at:
+
+https://github.com/ARM-software/arm-trusted-firmware.git
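+
+A typical invocation (the source path here is just an example) is::
+
+    tools/binman/fip_util.py -s /path/to/arm-trusted-firmware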
+"""
+
+from argparse import ArgumentParser
+import collections
+import io
+import os
+import re
+import struct
+import sys
+from uuid import UUID
+
+OUR_FILE = os.path.realpath(__file__)
+OUR_PATH = os.path.dirname(OUR_FILE)
+
+# Bring in the patman and dtoc libraries (but don't override the first path
+# in PYTHONPATH)
+sys.path.insert(2, os.path.join(OUR_PATH, '..'))
+
+# pylint: disable=C0413
+from u_boot_pylib import command
+from u_boot_pylib import tools
+
+# The TOC header, at the start of the FIP
+HEADER_FORMAT = '<IIQ'
+HEADER_LEN = 0x10
+HEADER_MAGIC = 0xAA640001
+HEADER_SERIAL = 0x12345678
+
+# The entry header (a table of these comes after the TOC header)
+UUID_LEN = 16
+ENTRY_FORMAT = f'<{UUID_LEN}sQQQ'
+ENTRY_SIZE = 0x28
+
+HEADER_NAMES = (
+ 'name',
+ 'serial',
+ 'flags',
+)
+
+ENTRY_NAMES = (
+ 'uuid',
+ 'offset',
+ 'size',
+ 'flags',
+)
+
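+# As a rough sketch of the on-disk layout (mirroring what FipWriter.get_data()
+# does below), a minimal FIP with no payload entries is just the packed TOC
+# header followed by an all-zero terminating entry:
+#
+#     hdr = struct.pack(HEADER_FORMAT, HEADER_MAGIC, HEADER_SERIAL, 0)
+#     terminator = struct.pack(ENTRY_FORMAT, bytes(UUID_LEN), 0, 0, 0)
+#     empty_fip = hdr + terminator
+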
+# Set to True to enable output from running fiptool for debugging
+VERBOSE = False
+
+# Use a class so we can convert the bytes, making the table more readable
+# pylint: disable=R0903
+class FipType:
+ """A FIP entry type that we understand"""
+ def __init__(self, name, desc, uuid_bytes):
+        """Create a new type
+
+ Args:
+ name (str): Short name for the type
+ desc (str): Longer description for the type
+ uuid_bytes (bytes): List of 16 bytes for the UUID
+ """
+ self.name = name
+ self.desc = desc
+ self.uuid = bytes(uuid_bytes)
+
+# This is taken from tbbr_config.c in ARM Trusted Firmware
+FIP_TYPE_LIST = [
+ # ToC Entry UUIDs
+ FipType('scp-fwu-cfg', 'SCP Firmware Updater Configuration FWU SCP_BL2U',
+ [0x65, 0x92, 0x27, 0x03, 0x2f, 0x74, 0xe6, 0x44,
+ 0x8d, 0xff, 0x57, 0x9a, 0xc1, 0xff, 0x06, 0x10]),
+ FipType('ap-fwu-cfg', 'AP Firmware Updater Configuration BL2U',
+ [0x60, 0xb3, 0xeb, 0x37, 0xc1, 0xe5, 0xea, 0x41,
+ 0x9d, 0xf3, 0x19, 0xed, 0xa1, 0x1f, 0x68, 0x01]),
+ FipType('fwu', 'Firmware Updater NS_BL2U',
+ [0x4f, 0x51, 0x1d, 0x11, 0x2b, 0xe5, 0x4e, 0x49,
+ 0xb4, 0xc5, 0x83, 0xc2, 0xf7, 0x15, 0x84, 0x0a]),
+ FipType('fwu-cert', 'Non-Trusted Firmware Updater certificate',
+ [0x71, 0x40, 0x8a, 0xb2, 0x18, 0xd6, 0x87, 0x4c,
+ 0x8b, 0x2e, 0xc6, 0xdc, 0xcd, 0x50, 0xf0, 0x96]),
+ FipType('tb-fw', 'Trusted Boot Firmware BL2',
+ [0x5f, 0xf9, 0xec, 0x0b, 0x4d, 0x22, 0x3e, 0x4d,
+ 0xa5, 0x44, 0xc3, 0x9d, 0x81, 0xc7, 0x3f, 0x0a]),
+ FipType('scp-fw', 'SCP Firmware SCP_BL2',
+ [0x97, 0x66, 0xfd, 0x3d, 0x89, 0xbe, 0xe8, 0x49,
+ 0xae, 0x5d, 0x78, 0xa1, 0x40, 0x60, 0x82, 0x13]),
+ FipType('soc-fw', 'EL3 Runtime Firmware BL31',
+ [0x47, 0xd4, 0x08, 0x6d, 0x4c, 0xfe, 0x98, 0x46,
+ 0x9b, 0x95, 0x29, 0x50, 0xcb, 0xbd, 0x5a, 0x00]),
+ FipType('tos-fw', 'Secure Payload BL32 (Trusted OS)',
+ [0x05, 0xd0, 0xe1, 0x89, 0x53, 0xdc, 0x13, 0x47,
+ 0x8d, 0x2b, 0x50, 0x0a, 0x4b, 0x7a, 0x3e, 0x38]),
+ FipType('tos-fw-extra1', 'Secure Payload BL32 Extra1 (Trusted OS Extra1)',
+ [0x0b, 0x70, 0xc2, 0x9b, 0x2a, 0x5a, 0x78, 0x40,
+ 0x9f, 0x65, 0x0a, 0x56, 0x82, 0x73, 0x82, 0x88]),
+ FipType('tos-fw-extra2', 'Secure Payload BL32 Extra2 (Trusted OS Extra2)',
+ [0x8e, 0xa8, 0x7b, 0xb1, 0xcf, 0xa2, 0x3f, 0x4d,
+ 0x85, 0xfd, 0xe7, 0xbb, 0xa5, 0x02, 0x20, 0xd9]),
+ FipType('nt-fw', 'Non-Trusted Firmware BL33',
+ [0xd6, 0xd0, 0xee, 0xa7, 0xfc, 0xea, 0xd5, 0x4b,
+ 0x97, 0x82, 0x99, 0x34, 0xf2, 0x34, 0xb6, 0xe4]),
+ FipType('rmm-fw', 'Realm Monitor Management Firmware',
+ [0x6c, 0x07, 0x62, 0xa6, 0x12, 0xf2, 0x4b, 0x56,
+ 0x92, 0xcb, 0xba, 0x8f, 0x63, 0x36, 0x06, 0xd9]),
+ # Key certificates
+ FipType('rot-cert', 'Root Of Trust key certificate',
+ [0x86, 0x2d, 0x1d, 0x72, 0xf8, 0x60, 0xe4, 0x11,
+ 0x92, 0x0b, 0x8b, 0xe7, 0x62, 0x16, 0x0f, 0x24]),
+ FipType('trusted-key-cert', 'Trusted key certificate',
+ [0x82, 0x7e, 0xe8, 0x90, 0xf8, 0x60, 0xe4, 0x11,
+ 0xa1, 0xb4, 0x77, 0x7a, 0x21, 0xb4, 0xf9, 0x4c]),
+ FipType('scp-fw-key-cert', 'SCP Firmware key certificate',
+ [0x02, 0x42, 0x21, 0xa1, 0xf8, 0x60, 0xe4, 0x11,
+ 0x8d, 0x9b, 0xf3, 0x3c, 0x0e, 0x15, 0xa0, 0x14]),
+ FipType('soc-fw-key-cert', 'SoC Firmware key certificate',
+ [0x8a, 0xb8, 0xbe, 0xcc, 0xf9, 0x60, 0xe4, 0x11,
+ 0x9a, 0xd0, 0xeb, 0x48, 0x22, 0xd8, 0xdc, 0xf8]),
+ FipType('tos-fw-key-cert', 'Trusted OS Firmware key certificate',
+ [0x94, 0x77, 0xd6, 0x03, 0xfb, 0x60, 0xe4, 0x11,
+ 0x85, 0xdd, 0xb7, 0x10, 0x5b, 0x8c, 0xee, 0x04]),
+ FipType('nt-fw-key-cert', 'Non-Trusted Firmware key certificate',
+ [0x8a, 0xd5, 0x83, 0x2a, 0xfb, 0x60, 0xe4, 0x11,
+ 0x8a, 0xaf, 0xdf, 0x30, 0xbb, 0xc4, 0x98, 0x59]),
+ # Content certificates
+ FipType('tb-fw-cert', 'Trusted Boot Firmware BL2 certificate',
+ [0xd6, 0xe2, 0x69, 0xea, 0x5d, 0x63, 0xe4, 0x11,
+ 0x8d, 0x8c, 0x9f, 0xba, 0xbe, 0x99, 0x56, 0xa5]),
+ FipType('scp-fw-cert', 'SCP Firmware content certificate',
+ [0x44, 0xbe, 0x6f, 0x04, 0x5e, 0x63, 0xe4, 0x11,
+ 0xb2, 0x8b, 0x73, 0xd8, 0xea, 0xae, 0x96, 0x56]),
+ FipType('soc-fw-cert', 'SoC Firmware content certificate',
+ [0xe2, 0xb2, 0x0c, 0x20, 0x5e, 0x63, 0xe4, 0x11,
+ 0x9c, 0xe8, 0xab, 0xcc, 0xf9, 0x2b, 0xb6, 0x66]),
+ FipType('tos-fw-cert', 'Trusted OS Firmware content certificate',
+ [0xa4, 0x9f, 0x44, 0x11, 0x5e, 0x63, 0xe4, 0x11,
+ 0x87, 0x28, 0x3f, 0x05, 0x72, 0x2a, 0xf3, 0x3d]),
+ FipType('nt-fw-cert', 'Non-Trusted Firmware content certificate',
+ [0x8e, 0xc4, 0xc1, 0xf3, 0x5d, 0x63, 0xe4, 0x11,
+ 0xa7, 0xa9, 0x87, 0xee, 0x40, 0xb2, 0x3f, 0xa7]),
+ FipType('sip-sp-cert', 'SiP owned Secure Partition content certificate',
+ [0x77, 0x6d, 0xfd, 0x44, 0x86, 0x97, 0x4c, 0x3b,
+ 0x91, 0xeb, 0xc1, 0x3e, 0x02, 0x5a, 0x2a, 0x6f]),
+ FipType('plat-sp-cert', 'Platform owned Secure Partition content certificate',
+ [0xdd, 0xcb, 0xbf, 0x4a, 0xca, 0xd6, 0x11, 0xea,
+ 0x87, 0xd0, 0x02, 0x42, 0xac, 0x13, 0x00, 0x03]),
+ # Dynamic configs
+ FipType('hw-config', 'HW_CONFIG',
+ [0x08, 0xb8, 0xf1, 0xd9, 0xc9, 0xcf, 0x93, 0x49,
+ 0xa9, 0x62, 0x6f, 0xbc, 0x6b, 0x72, 0x65, 0xcc]),
+ FipType('tb-fw-config', 'TB_FW_CONFIG',
+ [0x6c, 0x04, 0x58, 0xff, 0xaf, 0x6b, 0x7d, 0x4f,
+ 0x82, 0xed, 0xaa, 0x27, 0xbc, 0x69, 0xbf, 0xd2]),
+ FipType('soc-fw-config', 'SOC_FW_CONFIG',
+ [0x99, 0x79, 0x81, 0x4b, 0x03, 0x76, 0xfb, 0x46,
+ 0x8c, 0x8e, 0x8d, 0x26, 0x7f, 0x78, 0x59, 0xe0]),
+ FipType('tos-fw-config', 'TOS_FW_CONFIG',
+ [0x26, 0x25, 0x7c, 0x1a, 0xdb, 0xc6, 0x7f, 0x47,
+ 0x8d, 0x96, 0xc4, 0xc4, 0xb0, 0x24, 0x80, 0x21]),
+ FipType('nt-fw-config', 'NT_FW_CONFIG',
+ [0x28, 0xda, 0x98, 0x15, 0x93, 0xe8, 0x7e, 0x44,
+ 0xac, 0x66, 0x1a, 0xaf, 0x80, 0x15, 0x50, 0xf9]),
+ FipType('fw-config', 'FW_CONFIG',
+ [0x58, 0x07, 0xe1, 0x6a, 0x84, 0x59, 0x47, 0xbe,
+ 0x8e, 0xd5, 0x64, 0x8e, 0x8d, 0xdd, 0xab, 0x0e]),
+ ] # end
+
+FIP_TYPES = {ftype.name: ftype for ftype in FIP_TYPE_LIST}
+
+
+def get_type_uuid(fip_type_or_uuid):
+ """get_type_uuid() - Convert a type or uuid into both
+
+ This always returns a UUID, but may not return a type since it does not do
+ the reverse lookup.
+
+ Args:
+ fip_type_or_uuid (str or bytes): Either a string containing the name of
+ an entry (e.g. 'soc-fw') or a bytes(16) containing the UUID
+
+ Returns:
+ tuple:
+ str: fip type (None if not known)
+ bytes(16): uuid
+
+ Raises:
+ ValueError: An unknown type was requested
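+
+    Example (illustrative; 'soc-fw' is one of the names in FIP_TYPE_LIST)::
+
+        ftype, uuid = get_type_uuid('soc-fw')   # ('soc-fw', 16-byte UUID)
+        ftype, uuid = get_type_uuid(uuid)       # (None, same 16-byte UUID)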
+ """
+ if isinstance(fip_type_or_uuid, str):
+ fip_type = fip_type_or_uuid
+ lookup = FIP_TYPES.get(fip_type)
+ if not lookup:
+ raise ValueError(f"Unknown FIP entry type '{fip_type}'")
+ uuid = lookup.uuid
+ else:
+ fip_type = None
+ uuid = fip_type_or_uuid
+ return fip_type, uuid
+
+
+# pylint: disable=R0903
+class FipHeader:
+ """Class to represent a FIP header"""
+ def __init__(self, name, serial, flags):
+ """Set up a new header object
+
+ Args:
+ name (str): Name, i.e. HEADER_MAGIC
+ serial (str): Serial value, i.e. HEADER_SERIAL
+ flags (int64): Flags value
+ """
+ self.name = name
+ self.serial = serial
+ self.flags = flags
+
+
+# pylint: disable=R0903
+class FipEntry:
+ """Class to represent a single FIP entry
+
+ This is used to hold the information about an entry, including its contents.
+ Use the get_data() method to obtain the raw output for writing to the FIP
+ file.
+ """
+ def __init__(self, uuid, offset, size, flags):
+ self.uuid = uuid
+ self.offset = offset
+ self.size = size
+ self.flags = flags
+ self.fip_type = None
+ self.data = None
+ self.valid = uuid != tools.get_bytes(0, UUID_LEN)
+ if self.valid:
+ # Look up the friendly name
+ matches = {val for (key, val) in FIP_TYPES.items()
+ if val.uuid == uuid}
+ if len(matches) == 1:
+ self.fip_type = matches.pop().name
+
+ @classmethod
+ def from_type(cls, fip_type_or_uuid, data, flags):
+ """Create a FipEntry from a type name
+
+ Args:
+ cls (class): This class
+ fip_type_or_uuid (str or bytes): Name of the type to create, or
+ bytes(16) uuid
+ data (bytes): Contents of entry
+ flags (int64): Flags value
+
+ Returns:
+            FipEntry: Created entry
+ """
+ fip_type, uuid = get_type_uuid(fip_type_or_uuid)
+ fent = FipEntry(uuid, None, len(data), flags)
+ fent.fip_type = fip_type
+ fent.data = data
+ return fent
+
+
+def decode_fip(data):
+ """Decode a FIP into a header and list of FIP entries
+
+ Args:
+        data (bytes): Data block containing the FIP
+
+ Returns:
+ Tuple:
+ header: FipHeader object
+            List of FipEntry objects
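+
+    Example (illustrative)::
+
+        header, fents = decode_fip(tools.read_file('fip.bin'))
+        for fent in fents:
+            print(fent.fip_type, hex(fent.offset), hex(fent.size))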
+ """
+ fields = list(struct.unpack(HEADER_FORMAT, data[:HEADER_LEN]))
+ header = FipHeader(*fields)
+ fents = []
+ pos = HEADER_LEN
+ while True:
+ fields = list(struct.unpack(ENTRY_FORMAT, data[pos:pos + ENTRY_SIZE]))
+ fent = FipEntry(*fields)
+ if not fent.valid:
+ break
+ fent.data = data[fent.offset:fent.offset + fent.size]
+ fents.append(fent)
+ pos += ENTRY_SIZE
+ return header, fents
+
+
+class FipWriter:
+    """Class to handle writing an ARM Trusted Firmware Image Package (FIP)
+
+ Usage is something like:
+
+ fip = FipWriter(size)
+ fip.add_entry('scp-fwu-cfg', tools.read_file('something.bin'))
+ ...
+        data = fip.get_data()
+    """
+ def __init__(self, flags, align):
+ self._fip_entries = []
+ self._flags = flags
+ self._align = align
+
+ def add_entry(self, fip_type, data, flags):
+ """Add a new entry to the FIP
+
+ Args:
+ fip_type (str): Type to add, e.g. 'tos-fw-config'
+ data (bytes): Contents of entry
+ flags (int64): Entry flags
+
+ Returns:
+ FipEntry: entry that was added
+ """
+ fent = FipEntry.from_type(fip_type, data, flags)
+ self._fip_entries.append(fent)
+ return fent
+
+ def get_data(self):
+ """Obtain the full contents of the FIP
+
+        This builds the FIP with headers and all required FIP entries.
+
+ Returns:
+ bytes: data resulting from building the FIP
+ """
+ buf = io.BytesIO()
+ hdr = struct.pack(HEADER_FORMAT, HEADER_MAGIC, HEADER_SERIAL,
+ self._flags)
+ buf.write(hdr)
+
+        # Calculate the position of the first entry
+ offset = len(hdr)
+ offset += len(self._fip_entries) * ENTRY_SIZE
+ offset += ENTRY_SIZE # terminating entry
+
+ for fent in self._fip_entries:
+ offset = tools.align(offset, self._align)
+ fent.offset = offset
+ offset += fent.size
+
+ # Write out the TOC
+ for fent in self._fip_entries:
+ hdr = struct.pack(ENTRY_FORMAT, fent.uuid, fent.offset, fent.size,
+ fent.flags)
+ buf.write(hdr)
+
+ # Write out the entries
+ for fent in self._fip_entries:
+ buf.seek(fent.offset)
+ buf.write(fent.data)
+
+ return buf.getvalue()
+
+
+class FipReader():
+ """Class to handle reading a Firmware Image Package (FIP)
+
+ Usage is something like:
+ fip = fip_util.FipReader(data)
+ fent = fip.get_entry('fwu')
+ self.WriteFile('ufwu.bin', fent.data)
+ blob = fip.get_entry(
+ bytes([0xe3, 0xb7, 0x8d, 0x9e, 0x4a, 0x64, 0x11, 0xec,
+ 0xb4, 0x5c, 0xfb, 0xa2, 0xb9, 0xb4, 0x97, 0x88]))
+ self.WriteFile('blob.bin', blob.data)
+ """
+ def __init__(self, data, read=True):
+        """Set up a new FipReader
+
+ Args:
+ data (bytes): data to read
+ read (bool): True to read the data now
+ """
+ self.fents = collections.OrderedDict()
+ self.data = data
+ if read:
+ self.read()
+
+ def read(self):
+        """Read all the entries in the FIP and store them in self.fents"""
+ self.header, self.fents = decode_fip(self.data)
+
+ def get_entry(self, fip_type_or_uuid):
+ """get_entry() - Find an entry by type or UUID
+
+ Args:
+ fip_type_or_uuid (str or bytes): Name of the type to create, or
+ bytes(16) uuid
+
+ Returns:
+ FipEntry: if found
+
+ Raises:
+ ValueError: entry type not found
+ """
+ fip_type, uuid = get_type_uuid(fip_type_or_uuid)
+ for fent in self.fents:
+ if fent.uuid == uuid:
+ return fent
+ label = fip_type
+ if not label:
+ label = UUID(bytes=uuid)
+ raise ValueError(f"Cannot find FIP entry '{label}'")
+
+
+def parse_macros(srcdir):
+ """parse_macros: Parse the firmware_image_package.h file
+
+ Args:
+ srcdir (str): 'arm-trusted-firmware' source directory
+
+ Returns:
+ dict:
+ key: UUID macro name, e.g. 'UUID_TRUSTED_FWU_CERT'
+ value: list:
+ file comment, e.g. 'ToC Entry UUIDs'
+ macro name, e.g. 'UUID_TRUSTED_FWU_CERT'
+ uuid as bytes(16)
+
+ Raises:
+ ValueError: a line cannot be parsed
+ """
+ re_uuid = re.compile('0x[0-9a-fA-F]{2}')
+ re_comment = re.compile(r'^/\* (.*) \*/$')
+ fname = os.path.join(srcdir, 'include/tools_share/firmware_image_package.h')
+ data = tools.read_file(fname, binary=False)
+ macros = collections.OrderedDict()
+ comment = None
+ for linenum, line in enumerate(data.splitlines()):
+ if line.startswith('/*'):
+ mat = re_comment.match(line)
+ if mat:
+ comment = mat.group(1)
+ else:
+ # Example: #define UUID_TOS_FW_CONFIG \
+ if 'UUID' in line:
+ macro = line.split()[1]
+ elif '{{' in line:
+ mat = re_uuid.findall(line)
+ if not mat or len(mat) != 16:
+ raise ValueError(
+ f'{fname}: Cannot parse UUID line {linenum + 1}: Got matches: {mat}')
+
+ uuid = bytes([int(val, 16) for val in mat])
+ macros[macro] = comment, macro, uuid
+ if not macros:
+ raise ValueError(f'{fname}: Cannot parse file')
+ return macros
+
+
+def parse_names(srcdir):
+ """parse_names: Parse the tbbr_config.c file
+
+ Args:
+ srcdir (str): 'arm-trusted-firmware' source directory
+
+ Returns:
+ tuple: dict of entries:
+ key: UUID macro, e.g. 'UUID_NON_TRUSTED_FIRMWARE_BL33'
+ tuple: entry information
+ Description of entry, e.g. 'Non-Trusted Firmware BL33'
+ UUID macro, e.g. 'UUID_NON_TRUSTED_FIRMWARE_BL33'
+ Name of entry, e.g. 'nt-fw'
+
+ Raises:
+ ValueError: the file cannot be parsed
+ """
+ # Extract the .name, .uuid and .cmdline_name values
+ re_data = re.compile(r'\.name = "([^"]*)",\s*\.uuid = (UUID_\w*),\s*\.cmdline_name = "([^"]+)"',
+ re.S)
+ fname = os.path.join(srcdir, 'tools/fiptool/tbbr_config.c')
+ data = tools.read_file(fname, binary=False)
+
+ # Example entry:
+ # {
+ # .name = "Secure Payload BL32 Extra2 (Trusted OS Extra2)",
+ # .uuid = UUID_SECURE_PAYLOAD_BL32_EXTRA2,
+ # .cmdline_name = "tos-fw-extra2"
+ # },
+ mat = re_data.findall(data)
+ if not mat:
+ raise ValueError(f'{fname}: Cannot parse file')
+ names = {uuid: (desc, uuid, name) for desc, uuid, name in mat}
+ return names
+
+
+def create_code_output(macros, names):
+ """create_code_output() - Create the new version of this Python file
+
+ Args:
+ macros (dict):
+ key (str): UUID macro name, e.g. 'UUID_TRUSTED_FWU_CERT'
+ value: list:
+ file comment, e.g. 'ToC Entry UUIDs'
+ macro name, e.g. 'UUID_TRUSTED_FWU_CERT'
+ uuid as bytes(16)
+
+ names (dict): list of entries, each
+ tuple: entry information
+ Description of entry, e.g. 'Non-Trusted Firmware BL33'
+ UUID macro, e.g. 'UUID_NON_TRUSTED_FIRMWARE_BL33'
+ Name of entry, e.g. 'nt-fw'
+
+ Returns:
+ str: Table of FipType() entries
+ """
+ def _to_hex_list(data):
+ """Convert bytes into C code
+
+ Args:
+ bytes to convert
+
+ Returns:
+ str: in the format '0x12, 0x34, 0x56...'
+ """
+ # Use 0x instead of %# since the latter ignores the 0 modifier in
+ # Python 3.8.10
+ return ', '.join(['0x%02x' % byte for byte in data])
+
+ out = ''
+ last_comment = None
+ for comment, macro, uuid in macros.values():
+ name_entry = names.get(macro)
+ if not name_entry:
+ print(f"Warning: UUID '{macro}' is not mentioned in tbbr_config.c file")
+ continue
+ desc, _, name = name_entry
+ if last_comment != comment:
+ out += f' # {comment}\n'
+ last_comment = comment
+ out += """ FipType('%s', '%s',
+ [%s,
+ %s]),
+""" % (name, desc, _to_hex_list(uuid[:8]), _to_hex_list(uuid[8:]))
+ return out
+
+
+def parse_atf_source(srcdir, dstfile, oldfile):
+ """parse_atf_source(): Parse the ATF source tree and update this file
+
+ Args:
+ srcdir (str): Path to 'arm-trusted-firmware' directory. Get this from:
+ https://github.com/ARM-software/arm-trusted-firmware.git
+ dstfile (str): File to write new code to, if an update is needed
+ oldfile (str): Python source file to compare against
+
+ Raises:
+ ValueError: srcdir readme.rst is missing or the first line does not
+ match what is expected
+ """
+ # We expect a readme file
+ readme_fname = os.path.join(srcdir, 'readme.rst')
+ if not os.path.exists(readme_fname):
+ raise ValueError(
+ f"Expected file '{readme_fname}' - try using -s to specify the "
+ 'arm-trusted-firmware directory')
+ readme = tools.read_file(readme_fname, binary=False)
+ first_line = 'Trusted Firmware-A'
+ if readme.splitlines()[0] != first_line:
+ raise ValueError(f"'{readme_fname}' does not start with '{first_line}'")
+ macros = parse_macros(srcdir)
+ names = parse_names(srcdir)
+ output = create_code_output(macros, names)
+ orig = tools.read_file(oldfile, binary=False)
+ re_fip_list = re.compile(r'(.*FIP_TYPE_LIST = \[).*?( ] # end.*)', re.S)
+ mat = re_fip_list.match(orig)
+ new_code = mat.group(1) + '\n' + output + mat.group(2) if mat else output
+ if new_code == orig:
+ print(f"Existing code in '{oldfile}' is up-to-date")
+ else:
+ tools.write_file(dstfile, new_code, binary=False)
+ print(f'Needs update, try:\n\tmeld {dstfile} {oldfile}')
+
+
+def main(argv, oldfile):
+ """Main program for this tool
+
+ Args:
+ argv (list): List of str command-line arguments
+ oldfile (str): Python source file to compare against
+
+ Returns:
+ int: 0 (exit code)
+ """
+ parser = ArgumentParser(epilog='''Creates an updated version of this code,
+with a table of FIP-entry types parsed from the arm-trusted-firmware source
+directory''')
+ parser.add_argument(
+ '-D', '--debug', action='store_true',
+ help='Enabling debugging (provides a full traceback on error)')
+ parser.add_argument(
+ '-o', '--outfile', type=str, default='fip_util.py.out',
+ help='Output file to write new fip_util.py file to')
+ parser.add_argument(
+ '-s', '--src', type=str, default='.',
+ help='Directory containing the arm-trusted-firmware source')
+ args = parser.parse_args(argv)
+
+ if not args.debug:
+ sys.tracebacklimit = 0
+
+ parse_atf_source(args.src, args.outfile, oldfile)
+ return 0
+
+
+if __name__ == "__main__":
+ sys.exit(main(sys.argv[1:], OUR_FILE)) # pragma: no cover
diff --git a/tools/binman/fip_util_test.py b/tools/binman/fip_util_test.py
new file mode 100755
index 00000000000..56aa56f4643
--- /dev/null
+++ b/tools/binman/fip_util_test.py
@@ -0,0 +1,398 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2021 Google LLC
+# Written by Simon Glass <sjg@chromium.org>
+
+"""Tests for fip_util
+
+This tests a few features of fip_util which are not covered by binman's ftest.py
+"""
+
+import os
+import shutil
+import sys
+import tempfile
+import unittest
+
+# Bring in the patman and dtoc libraries (but don't override the first path
+# in PYTHONPATH)
+OUR_PATH = os.path.dirname(os.path.realpath(__file__))
+sys.path.insert(2, os.path.join(OUR_PATH, '..'))
+
+# pylint: disable=C0413
+from binman import bintool
+from binman import fip_util
+from u_boot_pylib import test_util
+from u_boot_pylib import tools
+
+FIPTOOL = bintool.Bintool.create('fiptool')
+HAVE_FIPTOOL = FIPTOOL.is_present()
+
+# pylint: disable=R0902,R0904
+class TestFip(unittest.TestCase):
+ """Test of fip_util classes"""
+ #pylint: disable=W0212
+ def setUp(self):
+ # Create a temporary directory for test files
+ self._indir = tempfile.mkdtemp(prefix='fip_util.')
+ tools.set_input_dirs([self._indir])
+
+ # Set up a temporary output directory, used by the tools library when
+ # compressing files
+ tools.prepare_output_dir(None)
+
+ self.src_file = os.path.join(self._indir, 'orig.py')
+ self.outname = tools.get_output_filename('out.py')
+ self.args = ['-D', '-s', self._indir, '-o', self.outname]
+ self.readme = os.path.join(self._indir, 'readme.rst')
+ self.macro_dir = os.path.join(self._indir, 'include/tools_share')
+ self.macro_fname = os.path.join(self.macro_dir,
+ 'firmware_image_package.h')
+ self.name_dir = os.path.join(self._indir, 'tools/fiptool')
+ self.name_fname = os.path.join(self.name_dir, 'tbbr_config.c')
+
+ macro_contents = '''
+
+/* ToC Entry UUIDs */
+#define UUID_TRUSTED_UPDATE_FIRMWARE_SCP_BL2U \\
+ {{0x65, 0x92, 0x27, 0x03}, {0x2f, 0x74}, {0xe6, 0x44}, 0x8d, 0xff, {0x57, 0x9a, 0xc1, 0xff, 0x06, 0x10} }
+#define UUID_TRUSTED_UPDATE_FIRMWARE_BL2U \\
+ {{0x60, 0xb3, 0xeb, 0x37}, {0xc1, 0xe5}, {0xea, 0x41}, 0x9d, 0xf3, {0x19, 0xed, 0xa1, 0x1f, 0x68, 0x01} }
+
+'''
+
+ name_contents = '''
+
+toc_entry_t toc_entries[] = {
+ {
+ .name = "SCP Firmware Updater Configuration FWU SCP_BL2U",
+ .uuid = UUID_TRUSTED_UPDATE_FIRMWARE_SCP_BL2U,
+ .cmdline_name = "scp-fwu-cfg"
+ },
+ {
+ .name = "AP Firmware Updater Configuration BL2U",
+ .uuid = UUID_TRUSTED_UPDATE_FIRMWARE_BL2U,
+ .cmdline_name = "ap-fwu-cfg"
+ },
+'''
+
+ def setup_readme(self):
+        """Set up the readme.rst file"""
+ tools.write_file(self.readme, 'Trusted Firmware-A\n==================',
+ binary=False)
+
+ def setup_macro(self, data=macro_contents):
+        """Set up the firmware_image_package.h file"""
+ os.makedirs(self.macro_dir)
+ tools.write_file(self.macro_fname, data, binary=False)
+
+ def setup_name(self, data=name_contents):
+        """Set up the tbbr_config.c file"""
+ os.makedirs(self.name_dir)
+ tools.write_file(self.name_fname, data, binary=False)
+
+ def tearDown(self):
+ """Remove the temporary input directory and its contents"""
+ if self._indir:
+ shutil.rmtree(self._indir)
+ self._indir = None
+ tools.finalise_output_dir()
+
+ def test_no_readme(self):
+ """Test handling of a missing readme.rst"""
+ with self.assertRaises(Exception) as err:
+ fip_util.main(self.args, self.src_file)
+ self.assertIn('Expected file', str(err.exception))
+
+ def test_invalid_readme(self):
+ """Test that an invalid readme.rst is detected"""
+ tools.write_file(self.readme, 'blah', binary=False)
+ with self.assertRaises(Exception) as err:
+ fip_util.main(self.args, self.src_file)
+ self.assertIn('does not start with', str(err.exception))
+
+ def test_no_fip_h(self):
+ """Check handling of missing firmware_image_package.h"""
+ self.setup_readme()
+ with self.assertRaises(Exception) as err:
+ fip_util.main(self.args, self.src_file)
+ self.assertIn('No such file or directory', str(err.exception))
+
+ def test_invalid_fip_h(self):
+ """Check failure to parse firmware_image_package.h"""
+ self.setup_readme()
+ self.setup_macro('blah')
+ with self.assertRaises(Exception) as err:
+ fip_util.main(self.args, self.src_file)
+ self.assertIn('Cannot parse file', str(err.exception))
+
+ def test_parse_fip_h(self):
+ """Check parsing of firmware_image_package.h"""
+ self.setup_readme()
+ # Check parsing the header file
+ self.setup_macro()
+ macros = fip_util.parse_macros(self._indir)
+ expected_macros = {
+ 'UUID_TRUSTED_UPDATE_FIRMWARE_SCP_BL2U':
+ ('ToC Entry UUIDs', 'UUID_TRUSTED_UPDATE_FIRMWARE_SCP_BL2U',
+ bytes([0x65, 0x92, 0x27, 0x03, 0x2f, 0x74, 0xe6, 0x44,
+ 0x8d, 0xff, 0x57, 0x9a, 0xc1, 0xff, 0x06, 0x10])),
+ 'UUID_TRUSTED_UPDATE_FIRMWARE_BL2U':
+ ('ToC Entry UUIDs', 'UUID_TRUSTED_UPDATE_FIRMWARE_BL2U',
+ bytes([0x60, 0xb3, 0xeb, 0x37, 0xc1, 0xe5, 0xea, 0x41,
+ 0x9d, 0xf3, 0x19, 0xed, 0xa1, 0x1f, 0x68, 0x01])),
+ }
+ self.assertEqual(expected_macros, macros)
+
+ def test_missing_tbbr_c(self):
+        """Check handling of a missing tbbr_config.c"""
+ self.setup_readme()
+ self.setup_macro()
+
+ # Still need the .c file
+ with self.assertRaises(Exception) as err:
+ fip_util.main(self.args, self.src_file)
+ self.assertIn('tbbr_config.c', str(err.exception))
+
+ def test_invalid_tbbr_c(self):
+ """Check failure to parse tbbr_config.c"""
+ self.setup_readme()
+ self.setup_macro()
+ # Check invalid format for C file
+ self.setup_name('blah')
+ with self.assertRaises(Exception) as err:
+ fip_util.main(self.args, self.src_file)
+ self.assertIn('Cannot parse file', str(err.exception))
+
+ def test_inconsistent_tbbr_c(self):
+ """Check tbbr_config.c in a format we don't expect"""
+ self.setup_readme()
+ # This is missing a hex value
+ self.setup_macro('''
+
+/* ToC Entry UUIDs */
+#define UUID_TRUSTED_UPDATE_FIRMWARE_SCP_BL2U \\
+ {{0x65, 0x92, 0x27,}, {0x2f, 0x74}, {0xe6, 0x44}, 0x8d, 0xff, {0x57, 0x9a, 0xc1, 0xff, 0x06, 0x10} }
+#define UUID_TRUSTED_UPDATE_FIRMWARE_BL2U \\
+ {{0x60, 0xb3, 0xeb, 0x37}, {0xc1, 0xe5}, {0xea, 0x41}, 0x9d, 0xf3, {0x19, 0xed, 0xa1, 0x1f, 0x68, 0x01} }
+
+''')
+ # Check invalid format for C file
+ self.setup_name('blah')
+ with self.assertRaises(Exception) as err:
+ fip_util.main(self.args, self.src_file)
+ self.assertIn('Cannot parse UUID line 5', str(err.exception))
+
+ def test_parse_tbbr_c(self):
+ """Check parsing tbbr_config.c"""
+ self.setup_readme()
+ self.setup_macro()
+ self.setup_name()
+
+ names = fip_util.parse_names(self._indir)
+
+ expected_names = {
+ 'UUID_TRUSTED_UPDATE_FIRMWARE_SCP_BL2U': (
+ 'SCP Firmware Updater Configuration FWU SCP_BL2U',
+ 'UUID_TRUSTED_UPDATE_FIRMWARE_SCP_BL2U',
+ 'scp-fwu-cfg'),
+ 'UUID_TRUSTED_UPDATE_FIRMWARE_BL2U': (
+ 'AP Firmware Updater Configuration BL2U',
+ 'UUID_TRUSTED_UPDATE_FIRMWARE_BL2U',
+ 'ap-fwu-cfg'),
+ }
+ self.assertEqual(expected_names, names)
+
+ def test_uuid_not_in_tbbr_config_c(self):
+ """Check handling a UUID in the header file that's not in the .c file"""
+ self.setup_readme()
+ self.setup_macro(self.macro_contents + '''
+#define UUID_TRUSTED_OS_FW_KEY_CERT \\
+ {{0x94, 0x77, 0xd6, 0x03}, {0xfb, 0x60}, {0xe4, 0x11}, 0x85, 0xdd, {0xb7, 0x10, 0x5b, 0x8c, 0xee, 0x04} }
+
+''')
+ self.setup_name()
+
+ macros = fip_util.parse_macros(self._indir)
+ names = fip_util.parse_names(self._indir)
+ with test_util.capture_sys_output() as (stdout, _):
+ fip_util.create_code_output(macros, names)
+ self.assertIn(
+ "UUID 'UUID_TRUSTED_OS_FW_KEY_CERT' is not mentioned in tbbr_config.c file",
+ stdout.getvalue())
+
+ def test_changes(self):
+ """Check handling of a source file that does/doesn't need changes"""
+ self.setup_readme()
+ self.setup_macro()
+ self.setup_name()
+
+ # Check generating the file when changes are needed
+ tools.write_file(self.src_file, '''
+
+# This is taken from tbbr_config.c in ARM Trusted Firmware
+FIP_TYPE_LIST = [
+ # ToC Entry UUIDs
+ FipType('scp-fwu-cfg', 'SCP Firmware Updater Configuration FWU SCP_BL2U',
+ [0x65, 0x92, 0x27, 0x03, 0x2f, 0x74, 0xe6, 0x44,
+ 0x8d, 0xff, 0x57, 0x9a, 0xc1, 0xff, 0x06, 0x10]),
+ ] # end
+blah de blah
+ ''', binary=False)
+ with test_util.capture_sys_output() as (stdout, _):
+ fip_util.main(self.args, self.src_file)
+ self.assertIn('Needs update', stdout.getvalue())
+
+ # Check generating the file when no changes are needed
+ tools.write_file(self.src_file, '''
+# This is taken from tbbr_config.c in ARM Trusted Firmware
+FIP_TYPE_LIST = [
+ # ToC Entry UUIDs
+ FipType('scp-fwu-cfg', 'SCP Firmware Updater Configuration FWU SCP_BL2U',
+ [0x65, 0x92, 0x27, 0x03, 0x2f, 0x74, 0xe6, 0x44,
+ 0x8d, 0xff, 0x57, 0x9a, 0xc1, 0xff, 0x06, 0x10]),
+ FipType('ap-fwu-cfg', 'AP Firmware Updater Configuration BL2U',
+ [0x60, 0xb3, 0xeb, 0x37, 0xc1, 0xe5, 0xea, 0x41,
+ 0x9d, 0xf3, 0x19, 0xed, 0xa1, 0x1f, 0x68, 0x01]),
+ ] # end
+blah blah''', binary=False)
+ with test_util.capture_sys_output() as (stdout, _):
+ fip_util.main(self.args, self.src_file)
+ self.assertIn('is up-to-date', stdout.getvalue())
+
+ def test_no_debug(self):
+ """Test running without the -D flag"""
+ self.setup_readme()
+ self.setup_macro()
+ self.setup_name()
+
+ args = self.args.copy()
+ args.remove('-D')
+ tools.write_file(self.src_file, '', binary=False)
+ with test_util.capture_sys_output():
+ fip_util.main(args, self.src_file)
+
+ @unittest.skipIf(not HAVE_FIPTOOL, 'No fiptool available')
+ def test_fiptool_list(self):
+ """Create a FIP and check that fiptool can read it"""
+ fwu = b'my data'
+ tb_fw = b'some more data'
+ fip = fip_util.FipWriter(0x123, 0x10)
+ fip.add_entry('fwu', fwu, 0x456)
+ fip.add_entry('tb-fw', tb_fw, 0)
+ fip.add_entry(bytes(range(16)), tb_fw, 0)
+ data = fip.get_data()
+ fname = tools.get_output_filename('data.fip')
+ tools.write_file(fname, data)
+ result = FIPTOOL.info(fname)
+ self.assertEqual(
+ '''Firmware Updater NS_BL2U: offset=0xB0, size=0x7, cmdline="--fwu"
+Trusted Boot Firmware BL2: offset=0xC0, size=0xE, cmdline="--tb-fw"
+00010203-0405-0607-0809-0A0B0C0D0E0F: offset=0xD0, size=0xE, cmdline="--blob"
+''',
+ result)
+
+ fwu_data = b'my data'
+ tb_fw_data = b'some more data'
+ other_fw_data = b'even more'
+
+ def create_fiptool_image(self):
+ """Create an image with fiptool which we can use for testing
+
+ Returns:
+ FipReader: reader for the image
+ """
+ fwu = os.path.join(self._indir, 'fwu')
+ tools.write_file(fwu, self.fwu_data)
+
+ tb_fw = os.path.join(self._indir, 'tb_fw')
+ tools.write_file(tb_fw, self.tb_fw_data)
+
+ other_fw = os.path.join(self._indir, 'other_fw')
+ tools.write_file(other_fw, self.other_fw_data)
+
+ fname = tools.get_output_filename('data.fip')
+ uuid = 'e3b78d9e-4a64-11ec-b45c-fba2b9b49788'
+ FIPTOOL.create_new(fname, 8, 0x123, fwu, tb_fw, uuid, other_fw)
+
+ return fip_util.FipReader(tools.read_file(fname))
+
+ @unittest.skipIf(not HAVE_FIPTOOL, 'No fiptool available')
+ def test_fiptool_create(self):
+ """Create a FIP with fiptool and check that fip_util can read it"""
+ reader = self.create_fiptool_image()
+
+ header = reader.header
+ fents = reader.fents
+
+ self.assertEqual(0x123 << 32, header.flags)
+ self.assertEqual(fip_util.HEADER_MAGIC, header.name)
+ self.assertEqual(fip_util.HEADER_SERIAL, header.serial)
+
+ self.assertEqual(3, len(fents))
+ fent = fents[0]
+ self.assertEqual(
+ bytes([0x4f, 0x51, 0x1d, 0x11, 0x2b, 0xe5, 0x4e, 0x49,
+ 0xb4, 0xc5, 0x83, 0xc2, 0xf7, 0x15, 0x84, 0x0a]), fent.uuid)
+ self.assertEqual(0xb0, fent.offset)
+ self.assertEqual(len(self.fwu_data), fent.size)
+ self.assertEqual(0, fent.flags)
+ self.assertEqual(self.fwu_data, fent.data)
+
+ fent = fents[1]
+ self.assertEqual(
+ bytes([0x5f, 0xf9, 0xec, 0x0b, 0x4d, 0x22, 0x3e, 0x4d,
+ 0xa5, 0x44, 0xc3, 0x9d, 0x81, 0xc7, 0x3f, 0x0a]), fent.uuid)
+ self.assertEqual(0xb8, fent.offset)
+ self.assertEqual(len(self.tb_fw_data), fent.size)
+ self.assertEqual(0, fent.flags)
+ self.assertEqual(self.tb_fw_data, fent.data)
+
+ fent = fents[2]
+ self.assertEqual(
+ bytes([0xe3, 0xb7, 0x8d, 0x9e, 0x4a, 0x64, 0x11, 0xec,
+ 0xb4, 0x5c, 0xfb, 0xa2, 0xb9, 0xb4, 0x97, 0x88]), fent.uuid)
+ self.assertEqual(0xc8, fent.offset)
+ self.assertEqual(len(self.other_fw_data), fent.size)
+ self.assertEqual(0, fent.flags)
+ self.assertEqual(self.other_fw_data, fent.data)
+
+ @unittest.skipIf(not HAVE_FIPTOOL, 'No fiptool available')
+ def test_reader_get_entry(self):
+ """Test get_entry() by name and UUID"""
+ reader = self.create_fiptool_image()
+ fents = reader.fents
+ fent = reader.get_entry('fwu')
+ self.assertEqual(fent, fents[0])
+
+ fent = reader.get_entry(
+ bytes([0x5f, 0xf9, 0xec, 0x0b, 0x4d, 0x22, 0x3e, 0x4d,
+ 0xa5, 0x44, 0xc3, 0x9d, 0x81, 0xc7, 0x3f, 0x0a]))
+ self.assertEqual(fent, fents[1])
+
+ # Try finding entries that don't exist
+ with self.assertRaises(Exception) as err:
+ fent = reader.get_entry('scp-fwu-cfg')
+ self.assertIn("Cannot find FIP entry 'scp-fwu-cfg'", str(err.exception))
+
+ with self.assertRaises(Exception) as err:
+ fent = reader.get_entry(bytes(list(range(16))))
+ self.assertIn(
+ "Cannot find FIP entry '00010203-0405-0607-0809-0a0b0c0d0e0f'",
+ str(err.exception))
+
+ with self.assertRaises(Exception) as err:
+ fent = reader.get_entry('blah')
+ self.assertIn("Unknown FIP entry type 'blah'", str(err.exception))
+
+ @unittest.skipIf(not HAVE_FIPTOOL, 'No fiptool available')
+ def test_fiptool_errors(self):
+ """Check some error reporting from fiptool"""
+ with self.assertRaises(Exception) as err:
+ with test_util.capture_sys_output():
+ FIPTOOL.create_bad()
+ self.assertIn("unrecognized option '--fred'", str(err.exception))
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/tools/binman/fmap_util.py b/tools/binman/fmap_util.py
new file mode 100644
index 00000000000..40f2dbfe0f5
--- /dev/null
+++ b/tools/binman/fmap_util.py
@@ -0,0 +1,121 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Support for flashrom's FMAP format. This supports a header followed by a
+# number of 'areas', describing regions of a firmware storage device,
+# generally SPI flash.
+
+import collections
+import struct
+import sys
+
+from u_boot_pylib import tools
+
+# constants imported from lib/fmap.h
+FMAP_SIGNATURE = b'__FMAP__'
+FMAP_VER_MAJOR = 1
+FMAP_VER_MINOR = 0
+FMAP_STRLEN = 32
+
+FMAP_AREA_STATIC = 1 << 0
+FMAP_AREA_COMPRESSED = 1 << 1
+FMAP_AREA_RO = 1 << 2
+
+FMAP_HEADER_LEN = 56
+FMAP_AREA_LEN = 42
+
+FMAP_HEADER_FORMAT = '<8sBBQI%dsH' % FMAP_STRLEN
+FMAP_AREA_FORMAT = '<II%dsH' % FMAP_STRLEN
+
+FMAP_HEADER_NAMES = (
+ 'signature',
+ 'ver_major',
+ 'ver_minor',
+ 'base',
+ 'image_size',
+ 'name',
+ 'nareas',
+)
+
+FMAP_AREA_NAMES = (
+ 'offset',
+ 'size',
+ 'name',
+ 'flags',
+)
+
+# Flags supported by areas (bits 2:0 are unused so not included here)
+FMAP_AREA_PRESERVE = 1 << 3 # Preserved by any firmware updates
+
+# These are the two data structures supported by flashrom, a header (which
+# appears once at the start) and an area (which is repeated until the end of
+# the list of areas)
+FmapHeader = collections.namedtuple('FmapHeader', FMAP_HEADER_NAMES)
+FmapArea = collections.namedtuple('FmapArea', FMAP_AREA_NAMES)
+
+
+def NameToFmap(name):
+    """Convert a name to FMAP form: upper case, underscores, no nulls"""
+    if isinstance(name, bytes):
+        name = name.decode('utf-8')
+    return name.replace('\0', '').replace('-', '_').upper()
+
+def ConvertName(field_names, fields):
+ """Convert a name to something flashrom likes
+
+ Flashrom requires upper case, underscores instead of hyphens. We remove any
+ null characters as well. This updates the 'name' value in fields.
+
+ Args:
+ field_names: List of field names for this struct
+        fields: List of field values for this struct, in the same order as
+            field_names; the 'name' value is updated in place
+ """
+ name_index = field_names.index('name')
+ fields[name_index] = tools.to_bytes(NameToFmap(fields[name_index]))
+
+def DecodeFmap(data):
+ """Decode a flashmap into a header and list of areas
+
+ Args:
+ data: Data block containing the FMAP
+
+ Returns:
+ Tuple:
+ header: FmapHeader object
+ List of FmapArea objects
+ """
+ fields = list(struct.unpack(FMAP_HEADER_FORMAT, data[:FMAP_HEADER_LEN]))
+ ConvertName(FMAP_HEADER_NAMES, fields)
+ header = FmapHeader(*fields)
+ areas = []
+ data = data[FMAP_HEADER_LEN:]
+ for area in range(header.nareas):
+ fields = list(struct.unpack(FMAP_AREA_FORMAT, data[:FMAP_AREA_LEN]))
+ ConvertName(FMAP_AREA_NAMES, fields)
+ areas.append(FmapArea(*fields))
+ data = data[FMAP_AREA_LEN:]
+ return header, areas
+
+def EncodeFmap(image_size, name, areas):
+ """Create a new FMAP from a list of areas
+
+ Args:
+ image_size: Size of image, to put in the header
+ name: Name of image, to put in the header
+ areas: List of FmapArea objects
+
+ Returns:
+        bytes: Contents of the FMAP created
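+
+    Example (illustrative, using a single area covering the whole image)::
+
+        area = FmapArea(0, 0x1000, 'FOO', 0)
+        blob = EncodeFmap(0x1000, 'image', [area])
+        header, areas = DecodeFmap(blob)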
+ """
+ def _FormatBlob(fmt, names, obj):
+ params = [getattr(obj, name) for name in names]
+ ConvertName(names, params)
+ return struct.pack(fmt, *params)
+
+ values = FmapHeader(FMAP_SIGNATURE, 1, 0, 0, image_size, name, len(areas))
+ blob = _FormatBlob(FMAP_HEADER_FORMAT, FMAP_HEADER_NAMES, values)
+ for area in areas:
+ blob += _FormatBlob(FMAP_AREA_FORMAT, FMAP_AREA_NAMES, area)
+ return blob
diff --git a/tools/binman/ftest.py b/tools/binman/ftest.py
new file mode 100644
index 00000000000..8a44bc051b3
--- /dev/null
+++ b/tools/binman/ftest.py
@@ -0,0 +1,7464 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# To run a single test, change to this directory, and:
+#
+#    python -m unittest ftest.TestFunctional.testHelp
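+#
+# To run the full functional suite, something like this should also work,
+# run from the U-Boot source tree:
+#
+#    ./tools/binman/binman test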
+
+import collections
+import gzip
+import hashlib
+from optparse import OptionParser
+import os
+import re
+import shutil
+import struct
+import sys
+import tempfile
+import unittest
+import unittest.mock
+import urllib.error
+
+from binman import bintool
+from binman import cbfs_util
+from binman import cmdline
+from binman import control
+from binman import elf
+from binman import elf_test
+from binman import fip_util
+from binman import fmap_util
+from binman import state
+from dtoc import fdt
+from dtoc import fdt_util
+from binman.etype import fdtmap
+from binman.etype import image_header
+from binman.image import Image
+from u_boot_pylib import command
+from u_boot_pylib import test_util
+from u_boot_pylib import tools
+from u_boot_pylib import tout
+
+# Contents of test files, corresponding to different entry types
+U_BOOT_DATA = b'1234'
+U_BOOT_IMG_DATA = b'img'
+U_BOOT_SPL_DATA = b'56780123456789abcdefghijklm'
+U_BOOT_TPL_DATA = b'tpl9876543210fedcbazywvuts'
+U_BOOT_VPL_DATA = b'vpl76543210fedcbazywxyz_'
+BLOB_DATA = b'89'
+ME_DATA = b'0abcd'
+VGA_DATA = b'vga'
+EFI_CAPSULE_DATA = b'efi'
+U_BOOT_DTB_DATA = b'udtb'
+U_BOOT_SPL_DTB_DATA = b'spldtb'
+U_BOOT_TPL_DTB_DATA = b'tpldtb'
+U_BOOT_VPL_DTB_DATA = b'vpldtb'
+X86_START16_DATA = b'start16'
+X86_START16_SPL_DATA = b'start16spl'
+X86_START16_TPL_DATA = b'start16tpl'
+X86_RESET16_DATA = b'reset16'
+X86_RESET16_SPL_DATA = b'reset16spl'
+X86_RESET16_TPL_DATA = b'reset16tpl'
+PPC_MPC85XX_BR_DATA = b'ppcmpc85xxbr'
+U_BOOT_NODTB_DATA = b'nodtb with microcode pointer somewhere in here'
+U_BOOT_SPL_NODTB_DATA = b'splnodtb with microcode pointer somewhere in here'
+U_BOOT_TPL_NODTB_DATA = b'tplnodtb with microcode pointer somewhere in here'
+U_BOOT_VPL_NODTB_DATA = b'vplnodtb'
+U_BOOT_EXP_DATA = U_BOOT_NODTB_DATA + U_BOOT_DTB_DATA
+U_BOOT_SPL_EXP_DATA = U_BOOT_SPL_NODTB_DATA + U_BOOT_SPL_DTB_DATA
+U_BOOT_TPL_EXP_DATA = U_BOOT_TPL_NODTB_DATA + U_BOOT_TPL_DTB_DATA
+FSP_DATA = b'fsp'
+CMC_DATA = b'cmc'
+VBT_DATA = b'vbt'
+MRC_DATA = b'mrc'
+TEXT_DATA = 'text'
+TEXT_DATA2 = 'text2'
+TEXT_DATA3 = 'text3'
+CROS_EC_RW_DATA = b'ecrw'
+GBB_DATA = b'gbbd'
+BMPBLK_DATA = b'bmp'
+VBLOCK_DATA = b'vblk'
+FILES_DATA = (b"sorry I'm late\nOh, don't bother apologising, I'm " +
+ b"sorry you're alive\n")
+COMPRESS_DATA = b'compress xxxxxxxxxxxxxxxxxxxxxx data'
+COMPRESS_DATA_BIG = COMPRESS_DATA * 2
+REFCODE_DATA = b'refcode'
+FSP_M_DATA = b'fsp_m'
+FSP_S_DATA = b'fsp_s'
+FSP_T_DATA = b'fsp_t'
+ATF_BL31_DATA = b'bl31'
+TEE_OS_DATA = b'this is some tee OS data'
+TI_DM_DATA = b'tidmtidm'
+ATF_BL2U_DATA = b'bl2u'
+OPENSBI_DATA = b'opensbi'
+SCP_DATA = b'scp'
+ROCKCHIP_TPL_DATA = b'rockchip-tpl'
+TEST_FDT1_DATA = b'fdt1'
+TEST_FDT2_DATA = b'test-fdt2'
+ENV_DATA = b'var1=1\nvar2="2"'
+ENCRYPTED_IV_DATA = b'123456'
+ENCRYPTED_KEY_DATA = b'abcde'
+PRE_LOAD_MAGIC = b'UBSH'
+PRE_LOAD_VERSION = 0x11223344.to_bytes(4, 'big')
+PRE_LOAD_HDR_SIZE = 0x00001000.to_bytes(4, 'big')
+TI_BOARD_CONFIG_DATA = b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+TI_UNSECURE_DATA = b'unsecuredata'
+
+# Subdirectory of the input dir to use to put test FDTs
+TEST_FDT_SUBDIR = 'fdts'
+
+# The expected size for the device tree in some tests
+EXTRACT_DTB_SIZE = 0x3c9
+
+# Properties expected to be in the device tree when update_dtb is used
+BASE_DTB_PROPS = ['offset', 'size', 'image-pos']
+
+# Extra properties expected to be in the device tree when allow-repack is used
+REPACK_DTB_PROPS = ['orig-offset', 'orig-size']
+
+# Supported compression bintools
+COMP_BINTOOLS = ['bzip2', 'gzip', 'lz4', 'lzma_alone', 'lzop', 'xz', 'zstd']
+
+TEE_ADDR = 0x5678
+
+# Firmware Management Protocol(FMP) GUID
+FW_MGMT_GUID = '6dcbd5ed-e82d-4c44-bda1-7194199ad92a'
+# Image GUID specified in the DTS
+CAPSULE_IMAGE_GUID = '09d7cf52-0720-4710-91d1-08469b7fe9c8'
+# Windows cert GUID
+WIN_CERT_TYPE_EFI_GUID = '4aafd29d-68df-49ee-8aa9-347d375665a7'
+# Empty capsule GUIDs
+EMPTY_CAPSULE_ACCEPT_GUID = '0c996046-bcc0-4d04-85ec-e1fcedf1c6f8'
+EMPTY_CAPSULE_REVERT_GUID = 'acd58b4b-c0e8-475f-99b5-6b3f7e07aaf0'
+
+class TestFunctional(unittest.TestCase):
+ """Functional tests for binman
+
+ Most of these use a sample .dts file to build an image and then check
+ that it looks correct. The sample files are in the test/ subdirectory
+ and are numbered.
+
+ For each entry type a very small test file is created using fixed
+ string contents. This makes it easy to test that things look right, and
+ debug problems.
+
+    In some cases a 'real' file must be used - these are also supplied in
+    the test/ directory.
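+
+    A typical test looks something like this (an illustrative sketch;
+    _DoReadFile is one of the helper methods defined further down)::
+
+        def testSimple(self):
+            data = self._DoReadFile('005_simple.dts')
+            self.assertEqual(U_BOOT_DATA, data)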
+ """
+ @classmethod
+ def setUpClass(cls):
+ global entry
+ from binman import entry
+
+ # Handle the case where argv[0] is 'python'
+ cls._binman_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
+ cls._binman_pathname = os.path.join(cls._binman_dir, 'binman')
+
+ # Create a temporary directory for input files
+ cls._indir = tempfile.mkdtemp(prefix='binmant.')
+
+ # Create some test files
+ TestFunctional._MakeInputFile('u-boot.bin', U_BOOT_DATA)
+ TestFunctional._MakeInputFile('u-boot.img', U_BOOT_IMG_DATA)
+ TestFunctional._MakeInputFile('spl/u-boot-spl.bin', U_BOOT_SPL_DATA)
+ TestFunctional._MakeInputFile('tpl/u-boot-tpl.bin', U_BOOT_TPL_DATA)
+ TestFunctional._MakeInputFile('vpl/u-boot-vpl.bin', U_BOOT_VPL_DATA)
+ TestFunctional._MakeInputFile('blobfile', BLOB_DATA)
+ TestFunctional._MakeInputFile('me.bin', ME_DATA)
+ TestFunctional._MakeInputFile('vga.bin', VGA_DATA)
+ cls._ResetDtbs()
+
+ TestFunctional._MakeInputFile('u-boot-br.bin', PPC_MPC85XX_BR_DATA)
+
+ TestFunctional._MakeInputFile('u-boot-x86-start16.bin', X86_START16_DATA)
+ TestFunctional._MakeInputFile('spl/u-boot-x86-start16-spl.bin',
+ X86_START16_SPL_DATA)
+ TestFunctional._MakeInputFile('tpl/u-boot-x86-start16-tpl.bin',
+ X86_START16_TPL_DATA)
+
+ TestFunctional._MakeInputFile('u-boot-x86-reset16.bin',
+ X86_RESET16_DATA)
+ TestFunctional._MakeInputFile('spl/u-boot-x86-reset16-spl.bin',
+ X86_RESET16_SPL_DATA)
+ TestFunctional._MakeInputFile('tpl/u-boot-x86-reset16-tpl.bin',
+ X86_RESET16_TPL_DATA)
+
+ TestFunctional._MakeInputFile('u-boot-nodtb.bin', U_BOOT_NODTB_DATA)
+ TestFunctional._MakeInputFile('spl/u-boot-spl-nodtb.bin',
+ U_BOOT_SPL_NODTB_DATA)
+ TestFunctional._MakeInputFile('tpl/u-boot-tpl-nodtb.bin',
+ U_BOOT_TPL_NODTB_DATA)
+ TestFunctional._MakeInputFile('vpl/u-boot-vpl-nodtb.bin',
+ U_BOOT_VPL_NODTB_DATA)
+ TestFunctional._MakeInputFile('fsp.bin', FSP_DATA)
+ TestFunctional._MakeInputFile('cmc.bin', CMC_DATA)
+ TestFunctional._MakeInputFile('vbt.bin', VBT_DATA)
+ TestFunctional._MakeInputFile('mrc.bin', MRC_DATA)
+ TestFunctional._MakeInputFile('ecrw.bin', CROS_EC_RW_DATA)
+ TestFunctional._MakeInputDir('devkeys')
+ TestFunctional._MakeInputFile('bmpblk.bin', BMPBLK_DATA)
+ TestFunctional._MakeInputFile('refcode.bin', REFCODE_DATA)
+ TestFunctional._MakeInputFile('fsp_m.bin', FSP_M_DATA)
+ TestFunctional._MakeInputFile('fsp_s.bin', FSP_S_DATA)
+ TestFunctional._MakeInputFile('fsp_t.bin', FSP_T_DATA)
+
+ cls._elf_testdir = os.path.join(cls._indir, 'elftest')
+ elf_test.BuildElfTestFiles(cls._elf_testdir)
+
+ # ELF file with a '_dt_ucode_base_size' symbol
+ TestFunctional._MakeInputFile('u-boot',
+ tools.read_file(cls.ElfTestFile('u_boot_ucode_ptr')))
+
+ # Intel flash descriptor file
+ cls._SetupDescriptor()
+
+ shutil.copytree(cls.TestFile('files'),
+ os.path.join(cls._indir, 'files'))
+
+ shutil.copytree(cls.TestFile('yaml'),
+ os.path.join(cls._indir, 'yaml'))
+
+ TestFunctional._MakeInputFile('compress', COMPRESS_DATA)
+ TestFunctional._MakeInputFile('compress_big', COMPRESS_DATA_BIG)
+ TestFunctional._MakeInputFile('bl31.bin', ATF_BL31_DATA)
+ TestFunctional._MakeInputFile('tee-pager.bin', TEE_OS_DATA)
+ TestFunctional._MakeInputFile('dm.bin', TI_DM_DATA)
+ TestFunctional._MakeInputFile('bl2u.bin', ATF_BL2U_DATA)
+ TestFunctional._MakeInputFile('fw_dynamic.bin', OPENSBI_DATA)
+ TestFunctional._MakeInputFile('scp.bin', SCP_DATA)
+ TestFunctional._MakeInputFile('rockchip-tpl.bin', ROCKCHIP_TPL_DATA)
+ TestFunctional._MakeInputFile('ti_unsecure.bin', TI_UNSECURE_DATA)
+ TestFunctional._MakeInputFile('capsule_input.bin', EFI_CAPSULE_DATA)
+
+ # Add a few .dtb files for testing
+ TestFunctional._MakeInputFile('%s/test-fdt1.dtb' % TEST_FDT_SUBDIR,
+ TEST_FDT1_DATA)
+ TestFunctional._MakeInputFile('%s/test-fdt2.dtb' % TEST_FDT_SUBDIR,
+ TEST_FDT2_DATA)
+
+ TestFunctional._MakeInputFile('env.txt', ENV_DATA)
+
+ # ELF file with two sections in different parts of memory, used for both
+ # ATF and OP_TEE
+ TestFunctional._MakeInputFile('bl31.elf',
+ tools.read_file(cls.ElfTestFile('elf_sections')))
+ TestFunctional._MakeInputFile('tee.elf',
+ tools.read_file(cls.ElfTestFile('elf_sections')))
+
+ # Newer OP_TEE file in v1 binary format
+ cls.make_tee_bin('tee.bin')
+
+ # test files for encrypted tests
+ TestFunctional._MakeInputFile('encrypted-file.iv', ENCRYPTED_IV_DATA)
+ TestFunctional._MakeInputFile('encrypted-file.key', ENCRYPTED_KEY_DATA)
+
+ cls.comp_bintools = {}
+ for name in COMP_BINTOOLS:
+ cls.comp_bintools[name] = bintool.Bintool.create(name)
+
+ @classmethod
+ def tearDownClass(cls):
+ """Remove the temporary input directory and its contents"""
+ if cls.preserve_indir:
+ print('Preserving input dir: %s' % cls._indir)
+ else:
+ if cls._indir:
+ shutil.rmtree(cls._indir)
+ cls._indir = None
+
+ @classmethod
+ def setup_test_args(cls, preserve_indir=False, preserve_outdirs=False,
+ toolpath=None, verbosity=None):
+ """Accept arguments controlling test execution
+
+ Args:
+ preserve_indir: Preserve the shared input directory used by all
+ tests in this class.
+            preserve_outdirs: Preserve the output directories used by tests.
+                Each test has its own, so this is normally only useful when
+                running a single test.
+            toolpath: List of paths to use for tools
+            verbosity: Verbosity level to use (None to leave it unset)
+ """
+ cls.preserve_indir = preserve_indir
+ cls.preserve_outdirs = preserve_outdirs
+ cls.toolpath = toolpath
+ cls.verbosity = verbosity
+
+ def _CheckBintool(self, bintool):
+ if not bintool.is_present():
+ self.skipTest('%s not available' % bintool.name)
+
+ def _CheckLz4(self):
+ bintool = self.comp_bintools['lz4']
+ self._CheckBintool(bintool)
+
+ def _CleanupOutputDir(self):
+ """Remove the temporary output directory"""
+ if self.preserve_outdirs:
+ print('Preserving output dir: %s' % tools.outdir)
+ else:
+ tools._finalise_for_test()
+
+ def setUp(self):
+ # Enable this to turn on debugging output
+ # tout.init(tout.DEBUG)
+ command.test_result = None
+
+ def tearDown(self):
+ """Remove the temporary output directory"""
+ self._CleanupOutputDir()
+
+ def _SetupImageInTmpdir(self):
+ """Set up the output image in a new temporary directory
+
+ This is used when an image has been generated in the output directory,
+        but we want to run binman again. Running binman a second time would
+        create a new output directory without removing the original one.
+
+ This creates a new temporary directory, copies the image to it (with a
+ new name) and removes the old output directory.
+
+ Returns:
+ Tuple:
+ Temporary directory to use
+ New image filename
+ """
+ image_fname = tools.get_output_filename('image.bin')
+ tmpdir = tempfile.mkdtemp(prefix='binman.')
+ updated_fname = os.path.join(tmpdir, 'image-updated.bin')
+ tools.write_file(updated_fname, tools.read_file(image_fname))
+ self._CleanupOutputDir()
+ return tmpdir, updated_fname
+
+ @classmethod
+ def _ResetDtbs(cls):
+ TestFunctional._MakeInputFile('u-boot.dtb', U_BOOT_DTB_DATA)
+ TestFunctional._MakeInputFile('spl/u-boot-spl.dtb', U_BOOT_SPL_DTB_DATA)
+ TestFunctional._MakeInputFile('tpl/u-boot-tpl.dtb', U_BOOT_TPL_DTB_DATA)
+ TestFunctional._MakeInputFile('vpl/u-boot-vpl.dtb', U_BOOT_VPL_DTB_DATA)
+
+ def _RunBinman(self, *args, **kwargs):
+ """Run binman using the command line
+
+        Args:
+            args: Arguments to pass, as a list of strings
+            kwargs: Arguments to pass to command.run_pipe()
+ """
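+        # Run with raise_on_error=False so that any error can be reported
+        # below together with the combined stdout/stderr output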
+ result = command.run_pipe([[self._binman_pathname] + list(args)],
+ capture=True, capture_stderr=True, raise_on_error=False)
+ if result.return_code and kwargs.get('raise_on_error', True):
+ raise Exception("Error running '%s': %s" % (' '.join(args),
+ result.stdout + result.stderr))
+ return result
+
+ def _DoBinman(self, *argv):
+        """Run binman directly (in the same process)
+
+        Args:
+            argv: Arguments to pass, as a list of strings
+ Returns:
+ Return value (0 for success)
+ """
+ argv = list(argv)
+ args = cmdline.ParseArgs(argv)
+ args.pager = 'binman-invalid-pager'
+ args.build_dir = self._indir
+
+ # For testing, you can force an increase in verbosity here
+ # args.verbosity = tout.DEBUG
+ return control.Binman(args)
+
+ def _DoTestFile(self, fname, debug=False, map=False, update_dtb=False,
+ entry_args=None, images=None, use_real_dtb=False,
+ use_expanded=False, verbosity=None, allow_missing=False,
+ allow_fake_blobs=False, extra_indirs=None, threads=None,
+ test_section_timeout=False, update_fdt_in_elf=None,
+ force_missing_bintools='', ignore_missing=False, output_dir=None):
+ """Run binman with a given test file
+
+ Args:
+ fname: Device-tree source filename to use (e.g. 005_simple.dts)
+ debug: True to enable debugging output
+ map: True to output map files for the images
+ update_dtb: Update the offset and size of each entry in the device
+ tree before packing it into the image
+ entry_args: Dict of entry args to supply to binman
+ key: arg name
+ value: value of that arg
+ images: List of image names to build
+ use_real_dtb: True to use the test file as the contents of
+ the u-boot-dtb entry. Normally this is not needed and the
+ test contents (the U_BOOT_DTB_DATA string) can be used.
+                But in some tests we need the real contents.
+ use_expanded: True to use expanded entries where available, e.g.
+ 'u-boot-expanded' instead of 'u-boot'
+ verbosity: Verbosity level to use (0-3, None=don't set it)
+ allow_missing: Set the '--allow-missing' flag so that missing
+ external binaries just produce a warning instead of an error
+ allow_fake_blobs: Set the '--fake-ext-blobs' flag
+ extra_indirs: Extra input directories to add using -I
+ threads: Number of threads to use (None for default, 0 for
+ single-threaded)
+            test_section_timeout: True to force the first section to time out,
+                as used in testThreadTimeout()
+ update_fdt_in_elf: Value to pass with --update-fdt-in-elf=xxx
+            force_missing_bintools (str): comma-separated list of bintools to
+                regard as missing
+            ignore_missing: True to pass the '-W' flag so that missing or
+                faked external blobs result in a warning but binman still
+                returns success
+            output_dir: Specific output directory to use for image using -O
+
+ Returns:
+ int return code, 0 on success
+ """
+ args = []
+ if debug:
+ args.append('-D')
+ if verbosity is not None:
+ args.append('-v%d' % verbosity)
+ elif self.verbosity:
+ args.append('-v%d' % self.verbosity)
+ if self.toolpath:
+ for path in self.toolpath:
+ args += ['--toolpath', path]
+ if threads is not None:
+ args.append('-T%d' % threads)
+ if test_section_timeout:
+ args.append('--test-section-timeout')
+ args += ['build', '-p', '-I', self._indir, '-d', self.TestFile(fname)]
+ if map:
+ args.append('-m')
+ if update_dtb:
+ args.append('-u')
+ if not use_real_dtb:
+ args.append('--fake-dtb')
+ if not use_expanded:
+ args.append('--no-expanded')
+ if entry_args:
+ for arg, value in entry_args.items():
+ args.append('-a%s=%s' % (arg, value))
+ if allow_missing:
+ args.append('-M')
+ if ignore_missing:
+ args.append('-W')
+ if allow_fake_blobs:
+ args.append('--fake-ext-blobs')
+ if force_missing_bintools:
+ args += ['--force-missing-bintools', force_missing_bintools]
+ if update_fdt_in_elf:
+ args += ['--update-fdt-in-elf', update_fdt_in_elf]
+ if images:
+ for image in images:
+ args += ['-i', image]
+ if extra_indirs:
+ for indir in extra_indirs:
+ args += ['-I', indir]
+ if output_dir:
+ args += ['-O', output_dir]
+ return self._DoBinman(*args)
+
+ def _SetupDtb(self, fname, outfile='u-boot.dtb'):
+ """Set up a new test device-tree file
+
+ The given file is compiled and set up as the device tree to be used
+        for this test.
+
+ Args:
+ fname: Filename of .dts file to read
+ outfile: Output filename for compiled device-tree binary
+
+ Returns:
+ Contents of device-tree binary
+ """
+ tmpdir = tempfile.mkdtemp(prefix='binmant.')
+ dtb = fdt_util.EnsureCompiled(self.TestFile(fname), tmpdir)
+ with open(dtb, 'rb') as fd:
+ data = fd.read()
+ TestFunctional._MakeInputFile(outfile, data)
+ shutil.rmtree(tmpdir)
+ return data
+
+ def _GetDtbContentsForSpls(self, dtb_data, name):
+ """Create a version of the main DTB for SPL / TPL / VPL
+
+ For testing we don't actually have different versions of the DTB. With
+ U-Boot we normally run fdtgrep to remove unwanted nodes, but for tests
+ we don't normally have any unwanted nodes.
+
+ We still want the DTBs for SPL and TPL to be different though, since
+ otherwise it is confusing to know which one we are looking at. So add
+ an 'spl' or 'tpl' property to the top-level node.
+
+ Args:
+            dtb_data: dtb data to modify (this should be a valid devicetree)
+ name: Name of a new property to add
+
+ Returns:
+ New dtb data with the property added
+ """
+ dtb = fdt.Fdt.FromData(dtb_data)
+ dtb.Scan()
+ dtb.GetNode('/binman').AddZeroProp(name)
+ dtb.Sync(auto_resize=True)
+ dtb.Pack()
+ return dtb.GetContents()
+
+ def _DoReadFileDtb(self, fname, use_real_dtb=False, use_expanded=False,
+ map=False, update_dtb=False, entry_args=None,
+ reset_dtbs=True, extra_indirs=None, threads=None):
+ """Run binman and return the resulting image
+
+ This runs binman with a given test file and then reads the resulting
+ output file. It is a shortcut function since most tests need to do
+ these steps.
+
+ Raises an assertion failure if binman returns a non-zero exit code.
+
+ Args:
+ fname: Device-tree source filename to use (e.g. 005_simple.dts)
+ use_real_dtb: True to use the test file as the contents of
+ the u-boot-dtb entry. Normally this is not needed and the
+ test contents (the U_BOOT_DTB_DATA string) can be used.
+                But in some tests we need the real contents.
+ use_expanded: True to use expanded entries where available, e.g.
+ 'u-boot-expanded' instead of 'u-boot'
+ map: True to output map files for the images
+ update_dtb: Update the offset and size of each entry in the device
+ tree before packing it into the image
+ entry_args: Dict of entry args to supply to binman
+ key: arg name
+ value: value of that arg
+ reset_dtbs: With use_real_dtb the test dtb is overwritten by this
+ function. If reset_dtbs is True, then the original test dtb
+ is written back before this function finishes
+ extra_indirs: Extra input directories to add using -I
+ threads: Number of threads to use (None for default, 0 for
+ single-threaded)
+
+ Returns:
+ Tuple:
+ Resulting image contents
+ Device tree contents
+ Map data showing contents of image (or None if none)
+ Output device tree binary filename ('u-boot.dtb' path)
+ """
+ dtb_data = None
+ # Use the compiled test file as the u-boot-dtb input
+ if use_real_dtb:
+ dtb_data = self._SetupDtb(fname)
+
+            # For testing purposes, make a copy of the DT for SPL, TPL and
+            # VPL. Add a property indicating which it is, to aid verification.
+ for name in ['spl', 'tpl', 'vpl']:
+ dtb_fname = '%s/u-boot-%s.dtb' % (name, name)
+ outfile = os.path.join(self._indir, dtb_fname)
+ TestFunctional._MakeInputFile(dtb_fname,
+ self._GetDtbContentsForSpls(dtb_data, name))
+
+ try:
+ retcode = self._DoTestFile(fname, map=map, update_dtb=update_dtb,
+ entry_args=entry_args, use_real_dtb=use_real_dtb,
+ use_expanded=use_expanded, extra_indirs=extra_indirs,
+ threads=threads)
+ self.assertEqual(0, retcode)
+ out_dtb_fname = tools.get_output_filename('u-boot.dtb.out')
+
+ # Find the (only) image, read it and return its contents
+ image = control.images['image']
+ image_fname = tools.get_output_filename('image.bin')
+ self.assertTrue(os.path.exists(image_fname))
+ if map:
+ map_fname = tools.get_output_filename('image.map')
+ with open(map_fname) as fd:
+ map_data = fd.read()
+ else:
+ map_data = None
+ with open(image_fname, 'rb') as fd:
+ return fd.read(), dtb_data, map_data, out_dtb_fname
+ finally:
+ # Put the test file back
+ if reset_dtbs and use_real_dtb:
+ self._ResetDtbs()
+
+ def _DoReadFileRealDtb(self, fname):
+ """Run binman with a real .dtb file and return the resulting data
+
+ Args:
+ fname: DT source filename to use (e.g. 082_fdt_update_all.dts)
+
+ Returns:
+ Resulting image contents
+ """
+ return self._DoReadFileDtb(fname, use_real_dtb=True, update_dtb=True)[0]
+
+ def _DoReadFile(self, fname, use_real_dtb=False):
+ """Helper function which discards the device-tree binary
+
+ Args:
+ fname: Device-tree source filename to use (e.g. 005_simple.dts)
+ use_real_dtb: True to use the test file as the contents of
+ the u-boot-dtb entry. Normally this is not needed and the
+ test contents (the U_BOOT_DTB_DATA string) can be used.
+                But in some tests we need the real contents.
+
+ Returns:
+ Resulting image contents
+ """
+ return self._DoReadFileDtb(fname, use_real_dtb)[0]
+
+ @classmethod
+ def _MakeInputFile(cls, fname, contents):
+ """Create a new test input file, creating directories as needed
+
+ Args:
+ fname: Filename to create
+            contents: File contents to write into the file
+ Returns:
+ Full pathname of file created
+ """
+ pathname = os.path.join(cls._indir, fname)
+ dirname = os.path.dirname(pathname)
+ if dirname and not os.path.exists(dirname):
+ os.makedirs(dirname)
+ with open(pathname, 'wb') as fd:
+ fd.write(contents)
+ return pathname
+
+ @classmethod
+ def _MakeInputDir(cls, dirname):
+ """Create a new test input directory, creating directories as needed
+
+ Args:
+ dirname: Directory name to create
+
+ Returns:
+ Full pathname of directory created
+ """
+ pathname = os.path.join(cls._indir, dirname)
+ if not os.path.exists(pathname):
+ os.makedirs(pathname)
+ return pathname
+
+ @classmethod
+ def _SetupSplElf(cls, src_fname='bss_data'):
+ """Set up an ELF file with a '_dt_ucode_base_size' symbol
+
+ Args:
+            src_fname: Filename of ELF file to use as SPL
+ """
+ TestFunctional._MakeInputFile('spl/u-boot-spl',
+ tools.read_file(cls.ElfTestFile(src_fname)))
+
+ @classmethod
+ def _SetupTplElf(cls, src_fname='bss_data'):
+ """Set up an ELF file with a '_dt_ucode_base_size' symbol
+
+ Args:
+            src_fname: Filename of ELF file to use as TPL
+ """
+ TestFunctional._MakeInputFile('tpl/u-boot-tpl',
+ tools.read_file(cls.ElfTestFile(src_fname)))
+
+ @classmethod
+ def _SetupVplElf(cls, src_fname='bss_data'):
+ """Set up an ELF file with a '_dt_ucode_base_size' symbol
+
+ Args:
+            src_fname: Filename of ELF file to use as VPL
+ """
+ TestFunctional._MakeInputFile('vpl/u-boot-vpl',
+ tools.read_file(cls.ElfTestFile(src_fname)))
+
+ @classmethod
+ def _SetupPmuFwlElf(cls, src_fname='bss_data'):
+        """Set up an ELF file to use as the PMU firmware
+
+        Args:
+            src_fname: Filename of ELF file to use as the PMU firmware
+ """
+ TestFunctional._MakeInputFile('pmu-firmware.elf',
+ tools.read_file(cls.ElfTestFile(src_fname)))
+
+ @classmethod
+ def _SetupDescriptor(cls):
+ with open(cls.TestFile('descriptor.bin'), 'rb') as fd:
+ TestFunctional._MakeInputFile('descriptor.bin', fd.read())
+
+ @classmethod
+ def TestFile(cls, fname):
+ return os.path.join(cls._binman_dir, 'test', fname)
+
+ @classmethod
+ def ElfTestFile(cls, fname):
+ return os.path.join(cls._elf_testdir, fname)
+
+ @classmethod
+ def make_tee_bin(cls, fname, paged_sz=0, extra_data=b''):
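+        # Build a minimal OP-TEE v1 image: the 'OPTE' magic, a version byte
+        # and filler for the remaining header bytes, then five little-endian
+        # 32-bit fields (init size, load address hi/lo, a dummy word, paged
+        # size) followed by the U-Boot data as the payload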
+ init_sz, start_hi, start_lo, dummy = (len(U_BOOT_DATA), 0, TEE_ADDR, 0)
+ data = b'OPTE\x01xxx' + struct.pack('<5I', init_sz, start_hi, start_lo,
+ dummy, paged_sz) + U_BOOT_DATA
+ data += extra_data
+ TestFunctional._MakeInputFile(fname, data)
+
+ def AssertInList(self, grep_list, target):
+ """Assert that at least one of a list of things is in a target
+
+ Args:
+ grep_list: List of strings to check
+ target: Target string
+ """
+ for grep in grep_list:
+ if grep in target:
+ return
+ self.fail("Error: '%s' not found in '%s'" % (grep_list, target))
+
+ def CheckNoGaps(self, entries):
+ """Check that all entries fit together without gaps
+
+ Args:
+ entries: List of entries to check
+ """
+ offset = 0
+ for entry in entries.values():
+ self.assertEqual(offset, entry.offset)
+ offset += entry.size
+
+ def GetFdtLen(self, dtb):
+ """Get the totalsize field from a device-tree binary
+
+ Args:
+ dtb: Device-tree binary contents
+
+ Returns:
+ Total size of device-tree binary, from the header
+ """
+ return struct.unpack('>L', dtb[4:8])[0]
+
+ def _GetPropTree(self, dtb, prop_names, prefix='/binman/'):
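+        # Walk the tree under 'prefix' and return a dict mapping
+        # '<node-path>:<prop-name>' (with the prefix stripped) to the 32-bit
+        # property value, e.g. {'u-boot:offset': 0, 'u-boot:size': 4}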
+ def AddNode(node, path):
+ if node.name != '/':
+ path += '/' + node.name
+ for prop in node.props.values():
+ if prop.name in prop_names:
+ prop_path = path + ':' + prop.name
+ tree[prop_path[len(prefix):]] = fdt_util.fdt32_to_cpu(
+ prop.value)
+ for subnode in node.subnodes:
+ AddNode(subnode, path)
+
+ tree = {}
+ AddNode(dtb.GetRoot(), '')
+ return tree
+
+ def _CheckSign(self, fit, key):
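+        # fit_check_sign is run via tools.run(), which raises on a non-zero
+        # exit code, so any exception here means the FIT did not verify
+        # against the given key (self.fail() raises, so the 'return False'
+        # below is never reached)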
+ try:
+ tools.run('fit_check_sign', '-k', key, '-f', fit)
+ except:
+ self.fail('Expected signed FIT container')
+ return False
+ return True
+
+ def testRun(self):
+ """Test a basic run with valid args"""
+ result = self._RunBinman('-h')
+
+ def testFullHelp(self):
+ """Test that the full help is displayed with -H"""
+ result = self._RunBinman('-H')
+ help_file = os.path.join(self._binman_dir, 'README.rst')
+ # Remove possible extraneous strings
+ extra = '::::::::::::::\n' + help_file + '\n::::::::::::::\n'
+ gothelp = result.stdout.replace(extra, '')
+ self.assertEqual(len(gothelp), os.path.getsize(help_file))
+ self.assertEqual(0, len(result.stderr))
+ self.assertEqual(0, result.return_code)
+
+ def testFullHelpInternal(self):
+ """Test that the full help is displayed with -H"""
+ try:
+ command.test_result = command.CommandResult()
+ result = self._DoBinman('-H')
+ help_file = os.path.join(self._binman_dir, 'README.rst')
+ finally:
+ command.test_result = None
+
+ def testHelp(self):
+ """Test that the basic help is displayed with -h"""
+ result = self._RunBinman('-h')
+ self.assertTrue(len(result.stdout) > 200)
+ self.assertEqual(0, len(result.stderr))
+ self.assertEqual(0, result.return_code)
+
+ def testBoard(self):
+ """Test that we can run it with a specific board"""
+ self._SetupDtb('005_simple.dts', 'sandbox/u-boot.dtb')
+ TestFunctional._MakeInputFile('sandbox/u-boot.bin', U_BOOT_DATA)
+ result = self._DoBinman('build', '-n', '-b', 'sandbox')
+ self.assertEqual(0, result)
+
+ def testNeedBoard(self):
+        """Test that we get an error when no board is supplied"""
+ with self.assertRaises(ValueError) as e:
+ result = self._DoBinman('build')
+ self.assertIn("Must provide a board to process (use -b <board>)",
+ str(e.exception))
+
+ def testMissingDt(self):
+ """Test that an invalid device-tree file generates an error"""
+ with self.assertRaises(Exception) as e:
+ self._RunBinman('build', '-d', 'missing_file')
+ # We get one error from libfdt, and a different one from fdtget.
+ self.AssertInList(["Couldn't open blob from 'missing_file'",
+ 'No such file or directory'], str(e.exception))
+
+ def testBrokenDt(self):
+ """Test that an invalid device-tree source file generates an error
+
+ Since this is a source file it should be compiled and the error
+ will come from the device-tree compiler (dtc).
+ """
+ with self.assertRaises(Exception) as e:
+ self._RunBinman('build', '-d', self.TestFile('001_invalid.dts'))
+ self.assertIn("FATAL ERROR: Unable to parse input tree",
+ str(e.exception))
+
+ def testMissingNode(self):
+ """Test that a device tree without a 'binman' node generates an error"""
+ with self.assertRaises(Exception) as e:
+ self._DoBinman('build', '-d', self.TestFile('002_missing_node.dts'))
+ self.assertIn("does not have a 'binman' node", str(e.exception))
+
+ def testEmpty(self):
+        # The fake futility command (_HandleGbbCommand) appends GBB_DATA each
+        # time it is called, so it appears twice here
+ result = self._RunBinman('build', '-d', self.TestFile('003_empty.dts'))
+ self.assertEqual(0, len(result.stderr))
+ self.assertEqual(0, result.return_code)
+
+ def testInvalidEntry(self):
+        """Test that a too-small Chromium OS Google Binary Block is detected"""
+ with self.assertRaises(Exception) as e:
+ result = self._RunBinman('build', '-d',
+ self.TestFile('004_invalid_entry.dts'))
+ self.assertIn("Unknown entry type 'not-a-valid-type' in node "
+ "'/binman/not-a-valid-type'", str(e.exception))
+
+        """Test that the Chromium OS Google Binary Block must have a fixed size"""
+ """Test a simple binman with a single file"""
+ data = self._DoReadFile('005_simple.dts')
+ self.assertEqual(U_BOOT_DATA, data)
+
+ def testSimpleDebug(self):
+ """Test a simple binman run with debugging enabled"""
+ self._DoTestFile('005_simple.dts', debug=True)
+
+ def testDual(self):
+ """Test that we can handle creating two images
+
+ This also tests image padding.
+ """
+ retcode = self._DoTestFile('006_dual_image.dts')
+ self.assertEqual(0, retcode)
+
+ image = control.images['image1']
+ self.assertEqual(len(U_BOOT_DATA), image.size)
+ fname = tools.get_output_filename('image1.bin')
+ self.assertTrue(os.path.exists(fname))
+ with open(fname, 'rb') as fd:
+ data = fd.read()
+ self.assertEqual(U_BOOT_DATA, data)
+
+ image = control.images['image2']
+ self.assertEqual(3 + len(U_BOOT_DATA) + 5, image.size)
+ fname = tools.get_output_filename('image2.bin')
+ self.assertTrue(os.path.exists(fname))
+ with open(fname, 'rb') as fd:
+ data = fd.read()
+ self.assertEqual(U_BOOT_DATA, data[3:7])
+ self.assertEqual(tools.get_bytes(0, 3), data[:3])
+ self.assertEqual(tools.get_bytes(0, 5), data[7:])
+
+ def testBadAlign(self):
+ """Test that an invalid alignment value is detected"""
+ with self.assertRaises(ValueError) as e:
+ self._DoTestFile('007_bad_align.dts')
+ self.assertIn("Node '/binman/u-boot': Alignment 23 must be a power "
+ "of two", str(e.exception))
+
+ def testPackSimple(self):
+ """Test that packing works as expected"""
+ retcode = self._DoTestFile('008_pack.dts')
+ self.assertEqual(0, retcode)
+ self.assertIn('image', control.images)
+ image = control.images['image']
+ entries = image.GetEntries()
+ self.assertEqual(5, len(entries))
+
+ # First u-boot
+ self.assertIn('u-boot', entries)
+ entry = entries['u-boot']
+ self.assertEqual(0, entry.offset)
+ self.assertEqual(len(U_BOOT_DATA), entry.size)
+
+ # Second u-boot, aligned to 16-byte boundary
+ self.assertIn('u-boot-align', entries)
+ entry = entries['u-boot-align']
+ self.assertEqual(16, entry.offset)
+ self.assertEqual(len(U_BOOT_DATA), entry.size)
+
+ # Third u-boot, size 23 bytes
+ self.assertIn('u-boot-size', entries)
+ entry = entries['u-boot-size']
+ self.assertEqual(20, entry.offset)
+ self.assertEqual(len(U_BOOT_DATA), entry.contents_size)
+ self.assertEqual(23, entry.size)
+
+        # Fourth u-boot, placed immediately after the above
+ self.assertIn('u-boot-next', entries)
+ entry = entries['u-boot-next']
+ self.assertEqual(43, entry.offset)
+ self.assertEqual(len(U_BOOT_DATA), entry.size)
+
+ # Fifth u-boot, placed at a fixed offset
+ self.assertIn('u-boot-fixed', entries)
+ entry = entries['u-boot-fixed']
+ self.assertEqual(61, entry.offset)
+ self.assertEqual(len(U_BOOT_DATA), entry.size)
+
+ self.assertEqual(65, image.size)
+
+ def testPackExtra(self):
+ """Test that extra packing feature works as expected"""
+ data, _, _, out_dtb_fname = self._DoReadFileDtb('009_pack_extra.dts',
+ update_dtb=True)
+
+ self.assertIn('image', control.images)
+ image = control.images['image']
+ entries = image.GetEntries()
+ self.assertEqual(6, len(entries))
+
+ # First u-boot with padding before and after (included in minimum size)
+ self.assertIn('u-boot', entries)
+ entry = entries['u-boot']
+ self.assertEqual(0, entry.offset)
+ self.assertEqual(3, entry.pad_before)
+ self.assertEqual(3 + 5 + len(U_BOOT_DATA), entry.size)
+ self.assertEqual(U_BOOT_DATA, entry.data)
+ self.assertEqual(tools.get_bytes(0, 3) + U_BOOT_DATA +
+ tools.get_bytes(0, 5), data[:entry.size])
+ pos = entry.size
+
+ # Second u-boot has an aligned size, but it has no effect
+ self.assertIn('u-boot-align-size-nop', entries)
+ entry = entries['u-boot-align-size-nop']
+ self.assertEqual(pos, entry.offset)
+ self.assertEqual(len(U_BOOT_DATA), entry.size)
+ self.assertEqual(U_BOOT_DATA, entry.data)
+ self.assertEqual(U_BOOT_DATA, data[pos:pos + entry.size])
+ pos += entry.size
+
+ # Third u-boot has an aligned size too
+ self.assertIn('u-boot-align-size', entries)
+ entry = entries['u-boot-align-size']
+ self.assertEqual(pos, entry.offset)
+ self.assertEqual(32, entry.size)
+ self.assertEqual(U_BOOT_DATA, entry.data)
+ self.assertEqual(U_BOOT_DATA + tools.get_bytes(0, 32 - len(U_BOOT_DATA)),
+ data[pos:pos + entry.size])
+ pos += entry.size
+
+ # Fourth u-boot has an aligned end
+ self.assertIn('u-boot-align-end', entries)
+ entry = entries['u-boot-align-end']
+ self.assertEqual(48, entry.offset)
+ self.assertEqual(16, entry.size)
+ self.assertEqual(U_BOOT_DATA, entry.data[:len(U_BOOT_DATA)])
+ self.assertEqual(U_BOOT_DATA + tools.get_bytes(0, 16 - len(U_BOOT_DATA)),
+ data[pos:pos + entry.size])
+ pos += entry.size
+
+ # Fifth u-boot immediately afterwards
+ self.assertIn('u-boot-align-both', entries)
+ entry = entries['u-boot-align-both']
+ self.assertEqual(64, entry.offset)
+ self.assertEqual(64, entry.size)
+ self.assertEqual(U_BOOT_DATA, entry.data[:len(U_BOOT_DATA)])
+ self.assertEqual(U_BOOT_DATA + tools.get_bytes(0, 64 - len(U_BOOT_DATA)),
+ data[pos:pos + entry.size])
+
+ # Sixth u-boot with both minimum size and aligned size
+ self.assertIn('u-boot-min-size', entries)
+ entry = entries['u-boot-min-size']
+ self.assertEqual(128, entry.offset)
+ self.assertEqual(32, entry.size)
+ self.assertEqual(U_BOOT_DATA, entry.data[:len(U_BOOT_DATA)])
+ self.assertEqual(U_BOOT_DATA + tools.get_bytes(0, 32 - len(U_BOOT_DATA)),
+ data[pos:pos + entry.size])
+
+ self.CheckNoGaps(entries)
+ self.assertEqual(160, image.size)
+
+ dtb = fdt.Fdt(out_dtb_fname)
+ dtb.Scan()
+ props = self._GetPropTree(dtb, ['size', 'offset', 'image-pos'])
+ expected = {
+ 'image-pos': 0,
+ 'offset': 0,
+ 'size': 160,
+
+ 'u-boot:image-pos': 0,
+ 'u-boot:offset': 0,
+ 'u-boot:size': 3 + 5 + len(U_BOOT_DATA),
+
+ 'u-boot-align-size-nop:image-pos': 12,
+ 'u-boot-align-size-nop:offset': 12,
+ 'u-boot-align-size-nop:size': 4,
+
+ 'u-boot-align-size:image-pos': 16,
+ 'u-boot-align-size:offset': 16,
+ 'u-boot-align-size:size': 32,
+
+ 'u-boot-align-end:image-pos': 48,
+ 'u-boot-align-end:offset': 48,
+ 'u-boot-align-end:size': 16,
+
+ 'u-boot-align-both:image-pos': 64,
+ 'u-boot-align-both:offset': 64,
+ 'u-boot-align-both:size': 64,
+
+ 'u-boot-min-size:image-pos': 128,
+ 'u-boot-min-size:offset': 128,
+ 'u-boot-min-size:size': 32,
+ }
+ self.assertEqual(expected, props)
+
+ def testPackAlignPowerOf2(self):
+ """Test that invalid entry alignment is detected"""
+ with self.assertRaises(ValueError) as e:
+ self._DoTestFile('010_pack_align_power2.dts')
+ self.assertIn("Node '/binman/u-boot': Alignment 5 must be a power "
+ "of two", str(e.exception))
+
+ def testPackAlignSizePowerOf2(self):
+ """Test that invalid entry size alignment is detected"""
+ with self.assertRaises(ValueError) as e:
+ self._DoTestFile('011_pack_align_size_power2.dts')
+ self.assertIn("Node '/binman/u-boot': Alignment size 55 must be a "
+ "power of two", str(e.exception))
+
+ def testPackInvalidAlign(self):
+ """Test detection of an offset that does not match its alignment"""
+ with self.assertRaises(ValueError) as e:
+ self._DoTestFile('012_pack_inv_align.dts')
+ self.assertIn("Node '/binman/u-boot': Offset 0x5 (5) does not match "
+ "align 0x4 (4)", str(e.exception))
+
+ def testPackInvalidSizeAlign(self):
+ """Test that invalid entry size alignment is detected"""
+ with self.assertRaises(ValueError) as e:
+ self._DoTestFile('013_pack_inv_size_align.dts')
+ self.assertIn("Node '/binman/u-boot': Size 0x5 (5) does not match "
+ "align-size 0x4 (4)", str(e.exception))
+
+ def testPackOverlap(self):
+ """Test that overlapping regions are detected"""
+ with self.assertRaises(ValueError) as e:
+ self._DoTestFile('014_pack_overlap.dts')
+ self.assertIn("Node '/binman/u-boot-align': Offset 0x3 (3) overlaps "
+ "with previous entry '/binman/u-boot' ending at 0x4 (4)",
+ str(e.exception))
+
+ def testPackEntryOverflow(self):
+ """Test that entries that overflow their size are detected"""
+ with self.assertRaises(ValueError) as e:
+ self._DoTestFile('015_pack_overflow.dts')
+ self.assertIn("Node '/binman/u-boot': Entry contents size is 0x4 (4) "
+ "but entry size is 0x3 (3)", str(e.exception))
+
+ def testPackImageOverflow(self):
+ """Test that entries which overflow the image size are detected"""
+ with self.assertRaises(ValueError) as e:
+ self._DoTestFile('016_pack_image_overflow.dts')
+ self.assertIn("Section '/binman': contents size 0x4 (4) exceeds section "
+ "size 0x3 (3)", str(e.exception))
+
+ def testPackImageSize(self):
+ """Test that the image size can be set"""
+ retcode = self._DoTestFile('017_pack_image_size.dts')
+ self.assertEqual(0, retcode)
+ self.assertIn('image', control.images)
+ image = control.images['image']
+ self.assertEqual(7, image.size)
+
+ def testPackImageSizeAlign(self):
+        """Test that image size alignment works as expected"""
+ retcode = self._DoTestFile('018_pack_image_align.dts')
+ self.assertEqual(0, retcode)
+ self.assertIn('image', control.images)
+ image = control.images['image']
+ self.assertEqual(16, image.size)
+
+ def testPackInvalidImageAlign(self):
+ """Test that invalid image alignment is detected"""
+ with self.assertRaises(ValueError) as e:
+ self._DoTestFile('019_pack_inv_image_align.dts')
+ self.assertIn("Section '/binman': Size 0x7 (7) does not match "
+ "align-size 0x8 (8)", str(e.exception))
+
+ def testPackAlignPowerOf2Inv(self):
+        """Test that a non-power-of-two image alignment is detected"""
+ with self.assertRaises(ValueError) as e:
+ self._DoTestFile('020_pack_inv_image_align_power2.dts')
+ self.assertIn("Image '/binman': Alignment size 131 must be a power of "
+ "two", str(e.exception))
+
+ def testImagePadByte(self):
+ """Test that the image pad byte can be specified"""
+ self._SetupSplElf()
+ data = self._DoReadFile('021_image_pad.dts')
+ self.assertEqual(U_BOOT_SPL_DATA + tools.get_bytes(0xff, 1) +
+ U_BOOT_DATA, data)
+
+ def testImageName(self):
+ """Test that image files can be named"""
+ retcode = self._DoTestFile('022_image_name.dts')
+ self.assertEqual(0, retcode)
+ image = control.images['image1']
+ fname = tools.get_output_filename('test-name')
+ self.assertTrue(os.path.exists(fname))
+
+ image = control.images['image2']
+ fname = tools.get_output_filename('test-name.xx')
+ self.assertTrue(os.path.exists(fname))
+
+ def testBlobFilename(self):
+ """Test that generic blobs can be provided by filename"""
+ data = self._DoReadFile('023_blob.dts')
+ self.assertEqual(BLOB_DATA, data)
+
+ def testPackSorted(self):
+ """Test that entries can be sorted"""
+ self._SetupSplElf()
+ data = self._DoReadFile('024_sorted.dts')
+ self.assertEqual(tools.get_bytes(0, 1) + U_BOOT_SPL_DATA +
+ tools.get_bytes(0, 2) + U_BOOT_DATA, data)
+
+ def testPackZeroOffset(self):
+ """Test that an entry at offset 0 is not given a new offset"""
+ self._SetupSplElf()
+ with self.assertRaises(ValueError) as e:
+ self._DoTestFile('025_pack_zero_size.dts')
+ self.assertIn("Node '/binman/u-boot-spl': Offset 0x0 (0) overlaps "
+ "with previous entry '/binman/u-boot' ending at 0x4 (4)",
+ str(e.exception))
+
+ def testPackUbootDtb(self):
+ """Test that a device tree can be added to U-Boot"""
+ data = self._DoReadFile('026_pack_u_boot_dtb.dts')
+ self.assertEqual(U_BOOT_NODTB_DATA + U_BOOT_DTB_DATA, data)
+
+ def testPackX86RomNoSize(self):
+ """Test that the end-at-4gb property requires a size property"""
+ self._SetupSplElf()
+ with self.assertRaises(ValueError) as e:
+ self._DoTestFile('027_pack_4gb_no_size.dts')
+ self.assertIn("Image '/binman': Section size must be provided when "
+ "using end-at-4gb", str(e.exception))
+
+ def test4gbAndSkipAtStartTogether(self):
+        """Test that the end-at-4gb and skip-at-start properties can't be used
+        together"""
+ self._SetupSplElf()
+ with self.assertRaises(ValueError) as e:
+ self._DoTestFile('098_4gb_and_skip_at_start_together.dts')
+ self.assertIn("Image '/binman': Provide either 'end-at-4gb' or "
+ "'skip-at-start'", str(e.exception))
+
+ def testPackX86RomOutside(self):
+ """Test that the end-at-4gb property checks for offset boundaries"""
+ self._SetupSplElf()
+ with self.assertRaises(ValueError) as e:
+ self._DoTestFile('028_pack_4gb_outside.dts')
+ self.assertIn("Node '/binman/u-boot': Offset 0x0 (0) size 0x4 (4) "
+ "is outside the section '/binman' starting at "
+ '0xffffffe0 (4294967264) of size 0x20 (32)',
+ str(e.exception))
+
+ def testPackX86Rom(self):
+ """Test that a basic x86 ROM can be created"""
+ self._SetupSplElf()
+ data = self._DoReadFile('029_x86_rom.dts')
+ self.assertEqual(U_BOOT_DATA + tools.get_bytes(0, 3) + U_BOOT_SPL_DATA +
+ tools.get_bytes(0, 2), data)
+
+ def testPackX86RomMeNoDesc(self):
+ """Test that an invalid Intel descriptor entry is detected"""
+ try:
+ TestFunctional._MakeInputFile('descriptor-empty.bin', b'')
+ with self.assertRaises(ValueError) as e:
+ self._DoTestFile('163_x86_rom_me_empty.dts')
+ self.assertIn("Node '/binman/intel-descriptor': Cannot find Intel Flash Descriptor (FD) signature",
+ str(e.exception))
+ finally:
+ self._SetupDescriptor()
+
+ def testPackX86RomBadDesc(self):
+        """Test that the Intel ME entry requires a descriptor entry"""
+ with self.assertRaises(ValueError) as e:
+ self._DoTestFile('030_x86_rom_me_no_desc.dts')
+ self.assertIn("Node '/binman/intel-me': No offset set with "
+ "offset-unset: should another entry provide this correct "
+ "offset?", str(e.exception))
+
+ def testPackX86RomMe(self):
+ """Test that an x86 ROM with an ME region can be created"""
+ data = self._DoReadFile('031_x86_rom_me.dts')
+ expected_desc = tools.read_file(self.TestFile('descriptor.bin'))
+ if data[:0x1000] != expected_desc:
+ self.fail('Expected descriptor binary at start of image')
+ self.assertEqual(ME_DATA, data[0x1000:0x1000 + len(ME_DATA)])
+
+ def testPackVga(self):
+ """Test that an image with a VGA binary can be created"""
+ data = self._DoReadFile('032_intel_vga.dts')
+ self.assertEqual(VGA_DATA, data[:len(VGA_DATA)])
+
+ def testPackStart16(self):
+ """Test that an image with an x86 start16 region can be created"""
+ data = self._DoReadFile('033_x86_start16.dts')
+ self.assertEqual(X86_START16_DATA, data[:len(X86_START16_DATA)])
+
+ def testPackPowerpcMpc85xxBootpgResetvec(self):
+ """Test that an image with powerpc-mpc85xx-bootpg-resetvec can be
+ created"""
+ data = self._DoReadFile('150_powerpc_mpc85xx_bootpg_resetvec.dts')
+ self.assertEqual(PPC_MPC85XX_BR_DATA, data[:len(PPC_MPC85XX_BR_DATA)])
+
+ def _RunMicrocodeTest(self, dts_fname, nodtb_data, ucode_second=False):
+ """Handle running a test for insertion of microcode
+
+ Args:
+ dts_fname: Name of test .dts file
+ nodtb_data: Data that we expect in the first section
+            ucode_second: True if the microcode entry is second instead of
+                third
+
+ Returns:
+ Tuple:
+ Contents of first region (U-Boot or SPL)
+ Offset and size components of microcode pointer, as inserted
+ in the above (two 4-byte words)
+ """
+ data = self._DoReadFile(dts_fname, True)
+
+ # Now check the device tree has no microcode
+ if ucode_second:
+ ucode_content = data[len(nodtb_data):]
+ ucode_pos = len(nodtb_data)
+ dtb_with_ucode = ucode_content[16:]
+ fdt_len = self.GetFdtLen(dtb_with_ucode)
+ else:
+ dtb_with_ucode = data[len(nodtb_data):]
+ fdt_len = self.GetFdtLen(dtb_with_ucode)
+ ucode_content = dtb_with_ucode[fdt_len:]
+ ucode_pos = len(nodtb_data) + fdt_len
+ fname = tools.get_output_filename('test.dtb')
+ with open(fname, 'wb') as fd:
+ fd.write(dtb_with_ucode)
+ dtb = fdt.FdtScan(fname)
+ ucode = dtb.GetNode('/microcode')
+ self.assertTrue(ucode)
+ for node in ucode.subnodes:
+ self.assertFalse(node.props.get('data'))
+
+ # Check that the microcode appears immediately after the Fdt
+ # This matches the concatenation of the data properties in
+ # the /microcode/update@xxx nodes in 34_x86_ucode.dts.
+ ucode_data = struct.pack('>4L', 0x12345678, 0x12345679, 0xabcd0000,
+ 0x78235609)
+ self.assertEqual(ucode_data, ucode_content[:len(ucode_data)])
+
+ # Check that the microcode pointer was inserted. It should match the
+ # expected offset and size
+ pos_and_size = struct.pack('<2L', 0xfffffe00 + ucode_pos,
+ len(ucode_data))
+ u_boot = data[:len(nodtb_data)]
+ return u_boot, pos_and_size
+
+ def testPackUbootMicrocode(self):
+ """Test that x86 microcode can be handled correctly
+
+ We expect to see the following in the image, in order:
+ u-boot-nodtb.bin with a microcode pointer inserted at the correct
+ place
+ u-boot.dtb with the microcode removed
+ the microcode
+ """
+ first, pos_and_size = self._RunMicrocodeTest('034_x86_ucode.dts',
+ U_BOOT_NODTB_DATA)
+ self.assertEqual(b'nodtb with microcode' + pos_and_size +
+ b' somewhere in here', first)
+
+ def _RunPackUbootSingleMicrocode(self):
+ """Test that x86 microcode can be handled correctly
+
+ We expect to see the following in the image, in order:
+ u-boot-nodtb.bin with a microcode pointer inserted at the correct
+ place
+ u-boot.dtb with the microcode
+ an empty microcode region
+ """
+ # We need the libfdt library to run this test since only that allows
+ # finding the offset of a property. This is required by
+ # Entry_u_boot_dtb_with_ucode.ObtainContents().
+ data = self._DoReadFile('035_x86_single_ucode.dts', True)
+
+ second = data[len(U_BOOT_NODTB_DATA):]
+
+ fdt_len = self.GetFdtLen(second)
+ third = second[fdt_len:]
+ second = second[:fdt_len]
+
+ ucode_data = struct.pack('>2L', 0x12345678, 0x12345679)
+ self.assertIn(ucode_data, second)
+ ucode_pos = second.find(ucode_data) + len(U_BOOT_NODTB_DATA)
+
+ # Check that the microcode pointer was inserted. It should match the
+ # expected offset and size
+ pos_and_size = struct.pack('<2L', 0xfffffe00 + ucode_pos,
+ len(ucode_data))
+ first = data[:len(U_BOOT_NODTB_DATA)]
+ self.assertEqual(b'nodtb with microcode' + pos_and_size +
+ b' somewhere in here', first)
+
+ def testPackUbootSingleMicrocode(self):
+ """Test that x86 microcode can be handled correctly with fdt_normal.
+ """
+ self._RunPackUbootSingleMicrocode()
+
+ def testUBootImg(self):
+ """Test that u-boot.img can be put in a file"""
+ data = self._DoReadFile('036_u_boot_img.dts')
+ self.assertEqual(U_BOOT_IMG_DATA, data)
+
+ def testNoMicrocode(self):
+ """Test that a missing microcode region is detected"""
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFile('037_x86_no_ucode.dts', True)
+ self.assertIn("Node '/binman/u-boot-dtb-with-ucode': No /microcode "
+ "node found in ", str(e.exception))
+
+ def testMicrocodeWithoutNode(self):
+ """Test that a missing u-boot-dtb-with-ucode node is detected"""
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFile('038_x86_ucode_missing_node.dts', True)
+ self.assertIn("Node '/binman/u-boot-with-ucode-ptr': Cannot find "
+ "microcode region u-boot-dtb-with-ucode", str(e.exception))
+
+ def testMicrocodeWithoutNode2(self):
+ """Test that a missing u-boot-ucode node is detected"""
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFile('039_x86_ucode_missing_node2.dts', True)
+ self.assertIn("Node '/binman/u-boot-with-ucode-ptr': Cannot find "
+ "microcode region u-boot-ucode", str(e.exception))
+
+ def testMicrocodeWithoutPtrInElf(self):
+ """Test that a U-Boot binary without the microcode symbol is detected"""
+ # ELF file without a '_dt_ucode_base_size' symbol
+ try:
+ TestFunctional._MakeInputFile('u-boot',
+ tools.read_file(self.ElfTestFile('u_boot_no_ucode_ptr')))
+
+ with self.assertRaises(ValueError) as e:
+ self._RunPackUbootSingleMicrocode()
+ self.assertIn("Node '/binman/u-boot-with-ucode-ptr': Cannot locate "
+ "_dt_ucode_base_size symbol in u-boot", str(e.exception))
+
+ finally:
+ # Put the original file back
+ TestFunctional._MakeInputFile('u-boot',
+ tools.read_file(self.ElfTestFile('u_boot_ucode_ptr')))
+
+ def testMicrocodeNotInImage(self):
+ """Test that microcode must be placed within the image"""
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFile('040_x86_ucode_not_in_image.dts', True)
+ self.assertIn("Node '/binman/u-boot-with-ucode-ptr': Microcode "
+ "pointer _dt_ucode_base_size at fffffe14 is outside the "
+ "section ranging from 00000000 to 0000002e", str(e.exception))
+
+ def testWithoutMicrocode(self):
+ """Test that we can cope with an image without microcode (e.g. qemu)"""
+ TestFunctional._MakeInputFile('u-boot',
+ tools.read_file(self.ElfTestFile('u_boot_no_ucode_ptr')))
+ data, dtb, _, _ = self._DoReadFileDtb('044_x86_optional_ucode.dts', True)
+
+ # Now check the device tree has no microcode
+ self.assertEqual(U_BOOT_NODTB_DATA, data[:len(U_BOOT_NODTB_DATA)])
+ second = data[len(U_BOOT_NODTB_DATA):]
+
+ fdt_len = self.GetFdtLen(second)
+ self.assertEqual(dtb, second[:fdt_len])
+
+ used_len = len(U_BOOT_NODTB_DATA) + fdt_len
+ third = data[used_len:]
+ self.assertEqual(tools.get_bytes(0, 0x200 - used_len), third)
+
+ def testUnknownPosSize(self):
+        """Test that setting the offset/size of an unknown entry is detected"""
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFile('041_unknown_pos_size.dts', True)
+ self.assertIn("Section '/binman': Unable to set offset/size for unknown "
+ "entry 'invalid-entry'", str(e.exception))
+
+ def testPackFsp(self):
+ """Test that an image with a FSP binary can be created"""
+ data = self._DoReadFile('042_intel_fsp.dts')
+ self.assertEqual(FSP_DATA, data[:len(FSP_DATA)])
+
+ def testPackCmc(self):
+ """Test that an image with a CMC binary can be created"""
+ data = self._DoReadFile('043_intel_cmc.dts')
+ self.assertEqual(CMC_DATA, data[:len(CMC_DATA)])
+
+ def testPackVbt(self):
+ """Test that an image with a VBT binary can be created"""
+ data = self._DoReadFile('046_intel_vbt.dts')
+ self.assertEqual(VBT_DATA, data[:len(VBT_DATA)])
+
+ def testSplBssPad(self):
+ """Test that we can pad SPL's BSS with zeros"""
+ # ELF file with a '__bss_size' symbol
+ self._SetupSplElf()
+ data = self._DoReadFile('047_spl_bss_pad.dts')
+ self.assertEqual(U_BOOT_SPL_DATA + tools.get_bytes(0, 10) + U_BOOT_DATA,
+ data)
+
+ def testSplBssPadMissing(self):
+ """Test that a missing symbol is detected"""
+ self._SetupSplElf('u_boot_ucode_ptr')
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFile('047_spl_bss_pad.dts')
+ self.assertIn('Expected __bss_size symbol in spl/u-boot-spl',
+ str(e.exception))
+
+ def testPackStart16Spl(self):
+ """Test that an image with an x86 start16 SPL region can be created"""
+ data = self._DoReadFile('048_x86_start16_spl.dts')
+ self.assertEqual(X86_START16_SPL_DATA, data[:len(X86_START16_SPL_DATA)])
+
+ def _PackUbootSplMicrocode(self, dts, ucode_second=False):
+ """Helper function for microcode tests
+
+ We expect to see the following in the image, in order:
+ u-boot-spl-nodtb.bin with a microcode pointer inserted at the
+ correct place
+ u-boot.dtb with the microcode removed
+ the microcode
+
+ Args:
+ dts: Device tree file to use for test
+            ucode_second: True if the microcode entry is second instead of
+                third
+ """
+ self._SetupSplElf('u_boot_ucode_ptr')
+ first, pos_and_size = self._RunMicrocodeTest(dts, U_BOOT_SPL_NODTB_DATA,
+ ucode_second=ucode_second)
+ self.assertEqual(b'splnodtb with microc' + pos_and_size +
+ b'ter somewhere in here', first)
+
+ def testPackUbootSplMicrocode(self):
+ """Test that x86 microcode can be handled correctly in SPL"""
+ self._SetupSplElf()
+ self._PackUbootSplMicrocode('049_x86_ucode_spl.dts')
+
+ def testPackUbootSplMicrocodeReorder(self):
+ """Test that order doesn't matter for microcode entries
+
+ This is the same as testPackUbootSplMicrocode but when we process the
+ u-boot-ucode entry we have not yet seen the u-boot-dtb-with-ucode
+        entry, so we rely on binman to try again later.
+ """
+ self._PackUbootSplMicrocode('058_x86_ucode_spl_needs_retry.dts',
+ ucode_second=True)
+
+ def testPackMrc(self):
+ """Test that an image with an MRC binary can be created"""
+ data = self._DoReadFile('050_intel_mrc.dts')
+ self.assertEqual(MRC_DATA, data[:len(MRC_DATA)])
+
+ def testSplDtb(self):
+ """Test that an image with spl/u-boot-spl.dtb can be created"""
+ self._SetupSplElf()
+ data = self._DoReadFile('051_u_boot_spl_dtb.dts')
+ self.assertEqual(U_BOOT_SPL_DTB_DATA, data[:len(U_BOOT_SPL_DTB_DATA)])
+
+ def testSplNoDtb(self):
+ """Test that an image with spl/u-boot-spl-nodtb.bin can be created"""
+ self._SetupSplElf()
+ data = self._DoReadFile('052_u_boot_spl_nodtb.dts')
+ self.assertEqual(U_BOOT_SPL_NODTB_DATA, data[:len(U_BOOT_SPL_NODTB_DATA)])
+
+ def checkSymbols(self, dts, base_data, u_boot_offset, entry_args=None,
+ use_expanded=False, no_write_symbols=False):
+ """Check the image contains the expected symbol values
+
+ Args:
+ dts: Device tree file to use for test
+ base_data: Data before and after 'u-boot' section
+ u_boot_offset: Offset of 'u-boot' section in image
+ entry_args: Dict of entry args to supply to binman
+ key: arg name
+ value: value of that arg
+ use_expanded: True to use expanded entries where available, e.g.
+ 'u-boot-expanded' instead of 'u-boot'
+ """
+ elf_fname = self.ElfTestFile('u_boot_binman_syms')
+ syms = elf.GetSymbols(elf_fname, ['binman', 'image'])
+ addr = elf.GetSymbolAddress(elf_fname, '__image_copy_start')
+ self.assertEqual(syms['_binman_sym_magic'].address, addr)
+ self.assertEqual(syms['_binman_u_boot_spl_any_prop_offset'].address,
+ addr + 4)
+
+ self._SetupSplElf('u_boot_binman_syms')
+ data = self._DoReadFileDtb(dts, entry_args=entry_args,
+ use_expanded=use_expanded)[0]
+ # The image should contain the symbols from u_boot_binman_syms.c
+ # Note that image_pos is adjusted by the base address of the image,
+ # which is 0x10 in our test image
+ sym_values = struct.pack('<LLQLL', elf.BINMAN_SYM_MAGIC_VALUE,
+ 0x00, u_boot_offset + len(U_BOOT_DATA),
+ 0x10 + u_boot_offset, 0x04)
+ if no_write_symbols:
+ expected = (base_data +
+ tools.get_bytes(0xff, 0x38 - len(base_data)) +
+ U_BOOT_DATA + base_data)
+ else:
+ expected = (sym_values + base_data[24:] +
+ tools.get_bytes(0xff, 1) + U_BOOT_DATA + sym_values +
+ base_data[24:])
+ self.assertEqual(expected, data)
+
+ def testSymbols(self):
+ """Test binman can assign symbols embedded in U-Boot"""
+ self.checkSymbols('053_symbols.dts', U_BOOT_SPL_DATA, 0x1c)
+
+ def testSymbolsNoDtb(self):
+ """Test binman can assign symbols embedded in U-Boot SPL"""
+ self.checkSymbols('196_symbols_nodtb.dts',
+ U_BOOT_SPL_NODTB_DATA + U_BOOT_SPL_DTB_DATA,
+ 0x38)
+
+ def testPackUnitAddress(self):
+ """Test that we support multiple binaries with the same name"""
+ data = self._DoReadFile('054_unit_address.dts')
+ self.assertEqual(U_BOOT_DATA + U_BOOT_DATA, data)
+
+ def testSections(self):
+ """Basic test of sections"""
+ data = self._DoReadFile('055_sections.dts')
+ expected = (U_BOOT_DATA + tools.get_bytes(ord('!'), 12) +
+ U_BOOT_DATA + tools.get_bytes(ord('a'), 12) +
+ U_BOOT_DATA + tools.get_bytes(ord('&'), 4))
+ self.assertEqual(expected, data)
+
+ def testMap(self):
+ """Tests outputting a map of the images"""
+ _, _, map_data, _ = self._DoReadFileDtb('055_sections.dts', map=True)
+ self.assertEqual('''ImagePos Offset Size Name
+00000000 00000000 00000028 image
+00000000 00000000 00000010 section@0
+00000000 00000000 00000004 u-boot
+00000010 00000010 00000010 section@1
+00000010 00000000 00000004 u-boot
+00000020 00000020 00000004 section@2
+00000020 00000000 00000004 u-boot
+''', map_data)
+
+ def testNamePrefix(self):
+ """Tests that name prefixes are used"""
+ _, _, map_data, _ = self._DoReadFileDtb('056_name_prefix.dts', map=True)
+ self.assertEqual('''ImagePos Offset Size Name
+00000000 00000000 00000028 image
+00000000 00000000 00000010 section@0
+00000000 00000000 00000004 ro-u-boot
+00000010 00000010 00000010 section@1
+00000010 00000000 00000004 rw-u-boot
+''', map_data)
+
+ def testUnknownContents(self):
+ """Test that obtaining the contents works as expected"""
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFile('057_unknown_contents.dts', True)
+ self.assertIn("Image '/binman': Internal error: Could not complete "
+ "processing of contents: remaining ["
+ "<binman.etype._testing.Entry__testing ", str(e.exception))
+
+ def testBadChangeSize(self):
+ """Test that trying to change the size of an entry fails"""
+ try:
+ state.SetAllowEntryExpansion(False)
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFile('059_change_size.dts', True)
+ self.assertIn("Node '/binman/_testing': Cannot update entry size from 2 to 3",
+ str(e.exception))
+ finally:
+ state.SetAllowEntryExpansion(True)
+
+ def testUpdateFdt(self):
+ """Test that we can update the device tree with offset/size info"""
+ _, _, _, out_dtb_fname = self._DoReadFileDtb('060_fdt_update.dts',
+ update_dtb=True)
+ dtb = fdt.Fdt(out_dtb_fname)
+ dtb.Scan()
+ props = self._GetPropTree(dtb, BASE_DTB_PROPS + REPACK_DTB_PROPS)
+ self.assertEqual({
+ 'image-pos': 0,
+ 'offset': 0,
+ '_testing:offset': 32,
+ '_testing:size': 2,
+ '_testing:image-pos': 32,
+ 'section@0/u-boot:offset': 0,
+ 'section@0/u-boot:size': len(U_BOOT_DATA),
+ 'section@0/u-boot:image-pos': 0,
+ 'section@0:offset': 0,
+ 'section@0:size': 16,
+ 'section@0:image-pos': 0,
+
+ 'section@1/u-boot:offset': 0,
+ 'section@1/u-boot:size': len(U_BOOT_DATA),
+ 'section@1/u-boot:image-pos': 16,
+ 'section@1:offset': 16,
+ 'section@1:size': 16,
+ 'section@1:image-pos': 16,
+ 'size': 40
+ }, props)
+
+ def testUpdateFdtBad(self):
+ """Test that we detect when ProcessFdt never completes"""
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFileDtb('061_fdt_update_bad.dts', update_dtb=True)
+ self.assertIn('Could not complete processing of Fdt: remaining '
+ '[<binman.etype._testing.Entry__testing',
+ str(e.exception))
+
+ def testEntryArgs(self):
+ """Test passing arguments to entries from the command line"""
+ entry_args = {
+ 'test-str-arg': 'test1',
+ 'test-int-arg': '456',
+ }
+ self._DoReadFileDtb('062_entry_args.dts', entry_args=entry_args)
+ self.assertIn('image', control.images)
+ entry = control.images['image'].GetEntries()['_testing']
+ self.assertEqual('test0', entry.test_str_fdt)
+ self.assertEqual('test1', entry.test_str_arg)
+ self.assertEqual(123, entry.test_int_fdt)
+ self.assertEqual(456, entry.test_int_arg)
+
+ def testEntryArgsMissing(self):
+ """Test missing arguments and properties"""
+ entry_args = {
+ 'test-int-arg': '456',
+ }
+ self._DoReadFileDtb('063_entry_args_missing.dts', entry_args=entry_args)
+ entry = control.images['image'].GetEntries()['_testing']
+ self.assertEqual('test0', entry.test_str_fdt)
+ self.assertEqual(None, entry.test_str_arg)
+ self.assertEqual(None, entry.test_int_fdt)
+ self.assertEqual(456, entry.test_int_arg)
+
+ def testEntryArgsRequired(self):
+ """Test missing arguments and properties"""
+ entry_args = {
+ 'test-int-arg': '456',
+ }
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFileDtb('064_entry_args_required.dts')
+ self.assertIn("Node '/binman/_testing': "
+ 'Missing required properties/entry args: test-str-arg, '
+ 'test-int-fdt, test-int-arg',
+ str(e.exception))
+
+ def testEntryArgsInvalidFormat(self):
+ """Test that an invalid entry-argument format is detected"""
+ args = ['build', '-d', self.TestFile('064_entry_args_required.dts'),
+ '-ano-value']
+ with self.assertRaises(ValueError) as e:
+ self._DoBinman(*args)
+ self.assertIn("Invalid entry arguemnt 'no-value'", str(e.exception))
+
+ def testEntryArgsInvalidInteger(self):
+ """Test that an invalid entry-argument integer is detected"""
+ entry_args = {
+ 'test-int-arg': 'abc',
+ }
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFileDtb('062_entry_args.dts', entry_args=entry_args)
+ self.assertIn("Node '/binman/_testing': Cannot convert entry arg "
+ "'test-int-arg' (value 'abc') to integer",
+ str(e.exception))
+
+ def testEntryArgsInvalidDatatype(self):
+ """Test that an invalid entry-argument datatype is detected
+
+ This test could be written in entry_test.py except that it needs
+ access to control.entry_args, which seems more than that module should
+ be able to see.
+ """
+ entry_args = {
+ 'test-bad-datatype-arg': '12',
+ }
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFileDtb('065_entry_args_unknown_datatype.dts',
+ entry_args=entry_args)
+ self.assertIn('GetArg() internal error: Unknown data type ',
+ str(e.exception))
+
+ def testText(self):
+ """Test for a text entry type"""
+ entry_args = {
+ 'test-id': TEXT_DATA,
+ 'test-id2': TEXT_DATA2,
+ 'test-id3': TEXT_DATA3,
+ }
+ data, _, _, _ = self._DoReadFileDtb('066_text.dts',
+ entry_args=entry_args)
+ expected = (tools.to_bytes(TEXT_DATA) +
+ tools.get_bytes(0, 8 - len(TEXT_DATA)) +
+ tools.to_bytes(TEXT_DATA2) + tools.to_bytes(TEXT_DATA3) +
+ b'some text' + b'more text')
+ self.assertEqual(expected, data)
+
+ def testEntryDocs(self):
+ """Test for creation of entry documentation"""
+ with test_util.capture_sys_output() as (stdout, stderr):
+ control.WriteEntryDocs(control.GetEntryModules())
+ self.assertTrue(len(stdout.getvalue()) > 0)
+
+ def testEntryDocsMissing(self):
+ """Test handling of missing entry documentation"""
+ with self.assertRaises(ValueError) as e:
+ with test_util.capture_sys_output() as (stdout, stderr):
+ control.WriteEntryDocs(control.GetEntryModules(), 'u_boot')
+ self.assertIn('Documentation is missing for modules: u_boot',
+ str(e.exception))
+
+ def testFmap(self):
+ """Basic test of generation of a flashrom fmap"""
+ data = self._DoReadFile('067_fmap.dts')
+ fhdr, fentries = fmap_util.DecodeFmap(data[32:])
+ expected = (U_BOOT_DATA + tools.get_bytes(ord('!'), 12) +
+ U_BOOT_DATA + tools.get_bytes(ord('a'), 12))
+ self.assertEqual(expected, data[:32])
+ self.assertEqual(b'__FMAP__', fhdr.signature)
+ self.assertEqual(1, fhdr.ver_major)
+ self.assertEqual(0, fhdr.ver_minor)
+ self.assertEqual(0, fhdr.base)
+ expect_size = fmap_util.FMAP_HEADER_LEN + fmap_util.FMAP_AREA_LEN * 5
+ self.assertEqual(16 + 16 + expect_size, fhdr.image_size)
+ self.assertEqual(b'FMAP', fhdr.name)
+ self.assertEqual(5, fhdr.nareas)
+ fiter = iter(fentries)
+
+ fentry = next(fiter)
+ self.assertEqual(b'SECTION0', fentry.name)
+ self.assertEqual(0, fentry.offset)
+ self.assertEqual(16, fentry.size)
+ self.assertEqual(fmap_util.FMAP_AREA_PRESERVE, fentry.flags)
+
+ fentry = next(fiter)
+ self.assertEqual(b'RO_U_BOOT', fentry.name)
+ self.assertEqual(0, fentry.offset)
+ self.assertEqual(4, fentry.size)
+ self.assertEqual(0, fentry.flags)
+
+ fentry = next(fiter)
+ self.assertEqual(b'SECTION1', fentry.name)
+ self.assertEqual(16, fentry.offset)
+ self.assertEqual(16, fentry.size)
+ self.assertEqual(0, fentry.flags)
+
+ fentry = next(fiter)
+ self.assertEqual(b'RW_U_BOOT', fentry.name)
+ self.assertEqual(16, fentry.offset)
+ self.assertEqual(4, fentry.size)
+ self.assertEqual(0, fentry.flags)
+
+ fentry = next(fiter)
+ self.assertEqual(b'FMAP', fentry.name)
+ self.assertEqual(32, fentry.offset)
+ self.assertEqual(expect_size, fentry.size)
+ self.assertEqual(0, fentry.flags)
+
+ def testBlobNamedByArg(self):
+ """Test we can add a blob with the filename coming from an entry arg"""
+ entry_args = {
+ 'cros-ec-rw-path': 'ecrw.bin',
+ }
+ self._DoReadFileDtb('068_blob_named_by_arg.dts', entry_args=entry_args)
+
+ def testFill(self):
+        """Test for a fill entry type"""
+ data = self._DoReadFile('069_fill.dts')
+ expected = tools.get_bytes(0xff, 8) + tools.get_bytes(0, 8)
+ self.assertEqual(expected, data)
+
+ def testFillNoSize(self):
+        """Test for a fill entry type with no size"""
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFile('070_fill_no_size.dts')
+ self.assertIn("'fill' entry is missing properties: size",
+ str(e.exception))
+
+ def _HandleGbbCommand(self, pipe_list):
+ """Fake calls to the futility utility"""
+ if 'futility' in pipe_list[0][0]:
+ fname = pipe_list[0][-1]
+ # Append our GBB data to the file, which will happen every time the
+ # futility command is called.
+ with open(fname, 'ab') as fd:
+ fd.write(GBB_DATA)
+ return command.CommandResult()
+
+ def testGbb(self):
+ """Test for the Chromium OS Google Binary Block"""
+ command.test_result = self._HandleGbbCommand
+ entry_args = {
+ 'keydir': 'devkeys',
+ 'bmpblk': 'bmpblk.bin',
+ }
+ data, _, _, _ = self._DoReadFileDtb('071_gbb.dts', entry_args=entry_args)
+
+ # Since futility
+ expected = (GBB_DATA + GBB_DATA + tools.get_bytes(0, 8) +
+ tools.get_bytes(0, 0x2180 - 16))
+ self.assertEqual(expected, data)
+
+ def testGbbTooSmall(self):
+ """Test for the Chromium OS Google Binary Block being large enough"""
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFileDtb('072_gbb_too_small.dts')
+ self.assertIn("Node '/binman/gbb': GBB is too small",
+ str(e.exception))
+
+ def testGbbNoSize(self):
+ """Test for the Chromium OS Google Binary Block having a size"""
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFileDtb('073_gbb_no_size.dts')
+ self.assertIn("Node '/binman/gbb': GBB must have a fixed size",
+ str(e.exception))
+
+ def testGbbMissing(self):
+ """Test that binman still produces an image if futility is missing"""
+ entry_args = {
+ 'keydir': 'devkeys',
+ }
+ with test_util.capture_sys_output() as (_, stderr):
+ self._DoTestFile('071_gbb.dts', force_missing_bintools='futility',
+ entry_args=entry_args)
+ err = stderr.getvalue()
+ self.assertRegex(err, "Image 'image'.*missing bintools.*: futility")
+
+ def _HandleVblockCommand(self, pipe_list):
+ """Fake calls to the futility utility
+
+ The expected pipe is:
+
+ [('futility', 'vbutil_firmware', '--vblock',
+ 'vblock.vblock', '--keyblock', 'devkeys/firmware.keyblock',
+ '--signprivate', 'devkeys/firmware_data_key.vbprivk',
+ '--version', '1', '--fv', 'input.vblock', '--kernelkey',
+ 'devkeys/kernel_subkey.vbpubk', '--flags', '1')]
+
+ This writes to the output file (here, 'vblock.vblock'). If
+ self._hash_data is False, it writes VBLOCK_DATA, else it writes a hash
+ of the input data (here, 'input.vblock').
+ """
+ if 'futility' in pipe_list[0][0]:
+ fname = pipe_list[0][3]
+ with open(fname, 'wb') as fd:
+ if self._hash_data:
+ infile = pipe_list[0][11]
+ m = hashlib.sha256()
+ data = tools.read_file(infile)
+ m.update(data)
+ fd.write(m.digest())
+ else:
+ fd.write(VBLOCK_DATA)
+
+ return command.CommandResult()
+
+ def testVblock(self):
+ """Test for the Chromium OS Verified Boot Block"""
+ self._hash_data = False
+ command.test_result = self._HandleVblockCommand
+ entry_args = {
+ 'keydir': 'devkeys',
+ }
+ data, _, _, _ = self._DoReadFileDtb('074_vblock.dts',
+ entry_args=entry_args)
+ expected = U_BOOT_DATA + VBLOCK_DATA + U_BOOT_DTB_DATA
+ self.assertEqual(expected, data)
+
+ def testVblockNoContent(self):
+ """Test we detect a vblock which has no content to sign"""
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFile('075_vblock_no_content.dts')
+ self.assertIn("Node '/binman/vblock': Collection must have a 'content' "
+ 'property', str(e.exception))
+
+ def testVblockBadPhandle(self):
+ """Test that we detect a vblock with an invalid phandle in contents"""
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFile('076_vblock_bad_phandle.dts')
+ self.assertIn("Node '/binman/vblock': Cannot find node for phandle "
+ '1000', str(e.exception))
+
+ def testVblockBadEntry(self):
+ """Test that we detect an entry that points to a non-entry"""
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFile('077_vblock_bad_entry.dts')
+ self.assertIn("Node '/binman/vblock': Cannot find entry for node "
+ "'other'", str(e.exception))
+
+ def testVblockContent(self):
+ """Test that the vblock signs the right data"""
+ self._hash_data = True
+ command.test_result = self._HandleVblockCommand
+ entry_args = {
+ 'keydir': 'devkeys',
+ }
+ data = self._DoReadFileDtb(
+ '189_vblock_content.dts', use_real_dtb=True, update_dtb=True,
+ entry_args=entry_args)[0]
+ hashlen = 32 # SHA256 hash is 32 bytes
+ self.assertEqual(U_BOOT_DATA, data[:len(U_BOOT_DATA)])
+ hashval = data[-hashlen:]
+ dtb = data[len(U_BOOT_DATA):-hashlen]
+
+ expected_data = U_BOOT_DATA + dtb
+
+ # The hashval should be a hash of the dtb
+ m = hashlib.sha256()
+ m.update(expected_data)
+ expected_hashval = m.digest()
+ self.assertEqual(expected_hashval, hashval)
+
+ def testVblockMissing(self):
+ """Test that binman still produces an image if futility is missing"""
+ entry_args = {
+ 'keydir': 'devkeys',
+ }
+ with test_util.capture_sys_output() as (_, stderr):
+ self._DoTestFile('074_vblock.dts',
+ force_missing_bintools='futility',
+ entry_args=entry_args)
+ err = stderr.getvalue()
+ self.assertRegex(err, "Image 'image'.*missing bintools.*: futility")
+
+ def testTpl(self):
+ """Test that an image with TPL and its device tree can be created"""
+ # ELF file with a '__bss_size' symbol
+ self._SetupTplElf()
+ data = self._DoReadFile('078_u_boot_tpl.dts')
+ self.assertEqual(U_BOOT_TPL_DATA + U_BOOT_TPL_DTB_DATA, data)
+
+ def testUsesPos(self):
+ """Test that the 'pos' property cannot be used anymore"""
+ with self.assertRaises(ValueError) as e:
+ data = self._DoReadFile('079_uses_pos.dts')
+ self.assertIn("Node '/binman/u-boot': Please use 'offset' instead of "
+ "'pos'", str(e.exception))
+
+ def testFillZero(self):
+        """Test for a fill entry type with a size of 0"""
+ data = self._DoReadFile('080_fill_empty.dts')
+ self.assertEqual(tools.get_bytes(0, 16), data)
+
+ def testTextMissing(self):
+ """Test for a text entry type where there is no text"""
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFileDtb('066_text.dts',)
+ self.assertIn("Node '/binman/text': No value provided for text label "
+ "'test-id'", str(e.exception))
+
+ def testPackStart16Tpl(self):
+ """Test that an image with an x86 start16 TPL region can be created"""
+ data = self._DoReadFile('081_x86_start16_tpl.dts')
+ self.assertEqual(X86_START16_TPL_DATA, data[:len(X86_START16_TPL_DATA)])
+
+ def testSelectImage(self):
+ """Test that we can select which images to build"""
+ expected = 'Skipping images: image1'
+
+ # We should only get the expected message in verbose mode
+ for verbosity in (0, 2):
+ with test_util.capture_sys_output() as (stdout, stderr):
+ retcode = self._DoTestFile('006_dual_image.dts',
+ verbosity=verbosity,
+ images=['image2'])
+ self.assertEqual(0, retcode)
+ if verbosity:
+ self.assertIn(expected, stdout.getvalue())
+ else:
+ self.assertNotIn(expected, stdout.getvalue())
+
+ self.assertFalse(os.path.exists(tools.get_output_filename('image1.bin')))
+ self.assertTrue(os.path.exists(tools.get_output_filename('image2.bin')))
+ self._CleanupOutputDir()
+
+ def testUpdateFdtAll(self):
+ """Test that all device trees are updated with offset/size info"""
+ self._SetupSplElf()
+ self._SetupTplElf()
+ data = self._DoReadFileRealDtb('082_fdt_update_all.dts')
+
+ base_expected = {
+ 'offset': 0,
+ 'image-pos': 0,
+ 'size': 2320,
+ 'section:offset': 0,
+ 'section:image-pos': 0,
+ 'section:size': 565,
+ 'section/u-boot-dtb:offset': 0,
+ 'section/u-boot-dtb:image-pos': 0,
+ 'section/u-boot-dtb:size': 565,
+ 'u-boot-spl-dtb:offset': 565,
+ 'u-boot-spl-dtb:image-pos': 565,
+ 'u-boot-spl-dtb:size': 585,
+ 'u-boot-tpl-dtb:offset': 1150,
+ 'u-boot-tpl-dtb:image-pos': 1150,
+ 'u-boot-tpl-dtb:size': 585,
+ 'u-boot-vpl-dtb:image-pos': 1735,
+ 'u-boot-vpl-dtb:offset': 1735,
+ 'u-boot-vpl-dtb:size': 585,
+ }
+
+        # We expect four device-tree files in the output, one after the
+        # other. Read them in sequence. We look for an 'spl' property in the
+        # SPL tree, 'tpl' in the TPL tree and 'vpl' in the VPL tree, to make
+        # sure they are distinct from the main U-Boot tree. All four should
+        # have the same positions and offsets.
+ start = 0
+ self.maxDiff = None
+ for item in ['', 'spl', 'tpl', 'vpl']:
+ dtb = fdt.Fdt.FromData(data[start:])
+ dtb.Scan()
+ props = self._GetPropTree(dtb, BASE_DTB_PROPS + REPACK_DTB_PROPS +
+ ['spl', 'tpl', 'vpl'])
+ expected = dict(base_expected)
+ if item:
+ expected[item] = 0
+ self.assertEqual(expected, props)
+ start += dtb._fdt_obj.totalsize()
+
+ def testUpdateFdtOutput(self):
+ """Test that output DTB files are updated"""
+ try:
+ data, dtb_data, _, _ = self._DoReadFileDtb('082_fdt_update_all.dts',
+ use_real_dtb=True, update_dtb=True, reset_dtbs=False)
+
+            # Unfortunately, compiling a source file always results in a file
+            # called source.dtb (see fdt_util.EnsureCompiled()). The test
+            # source file (e.g. test/082_fdt_update_all.dts) thus does not
+            # enter binman as a file called u-boot.dtb. To fix this, copy the
+            # file over to the expected place.
+ start = 0
+ for fname in ['u-boot.dtb.out', 'spl/u-boot-spl.dtb.out',
+ 'tpl/u-boot-tpl.dtb.out', 'vpl/u-boot-vpl.dtb.out']:
+ dtb = fdt.Fdt.FromData(data[start:])
+ size = dtb._fdt_obj.totalsize()
+ pathname = tools.get_output_filename(os.path.split(fname)[1])
+ outdata = tools.read_file(pathname)
+ name = os.path.split(fname)[0]
+
+ if name:
+ orig_indata = self._GetDtbContentsForSpls(dtb_data, name)
+ else:
+ orig_indata = dtb_data
+ self.assertNotEqual(outdata, orig_indata,
+                                    "Expected output file '%s' to be updated" % pathname)
+ self.assertEqual(outdata, data[start:start + size],
+ "Expected output file '%s' to match output image" %
+ pathname)
+ start += size
+ finally:
+ self._ResetDtbs()
+
+ def _decompress(self, data):
+ bintool = self.comp_bintools['lz4']
+ return bintool.decompress(data)
+
+ def testCompress(self):
+ """Test compression of blobs"""
+ self._CheckLz4()
+ data, _, _, out_dtb_fname = self._DoReadFileDtb('083_compress.dts',
+ use_real_dtb=True, update_dtb=True)
+ dtb = fdt.Fdt(out_dtb_fname)
+ dtb.Scan()
+ props = self._GetPropTree(dtb, ['size', 'uncomp-size'])
+ orig = self._decompress(data)
+        self.assertEqual(COMPRESS_DATA, orig)
+
+ # Do a sanity check on various fields
+ image = control.images['image']
+ entries = image.GetEntries()
+ self.assertEqual(1, len(entries))
+
+ entry = entries['blob']
+ self.assertEqual(COMPRESS_DATA, entry.uncomp_data)
+ self.assertEqual(len(COMPRESS_DATA), entry.uncomp_size)
+ orig = self._decompress(entry.data)
+ self.assertEqual(orig, entry.uncomp_data)
+
+ self.assertEqual(image.data, entry.data)
+
+ expected = {
+ 'blob:uncomp-size': len(COMPRESS_DATA),
+ 'blob:size': len(data),
+ 'size': len(data),
+ }
+ self.assertEqual(expected, props)
+
+ def testFiles(self):
+ """Test bringing in multiple files"""
+ data = self._DoReadFile('084_files.dts')
+ self.assertEqual(FILES_DATA, data)
+
+ def testFilesCompress(self):
+ """Test bringing in multiple files and compressing them"""
+ self._CheckLz4()
+ data = self._DoReadFile('085_files_compress.dts')
+
+ image = control.images['image']
+ entries = image.GetEntries()
+ files = entries['files']
+ entries = files._entries
+
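+        # Reassemble the original data from the two compressed file entries
+        # ('1.dat' and '2.dat') and check that it matches the input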
+ orig = b''
+ for i in range(1, 3):
+ key = '%d.dat' % i
+ start = entries[key].image_pos
+            length = entries[key].size
+            chunk = data[start:start + length]
+ orig += self._decompress(chunk)
+
+ self.assertEqual(FILES_DATA, orig)
+
+ def testFilesMissing(self):
+ """Test missing files"""
+ with self.assertRaises(ValueError) as e:
+ data = self._DoReadFile('086_files_none.dts')
+ self.assertIn("Node '/binman/files': Pattern \'files/*.none\' matched "
+ 'no files', str(e.exception))
+
+ def testFilesNoPattern(self):
+        """Test a files entry with a missing 'pattern' property"""
+ with self.assertRaises(ValueError) as e:
+ data = self._DoReadFile('087_files_no_pattern.dts')
+ self.assertIn("Node '/binman/files': Missing 'pattern' property",
+ str(e.exception))
+
+ def testExtendSize(self):
+ """Test an extending entry"""
+ data, _, map_data, _ = self._DoReadFileDtb('088_extend_size.dts',
+ map=True)
+ expect = (tools.get_bytes(ord('a'), 8) + U_BOOT_DATA +
+ MRC_DATA + tools.get_bytes(ord('b'), 1) + U_BOOT_DATA +
+ tools.get_bytes(ord('c'), 8) + U_BOOT_DATA +
+ tools.get_bytes(ord('d'), 8))
+ self.assertEqual(expect, data)
+ self.assertEqual('''ImagePos Offset Size Name
+00000000 00000000 00000028 image
+00000000 00000000 00000008 fill
+00000008 00000008 00000004 u-boot
+0000000c 0000000c 00000004 section
+0000000c 00000000 00000003 intel-mrc
+00000010 00000010 00000004 u-boot2
+00000014 00000014 0000000c section2
+00000014 00000000 00000008 fill
+0000001c 00000008 00000004 u-boot
+00000020 00000020 00000008 fill2
+''', map_data)
+
+ def testExtendSizeBad(self):
+ """Test an extending entry which fails to provide contents"""
+ with test_util.capture_sys_output() as (stdout, stderr):
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFileDtb('089_extend_size_bad.dts', map=True)
+ self.assertIn("Node '/binman/_testing': Cannot obtain contents when "
+ 'expanding entry', str(e.exception))
+
+ def testHash(self):
+ """Test hashing of the contents of an entry"""
+ _, _, _, out_dtb_fname = self._DoReadFileDtb('090_hash.dts',
+ use_real_dtb=True, update_dtb=True)
+ dtb = fdt.Fdt(out_dtb_fname)
+ dtb.Scan()
+ hash_node = dtb.GetNode('/binman/u-boot/hash').props['value']
+ m = hashlib.sha256()
+ m.update(U_BOOT_DATA)
+ self.assertEqual(m.digest(), b''.join(hash_node.value))
+
+ def testHashNoAlgo(self):
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFileDtb('091_hash_no_algo.dts', update_dtb=True)
+ self.assertIn("Node \'/binman/u-boot\': Missing \'algo\' property for "
+ 'hash node', str(e.exception))
+
+ def testHashBadAlgo(self):
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFileDtb('092_hash_bad_algo.dts', update_dtb=True)
+ self.assertIn("Node '/binman/u-boot': Unknown hash algorithm 'invalid'",
+ str(e.exception))
+
+ def testHashSection(self):
+        """Test hashing of the contents of a section"""
+ _, _, _, out_dtb_fname = self._DoReadFileDtb('099_hash_section.dts',
+ use_real_dtb=True, update_dtb=True)
+ dtb = fdt.Fdt(out_dtb_fname)
+ dtb.Scan()
+ hash_node = dtb.GetNode('/binman/section/hash').props['value']
+ m = hashlib.sha256()
+ m.update(U_BOOT_DATA)
+ m.update(tools.get_bytes(ord('a'), 16))
+ self.assertEqual(m.digest(), b''.join(hash_node.value))
+
+ def testPackUBootTplMicrocode(self):
+ """Test that x86 microcode can be handled correctly in TPL
+
+ We expect to see the following in the image, in order:
+ u-boot-tpl-nodtb.bin with a microcode pointer inserted at the correct
+ place
+ u-boot-tpl.dtb with the microcode removed
+ the microcode
+ """
+ self._SetupTplElf('u_boot_ucode_ptr')
+ first, pos_and_size = self._RunMicrocodeTest('093_x86_tpl_ucode.dts',
+ U_BOOT_TPL_NODTB_DATA)
+ self.assertEqual(b'tplnodtb with microc' + pos_and_size +
+ b'ter somewhere in here', first)
+
+ def testFmapX86(self):
+ """Basic test of generation of a flashrom fmap"""
+ data = self._DoReadFile('094_fmap_x86.dts')
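+        # U_BOOT_DATA (4 bytes) and MRC_DATA (3 bytes) occupy the first 7
+        # bytes; the remainder of the 32-byte region is 'a' padding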
+ expected = U_BOOT_DATA + MRC_DATA + tools.get_bytes(ord('a'), 32 - 7)
+ self.assertEqual(expected, data[:32])
+ fhdr, fentries = fmap_util.DecodeFmap(data[32:])
+
+ self.assertEqual(0x100, fhdr.image_size)
+
+ self.assertEqual(0, fentries[0].offset)
+ self.assertEqual(4, fentries[0].size)
+ self.assertEqual(b'U_BOOT', fentries[0].name)
+
+ self.assertEqual(4, fentries[1].offset)
+ self.assertEqual(3, fentries[1].size)
+ self.assertEqual(b'INTEL_MRC', fentries[1].name)
+
+ self.assertEqual(32, fentries[2].offset)
+ self.assertEqual(fmap_util.FMAP_HEADER_LEN +
+ fmap_util.FMAP_AREA_LEN * 3, fentries[2].size)
+ self.assertEqual(b'FMAP', fentries[2].name)
+
+ def testFmapX86Section(self):
+        """Basic test of generation of a flashrom fmap with a section"""
+ data = self._DoReadFile('095_fmap_x86_section.dts')
+ expected = U_BOOT_DATA + MRC_DATA + tools.get_bytes(ord('b'), 32 - 7)
+ self.assertEqual(expected, data[:32])
+ fhdr, fentries = fmap_util.DecodeFmap(data[36:])
+
+ self.assertEqual(0x180, fhdr.image_size)
+ expect_size = fmap_util.FMAP_HEADER_LEN + fmap_util.FMAP_AREA_LEN * 4
+ fiter = iter(fentries)
+
+ fentry = next(fiter)
+ self.assertEqual(b'U_BOOT', fentry.name)
+ self.assertEqual(0, fentry.offset)
+ self.assertEqual(4, fentry.size)
+
+ fentry = next(fiter)
+ self.assertEqual(b'SECTION', fentry.name)
+ self.assertEqual(4, fentry.offset)
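+        # The section holds the MRC plus padding (0x20 bytes), followed by
+        # the fmap itself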
+ self.assertEqual(0x20 + expect_size, fentry.size)
+
+ fentry = next(fiter)
+ self.assertEqual(b'INTEL_MRC', fentry.name)
+ self.assertEqual(4, fentry.offset)
+ self.assertEqual(3, fentry.size)
+
+ fentry = next(fiter)
+ self.assertEqual(b'FMAP', fentry.name)
+ self.assertEqual(36, fentry.offset)
+ self.assertEqual(expect_size, fentry.size)
+
+ def testElf(self):
+ """Basic test of ELF entries"""
+ self._SetupSplElf()
+ self._SetupTplElf()
+ with open(self.ElfTestFile('bss_data'), 'rb') as fd:
+ TestFunctional._MakeInputFile('-boot', fd.read())
+ data = self._DoReadFile('096_elf.dts')
+
+ def testElfStrip(self):
+        """Basic test of ELF entries with stripping"""
+ self._SetupSplElf()
+ with open(self.ElfTestFile('bss_data'), 'rb') as fd:
+ TestFunctional._MakeInputFile('-boot', fd.read())
+ data = self._DoReadFile('097_elf_strip.dts')
+
+ def testPackOverlapMap(self):
+ """Test that overlapping regions are detected"""
+ with test_util.capture_sys_output() as (stdout, stderr):
+ with self.assertRaises(ValueError) as e:
+ self._DoTestFile('014_pack_overlap.dts', map=True)
+ map_fname = tools.get_output_filename('image.map')
+ self.assertEqual("Wrote map file '%s' to show errors\n" % map_fname,
+ stdout.getvalue())
+
+        # We should not get an image, but there should be a map file
+ self.assertFalse(os.path.exists(tools.get_output_filename('image.bin')))
+ self.assertTrue(os.path.exists(map_fname))
+ map_data = tools.read_file(map_fname, binary=False)
+ self.assertEqual('''ImagePos Offset Size Name
+<none> 00000000 00000008 image
+<none> 00000000 00000004 u-boot
+<none> 00000003 00000004 u-boot-align
+''', map_data)
+
+ def testPackRefCode(self):
+ """Test that an image with an Intel Reference code binary works"""
+ data = self._DoReadFile('100_intel_refcode.dts')
+ self.assertEqual(REFCODE_DATA, data[:len(REFCODE_DATA)])
+
+ def testSectionOffset(self):
+ """Tests use of a section with an offset"""
+ data, _, map_data, _ = self._DoReadFileDtb('101_sections_offset.dts',
+ map=True)
+ self.assertEqual('''ImagePos Offset Size Name
+00000000 00000000 00000038 image
+00000004 00000004 00000010 section@0
+00000004 00000000 00000004 u-boot
+00000018 00000018 00000010 section@1
+00000018 00000000 00000004 u-boot
+0000002c 0000002c 00000004 section@2
+0000002c 00000000 00000004 u-boot
+''', map_data)
+ self.assertEqual(data,
+ tools.get_bytes(0x26, 4) + U_BOOT_DATA +
+ tools.get_bytes(0x21, 12) +
+ tools.get_bytes(0x26, 4) + U_BOOT_DATA +
+ tools.get_bytes(0x61, 12) +
+ tools.get_bytes(0x26, 4) + U_BOOT_DATA +
+ tools.get_bytes(0x26, 8))
+
+ def testCbfsRaw(self):
+ """Test base handling of a Coreboot Filesystem (CBFS)
+
+        The exact contents of the CBFS are verified by similar tests in
+ cbfs_util_test.py. The tests here merely check that the files added to
+ the CBFS can be found in the final image.
+ """
+ data = self._DoReadFile('102_cbfs_raw.dts')
+ size = 0xb0
+
+ cbfs = cbfs_util.CbfsReader(data)
+ self.assertEqual(size, cbfs.rom_size)
+
+ self.assertIn('u-boot-dtb', cbfs.files)
+ cfile = cbfs.files['u-boot-dtb']
+ self.assertEqual(U_BOOT_DTB_DATA, cfile.data)
+
+ def testCbfsArch(self):
+ """Test on non-x86 architecture"""
+ data = self._DoReadFile('103_cbfs_raw_ppc.dts')
+ size = 0x100
+
+ cbfs = cbfs_util.CbfsReader(data)
+ self.assertEqual(size, cbfs.rom_size)
+
+ self.assertIn('u-boot-dtb', cbfs.files)
+ cfile = cbfs.files['u-boot-dtb']
+ self.assertEqual(U_BOOT_DTB_DATA, cfile.data)
+
+ def testCbfsStage(self):
+ """Tests handling of a Coreboot Filesystem (CBFS)"""
+ if not elf.ELF_TOOLS:
+ self.skipTest('Python elftools not available')
+ elf_fname = os.path.join(self._indir, 'cbfs-stage.elf')
+ elf.MakeElf(elf_fname, U_BOOT_DATA, U_BOOT_DTB_DATA)
+ size = 0xb0
+
+ data = self._DoReadFile('104_cbfs_stage.dts')
+ cbfs = cbfs_util.CbfsReader(data)
+ self.assertEqual(size, cbfs.rom_size)
+
+ self.assertIn('u-boot', cbfs.files)
+ cfile = cbfs.files['u-boot']
+ self.assertEqual(U_BOOT_DATA + U_BOOT_DTB_DATA, cfile.data)
+
+ def testCbfsRawCompress(self):
+ """Test handling of compressing raw files"""
+ self._CheckLz4()
+ data = self._DoReadFile('105_cbfs_raw_compress.dts')
+ size = 0x140
+
+ cbfs = cbfs_util.CbfsReader(data)
+ self.assertIn('u-boot', cbfs.files)
+ cfile = cbfs.files['u-boot']
+ self.assertEqual(COMPRESS_DATA, cfile.data)
+
+ def testCbfsBadArch(self):
+ """Test handling of a bad architecture"""
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFile('106_cbfs_bad_arch.dts')
+ self.assertIn("Invalid architecture 'bad-arch'", str(e.exception))
+
+ def testCbfsNoSize(self):
+ """Test handling of a missing size property"""
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFile('107_cbfs_no_size.dts')
+ self.assertIn('entry must have a size property', str(e.exception))
+
+ def testCbfsNoContents(self):
+        """Test handling of a CBFS entry which does not provide contents"""
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFile('108_cbfs_no_contents.dts')
+ self.assertIn('Could not complete processing of contents',
+ str(e.exception))
+
+ def testCbfsBadCompress(self):
+        """Test handling of a bad compression algorithm"""
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFile('109_cbfs_bad_compress.dts')
+ self.assertIn("Invalid compression in 'u-boot': 'invalid-algo'",
+ str(e.exception))
+
+ def testCbfsNamedEntries(self):
+ """Test handling of named entries"""
+ data = self._DoReadFile('110_cbfs_name.dts')
+
+ cbfs = cbfs_util.CbfsReader(data)
+ self.assertIn('FRED', cbfs.files)
+ cfile1 = cbfs.files['FRED']
+ self.assertEqual(U_BOOT_DATA, cfile1.data)
+
+ self.assertIn('hello', cbfs.files)
+ cfile2 = cbfs.files['hello']
+ self.assertEqual(U_BOOT_DTB_DATA, cfile2.data)
+
+ def _SetupIfwi(self, fname):
+ """Set up to run an IFWI test
+
+ Args:
+ fname: Filename of input file to provide (fitimage.bin or ifwi.bin)
+ """
+ self._SetupSplElf()
+ self._SetupTplElf()
+
+ # Intel Integrated Firmware Image (IFWI) file
+ with gzip.open(self.TestFile('%s.gz' % fname), 'rb') as fd:
+ data = fd.read()
+        TestFunctional._MakeInputFile(fname, data)
+
+ def _CheckIfwi(self, data):
+ """Check that an image with an IFWI contains the correct output
+
+ Args:
+            data: Contents of output file
+ """
+ expected_desc = tools.read_file(self.TestFile('descriptor.bin'))
+ if data[:0x1000] != expected_desc:
+ self.fail('Expected descriptor binary at start of image')
+
+        # We expect to find the TPL in subpart IBBP, entry IBBL
+ image_fname = tools.get_output_filename('image.bin')
+ tpl_fname = tools.get_output_filename('tpl.out')
+ ifwitool = bintool.Bintool.create('ifwitool')
+ ifwitool.extract(image_fname, 'IBBP', 'IBBL', tpl_fname)
+
+ tpl_data = tools.read_file(tpl_fname)
+ self.assertEqual(U_BOOT_TPL_DATA, tpl_data[:len(U_BOOT_TPL_DATA)])
+
+ def testPackX86RomIfwi(self):
+ """Test that an x86 ROM with Integrated Firmware Image can be created"""
+ self._SetupIfwi('fitimage.bin')
+ data = self._DoReadFile('111_x86_rom_ifwi.dts')
+ self._CheckIfwi(data)
+
+ def testPackX86RomIfwiNoDesc(self):
+ """Test that an x86 ROM with IFWI can be created from an ifwi.bin file"""
+ self._SetupIfwi('ifwi.bin')
+ data = self._DoReadFile('112_x86_rom_ifwi_nodesc.dts')
+ self._CheckIfwi(data)
+
+ def testPackX86RomIfwiNoData(self):
+ """Test that an x86 ROM with IFWI handles missing data"""
+ self._SetupIfwi('ifwi.bin')
+ with self.assertRaises(ValueError) as e:
+ data = self._DoReadFile('113_x86_rom_ifwi_nodata.dts')
+ self.assertIn('Could not complete processing of contents',
+ str(e.exception))
+
+ def testIfwiMissing(self):
+ """Test that binman still produces an image if ifwitool is missing"""
+ self._SetupIfwi('fitimage.bin')
+ with test_util.capture_sys_output() as (_, stderr):
+ self._DoTestFile('111_x86_rom_ifwi.dts',
+ force_missing_bintools='ifwitool')
+ err = stderr.getvalue()
+ self.assertRegex(err,
+ "Image 'image'.*missing bintools.*: ifwitool")
+
+ def testCbfsOffset(self):
+ """Test a CBFS with files at particular offsets
+
+        Like all CBFS tests, this is just checking the logic that calls
+        cbfs_util. See cbfs_util_test for full tests (e.g. test_cbfs_offset()).
+ """
+ data = self._DoReadFile('114_cbfs_offset.dts')
+ size = 0x200
+
+ cbfs = cbfs_util.CbfsReader(data)
+ self.assertEqual(size, cbfs.rom_size)
+
+ self.assertIn('u-boot', cbfs.files)
+ cfile = cbfs.files['u-boot']
+ self.assertEqual(U_BOOT_DATA, cfile.data)
+ self.assertEqual(0x40, cfile.cbfs_offset)
+
+ self.assertIn('u-boot-dtb', cbfs.files)
+ cfile2 = cbfs.files['u-boot-dtb']
+ self.assertEqual(U_BOOT_DTB_DATA, cfile2.data)
+ self.assertEqual(0x140, cfile2.cbfs_offset)
+
+ def testFdtmap(self):
+ """Test an FDT map can be inserted in the image"""
+ data = self.data = self._DoReadFileRealDtb('115_fdtmap.dts')
+ fdtmap_data = data[len(U_BOOT_DATA):]
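+        # The fdtmap starts with a 16-byte header: the 8-byte '_FDTMAP_'
+        # magic followed by 8 zero bytes, then the FDT itself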
+ magic = fdtmap_data[:8]
+ self.assertEqual(b'_FDTMAP_', magic)
+ self.assertEqual(tools.get_bytes(0, 8), fdtmap_data[8:16])
+
+ fdt_data = fdtmap_data[16:]
+ dtb = fdt.Fdt.FromData(fdt_data)
+ dtb.Scan()
+ props = self._GetPropTree(dtb, BASE_DTB_PROPS, prefix='/')
+ self.assertEqual({
+ 'image-pos': 0,
+ 'offset': 0,
+ 'u-boot:offset': 0,
+ 'u-boot:size': len(U_BOOT_DATA),
+ 'u-boot:image-pos': 0,
+ 'fdtmap:image-pos': 4,
+ 'fdtmap:offset': 4,
+ 'fdtmap:size': len(fdtmap_data),
+ 'size': len(data),
+ }, props)
+
+ def testFdtmapNoMatch(self):
+ """Check handling of an FDT map when the section cannot be found"""
+ self.data = self._DoReadFileRealDtb('115_fdtmap.dts')
+
+ # Mangle the section name, which should cause a mismatch between the
+ # correct FDT path and the one expected by the section
+ image = control.images['image']
+ image._node.path += '-suffix'
+ entries = image.GetEntries()
+ fdtmap = entries['fdtmap']
+ with self.assertRaises(ValueError) as e:
+ fdtmap._GetFdtmap()
+ self.assertIn("Cannot locate node for path '/binman-suffix'",
+ str(e.exception))
+
+ def testFdtmapHeader(self):
+ """Test an FDT map and image header can be inserted in the image"""
+ data = self.data = self._DoReadFileRealDtb('116_fdtmap_hdr.dts')
+ fdtmap_pos = len(U_BOOT_DATA)
+ fdtmap_data = data[fdtmap_pos:]
+ fdt_data = fdtmap_data[16:]
+ dtb = fdt.Fdt.FromData(fdt_data)
+ fdt_size = dtb.GetFdtObj().totalsize()
+ hdr_data = data[-8:]
+ self.assertEqual(b'BinM', hdr_data[:4])
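+        # The offset field holds the fdtmap position relative to the end of
+        # the 0x400-byte image, so it unpacks as a negative 32-bit
+        # (two's-complement) value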
+ offset = struct.unpack('<I', hdr_data[4:])[0] & 0xffffffff
+ self.assertEqual(fdtmap_pos - 0x400, offset - (1 << 32))
+
+ def testFdtmapHeaderStart(self):
+ """Test an image header can be inserted at the image start"""
+ data = self.data = self._DoReadFileRealDtb('117_fdtmap_hdr_start.dts')
+ fdtmap_pos = 0x100 + len(U_BOOT_DATA)
+ hdr_data = data[:8]
+ self.assertEqual(b'BinM', hdr_data[:4])
+ offset = struct.unpack('<I', hdr_data[4:])[0]
+ self.assertEqual(fdtmap_pos, offset)
+
+ def testFdtmapHeaderPos(self):
+ """Test an image header can be inserted at a chosen position"""
+ data = self.data = self._DoReadFileRealDtb('118_fdtmap_hdr_pos.dts')
+ fdtmap_pos = 0x100 + len(U_BOOT_DATA)
+ hdr_data = data[0x80:0x88]
+ self.assertEqual(b'BinM', hdr_data[:4])
+ offset = struct.unpack('<I', hdr_data[4:])[0]
+ self.assertEqual(fdtmap_pos, offset)
+
+ def testHeaderMissingFdtmap(self):
+ """Test an image header requires an fdtmap"""
+ with self.assertRaises(ValueError) as e:
+ self.data = self._DoReadFileRealDtb('119_fdtmap_hdr_missing.dts')
+ self.assertIn("'image_header' section must have an 'fdtmap' sibling",
+ str(e.exception))
+
+ def testHeaderNoLocation(self):
+        """Test that an image header with no specified location is detected"""
+ with self.assertRaises(ValueError) as e:
+ self.data = self._DoReadFileRealDtb('120_hdr_no_location.dts')
+ self.assertIn("Invalid location 'None', expected 'start' or 'end'",
+ str(e.exception))
+
+ def testEntryExpand(self):
+ """Test extending an entry after it is packed"""
+ data = self._DoReadFile('121_entry_extend.dts')
+ self.assertEqual(b'aaa', data[:3])
+ self.assertEqual(U_BOOT_DATA, data[3:3 + len(U_BOOT_DATA)])
+ self.assertEqual(b'aaa', data[-3:])
+
+ def testEntryExtendBad(self):
+ """Test extending an entry after it is packed, twice"""
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFile('122_entry_extend_twice.dts')
+ self.assertIn("Image '/binman': Entries changed size after packing",
+ str(e.exception))
+
+ def testEntryExtendSection(self):
+ """Test extending an entry within a section after it is packed"""
+ data = self._DoReadFile('123_entry_extend_section.dts')
+ self.assertEqual(b'aaa', data[:3])
+ self.assertEqual(U_BOOT_DATA, data[3:3 + len(U_BOOT_DATA)])
+ self.assertEqual(b'aaa', data[-3:])
+
+ def testCompressDtb(self):
+        """Test that compression of device-tree files is supported"""
+ self._CheckLz4()
+ data = self.data = self._DoReadFileRealDtb('124_compress_dtb.dts')
+ self.assertEqual(U_BOOT_DATA, data[:len(U_BOOT_DATA)])
+ comp_data = data[len(U_BOOT_DATA):]
+ orig = self._decompress(comp_data)
+ dtb = fdt.Fdt.FromData(orig)
+ dtb.Scan()
+ props = self._GetPropTree(dtb, ['size', 'uncomp-size'])
+ expected = {
+ 'u-boot:size': len(U_BOOT_DATA),
+ 'u-boot-dtb:uncomp-size': len(orig),
+ 'u-boot-dtb:size': len(comp_data),
+ 'size': len(data),
+ }
+ self.assertEqual(expected, props)
+
+ def testCbfsUpdateFdt(self):
+ """Test that we can update the device tree with CBFS offset/size info"""
+ self._CheckLz4()
+ data, _, _, out_dtb_fname = self._DoReadFileDtb('125_cbfs_update.dts',
+ update_dtb=True)
+ dtb = fdt.Fdt(out_dtb_fname)
+ dtb.Scan()
+ props = self._GetPropTree(dtb, BASE_DTB_PROPS + ['uncomp-size'])
+ del props['cbfs/u-boot:size']
+ self.assertEqual({
+ 'offset': 0,
+ 'size': len(data),
+ 'image-pos': 0,
+ 'cbfs:offset': 0,
+ 'cbfs:size': len(data),
+ 'cbfs:image-pos': 0,
+ 'cbfs/u-boot:offset': 0x30,
+ 'cbfs/u-boot:uncomp-size': len(U_BOOT_DATA),
+ 'cbfs/u-boot:image-pos': 0x30,
+ 'cbfs/u-boot-dtb:offset': 0xa4,
+ 'cbfs/u-boot-dtb:size': len(U_BOOT_DATA),
+ 'cbfs/u-boot-dtb:image-pos': 0xa4,
+ }, props)
+
+ def testCbfsBadType(self):
+        """Test handling of an unknown cbfs-type"""
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFile('126_cbfs_bad_type.dts')
+ self.assertIn("Unknown cbfs-type 'badtype'", str(e.exception))
+
+ def testList(self):
+ """Test listing the files in an image"""
+ self._CheckLz4()
+ data = self._DoReadFile('127_list.dts')
+ image = control.images['image']
+ entries = image.BuildEntryList()
+ self.assertEqual(7, len(entries))
+
+ ent = entries[0]
+ self.assertEqual(0, ent.indent)
+ self.assertEqual('image', ent.name)
+ self.assertEqual('section', ent.etype)
+ self.assertEqual(len(data), ent.size)
+ self.assertEqual(0, ent.image_pos)
+ self.assertEqual(None, ent.uncomp_size)
+ self.assertEqual(0, ent.offset)
+
+ ent = entries[1]
+ self.assertEqual(1, ent.indent)
+ self.assertEqual('u-boot', ent.name)
+ self.assertEqual('u-boot', ent.etype)
+ self.assertEqual(len(U_BOOT_DATA), ent.size)
+ self.assertEqual(0, ent.image_pos)
+ self.assertEqual(None, ent.uncomp_size)
+ self.assertEqual(0, ent.offset)
+
+ ent = entries[2]
+ self.assertEqual(1, ent.indent)
+ self.assertEqual('section', ent.name)
+ self.assertEqual('section', ent.etype)
+ section_size = ent.size
+ self.assertEqual(0x100, ent.image_pos)
+ self.assertEqual(None, ent.uncomp_size)
+ self.assertEqual(0x100, ent.offset)
+
+ ent = entries[3]
+ self.assertEqual(2, ent.indent)
+ self.assertEqual('cbfs', ent.name)
+ self.assertEqual('cbfs', ent.etype)
+ self.assertEqual(0x400, ent.size)
+ self.assertEqual(0x100, ent.image_pos)
+ self.assertEqual(None, ent.uncomp_size)
+ self.assertEqual(0, ent.offset)
+
+ ent = entries[4]
+ self.assertEqual(3, ent.indent)
+ self.assertEqual('u-boot', ent.name)
+ self.assertEqual('u-boot', ent.etype)
+ self.assertEqual(len(U_BOOT_DATA), ent.size)
+ self.assertEqual(0x138, ent.image_pos)
+ self.assertEqual(None, ent.uncomp_size)
+ self.assertEqual(0x38, ent.offset)
+
+ ent = entries[5]
+ self.assertEqual(3, ent.indent)
+ self.assertEqual('u-boot-dtb', ent.name)
+ self.assertEqual('text', ent.etype)
+ self.assertGreater(len(COMPRESS_DATA), ent.size)
+ self.assertEqual(0x178, ent.image_pos)
+ self.assertEqual(len(COMPRESS_DATA), ent.uncomp_size)
+ self.assertEqual(0x78, ent.offset)
+
+ ent = entries[6]
+ self.assertEqual(2, ent.indent)
+ self.assertEqual('u-boot-dtb', ent.name)
+ self.assertEqual('u-boot-dtb', ent.etype)
+ self.assertEqual(0x500, ent.image_pos)
+ self.assertEqual(len(U_BOOT_DTB_DATA), ent.uncomp_size)
+ dtb_size = ent.size
+ # Compressing this data expands it since headers are added
+ self.assertGreater(dtb_size, len(U_BOOT_DTB_DATA))
+ self.assertEqual(0x400, ent.offset)
+
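+        # The image is the section plus its 0x100 offset; the section is the
+        # 0x400-byte CBFS plus the compressed DTB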
+ self.assertEqual(len(data), 0x100 + section_size)
+ self.assertEqual(section_size, 0x400 + dtb_size)
+
+ def testFindFdtmap(self):
+ """Test locating an FDT map in an image"""
+ self._CheckLz4()
+ data = self.data = self._DoReadFileRealDtb('128_decode_image.dts')
+ image = control.images['image']
+ entries = image.GetEntries()
+ entry = entries['fdtmap']
+ self.assertEqual(entry.image_pos, fdtmap.LocateFdtmap(data))
+
+ def testFindFdtmapMissing(self):
+        """Test failing to locate an FDT map"""
+ data = self._DoReadFile('005_simple.dts')
+ self.assertEqual(None, fdtmap.LocateFdtmap(data))
+
+ def testFindImageHeader(self):
+        """Test locating an image header"""
+ self._CheckLz4()
+ data = self.data = self._DoReadFileRealDtb('128_decode_image.dts')
+ image = control.images['image']
+ entries = image.GetEntries()
+ entry = entries['fdtmap']
+ # The header should point to the FDT map
+ self.assertEqual(entry.image_pos, image_header.LocateHeaderOffset(data))
+
+ def testFindImageHeaderStart(self):
+        """Test locating an image header at the start of an image"""
+ data = self.data = self._DoReadFileRealDtb('117_fdtmap_hdr_start.dts')
+ image = control.images['image']
+ entries = image.GetEntries()
+ entry = entries['fdtmap']
+ # The header should point to the FDT map
+ self.assertEqual(entry.image_pos, image_header.LocateHeaderOffset(data))
+
+ def testFindImageHeaderMissing(self):
+ """Test failing to locate an image header"""
+ data = self._DoReadFile('005_simple.dts')
+ self.assertEqual(None, image_header.LocateHeaderOffset(data))
+
+ def testReadImage(self):
+ """Test reading an image and accessing its FDT map"""
+ self._CheckLz4()
+ data = self.data = self._DoReadFileRealDtb('128_decode_image.dts')
+ image_fname = tools.get_output_filename('image.bin')
+ orig_image = control.images['image']
+ image = Image.FromFile(image_fname)
+ self.assertEqual(orig_image.GetEntries().keys(),
+ image.GetEntries().keys())
+
+ orig_entry = orig_image.GetEntries()['fdtmap']
+ entry = image.GetEntries()['fdtmap']
+        self.assertEqual(orig_entry.offset, entry.offset)
+        self.assertEqual(orig_entry.size, entry.size)
+        self.assertEqual(orig_entry.image_pos, entry.image_pos)
+
+ def testReadImageNoHeader(self):
+ """Test accessing an image's FDT map without an image header"""
+ self._CheckLz4()
+ data = self._DoReadFileRealDtb('129_decode_image_nohdr.dts')
+ image_fname = tools.get_output_filename('image.bin')
+ image = Image.FromFile(image_fname)
+ self.assertTrue(isinstance(image, Image))
+ self.assertEqual('image', image.image_name[-5:])
+
+ def testReadImageFail(self):
+        """Test failing to read an image's FDT map"""
+ self._DoReadFile('005_simple.dts')
+ image_fname = tools.get_output_filename('image.bin')
+ with self.assertRaises(ValueError) as e:
+ image = Image.FromFile(image_fname)
+ self.assertIn("Cannot find FDT map in image", str(e.exception))
+
+ def testListCmd(self):
+ """Test listing the files in an image using an Fdtmap"""
+ self._CheckLz4()
+ data = self._DoReadFileRealDtb('130_list_fdtmap.dts')
+
+ # lz4 compression size differs depending on the version
+ image = control.images['image']
+ entries = image.GetEntries()
+ section_size = entries['section'].size
+ fdt_size = entries['section'].GetEntries()['u-boot-dtb'].size
+ fdtmap_offset = entries['fdtmap'].offset
+
+ tmpdir = None
+ try:
+ tmpdir, updated_fname = self._SetupImageInTmpdir()
+ with test_util.capture_sys_output() as (stdout, stderr):
+ self._DoBinman('ls', '-i', updated_fname)
+ finally:
+ if tmpdir:
+ shutil.rmtree(tmpdir)
+ lines = stdout.getvalue().splitlines()
+ expected = [
+'Name Image-pos Size Entry-type Offset Uncomp-size',
+'----------------------------------------------------------------------',
+'image 0 c00 section 0',
+' u-boot 0 4 u-boot 0',
+' section 100 %x section 100' % section_size,
+' cbfs 100 400 cbfs 0',
+' u-boot 120 4 u-boot 20',
+' u-boot-dtb 180 105 u-boot-dtb 80 3c9',
+' u-boot-dtb 500 %x u-boot-dtb 400 3c9' % fdt_size,
+' fdtmap %x 3bd fdtmap %x' %
+ (fdtmap_offset, fdtmap_offset),
+' image-header bf8 8 image-header bf8',
+ ]
+ self.assertEqual(expected, lines)
+
+ def testListCmdFail(self):
+ """Test failing to list an image"""
+ self._DoReadFile('005_simple.dts')
+ tmpdir = None
+ try:
+ tmpdir, updated_fname = self._SetupImageInTmpdir()
+ with self.assertRaises(ValueError) as e:
+ self._DoBinman('ls', '-i', updated_fname)
+ finally:
+ if tmpdir:
+ shutil.rmtree(tmpdir)
+ self.assertIn("Cannot find FDT map in image", str(e.exception))
+
+ def _RunListCmd(self, paths, expected):
+ """List out entries and check the result
+
+ Args:
+ paths: List of paths to pass to the list command
+ expected: Expected list of filenames to be returned, in order
+ """
+ self._CheckLz4()
+ self._DoReadFileRealDtb('130_list_fdtmap.dts')
+ image_fname = tools.get_output_filename('image.bin')
+ image = Image.FromFile(image_fname)
+ lines = image.GetListEntries(paths)[1]
+ files = [line[0].strip() for line in lines[1:]]
+ self.assertEqual(expected, files)
+
+ def testListCmdSection(self):
+ """Test listing the files in a section"""
+ self._RunListCmd(['section'],
+ ['section', 'cbfs', 'u-boot', 'u-boot-dtb', 'u-boot-dtb'])
+
+ def testListCmdFile(self):
+ """Test listing a particular file"""
+ self._RunListCmd(['*u-boot-dtb'], ['u-boot-dtb', 'u-boot-dtb'])
+
+ def testListCmdWildcard(self):
+ """Test listing a wildcarded file"""
+ self._RunListCmd(['*boot*'],
+ ['u-boot', 'u-boot', 'u-boot-dtb', 'u-boot-dtb'])
+
+ def testListCmdWildcardMulti(self):
+        """Test listing multiple wildcarded files"""
+ self._RunListCmd(['*cb*', '*head*'],
+ ['cbfs', 'u-boot', 'u-boot-dtb', 'image-header'])
+
+ def testListCmdEmpty(self):
+        """Test listing when no entries match"""
+ self._RunListCmd(['nothing'], [])
+
+ def testListCmdPath(self):
+ """Test listing the files in a sub-entry of a section"""
+ self._RunListCmd(['section/cbfs'], ['cbfs', 'u-boot', 'u-boot-dtb'])
+
+ def _RunExtractCmd(self, entry_name, decomp=True):
+ """Extract an entry from an image
+
+ Args:
+ entry_name: Entry name to extract
+ decomp: True to decompress the data if compressed, False to leave
+ it in its raw uncompressed format
+
+ Returns:
+ data from entry
+ """
+ self._CheckLz4()
+ self._DoReadFileRealDtb('130_list_fdtmap.dts')
+ image_fname = tools.get_output_filename('image.bin')
+ return control.ReadEntry(image_fname, entry_name, decomp)
+
+ def testExtractSimple(self):
+ """Test extracting a single file"""
+ data = self._RunExtractCmd('u-boot')
+ self.assertEqual(U_BOOT_DATA, data)
+
+ def testExtractSection(self):
+ """Test extracting the files in a section"""
+ data = self._RunExtractCmd('section')
+ cbfs_data = data[:0x400]
+ cbfs = cbfs_util.CbfsReader(cbfs_data)
+ self.assertEqual(['u-boot', 'u-boot-dtb', ''], list(cbfs.files.keys()))
+ dtb_data = data[0x400:]
+ dtb = self._decompress(dtb_data)
+ self.assertEqual(EXTRACT_DTB_SIZE, len(dtb))
+
+ def testExtractCompressed(self):
+ """Test extracting compressed data"""
+ data = self._RunExtractCmd('section/u-boot-dtb')
+ self.assertEqual(EXTRACT_DTB_SIZE, len(data))
+
+ def testExtractRaw(self):
+ """Test extracting compressed data without decompressing it"""
+ data = self._RunExtractCmd('section/u-boot-dtb', decomp=False)
+ dtb = self._decompress(data)
+ self.assertEqual(EXTRACT_DTB_SIZE, len(dtb))
+
+ def testExtractCbfs(self):
+ """Test extracting CBFS data"""
+ data = self._RunExtractCmd('section/cbfs/u-boot')
+ self.assertEqual(U_BOOT_DATA, data)
+
+ def testExtractCbfsCompressed(self):
+ """Test extracting CBFS compressed data"""
+ data = self._RunExtractCmd('section/cbfs/u-boot-dtb')
+ self.assertEqual(EXTRACT_DTB_SIZE, len(data))
+
+ def testExtractCbfsRaw(self):
+ """Test extracting CBFS compressed data without decompressing it"""
+ bintool = self.comp_bintools['lzma_alone']
+ self._CheckBintool(bintool)
+ data = self._RunExtractCmd('section/cbfs/u-boot-dtb', decomp=False)
+ dtb = bintool.decompress(data)
+ self.assertEqual(EXTRACT_DTB_SIZE, len(dtb))
+
+ def testExtractBadEntry(self):
+ """Test extracting a bad section path"""
+ with self.assertRaises(ValueError) as e:
+ self._RunExtractCmd('section/does-not-exist')
+ self.assertIn("Entry 'does-not-exist' not found in '/section'",
+ str(e.exception))
+
+ def testExtractMissingFile(self):
+ """Test extracting file that does not exist"""
+ with self.assertRaises(IOError) as e:
+ control.ReadEntry('missing-file', 'name')
+
+ def testExtractBadFile(self):
+ """Test extracting an invalid file"""
+ fname = os.path.join(self._indir, 'badfile')
+ tools.write_file(fname, b'')
+ with self.assertRaises(ValueError) as e:
+ control.ReadEntry(fname, 'name')
+
+ def testExtractCmd(self):
+        """Test extracting a file from an image on the command line"""
+ self._CheckLz4()
+ self._DoReadFileRealDtb('130_list_fdtmap.dts')
+        fname = os.path.join(self._indir, 'output.extract')
+ tmpdir = None
+ try:
+ tmpdir, updated_fname = self._SetupImageInTmpdir()
+ with test_util.capture_sys_output() as (stdout, stderr):
+ self._DoBinman('extract', '-i', updated_fname, 'u-boot',
+ '-f', fname)
+ finally:
+ if tmpdir:
+ shutil.rmtree(tmpdir)
+ data = tools.read_file(fname)
+ self.assertEqual(U_BOOT_DATA, data)
+
+ def testExtractOneEntry(self):
+        """Test extracting a single entry from an image"""
+ self._CheckLz4()
+ self._DoReadFileRealDtb('130_list_fdtmap.dts')
+ image_fname = tools.get_output_filename('image.bin')
+        fname = os.path.join(self._indir, 'output.extract')
+ control.ExtractEntries(image_fname, fname, None, ['u-boot'])
+ data = tools.read_file(fname)
+ self.assertEqual(U_BOOT_DATA, data)
+
+ def _CheckExtractOutput(self, decomp):
+ """Helper to test file output with and without decompression
+
+ Args:
+ decomp: True to decompress entry data, False to output it raw
+ """
+ def _CheckPresent(entry_path, expect_data, expect_size=None):
+ """Check and remove expected file
+
+ This checks the data/size of a file and removes the file both from
+ the outfiles set and from the output directory. Once all files are
+ processed, both the set and directory should be empty.
+
+ Args:
+ entry_path: Entry path
+ expect_data: Data to expect in file, or None to skip check
+ expect_size: Size of data to expect in file, or None to skip
+ """
+ path = os.path.join(outdir, entry_path)
+ data = tools.read_file(path)
+ os.remove(path)
+ if expect_data:
+ self.assertEqual(expect_data, data)
+ elif expect_size:
+ self.assertEqual(expect_size, len(data))
+ outfiles.remove(path)
+
+ def _CheckDirPresent(name):
+ """Remove expected directory
+
+ This gives an error if the directory does not exist as expected
+
+ Args:
+ name: Name of directory to remove
+ """
+ path = os.path.join(outdir, name)
+ os.rmdir(path)
+
+ self._DoReadFileRealDtb('130_list_fdtmap.dts')
+ image_fname = tools.get_output_filename('image.bin')
+ outdir = os.path.join(self._indir, 'extract')
+ einfos = control.ExtractEntries(image_fname, None, outdir, [], decomp)
+
+        # Create a set of all files that were output (should be 9)
+ outfiles = set()
+ for root, dirs, files in os.walk(outdir):
+ outfiles |= set([os.path.join(root, fname) for fname in files])
+ self.assertEqual(9, len(outfiles))
+ self.assertEqual(9, len(einfos))
+
+ image = control.images['image']
+ entries = image.GetEntries()
+
+ # Check the 9 files in various ways
+ section = entries['section']
+ section_entries = section.GetEntries()
+ cbfs_entries = section_entries['cbfs'].GetEntries()
+ _CheckPresent('u-boot', U_BOOT_DATA)
+ _CheckPresent('section/cbfs/u-boot', U_BOOT_DATA)
+ dtb_len = EXTRACT_DTB_SIZE
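+        # Without decompression the extracted files keep their compressed
+        # sizes, so take the expected length from the entry instead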
+ if not decomp:
+ dtb_len = cbfs_entries['u-boot-dtb'].size
+ _CheckPresent('section/cbfs/u-boot-dtb', None, dtb_len)
+ if not decomp:
+ dtb_len = section_entries['u-boot-dtb'].size
+ _CheckPresent('section/u-boot-dtb', None, dtb_len)
+
+ fdtmap = entries['fdtmap']
+ _CheckPresent('fdtmap', fdtmap.data)
+ hdr = entries['image-header']
+ _CheckPresent('image-header', hdr.data)
+
+ _CheckPresent('section/root', section.data)
+ cbfs = section_entries['cbfs']
+ _CheckPresent('section/cbfs/root', cbfs.data)
+ data = tools.read_file(image_fname)
+ _CheckPresent('root', data)
+
+ # There should be no files left. Remove all the directories to check.
+ # If there are any files/dirs remaining, one of these checks will fail.
+ self.assertEqual(0, len(outfiles))
+ _CheckDirPresent('section/cbfs')
+ _CheckDirPresent('section')
+ _CheckDirPresent('')
+ self.assertFalse(os.path.exists(outdir))
+
+ def testExtractAllEntries(self):
+ """Test extracting all entries"""
+ self._CheckLz4()
+ self._CheckExtractOutput(decomp=True)
+
+ def testExtractAllEntriesRaw(self):
+ """Test extracting all entries without decompressing them"""
+ self._CheckLz4()
+ self._CheckExtractOutput(decomp=False)
+
+ def testExtractSelectedEntries(self):
+ """Test extracting some entries"""
+ self._CheckLz4()
+ self._DoReadFileRealDtb('130_list_fdtmap.dts')
+ image_fname = tools.get_output_filename('image.bin')
+ outdir = os.path.join(self._indir, 'extract')
+ einfos = control.ExtractEntries(image_fname, None, outdir,
+ ['*cb*', '*head*'])
+
+ # File output is tested by testExtractAllEntries(), so just check that
+ # the expected entries are selected
+ names = [einfo.name for einfo in einfos]
+ self.assertEqual(names,
+ ['cbfs', 'u-boot', 'u-boot-dtb', 'image-header'])
+
+ def testExtractNoEntryPaths(self):
+        """Test extracting with no entry path specified"""
+ self._CheckLz4()
+ self._DoReadFileRealDtb('130_list_fdtmap.dts')
+ image_fname = tools.get_output_filename('image.bin')
+ with self.assertRaises(ValueError) as e:
+ control.ExtractEntries(image_fname, 'fname', None, [])
+ self.assertIn('Must specify an entry path to write with -f',
+ str(e.exception))
+
+ def testExtractTooManyEntryPaths(self):
+        """Test extracting with too many entry paths specified"""
+ self._CheckLz4()
+ self._DoReadFileRealDtb('130_list_fdtmap.dts')
+ image_fname = tools.get_output_filename('image.bin')
+ with self.assertRaises(ValueError) as e:
+ control.ExtractEntries(image_fname, 'fname', None, ['a', 'b'])
+ self.assertIn('Must specify exactly one entry path to write with -f',
+ str(e.exception))
+
+ def testPackAlignSection(self):
+ """Test that sections can have alignment"""
+ self._DoReadFile('131_pack_align_section.dts')
+
+ self.assertIn('image', control.images)
+ image = control.images['image']
+ entries = image.GetEntries()
+ self.assertEqual(3, len(entries))
+
+ # First u-boot
+ self.assertIn('u-boot', entries)
+ entry = entries['u-boot']
+ self.assertEqual(0, entry.offset)
+ self.assertEqual(0, entry.image_pos)
+ self.assertEqual(len(U_BOOT_DATA), entry.contents_size)
+ self.assertEqual(len(U_BOOT_DATA), entry.size)
+
+ # Section0
+ self.assertIn('section0', entries)
+ section0 = entries['section0']
+ self.assertEqual(0x10, section0.offset)
+ self.assertEqual(0x10, section0.image_pos)
+ self.assertEqual(len(U_BOOT_DATA), section0.size)
+
+ # Second u-boot
+ section_entries = section0.GetEntries()
+ self.assertIn('u-boot', section_entries)
+ entry = section_entries['u-boot']
+ self.assertEqual(0, entry.offset)
+ self.assertEqual(0x10, entry.image_pos)
+ self.assertEqual(len(U_BOOT_DATA), entry.contents_size)
+ self.assertEqual(len(U_BOOT_DATA), entry.size)
+
+ # Section1
+ self.assertIn('section1', entries)
+ section1 = entries['section1']
+ self.assertEqual(0x14, section1.offset)
+ self.assertEqual(0x14, section1.image_pos)
+ self.assertEqual(0x20, section1.size)
+
+ # Second u-boot
+ section_entries = section1.GetEntries()
+ self.assertIn('u-boot', section_entries)
+ entry = section_entries['u-boot']
+ self.assertEqual(0, entry.offset)
+ self.assertEqual(0x14, entry.image_pos)
+ self.assertEqual(len(U_BOOT_DATA), entry.contents_size)
+ self.assertEqual(len(U_BOOT_DATA), entry.size)
+
+ # Section2
+ self.assertIn('section2', section_entries)
+ section2 = section_entries['section2']
+ self.assertEqual(0x4, section2.offset)
+ self.assertEqual(0x18, section2.image_pos)
+ self.assertEqual(4, section2.size)
+
+ # Third u-boot
+ section_entries = section2.GetEntries()
+ self.assertIn('u-boot', section_entries)
+ entry = section_entries['u-boot']
+ self.assertEqual(0, entry.offset)
+ self.assertEqual(0x18, entry.image_pos)
+ self.assertEqual(len(U_BOOT_DATA), entry.contents_size)
+ self.assertEqual(len(U_BOOT_DATA), entry.size)
+
+ def _RunReplaceCmd(self, entry_name, data, decomp=True, allow_resize=True,
+ dts='132_replace.dts'):
+ """Replace an entry in an image
+
+ This writes the entry data to update it, then opens the updated file and
+ returns the value that it now finds there.
+
+ Args:
+ entry_name: Entry name to replace
+ data: Data to replace it with
+ decomp: True to compress the data if needed, False if data is
+ already compressed so should be used as is
+ allow_resize: True to allow entries to change size, False to raise
+ an exception
+
+ Returns:
+ Tuple:
+ data from entry
+ data from fdtmap (excluding header)
+ Image object that was modified
+ """
+ dtb_data = self._DoReadFileDtb(dts, use_real_dtb=True,
+ update_dtb=True)[1]
+
+ self.assertIn('image', control.images)
+ image = control.images['image']
+ entries = image.GetEntries()
+ orig_dtb_data = entries['u-boot-dtb'].data
+ orig_fdtmap_data = entries['fdtmap'].data
+
+ image_fname = tools.get_output_filename('image.bin')
+ updated_fname = tools.get_output_filename('image-updated.bin')
+ tools.write_file(updated_fname, tools.read_file(image_fname))
+ image = control.WriteEntry(updated_fname, entry_name, data, decomp,
+ allow_resize)
+ data = control.ReadEntry(updated_fname, entry_name, decomp)
+
+ # The DT data should not change unless resized:
+ if not allow_resize:
+ new_dtb_data = entries['u-boot-dtb'].data
+ self.assertEqual(new_dtb_data, orig_dtb_data)
+ new_fdtmap_data = entries['fdtmap'].data
+ self.assertEqual(new_fdtmap_data, orig_fdtmap_data)
+
+ return data, orig_fdtmap_data[fdtmap.FDTMAP_HDR_LEN:], image
+
+ def testReplaceSimple(self):
+ """Test replacing a single file"""
+ expected = b'x' * len(U_BOOT_DATA)
+ data, expected_fdtmap, _ = self._RunReplaceCmd('u-boot', expected,
+ allow_resize=False)
+ self.assertEqual(expected, data)
+
+ # Test that the state looks right. There should be an FDT for the fdtmap
+        # that we just read back in, and it should match what we find in the
+ # 'control' tables. Checking for an FDT that does not exist should
+ # return None.
+ path, fdtmap = state.GetFdtContents('fdtmap')
+ self.assertIsNotNone(path)
+ self.assertEqual(expected_fdtmap, fdtmap)
+
+ dtb = state.GetFdtForEtype('fdtmap')
+ self.assertEqual(dtb.GetContents(), fdtmap)
+
+ missing_path, missing_fdtmap = state.GetFdtContents('missing')
+ self.assertIsNone(missing_path)
+ self.assertIsNone(missing_fdtmap)
+
+ missing_dtb = state.GetFdtForEtype('missing')
+ self.assertIsNone(missing_dtb)
+
+ self.assertEqual('/binman', state.fdt_path_prefix)
+
+ def testReplaceResizeFail(self):
+ """Test replacing a file by something larger"""
+ expected = U_BOOT_DATA + b'x'
+ with self.assertRaises(ValueError) as e:
+ self._RunReplaceCmd('u-boot', expected, allow_resize=False,
+ dts='139_replace_repack.dts')
+ self.assertIn("Node '/u-boot': Entry data size does not match, but resize is disabled",
+ str(e.exception))
+
+ def testReplaceMulti(self):
+ """Test replacing entry data where multiple images are generated"""
+ data = self._DoReadFileDtb('133_replace_multi.dts', use_real_dtb=True,
+ update_dtb=True)[0]
+ expected = b'x' * len(U_BOOT_DATA)
+ updated_fname = tools.get_output_filename('image-updated.bin')
+ tools.write_file(updated_fname, data)
+ entry_name = 'u-boot'
+ control.WriteEntry(updated_fname, entry_name, expected,
+ allow_resize=False)
+ data = control.ReadEntry(updated_fname, entry_name)
+ self.assertEqual(expected, data)
+
+ # Check the state looks right.
+ self.assertEqual('/binman/image', state.fdt_path_prefix)
+
+ # Now check we can write the first image
+ image_fname = tools.get_output_filename('first-image.bin')
+ updated_fname = tools.get_output_filename('first-updated.bin')
+ tools.write_file(updated_fname, tools.read_file(image_fname))
+ entry_name = 'u-boot'
+ control.WriteEntry(updated_fname, entry_name, expected,
+ allow_resize=False)
+ data = control.ReadEntry(updated_fname, entry_name)
+ self.assertEqual(expected, data)
+
+ # Check the state looks right.
+ self.assertEqual('/binman/first-image', state.fdt_path_prefix)
+
+ def testUpdateFdtAllRepack(self):
+ """Test that all device trees are updated with offset/size info"""
+ self._SetupSplElf()
+ self._SetupTplElf()
+ data = self._DoReadFileRealDtb('134_fdt_update_all_repack.dts')
+ SECTION_SIZE = 0x300
+ DTB_SIZE = 602
+ FDTMAP_SIZE = 608
+ base_expected = {
+ 'offset': 0,
+ 'size': SECTION_SIZE + DTB_SIZE * 2 + FDTMAP_SIZE,
+ 'image-pos': 0,
+ 'section:offset': 0,
+ 'section:size': SECTION_SIZE,
+ 'section:image-pos': 0,
+ 'section/u-boot-dtb:offset': 4,
+ 'section/u-boot-dtb:size': 636,
+ 'section/u-boot-dtb:image-pos': 4,
+ 'u-boot-spl-dtb:offset': SECTION_SIZE,
+ 'u-boot-spl-dtb:size': DTB_SIZE,
+ 'u-boot-spl-dtb:image-pos': SECTION_SIZE,
+ 'u-boot-tpl-dtb:offset': SECTION_SIZE + DTB_SIZE,
+ 'u-boot-tpl-dtb:image-pos': SECTION_SIZE + DTB_SIZE,
+ 'u-boot-tpl-dtb:size': DTB_SIZE,
+ 'fdtmap:offset': SECTION_SIZE + DTB_SIZE * 2,
+ 'fdtmap:size': FDTMAP_SIZE,
+ 'fdtmap:image-pos': SECTION_SIZE + DTB_SIZE * 2,
+ }
+ main_expected = {
+ 'section:orig-size': SECTION_SIZE,
+ 'section/u-boot-dtb:orig-offset': 4,
+ }
+
+ # We expect three device-tree files in the output, with the first one
+ # within a fixed-size section.
+ # Read them in sequence. We look for an 'spl' property in the SPL tree,
+ # and 'tpl' in the TPL tree, to make sure they are distinct from the
+ # main U-Boot tree. All three should have the same positions and offset
+ # except that the main tree should include the main_expected properties
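+        # The first DTB sits at offset 4 within the section (see
+        # 'section/u-boot-dtb:offset' above)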
+ start = 4
+ for item in ['', 'spl', 'tpl', None]:
+ if item is None:
+ start += 16 # Move past fdtmap header
+ dtb = fdt.Fdt.FromData(data[start:])
+ dtb.Scan()
+ props = self._GetPropTree(dtb,
+ BASE_DTB_PROPS + REPACK_DTB_PROPS + ['spl', 'tpl'],
+ prefix='/' if item is None else '/binman/')
+ expected = dict(base_expected)
+ if item:
+ expected[item] = 0
+ else:
+                # Main DTB and fdtmap should include the 'orig-' properties
+ expected.update(main_expected)
+ # Helpful for debugging:
+ #for prop in sorted(props):
+ #print('prop %s %s %s' % (prop, props[prop], expected[prop]))
+ self.assertEqual(expected, props)
+ if item == '':
+ start = SECTION_SIZE
+ else:
+ start += dtb._fdt_obj.totalsize()
+
+ def testFdtmapHeaderMiddle(self):
+ """Test an FDT map in the middle of an image when it should be at end"""
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFileRealDtb('135_fdtmap_hdr_middle.dts')
+ self.assertIn("Invalid sibling order 'middle' for image-header: Must be at 'end' to match location",
+ str(e.exception))
+
+ def testFdtmapHeaderStartBad(self):
+ """Test an FDT map in middle of an image when it should be at start"""
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFileRealDtb('136_fdtmap_hdr_startbad.dts')
+ self.assertIn("Invalid sibling order 'end' for image-header: Must be at 'start' to match location",
+ str(e.exception))
+
+ def testFdtmapHeaderEndBad(self):
+ """Test an FDT map at the start of an image when it should be at end"""
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFileRealDtb('137_fdtmap_hdr_endbad.dts')
+ self.assertIn("Invalid sibling order 'start' for image-header: Must be at 'end' to match location",
+ str(e.exception))
+
+ def testFdtmapHeaderNoSize(self):
+ """Test an image header at the end of an image with undefined size"""
+ self._DoReadFileRealDtb('138_fdtmap_hdr_nosize.dts')
+
+ def testReplaceResize(self):
+ """Test replacing a single file in an entry with a larger file"""
+ expected = U_BOOT_DATA + b'x'
+ data, _, image = self._RunReplaceCmd('u-boot', expected,
+ dts='139_replace_repack.dts')
+ self.assertEqual(expected, data)
+
+ entries = image.GetEntries()
+ dtb_data = entries['u-boot-dtb'].data
+ dtb = fdt.Fdt.FromData(dtb_data)
+ dtb.Scan()
+
+ # The u-boot section should now be larger in the dtb
+ node = dtb.GetNode('/binman/u-boot')
+ self.assertEqual(len(expected), fdt_util.GetInt(node, 'size'))
+
+ # Same for the fdtmap
+ fdata = entries['fdtmap'].data
+ fdtb = fdt.Fdt.FromData(fdata[fdtmap.FDTMAP_HDR_LEN:])
+ fdtb.Scan()
+ fnode = fdtb.GetNode('/u-boot')
+ self.assertEqual(len(expected), fdt_util.GetInt(fnode, 'size'))
+
+ def testReplaceResizeNoRepack(self):
+ """Test replacing an entry with a larger file when not allowed"""
+ expected = U_BOOT_DATA + b'x'
+ with self.assertRaises(ValueError) as e:
+ self._RunReplaceCmd('u-boot', expected)
+ self.assertIn('Entry data size does not match, but allow-repack is not present for this image',
+ str(e.exception))
+
+ def testEntryShrink(self):
+ """Test contracting an entry after it is packed"""
+ try:
+ state.SetAllowEntryContraction(True)
+ data = self._DoReadFileDtb('140_entry_shrink.dts',
+ update_dtb=True)[0]
+ finally:
+ state.SetAllowEntryContraction(False)
+ self.assertEqual(b'a', data[:1])
+ self.assertEqual(U_BOOT_DATA, data[1:1 + len(U_BOOT_DATA)])
+ self.assertEqual(b'a', data[-1:])
+
+ def testEntryShrinkFail(self):
+ """Test not being allowed to contract an entry after it is packed"""
+ data = self._DoReadFileDtb('140_entry_shrink.dts', update_dtb=True)[0]
+
+ # In this case there is a spare byte at the end of the data. The size of
+ # the contents is only 1 byte but we still have the size before it
+ # shrunk.
+ self.assertEqual(b'a\0', data[:2])
+ self.assertEqual(U_BOOT_DATA, data[2:2 + len(U_BOOT_DATA)])
+ self.assertEqual(b'a\0', data[-2:])
+
+ def testDescriptorOffset(self):
+        """Test that the Intel descriptor is always placed at the start"""
+ data = self._DoReadFileDtb('141_descriptor_offset.dts')
+ image = control.images['image']
+ entries = image.GetEntries()
+ desc = entries['intel-descriptor']
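+        # 0xff800000 is 4GB minus 8MB, i.e. the start of an 8MB ROM mapped
+        # at the top of the 32-bit address space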
+        self.assertEqual(0xff800000, desc.offset)
+        self.assertEqual(0xff800000, desc.image_pos)
+
+ def testReplaceCbfs(self):
+ """Test replacing a single file in CBFS without changing the size"""
+ self._CheckLz4()
+ expected = b'x' * len(U_BOOT_DATA)
+ data = self._DoReadFileRealDtb('142_replace_cbfs.dts')
+ updated_fname = tools.get_output_filename('image-updated.bin')
+ tools.write_file(updated_fname, data)
+ entry_name = 'section/cbfs/u-boot'
+ control.WriteEntry(updated_fname, entry_name, expected,
+ allow_resize=True)
+ data = control.ReadEntry(updated_fname, entry_name)
+ self.assertEqual(expected, data)
+
+ def testReplaceResizeCbfs(self):
+ """Test replacing a single file in CBFS with one of a different size"""
+ self._CheckLz4()
+ expected = U_BOOT_DATA + b'x'
+ data = self._DoReadFileRealDtb('142_replace_cbfs.dts')
+ updated_fname = tools.get_output_filename('image-updated.bin')
+ tools.write_file(updated_fname, data)
+ entry_name = 'section/cbfs/u-boot'
+ control.WriteEntry(updated_fname, entry_name, expected,
+ allow_resize=True)
+ data = control.ReadEntry(updated_fname, entry_name)
+ self.assertEqual(expected, data)
+
+ def _SetupForReplace(self):
+ """Set up some files to use to replace entries
+
+ This generates an image, copies it to a new file, extracts all the files
+ in it and updates some of them
+
+ Returns:
+ List
+ Image filename
+ Output directory
+ Expected values for updated entries, each a string
+ """
+ data = self._DoReadFileRealDtb('143_replace_all.dts')
+
+ updated_fname = tools.get_output_filename('image-updated.bin')
+ tools.write_file(updated_fname, data)
+
+ outdir = os.path.join(self._indir, 'extract')
+ einfos = control.ExtractEntries(updated_fname, None, outdir, [])
+
+ expected1 = b'x' + U_BOOT_DATA + b'y'
+ u_boot_fname1 = os.path.join(outdir, 'u-boot')
+ tools.write_file(u_boot_fname1, expected1)
+
+ expected2 = b'a' + U_BOOT_DATA + b'b'
+ u_boot_fname2 = os.path.join(outdir, 'u-boot2')
+ tools.write_file(u_boot_fname2, expected2)
+
+ expected_text = b'not the same text'
+ text_fname = os.path.join(outdir, 'text')
+ tools.write_file(text_fname, expected_text)
+
+ dtb_fname = os.path.join(outdir, 'u-boot-dtb')
+ dtb = fdt.FdtScan(dtb_fname)
+ node = dtb.GetNode('/binman/text')
+ node.AddString('my-property', 'the value')
+ dtb.Sync(auto_resize=True)
+ dtb.Flush()
+
+ return updated_fname, outdir, expected1, expected2, expected_text
+
+ def _CheckReplaceMultiple(self, entry_paths):
+ """Handle replacing the contents of multiple entries
+
+ Args:
+ entry_paths: List of entry paths to replace
+
+ Returns:
+ List
+ Dict of entries in the image:
+ key: Entry name
+ Value: Entry object
+ Expected values for updated entries, each a string
+ """
+ updated_fname, outdir, expected1, expected2, expected_text = (
+ self._SetupForReplace())
+ control.ReplaceEntries(updated_fname, None, outdir, entry_paths)
+
+ image = Image.FromFile(updated_fname)
+ image.LoadData()
+ return image.GetEntries(), expected1, expected2, expected_text
+
+ def testReplaceAll(self):
+ """Test replacing the contents of all entries"""
+ entries, expected1, expected2, expected_text = (
+ self._CheckReplaceMultiple([]))
+ data = entries['u-boot'].data
+ self.assertEqual(expected1, data)
+
+ data = entries['u-boot2'].data
+ self.assertEqual(expected2, data)
+
+ data = entries['text'].data
+ self.assertEqual(expected_text, data)
+
+ # Check that the device tree is updated
+ data = entries['u-boot-dtb'].data
+ dtb = fdt.Fdt.FromData(data)
+ dtb.Scan()
+ node = dtb.GetNode('/binman/text')
+ self.assertEqual('the value', node.props['my-property'].value)
+
+ def testReplaceSome(self):
+ """Test replacing the contents of a few entries"""
+ entries, expected1, expected2, expected_text = (
+ self._CheckReplaceMultiple(['u-boot2', 'text']))
+
+ # This one should not change
+ data = entries['u-boot'].data
+ self.assertEqual(U_BOOT_DATA, data)
+
+ data = entries['u-boot2'].data
+ self.assertEqual(expected2, data)
+
+ data = entries['text'].data
+ self.assertEqual(expected_text, data)
+
+ def testReplaceCmd(self):
+ """Test replacing a file fron an image on the command line"""
+ self._DoReadFileRealDtb('143_replace_all.dts')
+
+ try:
+ tmpdir, updated_fname = self._SetupImageInTmpdir()
+
+ fname = os.path.join(tmpdir, 'update-u-boot.bin')
+ expected = b'x' * len(U_BOOT_DATA)
+ tools.write_file(fname, expected)
+
+ self._DoBinman('replace', '-i', updated_fname, 'u-boot', '-f', fname)
+ data = tools.read_file(updated_fname)
+ self.assertEqual(expected, data[:len(expected)])
+ map_fname = os.path.join(tmpdir, 'image-updated.map')
+ self.assertFalse(os.path.exists(map_fname))
+ finally:
+ shutil.rmtree(tmpdir)
+
+ def testReplaceCmdSome(self):
+ """Test replacing some files fron an image on the command line"""
+ updated_fname, outdir, expected1, expected2, expected_text = (
+ self._SetupForReplace())
+
+ self._DoBinman('replace', '-i', updated_fname, '-I', outdir,
+ 'u-boot2', 'text')
+
+ tools.prepare_output_dir(None)
+ image = Image.FromFile(updated_fname)
+ image.LoadData()
+ entries = image.GetEntries()
+
+ # This one should not change
+ data = entries['u-boot'].data
+ self.assertEqual(U_BOOT_DATA, data)
+
+ data = entries['u-boot2'].data
+ self.assertEqual(expected2, data)
+
+ data = entries['text'].data
+ self.assertEqual(expected_text, data)
+
+ def testReplaceMissing(self):
+ """Test replacing entries where the file is missing"""
+ updated_fname, outdir, expected1, expected2, expected_text = (
+ self._SetupForReplace())
+
+ # Remove one of the files, to generate a warning
+ u_boot_fname1 = os.path.join(outdir, 'u-boot')
+ os.remove(u_boot_fname1)
+
+ with test_util.capture_sys_output() as (stdout, stderr):
+ control.ReplaceEntries(updated_fname, None, outdir, [])
+ self.assertIn("Skipping entry '/u-boot' from missing file",
+ stderr.getvalue())
+
+ def testReplaceCmdMap(self):
+ """Test replacing a file fron an image on the command line"""
+ self._DoReadFileRealDtb('143_replace_all.dts')
+
+ try:
+ tmpdir, updated_fname = self._SetupImageInTmpdir()
+
+ fname = os.path.join(self._indir, 'update-u-boot.bin')
+ expected = b'x' * len(U_BOOT_DATA)
+ tools.write_file(fname, expected)
+
+ self._DoBinman('replace', '-i', updated_fname, 'u-boot',
+ '-f', fname, '-m')
+ map_fname = os.path.join(tmpdir, 'image-updated.map')
+ self.assertTrue(os.path.exists(map_fname))
+ finally:
+ shutil.rmtree(tmpdir)
+
+ def testReplaceNoEntryPaths(self):
+ """Test replacing an entry without an entry path"""
+ self._DoReadFileRealDtb('143_replace_all.dts')
+ image_fname = tools.get_output_filename('image.bin')
+ with self.assertRaises(ValueError) as e:
+ control.ReplaceEntries(image_fname, 'fname', None, [])
+ self.assertIn('Must specify an entry path to read with -f',
+ str(e.exception))
+
+ def testReplaceTooManyEntryPaths(self):
+ """Test extracting some entries"""
+ self._DoReadFileRealDtb('143_replace_all.dts')
+ image_fname = tools.get_output_filename('image.bin')
+ with self.assertRaises(ValueError) as e:
+ control.ReplaceEntries(image_fname, 'fname', None, ['a', 'b'])
+ self.assertIn('Must specify exactly one entry path to write with -f',
+ str(e.exception))
+
+ def testPackReset16(self):
+ """Test that an image with an x86 reset16 region can be created"""
+ data = self._DoReadFile('144_x86_reset16.dts')
+ self.assertEqual(X86_RESET16_DATA, data[:len(X86_RESET16_DATA)])
+
+ def testPackReset16Spl(self):
+ """Test that an image with an x86 reset16-spl region can be created"""
+ data = self._DoReadFile('145_x86_reset16_spl.dts')
+ self.assertEqual(X86_RESET16_SPL_DATA, data[:len(X86_RESET16_SPL_DATA)])
+
+ def testPackReset16Tpl(self):
+ """Test that an image with an x86 reset16-tpl region can be created"""
+ data = self._DoReadFile('146_x86_reset16_tpl.dts')
+ self.assertEqual(X86_RESET16_TPL_DATA, data[:len(X86_RESET16_TPL_DATA)])
+
+ def testPackIntelFit(self):
+ """Test that an image with an Intel FIT and pointer can be created"""
+ data = self._DoReadFile('147_intel_fit.dts')
+ self.assertEqual(U_BOOT_DATA, data[:len(U_BOOT_DATA)])
+ fit = data[16:32]
+ self.assertEqual(b'_FIT_ \x01\x00\x00\x00\x00\x01\x80}' , fit)
+ ptr = struct.unpack('<i', data[0x40:0x44])[0]
+
+ image = control.images['image']
+ entries = image.GetEntries()
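+ # The image ends at 4GB, so the FIT pointer should hold the signed
+ # 32-bit address of the FIT, i.e. image_pos - 4GB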
+ expected_ptr = entries['intel-fit'].image_pos - (1 << 32)
+ self.assertEqual(expected_ptr, ptr)
+
+ def testPackIntelFitMissing(self):
+ """Test detection of a FIT pointer with not FIT region"""
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFile('148_intel_fit_missing.dts')
+ self.assertIn("'intel-fit-ptr' section must have an 'intel-fit' sibling",
+ str(e.exception))
+
+ def _CheckSymbolsTplSection(self, dts, expected_vals):
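+ """Check that binman-written symbol values appear in the SPL and TPL data
+
+ Args:
+ dts: Devicetree file to process
+ expected_vals: Expected values for the four binman symbols
+ """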
+ data = self._DoReadFile(dts)
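+ # The symbol area is 24 bytes: a 32-bit magic word followed by the
+ # expected values packed as u32, u64, u32, u32 ('<LLQLL')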
+ sym_values = struct.pack('<LLQLL', elf.BINMAN_SYM_MAGIC_VALUE, *expected_vals)
+ upto1 = 4 + len(U_BOOT_SPL_DATA)
+ expected1 = tools.get_bytes(0xff, 4) + sym_values + U_BOOT_SPL_DATA[24:]
+ self.assertEqual(expected1, data[:upto1])
+
+ upto2 = upto1 + 1 + len(U_BOOT_SPL_DATA)
+ expected2 = tools.get_bytes(0xff, 1) + sym_values + U_BOOT_SPL_DATA[24:]
+ self.assertEqual(expected2, data[upto1:upto2])
+
+ upto3 = 0x3c + len(U_BOOT_DATA)
+ expected3 = tools.get_bytes(0xff, 1) + U_BOOT_DATA
+ self.assertEqual(expected3, data[upto2:upto3])
+
+ expected4 = sym_values + U_BOOT_TPL_DATA[24:]
+ self.assertEqual(expected4, data[upto3:upto3 + len(U_BOOT_TPL_DATA)])
+
+ def testSymbolsTplSection(self):
+ """Test binman can assign symbols embedded in U-Boot TPL in a section"""
+ self._SetupSplElf('u_boot_binman_syms')
+ self._SetupTplElf('u_boot_binman_syms')
+ self._CheckSymbolsTplSection('149_symbols_tpl.dts',
+ [0x04, 0x20, 0x10 + 0x3c, 0x04])
+
+ def testSymbolsTplSectionX86(self):
+ """Test binman can assign symbols in a section with end-at-4gb"""
+ self._SetupSplElf('u_boot_binman_syms_x86')
+ self._SetupTplElf('u_boot_binman_syms_x86')
+ self._CheckSymbolsTplSection('155_symbols_tpl_x86.dts',
+ [0xffffff04, 0xffffff20, 0xffffff3c,
+ 0x04])
+
+ def testPackX86RomIfwiSection(self):
+ """Test that a section can be placed in an IFWI region"""
+ self._SetupIfwi('fitimage.bin')
+ data = self._DoReadFile('151_x86_rom_ifwi_section.dts')
+ self._CheckIfwi(data)
+
+ def testPackFspM(self):
+ """Test that an image with a FSP memory-init binary can be created"""
+ data = self._DoReadFile('152_intel_fsp_m.dts')
+ self.assertEqual(FSP_M_DATA, data[:len(FSP_M_DATA)])
+
+ def testPackFspS(self):
+ """Test that an image with a FSP silicon-init binary can be created"""
+ data = self._DoReadFile('153_intel_fsp_s.dts')
+ self.assertEqual(FSP_S_DATA, data[:len(FSP_S_DATA)])
+
+ def testPackFspT(self):
+ """Test that an image with a FSP temp-ram-init binary can be created"""
+ data = self._DoReadFile('154_intel_fsp_t.dts')
+ self.assertEqual(FSP_T_DATA, data[:len(FSP_T_DATA)])
+
+ def testMkimage(self):
+ """Test using mkimage to build an image"""
+ self._SetupSplElf()
+ data = self._DoReadFile('156_mkimage.dts')
+
+ # Just check that the data appears in the file somewhere
+ self.assertIn(U_BOOT_SPL_DATA, data)
+
+ def testMkimageMissing(self):
+ """Test that binman still produces an image if mkimage is missing"""
+ self._SetupSplElf()
+ with test_util.capture_sys_output() as (_, stderr):
+ self._DoTestFile('156_mkimage.dts',
+ force_missing_bintools='mkimage')
+ err = stderr.getvalue()
+ self.assertRegex(err, "Image 'image'.*missing bintools.*: mkimage")
+
+ def testExtblob(self):
+ """Test an image with an external blob"""
+ data = self._DoReadFile('157_blob_ext.dts')
+ self.assertEqual(REFCODE_DATA, data)
+
+ def testExtblobMissing(self):
+ """Test an image with a missing external blob"""
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFile('158_blob_ext_missing.dts')
+ self.assertIn("Filename 'missing-file' not found in input path",
+ str(e.exception))
+
+ def testExtblobMissingOk(self):
+ """Test an image with an missing external blob that is allowed"""
+ with test_util.capture_sys_output() as (stdout, stderr):
+ ret = self._DoTestFile('158_blob_ext_missing.dts',
+ allow_missing=True)
+ self.assertEqual(103, ret)
+ err = stderr.getvalue()
+ self.assertIn('(missing-file)', err)
+ self.assertRegex(err, "Image 'image'.*missing.*: blob-ext")
+ self.assertIn('Some images are invalid', err)
+
+ def testExtblobMissingOkFlag(self):
+ """Test an image with an missing external blob allowed with -W"""
+ with test_util.capture_sys_output() as (stdout, stderr):
+ ret = self._DoTestFile('158_blob_ext_missing.dts',
+ allow_missing=True, ignore_missing=True)
+ self.assertEqual(0, ret)
+ err = stderr.getvalue()
+ self.assertIn('(missing-file)', err)
+ self.assertRegex(err, "Image 'image'.*missing.*: blob-ext")
+ self.assertIn('Some images are invalid', err)
+
+ def testExtblobMissingOkSect(self):
+ """Test an image with an missing external blob that is allowed"""
+ with test_util.capture_sys_output() as (stdout, stderr):
+ self._DoTestFile('159_blob_ext_missing_sect.dts',
+ allow_missing=True)
+ err = stderr.getvalue()
+ self.assertRegex(err, "Image 'image'.*missing.*: blob-ext blob-ext2")
+
+ def testPackX86RomMeMissingDesc(self):
+ """Test that an missing Intel descriptor entry is allowed"""
+ with test_util.capture_sys_output() as (stdout, stderr):
+ self._DoTestFile('164_x86_rom_me_missing.dts', allow_missing=True)
+ err = stderr.getvalue()
+ self.assertRegex(err, "Image 'image'.*missing.*: intel-descriptor")
+
+ def testPackX86RomMissingIfwi(self):
+ """Test that an x86 ROM with Integrated Firmware Image can be created"""
+ self._SetupIfwi('fitimage.bin')
+ pathname = os.path.join(self._indir, 'fitimage.bin')
+ os.remove(pathname)
+ with test_util.capture_sys_output() as (stdout, stderr):
+ self._DoTestFile('111_x86_rom_ifwi.dts', allow_missing=True)
+ err = stderr.getvalue()
+ self.assertRegex(err, "Image 'image'.*missing.*: intel-ifwi")
+
+ def testPackOverlapZero(self):
+ """Test that zero-size overlapping regions are ignored"""
+ self._DoTestFile('160_pack_overlap_zero.dts')
+
+ def _CheckSimpleFitData(self, fit_data, kernel_data, fdt1_data):
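+ """Check the contents of a simple FIT against dumpimage output
+
+ Args:
+ fit_data: Contents of the FIT to check
+ kernel_data: Expected data for the 'kernel' image
+ fdt1_data: Expected data for the 'fdt-1' image
+ """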
+ # The data should be inside the FIT
+ dtb = fdt.Fdt.FromData(fit_data)
+ dtb.Scan()
+ fnode = dtb.GetNode('/images/kernel')
+ self.assertIn('data', fnode.props)
+
+ fname = os.path.join(self._indir, 'fit_data.fit')
+ tools.write_file(fname, fit_data)
+ out = tools.run('dumpimage', '-l', fname)
+
+ # Check a few features to make sure the plumbing works. We don't need
+ # to test the operation of mkimage or dumpimage here. First convert the
+ # output into a dict where the keys are the fields printed by dumpimage
+ # and the values are a list of values for each field
+ lines = out.splitlines()
+
+ # Converts "Compression: gzip compressed" into two groups:
+ # 'Compression' and 'gzip compressed'
+ re_line = re.compile(r'^ *([^:]*)(?:: *(.*))?$')
+ vals = collections.defaultdict(list)
+ for line in lines:
+ mat = re_line.match(line)
+ vals[mat.group(1)].append(mat.group(2))
+
+ self.assertEqual('FIT description: test-desc', lines[0])
+ self.assertIn('Created:', lines[1])
+ self.assertIn('Image 0 (kernel)', vals)
+ self.assertIn('Hash value', vals)
+ data_sizes = vals.get('Data Size')
+ self.assertIsNotNone(data_sizes)
+ self.assertEqual(2, len(data_sizes))
+ # Format is "4 Bytes = 0.00 KiB = 0.00 MiB" so take the first word
+ self.assertEqual(len(kernel_data), int(data_sizes[0].split()[0]))
+ self.assertEqual(len(fdt1_data), int(data_sizes[1].split()[0]))
+
+ # Check if entry listing correctly omits /images/
+ image = control.images['image']
+ fit_entry = image.GetEntries()['fit']
+ subentries = list(fit_entry.GetEntries().keys())
+ expected = ['kernel', 'fdt-1']
+ self.assertEqual(expected, subentries)
+
+ def testSimpleFit(self):
+ """Test an image with a FIT inside"""
+ self._SetupSplElf()
+ data = self._DoReadFile('161_fit.dts')
+ self.assertEqual(U_BOOT_DATA, data[:len(U_BOOT_DATA)])
+ self.assertEqual(U_BOOT_NODTB_DATA, data[-len(U_BOOT_NODTB_DATA):])
+ fit_data = data[len(U_BOOT_DATA):-len(U_BOOT_NODTB_DATA)]
+
+ self._CheckSimpleFitData(fit_data, U_BOOT_DATA, U_BOOT_SPL_DTB_DATA)
+
+ def testSimpleFitExpandsSubentries(self):
+ """Test that FIT images expand their subentries"""
+ data = self._DoReadFileDtb('161_fit.dts', use_expanded=True)[0]
+ self.assertEqual(U_BOOT_EXP_DATA, data[:len(U_BOOT_EXP_DATA)])
+ self.assertEqual(U_BOOT_NODTB_DATA, data[-len(U_BOOT_NODTB_DATA):])
+ fit_data = data[len(U_BOOT_EXP_DATA):-len(U_BOOT_NODTB_DATA)]
+
+ self._CheckSimpleFitData(fit_data, U_BOOT_EXP_DATA, U_BOOT_SPL_DTB_DATA)
+
+ def testSimpleFitImagePos(self):
+ """Test that we have correct image-pos for FIT subentries"""
+ data, _, _, out_dtb_fname = self._DoReadFileDtb('161_fit.dts',
+ update_dtb=True)
+ dtb = fdt.Fdt(out_dtb_fname)
+ dtb.Scan()
+ props = self._GetPropTree(dtb, BASE_DTB_PROPS + REPACK_DTB_PROPS)
+
+ self.maxDiff = None
+ self.assertEqual({
+ 'image-pos': 0,
+ 'offset': 0,
+ 'size': 1890,
+
+ 'u-boot:image-pos': 0,
+ 'u-boot:offset': 0,
+ 'u-boot:size': 4,
+
+ 'fit:image-pos': 4,
+ 'fit:offset': 4,
+ 'fit:size': 1840,
+
+ 'fit/images/kernel:image-pos': 304,
+ 'fit/images/kernel:offset': 300,
+ 'fit/images/kernel:size': 4,
+
+ 'fit/images/kernel/u-boot:image-pos': 304,
+ 'fit/images/kernel/u-boot:offset': 0,
+ 'fit/images/kernel/u-boot:size': 4,
+
+ 'fit/images/fdt-1:image-pos': 552,
+ 'fit/images/fdt-1:offset': 548,
+ 'fit/images/fdt-1:size': 6,
+
+ 'fit/images/fdt-1/u-boot-spl-dtb:image-pos': 552,
+ 'fit/images/fdt-1/u-boot-spl-dtb:offset': 0,
+ 'fit/images/fdt-1/u-boot-spl-dtb:size': 6,
+
+ 'u-boot-nodtb:image-pos': 1844,
+ 'u-boot-nodtb:offset': 1844,
+ 'u-boot-nodtb:size': 46,
+ }, props)
+
+ # Actually check the data is where we think it is
+ for node, expected in [
+ ("u-boot", U_BOOT_DATA),
+ ("fit/images/kernel", U_BOOT_DATA),
+ ("fit/images/kernel/u-boot", U_BOOT_DATA),
+ ("fit/images/fdt-1", U_BOOT_SPL_DTB_DATA),
+ ("fit/images/fdt-1/u-boot-spl-dtb", U_BOOT_SPL_DTB_DATA),
+ ("u-boot-nodtb", U_BOOT_NODTB_DATA),
+ ]:
+ image_pos = props[f"{node}:image-pos"]
+ size = props[f"{node}:size"]
+ self.assertEqual(len(expected), size)
+ self.assertEqual(expected, data[image_pos:image_pos+size])
+
+ def testFitExternal(self):
+ """Test an image with an FIT with external images"""
+ data = self._DoReadFile('162_fit_external.dts')
+ fit_data = data[len(U_BOOT_DATA):-2] # _testing is 2 bytes
+
+ # Size of the external-data region as set up by mkimage
+ external_data_size = len(U_BOOT_DATA) + 2
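+ # Expect u-boot, then the FIT whose external data starts at offset
+ # 0x400 and is aligned to 4 bytes, then u-boot-nodtb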
+ expected_size = (len(U_BOOT_DATA) + 0x400 +
+ tools.align(external_data_size, 4) +
+ len(U_BOOT_NODTB_DATA))
+
+ # The data should be outside the FIT
+ dtb = fdt.Fdt.FromData(fit_data)
+ dtb.Scan()
+ fnode = dtb.GetNode('/images/kernel')
+ self.assertNotIn('data', fnode.props)
+ self.assertEqual(len(U_BOOT_DATA),
+ fdt_util.fdt32_to_cpu(fnode.props['data-size'].value))
+ fit_pos = 0x400
+ self.assertEqual(
+ fit_pos,
+ fdt_util.fdt32_to_cpu(fnode.props['data-position'].value))
+
+ self.assertEqual(expected_size, len(data))
+ actual_pos = len(U_BOOT_DATA) + fit_pos
+ self.assertEqual(U_BOOT_DATA + b'aa',
+ data[actual_pos:actual_pos + external_data_size])
+
+ def testFitExternalImagePos(self):
+ """Test that we have correct image-pos for external FIT subentries"""
+ data, _, _, out_dtb_fname = self._DoReadFileDtb('162_fit_external.dts',
+ update_dtb=True)
+ dtb = fdt.Fdt(out_dtb_fname)
+ dtb.Scan()
+ props = self._GetPropTree(dtb, BASE_DTB_PROPS + REPACK_DTB_PROPS)
+
+ self.assertEqual({
+ 'image-pos': 0,
+ 'offset': 0,
+ 'size': 1082,
+
+ 'u-boot:image-pos': 0,
+ 'u-boot:offset': 0,
+ 'u-boot:size': 4,
+
+ 'fit:size': 1032,
+ 'fit:offset': 4,
+ 'fit:image-pos': 4,
+
+ 'fit/images/kernel:size': 4,
+ 'fit/images/kernel:offset': 1024,
+ 'fit/images/kernel:image-pos': 1028,
+
+ 'fit/images/kernel/u-boot:size': 4,
+ 'fit/images/kernel/u-boot:offset': 0,
+ 'fit/images/kernel/u-boot:image-pos': 1028,
+
+ 'fit/images/fdt-1:size': 2,
+ 'fit/images/fdt-1:offset': 1028,
+ 'fit/images/fdt-1:image-pos': 1032,
+
+ 'fit/images/fdt-1/_testing:size': 2,
+ 'fit/images/fdt-1/_testing:offset': 0,
+ 'fit/images/fdt-1/_testing:image-pos': 1032,
+
+ 'u-boot-nodtb:image-pos': 1036,
+ 'u-boot-nodtb:offset': 1036,
+ 'u-boot-nodtb:size': 46,
+ }, props)
+
+ # Actually check the data is where we think it is
+ for node, expected in [
+ ("u-boot", U_BOOT_DATA),
+ ("fit/images/kernel", U_BOOT_DATA),
+ ("fit/images/kernel/u-boot", U_BOOT_DATA),
+ ("fit/images/fdt-1", b'aa'),
+ ("fit/images/fdt-1/_testing", b'aa'),
+ ("u-boot-nodtb", U_BOOT_NODTB_DATA),
+ ]:
+ image_pos = props[f"{node}:image-pos"]
+ size = props[f"{node}:size"]
+ self.assertEqual(len(expected), size)
+ self.assertEqual(expected, data[image_pos:image_pos+size])
+
+ def testFitMissing(self):
+ """Test that binman complains if mkimage is missing"""
+ with self.assertRaises(ValueError) as e:
+ self._DoTestFile('162_fit_external.dts',
+ force_missing_bintools='mkimage')
+ self.assertIn("Node '/binman/fit': Missing tool: 'mkimage'",
+ str(e.exception))
+
+ def testFitMissingOK(self):
+ """Test that binman still produces a FIT image if mkimage is missing"""
+ with test_util.capture_sys_output() as (_, stderr):
+ self._DoTestFile('162_fit_external.dts', allow_missing=True,
+ force_missing_bintools='mkimage')
+ err = stderr.getvalue()
+ self.assertRegex(err, "Image 'image'.*missing bintools.*: mkimage")
+
+ def testSectionIgnoreHashSignature(self):
+ """Test that sections ignore hash, signature nodes for its data"""
+ data = self._DoReadFile('165_section_ignore_hash_signature.dts')
+ expected = (U_BOOT_DATA + U_BOOT_DATA)
+ self.assertEqual(expected, data)
+
+ def testPadInSections(self):
+ """Test pad-before, pad-after for entries in sections"""
+ data, _, _, out_dtb_fname = self._DoReadFileDtb(
+ '166_pad_in_sections.dts', update_dtb=True)
+ expected = (U_BOOT_DATA + tools.get_bytes(ord('!'), 12) +
+ U_BOOT_DATA + tools.get_bytes(ord('!'), 6) +
+ U_BOOT_DATA)
+ self.assertEqual(expected, data)
+
+ dtb = fdt.Fdt(out_dtb_fname)
+ dtb.Scan()
+ props = self._GetPropTree(dtb, ['size', 'image-pos', 'offset'])
+ expected = {
+ 'image-pos': 0,
+ 'offset': 0,
+ 'size': 12 + 6 + 3 * len(U_BOOT_DATA),
+
+ 'section:image-pos': 0,
+ 'section:offset': 0,
+ 'section:size': 12 + 6 + 3 * len(U_BOOT_DATA),
+
+ 'section/before:image-pos': 0,
+ 'section/before:offset': 0,
+ 'section/before:size': len(U_BOOT_DATA),
+
+ 'section/u-boot:image-pos': 4,
+ 'section/u-boot:offset': 4,
+ 'section/u-boot:size': 12 + len(U_BOOT_DATA) + 6,
+
+ 'section/after:image-pos': 26,
+ 'section/after:offset': 26,
+ 'section/after:size': len(U_BOOT_DATA),
+ }
+ self.assertEqual(expected, props)
+
+ def testFitImageSubentryAlignment(self):
+ """Test relative alignability of FIT image subentries"""
+ self._SetupSplElf()
+ entry_args = {
+ 'test-id': TEXT_DATA,
+ }
+ data, _, _, _ = self._DoReadFileDtb('167_fit_image_subentry_alignment.dts',
+ entry_args=entry_args)
+ dtb = fdt.Fdt.FromData(data)
+ dtb.Scan()
+
+ node = dtb.GetNode('/images/kernel')
+ data = dtb.GetProps(node)["data"].bytes
+ align_pad = 0x10 - (len(U_BOOT_SPL_DATA) % 0x10)
+ expected = (tools.get_bytes(0, 0x20) + U_BOOT_SPL_DATA +
+ tools.get_bytes(0, align_pad) + U_BOOT_DATA)
+ self.assertEqual(expected, data)
+
+ node = dtb.GetNode('/images/fdt-1')
+ data = dtb.GetProps(node)["data"].bytes
+ expected = (U_BOOT_SPL_DTB_DATA + tools.get_bytes(0, 20) +
+ tools.to_bytes(TEXT_DATA) + tools.get_bytes(0, 30) +
+ U_BOOT_DTB_DATA)
+ self.assertEqual(expected, data)
+
+ def testFitExtblobMissingOk(self):
+ """Test a FIT with a missing external blob that is allowed"""
+ with test_util.capture_sys_output() as (stdout, stderr):
+ self._DoTestFile('168_fit_missing_blob.dts',
+ allow_missing=True)
+ err = stderr.getvalue()
+ self.assertRegex(err, "Image 'image'.*missing.*: atf-bl31")
+
+ def testBlobNamedByArgMissing(self):
+ """Test handling of a missing entry arg"""
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFile('068_blob_named_by_arg.dts')
+ self.assertIn("Missing required properties/entry args: cros-ec-rw-path",
+ str(e.exception))
+
+ def testPackBl31(self):
+ """Test that an image with an ATF BL31 binary can be created"""
+ data = self._DoReadFile('169_atf_bl31.dts')
+ self.assertEqual(ATF_BL31_DATA, data[:len(ATF_BL31_DATA)])
+
+ def testPackScp(self):
+ """Test that an image with an SCP binary can be created"""
+ data = self._DoReadFile('172_scp.dts')
+ self.assertEqual(SCP_DATA, data[:len(SCP_DATA)])
+
+ def testFitFdt(self):
+ """Test an image with an FIT with multiple FDT images"""
+ def _CheckFdt(seq, expected_data):
+ """Check the FDT nodes
+
+ Args:
+ seq: Sequence number to check (0 or 1)
+ expected_data: Expected contents of 'data' property
+ """
+ name = 'fdt-%d' % seq
+ fnode = dtb.GetNode('/images/%s' % name)
+ self.assertIsNotNone(fnode)
+ self.assertEqual({'description', 'type', 'compression', 'data'},
+ set(fnode.props.keys()))
+ self.assertEqual(expected_data, fnode.props['data'].bytes)
+ self.assertEqual('fdt-test-fdt%d.dtb' % seq,
+ fnode.props['description'].value)
+ self.assertEqual(fnode.subnodes[0].name, 'hash')
+
+ def _CheckConfig(seq, expected_data):
+ """Check the configuration nodes
+
+ Args:
+ seq: Sequence number to check (0 or 1)
+ expected_data: Expected contents of 'data' property
+ """
+ cnode = dtb.GetNode('/configurations')
+ self.assertIn('default', cnode.props)
+ self.assertEqual('config-2', cnode.props['default'].value)
+
+ name = 'config-%d' % seq
+ fnode = dtb.GetNode('/configurations/%s' % name)
+ self.assertIsNotNone(fnode)
+ self.assertEqual({'description', 'firmware', 'loadables', 'fdt'},
+ set(fnode.props.keys()))
+ self.assertEqual('conf-test-fdt%d.dtb' % seq,
+ fnode.props['description'].value)
+ self.assertEqual('fdt-%d' % seq, fnode.props['fdt'].value)
+
+ entry_args = {
+ 'of-list': 'test-fdt1 test-fdt2',
+ 'default-dt': 'test-fdt2',
+ }
+ data = self._DoReadFileDtb(
+ '170_fit_fdt.dts',
+ entry_args=entry_args,
+ extra_indirs=[os.path.join(self._indir, TEST_FDT_SUBDIR)])[0]
+ self.assertEqual(U_BOOT_NODTB_DATA, data[-len(U_BOOT_NODTB_DATA):])
+ fit_data = data[len(U_BOOT_DATA):-len(U_BOOT_NODTB_DATA)]
+
+ dtb = fdt.Fdt.FromData(fit_data)
+ dtb.Scan()
+ fnode = dtb.GetNode('/images/kernel')
+ self.assertIn('data', fnode.props)
+
+ # Check all the properties in fdt-1 and fdt-2
+ _CheckFdt(1, TEST_FDT1_DATA)
+ _CheckFdt(2, TEST_FDT2_DATA)
+
+ # Check configurations
+ _CheckConfig(1, TEST_FDT1_DATA)
+ _CheckConfig(2, TEST_FDT2_DATA)
+
+ def testFitFdtMissingList(self):
+ """Test handling of a missing 'of-list' entry arg"""
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFile('170_fit_fdt.dts')
+ self.assertIn("Generator node requires 'of-list' entry argument",
+ str(e.exception))
+
+ def testFitFdtEmptyList(self):
+ """Test handling of an empty 'of-list' entry arg"""
+ entry_args = {
+ 'of-list': '',
+ }
+ data = self._DoReadFileDtb('170_fit_fdt.dts', entry_args=entry_args)[0]
+
+ def testFitFdtMissingProp(self):
+ """Test handling of a missing 'fit,fdt-list' property"""
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFile('171_fit_fdt_missing_prop.dts')
+ self.assertIn("Generator node requires 'fit,fdt-list' property",
+ str(e.exception))
+
+ def testFitFdtMissing(self):
+ """Test handling of a missing 'default-dt' entry arg"""
+ entry_args = {
+ 'of-list': 'test-fdt1 test-fdt2',
+ }
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFileDtb(
+ '170_fit_fdt.dts',
+ entry_args=entry_args,
+ extra_indirs=[os.path.join(self._indir, TEST_FDT_SUBDIR)])[0]
+ self.assertIn("Generated 'default' node requires default-dt entry argument",
+ str(e.exception))
+
+ def testFitFdtNotInList(self):
+ """Test handling of a default-dt that is not in the of-list"""
+ entry_args = {
+ 'of-list': 'test-fdt1 test-fdt2',
+ 'default-dt': 'test-fdt3',
+ }
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFileDtb(
+ '170_fit_fdt.dts',
+ entry_args=entry_args,
+ extra_indirs=[os.path.join(self._indir, TEST_FDT_SUBDIR)])[0]
+ self.assertIn("default-dt entry argument 'test-fdt3' not found in fdt list: test-fdt1, test-fdt2",
+ str(e.exception))
+
+ def testFitExtblobMissingHelp(self):
+ """Test display of help messages when an external blob is missing"""
+ control.missing_blob_help = control._ReadMissingBlobHelp()
+ control.missing_blob_help['wibble'] = 'Wibble test'
+ control.missing_blob_help['another'] = 'Another test'
+ with test_util.capture_sys_output() as (stdout, stderr):
+ self._DoTestFile('168_fit_missing_blob.dts',
+ allow_missing=True)
+ err = stderr.getvalue()
+
+ # We can get the tag from the name, the type or the missing-msg
+ # property. Check all three.
+ self.assertIn('You may need to build ARM Trusted', err)
+ self.assertIn('Wibble test', err)
+ self.assertIn('Another test', err)
+
+ def testMissingBlob(self):
+ """Test handling of a blob containing a missing file"""
+ with self.assertRaises(ValueError) as e:
+ self._DoTestFile('173_missing_blob.dts', allow_missing=True)
+ self.assertIn("Filename 'missing' not found in input path",
+ str(e.exception))
+
+ def testEnvironment(self):
+ """Test adding a U-Boot environment"""
+ data = self._DoReadFile('174_env.dts')
+ self.assertEqual(U_BOOT_DATA, data[:len(U_BOOT_DATA)])
+ self.assertEqual(U_BOOT_NODTB_DATA, data[-len(U_BOOT_NODTB_DATA):])
+ env = data[len(U_BOOT_DATA):-len(U_BOOT_NODTB_DATA)]
+ self.assertEqual(b'\x1b\x97\x22\x7c\x01var1=1\0var2="2"\0\0\xff\xff',
+ env)
+
+ def testEnvironmentNoSize(self):
+ """Test that a missing 'size' property is detected"""
+ with self.assertRaises(ValueError) as e:
+ self._DoTestFile('175_env_no_size.dts')
+ self.assertIn("'u-boot-env' entry must have a size property",
+ str(e.exception))
+
+ def testEnvironmentTooSmall(self):
+ """Test handling of an environment that does not fit"""
+ with self.assertRaises(ValueError) as e:
+ self._DoTestFile('176_env_too_small.dts')
+
+ # checksum, start byte, environment with \0 terminator, final \0
+ need = 4 + 1 + len(ENV_DATA) + 1 + 1
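+ # The test image provides only 8 bytes of space for the environment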
+ short = need - 0x8
+ self.assertIn("too small to hold data (need %#x more bytes)" % short,
+ str(e.exception))
+
+ def testSkipAtStart(self):
+ """Test handling of skip-at-start section"""
+ data = self._DoReadFile('177_skip_at_start.dts')
+ self.assertEqual(U_BOOT_DATA, data)
+
+ image = control.images['image']
+ entries = image.GetEntries()
+ section = entries['section']
+ self.assertEqual(0, section.offset)
+ self.assertEqual(len(U_BOOT_DATA), section.size)
+ self.assertEqual(U_BOOT_DATA, section.GetData())
+
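+ # With skip-at-start the entry offset starts at 16, but no space is
+ # reserved for the skipped region, so the data is still at the start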
+ entry = section.GetEntries()['u-boot']
+ self.assertEqual(16, entry.offset)
+ self.assertEqual(len(U_BOOT_DATA), entry.size)
+ self.assertEqual(U_BOOT_DATA, entry.data)
+
+ def testSkipAtStartPad(self):
+ """Test handling of skip-at-start section with padded entry"""
+ data = self._DoReadFile('178_skip_at_start_pad.dts')
+ before = tools.get_bytes(0, 8)
+ after = tools.get_bytes(0, 4)
+ all = before + U_BOOT_DATA + after
+ self.assertEqual(all, data)
+
+ image = control.images['image']
+ entries = image.GetEntries()
+ section = entries['section']
+ self.assertEqual(0, section.offset)
+ self.assertEqual(len(all), section.size)
+ self.assertEqual(all, section.GetData())
+
+ entry = section.GetEntries()['u-boot']
+ self.assertEqual(16, entry.offset)
+ self.assertEqual(len(all), entry.size)
+ self.assertEqual(U_BOOT_DATA, entry.data)
+
+ def testSkipAtStartSectionPad(self):
+ """Test handling of skip-at-start section with padding"""
+ data = self._DoReadFile('179_skip_at_start_section_pad.dts')
+ before = tools.get_bytes(0, 8)
+ after = tools.get_bytes(0, 4)
+ all = before + U_BOOT_DATA + after
+ self.assertEqual(all, data)
+
+ image = control.images['image']
+ entries = image.GetEntries()
+ section = entries['section']
+ self.assertEqual(0, section.offset)
+ self.assertEqual(len(all), section.size)
+ self.assertEqual(U_BOOT_DATA, section.data)
+ self.assertEqual(all, section.GetPaddedData())
+
+ entry = section.GetEntries()['u-boot']
+ self.assertEqual(16, entry.offset)
+ self.assertEqual(len(U_BOOT_DATA), entry.size)
+ self.assertEqual(U_BOOT_DATA, entry.data)
+
+ def testSectionPad(self):
+ """Testing padding with sections"""
+ data = self._DoReadFile('180_section_pad.dts')
+ expected = (tools.get_bytes(ord('&'), 3) +
+ tools.get_bytes(ord('!'), 5) +
+ U_BOOT_DATA +
+ tools.get_bytes(ord('!'), 1) +
+ tools.get_bytes(ord('&'), 2))
+ self.assertEqual(expected, data)
+
+ def testSectionAlign(self):
+ """Testing alignment with sections"""
+ data = self._DoReadFileDtb('181_section_align.dts', map=True)[0]
+ expected = (b'\0' + # fill section
+ tools.get_bytes(ord('&'), 1) + # padding to section align
+ b'\0' + # fill section
+ tools.get_bytes(ord('!'), 3) + # padding to u-boot align
+ U_BOOT_DATA +
+ tools.get_bytes(ord('!'), 4) + # padding to u-boot size
+ tools.get_bytes(ord('!'), 4)) # padding to section size
+ self.assertEqual(expected, data)
+
+ def testCompressImage(self):
+ """Test compression of the entire image"""
+ self._CheckLz4()
+ data, _, _, out_dtb_fname = self._DoReadFileDtb(
+ '182_compress_image.dts', use_real_dtb=True, update_dtb=True)
+ dtb = fdt.Fdt(out_dtb_fname)
+ dtb.Scan()
+ props = self._GetPropTree(dtb, ['offset', 'image-pos', 'size',
+ 'uncomp-size'])
+ orig = self._decompress(data)
+ self.assertEqual(COMPRESS_DATA + U_BOOT_DATA, orig)
+
+ # Do a sanity check on various fields
+ image = control.images['image']
+ entries = image.GetEntries()
+ self.assertEqual(2, len(entries))
+
+ entry = entries['blob']
+ self.assertEqual(COMPRESS_DATA, entry.data)
+ self.assertEqual(len(COMPRESS_DATA), entry.size)
+
+ entry = entries['u-boot']
+ self.assertEqual(U_BOOT_DATA, entry.data)
+ self.assertEqual(len(U_BOOT_DATA), entry.size)
+
+ self.assertEqual(len(data), image.size)
+ self.assertEqual(COMPRESS_DATA + U_BOOT_DATA, image.uncomp_data)
+ self.assertEqual(len(COMPRESS_DATA + U_BOOT_DATA), image.uncomp_size)
+ orig = self._decompress(image.data)
+ self.assertEqual(orig, image.uncomp_data)
+
+ expected = {
+ 'blob:offset': 0,
+ 'blob:size': len(COMPRESS_DATA),
+ 'u-boot:offset': len(COMPRESS_DATA),
+ 'u-boot:size': len(U_BOOT_DATA),
+ 'uncomp-size': len(COMPRESS_DATA + U_BOOT_DATA),
+ 'offset': 0,
+ 'image-pos': 0,
+ 'size': len(data),
+ }
+ self.assertEqual(expected, props)
+
+ def testCompressImageLess(self):
+ """Test compression where compression reduces the image size"""
+ self._CheckLz4()
+ data, _, _, out_dtb_fname = self._DoReadFileDtb(
+ '183_compress_image_less.dts', use_real_dtb=True, update_dtb=True)
+ dtb = fdt.Fdt(out_dtb_fname)
+ dtb.Scan()
+ props = self._GetPropTree(dtb, ['offset', 'image-pos', 'size',
+ 'uncomp-size'])
+ orig = self._decompress(data)
+
+ self.assertEqual(COMPRESS_DATA + COMPRESS_DATA + U_BOOT_DATA, orig)
+
+ # Do a sanity check on various fields
+ image = control.images['image']
+ entries = image.GetEntries()
+ self.assertEqual(2, len(entries))
+
+ entry = entries['blob']
+ self.assertEqual(COMPRESS_DATA_BIG, entry.data)
+ self.assertEqual(len(COMPRESS_DATA_BIG), entry.size)
+
+ entry = entries['u-boot']
+ self.assertEqual(U_BOOT_DATA, entry.data)
+ self.assertEqual(len(U_BOOT_DATA), entry.size)
+
+ self.assertEqual(len(data), image.size)
+ self.assertEqual(COMPRESS_DATA_BIG + U_BOOT_DATA, image.uncomp_data)
+ self.assertEqual(len(COMPRESS_DATA_BIG + U_BOOT_DATA),
+ image.uncomp_size)
+ orig = self._decompress(image.data)
+ self.assertEqual(orig, image.uncomp_data)
+
+ expected = {
+ 'blob:offset': 0,
+ 'blob:size': len(COMPRESS_DATA_BIG),
+ 'u-boot:offset': len(COMPRESS_DATA_BIG),
+ 'u-boot:size': len(U_BOOT_DATA),
+ 'uncomp-size': len(COMPRESS_DATA_BIG + U_BOOT_DATA),
+ 'offset': 0,
+ 'image-pos': 0,
+ 'size': len(data),
+ }
+ self.assertEqual(expected, props)
+
+ def testCompressSectionSize(self):
+ """Test compression of a section with a fixed size"""
+ self._CheckLz4()
+ data, _, _, out_dtb_fname = self._DoReadFileDtb(
+ '184_compress_section_size.dts', use_real_dtb=True, update_dtb=True)
+ dtb = fdt.Fdt(out_dtb_fname)
+ dtb.Scan()
+ props = self._GetPropTree(dtb, ['offset', 'image-pos', 'size',
+ 'uncomp-size'])
+ orig = self._decompress(data)
+ self.assertEqual(COMPRESS_DATA + U_BOOT_DATA, orig)
+ expected = {
+ 'section/blob:offset': 0,
+ 'section/blob:size': len(COMPRESS_DATA),
+ 'section/u-boot:offset': len(COMPRESS_DATA),
+ 'section/u-boot:size': len(U_BOOT_DATA),
+ 'section:offset': 0,
+ 'section:image-pos': 0,
+ 'section:uncomp-size': len(COMPRESS_DATA + U_BOOT_DATA),
+ 'section:size': 0x30,
+ 'offset': 0,
+ 'image-pos': 0,
+ 'size': 0x30,
+ }
+ self.assertEqual(expected, props)
+
+ def testCompressSection(self):
+ """Test compression of a section with no fixed size"""
+ self._CheckLz4()
+ data, _, _, out_dtb_fname = self._DoReadFileDtb(
+ '185_compress_section.dts', use_real_dtb=True, update_dtb=True)
+ dtb = fdt.Fdt(out_dtb_fname)
+ dtb.Scan()
+ props = self._GetPropTree(dtb, ['offset', 'image-pos', 'size',
+ 'uncomp-size'])
+ orig = self._decompress(data)
+ self.assertEqual(COMPRESS_DATA + U_BOOT_DATA, orig)
+ expected = {
+ 'section/blob:offset': 0,
+ 'section/blob:size': len(COMPRESS_DATA),
+ 'section/u-boot:offset': len(COMPRESS_DATA),
+ 'section/u-boot:size': len(U_BOOT_DATA),
+ 'section:offset': 0,
+ 'section:image-pos': 0,
+ 'section:uncomp-size': len(COMPRESS_DATA + U_BOOT_DATA),
+ 'section:size': len(data),
+ 'offset': 0,
+ 'image-pos': 0,
+ 'size': len(data),
+ }
+ self.assertEqual(expected, props)
+
+ def testLz4Missing(self):
+ """Test that binman still produces an image if lz4 is missing"""
+ with test_util.capture_sys_output() as (_, stderr):
+ self._DoTestFile('185_compress_section.dts',
+ force_missing_bintools='lz4')
+ err = stderr.getvalue()
+ self.assertRegex(err, "Image 'image'.*missing bintools.*: lz4")
+
+ def testCompressExtra(self):
+ """Test compression of a section with no fixed size"""
+ self._CheckLz4()
+ data, _, _, out_dtb_fname = self._DoReadFileDtb(
+ '186_compress_extra.dts', use_real_dtb=True, update_dtb=True)
+ dtb = fdt.Fdt(out_dtb_fname)
+ dtb.Scan()
+ props = self._GetPropTree(dtb, ['offset', 'image-pos', 'size',
+ 'uncomp-size'])
+
+ base = data[len(U_BOOT_DATA):]
+ self.assertEqual(U_BOOT_DATA, base[:len(U_BOOT_DATA)])
+ rest = base[len(U_BOOT_DATA):]
+
+ # Check compressed data
+ bintool = self.comp_bintools['lz4']
+ expect1 = bintool.compress(COMPRESS_DATA + U_BOOT_DATA)
+ data1 = rest[:len(expect1)]
+ section1 = self._decompress(data1)
+ self.assertEqual(expect1, data1)
+ self.assertEqual(COMPRESS_DATA + U_BOOT_DATA, section1)
+ rest1 = rest[len(expect1):]
+
+ expect2 = bintool.compress(COMPRESS_DATA + COMPRESS_DATA)
+ data2 = rest1[:len(expect2)]
+ section2 = self._decompress(data2)
+ self.assertEqual(expect2, data2)
+ self.assertEqual(COMPRESS_DATA + COMPRESS_DATA, section2)
+ rest2 = rest1[len(expect2):]
+
+ expect_size = (len(U_BOOT_DATA) + len(U_BOOT_DATA) + len(expect1) +
+ len(expect2) + len(U_BOOT_DATA))
+ #self.assertEquals(expect_size, len(data))
+
+ #self.assertEquals(U_BOOT_DATA, rest2)
+
+ self.maxDiff = None
+ expected = {
+ 'u-boot:offset': 0,
+ 'u-boot:image-pos': 0,
+ 'u-boot:size': len(U_BOOT_DATA),
+
+ 'base:offset': len(U_BOOT_DATA),
+ 'base:image-pos': len(U_BOOT_DATA),
+ 'base:size': len(data) - len(U_BOOT_DATA),
+ 'base/u-boot:offset': 0,
+ 'base/u-boot:image-pos': len(U_BOOT_DATA),
+ 'base/u-boot:size': len(U_BOOT_DATA),
+ 'base/u-boot2:offset': len(U_BOOT_DATA) + len(expect1) +
+ len(expect2),
+ 'base/u-boot2:image-pos': len(U_BOOT_DATA) * 2 + len(expect1) +
+ len(expect2),
+ 'base/u-boot2:size': len(U_BOOT_DATA),
+
+ 'base/section:offset': len(U_BOOT_DATA),
+ 'base/section:image-pos': len(U_BOOT_DATA) * 2,
+ 'base/section:size': len(expect1),
+ 'base/section:uncomp-size': len(COMPRESS_DATA + U_BOOT_DATA),
+ 'base/section/blob:offset': 0,
+ 'base/section/blob:size': len(COMPRESS_DATA),
+ 'base/section/u-boot:offset': len(COMPRESS_DATA),
+ 'base/section/u-boot:size': len(U_BOOT_DATA),
+
+ 'base/section2:offset': len(U_BOOT_DATA) + len(expect1),
+ 'base/section2:image-pos': len(U_BOOT_DATA) * 2 + len(expect1),
+ 'base/section2:size': len(expect2),
+ 'base/section2:uncomp-size': len(COMPRESS_DATA + COMPRESS_DATA),
+ 'base/section2/blob:offset': 0,
+ 'base/section2/blob:size': len(COMPRESS_DATA),
+ 'base/section2/blob2:offset': len(COMPRESS_DATA),
+ 'base/section2/blob2:size': len(COMPRESS_DATA),
+
+ 'offset': 0,
+ 'image-pos': 0,
+ 'size': len(data),
+ }
+ self.assertEqual(expected, props)
+
+ def testSymbolsSubsection(self):
+ """Test binman can assign symbols from a subsection"""
+ self.checkSymbols('187_symbols_sub.dts', U_BOOT_SPL_DATA, 0x1c)
+
+ def testReadImageEntryArg(self):
+ """Test reading an image that would need an entry arg to generate"""
+ entry_args = {
+ 'cros-ec-rw-path': 'ecrw.bin',
+ }
+ data = self.data = self._DoReadFileDtb(
+ '188_image_entryarg.dts',use_real_dtb=True, update_dtb=True,
+ entry_args=entry_args)
+
+ image_fname = tools.get_output_filename('image.bin')
+ orig_image = control.images['image']
+
+ # This should not generate an error about the missing 'cros-ec-rw-path'
+ # since we are reading the image from a file. Compare with
+ # testEntryArgsRequired()
+ image = Image.FromFile(image_fname)
+ self.assertEqual(orig_image.GetEntries().keys(),
+ image.GetEntries().keys())
+
+ def testFilesAlign(self):
+ """Test alignment with files"""
+ data = self._DoReadFile('190_files_align.dts')
+
+ # The first string is 15 bytes so will align to 16
+ expect = FILES_DATA[:15] + b'\0' + FILES_DATA[15:]
+ self.assertEqual(expect, data)
+
+ def testReadImageSkip(self):
+ """Test reading an image and accessing its FDT map"""
+ data = self.data = self._DoReadFileRealDtb('191_read_image_skip.dts')
+ image_fname = tools.get_output_filename('image.bin')
+ orig_image = control.images['image']
+ image = Image.FromFile(image_fname)
+ self.assertEqual(orig_image.GetEntries().keys(),
+ image.GetEntries().keys())
+
+ orig_entry = orig_image.GetEntries()['fdtmap']
+ entry = image.GetEntries()['fdtmap']
+ self.assertEqual(orig_entry.offset, entry.offset)
+ self.assertEqual(orig_entry.size, entry.size)
+ self.assertEqual(16, entry.image_pos)
+
+ u_boot = image.GetEntries()['section'].GetEntries()['u-boot']
+
+ self.assertEqual(U_BOOT_DATA, u_boot.ReadData())
+
+ def testTplNoDtb(self):
+ """Test that an image with tpl/u-boot-tpl-nodtb.bin can be created"""
+ self._SetupTplElf()
+ data = self._DoReadFile('192_u_boot_tpl_nodtb.dts')
+ self.assertEqual(U_BOOT_TPL_NODTB_DATA,
+ data[:len(U_BOOT_TPL_NODTB_DATA)])
+
+ def testTplBssPad(self):
+ """Test that we can pad TPL's BSS with zeros"""
+ # ELF file with a '__bss_size' symbol
+ self._SetupTplElf()
+ data = self._DoReadFile('193_tpl_bss_pad.dts')
+ self.assertEqual(U_BOOT_TPL_DATA + tools.get_bytes(0, 10) + U_BOOT_DATA,
+ data)
+
+ def testTplBssPadMissing(self):
+ """Test that a missing symbol is detected"""
+ self._SetupTplElf('u_boot_ucode_ptr')
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFile('193_tpl_bss_pad.dts')
+ self.assertIn('Expected __bss_size symbol in tpl/u-boot-tpl',
+ str(e.exception))
+
+ def checkDtbSizes(self, data, pad_len, start):
+ """Check the size arguments in a dtb embedded in an image
+
+ Args:
+ data: The image data
+ pad_len: Length of the pad section in the image, in bytes
+ start: Start offset of the devicetree to examine, within the image
+
+ Returns:
+ Size of the devicetree in bytes
+ """
+ dtb_data = data[start:]
+ dtb = fdt.Fdt.FromData(dtb_data)
+ fdt_size = dtb.GetFdtObj().totalsize()
+ dtb.Scan()
+ props = self._GetPropTree(dtb, 'size')
+ self.assertEqual({
+ 'size': len(data),
+ 'u-boot-spl/u-boot-spl-bss-pad:size': pad_len,
+ 'u-boot-spl/u-boot-spl-dtb:size': 801,
+ 'u-boot-spl/u-boot-spl-nodtb:size': len(U_BOOT_SPL_NODTB_DATA),
+ 'u-boot-spl:size': 860,
+ 'u-boot-tpl:size': len(U_BOOT_TPL_DATA),
+ 'u-boot/u-boot-dtb:size': 781,
+ 'u-boot/u-boot-nodtb:size': len(U_BOOT_NODTB_DATA),
+ 'u-boot:size': 827,
+ }, props)
+ return fdt_size
+
+ def testExpanded(self):
+ """Test that an expanded entry type is selected when needed"""
+ self._SetupSplElf()
+ self._SetupTplElf()
+
+ # SPL has a devicetree, TPL does not
+ entry_args = {
+ 'spl-dtb': '1',
+ 'spl-bss-pad': 'y',
+ 'tpl-dtb': '',
+ }
+ self._DoReadFileDtb('194_fdt_incl.dts', use_expanded=True,
+ entry_args=entry_args)
+ image = control.images['image']
+ entries = image.GetEntries()
+ self.assertEqual(3, len(entries))
+
+ # First, u-boot, which should be expanded into u-boot-nodtb and dtb
+ self.assertIn('u-boot', entries)
+ entry = entries['u-boot']
+ self.assertEqual('u-boot-expanded', entry.etype)
+ subent = entry.GetEntries()
+ self.assertEqual(2, len(subent))
+ self.assertIn('u-boot-nodtb', subent)
+ self.assertIn('u-boot-dtb', subent)
+
+ # Second, u-boot-spl, which should be expanded into three parts
+ self.assertIn('u-boot-spl', entries)
+ entry = entries['u-boot-spl']
+ self.assertEqual('u-boot-spl-expanded', entry.etype)
+ subent = entry.GetEntries()
+ self.assertEqual(3, len(subent))
+ self.assertIn('u-boot-spl-nodtb', subent)
+ self.assertIn('u-boot-spl-bss-pad', subent)
+ self.assertIn('u-boot-spl-dtb', subent)
+
+ # Third, u-boot-tpl, which should not be expanded, since TPL has no
+ # devicetree
+ self.assertIn('u-boot-tpl', entries)
+ entry = entries['u-boot-tpl']
+ self.assertEqual('u-boot-tpl', entry.etype)
+ self.assertEqual(None, entry.GetEntries())
+
+ def testExpandedTpl(self):
+ """Test that an expanded entry type is selected for TPL when needed"""
+ self._SetupTplElf()
+
+ entry_args = {
+ 'tpl-bss-pad': 'y',
+ 'tpl-dtb': 'y',
+ }
+ self._DoReadFileDtb('195_fdt_incl_tpl.dts', use_expanded=True,
+ entry_args=entry_args)
+ image = control.images['image']
+ entries = image.GetEntries()
+ self.assertEqual(1, len(entries))
+
+ # We only have u-boot-tpl, which should be expanded
+ self.assertIn('u-boot-tpl', entries)
+ entry = entries['u-boot-tpl']
+ self.assertEqual('u-boot-tpl-expanded', entry.etype)
+ subent = entry.GetEntries()
+ self.assertEqual(3, len(subent))
+ self.assertIn('u-boot-tpl-nodtb', subent)
+ self.assertIn('u-boot-tpl-bss-pad', subent)
+ self.assertIn('u-boot-tpl-dtb', subent)
+
+ def testExpandedNoPad(self):
+ """Test an expanded entry without BSS pad enabled"""
+ self._SetupSplElf()
+ self._SetupTplElf()
+
+ # SPL has a devicetree, TPL does not
+ entry_args = {
+ 'spl-dtb': 'something',
+ 'spl-bss-pad': 'n',
+ 'tpl-dtb': '',
+ }
+ self._DoReadFileDtb('194_fdt_incl.dts', use_expanded=True,
+ entry_args=entry_args)
+ image = control.images['image']
+ entries = image.GetEntries()
+
+ # Just check u-boot-spl, which should be expanded into two parts
+ self.assertIn('u-boot-spl', entries)
+ entry = entries['u-boot-spl']
+ self.assertEqual('u-boot-spl-expanded', entry.etype)
+ subent = entry.GetEntries()
+ self.assertEqual(2, len(subent))
+ self.assertIn('u-boot-spl-nodtb', subent)
+ self.assertIn('u-boot-spl-dtb', subent)
+
+ def testExpandedTplNoPad(self):
+ """Test that an expanded entry type with padding disabled in TPL"""
+ self._SetupTplElf()
+
+ entry_args = {
+ 'tpl-bss-pad': '',
+ 'tpl-dtb': 'y',
+ }
+ self._DoReadFileDtb('195_fdt_incl_tpl.dts', use_expanded=True,
+ entry_args=entry_args)
+ image = control.images['image']
+ entries = image.GetEntries()
+ self.assertEqual(1, len(entries))
+
+ # We only have u-boot-tpl, which should be expanded
+ self.assertIn('u-boot-tpl', entries)
+ entry = entries['u-boot-tpl']
+ self.assertEqual('u-boot-tpl-expanded', entry.etype)
+ subent = entry.GetEntries()
+ self.assertEqual(2, len(subent))
+ self.assertIn('u-boot-tpl-nodtb', subent)
+ self.assertIn('u-boot-tpl-dtb', subent)
+
+ def testFdtInclude(self):
+ """Test that an Fdt is update within all binaries"""
+ self._SetupSplElf()
+ self._SetupTplElf()
+
+ # SPL has a devicetree, TPL does not
+ self.maxDiff = None
+ entry_args = {
+ 'spl-dtb': '1',
+ 'spl-bss-pad': 'y',
+ 'tpl-dtb': '',
+ }
+ # Build the image. It includes two separate devicetree binaries, each
+ # with its own contents, but both contain the binman definition.
+ data = self._DoReadFileDtb(
+ '194_fdt_incl.dts', use_real_dtb=True, use_expanded=True,
+ update_dtb=True, entry_args=entry_args)[0]
+ pad_len = 10
+
+ # Check the U-Boot dtb
+ start = len(U_BOOT_NODTB_DATA)
+ fdt_size = self.checkDtbSizes(data, pad_len, start)
+
+ # Now check SPL
+ start += fdt_size + len(U_BOOT_SPL_NODTB_DATA) + pad_len
+ fdt_size = self.checkDtbSizes(data, pad_len, start)
+
+ # TPL has no devicetree
+ start += fdt_size + len(U_BOOT_TPL_DATA)
+ self.assertEqual(len(data), start)
+
+ def testSymbolsExpanded(self):
+ """Test binman can assign symbols in expanded entries"""
+ entry_args = {
+ 'spl-dtb': '1',
+ }
+ self.checkSymbols('197_symbols_expand.dts', U_BOOT_SPL_NODTB_DATA +
+ U_BOOT_SPL_DTB_DATA, 0x38,
+ entry_args=entry_args, use_expanded=True)
+
+ def testCollection(self):
+ """Test a collection"""
+ data = self._DoReadFile('198_collection.dts')
+ self.assertEqual(U_BOOT_NODTB_DATA + U_BOOT_DTB_DATA +
+ tools.get_bytes(0xff, 2) + U_BOOT_NODTB_DATA +
+ tools.get_bytes(0xfe, 3) + U_BOOT_DTB_DATA,
+ data)
+
+ def testCollectionSection(self):
+ """Test a collection where a section must be built first"""
+ # Sections never have their contents when GetData() is called, but when
+ # BuildSectionData() is called with required=True, a section will force
+ # building the contents, producing an error if anything is still
+ # missing.
+ data = self._DoReadFile('199_collection_section.dts')
+ section = U_BOOT_NODTB_DATA + U_BOOT_DTB_DATA
+ self.assertEqual(section + U_BOOT_DATA + tools.get_bytes(0xff, 2) +
+ section + tools.get_bytes(0xfe, 3) + U_BOOT_DATA,
+ data)
+
+ def testAlignDefault(self):
+ """Test that default alignment works on sections"""
+ data = self._DoReadFile('200_align_default.dts')
+ expected = (U_BOOT_DATA + tools.get_bytes(0, 8 - len(U_BOOT_DATA)) +
+ U_BOOT_DATA)
+ # Special alignment for section
+ expected += tools.get_bytes(0, 32 - len(expected))
+ # No alignment within the nested section
+ expected += U_BOOT_DATA + U_BOOT_NODTB_DATA
+ # Now the final piece, which should be default-aligned
+ expected += tools.get_bytes(0, 88 - len(expected)) + U_BOOT_NODTB_DATA
+ self.assertEqual(expected, data)
+
+ def testPackOpenSBI(self):
+ """Test that an image with an OpenSBI binary can be created"""
+ data = self._DoReadFile('201_opensbi.dts')
+ self.assertEqual(OPENSBI_DATA, data[:len(OPENSBI_DATA)])
+
+ def testSectionsSingleThread(self):
+ """Test sections without multithreading"""
+ data = self._DoReadFileDtb('055_sections.dts', threads=0)[0]
+ expected = (U_BOOT_DATA + tools.get_bytes(ord('!'), 12) +
+ U_BOOT_DATA + tools.get_bytes(ord('a'), 12) +
+ U_BOOT_DATA + tools.get_bytes(ord('&'), 4))
+ self.assertEqual(expected, data)
+
+ def testThreadTimeout(self):
+ """Test handling a thread that takes too long"""
+ with self.assertRaises(ValueError) as e:
+ self._DoTestFile('202_section_timeout.dts',
+ test_section_timeout=True)
+ self.assertIn("Timed out obtaining contents", str(e.exception))
+
+ def testTiming(self):
+ """Test output of timing information"""
+ data = self._DoReadFile('055_sections.dts')
+ with test_util.capture_sys_output() as (stdout, stderr):
+ state.TimingShow()
+ self.assertIn('read:', stdout.getvalue())
+ self.assertIn('compress:', stdout.getvalue())
+
+ def testUpdateFdtInElf(self):
+ """Test that we can update the devicetree in an ELF file"""
+ if not elf.ELF_TOOLS:
+ self.skipTest('Python elftools not available')
+ infile = elf_fname = self.ElfTestFile('u_boot_binman_embed')
+ outfile = os.path.join(self._indir, 'u-boot.out')
+ begin_sym = 'dtb_embed_begin'
+ end_sym = 'dtb_embed_end'
+ retcode = self._DoTestFile(
+ '060_fdt_update.dts', update_dtb=True,
+ update_fdt_in_elf=','.join([infile,outfile,begin_sym,end_sym]))
+ self.assertEqual(0, retcode)
+
+ # Check that the output file does in fact contain a dtb with the binman
+ # definition in the correct place
+ syms = elf.GetSymbolFileOffset(infile,
+ ['dtb_embed_begin', 'dtb_embed_end'])
+ data = tools.read_file(outfile)
+ dtb_data = data[syms['dtb_embed_begin'].offset:
+ syms['dtb_embed_end'].offset]
+
+ dtb = fdt.Fdt.FromData(dtb_data)
+ dtb.Scan()
+ props = self._GetPropTree(dtb, BASE_DTB_PROPS + REPACK_DTB_PROPS)
+ self.assertEqual({
+ 'image-pos': 0,
+ 'offset': 0,
+ '_testing:offset': 32,
+ '_testing:size': 2,
+ '_testing:image-pos': 32,
+ 'section@0/u-boot:offset': 0,
+ 'section@0/u-boot:size': len(U_BOOT_DATA),
+ 'section@0/u-boot:image-pos': 0,
+ 'section@0:offset': 0,
+ 'section@0:size': 16,
+ 'section@0:image-pos': 0,
+
+ 'section@1/u-boot:offset': 0,
+ 'section@1/u-boot:size': len(U_BOOT_DATA),
+ 'section@1/u-boot:image-pos': 16,
+ 'section@1:offset': 16,
+ 'section@1:size': 16,
+ 'section@1:image-pos': 16,
+ 'size': 40
+ }, props)
+
+ def testUpdateFdtInElfInvalid(self):
+ """Test that invalid args are detected with --update-fdt-in-elf"""
+ with self.assertRaises(ValueError) as e:
+ self._DoTestFile('060_fdt_update.dts', update_fdt_in_elf='fred')
+ self.assertIn("Invalid args ['fred'] to --update-fdt-in-elf",
+ str(e.exception))
+
+ def testUpdateFdtInElfNoSyms(self):
+ """Test that missing symbols are detected with --update-fdt-in-elf"""
+ if not elf.ELF_TOOLS:
+ self.skipTest('Python elftools not available')
+ infile = elf_fname = self.ElfTestFile('u_boot_binman_embed')
+ outfile = ''
+ begin_sym = 'wrong_begin'
+ end_sym = 'wrong_end'
+ with self.assertRaises(ValueError) as e:
+ self._DoTestFile(
+ '060_fdt_update.dts',
+ update_fdt_in_elf=','.join([infile,outfile,begin_sym,end_sym]))
+ self.assertIn("Expected two symbols 'wrong_begin' and 'wrong_end': got 0:",
+ str(e.exception))
+
+ def testUpdateFdtInElfTooSmall(self):
+ """Test that an over-large dtb is detected with --update-fdt-in-elf"""
+ if not elf.ELF_TOOLS:
+ self.skipTest('Python elftools not available')
+ infile = elf_fname = self.ElfTestFile('u_boot_binman_embed_sm')
+ outfile = os.path.join(self._indir, 'u-boot.out')
+ begin_sym = 'dtb_embed_begin'
+ end_sym = 'dtb_embed_end'
+ with self.assertRaises(ValueError) as e:
+ self._DoTestFile(
+ '060_fdt_update.dts', update_dtb=True,
+ update_fdt_in_elf=','.join([infile,outfile,begin_sym,end_sym]))
+ self.assertRegex(
+ str(e.exception),
+ "Not enough space in '.*u_boot_binman_embed_sm' for data length.*")
+
+ def testVersion(self):
+ """Test we can get the binman version"""
+ version = '(unreleased)'
+ self.assertEqual(version, state.GetVersion(self._indir))
+
+ with self.assertRaises(SystemExit):
+ with test_util.capture_sys_output() as (_, stderr):
+ self._DoBinman('-V')
+ self.assertEqual('Binman %s\n' % version, stderr.getvalue())
+
+ # Try running the tool too, just to be safe
+ result = self._RunBinman('-V')
+ self.assertEqual('Binman %s\n' % version, result.stderr)
+
+ # Set up a version file to make sure that works
+ version = 'v2025.01-rc2'
+ tools.write_file(os.path.join(self._indir, 'version'), version,
+ binary=False)
+ self.assertEqual(version, state.GetVersion(self._indir))
+
+ def testAltFormat(self):
+ """Test that alternative formats can be used to extract"""
+ self._DoReadFileRealDtb('213_fdtmap_alt_format.dts')
+
+ try:
+ tmpdir, updated_fname = self._SetupImageInTmpdir()
+ with test_util.capture_sys_output() as (stdout, _):
+ self._DoBinman('extract', '-i', updated_fname, '-F', 'list')
+ self.assertEqual(
+ '''Flag (-F) Entry type Description
+fdt fdtmap Extract the devicetree blob from the fdtmap
+''',
+ stdout.getvalue())
+
+ dtb = os.path.join(tmpdir, 'fdt.dtb')
+ self._DoBinman('extract', '-i', updated_fname, '-F', 'fdt', '-f',
+ dtb, 'fdtmap')
+
+ # Check that we can read it and it can be scanned, meaning it does
+ # not have a 16-byte fdtmap header
+ data = tools.read_file(dtb)
+ dtb = fdt.Fdt.FromData(data)
+ dtb.Scan()
+
+ # Now check u-boot which has no alt_format
+ fname = os.path.join(tmpdir, 'fdt.dtb')
+ self._DoBinman('extract', '-i', updated_fname, '-F', 'dummy',
+ '-f', fname, 'u-boot')
+ data = tools.read_file(fname)
+ self.assertEqual(U_BOOT_DATA, data)
+
+ finally:
+ shutil.rmtree(tmpdir)
+
+ def testExtblobList(self):
+ """Test an image with an external blob list"""
+ data = self._DoReadFile('215_blob_ext_list.dts')
+ self.assertEqual(REFCODE_DATA + FSP_M_DATA, data)
+
+ def testExtblobListMissing(self):
+ """Test an image with a missing external blob"""
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFile('216_blob_ext_list_missing.dts')
+ self.assertIn("Filename 'missing-file' not found in input path",
+ str(e.exception))
+
+ def testExtblobListMissingOk(self):
+ """Test an image with an missing external blob that is allowed"""
+ with test_util.capture_sys_output() as (stdout, stderr):
+ self._DoTestFile('216_blob_ext_list_missing.dts',
+ allow_missing=True)
+ err = stderr.getvalue()
+ self.assertRegex(err, "Image 'image'.*missing.*: blob-ext")
+
+ def testFip(self):
+ """Basic test of generation of an ARM Firmware Image Package (FIP)"""
+ data = self._DoReadFile('203_fip.dts')
+ hdr, fents = fip_util.decode_fip(data)
+ self.assertEqual(fip_util.HEADER_MAGIC, hdr.name)
+ self.assertEqual(fip_util.HEADER_SERIAL, hdr.serial)
+ self.assertEqual(0x123, hdr.flags)
+
+ self.assertEqual(2, len(fents))
+
+ fent = fents[0]
+ self.assertEqual(
+ bytes([0x47, 0xd4, 0x08, 0x6d, 0x4c, 0xfe, 0x98, 0x46,
+ 0x9b, 0x95, 0x29, 0x50, 0xcb, 0xbd, 0x5a, 0x0]), fent.uuid)
+ self.assertEqual('soc-fw', fent.fip_type)
+ self.assertEqual(0x88, fent.offset)
+ self.assertEqual(len(ATF_BL31_DATA), fent.size)
+ self.assertEqual(0x123456789abcdef, fent.flags)
+ self.assertEqual(ATF_BL31_DATA, fent.data)
+ self.assertEqual(True, fent.valid)
+
+ fent = fents[1]
+ self.assertEqual(
+ bytes([0x65, 0x92, 0x27, 0x03, 0x2f, 0x74, 0xe6, 0x44,
+ 0x8d, 0xff, 0x57, 0x9a, 0xc1, 0xff, 0x06, 0x10]), fent.uuid)
+ self.assertEqual('scp-fwu-cfg', fent.fip_type)
+ self.assertEqual(0x8c, fent.offset)
+ self.assertEqual(len(ATF_BL31_DATA), fent.size)
+ self.assertEqual(0, fent.flags)
+ self.assertEqual(ATF_BL2U_DATA, fent.data)
+ self.assertEqual(True, fent.valid)
+
+ def testFipOther(self):
+ """Basic FIP with something that isn't a external blob"""
+ data = self._DoReadFile('204_fip_other.dts')
+ hdr, fents = fip_util.decode_fip(data)
+
+ self.assertEqual(2, len(fents))
+ fent = fents[1]
+ self.assertEqual('rot-cert', fent.fip_type)
+ self.assertEqual(b'aa', fent.data)
+
+ def testFipNoType(self):
+ """FIP with an entry of an unknown type"""
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFile('205_fip_no_type.dts')
+ self.assertIn("Must provide a fip-type (node name 'u-boot' is not a known FIP type)",
+ str(e.exception))
+
+ def testFipUuid(self):
+ """Basic FIP with a manual uuid"""
+ data = self._DoReadFile('206_fip_uuid.dts')
+ hdr, fents = fip_util.decode_fip(data)
+
+ self.assertEqual(2, len(fents))
+ fent = fents[1]
+ self.assertEqual(None, fent.fip_type)
+ self.assertEqual(
+ bytes([0xfc, 0x65, 0x13, 0x92, 0x4a, 0x5b, 0x11, 0xec,
+ 0x94, 0x35, 0xff, 0x2d, 0x1c, 0xfc, 0x79, 0x9c]),
+ fent.uuid)
+ self.assertEqual(U_BOOT_DATA, fent.data)
+
+ def testFipLs(self):
+ """Test listing a FIP"""
+ data = self._DoReadFileRealDtb('207_fip_ls.dts')
+ hdr, fents = fip_util.decode_fip(data)
+
+ tmpdir = None
+ try:
+ tmpdir, updated_fname = self._SetupImageInTmpdir()
+ with test_util.capture_sys_output() as (stdout, stderr):
+ self._DoBinman('ls', '-i', updated_fname)
+ finally:
+ if tmpdir:
+ shutil.rmtree(tmpdir)
+ lines = stdout.getvalue().splitlines()
+ expected = [
+'Name Image-pos Size Entry-type Offset Uncomp-size',
+'--------------------------------------------------------------',
+'image 0 2d3 section 0',
+' atf-fip 0 90 atf-fip 0',
+' soc-fw 88 4 blob-ext 88',
+' u-boot 8c 4 u-boot 8c',
+' fdtmap 90 243 fdtmap 90',
+]
+ self.assertEqual(expected, lines)
+
+ image = control.images['image']
+ entries = image.GetEntries()
+ fdtmap = entries['fdtmap']
+
+ fdtmap_data = data[fdtmap.image_pos:fdtmap.image_pos + fdtmap.size]
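+ # The fdtmap starts with an 8-byte magic string followed by 8 zero bytes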
+ magic = fdtmap_data[:8]
+ self.assertEqual(b'_FDTMAP_', magic)
+ self.assertEqual(tools.get_bytes(0, 8), fdtmap_data[8:16])
+
+ fdt_data = fdtmap_data[16:]
+ dtb = fdt.Fdt.FromData(fdt_data)
+ dtb.Scan()
+ props = self._GetPropTree(dtb, BASE_DTB_PROPS, prefix='/')
+ self.assertEqual({
+ 'atf-fip/soc-fw:image-pos': 136,
+ 'atf-fip/soc-fw:offset': 136,
+ 'atf-fip/soc-fw:size': 4,
+ 'atf-fip/u-boot:image-pos': 140,
+ 'atf-fip/u-boot:offset': 140,
+ 'atf-fip/u-boot:size': 4,
+ 'atf-fip:image-pos': 0,
+ 'atf-fip:offset': 0,
+ 'atf-fip:size': 144,
+ 'image-pos': 0,
+ 'offset': 0,
+ 'fdtmap:image-pos': fdtmap.image_pos,
+ 'fdtmap:offset': fdtmap.offset,
+ 'fdtmap:size': len(fdtmap_data),
+ 'size': len(data),
+ }, props)
+
+ def testFipExtractOneEntry(self):
+ """Test extracting a single entry fron an FIP"""
+ self._DoReadFileRealDtb('207_fip_ls.dts')
+ image_fname = tools.get_output_filename('image.bin')
+ fname = os.path.join(self._indir, 'output.extact')
+ control.ExtractEntries(image_fname, fname, None, ['atf-fip/u-boot'])
+ data = tools.read_file(fname)
+ self.assertEqual(U_BOOT_DATA, data)
+
+ def testFipReplace(self):
+ """Test replacing a single file in a FIP"""
+ expected = U_BOOT_DATA + tools.get_bytes(0x78, 50)
+ data = self._DoReadFileRealDtb('208_fip_replace.dts')
+ updated_fname = tools.get_output_filename('image-updated.bin')
+ tools.write_file(updated_fname, data)
+ entry_name = 'atf-fip/u-boot'
+ control.WriteEntry(updated_fname, entry_name, expected,
+ allow_resize=True)
+ actual = control.ReadEntry(updated_fname, entry_name)
+ self.assertEqual(expected, actual)
+
+ new_data = tools.read_file(updated_fname)
+ hdr, fents = fip_util.decode_fip(new_data)
+
+ self.assertEqual(2, len(fents))
+
+ # Check that the FIP entry is updated
+ fent = fents[1]
+ self.assertEqual(0x8c, fent.offset)
+ self.assertEqual(len(expected), fent.size)
+ self.assertEqual(0, fent.flags)
+ self.assertEqual(expected, fent.data)
+ self.assertEqual(True, fent.valid)
+
+ def testFipMissing(self):
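+ """Test a FIP with a missing external blob when allow-missing is set"""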
+ with test_util.capture_sys_output() as (stdout, stderr):
+ self._DoTestFile('209_fip_missing.dts', allow_missing=True)
+ err = stderr.getvalue()
+ self.assertRegex(err, "Image 'image'.*missing.*: rmm-fw")
+
+ def testFipSize(self):
+ """Test a FIP with a size property"""
+ data = self._DoReadFile('210_fip_size.dts')
+ self.assertEqual(0x100 + len(U_BOOT_DATA), len(data))
+ hdr, fents = fip_util.decode_fip(data)
+ self.assertEqual(fip_util.HEADER_MAGIC, hdr.name)
+ self.assertEqual(fip_util.HEADER_SERIAL, hdr.serial)
+
+ self.assertEqual(1, len(fents))
+
+ fent = fents[0]
+ self.assertEqual('soc-fw', fent.fip_type)
+ self.assertEqual(0x60, fent.offset)
+ self.assertEqual(len(ATF_BL31_DATA), fent.size)
+ self.assertEqual(ATF_BL31_DATA, fent.data)
+ self.assertEqual(True, fent.valid)
+
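+ # The remainder of the FIP, up to its 0x100 size, should be padded with 0xff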
+ rest = data[0x60 + len(ATF_BL31_DATA):0x100]
+ self.assertEqual(tools.get_bytes(0xff, len(rest)), rest)
+
+ def testFipBadAlign(self):
+ """Test that an invalid alignment value in a FIP is detected"""
+ with self.assertRaises(ValueError) as e:
+ self._DoTestFile('211_fip_bad_align.dts')
+ self.assertIn(
+ "Node \'/binman/atf-fip\': FIP alignment 31 must be a power of two",
+ str(e.exception))
+
+ def testFipCollection(self):
+ """Test using a FIP in a collection"""
+ data = self._DoReadFile('212_fip_collection.dts')
+ entry1 = control.images['image'].GetEntries()['collection']
+ data1 = data[:entry1.size]
+ hdr1, fents2 = fip_util.decode_fip(data1)
+
+ entry2 = control.images['image'].GetEntries()['atf-fip']
+ data2 = data[entry2.offset:entry2.offset + entry2.size]
+ hdr1, fents2 = fip_util.decode_fip(data2)
+
+ # The 'collection' entry should have U-Boot included at the end
+ self.assertEqual(entry1.size - len(U_BOOT_DATA), entry2.size)
+ self.assertEqual(data1, data2 + U_BOOT_DATA)
+ self.assertEqual(U_BOOT_DATA, data1[-4:])
+
+ # There should be a U-Boot after the final FIP
+ self.assertEqual(U_BOOT_DATA, data[-4:])
+
+ def testFakeBlob(self):
+ """Test handling of faking an external blob"""
+ with test_util.capture_sys_output() as (stdout, stderr):
+ self._DoTestFile('217_fake_blob.dts', allow_missing=True,
+ allow_fake_blobs=True)
+ err = stderr.getvalue()
+ self.assertRegex(
+ err,
+ "Image '.*' has faked external blobs and is non-functional: .*")
+
+ def testExtblobListFaked(self):
+ """Test an extblob with missing external blob that are faked"""
+ with test_util.capture_sys_output() as (stdout, stderr):
+ self._DoTestFile('216_blob_ext_list_missing.dts',
+ allow_fake_blobs=True)
+ err = stderr.getvalue()
+ self.assertRegex(err, "Image 'image'.*faked.*: blob-ext-list")
+
+ def testListBintools(self):
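+ """Test listing the available bintools"""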
+ args = ['tool', '--list']
+ with test_util.capture_sys_output() as (stdout, _):
+ self._DoBinman(*args)
+ out = stdout.getvalue().splitlines()
+ self.assertTrue(len(out) >= 2)
+
+ def testFetchBintools(self):
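+ """Test fetching bintools with the 'tool' subcommand"""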
+ def fail_download(url):
+ """Take the tools.download() function by raising an exception"""
+ raise urllib.error.URLError('my error')
+
+ args = ['tool']
+ with self.assertRaises(ValueError) as e:
+ self._DoBinman(*args)
+ self.assertIn("Invalid arguments to 'tool' subcommand",
+ str(e.exception))
+
+ args = ['tool', '--fetch']
+ with self.assertRaises(ValueError) as e:
+ self._DoBinman(*args)
+ self.assertIn('Please specify bintools to fetch', str(e.exception))
+
+ args = ['tool', '--fetch', '_testing']
+ with unittest.mock.patch.object(tools, 'download',
+ side_effect=fail_download):
+ with test_util.capture_sys_output() as (stdout, _):
+ self._DoBinman(*args)
+ self.assertIn('failed to fetch with all methods', stdout.getvalue())
+
+ def testBintoolDocs(self):
+ """Test for creation of bintool documentation"""
+ with test_util.capture_sys_output() as (stdout, stderr):
+ control.write_bintool_docs(control.bintool.Bintool.get_tool_list())
+ self.assertTrue(len(stdout.getvalue()) > 0)
+
+ def testBintoolDocsMissing(self):
+ """Test handling of missing bintool documentation"""
+ with self.assertRaises(ValueError) as e:
+ with test_util.capture_sys_output() as (stdout, stderr):
+ control.write_bintool_docs(
+ control.bintool.Bintool.get_tool_list(), 'mkimage')
+ self.assertIn('Documentation is missing for modules: mkimage',
+ str(e.exception))
+
+ def testListWithGenNode(self):
+ """Check handling of an FDT map when the section cannot be found"""
+ entry_args = {
+ 'of-list': 'test-fdt1 test-fdt2',
+ }
+ data = self._DoReadFileDtb(
+ '219_fit_gennode.dts',
+ entry_args=entry_args,
+ use_real_dtb=True,
+ extra_indirs=[os.path.join(self._indir, TEST_FDT_SUBDIR)])
+
+ tmpdir = None
+ try:
+ tmpdir, updated_fname = self._SetupImageInTmpdir()
+ with test_util.capture_sys_output() as (stdout, stderr):
+ self._RunBinman('ls', '-i', updated_fname)
+ finally:
+ if tmpdir:
+ shutil.rmtree(tmpdir)
+
+ def testFitSubentryUsesBintool(self):
+ """Test that binman FIT subentries can use bintools"""
+ command.test_result = self._HandleGbbCommand
+ entry_args = {
+ 'keydir': 'devkeys',
+ 'bmpblk': 'bmpblk.bin',
+ }
+ data, _, _, _ = self._DoReadFileDtb('220_fit_subentry_bintool.dts',
+ entry_args=entry_args)
+
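+ # Expected output from the faked GBB tool: the GBB data twice, then zero padding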
+ expected = (GBB_DATA + GBB_DATA + tools.get_bytes(0, 8) +
+ tools.get_bytes(0, 0x2180 - 16))
+ self.assertIn(expected, data)
+
+ def testFitSubentryMissingBintool(self):
+ """Test that binman reports missing bintools for FIT subentries"""
+ entry_args = {
+ 'keydir': 'devkeys',
+ }
+ with test_util.capture_sys_output() as (_, stderr):
+ self._DoTestFile('220_fit_subentry_bintool.dts',
+ force_missing_bintools='futility', entry_args=entry_args)
+ err = stderr.getvalue()
+ self.assertRegex(err, "Image 'image'.*missing bintools.*: futility")
+
+ def testFitSubentryHashSubnode(self):
+ """Test an image with a FIT inside"""
+ self._SetupSplElf()
+ data, _, _, out_dtb_name = self._DoReadFileDtb(
+ '221_fit_subentry_hash.dts', use_real_dtb=True, update_dtb=True)
+
+ mkimage_dtb = fdt.Fdt.FromData(data)
+ mkimage_dtb.Scan()
+ binman_dtb = fdt.Fdt(out_dtb_name)
+ binman_dtb.Scan()
+
+ # Check that binman didn't add hash values
+ fnode = binman_dtb.GetNode('/binman/fit/images/kernel/hash')
+ self.assertNotIn('value', fnode.props)
+
+ fnode = binman_dtb.GetNode('/binman/fit/images/fdt-1/hash')
+ self.assertNotIn('value', fnode.props)
+
+ # Check that mkimage added hash values
+ fnode = mkimage_dtb.GetNode('/images/kernel/hash')
+ self.assertIn('value', fnode.props)
+
+ fnode = mkimage_dtb.GetNode('/images/fdt-1/hash')
+ self.assertIn('value', fnode.props)
+
+ def testPackTeeOs(self):
+ """Test that an image with an TEE binary can be created"""
+ data = self._DoReadFile('222_tee_os.dts')
+ self.assertEqual(TEE_OS_DATA, data[:len(TEE_OS_DATA)])
+
+ def testPackTiDm(self):
+ """Test that an image with a TI DM binary can be created"""
+ data = self._DoReadFile('225_ti_dm.dts')
+ self.assertEqual(TI_DM_DATA, data[:len(TI_DM_DATA)])
+
+ def testFitFdtOper(self):
+ """Check handling of a specified FIT operation"""
+ entry_args = {
+ 'of-list': 'test-fdt1 test-fdt2',
+ 'default-dt': 'test-fdt2',
+ }
+ self._DoReadFileDtb(
+ '223_fit_fdt_oper.dts',
+ entry_args=entry_args,
+ extra_indirs=[os.path.join(self._indir, TEST_FDT_SUBDIR)])[0]
+
+ def testFitFdtBadOper(self):
+ """Check handling of an FDT map when the section cannot be found"""
+ with self.assertRaises(ValueError) as exc:
+ self._DoReadFileDtb('224_fit_bad_oper.dts')
+ self.assertIn("Node '/binman/fit': subnode 'images/@fdt-SEQ': Unknown operation 'unknown'",
+ str(exc.exception))
+
+ def test_uses_expand_size(self):
+ """Test that the 'expand-size' property cannot be used anymore"""
+ with self.assertRaises(ValueError) as e:
+ data = self._DoReadFile('225_expand_size_bad.dts')
+ self.assertIn(
+ "Node '/binman/u-boot': Please use 'extend-size' instead of 'expand-size'",
+ str(e.exception))
+
+ def testFitSplitElf(self):
+ """Test an image with an FIT with an split-elf operation"""
+ if not elf.ELF_TOOLS:
+ self.skipTest('Python elftools not available')
+ entry_args = {
+ 'of-list': 'test-fdt1 test-fdt2',
+ 'default-dt': 'test-fdt2',
+ 'atf-bl31-path': 'bl31.elf',
+ 'tee-os-path': 'tee.elf',
+ }
+ test_subdir = os.path.join(self._indir, TEST_FDT_SUBDIR)
+ data = self._DoReadFileDtb(
+ '226_fit_split_elf.dts',
+ entry_args=entry_args,
+ extra_indirs=[test_subdir])[0]
+
+ self.assertEqual(U_BOOT_NODTB_DATA, data[-len(U_BOOT_NODTB_DATA):])
+ fit_data = data[len(U_BOOT_DATA):-len(U_BOOT_NODTB_DATA)]
+
+ base_keys = {'description', 'type', 'arch', 'os', 'compression',
+ 'data', 'load'}
+ dtb = fdt.Fdt.FromData(fit_data)
+ dtb.Scan()
+
+ elf_data = tools.read_file(os.path.join(self._indir, 'bl31.elf'))
+ segments, entry = elf.read_loadable_segments(elf_data)
+
+ # We assume there are two segments
+ self.assertEqual(2, len(segments))
+
+ atf1 = dtb.GetNode('/images/atf-1')
+ _, start, data = segments[0]
+ self.assertEqual(base_keys | {'entry'}, atf1.props.keys())
+ self.assertEqual(entry,
+ fdt_util.fdt32_to_cpu(atf1.props['entry'].value))
+ self.assertEqual(start,
+ fdt_util.fdt32_to_cpu(atf1.props['load'].value))
+ self.assertEqual(data, atf1.props['data'].bytes)
+
+ hash_node = atf1.FindNode('hash')
+ self.assertIsNotNone(hash_node)
+ self.assertEqual({'algo', 'value'}, hash_node.props.keys())
+
+ atf2 = dtb.GetNode('/images/atf-2')
+ self.assertEqual(base_keys, atf2.props.keys())
+ _, start, data = segments[1]
+ self.assertEqual(start,
+ fdt_util.fdt32_to_cpu(atf2.props['load'].value))
+ self.assertEqual(data, atf2.props['data'].bytes)
+
+ hash_node = atf2.FindNode('hash')
+ self.assertIsNotNone(hash_node)
+ self.assertEqual({'algo', 'value'}, hash_node.props.keys())
+
+ hash_node = dtb.GetNode('/images/tee-1/hash-1')
+ self.assertIsNotNone(hash_node)
+ self.assertEqual({'algo', 'value'}, hash_node.props.keys())
+
+ conf = dtb.GetNode('/configurations')
+ self.assertEqual({'default'}, conf.props.keys())
+
+ for subnode in conf.subnodes:
+ self.assertEqual({'description', 'fdt', 'loadables'},
+ subnode.props.keys())
+ self.assertEqual(
+ ['atf-1', 'atf-2', 'tee-1', 'tee-2'],
+ fdt_util.GetStringList(subnode, 'loadables'))
+
+ def _check_bad_fit(self, dts):
+ """Check a bad FIT
+
+ This runs with the given dts and returns the assertion raised
+
+ Args:
+ dts (str): dts filename to use
+
+ Returns:
+ str: Assertion string raised
+ """
+ entry_args = {
+ 'of-list': 'test-fdt1 test-fdt2',
+ 'default-dt': 'test-fdt2',
+ 'atf-bl31-path': 'bl31.elf',
+ 'tee-os-path': 'tee.elf',
+ }
+ test_subdir = os.path.join(self._indir, TEST_FDT_SUBDIR)
+ with self.assertRaises(ValueError) as exc:
+ self._DoReadFileDtb(dts, entry_args=entry_args,
+ extra_indirs=[test_subdir])[0]
+ return str(exc.exception)
+
+ def testFitSplitElfBadElf(self):
+ """Test a FIT split-elf operation with an invalid ELF file"""
+ if not elf.ELF_TOOLS:
+ self.skipTest('Python elftools not available')
+ TestFunctional._MakeInputFile('bad.elf', tools.get_bytes(100, 100))
+ entry_args = {
+ 'of-list': 'test-fdt1 test-fdt2',
+ 'default-dt': 'test-fdt2',
+ 'atf-bl31-path': 'bad.elf',
+ 'tee-os-path': 'tee.elf',
+ }
+ test_subdir = os.path.join(self._indir, TEST_FDT_SUBDIR)
+ with self.assertRaises(ValueError) as exc:
+ self._DoReadFileDtb(
+ '226_fit_split_elf.dts',
+ entry_args=entry_args,
+ extra_indirs=[test_subdir])[0]
+ self.assertIn(
+ "Node '/binman/fit': subnode 'images/@atf-SEQ': Failed to read ELF file: Magic number does not match",
+ str(exc.exception))
+
+ def checkFitSplitElf(self, **kwargs):
+ """Test an split-elf FIT with a missing ELF file
+
+ Args:
+ kwargs (dict of str): Arguments to pass to _DoTestFile()
+
+ Returns:
+ tuple:
+ str: stdout result
+ str: stderr result
+ """
+ entry_args = {
+ 'of-list': 'test-fdt1 test-fdt2',
+ 'default-dt': 'test-fdt2',
+ 'atf-bl31-path': 'bl31.elf',
+ 'tee-os-path': 'missing.elf',
+ }
+ test_subdir = os.path.join(self._indir, TEST_FDT_SUBDIR)
+ with test_util.capture_sys_output() as (stdout, stderr):
+ self._DoTestFile(
+ '226_fit_split_elf.dts', entry_args=entry_args,
+ extra_indirs=[test_subdir], verbosity=3, **kwargs)
+ out = stdout.getvalue()
+ err = stderr.getvalue()
+ return out, err
+
+ def testFitSplitElfBadDirective(self):
+ """Test a FIT split-elf invalid fit,xxx directive in an image node"""
+ if not elf.ELF_TOOLS:
+ self.skipTest('Python elftools not available')
+ err = self._check_bad_fit('227_fit_bad_dir.dts')
+ self.assertIn(
+ "Node '/binman/fit': subnode 'images/@atf-SEQ': Unknown directive 'fit,something'",
+ err)
+
+ def testFitSplitElfBadDirectiveConfig(self):
+ """Test a FIT split-elf with invalid fit,xxx directive in config"""
+ if not elf.ELF_TOOLS:
+ self.skipTest('Python elftools not available')
+ err = self._check_bad_fit('228_fit_bad_dir_config.dts')
+ self.assertEqual(
+ "Node '/binman/fit': subnode 'configurations/@config-SEQ': Unknown directive 'fit,config'",
+ err)
+
+ def testFitSplitElfMissing(self):
+ """Test an split-elf FIT with a missing ELF file"""
+ if not elf.ELF_TOOLS:
+ self.skipTest('Python elftools not available')
+ out, err = self.checkFitSplitElf(allow_missing=True)
+ self.assertRegex(
+ err,
+ "Image '.*' is missing external blobs and is non-functional: .*")
+ self.assertNotRegex(out, '.*Faked blob.*')
+ fname = tools.get_output_filename('binman-fake/missing.elf')
+ self.assertFalse(os.path.exists(fname))
+
+ def testFitSplitElfFaked(self):
+ """Test an split-elf FIT with faked ELF file"""
+ if not elf.ELF_TOOLS:
+ self.skipTest('Python elftools not available')
+ out, err = self.checkFitSplitElf(allow_missing=True, allow_fake_blobs=True)
+ self.assertRegex(
+ err,
+ "Image '.*' is missing external blobs and is non-functional: .*")
+ self.assertRegex(
+ out,
+ "Entry '/binman/fit/images/@tee-SEQ/tee-os': Faked blob '.*binman-fake/missing.elf")
+ fname = tools.get_output_filename('binman-fake/missing.elf')
+ self.assertTrue(os.path.exists(fname))
+
+ def testMkimageMissingBlob(self):
+ """Test using mkimage to build an image"""
+ with test_util.capture_sys_output() as (stdout, stderr):
+ self._DoTestFile('229_mkimage_missing.dts', allow_missing=True,
+ allow_fake_blobs=True)
+ err = stderr.getvalue()
+ self.assertRegex(
+ err,
+ "Image '.*' has faked external blobs and is non-functional: .*")
+
+ def testPreLoad(self):
+ """Test an image with a pre-load header"""
+ entry_args = {
+ 'pre-load-key-path': os.path.join(self._binman_dir, 'test'),
+ }
+ data = self._DoReadFileDtb(
+ '230_pre_load.dts', entry_args=entry_args,
+ extra_indirs=[os.path.join(self._binman_dir, 'test')])[0]
+ self.assertEqual(PRE_LOAD_MAGIC, data[:len(PRE_LOAD_MAGIC)])
+ self.assertEqual(PRE_LOAD_VERSION, data[4:4 + len(PRE_LOAD_VERSION)])
+ self.assertEqual(PRE_LOAD_HDR_SIZE, data[8:8 + len(PRE_LOAD_HDR_SIZE)])
+
+ def testPreLoadNoKey(self):
+ """Test an image with a pre-load heade0r with missing key"""
+ with self.assertRaises(FileNotFoundError) as exc:
+ self._DoReadFile('230_pre_load.dts')
+ self.assertIn("No such file or directory: 'dev.key'",
+ str(exc.exception))
+
+ def testPreLoadPkcs(self):
+ """Test an image with a pre-load header with padding pkcs"""
+ entry_args = {
+ 'pre-load-key-path': os.path.join(self._binman_dir, 'test'),
+ }
+ data = self._DoReadFileDtb('231_pre_load_pkcs.dts',
+ entry_args=entry_args)[0]
+ self.assertEqual(PRE_LOAD_MAGIC, data[:len(PRE_LOAD_MAGIC)])
+ self.assertEqual(PRE_LOAD_VERSION, data[4:4 + len(PRE_LOAD_VERSION)])
+ self.assertEqual(PRE_LOAD_HDR_SIZE, data[8:8 + len(PRE_LOAD_HDR_SIZE)])
+
+ def testPreLoadPss(self):
+ """Test an image with a pre-load header with padding pss"""
+ entry_args = {
+ 'pre-load-key-path': os.path.join(self._binman_dir, 'test'),
+ }
+ data = self._DoReadFileDtb('232_pre_load_pss.dts',
+ entry_args=entry_args)[0]
+ self.assertEqual(PRE_LOAD_MAGIC, data[:len(PRE_LOAD_MAGIC)])
+ self.assertEqual(PRE_LOAD_VERSION, data[4:4 + len(PRE_LOAD_VERSION)])
+ self.assertEqual(PRE_LOAD_HDR_SIZE, data[8:8 + len(PRE_LOAD_HDR_SIZE)])
+
+ def testPreLoadInvalidPadding(self):
+ """Test an image with a pre-load header with an invalid padding"""
+ entry_args = {
+ 'pre-load-key-path': os.path.join(self._binman_dir, 'test'),
+ }
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFileDtb('233_pre_load_invalid_padding.dts',
+ entry_args=entry_args)
+
+ def testPreLoadInvalidSha(self):
+ """Test an image with a pre-load header with an invalid hash"""
+ entry_args = {
+ 'pre-load-key-path': os.path.join(self._binman_dir, 'test'),
+ }
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFileDtb('234_pre_load_invalid_sha.dts',
+ entry_args=entry_args)
+
+ def testPreLoadInvalidAlgo(self):
+ """Test an image with a pre-load header with an invalid algo"""
+ with self.assertRaises(ValueError) as e:
+ data = self._DoReadFile('235_pre_load_invalid_algo.dts')
+
+ def testPreLoadInvalidKey(self):
+ """Test an image with a pre-load header with an invalid key"""
+ entry_args = {
+ 'pre-load-key-path': os.path.join(self._binman_dir, 'test'),
+ }
+ with self.assertRaises(ValueError) as e:
+ data = self._DoReadFileDtb('236_pre_load_invalid_key.dts',
+ entry_args=entry_args)
+
+ def _CheckSafeUniqueNames(self, *images):
+ """Check all entries of given images for unsafe unique names"""
+ for image in images:
+ entries = {}
+ image._CollectEntries(entries, {}, image)
+ for entry in entries.values():
+ uniq = entry.GetUniqueName()
+
+ # Used as part of a filename, so must not be absolute paths.
+ self.assertFalse(os.path.isabs(uniq))
+
+ def testSafeUniqueNames(self):
+ """Test entry unique names are safe in single image configuration"""
+ data = self._DoReadFileRealDtb('237_unique_names.dts')
+
+ orig_image = control.images['image']
+ image_fname = tools.get_output_filename('image.bin')
+ image = Image.FromFile(image_fname)
+
+ self._CheckSafeUniqueNames(orig_image, image)
+
+ def testSafeUniqueNamesMulti(self):
+ """Test entry unique names are safe with multiple images"""
+ data = self._DoReadFileRealDtb('238_unique_names_multi.dts')
+
+ orig_image = control.images['image']
+ image_fname = tools.get_output_filename('image.bin')
+ image = Image.FromFile(image_fname)
+
+ self._CheckSafeUniqueNames(orig_image, image)
+
+ def testReplaceCmdWithBintool(self):
+ """Test replacing an entry that needs a bintool to pack"""
+ data = self._DoReadFileRealDtb('239_replace_with_bintool.dts')
+ expected = U_BOOT_DATA + b'aa'
+ self.assertEqual(expected, data[:len(expected)])
+
+ try:
+ tmpdir, updated_fname = self._SetupImageInTmpdir()
+ fname = os.path.join(tmpdir, 'update-testing.bin')
+ tools.write_file(fname, b'zz')
+ self._DoBinman('replace', '-i', updated_fname,
+ '_testing', '-f', fname)
+
+ data = tools.read_file(updated_fname)
+ expected = U_BOOT_DATA + b'zz'
+ self.assertEqual(expected, data[:len(expected)])
+ finally:
+ shutil.rmtree(tmpdir)
+
+ def testReplaceCmdOtherWithBintool(self):
+ """Test replacing an entry when another needs a bintool to pack"""
+ data = self._DoReadFileRealDtb('239_replace_with_bintool.dts')
+ expected = U_BOOT_DATA + b'aa'
+ self.assertEqual(expected, data[:len(expected)])
+
+ try:
+ tmpdir, updated_fname = self._SetupImageInTmpdir()
+ fname = os.path.join(tmpdir, 'update-u-boot.bin')
+ tools.write_file(fname, b'x' * len(U_BOOT_DATA))
+ self._DoBinman('replace', '-i', updated_fname,
+ 'u-boot', '-f', fname)
+
+ data = tools.read_file(updated_fname)
+ expected = b'x' * len(U_BOOT_DATA) + b'aa'
+ self.assertEqual(expected, data[:len(expected)])
+ finally:
+ shutil.rmtree(tmpdir)
+
+ def testReplaceResizeNoRepackSameSize(self):
+ """Test replacing entries with same-size data without repacking"""
+ expected = b'x' * len(U_BOOT_DATA)
+ data, expected_fdtmap, _ = self._RunReplaceCmd('u-boot', expected)
+ self.assertEqual(expected, data)
+
+ path, fdtmap = state.GetFdtContents('fdtmap')
+ self.assertIsNotNone(path)
+ self.assertEqual(expected_fdtmap, fdtmap)
+
+ def testReplaceResizeNoRepackSmallerSize(self):
+ """Test replacing entries with smaller-size data without repacking"""
+ new_data = b'x'
+ data, expected_fdtmap, _ = self._RunReplaceCmd('u-boot', new_data)
+ expected = new_data.ljust(len(U_BOOT_DATA), b'\0')
+ self.assertEqual(expected, data)
+
+ path, fdtmap = state.GetFdtContents('fdtmap')
+ self.assertIsNotNone(path)
+ self.assertEqual(expected_fdtmap, fdtmap)
+
+ def testExtractFit(self):
+ """Test extracting a FIT section"""
+ self._DoReadFileRealDtb('240_fit_extract_replace.dts')
+ image_fname = tools.get_output_filename('image.bin')
+
+ fit_data = control.ReadEntry(image_fname, 'fit')
+ fit = fdt.Fdt.FromData(fit_data)
+ fit.Scan()
+
+ # Check subentry data inside the extracted fit
+ for node_path, expected in [
+ ('/images/kernel', U_BOOT_DATA),
+ ('/images/fdt-1', U_BOOT_NODTB_DATA),
+ ('/images/scr-1', COMPRESS_DATA),
+ ]:
+ node = fit.GetNode(node_path)
+ data = fit.GetProps(node)['data'].bytes
+ self.assertEqual(expected, data)
+
+ def testExtractFitSubentries(self):
+ """Test extracting FIT section subentries"""
+ self._DoReadFileRealDtb('240_fit_extract_replace.dts')
+ image_fname = tools.get_output_filename('image.bin')
+
+ for entry_path, expected in [
+ ('fit/kernel', U_BOOT_DATA),
+ ('fit/kernel/u-boot', U_BOOT_DATA),
+ ('fit/fdt-1', U_BOOT_NODTB_DATA),
+ ('fit/fdt-1/u-boot-nodtb', U_BOOT_NODTB_DATA),
+ ('fit/scr-1', COMPRESS_DATA),
+ ('fit/scr-1/blob', COMPRESS_DATA),
+ ]:
+ data = control.ReadEntry(image_fname, entry_path)
+ self.assertEqual(expected, data)
+
+ def testReplaceFitSubentryLeafSameSize(self):
+ """Test replacing a FIT leaf subentry with same-size data"""
+ new_data = b'x' * len(U_BOOT_DATA)
+ data, expected_fdtmap, _ = self._RunReplaceCmd(
+ 'fit/kernel/u-boot', new_data,
+ dts='240_fit_extract_replace.dts')
+ self.assertEqual(new_data, data)
+
+ path, fdtmap = state.GetFdtContents('fdtmap')
+ self.assertIsNotNone(path)
+ self.assertEqual(expected_fdtmap, fdtmap)
+
+ def testReplaceFitSubentryLeafBiggerSize(self):
+ """Test replacing a FIT leaf subentry with bigger-size data"""
+ new_data = b'ub' * len(U_BOOT_NODTB_DATA)
+ data, expected_fdtmap, _ = self._RunReplaceCmd(
+ 'fit/fdt-1/u-boot-nodtb', new_data,
+ dts='240_fit_extract_replace.dts')
+ self.assertEqual(new_data, data)
+
+ # Will be repacked, so fdtmap must change
+ path, fdtmap = state.GetFdtContents('fdtmap')
+ self.assertIsNotNone(path)
+ self.assertNotEqual(expected_fdtmap, fdtmap)
+
+ def testReplaceFitSubentryLeafSmallerSize(self):
+ """Test replacing a FIT leaf subentry with smaller-size data"""
+ new_data = b'x'
+ expected = new_data.ljust(len(U_BOOT_NODTB_DATA), b'\0')
+ data, expected_fdtmap, _ = self._RunReplaceCmd(
+ 'fit/fdt-1/u-boot-nodtb', new_data,
+ dts='240_fit_extract_replace.dts')
+ self.assertEqual(expected, data)
+
+ path, fdtmap = state.GetFdtContents('fdtmap')
+ self.assertIsNotNone(path)
+ self.assertEqual(expected_fdtmap, fdtmap)
+
+ def testReplaceSectionSimple(self):
+ """Test replacing a simple section with same-sized data"""
+ new_data = b'w' * len(COMPRESS_DATA + U_BOOT_DATA)
+ data, expected_fdtmap, image = self._RunReplaceCmd('section',
+ new_data, dts='241_replace_section_simple.dts')
+ self.assertEqual(new_data, data)
+
+ entries = image.GetEntries()
+ self.assertIn('section', entries)
+ entry = entries['section']
+ self.assertEqual(len(new_data), entry.size)
+
+ def testReplaceSectionLarger(self):
+ """Test replacing a simple section with larger data"""
+ new_data = b'w' * (len(COMPRESS_DATA + U_BOOT_DATA) + 1)
+ data, expected_fdtmap, image = self._RunReplaceCmd('section',
+ new_data, dts='241_replace_section_simple.dts')
+ self.assertEqual(new_data, data)
+
+ entries = image.GetEntries()
+ self.assertIn('section', entries)
+ entry = entries['section']
+ self.assertEqual(len(new_data), entry.size)
+ fentry = entries['fdtmap']
+ self.assertEqual(entry.offset + entry.size, fentry.offset)
+
+ def testReplaceSectionSmaller(self):
+ """Test replacing a simple section with smaller data"""
+ new_data = b'w' * (len(COMPRESS_DATA + U_BOOT_DATA) - 1) + b'\0'
+ data, expected_fdtmap, image = self._RunReplaceCmd('section',
+ new_data, dts='241_replace_section_simple.dts')
+ self.assertEqual(new_data, data)
+
+ # The new size is the same as the old, just with a pad byte at the end
+ entries = image.GetEntries()
+ self.assertIn('section', entries)
+ entry = entries['section']
+ self.assertEqual(len(new_data), entry.size)
+
+ def testReplaceSectionSmallerAllow(self):
+ """Test failing to replace a simple section with smaller data"""
+ new_data = b'w' * (len(COMPRESS_DATA + U_BOOT_DATA) - 1)
+ try:
+ state.SetAllowEntryContraction(True)
+ with self.assertRaises(ValueError) as exc:
+ self._RunReplaceCmd('section', new_data,
+ dts='241_replace_section_simple.dts')
+ finally:
+ state.SetAllowEntryContraction(False)
+
+ # Since we have no information about the position of things within the
+ # section, we cannot adjust the position of /section-u-boot so it ends
+ # up outside the section
+ self.assertIn(
+ "Node '/section/u-boot': Offset 0x24 (36) size 0x4 (4) is outside "
+ "the section '/section' starting at 0x0 (0) of size 0x27 (39)",
+ str(exc.exception))
+
+ def testMkimageImagename(self):
+ """Test using mkimage with -n holding the data too"""
+ self._SetupSplElf()
+ data = self._DoReadFile('242_mkimage_name.dts')
+
+ # Check that the data appears in the file somewhere
+ self.assertIn(U_BOOT_SPL_DATA, data)
+
+ # Get struct legacy_img_hdr -> ih_name
+ name = data[0x20:0x40]
+
+ # Build the filename that we expect to be placed in there, by virtue of
+ # the -n parameter
+ expect = os.path.join(tools.get_output_dir(), 'mkimage.mkimage')
+
+ # Check that the image name is set to the temporary filename used
+ self.assertEqual(expect.encode('utf-8')[:0x20], name)
+
+ def testMkimageImage(self):
+ """Test using mkimage with -n holding the data too"""
+ self._SetupSplElf()
+ data = self._DoReadFile('243_mkimage_image.dts')
+
+ # Check that the data appears in the file somewhere
+ self.assertIn(U_BOOT_SPL_DATA, data)
+
+ # Get struct legacy_img_hdr -> ih_name
+ name = data[0x20:0x40]
+
+ # Build the filename that we expect to be placed in there, by virtue of
+ # the -n parameter
+ expect = os.path.join(tools.get_output_dir(), 'mkimage-n.mkimage')
+
+ # Check that the image name is set to the temporary filename used
+ self.assertEqual(expect.encode('utf-8')[:0x20], name)
+
+ # Check that the correct data is in the imagename file
+ self.assertEqual(U_BOOT_DATA, tools.read_file(expect))
+
+ def testMkimageImageNoContent(self):
+ """Test using mkimage with -n and no data"""
+ self._SetupSplElf()
+ with self.assertRaises(ValueError) as exc:
+ self._DoReadFile('244_mkimage_image_no_content.dts')
+ self.assertIn('Could not complete processing of contents',
+ str(exc.exception))
+
+ def testMkimageImageBad(self):
+ """Test using mkimage with imagename node and data-to-imagename"""
+ self._SetupSplElf()
+ with self.assertRaises(ValueError) as exc:
+ self._DoReadFile('245_mkimage_image_bad.dts')
+ self.assertIn('Cannot use both imagename node and data-to-imagename',
+ str(exc.exception))
+
+ def testCollectionOther(self):
+ """Test a collection where the data comes from another section"""
+ data = self._DoReadFile('246_collection_other.dts')
+ self.assertEqual(U_BOOT_NODTB_DATA + U_BOOT_DTB_DATA +
+ tools.get_bytes(0xff, 2) + U_BOOT_NODTB_DATA +
+ tools.get_bytes(0xfe, 3) + U_BOOT_DTB_DATA,
+ data)
+
+ def testMkimageCollection(self):
+ """Test using a collection referring to an entry in a mkimage entry"""
+ self._SetupSplElf()
+ data = self._DoReadFile('247_mkimage_coll.dts')
+ expect = U_BOOT_SPL_DATA + U_BOOT_DATA
+ self.assertEqual(expect, data[:len(expect)])
+
+ def testCompressDtbPrependInvalid(self):
+ """Test that invalid header is detected"""
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFileDtb('248_compress_dtb_prepend_invalid.dts')
+ self.assertIn("Node '/binman/u-boot-dtb': Invalid prepend in "
+ "'u-boot-dtb': 'invalid'", str(e.exception))
+
+ def testCompressDtbPrependLength(self):
+ """Test that compress with length header works as expected"""
+ data = self._DoReadFileRealDtb('249_compress_dtb_prepend_length.dts')
+ image = control.images['image']
+ entries = image.GetEntries()
+ self.assertIn('u-boot-dtb', entries)
+ u_boot_dtb = entries['u-boot-dtb']
+ self.assertIn('fdtmap', entries)
+ fdtmap = entries['fdtmap']
+
+ image_fname = tools.get_output_filename('image.bin')
+ orig = control.ReadEntry(image_fname, 'u-boot-dtb')
+ dtb = fdt.Fdt.FromData(orig)
+ dtb.Scan()
+ props = self._GetPropTree(dtb, ['size', 'uncomp-size'])
+ expected = {
+ 'u-boot:size': len(U_BOOT_DATA),
+ 'u-boot-dtb:uncomp-size': len(orig),
+ 'u-boot-dtb:size': u_boot_dtb.size,
+ 'fdtmap:size': fdtmap.size,
+ 'size': len(data),
+ }
+ self.assertEqual(expected, props)
+
+ # Check the layout: u-boot data, then a 4-byte little-endian length, then the compressed DTB
+ self.assertEqual(U_BOOT_DATA, data[:len(U_BOOT_DATA)])
+ rest = data[len(U_BOOT_DATA):]
+ comp_data_len = struct.unpack('<I', rest[:4])[0]
+ comp_data = rest[4:4 + comp_data_len]
+ orig2 = self._decompress(comp_data)
+ self.assertEqual(orig, orig2)
+
+ def testInvalidCompress(self):
+ """Test that invalid compress algorithm is detected"""
+ with self.assertRaises(ValueError) as e:
+ self._DoTestFile('250_compress_dtb_invalid.dts')
+ self.assertIn("Unknown algorithm 'invalid'", str(e.exception))
+
+ def testCompUtilCompressions(self):
+ """Test compression algorithms"""
+ for bintool in self.comp_bintools.values():
+ self._CheckBintool(bintool)
+ data = bintool.compress(COMPRESS_DATA)
+ self.assertNotEqual(COMPRESS_DATA, data)
+ orig = bintool.decompress(data)
+ self.assertEqual(COMPRESS_DATA, orig)
+
+ def testCompUtilVersions(self):
+ """Test tool version of compression algorithms"""
+ for bintool in self.comp_bintools.values():
+ self._CheckBintool(bintool)
+ version = bintool.version()
+ self.assertRegex(version, '^v?[0-9]+[0-9.]*')
+
+ def testCompUtilPadding(self):
+ """Test padding of compression algorithms"""
+ # Skip zstd because it doesn't support padding
+ for bintool in [v for k,v in self.comp_bintools.items() if k != 'zstd']:
+ self._CheckBintool(bintool)
+ data = bintool.compress(COMPRESS_DATA)
+ self.assertNotEqual(COMPRESS_DATA, data)
+ data += tools.get_bytes(0, 64)
+ orig = bintool.decompress(data)
+ self.assertEqual(COMPRESS_DATA, orig)
+
+ def testCompressDtbZstd(self):
+ """Test that zstd compress of device-tree files failed"""
+ with self.assertRaises(ValueError) as e:
+ self._DoTestFile('251_compress_dtb_zstd.dts')
+ self.assertIn("Node '/binman/u-boot-dtb': The zstd compression "
+ "requires a length header", str(e.exception))
+
+ def testMkimageMultipleDataFiles(self):
+ """Test passing multiple files to mkimage in a mkimage entry"""
+ self._SetupSplElf()
+ self._SetupTplElf()
+ data = self._DoReadFile('252_mkimage_mult_data.dts')
+ # Sizes of the files are packed as 4-byte big-endian values
+ expect = struct.pack('>I', len(U_BOOT_TPL_DATA))
+ expect += struct.pack('>I', len(U_BOOT_SPL_DATA))
+ # Size info is always followed by a 4B zero value.
+ expect += tools.get_bytes(0, 4)
+ expect += U_BOOT_TPL_DATA
+ # All but the last file are 4-byte aligned
+ align_pad = len(U_BOOT_TPL_DATA) % 4
+ if align_pad:
+ expect += tools.get_bytes(0, align_pad)
+ expect += U_BOOT_SPL_DATA
+ self.assertEqual(expect, data[-len(expect):])
+
+ def testMkimageMultipleExpanded(self):
+ """Test passing multiple files to mkimage in a mkimage entry"""
+ self._SetupSplElf()
+ self._SetupTplElf()
+ entry_args = {
+ 'spl-bss-pad': 'y',
+ 'spl-dtb': 'y',
+ }
+ data = self._DoReadFileDtb('252_mkimage_mult_data.dts',
+ use_expanded=True, entry_args=entry_args)[0]
+ pad_len = 10
+ tpl_expect = U_BOOT_TPL_DATA
+ spl_expect = U_BOOT_SPL_NODTB_DATA + tools.get_bytes(0, pad_len)
+ spl_expect += U_BOOT_SPL_DTB_DATA
+
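+ # Skip the 64-byte mkimage legacy image header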
+ content = data[0x40:]
+ lens = struct.unpack('>III', content[:12])
+
+ # Sizes of the files are packed as 4-byte big-endian values
+ # Size info is always followed by a 4B zero value.
+ self.assertEqual(len(tpl_expect), lens[0])
+ self.assertEqual(len(spl_expect), lens[1])
+ self.assertEqual(0, lens[2])
+
+ rest = content[12:]
+ self.assertEqual(tpl_expect, rest[:len(tpl_expect)])
+
+ rest = rest[len(tpl_expect):]
+ align_pad = len(tpl_expect) % 4
+ self.assertEqual(tools.get_bytes(0, align_pad), rest[:align_pad])
+ rest = rest[align_pad:]
+ self.assertEqual(spl_expect, rest)
+
+ def testMkimageMultipleNoContent(self):
+ """Test passing multiple data files to mkimage with one data file having no content"""
+ self._SetupSplElf()
+ with self.assertRaises(ValueError) as exc:
+ self._DoReadFile('253_mkimage_mult_no_content.dts')
+ self.assertIn('Could not complete processing of contents',
+ str(exc.exception))
+
+ def testMkimageFilename(self):
+ """Test using mkimage to build a binary with a filename"""
+ self._SetupSplElf()
+ retcode = self._DoTestFile('254_mkimage_filename.dts')
+ self.assertEqual(0, retcode)
+ fname = tools.get_output_filename('mkimage-test.bin')
+ self.assertTrue(os.path.exists(fname))
+
+ def testVpl(self):
+ """Test that an image with VPL and its device tree can be created"""
+ # ELF file with a '__bss_size' symbol
+ self._SetupVplElf()
+ data = self._DoReadFile('255_u_boot_vpl.dts')
+ self.assertEqual(U_BOOT_VPL_DATA + U_BOOT_VPL_DTB_DATA, data)
+
+ def testVplNoDtb(self):
+ """Test that an image with vpl/u-boot-vpl-nodtb.bin can be created"""
+ self._SetupVplElf()
+ data = self._DoReadFile('256_u_boot_vpl_nodtb.dts')
+ self.assertEqual(U_BOOT_VPL_NODTB_DATA,
+ data[:len(U_BOOT_VPL_NODTB_DATA)])
+
+ def testExpandedVpl(self):
+ """Test that an expanded entry type is selected for TPL when needed"""
+ self._SetupVplElf()
+
+ entry_args = {
+ 'vpl-bss-pad': 'y',
+ 'vpl-dtb': 'y',
+ }
+ self._DoReadFileDtb('257_fdt_incl_vpl.dts', use_expanded=True,
+ entry_args=entry_args)
+ image = control.images['image']
+ entries = image.GetEntries()
+ self.assertEqual(1, len(entries))
+
+ # We only have u-boot-vpl, which should be expanded
+ self.assertIn('u-boot-vpl', entries)
+ entry = entries['u-boot-vpl']
+ self.assertEqual('u-boot-vpl-expanded', entry.etype)
+ subent = entry.GetEntries()
+ self.assertEqual(3, len(subent))
+ self.assertIn('u-boot-vpl-nodtb', subent)
+ self.assertIn('u-boot-vpl-bss-pad', subent)
+ self.assertIn('u-boot-vpl-dtb', subent)
+
+ def testVplBssPadMissing(self):
+ """Test that a missing symbol is detected"""
+ self._SetupVplElf('u_boot_ucode_ptr')
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFile('258_vpl_bss_pad.dts')
+ self.assertIn('Expected __bss_size symbol in vpl/u-boot-vpl',
+ str(e.exception))
+
+ def testSymlink(self):
+ """Test that image files can be symlinked"""
+ retcode = self._DoTestFile('259_symlink.dts', debug=True, map=True)
+ self.assertEqual(0, retcode)
+ image = control.images['test_image']
+ fname = tools.get_output_filename('test_image.bin')
+ sname = tools.get_output_filename('symlink_to_test.bin')
+ self.assertTrue(os.path.islink(sname))
+ self.assertEqual(os.readlink(sname), fname)
+
+ def testSymlinkOverwrite(self):
+ """Test that symlinked images can be overwritten"""
+ testdir = TestFunctional._MakeInputDir('symlinktest')
+ self._DoTestFile('259_symlink.dts', debug=True, map=True, output_dir=testdir)
+ # Build the same image again in the same directory so that the existing symlink is present
+ self._DoTestFile('259_symlink.dts', debug=True, map=True, output_dir=testdir)
+ fname = tools.get_output_filename('test_image.bin')
+ sname = tools.get_output_filename('symlink_to_test.bin')
+ self.assertTrue(os.path.islink(sname))
+ self.assertEqual(os.readlink(sname), fname)
+
+ def testSymbolsElf(self):
+ """Test binman can assign symbols embedded in an ELF file"""
+ if not elf.ELF_TOOLS:
+ self.skipTest('Python elftools not available')
+ self._SetupTplElf('u_boot_binman_syms')
+ self._SetupVplElf('u_boot_binman_syms')
+ self._SetupSplElf('u_boot_binman_syms')
+ data = self._DoReadFileDtb('260_symbols_elf.dts')[0]
+ image_fname = tools.get_output_filename('image.bin')
+
+ image = control.images['image']
+ entries = image.GetEntries()
+
+ for entry in entries.values():
+ # No symbols in u-boot and it has faked contents anyway
+ if entry.name == 'u-boot':
+ continue
+ edata = data[entry.image_pos:entry.image_pos + entry.size]
+ efname = tools.get_output_filename(f'edata-{entry.name}')
+ tools.write_file(efname, edata)
+
+ syms = elf.GetSymbolFileOffset(efname, ['_binman_u_boot'])
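+ # Check each _binman_<entry>_prop_<property> symbol against the entry's value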
+ re_name = re.compile('_binman_(u_boot_(.*))_prop_(.*)')
+ for name, sym in syms.items():
+ msg = 'test'
+ val = elf.GetSymbolValue(sym, edata, msg)
+ entry_m = re_name.match(name)
+ if entry_m:
+ ename, prop = entry_m.group(1), entry_m.group(3)
+ entry, entry_name, prop_name = image.LookupEntry(entries,
+ name, msg)
+ if prop_name == 'offset':
+ expect_val = entry.offset
+ elif prop_name == 'image_pos':
+ expect_val = entry.image_pos
+ elif prop_name == 'size':
+ expect_val = entry.size
+ self.assertEqual(expect_val, val)
+
+ def testSymbolsElfBad(self):
+ """Check error when trying to write symbols without the elftools lib"""
+ if not elf.ELF_TOOLS:
+ self.skipTest('Python elftools not available')
+ self._SetupTplElf('u_boot_binman_syms')
+ self._SetupVplElf('u_boot_binman_syms')
+ self._SetupSplElf('u_boot_binman_syms')
+ try:
+ elf.ELF_TOOLS = False
+ with self.assertRaises(ValueError) as exc:
+ self._DoReadFileDtb('260_symbols_elf.dts')
+ finally:
+ elf.ELF_TOOLS = True
+ self.assertIn(
+ "Section '/binman': entry '/binman/u-boot-spl-elf': "
+ 'Cannot write symbols to an ELF file without Python elftools',
+ str(exc.exception))
+
+ def testSectionFilename(self):
+ """Check writing of section contents to a file"""
+ data = self._DoReadFile('261_section_fname.dts')
+ expected = (b'&&' + U_BOOT_DATA + b'&&&' +
+ tools.get_bytes(ord('!'), 7) +
+ U_BOOT_DATA + tools.get_bytes(ord('&'), 12))
+ self.assertEqual(expected, data)
+
+ sect_fname = tools.get_output_filename('outfile.bin')
+ self.assertTrue(os.path.exists(sect_fname))
+ sect_data = tools.read_file(sect_fname)
+ self.assertEqual(U_BOOT_DATA, sect_data)
+
+ def testAbsent(self):
+ """Check handling of absent entries"""
+ data = self._DoReadFile('262_absent.dts')
+ self.assertEqual(U_BOOT_DATA + U_BOOT_IMG_DATA, data)
+
+ def testPackTeeOsOptional(self):
+ """Test that an image with an optional TEE binary can be created"""
+ entry_args = {
+ 'tee-os-path': 'tee.elf',
+ }
+ data = self._DoReadFileDtb('263_tee_os_opt.dts',
+ entry_args=entry_args)[0]
+ self.assertEqual(U_BOOT_DATA + U_BOOT_IMG_DATA, data)
+
+ def checkFitTee(self, dts, tee_fname):
+ """Check that a tee-os entry works and returns data
+
+ Args:
+ dts (str): Device tree filename to use
+ tee_fname (str): filename containing tee-os
+
+ Returns:
+ bytes: Image contents
+ """
+ if not elf.ELF_TOOLS:
+ self.skipTest('Python elftools not available')
+ entry_args = {
+ 'of-list': 'test-fdt1 test-fdt2',
+ 'default-dt': 'test-fdt2',
+ 'tee-os-path': tee_fname,
+ }
+ test_subdir = os.path.join(self._indir, TEST_FDT_SUBDIR)
+ data = self._DoReadFileDtb(dts, entry_args=entry_args,
+ extra_indirs=[test_subdir])[0]
+ return data
+
+ def testFitTeeOsOptionalFit(self):
+ """Test an image with a FIT with an optional OP-TEE binary"""
+ data = self.checkFitTee('264_tee_os_opt_fit.dts', 'tee.bin')
+
+ # There should be only one node, holding the data set up in setUpClass()
+ # for tee.bin
+ dtb = fdt.Fdt.FromData(data)
+ dtb.Scan()
+ node = dtb.GetNode('/images/tee-1')
+ self.assertEqual(TEE_ADDR,
+ fdt_util.fdt32_to_cpu(node.props['load'].value))
+ self.assertEqual(TEE_ADDR,
+ fdt_util.fdt32_to_cpu(node.props['entry'].value))
+ self.assertEqual(U_BOOT_DATA, node.props['data'].bytes)
+
+ with test_util.capture_sys_output() as (stdout, stderr):
+ self.checkFitTee('264_tee_os_opt_fit.dts', '')
+ err = stderr.getvalue()
+ self.assertRegex(
+ err,
+ "Image '.*' is missing optional external blobs but is still functional: tee-os")
+
+ def testFitTeeOsOptionalFitBad(self):
+ """Test an image with a FIT with an optional OP-TEE binary"""
+ with self.assertRaises(ValueError) as exc:
+ self.checkFitTee('265_tee_os_opt_fit_bad.dts', 'tee.bin')
+ self.assertIn(
+ "Node '/binman/fit': subnode 'images/@tee-SEQ': Failed to read ELF file: Magic number does not match",
+ str(exc.exception))
+
+ def testFitTeeOsBad(self):
+ """Test an OP-TEE binary with wrong formats"""
+ self.make_tee_bin('tee.bad1', 123)
+ with self.assertRaises(ValueError) as exc:
+ self.checkFitTee('264_tee_os_opt_fit.dts', 'tee.bad1')
+ self.assertIn(
+ "Node '/binman/fit/images/@tee-SEQ/tee-os': OP-TEE paged mode not supported",
+ str(exc.exception))
+
+ self.make_tee_bin('tee.bad2', 0, b'extra data')
+ with self.assertRaises(ValueError) as exc:
+ self.checkFitTee('264_tee_os_opt_fit.dts', 'tee.bad2')
+ self.assertIn(
+ "Node '/binman/fit/images/@tee-SEQ/tee-os': Invalid OP-TEE file: size mismatch (expected 0x4, have 0xe)",
+ str(exc.exception))
+
+ def testExtblobOptional(self):
+ """Test an image with an external blob that is optional"""
+ with test_util.capture_sys_output() as (stdout, stderr):
+ data = self._DoReadFile('266_blob_ext_opt.dts')
+ self.assertEqual(REFCODE_DATA, data)
+ err = stderr.getvalue()
+ self.assertRegex(
+ err,
+ "Image '.*' is missing optional external blobs but is still functional: missing")
+
+ def testSectionInner(self):
+ """Test an inner section with a size"""
+ data = self._DoReadFile('267_section_inner.dts')
+ expected = U_BOOT_DATA + tools.get_bytes(0, 12)
+ self.assertEqual(expected, data)
+
+ def testNull(self):
+ """Test an image with a null entry"""
+ data = self._DoReadFile('268_null.dts')
+ self.assertEqual(U_BOOT_DATA + b'\xff\xff\xff\xff' + U_BOOT_IMG_DATA, data)
+
+ def testOverlap(self):
+ """Test an image with a overlapping entry"""
+ data = self._DoReadFile('269_overlap.dts')
+ self.assertEqual(U_BOOT_DATA[:1] + b'aa' + U_BOOT_DATA[3:], data)
+
+ image = control.images['image']
+ entries = image.GetEntries()
+
+ self.assertIn('inset', entries)
+ inset = entries['inset']
+ self.assertEqual(1, inset.offset)
+ self.assertEqual(1, inset.image_pos)
+ self.assertEqual(2, inset.size)
+
+ def testOverlapNull(self):
+ """Test an image with a null overlap"""
+ data = self._DoReadFile('270_overlap_null.dts')
+ self.assertEqual(U_BOOT_DATA, data[:len(U_BOOT_DATA)])
+
+ # Check the FMAP
+ fhdr, fentries = fmap_util.DecodeFmap(data[len(U_BOOT_DATA):])
+ self.assertEqual(4, fhdr.nareas)
+ fiter = iter(fentries)
+
+ fentry = next(fiter)
+ self.assertEqual(b'SECTION', fentry.name)
+ self.assertEqual(0, fentry.offset)
+ self.assertEqual(len(U_BOOT_DATA), fentry.size)
+ self.assertEqual(0, fentry.flags)
+
+ fentry = next(fiter)
+ self.assertEqual(b'U_BOOT', fentry.name)
+ self.assertEqual(0, fentry.offset)
+ self.assertEqual(len(U_BOOT_DATA), fentry.size)
+ self.assertEqual(0, fentry.flags)
+
+ # Make sure that the NULL entry appears in the FMAP
+ fentry = next(fiter)
+ self.assertEqual(b'NULL', fentry.name)
+ self.assertEqual(1, fentry.offset)
+ self.assertEqual(2, fentry.size)
+ self.assertEqual(0, fentry.flags)
+
+ fentry = next(fiter)
+ self.assertEqual(b'FMAP', fentry.name)
+ self.assertEqual(len(U_BOOT_DATA), fentry.offset)
+
+ def testOverlapBad(self):
+ """Test an image with a bad overlapping entry"""
+ with self.assertRaises(ValueError) as exc:
+ self._DoReadFile('271_overlap_bad.dts')
+ self.assertIn(
+ "Node '/binman/inset': Offset 0x10 (16) ending at 0x12 (18) must overlap with existing entries",
+ str(exc.exception))
+
+ def testOverlapNoOffset(self):
+ """Test an image with a bad overlapping entry"""
+ with self.assertRaises(ValueError) as exc:
+ self._DoReadFile('272_overlap_no_size.dts')
+ self.assertIn(
+ "Node '/binman/inset': 'fill' entry is missing properties: size",
+ str(exc.exception))
+
+ def testBlobSymbol(self):
+ """Test a blob with symbols read from an ELF file"""
+ elf_fname = self.ElfTestFile('blob_syms')
+ TestFunctional._MakeInputFile('blob_syms', tools.read_file(elf_fname))
+ TestFunctional._MakeInputFile('blob_syms.bin',
+ tools.read_file(self.ElfTestFile('blob_syms.bin')))
+
+ data = self._DoReadFile('273_blob_symbol.dts')
+
+ syms = elf.GetSymbols(elf_fname, ['binman', 'image'])
+ addr = elf.GetSymbolAddress(elf_fname, '__my_start_sym')
+ self.assertEqual(syms['_binman_sym_magic'].address, addr)
+ self.assertEqual(syms['_binman_inset_prop_offset'].address, addr + 4)
+ self.assertEqual(syms['_binman_inset_prop_size'].address, addr + 8)
+
+ sym_values = struct.pack('<LLL', elf.BINMAN_SYM_MAGIC_VALUE, 4, 8)
+ expected = sym_values
+ self.assertEqual(expected, data[:len(expected)])
+
+ def testOffsetFromElf(self):
+ """Test a blob with symbols read from an ELF file"""
+ elf_fname = self.ElfTestFile('blob_syms')
+ TestFunctional._MakeInputFile('blob_syms', tools.read_file(elf_fname))
+ TestFunctional._MakeInputFile('blob_syms.bin',
+ tools.read_file(self.ElfTestFile('blob_syms.bin')))
+
+ data = self._DoReadFile('274_offset_from_elf.dts')
+
+ syms = elf.GetSymbols(elf_fname, ['binman', 'image'])
+ base = elf.GetSymbolAddress(elf_fname, '__my_start_sym')
+
+ image = control.images['image']
+ entries = image.GetEntries()
+
+ self.assertIn('inset', entries)
+ inset = entries['inset']
+
+ self.assertEqual(base + 4, inset.offset)
+ self.assertEqual(base + 4, inset.image_pos)
+ self.assertEqual(4, inset.size)
+
+ self.assertIn('inset2', entries)
+ inset = entries['inset2']
+ self.assertEqual(base + 8, inset.offset)
+ self.assertEqual(base + 8, inset.image_pos)
+ self.assertEqual(4, inset.size)
+
+ def testFitAlign(self):
+ """Test an image with an FIT with aligned external data"""
+ data = self._DoReadFile('275_fit_align.dts')
+ self.assertEqual(4096, len(data))
+
+ dtb = fdt.Fdt.FromData(data)
+ dtb.Scan()
+
+ props = self._GetPropTree(dtb, ['data-position'])
+ expected = {
+ 'u-boot:data-position': 1024,
+ 'fdt-1:data-position': 2048,
+ 'fdt-2:data-position': 3072,
+ }
+ self.assertEqual(expected, props)
+
+ def testFitFirmwareLoadables(self):
+ """Test an image with an FIT that use fit,firmware"""
+ if not elf.ELF_TOOLS:
+ self.skipTest('Python elftools not available')
+ entry_args = {
+ 'of-list': 'test-fdt1',
+ 'default-dt': 'test-fdt1',
+ 'atf-bl31-path': 'bl31.elf',
+ 'tee-os-path': 'missing.bin',
+ }
+ test_subdir = os.path.join(self._indir, TEST_FDT_SUBDIR)
+ with test_util.capture_sys_output() as (stdout, stderr):
+ data = self._DoReadFileDtb(
+ '276_fit_firmware_loadables.dts',
+ entry_args=entry_args,
+ extra_indirs=[test_subdir])[0]
+
+ dtb = fdt.Fdt.FromData(data)
+ dtb.Scan()
+
+ node = dtb.GetNode('/configurations/conf-uboot-1')
+ self.assertEqual('u-boot', node.props['firmware'].value)
+ self.assertEqual(['atf-1', 'atf-2'],
+ fdt_util.GetStringList(node, 'loadables'))
+
+ node = dtb.GetNode('/configurations/conf-atf-1')
+ self.assertEqual('atf-1', node.props['firmware'].value)
+ self.assertEqual(['u-boot', 'atf-2'],
+ fdt_util.GetStringList(node, 'loadables'))
+
+ node = dtb.GetNode('/configurations/conf-missing-uboot-1')
+ self.assertEqual('u-boot', node.props['firmware'].value)
+ self.assertEqual(['atf-1', 'atf-2'],
+ fdt_util.GetStringList(node, 'loadables'))
+
+ node = dtb.GetNode('/configurations/conf-missing-atf-1')
+ self.assertEqual('atf-1', node.props['firmware'].value)
+ self.assertEqual(['u-boot', 'atf-2'],
+ fdt_util.GetStringList(node, 'loadables'))
+
+ node = dtb.GetNode('/configurations/conf-missing-tee-1')
+ self.assertEqual('atf-1', node.props['firmware'].value)
+ self.assertEqual(['u-boot', 'atf-2'],
+ fdt_util.GetStringList(node, 'loadables'))
+
+ def testTooldir(self):
+ """Test that we can specify the tooldir"""
+ with test_util.capture_sys_output() as (stdout, stderr):
+ self.assertEqual(0, self._DoBinman('--tooldir', 'fred',
+ 'tool', '-l'))
+ self.assertEqual('fred', bintool.Bintool.tooldir)
+
+ # Check that the toolpath is updated correctly
+ self.assertEqual(['fred'], tools.tool_search_paths)
+
+ # Try with a few toolpaths; the tooldir should be at the end
+ with test_util.capture_sys_output() as (stdout, stderr):
+ self.assertEqual(0, self._DoBinman(
+ '--toolpath', 'mary', '--toolpath', 'anna', '--tooldir', 'fred',
+ 'tool', '-l'))
+ self.assertEqual(['mary', 'anna', 'fred'], tools.tool_search_paths)
+
+ def testReplaceSectionEntry(self):
+ """Test replacing an entry in a section"""
+ expect_data = b'w' * len(U_BOOT_DATA + COMPRESS_DATA)
+ entry_data, expected_fdtmap, image = self._RunReplaceCmd('section/blob',
+ expect_data, dts='241_replace_section_simple.dts')
+ self.assertEqual(expect_data, entry_data)
+
+ entries = image.GetEntries()
+ self.assertIn('section', entries)
+ section = entries['section']
+
+ sect_entries = section.GetEntries()
+ self.assertIn('blob', sect_entries)
+ entry = sect_entries['blob']
+ self.assertEqual(len(expect_data), entry.size)
+
+ fname = tools.get_output_filename('image-updated.bin')
+ data = tools.read_file(fname)
+
+ new_blob_data = data[entry.image_pos:entry.image_pos + len(expect_data)]
+ self.assertEqual(expect_data, new_blob_data)
+
+ self.assertEqual(U_BOOT_DATA,
+ data[entry.image_pos + len(expect_data):]
+ [:len(U_BOOT_DATA)])
+
+ def testReplaceSectionDeep(self):
+ """Test replacing an entry in two levels of sections"""
+ expect_data = b'w' * len(U_BOOT_DATA + COMPRESS_DATA)
+ entry_data, expected_fdtmap, image = self._RunReplaceCmd(
+ 'section/section/blob', expect_data,
+ dts='278_replace_section_deep.dts')
+ self.assertEqual(expect_data, entry_data)
+
+ entries = image.GetEntries()
+ self.assertIn('section', entries)
+ section = entries['section']
+
+ subentries = section.GetEntries()
+ self.assertIn('section', subentries)
+ section = subentries['section']
+
+ sect_entries = section.GetEntries()
+ self.assertIn('blob', sect_entries)
+ entry = sect_entries['blob']
+ self.assertEqual(len(expect_data), entry.size)
+
+ fname = tools.get_output_filename('image-updated.bin')
+ data = tools.read_file(fname)
+
+ new_blob_data = data[entry.image_pos:entry.image_pos + len(expect_data)]
+ self.assertEqual(expect_data, new_blob_data)
+
+ self.assertEqual(U_BOOT_DATA,
+ data[entry.image_pos + len(expect_data):]
+ [:len(U_BOOT_DATA)])
+
+ def testReplaceFitSibling(self):
+ """Test an image with a FIT inside where we replace its sibling"""
+ self._SetupSplElf()
+ fname = TestFunctional._MakeInputFile('once', b'available once')
+ self._DoReadFileRealDtb('277_replace_fit_sibling.dts')
+ os.remove(fname)
+
+ try:
+ tmpdir, updated_fname = self._SetupImageInTmpdir()
+
+ fname = os.path.join(tmpdir, 'update-blob')
+ expected = b'w' * (len(COMPRESS_DATA + U_BOOT_DATA) + 1)
+ tools.write_file(fname, expected)
+
+ self._DoBinman('replace', '-i', updated_fname, 'blob', '-f', fname)
+ data = tools.read_file(updated_fname)
+ start = len(U_BOOT_DTB_DATA)
+ self.assertEqual(expected, data[start:start + len(expected)])
+ map_fname = os.path.join(tmpdir, 'image-updated.map')
+ self.assertFalse(os.path.exists(map_fname))
+ finally:
+ shutil.rmtree(tmpdir)
+
+ def testX509Cert(self):
+ """Test creating an X509 certificate"""
+ keyfile = self.TestFile('key.key')
+ entry_args = {
+ 'keyfile': keyfile,
+ }
+ data = self._DoReadFileDtb('279_x509_cert.dts',
+ entry_args=entry_args)[0]
+ cert = data[:-4]
+ self.assertEqual(U_BOOT_DATA, data[-4:])
+
+ # TODO: verify the signature
+
+ def testX509CertMissing(self):
+ """Test that binman still produces an image if openssl is missing"""
+ keyfile = self.TestFile('key.key')
+ entry_args = {
+ 'keyfile': keyfile,
+ }
+ with test_util.capture_sys_output() as (_, stderr):
+ self._DoTestFile('279_x509_cert.dts',
+ force_missing_bintools='openssl',
+ entry_args=entry_args)
+ err = stderr.getvalue()
+ self.assertRegex(err, "Image 'image'.*missing bintools.*: openssl")
+
+ def testPackRockchipTpl(self):
+ """Test that an image with a Rockchip TPL binary can be created"""
+ data = self._DoReadFile('291_rockchip_tpl.dts')
+ self.assertEqual(ROCKCHIP_TPL_DATA, data[:len(ROCKCHIP_TPL_DATA)])
+
+ def testMkimageMissingBlobMultiple(self):
+ """Test missing blob with mkimage entry and multiple-data-files"""
+ with test_util.capture_sys_output() as (stdout, stderr):
+ self._DoTestFile('292_mkimage_missing_multiple.dts', allow_missing=True)
+ err = stderr.getvalue()
+ self.assertIn("is missing external blobs and is non-functional", err)
+
+ with self.assertRaises(ValueError) as e:
+ self._DoTestFile('292_mkimage_missing_multiple.dts', allow_missing=False)
+ self.assertIn("not found in input path", str(e.exception))
+
+ def _PrepareSignEnv(self, dts='280_fit_sign.dts'):
+ """Prepare sign environment
+
+ Create private and public keys, add pubkey into dtb.
+
+ Returns:
+ Tuple:
+ FIT container
+ Image name
+ Private key
+ DTB
+ """
+ self._SetupSplElf()
+ data = self._DoReadFileRealDtb(dts)
+ updated_fname = tools.get_output_filename('image-updated.bin')
+ tools.write_file(updated_fname, data)
+ dtb = tools.get_output_filename('source.dtb')
+ private_key = tools.get_output_filename('test_key.key')
+ public_key = tools.get_output_filename('test_key.crt')
+ fit = tools.get_output_filename('fit.fit')
+ key_dir = tools.get_output_dir()
+
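+ # Create a self-signed key pair, then add the public key to the DTB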
+ tools.run('openssl', 'req', '-batch' , '-newkey', 'rsa:4096',
+ '-sha256', '-new', '-nodes', '-x509', '-keyout',
+ private_key, '-out', public_key)
+ tools.run('fdt_add_pubkey', '-a', 'sha256,rsa4096', '-k', key_dir,
+ '-n', 'test_key', '-r', 'conf', dtb)
+
+ return fit, updated_fname, private_key, dtb
+
+ def testSignSimple(self):
+ """Test that a FIT container can be signed in image"""
+ is_signed = False
+ fit, fname, private_key, dtb = self._PrepareSignEnv()
+
+ # do sign with private key
+ control.SignEntries(fname, None, private_key, 'sha256,rsa4096',
+ ['fit'])
+ is_signed = self._CheckSign(fit, dtb)
+
+ self.assertEqual(is_signed, True)
+
+ def testSignExactFIT(self):
+ """Test that a FIT container can be signed and replaced in image"""
+ is_signed = False
+ fit, fname, private_key, dtb = self._PrepareSignEnv()
+
+ # Make sure we propagate the toolpath, since mkimage may not be on PATH
+ args = []
+ if self.toolpath:
+ for path in self.toolpath:
+ args += ['--toolpath', path]
+
+ # do sign with private key
+ self._DoBinman(*args, 'sign', '-i', fname, '-k', private_key, '-a',
+ 'sha256,rsa4096', '-f', fit, 'fit')
+ is_signed = self._CheckSign(fit, dtb)
+
+ self.assertEqual(is_signed, True)
+
+ def testSignNonFit(self):
+ """Test a non-FIT entry cannot be signed"""
+ is_signed = False
+ fit, fname, private_key, _ = self._PrepareSignEnv(
+ '281_sign_non_fit.dts')
+
+ # do sign with private key
+ with self.assertRaises(ValueError) as e:
+ self._DoBinman('sign', '-i', fname, '-k', private_key, '-a',
+ 'sha256,rsa4096', '-f', fit, 'u-boot')
+ self.assertIn(
+ "Node '/u-boot': Updating signatures is not supported with this entry type",
+ str(e.exception))
+
+ def testSignMissingMkimage(self):
+ """Test that FIT signing handles a missing mkimage tool"""
+ fit, fname, private_key, _ = self._PrepareSignEnv()
+
+ # try to sign with a missing mkimage tool
+ bintool.Bintool.set_missing_list(['mkimage'])
+ with self.assertRaises(ValueError) as e:
+ control.SignEntries(fname, None, private_key, 'sha256,rsa4096',
+ ['fit'])
+ self.assertIn("Node '/fit': Missing tool: 'mkimage'", str(e.exception))
+
+ def testSymbolNoWrite(self):
+ """Test disabling of symbol writing"""
+ self._SetupSplElf()
+ self.checkSymbols('282_symbols_disable.dts', U_BOOT_SPL_DATA, 0x1c,
+ no_write_symbols=True)
+
+ def testSymbolNoWriteExpanded(self):
+ """Test disabling of symbol writing in expanded entries"""
+ entry_args = {
+ 'spl-dtb': '1',
+ }
+ self.checkSymbols('282_symbols_disable.dts', U_BOOT_SPL_NODTB_DATA +
+ U_BOOT_SPL_DTB_DATA, 0x38,
+ entry_args=entry_args, use_expanded=True,
+ no_write_symbols=True)
+
+ def testMkimageSpecial(self):
+ """Test mkimage ignores special hash-1 node"""
+ data = self._DoReadFile('283_mkimage_special.dts')
+
+ # Just check that the data appears in the file somewhere
+ self.assertIn(U_BOOT_DATA, data)
+
+ def testFitFdtList(self):
+ """Test an image with an FIT with the fit,fdt-list-val option"""
+ entry_args = {
+ 'default-dt': 'test-fdt2',
+ }
+ data = self._DoReadFileDtb(
+ '284_fit_fdt_list.dts',
+ entry_args=entry_args,
+ extra_indirs=[os.path.join(self._indir, TEST_FDT_SUBDIR)])[0]
+ self.assertEqual(U_BOOT_NODTB_DATA, data[-len(U_BOOT_NODTB_DATA):])
+ fit_data = data[len(U_BOOT_DATA):-len(U_BOOT_NODTB_DATA)]
+
+ def testSplEmptyBss(self):
+ """Test an expanded SPL with a zero-size BSS"""
+ # ELF file with a '__bss_size' symbol
+ self._SetupSplElf(src_fname='bss_data_zero')
+
+ entry_args = {
+ 'spl-bss-pad': 'y',
+ 'spl-dtb': 'y',
+ }
+ data = self._DoReadFileDtb('285_spl_expand.dts',
+ use_expanded=True, entry_args=entry_args)[0]
+
+ def testTemplate(self):
+ """Test using a template"""
+ TestFunctional._MakeInputFile('vga2.bin', b'#' + VGA_DATA)
+ data = self._DoReadFile('286_template.dts')
+ first = U_BOOT_DATA + VGA_DATA + U_BOOT_DTB_DATA
+ second = U_BOOT_DATA + b'#' + VGA_DATA + U_BOOT_DTB_DATA
+ self.assertEqual(U_BOOT_IMG_DATA + first + second, data)
+
+ dtb_fname1 = tools.get_output_filename('u-boot.dtb.tmpl1')
+ self.assertTrue(os.path.exists(dtb_fname1))
+ dtb = fdt.Fdt.FromData(tools.read_file(dtb_fname1))
+ dtb.Scan()
+ node1 = dtb.GetNode('/binman/template')
+ self.assertTrue(node1)
+ vga = dtb.GetNode('/binman/first/intel-vga')
+ self.assertTrue(vga)
+
+ dtb_fname2 = tools.get_output_filename('u-boot.dtb.tmpl2')
+ self.assertTrue(os.path.exists(dtb_fname2))
+ dtb2 = fdt.Fdt.FromData(tools.read_file(dtb_fname2))
+ dtb2.Scan()
+ node2 = dtb2.GetNode('/binman/template')
+ self.assertFalse(node2)
+
+ def testTemplateBlobMulti(self):
+ """Test using a template with 'multiple-images' enabled"""
+ TestFunctional._MakeInputFile('my-blob.bin', b'blob')
+ TestFunctional._MakeInputFile('my-blob2.bin', b'other')
+ retcode = self._DoTestFile('287_template_multi.dts')
+
+ self.assertEqual(0, retcode)
+ image = control.images['image']
+ image_fname = tools.get_output_filename('my-image.bin')
+ data = tools.read_file(image_fname)
+ self.assertEqual(b'blob@@@@other', data)
+
+ def testTemplateFit(self):
+ """Test using a template in a FIT"""
+ fit_data = self._DoReadFile('288_template_fit.dts')
+ fname = os.path.join(self._indir, 'fit_data.fit')
+ tools.write_file(fname, fit_data)
+ out = tools.run('dumpimage', '-l', fname)
+
+ def testTemplateSection(self):
+ """Test using a template in a section (not at top level)"""
+ TestFunctional._MakeInputFile('vga2.bin', b'#' + VGA_DATA)
+ data = self._DoReadFile('289_template_section.dts')
+ first = U_BOOT_DATA + VGA_DATA + U_BOOT_DTB_DATA
+ second = U_BOOT_DATA + b'#' + VGA_DATA + U_BOOT_DTB_DATA
+ self.assertEqual(U_BOOT_IMG_DATA + first + second + first, data)
+
+ def testMkimageSymbols(self):
+ """Test using mkimage to build an image with symbols in it"""
+ self._SetupSplElf('u_boot_binman_syms')
+ data = self._DoReadFile('290_mkimage_sym.dts')
+
+ image = control.images['image']
+ entries = image.GetEntries()
+ self.assertIn('u-boot', entries)
+ u_boot = entries['u-boot']
+
+ mkim = entries['mkimage']
+ mkim_entries = mkim.GetEntries()
+ self.assertIn('u-boot-spl', mkim_entries)
+ spl = mkim_entries['u-boot-spl']
+ self.assertIn('u-boot-spl2', mkim_entries)
+ spl2 = mkim_entries['u-boot-spl2']
+
+ # skip the mkimage header and the area sizes
+ mk_data = data[mkim.offset + 0x40:]
+ size, term = struct.unpack('>LL', mk_data[:8])
+
+ # There should be only one image, so check that the zero terminator is
+ # present
+ self.assertEqual(0, term)
+
+ content = mk_data[8:8 + size]
+
+ # The image should contain the symbols from u_boot_binman_syms.c
+ # Note that image_pos is adjusted by the base address of the image,
+ # which is 0x10 in our test image
+ spl_data = content[:0x18]
+ content = content[0x1b:]
+
+ # After the header is a table of offsets for each image. There should
+ # only be one image, then a 0 terminator, so figure out the real start
+ # of the image data
+ base = 0x40 + 8
+
+ # Check symbols in both u-boot-spl and u-boot-spl2
+ for i in range(2):
+ vals = struct.unpack('<LLQLL', spl_data)
+
+ # The image should contain the symbols from u_boot_binman_syms.c
+ # Note that image_pos is adjusted by the base address of the image,
+ # which is 0x10 in our 'u_boot_binman_syms' test image
+ self.assertEqual(elf.BINMAN_SYM_MAGIC_VALUE, vals[0])
+ self.assertEqual(base, vals[1])
+ self.assertEqual(spl2.offset, vals[2])
+ # figure out the internal positions of its components
+ self.assertEqual(0x10 + u_boot.image_pos, vals[3])
+
+ # Check that spl and spl2 are actually at the indicated positions
+ self.assertEqual(
+ elf.BINMAN_SYM_MAGIC_VALUE,
+ struct.unpack('<I', data[spl.image_pos:spl.image_pos + 4])[0])
+ self.assertEqual(
+ elf.BINMAN_SYM_MAGIC_VALUE,
+ struct.unpack('<I', data[spl2.image_pos:spl2.image_pos + 4])[0])
+
+ self.assertEqual(len(U_BOOT_DATA), vals[4])
+
+ # Move to next
+ spl_data = content[:0x18]
+
+ def testTemplatePhandle(self):
+ """Test using a template in a node containing a phandle"""
+ entry_args = {
+ 'atf-bl31-path': 'bl31.elf',
+ }
+ data = self._DoReadFileDtb('309_template_phandle.dts',
+ entry_args=entry_args)
+ fname = tools.get_output_filename('image.bin')
+ out = tools.run('dumpimage', '-l', fname)
+
+ # We should see the FIT description and one for each of the two images
+ lines = out.splitlines()
+ descs = [line.split()[-1] for line in lines if 'escription' in line]
+ self.assertEqual(['test-desc', 'atf', 'fdt'], descs)
+
+ def testTemplatePhandleDup(self):
+ """Test using a template in a node containing a phandle"""
+ entry_args = {
+ 'atf-bl31-path': 'bl31.elf',
+ }
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFileDtb('310_template_phandle_dup.dts',
+ entry_args=entry_args)
+ self.assertIn(
+ 'Duplicate phandle 1 in nodes /binman/image/fit/images/atf/atf-bl31 and /binman/image-2/fit/images/atf/atf-bl31',
+ str(e.exception))
+
+ def testTIBoardConfig(self):
+ """Test that a schema validated board config file can be generated"""
+ data = self._DoReadFile('293_ti_board_cfg.dts')
+ self.assertEqual(TI_BOARD_CONFIG_DATA, data)
+
+ def testTIBoardConfigLint(self):
+ """Test that an incorrectly linted config file would generate error"""
+ with self.assertRaises(ValueError) as e:
+ data = self._DoReadFile('323_ti_board_cfg_phony.dts')
+ self.assertIn("Yamllint error", str(e.exception))
+
+ def testTIBoardConfigCombined(self):
+ """Test that a schema validated combined board config file can be generated"""
+ data = self._DoReadFile('294_ti_board_cfg_combined.dts')
+ configlen_noheader = TI_BOARD_CONFIG_DATA * 4
+ self.assertGreater(data, configlen_noheader)
+
+ def testTIBoardConfigNoDataType(self):
+ """Test that error is thrown when data type is not supported"""
+ with self.assertRaises(ValueError) as e:
+ data = self._DoReadFile('295_ti_board_cfg_no_type.dts')
+ self.assertIn("Schema validation error", str(e.exception))
+
+ def testPackTiSecure(self):
+ """Test that an image with a TI secured binary can be created"""
+ keyfile = self.TestFile('key.key')
+ entry_args = {
+ 'keyfile': keyfile,
+ }
+ data = self._DoReadFileDtb('296_ti_secure.dts',
+ entry_args=entry_args)[0]
+ self.assertGreater(len(data), len(TI_UNSECURE_DATA))
+
+ def testPackTiSecureFirewall(self):
+ """Test that an image with a TI secured binary can be created"""
+ keyfile = self.TestFile('key.key')
+ entry_args = {
+ 'keyfile': keyfile,
+ }
+ data_no_firewall = self._DoReadFileDtb('296_ti_secure.dts',
+ entry_args=entry_args)[0]
+ data_firewall = self._DoReadFileDtb('324_ti_secure_firewall.dts',
+ entry_args=entry_args)[0]
+        self.assertGreater(len(data_firewall), len(data_no_firewall))
+
+ def testPackTiSecureFirewallMissingProperty(self):
+ """Test that an image with a TI secured binary can be created"""
+ keyfile = self.TestFile('key.key')
+ entry_args = {
+ 'keyfile': keyfile,
+ }
+ with self.assertRaises(ValueError) as e:
+ data_firewall = self._DoReadFileDtb('325_ti_secure_firewall_missing_property.dts',
+ entry_args=entry_args)[0]
+ self.assertRegex(str(e.exception), "Node '/binman/ti-secure': Subnode 'firewall-0-2' is missing properties: id,region")
+
+ def testPackTiSecureMissingTool(self):
+ """Test that an image with a TI secured binary (non-functional) can be created
+ when openssl is missing"""
+ keyfile = self.TestFile('key.key')
+ entry_args = {
+ 'keyfile': keyfile,
+ }
+ with test_util.capture_sys_output() as (_, stderr):
+ self._DoTestFile('296_ti_secure.dts',
+ force_missing_bintools='openssl',
+ entry_args=entry_args)
+ err = stderr.getvalue()
+ self.assertRegex(err, "Image 'image'.*missing bintools.*: openssl")
+
+ def testPackTiSecureROM(self):
+ """Test that a ROM image with a TI secured binary can be created"""
+ keyfile = self.TestFile('key.key')
+ entry_args = {
+ 'keyfile': keyfile,
+ }
+ data = self._DoReadFileDtb('297_ti_secure_rom.dts',
+ entry_args=entry_args)[0]
+ data_a = self._DoReadFileDtb('299_ti_secure_rom_a.dts',
+ entry_args=entry_args)[0]
+ data_b = self._DoReadFileDtb('300_ti_secure_rom_b.dts',
+ entry_args=entry_args)[0]
+ self.assertGreater(len(data), len(TI_UNSECURE_DATA))
+ self.assertGreater(len(data_a), len(TI_UNSECURE_DATA))
+ self.assertGreater(len(data_b), len(TI_UNSECURE_DATA))
+
+ def testPackTiSecureROMCombined(self):
+ """Test that a ROM image with a TI secured binary can be created"""
+ keyfile = self.TestFile('key.key')
+ entry_args = {
+ 'keyfile': keyfile,
+ }
+ data = self._DoReadFileDtb('298_ti_secure_rom_combined.dts',
+ entry_args=entry_args)[0]
+ self.assertGreater(len(data), len(TI_UNSECURE_DATA))
+
+ def testEncryptedNoAlgo(self):
+ """Test encrypted node with missing required properties"""
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFileDtb('301_encrypted_no_algo.dts')
+ self.assertIn(
+ "Node '/binman/fit/images/u-boot/encrypted': 'encrypted' entry is missing properties: algo iv-filename",
+ str(e.exception))
+
+ def testEncryptedInvalidIvfile(self):
+ """Test encrypted node with invalid iv file"""
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFileDtb('302_encrypted_invalid_iv_file.dts')
+ self.assertIn("Filename 'invalid-iv-file' not found in input path",
+ str(e.exception))
+
+ def testEncryptedMissingKey(self):
+ """Test encrypted node with missing key properties"""
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFileDtb('303_encrypted_missing_key.dts')
+ self.assertIn(
+ "Node '/binman/fit/images/u-boot/encrypted': Provide either 'key-filename' or 'key-source'",
+ str(e.exception))
+
+ def testEncryptedKeySource(self):
+ """Test encrypted node with key-source property"""
+ data = self._DoReadFileDtb('304_encrypted_key_source.dts')[0]
+
+ dtb = fdt.Fdt.FromData(data)
+ dtb.Scan()
+
+ node = dtb.GetNode('/images/u-boot/cipher')
+ self.assertEqual('algo-name', node.props['algo'].value)
+ self.assertEqual('key-source-value', node.props['key-source'].value)
+ self.assertEqual(ENCRYPTED_IV_DATA,
+ tools.to_bytes(''.join(node.props['iv'].value)))
+ self.assertNotIn('key', node.props)
+
+ def testEncryptedKeyFile(self):
+ """Test encrypted node with key-filename property"""
+ data = self._DoReadFileDtb('305_encrypted_key_file.dts')[0]
+
+ dtb = fdt.Fdt.FromData(data)
+ dtb.Scan()
+
+ node = dtb.GetNode('/images/u-boot/cipher')
+ self.assertEqual('algo-name', node.props['algo'].value)
+ self.assertEqual(ENCRYPTED_IV_DATA,
+ tools.to_bytes(''.join(node.props['iv'].value)))
+ self.assertEqual(ENCRYPTED_KEY_DATA,
+ tools.to_bytes(''.join(node.props['key'].value)))
+ self.assertNotIn('key-source', node.props)
+
+
+ def testSplPubkeyDtb(self):
+ """Test u_boot_spl_pubkey_dtb etype"""
+ data = tools.read_file(self.TestFile("key.pem"))
+ self._MakeInputFile("key.crt", data)
+ self._DoReadFileRealDtb('306_spl_pubkey_dtb.dts')
+ image = control.images['image']
+ entries = image.GetEntries()
+ dtb_entry = entries['u-boot-spl-pubkey-dtb']
+ dtb_data = dtb_entry.GetData()
+ dtb = fdt.Fdt.FromData(dtb_data)
+ dtb.Scan()
+
+ signature_node = dtb.GetNode('/signature')
+ self.assertIsNotNone(signature_node)
+ key_node = signature_node.FindNode("key-key")
+ self.assertIsNotNone(key_node)
+ self.assertEqual(fdt_util.GetString(key_node, "required"),
+ "conf")
+ self.assertEqual(fdt_util.GetString(key_node, "algo"),
+ "sha384,rsa4096")
+ self.assertEqual(fdt_util.GetString(key_node, "key-name-hint"),
+ "key")
+
+ def testXilinxBootgenSigning(self):
+ """Test xilinx-bootgen etype"""
+ bootgen = bintool.Bintool.create('bootgen')
+ self._CheckBintool(bootgen)
+ data = tools.read_file(self.TestFile("key.key"))
+ self._MakeInputFile("psk.pem", data)
+ self._MakeInputFile("ssk.pem", data)
+ self._SetupPmuFwlElf()
+ self._SetupSplElf()
+ self._DoReadFileRealDtb('307_xilinx_bootgen_sign.dts')
+ image_fname = tools.get_output_filename('image.bin')
+
+ # Read partition header table and check if authentication is enabled
+ bootgen_out = bootgen.run_cmd("-arch", "zynqmp",
+ "-read", image_fname, "pht").splitlines()
+ attributes = {"authentication": None,
+ "core": None,
+ "encryption": None}
+
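+        # Each attribute is reported by bootgen as 'name [value]'; pull the
+        # value out of the square brackets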
+ for l in bootgen_out:
+ for a in attributes.keys():
+ if a in l:
+ m = re.match(fr".*{a} \[([^]]+)\]", l)
+ attributes[a] = m.group(1)
+
+ self.assertTrue(attributes['authentication'] == "rsa")
+ self.assertTrue(attributes['core'] == "a53-0")
+ self.assertTrue(attributes['encryption'] == "no")
+
+ def testXilinxBootgenSigningEncryption(self):
+ """Test xilinx-bootgen etype"""
+ bootgen = bintool.Bintool.create('bootgen')
+ self._CheckBintool(bootgen)
+ data = tools.read_file(self.TestFile("key.key"))
+ self._MakeInputFile("psk.pem", data)
+ self._MakeInputFile("ssk.pem", data)
+ self._SetupPmuFwlElf()
+ self._SetupSplElf()
+ self._DoReadFileRealDtb('308_xilinx_bootgen_sign_enc.dts')
+ image_fname = tools.get_output_filename('image.bin')
+
+ # Read boot header in order to verify encryption source and
+ # encryption parameter
+ bootgen_out = bootgen.run_cmd("-arch", "zynqmp",
+ "-read", image_fname, "bh").splitlines()
+ attributes = {"auth_only":
+ {"re": r".*auth_only \[([^]]+)\]", "value": None},
+ "encryption_keystore":
+ {"re": r" *encryption_keystore \(0x28\) : (.*)",
+ "value": None},
+ }
+
+ for l in bootgen_out:
+ for a in attributes.keys():
+ if a in l:
+ m = re.match(attributes[a]['re'], l)
+ attributes[a] = m.group(1)
+
+ # Check if fsbl-attribute is set correctly
+ self.assertTrue(attributes['auth_only'] == "true")
+ # Check if key is stored in efuse
+ self.assertTrue(attributes['encryption_keystore'] == "0xa5c3c5a3")
+
+ def testXilinxBootgenMissing(self):
+ """Test that binman still produces an image if bootgen is missing"""
+ data = tools.read_file(self.TestFile("key.key"))
+ self._MakeInputFile("psk.pem", data)
+ self._MakeInputFile("ssk.pem", data)
+ self._SetupPmuFwlElf()
+ self._SetupSplElf()
+ with test_util.capture_sys_output() as (_, stderr):
+ self._DoTestFile('307_xilinx_bootgen_sign.dts',
+ force_missing_bintools='bootgen')
+ err = stderr.getvalue()
+ self.assertRegex(err,
+ "Image 'image'.*missing bintools.*: bootgen")
+
+ def _GetCapsuleHeaders(self, data):
+ """Get the capsule header contents
+
+ Args:
+ data: Capsule file contents
+
+ Returns:
+ Dict:
+ key: Capsule Header name (str)
+ value: Header field value (str)
+ """
+ capsule_file = os.path.join(self._indir, 'test.capsule')
+ tools.write_file(capsule_file, data)
+
+ out = tools.run('mkeficapsule', '--dump-capsule', capsule_file)
+ lines = out.splitlines()
+
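+        # Each dump line is either a bare section name or a 'NAME : VALUE'
+        # pair; group 1 is the name, group 2 the value (if any)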
+ re_line = re.compile(r'^([^:\-\t]*)(?:\t*\s*:\s*(.*))?$')
+ vals = {}
+ for line in lines:
+ mat = re_line.match(line)
+ if mat:
+ vals[mat.group(1)] = mat.group(2)
+
+ return vals
+
+ def _CheckCapsule(self, data, signed_capsule=False, version_check=False,
+ capoemflags=False):
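+        """Check that a capsule dump contains the expected header fields
+
+        Args:
+            data: Capsule file contents
+            signed_capsule: True to check the authentication-header fields
+            version_check: True to check the FMP payload-header fields
+            capoemflags: True to check that the OEM flags field is set
+        """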
+ fmp_signature = "3153534D" # 'M', 'S', 'S', '1'
+ fmp_size = "00000010"
+ fmp_fw_version = "00000002"
+ capsule_image_index = "00000001"
+ oemflag = "00018000"
+ auth_hdr_revision = "00000200"
+ auth_hdr_cert_type = "00000EF1"
+
+ payload_data_len = len(EFI_CAPSULE_DATA)
+
+ hdr = self._GetCapsuleHeaders(data)
+
+ self.assertEqual(FW_MGMT_GUID.upper(), hdr['EFI_CAPSULE_HDR.CAPSULE_GUID'])
+
+ self.assertEqual(CAPSULE_IMAGE_GUID.upper(),
+ hdr['FMP_CAPSULE_IMAGE_HDR.UPDATE_IMAGE_TYPE_ID'])
+ self.assertEqual(capsule_image_index,
+ hdr['FMP_CAPSULE_IMAGE_HDR.UPDATE_IMAGE_INDEX'])
+
+ if capoemflags:
+ self.assertEqual(oemflag, hdr['EFI_CAPSULE_HDR.FLAGS'])
+
+ if signed_capsule:
+ self.assertEqual(auth_hdr_revision,
+ hdr['EFI_FIRMWARE_IMAGE_AUTH.AUTH_INFO.HDR.wREVISION'])
+ self.assertEqual(auth_hdr_cert_type,
+ hdr['EFI_FIRMWARE_IMAGE_AUTH.AUTH_INFO.HDR.wCERTTYPE'])
+ self.assertEqual(WIN_CERT_TYPE_EFI_GUID.upper(),
+ hdr['EFI_FIRMWARE_IMAGE_AUTH.AUTH_INFO.CERT_TYPE'])
+
+ if version_check:
+ self.assertEqual(fmp_signature,
+ hdr['FMP_PAYLOAD_HDR.SIGNATURE'])
+ self.assertEqual(fmp_size,
+ hdr['FMP_PAYLOAD_HDR.HEADER_SIZE'])
+ self.assertEqual(fmp_fw_version,
+ hdr['FMP_PAYLOAD_HDR.FW_VERSION'])
+
+ self.assertEqual(payload_data_len, int(hdr['Payload Image Size']))
+
+ def _CheckEmptyCapsule(self, data, accept_capsule=False):
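+        """Check the headers of an empty (accept or revert) capsule
+
+        Args:
+            data: Capsule file contents
+            accept_capsule: True to check an accept capsule, False for revert
+        """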
+ if accept_capsule:
+ capsule_hdr_guid = EMPTY_CAPSULE_ACCEPT_GUID
+ else:
+ capsule_hdr_guid = EMPTY_CAPSULE_REVERT_GUID
+
+ hdr = self._GetCapsuleHeaders(data)
+
+ self.assertEqual(capsule_hdr_guid.upper(),
+ hdr['EFI_CAPSULE_HDR.CAPSULE_GUID'])
+
+ if accept_capsule:
+ capsule_size = "0000002C"
+ else:
+ capsule_size = "0000001C"
+ self.assertEqual(capsule_size,
+ hdr['EFI_CAPSULE_HDR.CAPSULE_IMAGE_SIZE'])
+
+ if accept_capsule:
+ self.assertEqual(CAPSULE_IMAGE_GUID.upper(), hdr['ACCEPT_IMAGE_GUID'])
+
+ def testCapsuleGen(self):
+ """Test generation of EFI capsule"""
+ data = self._DoReadFile('311_capsule.dts')
+
+ self._CheckCapsule(data)
+
+ def testSignedCapsuleGen(self):
+ """Test generation of EFI capsule"""
+ data = tools.read_file(self.TestFile("key.key"))
+ self._MakeInputFile("key.key", data)
+ data = tools.read_file(self.TestFile("key.pem"))
+ self._MakeInputFile("key.crt", data)
+
+ data = self._DoReadFile('312_capsule_signed.dts')
+
+ self._CheckCapsule(data, signed_capsule=True)
+
+ def testCapsuleGenVersionSupport(self):
+ """Test generation of EFI capsule with version support"""
+ data = self._DoReadFile('313_capsule_version.dts')
+
+ self._CheckCapsule(data, version_check=True)
+
+ def testCapsuleGenSignedVer(self):
+ """Test generation of signed EFI capsule with version information"""
+ data = tools.read_file(self.TestFile("key.key"))
+ self._MakeInputFile("key.key", data)
+ data = tools.read_file(self.TestFile("key.pem"))
+ self._MakeInputFile("key.crt", data)
+
+ data = self._DoReadFile('314_capsule_signed_ver.dts')
+
+ self._CheckCapsule(data, signed_capsule=True, version_check=True)
+
+ def testCapsuleGenCapOemFlags(self):
+ """Test generation of EFI capsule with OEM Flags set"""
+ data = self._DoReadFile('315_capsule_oemflags.dts')
+
+ self._CheckCapsule(data, capoemflags=True)
+
+ def testCapsuleGenKeyMissing(self):
+ """Test that binman errors out on missing key"""
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFile('316_capsule_missing_key.dts')
+
+ self.assertIn("Both private key and public key certificate need to be provided",
+ str(e.exception))
+
+ def testCapsuleGenIndexMissing(self):
+ """Test that binman errors out on missing image index"""
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFile('317_capsule_missing_index.dts')
+
+ self.assertIn("entry is missing properties: image-index",
+ str(e.exception))
+
+ def testCapsuleGenGuidMissing(self):
+ """Test that binman errors out on missing image GUID"""
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFile('318_capsule_missing_guid.dts')
+
+ self.assertIn("entry is missing properties: image-guid",
+ str(e.exception))
+
+ def testCapsuleGenAcceptCapsule(self):
+ """Test generationg of accept EFI capsule"""
+ data = self._DoReadFile('319_capsule_accept.dts')
+
+ self._CheckEmptyCapsule(data, accept_capsule=True)
+
+ def testCapsuleGenRevertCapsule(self):
+ """Test generationg of revert EFI capsule"""
+ data = self._DoReadFile('320_capsule_revert.dts')
+
+ self._CheckEmptyCapsule(data)
+
+ def testCapsuleGenAcceptGuidMissing(self):
+ """Test that binman errors out on missing image GUID for accept capsule"""
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFile('321_capsule_accept_missing_guid.dts')
+
+ self.assertIn("Image GUID needed for generating accept capsule",
+ str(e.exception))
+
+ def testCapsuleGenEmptyCapsuleTypeMissing(self):
+ """Test that capsule-type is specified"""
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFile('322_empty_capsule_type_missing.dts')
+
+ self.assertIn("entry is missing properties: capsule-type",
+ str(e.exception))
+
+ def testCapsuleGenAcceptOrRevertMissing(self):
+ """Test that both accept and revert capsule are not specified"""
+ with self.assertRaises(ValueError) as e:
+ self._DoReadFile('323_capsule_accept_revert_missing.dts')
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tools/binman/image.py b/tools/binman/image.py
new file mode 100644
index 00000000000..e77b5d0d97c
--- /dev/null
+++ b/tools/binman/image.py
@@ -0,0 +1,420 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Class for an image, the output of binman
+#
+
+from collections import OrderedDict
+import fnmatch
+from operator import attrgetter
+import os
+import re
+import sys
+
+from binman.entry import Entry
+from binman.etype import fdtmap
+from binman.etype import image_header
+from binman.etype import section
+from dtoc import fdt
+from dtoc import fdt_util
+from u_boot_pylib import tools
+from u_boot_pylib import tout
+
+class Image(section.Entry_section):
+ """A Image, representing an output from binman
+
+    An image is made up of a collection of entries, each containing binary
+ data. The image size must be large enough to hold all of this data.
+
+ This class implements the various operations needed for images.
+
+ Attributes:
+ filename: Output filename for image
+ image_node: Name of node containing the description for this image
+ fdtmap_dtb: Fdt object for the fdtmap when loading from a file
+ fdtmap_data: Contents of the fdtmap when loading from a file
+ allow_repack: True to add properties to allow the image to be safely
+ repacked later
+ test_section_timeout: Use a zero timeout for section multi-threading
+ (for testing)
+ symlink: Name of symlink to image
+
+ Args:
+ copy_to_orig: Copy offset/size to orig_offset/orig_size after reading
+ from the device tree
+        test: True if this is being called from a test of Images. In this case
+ there is no device tree defining the structure of the section, so
+ we create a section manually.
+ ignore_missing: Ignore any missing entry arguments (i.e. don't raise an
+ exception). This should be used if the Image is being loaded from
+ a file rather than generated. In that case we obviously don't need
+            the entry arguments since the contents already exist.
+        use_expanded: True if we are updating the FDT with entry offsets, etc.
+ and should use the expanded versions of the U-Boot entries.
+ Any entry type that includes a devicetree must put it in a
+            separate entry so that it will be updated. For example, 'u-boot'
+ normally just picks up 'u-boot.bin' which includes the
+ devicetree, but this is not updateable, since it comes into
+ binman as one piece and binman doesn't know that it is actually
+ an executable followed by a devicetree. Of course it could be
+ taught this, but then when reading an image (e.g. 'binman ls')
+ it may need to be able to split the devicetree out of the image
+ in order to determine the location of things. Instead we choose
+            to ignore 'u-boot.bin' in this case, and build it ourselves in
+ binman with 'u-boot-dtb.bin' and 'u-boot.dtb'. See
+ Entry_u_boot_expanded and Entry_blob_phase for details.
+ missing_etype: Use a default entry type ('blob') if the requested one
+ does not exist in binman. This is useful if an image was created by
+            a newer version of binman but we want to list it in an older
+ version which does not support all the entry types.
+ generate: If true, generator nodes are processed. If false they are
+ ignored which is useful when an existing image is read back from a
+ file.
+ """
+ def __init__(self, name, node, copy_to_orig=True, test=False,
+ ignore_missing=False, use_expanded=False, missing_etype=False,
+ generate=True):
+ super().__init__(None, 'section', node, test=test)
+ self.copy_to_orig = copy_to_orig
+ self.name = name
+ self.image_name = name
+ self._filename = '%s.bin' % self.image_name
+ self.fdtmap_dtb = None
+ self.fdtmap_data = None
+ self.allow_repack = False
+ self._ignore_missing = ignore_missing
+ self.missing_etype = missing_etype
+ self.use_expanded = use_expanded
+ self.test_section_timeout = False
+ self.bintools = {}
+ self.generate = generate
+ if not test:
+ self.ReadNode()
+
+ def ReadNode(self):
+ super().ReadNode()
+ self.allow_repack = fdt_util.GetBool(self._node, 'allow-repack')
+ self._symlink = fdt_util.GetString(self._node, 'symlink')
+
+ @classmethod
+ def FromFile(cls, fname):
+ """Convert an image file into an Image for use in binman
+
+ Args:
+ fname: Filename of image file to read
+
+ Returns:
+ Image object on success
+
+ Raises:
+ ValueError if something goes wrong
+ """
+ data = tools.read_file(fname)
+ size = len(data)
+
+ # First look for an image header
+ pos = image_header.LocateHeaderOffset(data)
+ if pos is None:
+ # Look for the FDT map
+ pos = fdtmap.LocateFdtmap(data)
+ if pos is None:
+ raise ValueError('Cannot find FDT map in image')
+
+ # We don't know the FDT size, so check its header first
+ probe_dtb = fdt.Fdt.FromData(
+ data[pos + fdtmap.FDTMAP_HDR_LEN:pos + 256])
+ dtb_size = probe_dtb.GetFdtObj().totalsize()
+ fdtmap_data = data[pos:pos + dtb_size + fdtmap.FDTMAP_HDR_LEN]
+ fdt_data = fdtmap_data[fdtmap.FDTMAP_HDR_LEN:]
+ out_fname = tools.get_output_filename('fdtmap.in.dtb')
+ tools.write_file(out_fname, fdt_data)
+ dtb = fdt.Fdt(out_fname)
+ dtb.Scan()
+
+ # Return an Image with the associated nodes
+ root = dtb.GetRoot()
+ image = Image('image', root, copy_to_orig=False, ignore_missing=True,
+ missing_etype=True, generate=False)
+
+ image.image_node = fdt_util.GetString(root, 'image-node', 'image')
+ image.fdtmap_dtb = dtb
+ image.fdtmap_data = fdtmap_data
+ image._data = data
+ image._filename = fname
+ image.image_name, _ = os.path.splitext(fname)
+ return image
+
+ def Raise(self, msg):
+ """Convenience function to raise an error referencing an image"""
+ raise ValueError("Image '%s': %s" % (self._node.path, msg))
+
+ def PackEntries(self):
+ """Pack all entries into the image"""
+ super().Pack(0)
+
+ def SetImagePos(self):
+        # This is the first section in the image, so it starts at 0
+ super().SetImagePos(0)
+
+ def ProcessEntryContents(self):
+ """Call the ProcessContents() method for each entry
+
+ This is intended to adjust the contents as needed by the entry type.
+
+ Returns:
+ True if the new data size is OK, False if expansion is needed
+ """
+ return super().ProcessContents()
+
+ def WriteSymbols(self):
+ """Write symbol values into binary files for access at run time"""
+ super().WriteSymbols(self)
+
+ def BuildImage(self):
+ """Write the image to a file"""
+ fname = tools.get_output_filename(self._filename)
+ tout.info("Writing image to '%s'" % fname)
+ with open(fname, 'wb') as fd:
+ data = self.GetPaddedData()
+ fd.write(data)
+ tout.info("Wrote %#x bytes" % len(data))
+ # Create symlink to file if symlink given
+ if self._symlink is not None:
+ sname = tools.get_output_filename(self._symlink)
+ if os.path.islink(sname):
+ os.remove(sname)
+ os.symlink(fname, sname)
+
+ def WriteMap(self):
+ """Write a map of the image to a .map file
+
+ Returns:
+ Filename of map file written
+ """
+ filename = '%s.map' % self.image_name
+ fname = tools.get_output_filename(filename)
+ with open(fname, 'w') as fd:
+ print('%8s %8s %8s %s' % ('ImagePos', 'Offset', 'Size', 'Name'),
+ file=fd)
+ super().WriteMap(fd, 0)
+ return fname
+
+ def BuildEntryList(self):
+ """List the files in an image
+
+ Returns:
+ List of entry.EntryInfo objects describing all entries in the image
+ """
+ entries = []
+ self.ListEntries(entries, 0)
+ return entries
+
+ def FindEntryPath(self, entry_path):
+ """Find an entry at a given path in the image
+
+ Args:
+            entry_path: Path to entry (e.g. '/ro-section/u-boot')
+
+ Returns:
+            Entry object corresponding to that path
+
+ Raises:
+ ValueError if no entry found
+ """
+ parts = entry_path.split('/')
+ entries = self.GetEntries()
+ parent = '/'
+ for part in parts:
+ entry = entries.get(part)
+ if not entry:
+ raise ValueError("Entry '%s' not found in '%s'" %
+ (part, parent))
+ parent = entry.GetPath()
+ entries = entry.GetEntries()
+ return entry
+
+ def ReadData(self, decomp=True, alt_format=None):
+ tout.debug("Image '%s' ReadData(), size=%#x" %
+ (self.GetPath(), len(self._data)))
+ return self._data
+
+ def GetListEntries(self, entry_paths):
+ """List the entries in an image
+
+ This decodes the supplied image and returns a list of entries from that
+ image, preceded by a header.
+
+ Args:
+ entry_paths: List of paths to match (each can have wildcards). Only
+ entries whose names match one of these paths will be printed
+
+ Returns:
+ String error message if something went wrong, otherwise
+ 3-Tuple:
+ List of EntryInfo objects
+ List of lines, each
+ List of text columns, each a string
+ List of widths of each column
+ """
+ def _EntryToStrings(entry):
+ """Convert an entry to a list of strings, one for each column
+
+ Args:
+ entry: EntryInfo object containing information to output
+
+ Returns:
+ List of strings, one for each field in entry
+ """
+ def _AppendHex(val):
+ """Append a hex value, or an empty string if val is None
+
+ Args:
+ val: Integer value, or None if none
+ """
+ args.append('' if val is None else '>%x' % val)
+
+ args = [' ' * entry.indent + entry.name]
+ _AppendHex(entry.image_pos)
+ _AppendHex(entry.size)
+ args.append(entry.etype)
+ _AppendHex(entry.offset)
+ _AppendHex(entry.uncomp_size)
+ return args
+
+ def _DoLine(lines, line):
+ """Add a line to the output list
+
+ This adds a line (a list of columns) to the output list. It also updates
+ the widths[] array with the maximum width of each column
+
+ Args:
+ lines: List of lines to add to
+ line: List of strings, one for each column
+ """
+ for i, item in enumerate(line):
+ widths[i] = max(widths[i], len(item))
+ lines.append(line)
+
+ def _NameInPaths(fname, entry_paths):
+ """Check if a filename is in a list of wildcarded paths
+
+ Args:
+ fname: Filename to check
+ entry_paths: List of wildcarded paths (e.g. ['*dtb*', 'u-boot*',
+ 'section/u-boot'])
+
+ Returns:
+ True if any wildcard matches the filename (using Unix filename
+ pattern matching, not regular expressions)
+ False if not
+ """
+ for path in entry_paths:
+ if fnmatch.fnmatch(fname, path):
+ return True
+ return False
+
+ entries = self.BuildEntryList()
+
+ # This is our list of lines. Each item in the list is a list of strings, one
+ # for each column
+ lines = []
+ HEADER = ['Name', 'Image-pos', 'Size', 'Entry-type', 'Offset',
+ 'Uncomp-size']
+ num_columns = len(HEADER)
+
+ # This records the width of each column, calculated as the maximum width of
+ # all the strings in that column
+ widths = [0] * num_columns
+ _DoLine(lines, HEADER)
+
+ # We won't print anything unless it has at least this indent. So at the
+ # start we will print nothing, unless a path matches (or there are no
+ # entry paths)
+ MAX_INDENT = 100
+ min_indent = MAX_INDENT
+ path_stack = []
+ path = ''
+ indent = 0
+ selected_entries = []
+ for entry in entries:
+ if entry.indent > indent:
+ path_stack.append(path)
+ elif entry.indent < indent:
+ path_stack.pop()
+ if path_stack:
+ path = path_stack[-1] + '/' + entry.name
+ indent = entry.indent
+
+ # If there are entry paths to match and we are not looking at a
+ # sub-entry of a previously matched entry, we need to check the path
+ if entry_paths and indent <= min_indent:
+ if _NameInPaths(path[1:], entry_paths):
+ # Print this entry and all sub-entries (=higher indent)
+ min_indent = indent
+ else:
+ # Don't print this entry, nor any following entries until we get
+ # a path match
+ min_indent = MAX_INDENT
+ continue
+ _DoLine(lines, _EntryToStrings(entry))
+ selected_entries.append(entry)
+ return selected_entries, lines, widths
+
+ def LookupImageSymbol(self, sym_name, optional, msg, base_addr):
+ """Look up a symbol in an ELF file
+
+ Looks up a symbol in an ELF file. Only entry types which come from an
+ ELF image can be used by this function.
+
+ This searches through this image including all of its subsections.
+
+ At present the only entry properties supported are:
+ offset
+ image_pos - 'base_addr' is added if this is not an end-at-4gb image
+ size
+
+ Args:
+ sym_name: Symbol name in the ELF file to look up in the format
+ _binman_<entry>_prop_<property> where <entry> is the name of
+ the entry and <property> is the property to find (e.g.
+ _binman_u_boot_prop_offset). As a special case, you can append
+ _any to <entry> to have it search for any matching entry. E.g.
+ _binman_u_boot_any_prop_offset will match entries called u-boot,
+ u-boot-img and u-boot-nodtb)
+ optional: True if the symbol is optional. If False this function
+ will raise if the symbol is not found
+ msg: Message to display if an error occurs
+ base_addr: Base address of image. This is added to the returned
+ image_pos in most cases so that the returned position indicates
+ where the targeted entry/binary has actually been loaded. But
+ if end-at-4gb is used, this is not done, since the binary is
+ already assumed to be linked to the ROM position and using
+ execute-in-place (XIP).
+
+ Returns:
+ Value that should be assigned to that symbol, or None if it was
+ optional and not found
+
+ Raises:
+ ValueError if the symbol is invalid or not found, or references a
+ property which is not supported
+ """
+ entries = OrderedDict()
+ entries_by_name = {}
+ self._CollectEntries(entries, entries_by_name, self)
+ return self.LookupSymbol(sym_name, optional, msg, base_addr,
+ entries_by_name)
+
+ def CollectBintools(self):
+ """Collect all the bintools used by this image
+
+ Returns:
+ Dict of bintools:
+ key: name of tool
+ value: Bintool object
+ """
+ bintools = {}
+ super().AddBintools(bintools)
+ self.bintools = bintools
+ return bintools
diff --git a/tools/binman/image_test.py b/tools/binman/image_test.py
new file mode 100644
index 00000000000..bd51c1e55d1
--- /dev/null
+++ b/tools/binman/image_test.py
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2017 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Test for the image module
+
+import unittest
+
+from binman.image import Image
+from u_boot_pylib.test_util import capture_sys_output
+
+class TestImage(unittest.TestCase):
+ def testInvalidFormat(self):
+ image = Image('name', 'node', test=True)
+ with self.assertRaises(ValueError) as e:
+ image.LookupSymbol('_binman_something_prop_', False, 'msg', 0)
+ self.assertIn(
+ "msg: Symbol '_binman_something_prop_' has invalid format",
+ str(e.exception))
+
+ def testMissingSymbol(self):
+ image = Image('name', 'node', test=True)
+ image._entries = {}
+ with self.assertRaises(ValueError) as e:
+ image.LookupSymbol('_binman_type_prop_pname', False, 'msg', 0)
+ self.assertIn("msg: Entry 'type' not found in list ()",
+ str(e.exception))
+
+ def testMissingSymbolOptional(self):
+ image = Image('name', 'node', test=True)
+ image._entries = {}
+ with capture_sys_output() as (stdout, stderr):
+ val = image.LookupSymbol('_binman_type_prop_pname', True, 'msg', 0)
+ self.assertEqual(val, None)
+ self.assertEqual("Warning: msg: Entry 'type' not found in list ()\n",
+ stderr.getvalue())
+ self.assertEqual('', stdout.getvalue())
+
+ def testBadProperty(self):
+ image = Image('name', 'node', test=True)
+ image._entries = {'u-boot': 1}
+ with self.assertRaises(ValueError) as e:
+ image.LookupSymbol('_binman_u_boot_prop_bad', False, 'msg', 0)
+ self.assertIn("msg: No such property 'bad", str(e.exception))
diff --git a/tools/binman/index.rst b/tools/binman/index.rst
new file mode 100644
index 00000000000..6eef7b5d050
--- /dev/null
+++ b/tools/binman/index.rst
@@ -0,0 +1,9 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+Binman
+======
+
+.. toctree::
+ :maxdepth: 2
+
+ README
diff --git a/tools/binman/main.py b/tools/binman/main.py
new file mode 100755
index 00000000000..92d2431aea7
--- /dev/null
+++ b/tools/binman/main.py
@@ -0,0 +1,152 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0+
+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Creates binary images from input files controlled by a description
+#
+
+"""See README for more information"""
+
+import os
+import site
+import sys
+import traceback
+
+# Get the absolute path to this file at run-time
+our_path = os.path.dirname(os.path.realpath(__file__))
+our1_path = os.path.dirname(our_path)
+our2_path = os.path.dirname(our1_path)
+
+# Extract $(srctree) from Kbuild environment, or use relative paths below
+srctree = os.environ.get('srctree', our2_path)
+
+#
+# Do not pollute source tree with cache files:
+# https://stackoverflow.com/a/60024195/2511795
+# https://bugs.python.org/issue33499
+#
+sys.pycache_prefix = os.path.relpath(our_path, srctree)
+
+# Bring in the patman and dtoc libraries (but don't override the first path
+# in PYTHONPATH)
+sys.path.insert(2, our1_path)
+
+from binman import bintool
+from u_boot_pylib import test_util
+
+# Bring in the libfdt module
+sys.path.insert(2, 'scripts/dtc/pylibfdt')
+sys.path.insert(2, os.path.join(srctree, 'scripts/dtc/pylibfdt'))
+sys.path.insert(2, os.path.join(srctree, 'build-sandbox/scripts/dtc/pylibfdt'))
+sys.path.insert(2, os.path.join(srctree, 'build-sandbox_spl/scripts/dtc/pylibfdt'))
+
+from binman import cmdline
+from binman import control
+
+def RunTests(debug, verbosity, processes, test_preserve_dirs, args, toolpath):
+ """Run the functional tests and any embedded doctests
+
+ Args:
+ debug: True to enable debugging, which shows a full stack trace on error
+ verbosity: Verbosity level to use
+ test_preserve_dirs: True to preserve the input directory used by tests
+ so that it can be examined afterwards (only useful for debugging
+ tests). If a single test is selected (in args[0]) it also preserves
+ the output directory for this test. Both directories are displayed
+ on the command line.
+ processes: Number of processes to use to run tests (None=same as #CPUs)
+ args: List of positional args provided to binman. This can hold a test
+ name to execute (as in 'binman test testSections', for example)
+ toolpath: List of paths to use for tools
+ """
+ from binman import bintool_test
+ from binman import cbfs_util_test
+ from binman import elf_test
+ from binman import entry_test
+ from binman import fdt_test
+ from binman import fip_util_test
+ from binman import ftest
+ from binman import image_test
+ import doctest
+
+ test_name = args and args[0] or None
+
+    # Run the entry tests first, since these need to be the first to import the
+ # 'entry' module.
+ result = test_util.run_test_suites(
+ 'binman', debug, verbosity, test_preserve_dirs, processes, test_name,
+ toolpath,
+ [bintool_test.TestBintool, entry_test.TestEntry, ftest.TestFunctional,
+ fdt_test.TestFdt, elf_test.TestElf, image_test.TestImage,
+ cbfs_util_test.TestCbfs, fip_util_test.TestFip])
+
+ return (0 if result.wasSuccessful() else 1)
+
+def RunTestCoverage(toolpath, build_dir):
+ """Run the tests and check that we get 100% coverage"""
+ glob_list = control.GetEntryModules(False)
+ all_set = set([os.path.splitext(os.path.basename(item))[0]
+ for item in glob_list if '_testing' not in item])
+ extra_args = ''
+ if toolpath:
+ for path in toolpath:
+ extra_args += ' --toolpath %s' % path
+ test_util.run_test_coverage('tools/binman/binman', None,
+ ['*test*', '*main.py', 'tools/patman/*', 'tools/dtoc/*',
+ 'tools/u_boot_pylib/*'],
+ build_dir, all_set, extra_args or None)
+
+def RunBinman(args):
+ """Main entry point to binman once arguments are parsed
+
+ Args:
+ args: Command line arguments Namespace object
+ """
+ ret_code = 0
+
+ if not args.debug:
+ sys.tracebacklimit = 0
+
+ # Provide a default toolpath in the hope of finding a mkimage built from
+ # current source
+ if not args.toolpath:
+ args.toolpath = ['./tools', 'build-sandbox/tools']
+
+ if args.cmd == 'test':
+ if args.test_coverage:
+ RunTestCoverage(args.toolpath, args.build_dir)
+ else:
+ ret_code = RunTests(args.debug, args.verbosity, args.processes,
+ args.test_preserve_dirs, args.tests,
+ args.toolpath)
+
+ elif args.cmd == 'bintool-docs':
+ control.write_bintool_docs(bintool.Bintool.get_tool_list())
+
+ elif args.cmd == 'entry-docs':
+ control.WriteEntryDocs(control.GetEntryModules())
+
+ else:
+ try:
+ ret_code = control.Binman(args)
+ except Exception as e:
+ print('binman: %s' % e, file=sys.stderr)
+ if args.debug:
+ print()
+ traceback.print_exc()
+ ret_code = 1
+ return ret_code
+
+
+def start_binman():
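+    """Parse the command line, run binman and exit with its return code"""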
+ args = cmdline.ParseArgs(sys.argv[1:])
+
+ ret_code = RunBinman(args)
+ sys.exit(ret_code)
+
+
+if __name__ == "__main__":
+ start_binman()
diff --git a/tools/binman/missing-blob-help b/tools/binman/missing-blob-help
new file mode 100644
index 00000000000..ab0023eb9fb
--- /dev/null
+++ b/tools/binman/missing-blob-help
@@ -0,0 +1,52 @@
+# This file contains help messages for missing external blobs. Each message has
+# a tag (MUST be just lower-case text, digits and hyphens) starting in column 1,
+# followed by a colon (:) to indicate its start. The message can include any
+# number of lines, including blank lines.
+#
+# When looking for a tag, Binman uses the value of 'missing-msg' for the entry,
+# the entry name or the entry type, in that order
+
+atf-bl31:
+See the documentation for your board. You may need to build ARM Trusted
+Firmware and build with BL31=/path/to/bl31.bin
+
+atf-bl31-sunxi:
+Please read the section on ARM Trusted Firmware (ATF) in
+board/sunxi/README.sunxi64
+
+scp-sunxi:
+SCP firmware is required for system suspend, but is otherwise optional.
+Please read the section on SCP firmware in board/sunxi/README.sunxi64
+
+iot2050-seboot:
+See the documentation for IOT2050 board. Your image is missing SEBoot
+which is mandatory for board startup. Prebuilt SEBoot located at
+meta-iot2050/tree/master/recipes-bsp/u-boot/files/prebuild/seboot_pg*.bin.
+
+iot2050-otpcmd:
+See the documentation for IOT2050 board. Your image is missing OTP command data
+block which is used for provisioning the customer keys to the board.
+Please refer to
+meta-iot2050/tree/master/recipes-bsp/secure-boot-otp-provisioning/files/make-otpcmd.sh
+for how to generate this binary. If you are not using secure boot or do not
+intend to provision the keys, disable CONFIG_IOT2050_EMBED_OTPCMD.
+
+k3-rti-wdt-firmware:
+If CONFIG_WDT_K3_RTI_LOAD_FW is enabled, a firmware image is needed for
+the R5F core(s) to trigger the system reset. One possible source is
+https://github.com/siemens/k3-rti-wdt.
+
+rockchip-tpl:
+An external TPL is required to initialize DRAM. Get the external TPL
+binary and build with ROCKCHIP_TPL=/path/to/ddr.bin. One possible source
+for the external TPL binary is https://github.com/rockchip-linux/rkbin.
+
+tee-os:
+See the documentation for your board. You may need to build Open Portable
+Trusted Execution Environment (OP-TEE) and build with TEE=/path/to/tee.bin
+
+opensbi:
+See the documentation for your board. The OpenSBI git repo is at
+https://github.com/riscv/opensbi.git
+You may need to build fw_dynamic.bin first and re-build u-boot with
+OPENSBI=/path/to/fw_dynamic.bin
diff --git a/tools/binman/pyproject.toml b/tools/binman/pyproject.toml
new file mode 100644
index 00000000000..ba34437fc53
--- /dev/null
+++ b/tools/binman/pyproject.toml
@@ -0,0 +1,29 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "binary-manager"
+version = "0.0.6"
+authors = [
+ { name="Simon Glass", email="sjg@chromium.org" },
+]
+dependencies = ["pylibfdt", "u_boot_pylib >= 0.0.6", "dtoc >= 0.0.6"]
+description = "Binman firmware-packaging tool"
+readme = "README.rst"
+requires-python = ">=3.7"
+classifiers = [
+ "Programming Language :: Python :: 3",
+ "License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)",
+ "Operating System :: OS Independent",
+]
+
+[project.urls]
+"Homepage" = "https://docs.u-boot.org/en/latest/develop/package/index.html"
+"Bug Tracker" = "https://source.denx.de/groups/u-boot/-/issues"
+
+[project.scripts]
+binman = "binman.main:start_binman"
+
+[tool.setuptools.package-data]
+binman = ["*.rst"]
diff --git a/tools/binman/setup.py b/tools/binman/setup.py
new file mode 100644
index 00000000000..9a9206eb044
--- /dev/null
+++ b/tools/binman/setup.py
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0+
+
+from distutils.core import setup
+setup(name='binman',
+ version='1.0',
+ license='GPL-2.0+',
+ scripts=['binman'],
+ packages=['binman', 'binman.etype', 'binman.btool'],
+ package_dir={'binman': ''},
+ package_data={'binman': ['README.rst', 'entries.rst']},
+ classifiers=['Environment :: Console',
+ 'Topic :: Software Development :: Embedded Systems'])
diff --git a/tools/binman/state.py b/tools/binman/state.py
new file mode 100644
index 00000000000..45bae40c525
--- /dev/null
+++ b/tools/binman/state.py
@@ -0,0 +1,536 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Holds and modifies the state information held by binman
+#
+
+from collections import defaultdict
+import hashlib
+import re
+import time
+import threading
+
+from dtoc import fdt
+import os
+from u_boot_pylib import tools
+from u_boot_pylib import tout
+
+OUR_PATH = os.path.dirname(os.path.realpath(__file__))
+
+# Map a dtb etype to its expected filename
+DTB_TYPE_FNAME = {
+ 'u-boot-spl-dtb': 'spl/u-boot-spl.dtb',
+ 'u-boot-tpl-dtb': 'tpl/u-boot-tpl.dtb',
+ 'u-boot-vpl-dtb': 'vpl/u-boot-vpl.dtb',
+ }
+
+# Records the device-tree files known to binman, keyed by entry type (e.g.
+# 'u-boot-spl-dtb'). These are the output FDT files, which can be updated by
+# binman. They have been copied to <xxx>.out files.
+#
+# key: entry type (e.g. 'u-boot-dtb')
+# value: tuple:
+# Fdt object
+# Filename
+output_fdt_info = {}
+
+# Prefix to add to an fdtmap path to turn it into a path to the /binman node
+fdt_path_prefix = ''
+
+# Arguments passed to binman to provide arguments to entries
+entry_args = {}
+
+# True to use fake device-tree files for testing (see U_BOOT_DTB_DATA in
+# ftest.py)
+use_fake_dtb = False
+
+# The DTB which contains the full image information
+main_dtb = None
+
+# Allow entries to expand after they have been packed. This is detected and
+# forces a re-pack. If not allowed, any attempted expansion causes an error in
+# Entry.ProcessContentsUpdate()
+allow_entry_expansion = True
+
+# Don't allow entries to contract after they have been packed. Instead just
+# leave some wasted space. If allowed, this is detected and forces a re-pack,
+# but may result in entries that oscillate in size, thus causing a pack error.
+# An example is a compressed device tree where the original offset values
+# result in a larger compressed size than the new ones, but then after updating
+# to the new ones, the compressed size increases, etc.
+allow_entry_contraction = False
+
+# Number of threads to use for binman (None means machine-dependent)
+num_threads = None
+
+
+class Timing:
+ """Holds information about an operation that is being timed
+
+ Properties:
+ name: Operation name (only one of each name is stored)
+        start: Start time of operation in seconds (None if not started)
+        accum: Amount of time spent on this operation so far, in seconds
+ """
+ def __init__(self, name):
+ self.name = name
+ self.start = None # cause an error if TimingStart() is not called
+ self.accum = 0.0
+
+
+# Holds timing info for each name:
+# key: name of Timing info (Timing.name)
+# value: Timing object
+timing_info = {}
+
+
+def GetFdtForEtype(etype):
+ """Get the Fdt object for a particular device-tree entry
+
+ Binman keeps track of at least one device-tree file called u-boot.dtb but
+ can also have others (e.g. for SPL). This function looks up the given
+ entry and returns the associated Fdt object.
+
+ Args:
+ etype: Entry type of device tree (e.g. 'u-boot-dtb')
+
+ Returns:
+ Fdt object associated with the entry type
+ """
+    value = output_fdt_info.get(etype)
+ if not value:
+ return None
+ return value[0]
+
+def GetFdtPath(etype):
+ """Get the full pathname of a particular Fdt object
+
+ Similar to GetFdtForEtype() but returns the pathname associated with the
+ Fdt.
+
+ Args:
+ etype: Entry type of device tree (e.g. 'u-boot-dtb')
+
+ Returns:
+ Full path name to the associated Fdt
+ """
+ return output_fdt_info[etype][0]._fname
+
+def GetFdtContents(etype='u-boot-dtb'):
+ """Looks up the FDT pathname and contents
+
+ This is used to obtain the Fdt pathname and contents when needed by an
+ entry. It supports a 'fake' dtb, allowing tests to substitute test data for
+ the real dtb.
+
+ Args:
+        etype: Entry type to look up (e.g. 'u-boot-dtb').
+
+ Returns:
+ tuple:
+ pathname to Fdt
+ Fdt data (as bytes)
+ """
+ if etype not in output_fdt_info:
+ return None, None
+ if not use_fake_dtb:
+ pathname = GetFdtPath(etype)
+ data = GetFdtForEtype(etype).GetContents()
+ else:
+ fname = output_fdt_info[etype][1]
+ pathname = tools.get_input_filename(fname)
+ data = tools.read_file(pathname)
+ return pathname, data
+
+def UpdateFdtContents(etype, data):
+ """Update the contents of a particular device tree
+
+ The device tree is updated and written back to its file. This affects what
+    is returned from future calls to GetFdtContents(), etc.
+
+ Args:
+ etype: Entry type (e.g. 'u-boot-dtb')
+ data: Data to replace the DTB with
+ """
+ dtb, fname = output_fdt_info[etype]
+ dtb_fname = dtb.GetFilename()
+ tools.write_file(dtb_fname, data)
+ dtb = fdt.FdtScan(dtb_fname)
+ output_fdt_info[etype] = [dtb, fname]
+
+def SetEntryArgs(args):
+ """Set the value of the entry args
+
+ This sets up the entry_args dict which is used to supply entry arguments to
+ entries.
+
+ Args:
+ args: List of entry arguments, each in the format "name=value"
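+            (e.g. 'default-dt=board' or 'atf-bl31-path=bl31.elf')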
+ """
+ global entry_args
+
+ entry_args = {}
+ tout.debug('Processing entry args:')
+ if args:
+ for arg in args:
+ m = re.match('([^=]*)=(.*)', arg)
+ if not m:
+ raise ValueError("Invalid entry arguemnt '%s'" % arg)
+ name, value = m.groups()
+ tout.debug(' %20s = %s' % (name, value))
+ entry_args[name] = value
+ tout.debug('Processing entry args done')
+
+def GetEntryArg(name):
+ """Get the value of an entry argument
+
+ Args:
+ name: Name of argument to retrieve
+
+ Returns:
+ String value of argument
+ """
+ return entry_args.get(name)
+
+def GetEntryArgBool(name):
+ """Get the value of an entry argument as a boolean
+
+ Args:
+ name: Name of argument to retrieve
+
+ Returns:
+        False if the entry argument is considered False (empty, '0' or 'n'), else
+ True
+ """
+ val = GetEntryArg(name)
+ return val and val not in ['n', '0']
+
+def Prepare(images, dtb):
+ """Get device tree files ready for use
+
+ This sets up a set of device tree files that can be retrieved by
+ GetAllFdts(). This includes U-Boot proper and any SPL device trees.
+
+ Args:
+ images: List of images being used
+ dtb: Main dtb
+ """
+ global output_fdt_info, main_dtb, fdt_path_prefix
+ # Import these here in case libfdt.py is not available, in which case
+ # the above help option still works.
+ from dtoc import fdt
+ from dtoc import fdt_util
+
+ # If we are updating the DTBs we need to put these updated versions
+ # where Entry_blob_dtb can find them. We can ignore 'u-boot.dtb'
+ # since it is assumed to be the one passed in with options.dt, and
+ # was handled just above.
+ main_dtb = dtb
+ output_fdt_info.clear()
+ fdt_path_prefix = ''
+ output_fdt_info['u-boot-dtb'] = [dtb, 'u-boot.dtb']
+ if use_fake_dtb:
+ for etype, fname in DTB_TYPE_FNAME.items():
+ output_fdt_info[etype] = [dtb, fname]
+ else:
+ fdt_set = {}
+ for etype, fname in DTB_TYPE_FNAME.items():
+ infile = tools.get_input_filename(fname, allow_missing=True)
+ if infile and os.path.exists(infile):
+ fname_dtb = fdt_util.EnsureCompiled(infile)
+ out_fname = tools.get_output_filename('%s.out' %
+ os.path.split(fname)[1])
+ tools.write_file(out_fname, tools.read_file(fname_dtb))
+ other_dtb = fdt.FdtScan(out_fname)
+ output_fdt_info[etype] = [other_dtb, out_fname]
+
+
+def PrepareFromLoadedData(image):
+ """Get device tree files ready for use with a loaded image
+
+ Loaded images are different from images that are being created by binman,
+ since there is generally already an fdtmap and we read the description from
+ that. This provides the position and size of every entry in the image with
+ no calculation required.
+
+ This function uses the same output_fdt_info[] as Prepare(). It finds the
+ device tree files, adds a reference to the fdtmap and sets the FDT path
+ prefix to translate from the fdtmap (where the root node is the image node)
+ to the normal device tree (where the image node is under a /binman node).
+
+ Args:
+ images: List of images being used
+ """
+ global output_fdt_info, main_dtb, fdt_path_prefix
+
+ tout.info('Preparing device trees')
+ output_fdt_info.clear()
+ fdt_path_prefix = ''
+ output_fdt_info['fdtmap'] = [image.fdtmap_dtb, 'u-boot.dtb']
+ main_dtb = None
+ tout.info(" Found device tree type 'fdtmap' '%s'" % image.fdtmap_dtb.name)
+ for etype, value in image.GetFdts().items():
+ entry, fname = value
+ out_fname = tools.get_output_filename('%s.dtb' % entry.etype)
+ tout.info(" Found device tree type '%s' at '%s' path '%s'" %
+ (etype, out_fname, entry.GetPath()))
+ entry._filename = entry.GetDefaultFilename()
+ data = entry.ReadData()
+
+ tools.write_file(out_fname, data)
+ dtb = fdt.Fdt(out_fname)
+ dtb.Scan()
+ image_node = dtb.GetNode('/binman')
+ if 'multiple-images' in image_node.props:
+ image_node = dtb.GetNode('/binman/%s' % image.image_node)
+ fdt_path_prefix = image_node.path
+ output_fdt_info[etype] = [dtb, None]
+ tout.info(" FDT path prefix '%s'" % fdt_path_prefix)
+
+
+def GetAllFdts():
+ """Yield all device tree files being used by binman
+
+ Yields:
+ Device trees being used (U-Boot proper, SPL, TPL, VPL)
+ """
+ if main_dtb:
+ yield main_dtb
+ for etype in output_fdt_info:
+ dtb = output_fdt_info[etype][0]
+ if dtb != main_dtb:
+ yield dtb
+
+def GetUpdateNodes(node, for_repack=False):
+ """Yield all the nodes that need to be updated in all device trees
+
+ The property referenced by this node is added to any device trees which
+    have the given node. Due to removal of unwanted nodes, SPL and TPL may
+ not have this node.
+
+ Args:
+ node: Node object in the main device tree to look up
+ for_repack: True if we want only nodes which need 'repack' properties
+ added to them (e.g. 'orig-offset'), False to return all nodes. We
+ don't add repack properties to SPL/TPL device trees.
+
+ Yields:
+ Node objects in each device tree that is in use (U-Boot proper, i.e.
+ the node passed in, as well as SPL and TPL)
+ """
+ yield node
+ for entry_type, (dtb, fname) in output_fdt_info.items():
+ if dtb != node.GetFdt():
+ if for_repack and entry_type != 'u-boot-dtb':
+ continue
+ other_node = dtb.GetNode(fdt_path_prefix + node.path)
+ if other_node:
+ yield other_node
+
+def AddZeroProp(node, prop, for_repack=False):
+ """Add a new property to affected device trees with an integer value of 0.
+
+ Args:
+ node: Node to add the property to, in the main device tree
+ prop: Name of property to add
+ for_repack: True if this property is only needed for repacking
+ """
+ for n in GetUpdateNodes(node, for_repack):
+ n.AddZeroProp(prop)
+
+def AddSubnode(node, name):
+ """Add a new subnode to a node in affected device trees
+
+ Args:
+ node: Node to add to
+ name: name of node to add
+
+ Returns:
+ New subnode that was created in main tree
+ """
+ first = None
+ for n in GetUpdateNodes(node):
+ subnode = n.AddSubnode(name)
+ if not first:
+ first = subnode
+ return first
+
+def AddString(node, prop, value):
+ """Add a new string property to affected device trees
+
+ Args:
+ node: Node to add the property to, in the main device tree
+ prop: Name of property to add
+ value: String value (which will be \0-terminated in the DT)
+ """
+ for n in GetUpdateNodes(node):
+ n.AddString(prop, value)
+
+def AddInt(node, prop, value):
+ """Add a new string property to affected device trees
+
+ Args:
+ node: Node to add the property to, in the main device tree
+ prop: Name of property to add
+ value: Integer value of the property
+ """
+ for n in GetUpdateNodes(node):
+ n.AddInt(prop, value)
+
+def SetInt(node, prop, value, for_repack=False):
+ """Update an integer property in affected device trees with an integer value
+
+ This is not allowed to change the size of the FDT.
+
+ Args:
+ node: Node to update, in the main device tree
+ prop: Name of property to update
+ value: New integer value for the property
+ for_repack: True if this property is only needed for repacking
+ """
+ for n in GetUpdateNodes(node, for_repack):
+ tout.debug("File %s: Update node '%s' prop '%s' to %#x" %
+ (n.GetFdt().name, n.path, prop, value))
+ n.SetInt(prop, value)
+
+def CheckAddHashProp(node):
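+ """Add a 'value' property to the hash subnode of this node, if present
+
+ This checks that the hash node specifies a supported algorithm (only
+ sha256 at present) and adds an empty 'value' property of the correct
+ size to the hash node in each affected device tree, ready to be filled
+ in later by CheckSetHashValue().
+
+ Args:
+ node: Node which may contain a 'hash' subnode
+
+ Returns:
+ str: Error message if the hash node is invalid, else None
+ """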
+ hash_node = node.FindNode('hash')
+ if hash_node:
+ algo = hash_node.props.get('algo')
+ if not algo:
+ return "Missing 'algo' property for hash node"
+ if algo.value == 'sha256':
+ size = 32
+ else:
+ return "Unknown hash algorithm '%s'" % algo.value
+ for n in GetUpdateNodes(hash_node):
+ n.AddEmptyProp('value', size)
+
+def CheckSetHashValue(node, get_data_func):
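+ """Calculate and store the hash value for the hash subnode, if present
+
+ This hashes the data returned by get_data_func() using the algorithm
+ named in the hash node's 'algo' property (validated earlier by
+ CheckAddHashProp()) and writes the digest into the 'value' property in
+ each affected device tree.
+
+ Args:
+ node: Node which may contain a 'hash' subnode
+ get_data_func: Function to call to obtain the data to hash
+ """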
+ hash_node = node.FindNode('hash')
+ if hash_node:
+ algo = hash_node.props.get('algo').value
+ if algo == 'sha256':
+ m = hashlib.sha256()
+ m.update(get_data_func())
+ data = m.digest()
+ for n in GetUpdateNodes(hash_node):
+ n.SetData('value', data)
+
+def SetAllowEntryExpansion(allow):
+ """Set whether post-pack expansion of entries is allowed
+
+ Args:
+ allow: True to allow expansion, False to raise an exception
+ """
+ global allow_entry_expansion
+
+ allow_entry_expansion = allow
+
+def AllowEntryExpansion():
+ """Check whether post-pack expansion of entries is allowed
+
+ Returns:
+ True if expansion should be allowed, False if an exception should be
+ raised
+ """
+ return allow_entry_expansion
+
+def SetAllowEntryContraction(allow):
+ """Set whether post-pack contraction of entries is allowed
+
+ Args:
+ allow: True to allow contraction, False to raise an exception
+ """
+ global allow_entry_contraction
+
+ allow_entry_contraction = allow
+
+def AllowEntryContraction():
+ """Check whether post-pack contraction of entries is allowed
+
+ Returns:
+ True if contraction should be allowed, False if an exception should be
+ raised
+ """
+ return allow_entry_contraction
+
+def SetThreads(threads):
+ """Set the number of threads to use when building sections
+
+ Args:
+ threads: Number of threads to use (None for default, 0 for
+ single-threaded)
+ """
+ global num_threads
+
+ num_threads = threads
+
+def GetThreads():
+ """Get the number of threads to use when building sections
+
+ Returns:
+ Number of threads to use (None for default, 0 for single-threaded)
+ """
+ return num_threads
+
+def GetTiming(name):
+ """Get the timing info for a particular operation
+
+ The object is created if it does not already exist.
+
+ Args:
+ name: Operation name to get
+
+ Returns:
+ Timing object for the current thread
+ """
+ threaded_name = '%s:%d' % (name, threading.get_ident())
+ timing = timing_info.get(threaded_name)
+ if not timing:
+ timing = Timing(threaded_name)
+ timing_info[threaded_name] = timing
+ return timing
+
+def TimingStart(name):
+ """Start the timer for an operation
+
+ Args:
+ name: Operation name to start
+ """
+ timing = GetTiming(name)
+ timing.start = time.monotonic()
+
+def TimingAccum(name):
+ """Stop and accumlate the time for an operation
+
+ This measures the time since the last TimingStart() and adds that to the
+ accumulated time.
+
+ Args:
+ name: Operation name to accumulate time for
+ """
+ timing = GetTiming(name)
+ timing.accum += time.monotonic() - timing.start
+
+def TimingShow():
+ """Show all timing information"""
+ duration = defaultdict(float)
+ for threaded_name, timing in timing_info.items():
+ name = threaded_name.split(':')[0]
+ duration[name] += timing.accum
+
+ for name, seconds in duration.items():
+ print('%10s: %10.1fms' % (name, seconds * 1000))
+
+def GetVersion(path=OUR_PATH):
+ """Get the version string for binman
+
+ Args:
+ path: Path to 'version' file
+
+ Returns:
+ str: String version, e.g. 'v2021.10'
+ """
+ version_fname = os.path.join(path, 'version')
+ if os.path.exists(version_fname):
+ version = tools.read_file(version_fname, binary=False)
+ else:
+ version = '(unreleased)'
+ return version
diff --git a/tools/binman/test/001_invalid.dts b/tools/binman/test/001_invalid.dts
new file mode 100644
index 00000000000..7d00455d7c1
--- /dev/null
+++ b/tools/binman/test/001_invalid.dts
@@ -0,0 +1,5 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
diff --git a/tools/binman/test/002_missing_node.dts b/tools/binman/test/002_missing_node.dts
new file mode 100644
index 00000000000..3a51ec2be58
--- /dev/null
+++ b/tools/binman/test/002_missing_node.dts
@@ -0,0 +1,6 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+};
diff --git a/tools/binman/test/003_empty.dts b/tools/binman/test/003_empty.dts
new file mode 100644
index 00000000000..493c9a04c97
--- /dev/null
+++ b/tools/binman/test/003_empty.dts
@@ -0,0 +1,9 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ };
+};
diff --git a/tools/binman/test/004_invalid_entry.dts b/tools/binman/test/004_invalid_entry.dts
new file mode 100644
index 00000000000..b043455bb57
--- /dev/null
+++ b/tools/binman/test/004_invalid_entry.dts
@@ -0,0 +1,11 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ not-a-valid-type {
+ };
+ };
+};
diff --git a/tools/binman/test/005_simple.dts b/tools/binman/test/005_simple.dts
new file mode 100644
index 00000000000..3771aa2261c
--- /dev/null
+++ b/tools/binman/test/005_simple.dts
@@ -0,0 +1,11 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot {
+ };
+ };
+};
diff --git a/tools/binman/test/006_dual_image.dts b/tools/binman/test/006_dual_image.dts
new file mode 100644
index 00000000000..78be16f1649
--- /dev/null
+++ b/tools/binman/test/006_dual_image.dts
@@ -0,0 +1,22 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ multiple-images;
+ image1 {
+ u-boot {
+ };
+ };
+
+ image2 {
+ pad-before = <3>;
+ pad-after = <5>;
+
+ u-boot {
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/007_bad_align.dts b/tools/binman/test/007_bad_align.dts
new file mode 100644
index 00000000000..123bb135581
--- /dev/null
+++ b/tools/binman/test/007_bad_align.dts
@@ -0,0 +1,12 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot {
+ align = <23>;
+ };
+ };
+};
diff --git a/tools/binman/test/008_pack.dts b/tools/binman/test/008_pack.dts
new file mode 100644
index 00000000000..a88785d8352
--- /dev/null
+++ b/tools/binman/test/008_pack.dts
@@ -0,0 +1,30 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot {
+ };
+
+ u-boot-align {
+ type = "u-boot";
+ align = <16>;
+ };
+
+ u-boot-size {
+ type = "u-boot";
+ size = <23>;
+ };
+
+ u-boot-next {
+ type = "u-boot";
+ };
+
+ u-boot-fixed {
+ type = "u-boot";
+ offset = <61>;
+ };
+ };
+};
diff --git a/tools/binman/test/009_pack_extra.dts b/tools/binman/test/009_pack_extra.dts
new file mode 100644
index 00000000000..8d6f4910c93
--- /dev/null
+++ b/tools/binman/test/009_pack_extra.dts
@@ -0,0 +1,42 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot {
+ min-size = <12>;
+ pad-before = <3>;
+ pad-after = <5>;
+ };
+
+ u-boot-align-size-nop {
+ type = "u-boot";
+ align-size = <4>;
+ };
+
+ u-boot-align-size {
+ type = "u-boot";
+ align = <16>;
+ align-size = <32>;
+ };
+
+ u-boot-align-end {
+ type = "u-boot";
+ align-end = <64>;
+ };
+
+ u-boot-align-both {
+ type = "u-boot";
+ align = <64>;
+ align-end = <128>;
+ };
+
+ u-boot-min-size {
+ type = "u-boot";
+ min-size = <24>;
+ align-size = <16>;
+ };
+ };
+};
diff --git a/tools/binman/test/010_pack_align_power2.dts b/tools/binman/test/010_pack_align_power2.dts
new file mode 100644
index 00000000000..8f6253a3d0f
--- /dev/null
+++ b/tools/binman/test/010_pack_align_power2.dts
@@ -0,0 +1,12 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot {
+ align = <5>;
+ };
+ };
+};
diff --git a/tools/binman/test/011_pack_align_size_power2.dts b/tools/binman/test/011_pack_align_size_power2.dts
new file mode 100644
index 00000000000..04f7672ea47
--- /dev/null
+++ b/tools/binman/test/011_pack_align_size_power2.dts
@@ -0,0 +1,12 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot {
+ align-size = <55>;
+ };
+ };
+};
diff --git a/tools/binman/test/012_pack_inv_align.dts b/tools/binman/test/012_pack_inv_align.dts
new file mode 100644
index 00000000000..d8dd600edb8
--- /dev/null
+++ b/tools/binman/test/012_pack_inv_align.dts
@@ -0,0 +1,13 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot {
+ offset = <5>;
+ align = <4>;
+ };
+ };
+};
diff --git a/tools/binman/test/013_pack_inv_size_align.dts b/tools/binman/test/013_pack_inv_size_align.dts
new file mode 100644
index 00000000000..dfafa134d7b
--- /dev/null
+++ b/tools/binman/test/013_pack_inv_size_align.dts
@@ -0,0 +1,13 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot {
+ size = <5>;
+ align-size = <4>;
+ };
+ };
+};
diff --git a/tools/binman/test/014_pack_overlap.dts b/tools/binman/test/014_pack_overlap.dts
new file mode 100644
index 00000000000..3895cba3bdb
--- /dev/null
+++ b/tools/binman/test/014_pack_overlap.dts
@@ -0,0 +1,16 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot {
+ };
+
+ u-boot-align {
+ type = "u-boot";
+ offset = <3>;
+ };
+ };
+};
diff --git a/tools/binman/test/015_pack_overflow.dts b/tools/binman/test/015_pack_overflow.dts
new file mode 100644
index 00000000000..6f654330afc
--- /dev/null
+++ b/tools/binman/test/015_pack_overflow.dts
@@ -0,0 +1,12 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot {
+ size = <3>;
+ };
+ };
+};
diff --git a/tools/binman/test/016_pack_image_overflow.dts b/tools/binman/test/016_pack_image_overflow.dts
new file mode 100644
index 00000000000..6ae66f3ac95
--- /dev/null
+++ b/tools/binman/test/016_pack_image_overflow.dts
@@ -0,0 +1,13 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <3>;
+
+ u-boot {
+ };
+ };
+};
diff --git a/tools/binman/test/017_pack_image_size.dts b/tools/binman/test/017_pack_image_size.dts
new file mode 100644
index 00000000000..2360eb5d19a
--- /dev/null
+++ b/tools/binman/test/017_pack_image_size.dts
@@ -0,0 +1,13 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <7>;
+
+ u-boot {
+ };
+ };
+};
diff --git a/tools/binman/test/018_pack_image_align.dts b/tools/binman/test/018_pack_image_align.dts
new file mode 100644
index 00000000000..16cd2a422ef
--- /dev/null
+++ b/tools/binman/test/018_pack_image_align.dts
@@ -0,0 +1,13 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ align-size = <16>;
+
+ u-boot {
+ };
+ };
+};
diff --git a/tools/binman/test/019_pack_inv_image_align.dts b/tools/binman/test/019_pack_inv_image_align.dts
new file mode 100644
index 00000000000..e5ee87b88fb
--- /dev/null
+++ b/tools/binman/test/019_pack_inv_image_align.dts
@@ -0,0 +1,14 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <7>;
+ align-size = <8>;
+
+ u-boot {
+ };
+ };
+};
diff --git a/tools/binman/test/020_pack_inv_image_align_power2.dts b/tools/binman/test/020_pack_inv_image_align_power2.dts
new file mode 100644
index 00000000000..a428c4be520
--- /dev/null
+++ b/tools/binman/test/020_pack_inv_image_align_power2.dts
@@ -0,0 +1,13 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ align-size = <131>;
+
+ u-boot {
+ };
+ };
+};
diff --git a/tools/binman/test/021_image_pad.dts b/tools/binman/test/021_image_pad.dts
new file mode 100644
index 00000000000..c5abbbcdd6a
--- /dev/null
+++ b/tools/binman/test/021_image_pad.dts
@@ -0,0 +1,16 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ pad-byte = <0xff>;
+ u-boot-spl {
+ };
+
+ u-boot {
+ offset = <28>;
+ };
+ };
+};
diff --git a/tools/binman/test/022_image_name.dts b/tools/binman/test/022_image_name.dts
new file mode 100644
index 00000000000..94fc069c176
--- /dev/null
+++ b/tools/binman/test/022_image_name.dts
@@ -0,0 +1,21 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ multiple-images;
+ image1 {
+ filename = "test-name";
+ u-boot {
+ };
+ };
+
+ image2 {
+ filename = "test-name.xx";
+ u-boot {
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/023_blob.dts b/tools/binman/test/023_blob.dts
new file mode 100644
index 00000000000..7dcff69666a
--- /dev/null
+++ b/tools/binman/test/023_blob.dts
@@ -0,0 +1,12 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ blob {
+ filename = "blobfile";
+ };
+ };
+};
diff --git a/tools/binman/test/024_sorted.dts b/tools/binman/test/024_sorted.dts
new file mode 100644
index 00000000000..b54f9b14191
--- /dev/null
+++ b/tools/binman/test/024_sorted.dts
@@ -0,0 +1,17 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ sort-by-offset;
+ u-boot {
+ offset = <30>;
+ };
+
+ u-boot-spl {
+ offset = <1>;
+ };
+ };
+};
diff --git a/tools/binman/test/025_pack_zero_size.dts b/tools/binman/test/025_pack_zero_size.dts
new file mode 100644
index 00000000000..e863c44e3fd
--- /dev/null
+++ b/tools/binman/test/025_pack_zero_size.dts
@@ -0,0 +1,15 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot {
+ };
+
+ u-boot-spl {
+ offset = <0>;
+ };
+ };
+};
diff --git a/tools/binman/test/026_pack_u_boot_dtb.dts b/tools/binman/test/026_pack_u_boot_dtb.dts
new file mode 100644
index 00000000000..2707a7347a4
--- /dev/null
+++ b/tools/binman/test/026_pack_u_boot_dtb.dts
@@ -0,0 +1,14 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot-nodtb {
+ };
+
+ u-boot-dtb {
+ };
+ };
+};
diff --git a/tools/binman/test/027_pack_4gb_no_size.dts b/tools/binman/test/027_pack_4gb_no_size.dts
new file mode 100644
index 00000000000..371cca10d58
--- /dev/null
+++ b/tools/binman/test/027_pack_4gb_no_size.dts
@@ -0,0 +1,18 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ sort-by-offset;
+ end-at-4gb;
+ u-boot {
+ offset = <0xfffffff0>;
+ };
+
+ u-boot-spl {
+ offset = <0xfffffff7>;
+ };
+ };
+};
diff --git a/tools/binman/test/028_pack_4gb_outside.dts b/tools/binman/test/028_pack_4gb_outside.dts
new file mode 100644
index 00000000000..b6ad7fb56a5
--- /dev/null
+++ b/tools/binman/test/028_pack_4gb_outside.dts
@@ -0,0 +1,19 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ sort-by-offset;
+ end-at-4gb;
+ size = <32>;
+ u-boot {
+ offset = <0>;
+ };
+
+ u-boot-spl {
+ offset = <0xffffffe3>;
+ };
+ };
+};
diff --git a/tools/binman/test/029_x86_rom.dts b/tools/binman/test/029_x86_rom.dts
new file mode 100644
index 00000000000..ad8f9d6e1bd
--- /dev/null
+++ b/tools/binman/test/029_x86_rom.dts
@@ -0,0 +1,19 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ sort-by-offset;
+ end-at-4gb;
+ size = <36>;
+ u-boot {
+ offset = <0xffffffdc>;
+ };
+
+ u-boot-spl {
+ offset = <0xffffffe3>;
+ };
+ };
+};
diff --git a/tools/binman/test/030_x86_rom_me_no_desc.dts b/tools/binman/test/030_x86_rom_me_no_desc.dts
new file mode 100644
index 00000000000..796cb87afc7
--- /dev/null
+++ b/tools/binman/test/030_x86_rom_me_no_desc.dts
@@ -0,0 +1,16 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ sort-by-offset;
+ end-at-4gb;
+ size = <16>;
+ intel-me {
+ filename = "me.bin";
+ offset-unset;
+ };
+ };
+};
diff --git a/tools/binman/test/031_x86_rom_me.dts b/tools/binman/test/031_x86_rom_me.dts
new file mode 100644
index 00000000000..b8b0a5a74bb
--- /dev/null
+++ b/tools/binman/test/031_x86_rom_me.dts
@@ -0,0 +1,20 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ sort-by-offset;
+ end-at-4gb;
+ size = <0x800000>;
+ intel-descriptor {
+ filename = "descriptor.bin";
+ };
+
+ intel-me {
+ filename = "me.bin";
+ offset-unset;
+ };
+ };
+};
diff --git a/tools/binman/test/032_intel_vga.dts b/tools/binman/test/032_intel_vga.dts
new file mode 100644
index 00000000000..9c532d03d3c
--- /dev/null
+++ b/tools/binman/test/032_intel_vga.dts
@@ -0,0 +1,14 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <16>;
+
+ intel-vga {
+ filename = "vga.bin";
+ };
+ };
+};
diff --git a/tools/binman/test/033_x86_start16.dts b/tools/binman/test/033_x86_start16.dts
new file mode 100644
index 00000000000..2e279dee9d6
--- /dev/null
+++ b/tools/binman/test/033_x86_start16.dts
@@ -0,0 +1,13 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <16>;
+
+ x86-start16 {
+ };
+ };
+};
diff --git a/tools/binman/test/034_x86_ucode.dts b/tools/binman/test/034_x86_ucode.dts
new file mode 100644
index 00000000000..40725731cd3
--- /dev/null
+++ b/tools/binman/test/034_x86_ucode.dts
@@ -0,0 +1,29 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ sort-by-offset;
+ end-at-4gb;
+ size = <0x200>;
+ u-boot-with-ucode-ptr {
+ };
+
+ u-boot-dtb-with-ucode {
+ };
+
+ u-boot-ucode {
+ };
+ };
+
+ microcode {
+ update@0 {
+ data = <0x12345678 0x12345679>;
+ };
+ update@1 {
+ data = <0xabcd0000 0x78235609>;
+ };
+ };
+};
diff --git a/tools/binman/test/035_x86_single_ucode.dts b/tools/binman/test/035_x86_single_ucode.dts
new file mode 100644
index 00000000000..2b1f086a41c
--- /dev/null
+++ b/tools/binman/test/035_x86_single_ucode.dts
@@ -0,0 +1,26 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ sort-by-offset;
+ end-at-4gb;
+ size = <0x200>;
+ u-boot-with-ucode-ptr {
+ };
+
+ u-boot-dtb-with-ucode {
+ };
+
+ u-boot-ucode {
+ };
+ };
+
+ microcode {
+ update@0 {
+ data = <0x12345678 0x12345679>;
+ };
+ };
+};
diff --git a/tools/binman/test/036_u_boot_img.dts b/tools/binman/test/036_u_boot_img.dts
new file mode 100644
index 00000000000..aa5a3fe4810
--- /dev/null
+++ b/tools/binman/test/036_u_boot_img.dts
@@ -0,0 +1,11 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot-img {
+ };
+ };
+};
diff --git a/tools/binman/test/037_x86_no_ucode.dts b/tools/binman/test/037_x86_no_ucode.dts
new file mode 100644
index 00000000000..6da49c3da6d
--- /dev/null
+++ b/tools/binman/test/037_x86_no_ucode.dts
@@ -0,0 +1,20 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ sort-by-offset;
+ end-at-4gb;
+ size = <0x200>;
+ u-boot-with-ucode-ptr {
+ };
+
+ u-boot-dtb-with-ucode {
+ };
+
+ u-boot-ucode {
+ };
+ };
+};
diff --git a/tools/binman/test/038_x86_ucode_missing_node.dts b/tools/binman/test/038_x86_ucode_missing_node.dts
new file mode 100644
index 00000000000..720677c9c1e
--- /dev/null
+++ b/tools/binman/test/038_x86_ucode_missing_node.dts
@@ -0,0 +1,26 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ sort-by-offset;
+ end-at-4gb;
+ size = <0x200>;
+ u-boot-with-ucode-ptr {
+ };
+
+ u-boot-ucode {
+ };
+ };
+
+ microcode {
+ update@0 {
+ data = <0x12345678 0x12345679>;
+ };
+ update@1 {
+ data = <0xabcd0000 0x78235609>;
+ };
+ };
+};
diff --git a/tools/binman/test/039_x86_ucode_missing_node2.dts b/tools/binman/test/039_x86_ucode_missing_node2.dts
new file mode 100644
index 00000000000..10ac086d549
--- /dev/null
+++ b/tools/binman/test/039_x86_ucode_missing_node2.dts
@@ -0,0 +1,23 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ sort-by-offset;
+ end-at-4gb;
+ size = <0x200>;
+ u-boot-with-ucode-ptr {
+ };
+ };
+
+ microcode {
+ update@0 {
+ data = <0x12345678 0x12345679>;
+ };
+ update@1 {
+ data = <0xabcd0000 0x78235609>;
+ };
+ };
+};
diff --git a/tools/binman/test/040_x86_ucode_not_in_image.dts b/tools/binman/test/040_x86_ucode_not_in_image.dts
new file mode 100644
index 00000000000..609725824a5
--- /dev/null
+++ b/tools/binman/test/040_x86_ucode_not_in_image.dts
@@ -0,0 +1,28 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ sort-by-offset;
+ size = <0x200>;
+ u-boot-with-ucode-ptr {
+ };
+
+ u-boot-dtb-with-ucode {
+ };
+
+ u-boot-ucode {
+ };
+ };
+
+ microcode {
+ update@0 {
+ data = <0x12345678 0x12345679>;
+ };
+ update@1 {
+ data = <0xabcd0000 0x78235609>;
+ };
+ };
+};
diff --git a/tools/binman/test/041_unknown_pos_size.dts b/tools/binman/test/041_unknown_pos_size.dts
new file mode 100644
index 00000000000..94fe821c470
--- /dev/null
+++ b/tools/binman/test/041_unknown_pos_size.dts
@@ -0,0 +1,12 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ _testing {
+ return-invalid-entry;
+ };
+ };
+};
diff --git a/tools/binman/test/042_intel_fsp.dts b/tools/binman/test/042_intel_fsp.dts
new file mode 100644
index 00000000000..8a7c889251b
--- /dev/null
+++ b/tools/binman/test/042_intel_fsp.dts
@@ -0,0 +1,14 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <16>;
+
+ intel-fsp {
+ filename = "fsp.bin";
+ };
+ };
+};
diff --git a/tools/binman/test/043_intel_cmc.dts b/tools/binman/test/043_intel_cmc.dts
new file mode 100644
index 00000000000..5a56c7d881a
--- /dev/null
+++ b/tools/binman/test/043_intel_cmc.dts
@@ -0,0 +1,14 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <16>;
+
+ intel-cmc {
+ filename = "cmc.bin";
+ };
+ };
+};
diff --git a/tools/binman/test/044_x86_optional_ucode.dts b/tools/binman/test/044_x86_optional_ucode.dts
new file mode 100644
index 00000000000..24a7040d318
--- /dev/null
+++ b/tools/binman/test/044_x86_optional_ucode.dts
@@ -0,0 +1,30 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ sort-by-offset;
+ end-at-4gb;
+ size = <0x200>;
+ u-boot-with-ucode-ptr {
+ optional-ucode;
+ };
+
+ u-boot-dtb-with-ucode {
+ };
+
+ u-boot-ucode {
+ };
+ };
+
+ microcode {
+ update@0 {
+ data = <0x12345678 0x12345679>;
+ };
+ update@1 {
+ data = <0xabcd0000 0x78235609>;
+ };
+ };
+};
diff --git a/tools/binman/test/045_prop_test.dts b/tools/binman/test/045_prop_test.dts
new file mode 100644
index 00000000000..064de2b3167
--- /dev/null
+++ b/tools/binman/test/045_prop_test.dts
@@ -0,0 +1,23 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ sort-by-offset;
+ end-at-4gb;
+ size = <16>;
+ intel-me {
+ filename = "me.bin";
+ offset-unset;
+ intval = <3>;
+ intarray = <5 6>;
+ byteval = [08];
+ bytearray = [01 23 34];
+ longbytearray = [09 0a 0b 0c];
+ stringval = "message2";
+ stringarray = "another", "multi-word", "message";
+ };
+ };
+};
diff --git a/tools/binman/test/046_intel_vbt.dts b/tools/binman/test/046_intel_vbt.dts
new file mode 100644
index 00000000000..733f5751d5a
--- /dev/null
+++ b/tools/binman/test/046_intel_vbt.dts
@@ -0,0 +1,14 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <16>;
+
+ intel-vbt {
+ filename = "vbt.bin";
+ };
+ };
+};
diff --git a/tools/binman/test/047_spl_bss_pad.dts b/tools/binman/test/047_spl_bss_pad.dts
new file mode 100644
index 00000000000..6bd88b83f98
--- /dev/null
+++ b/tools/binman/test/047_spl_bss_pad.dts
@@ -0,0 +1,17 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot-spl {
+ };
+
+ u-boot-spl-bss-pad {
+ };
+
+ u-boot {
+ };
+ };
+};
diff --git a/tools/binman/test/048_x86_start16_spl.dts b/tools/binman/test/048_x86_start16_spl.dts
new file mode 100644
index 00000000000..e2009f15f05
--- /dev/null
+++ b/tools/binman/test/048_x86_start16_spl.dts
@@ -0,0 +1,13 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <16>;
+
+ x86-start16-spl {
+ };
+ };
+};
diff --git a/tools/binman/test/049_x86_ucode_spl.dts b/tools/binman/test/049_x86_ucode_spl.dts
new file mode 100644
index 00000000000..350d2c4730b
--- /dev/null
+++ b/tools/binman/test/049_x86_ucode_spl.dts
@@ -0,0 +1,29 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ sort-by-offset;
+ end-at-4gb;
+ size = <0x200>;
+ u-boot-spl-with-ucode-ptr {
+ };
+
+ u-boot-dtb-with-ucode {
+ };
+
+ u-boot-ucode {
+ };
+ };
+
+ microcode {
+ update@0 {
+ data = <0x12345678 0x12345679>;
+ };
+ update@1 {
+ data = <0xabcd0000 0x78235609>;
+ };
+ };
+};
diff --git a/tools/binman/test/050_intel_mrc.dts b/tools/binman/test/050_intel_mrc.dts
new file mode 100644
index 00000000000..54cd52a2b71
--- /dev/null
+++ b/tools/binman/test/050_intel_mrc.dts
@@ -0,0 +1,13 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <16>;
+
+ intel-mrc {
+ };
+ };
+};
diff --git a/tools/binman/test/051_u_boot_spl_dtb.dts b/tools/binman/test/051_u_boot_spl_dtb.dts
new file mode 100644
index 00000000000..3912f86b4cd
--- /dev/null
+++ b/tools/binman/test/051_u_boot_spl_dtb.dts
@@ -0,0 +1,13 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <16>;
+
+ u-boot-spl-dtb {
+ };
+ };
+};
diff --git a/tools/binman/test/052_u_boot_spl_nodtb.dts b/tools/binman/test/052_u_boot_spl_nodtb.dts
new file mode 100644
index 00000000000..7f4e27780fe
--- /dev/null
+++ b/tools/binman/test/052_u_boot_spl_nodtb.dts
@@ -0,0 +1,11 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot-spl-nodtb {
+ };
+ };
+};
diff --git a/tools/binman/test/053_symbols.dts b/tools/binman/test/053_symbols.dts
new file mode 100644
index 00000000000..b28f34a72fa
--- /dev/null
+++ b/tools/binman/test/053_symbols.dts
@@ -0,0 +1,20 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ pad-byte = <0xff>;
+ u-boot-spl {
+ };
+
+ u-boot {
+ offset = <0x1c>;
+ };
+
+ u-boot-spl2 {
+ type = "u-boot-spl";
+ };
+ };
+};
diff --git a/tools/binman/test/054_unit_address.dts b/tools/binman/test/054_unit_address.dts
new file mode 100644
index 00000000000..3216dbbcc19
--- /dev/null
+++ b/tools/binman/test/054_unit_address.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot@0 {
+ };
+ u-boot@1 {
+ };
+ };
+};
diff --git a/tools/binman/test/055_sections.dts b/tools/binman/test/055_sections.dts
new file mode 100644
index 00000000000..6b306aeda46
--- /dev/null
+++ b/tools/binman/test/055_sections.dts
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ pad-byte = <0x26>;
+ size = <0x28>;
+ section@0 {
+ read-only;
+ size = <0x10>;
+ pad-byte = <0x21>;
+
+ u-boot {
+ };
+ };
+ section@1 {
+ size = <0x10>;
+ pad-byte = <0x61>;
+
+ u-boot {
+ };
+ };
+ section@2 {
+ u-boot {
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/056_name_prefix.dts b/tools/binman/test/056_name_prefix.dts
new file mode 100644
index 00000000000..f38c80eb183
--- /dev/null
+++ b/tools/binman/test/056_name_prefix.dts
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ pad-byte = <0x26>;
+ size = <0x28>;
+ section@0 {
+ read-only;
+ name-prefix = "ro-";
+ size = <0x10>;
+ pad-byte = <0x21>;
+
+ u-boot {
+ };
+ };
+ section@1 {
+ name-prefix = "rw-";
+ size = <0x10>;
+ pad-byte = <0x61>;
+
+ u-boot {
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/057_unknown_contents.dts b/tools/binman/test/057_unknown_contents.dts
new file mode 100644
index 00000000000..6ea98d7cab6
--- /dev/null
+++ b/tools/binman/test/057_unknown_contents.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ _testing {
+ return-unknown-contents;
+ };
+ };
+};
diff --git a/tools/binman/test/058_x86_ucode_spl_needs_retry.dts b/tools/binman/test/058_x86_ucode_spl_needs_retry.dts
new file mode 100644
index 00000000000..a04adaaf7ba
--- /dev/null
+++ b/tools/binman/test/058_x86_ucode_spl_needs_retry.dts
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ sort-by-offset;
+ end-at-4gb;
+ size = <0x200>;
+ u-boot-spl-with-ucode-ptr {
+ };
+
+ /*
+ * Microcode goes before the DTB which contains it, so binman
+ * will need to obtain the contents of the next section before
+ * obtaining the contents of this one.
+ */
+ u-boot-ucode {
+ };
+
+ u-boot-dtb-with-ucode {
+ };
+ };
+
+ microcode {
+ update@0 {
+ data = <0x12345678 0x12345679>;
+ };
+ update@1 {
+ data = <0xabcd0000 0x78235609>;
+ };
+ };
+};
diff --git a/tools/binman/test/059_change_size.dts b/tools/binman/test/059_change_size.dts
new file mode 100644
index 00000000000..1a69026a64c
--- /dev/null
+++ b/tools/binman/test/059_change_size.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ _testing {
+ bad-update-contents;
+ };
+ };
+};
diff --git a/tools/binman/test/060_fdt_update.dts b/tools/binman/test/060_fdt_update.dts
new file mode 100644
index 00000000000..f53c8a5053e
--- /dev/null
+++ b/tools/binman/test/060_fdt_update.dts
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ pad-byte = <0x26>;
+ size = <0x28>;
+ section@0 {
+ read-only;
+ name-prefix = "ro-";
+ size = <0x10>;
+ pad-byte = <0x21>;
+
+ u-boot {
+ };
+ };
+ section@1 {
+ name-prefix = "rw-";
+ size = <0x10>;
+ pad-byte = <0x61>;
+
+ u-boot {
+ };
+ };
+ _testing {
+ };
+ };
+};
diff --git a/tools/binman/test/061_fdt_update_bad.dts b/tools/binman/test/061_fdt_update_bad.dts
new file mode 100644
index 00000000000..e5abf31699c
--- /dev/null
+++ b/tools/binman/test/061_fdt_update_bad.dts
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ pad-byte = <0x26>;
+ size = <0x28>;
+ section@0 {
+ read-only;
+ name-prefix = "ro-";
+ size = <0x10>;
+ pad-byte = <0x21>;
+
+ u-boot {
+ };
+ };
+ section@1 {
+ name-prefix = "rw-";
+ size = <0x10>;
+ pad-byte = <0x61>;
+
+ u-boot {
+ };
+ };
+ _testing {
+ never-complete-process-fdt;
+ };
+ };
+};
diff --git a/tools/binman/test/062_entry_args.dts b/tools/binman/test/062_entry_args.dts
new file mode 100644
index 00000000000..4d4f102d60c
--- /dev/null
+++ b/tools/binman/test/062_entry_args.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ _testing {
+ test-str-fdt = "test0";
+ test-int-fdt = <123>;
+ };
+ };
+};
diff --git a/tools/binman/test/063_entry_args_missing.dts b/tools/binman/test/063_entry_args_missing.dts
new file mode 100644
index 00000000000..1644e2fef3a
--- /dev/null
+++ b/tools/binman/test/063_entry_args_missing.dts
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ _testing {
+ test-str-fdt = "test0";
+ };
+ };
+};
diff --git a/tools/binman/test/064_entry_args_required.dts b/tools/binman/test/064_entry_args_required.dts
new file mode 100644
index 00000000000..705be100691
--- /dev/null
+++ b/tools/binman/test/064_entry_args_required.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ _testing {
+ require-args;
+ test-str-fdt = "test0";
+ };
+ };
+};
diff --git a/tools/binman/test/065_entry_args_unknown_datatype.dts b/tools/binman/test/065_entry_args_unknown_datatype.dts
new file mode 100644
index 00000000000..3e4838f4fff
--- /dev/null
+++ b/tools/binman/test/065_entry_args_unknown_datatype.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ _testing {
+ test-str-fdt = "test0";
+ test-int-fdt = <123>;
+ force-bad-datatype;
+ };
+ };
+};
diff --git a/tools/binman/test/066_text.dts b/tools/binman/test/066_text.dts
new file mode 100644
index 00000000000..f23a75ae929
--- /dev/null
+++ b/tools/binman/test/066_text.dts
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ text {
+ size = <8>;
+ text-label = "test-id";
+ };
+ text2 {
+ type = "text";
+ text-label = "test-id2";
+ };
+ text3 {
+ type = "text";
+ text-label = "test-id3";
+ };
+ /* This one does not use command-line args */
+ text4 {
+ type = "text";
+ text-label = "test-id4";
+ test-id4 = "some text";
+ };
+ /* Put text directly in the node */
+ text5 {
+ type = "text";
+ text = "more text";
+ };
+ };
+};
diff --git a/tools/binman/test/067_fmap.dts b/tools/binman/test/067_fmap.dts
new file mode 100644
index 00000000000..24fa6351ec3
--- /dev/null
+++ b/tools/binman/test/067_fmap.dts
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ section@0 {
+ read-only;
+ name-prefix = "ro-";
+ size = <0x10>;
+ pad-byte = <0x21>;
+ preserve;
+
+ u-boot {
+ };
+ };
+ section@1 {
+ name-prefix = "rw-";
+ size = <0x10>;
+ pad-byte = <0x61>;
+
+ u-boot {
+ };
+ };
+ fmap {
+ };
+ };
+};
diff --git a/tools/binman/test/068_blob_named_by_arg.dts b/tools/binman/test/068_blob_named_by_arg.dts
new file mode 100644
index 00000000000..e129f843cd5
--- /dev/null
+++ b/tools/binman/test/068_blob_named_by_arg.dts
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ cros-ec-rw {
+ };
+ };
+};
diff --git a/tools/binman/test/069_fill.dts b/tools/binman/test/069_fill.dts
new file mode 100644
index 00000000000..e372ea37aaa
--- /dev/null
+++ b/tools/binman/test/069_fill.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <16>;
+ fill {
+ size = <8>;
+ fill-byte = [ff];
+ };
+ };
+};
diff --git a/tools/binman/test/070_fill_no_size.dts b/tools/binman/test/070_fill_no_size.dts
new file mode 100644
index 00000000000..7b1fcf1b68b
--- /dev/null
+++ b/tools/binman/test/070_fill_no_size.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <16>;
+ fill {
+ fill-byte = [ff];
+ };
+ };
+};
diff --git a/tools/binman/test/071_gbb.dts b/tools/binman/test/071_gbb.dts
new file mode 100644
index 00000000000..551756372af
--- /dev/null
+++ b/tools/binman/test/071_gbb.dts
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ gbb {
+ size = <0x2180>;
+ flags {
+ dev-screen-short-delay;
+ load-option-roms;
+ enable-alternate-os;
+ force-dev-switch-on;
+ force-dev-boot-usb;
+ disable-fw-rollback-check;
+ enter-triggers-tonorm;
+ force-dev-boot-legacy;
+ faft-key-override;
+ disable-ec-software-sync;
+ default-dev-boot-legacy;
+ disable-pd-software-sync;
+ disable-lid-shutdown;
+ force-dev-boot-fastboot-full-cap;
+ enable-serial;
+ disable-dwmp;
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/072_gbb_too_small.dts b/tools/binman/test/072_gbb_too_small.dts
new file mode 100644
index 00000000000..c088f36a1d0
--- /dev/null
+++ b/tools/binman/test/072_gbb_too_small.dts
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ binman {
+ gbb {
+ size = <0x200>;
+ };
+ };
+};
diff --git a/tools/binman/test/073_gbb_no_size.dts b/tools/binman/test/073_gbb_no_size.dts
new file mode 100644
index 00000000000..83be4037852
--- /dev/null
+++ b/tools/binman/test/073_gbb_no_size.dts
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ binman {
+ gbb {
+ };
+ };
+};
diff --git a/tools/binman/test/074_vblock.dts b/tools/binman/test/074_vblock.dts
new file mode 100644
index 00000000000..f0c21bfe9fc
--- /dev/null
+++ b/tools/binman/test/074_vblock.dts
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u_boot: u-boot {
+ };
+
+ vblock {
+ content = <&u_boot &dtb>;
+ keyblock = "firmware.keyblock";
+ signprivate = "firmware_data_key.vbprivk";
+ version = <1>;
+ kernelkey = "kernel_subkey.vbpubk";
+ preamble-flags = <1>;
+ };
+
+ /*
+ * Put this after the vblock so that its contents are not
+ * available when the vblock first tries to obtain its contents
+ */
+ dtb: u-boot-dtb {
+ };
+ };
+};
diff --git a/tools/binman/test/075_vblock_no_content.dts b/tools/binman/test/075_vblock_no_content.dts
new file mode 100644
index 00000000000..676d9474b31
--- /dev/null
+++ b/tools/binman/test/075_vblock_no_content.dts
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u_boot: u-boot {
+ };
+
+ vblock {
+ keyblock = "firmware.keyblock";
+ signprivate = "firmware_data_key.vbprivk";
+ version = <1>;
+ kernelkey = "kernel_subkey.vbpubk";
+ preamble-flags = <1>;
+ };
+
+ dtb: u-boot-dtb {
+ };
+ };
+};
diff --git a/tools/binman/test/076_vblock_bad_phandle.dts b/tools/binman/test/076_vblock_bad_phandle.dts
new file mode 100644
index 00000000000..ffbd0c335c3
--- /dev/null
+++ b/tools/binman/test/076_vblock_bad_phandle.dts
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u_boot: u-boot {
+ };
+
+ vblock {
+ content = <1000>;
+ keyblock = "firmware.keyblock";
+ signprivate = "firmware_data_key.vbprivk";
+ version = <1>;
+ kernelkey = "kernel_subkey.vbpubk";
+ preamble-flags = <1>;
+ };
+
+ dtb: u-boot-dtb {
+ };
+ };
+};
diff --git a/tools/binman/test/077_vblock_bad_entry.dts b/tools/binman/test/077_vblock_bad_entry.dts
new file mode 100644
index 00000000000..764c42a56e1
--- /dev/null
+++ b/tools/binman/test/077_vblock_bad_entry.dts
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u_boot: u-boot {
+ };
+
+ vblock {
+ content = <&u_boot &other>;
+ keyblock = "firmware.keyblock";
+ signprivate = "firmware_data_key.vbprivk";
+ version = <1>;
+ kernelkey = "kernel_subkey.vbpubk";
+ preamble-flags = <1>;
+ };
+
+ dtb: u-boot-dtb {
+ };
+ };
+
+ other: other {
+ };
+};
diff --git a/tools/binman/test/078_u_boot_tpl.dts b/tools/binman/test/078_u_boot_tpl.dts
new file mode 100644
index 00000000000..6c60b4c46f4
--- /dev/null
+++ b/tools/binman/test/078_u_boot_tpl.dts
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ binman {
+ u-boot-tpl {
+ };
+ u-boot-tpl-dtb {
+ };
+ };
+};
diff --git a/tools/binman/test/079_uses_pos.dts b/tools/binman/test/079_uses_pos.dts
new file mode 100644
index 00000000000..7638b9b5e0c
--- /dev/null
+++ b/tools/binman/test/079_uses_pos.dts
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ binman {
+ u-boot {
+ pos = <10>;
+ };
+ };
+};
diff --git a/tools/binman/test/080_fill_empty.dts b/tools/binman/test/080_fill_empty.dts
new file mode 100644
index 00000000000..2b78d3ae88d
--- /dev/null
+++ b/tools/binman/test/080_fill_empty.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <16>;
+ fill {
+ size = <0>;
+ fill-byte = [ff];
+ };
+ };
+};
diff --git a/tools/binman/test/081_x86_start16_tpl.dts b/tools/binman/test/081_x86_start16_tpl.dts
new file mode 100644
index 00000000000..68e6bbd68f0
--- /dev/null
+++ b/tools/binman/test/081_x86_start16_tpl.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <16>;
+
+ x86-start16-tpl {
+ };
+ };
+};
diff --git a/tools/binman/test/082_fdt_update_all.dts b/tools/binman/test/082_fdt_update_all.dts
new file mode 100644
index 00000000000..1aea56989f0
--- /dev/null
+++ b/tools/binman/test/082_fdt_update_all.dts
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ section {
+ u-boot-dtb {
+ };
+ };
+ u-boot-spl-dtb {
+ };
+ u-boot-tpl-dtb {
+ };
+ u-boot-vpl-dtb {
+ };
+ };
+};
diff --git a/tools/binman/test/083_compress.dts b/tools/binman/test/083_compress.dts
new file mode 100644
index 00000000000..07813bdeaa3
--- /dev/null
+++ b/tools/binman/test/083_compress.dts
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ binman {
+ blob {
+ filename = "compress";
+ compress = "lz4";
+ };
+ };
+};
diff --git a/tools/binman/test/084_files.dts b/tools/binman/test/084_files.dts
new file mode 100644
index 00000000000..8f09afd24ea
--- /dev/null
+++ b/tools/binman/test/084_files.dts
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ binman {
+ files {
+ pattern = "files/*.dat";
+ files-compress = "none";
+ };
+ };
+};
diff --git a/tools/binman/test/085_files_compress.dts b/tools/binman/test/085_files_compress.dts
new file mode 100644
index 00000000000..5aeead2e6e9
--- /dev/null
+++ b/tools/binman/test/085_files_compress.dts
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ binman {
+ files {
+ pattern = "files/*.dat";
+ files-compress = "lz4";
+ };
+ };
+};
diff --git a/tools/binman/test/086_files_none.dts b/tools/binman/test/086_files_none.dts
new file mode 100644
index 00000000000..34bd92f224a
--- /dev/null
+++ b/tools/binman/test/086_files_none.dts
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ binman {
+ files {
+ pattern = "files/*.none";
+ compress = "none";
+ require-matches;
+ };
+ };
+};
diff --git a/tools/binman/test/087_files_no_pattern.dts b/tools/binman/test/087_files_no_pattern.dts
new file mode 100644
index 00000000000..0cb5b469cb0
--- /dev/null
+++ b/tools/binman/test/087_files_no_pattern.dts
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ binman {
+ files {
+ compress = "none";
+ require-matches;
+ };
+ };
+};
diff --git a/tools/binman/test/088_extend_size.dts b/tools/binman/test/088_extend_size.dts
new file mode 100644
index 00000000000..f352699e37c
--- /dev/null
+++ b/tools/binman/test/088_extend_size.dts
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ binman {
+ size = <40>;
+ fill {
+ extend-size;
+ fill-byte = [61];
+ size = <0>;
+ };
+ u-boot {
+ offset = <8>;
+ };
+ section {
+ extend-size;
+ pad-byte = <0x62>;
+ intel-mrc {
+ };
+ };
+ u-boot2 {
+ type = "u-boot";
+ offset = <16>;
+ };
+ section2 {
+ type = "section";
+ fill {
+ extend-size;
+ fill-byte = [63];
+ size = <0>;
+ };
+ u-boot {
+ offset = <8>;
+ };
+ };
+ fill2 {
+ type = "fill";
+ extend-size;
+ fill-byte = [64];
+ size = <0>;
+ };
+ };
+};
diff --git a/tools/binman/test/089_extend_size_bad.dts b/tools/binman/test/089_extend_size_bad.dts
new file mode 100644
index 00000000000..edc60e43fdf
--- /dev/null
+++ b/tools/binman/test/089_extend_size_bad.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ binman {
+ _testing {
+ extend-size;
+ return-contents-once;
+ };
+ u-boot {
+ offset = <8>;
+ };
+ };
+};
diff --git a/tools/binman/test/090_hash.dts b/tools/binman/test/090_hash.dts
new file mode 100644
index 00000000000..200304599dc
--- /dev/null
+++ b/tools/binman/test/090_hash.dts
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ binman {
+ u-boot {
+ hash {
+ algo = "sha256";
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/091_hash_no_algo.dts b/tools/binman/test/091_hash_no_algo.dts
new file mode 100644
index 00000000000..b64df205117
--- /dev/null
+++ b/tools/binman/test/091_hash_no_algo.dts
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ binman {
+ u-boot {
+ hash {
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/092_hash_bad_algo.dts b/tools/binman/test/092_hash_bad_algo.dts
new file mode 100644
index 00000000000..d2402000db6
--- /dev/null
+++ b/tools/binman/test/092_hash_bad_algo.dts
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ binman {
+ u-boot {
+ hash {
+ algo = "invalid";
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/093_x86_tpl_ucode.dts b/tools/binman/test/093_x86_tpl_ucode.dts
new file mode 100644
index 00000000000..d7ed9fc66b8
--- /dev/null
+++ b/tools/binman/test/093_x86_tpl_ucode.dts
@@ -0,0 +1,29 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ sort-by-offset;
+ end-at-4gb;
+ size = <0x200>;
+ u-boot-tpl-with-ucode-ptr {
+ };
+
+ u-boot-tpl-dtb-with-ucode {
+ };
+
+ u-boot-ucode {
+ };
+ };
+
+ microcode {
+ update@0 {
+ data = <0x12345678 0x12345679>;
+ };
+ update@1 {
+ data = <0xabcd0000 0x78235609>;
+ };
+ };
+};
diff --git a/tools/binman/test/094_fmap_x86.dts b/tools/binman/test/094_fmap_x86.dts
new file mode 100644
index 00000000000..613c5dab425
--- /dev/null
+++ b/tools/binman/test/094_fmap_x86.dts
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ end-at-4gb;
+ size = <0x100>;
+ pad-byte = <0x61>;
+ u-boot {
+ };
+ intel-mrc {
+ };
+ fmap {
+ offset = <0xffffff20>;
+ };
+ };
+};
diff --git a/tools/binman/test/095_fmap_x86_section.dts b/tools/binman/test/095_fmap_x86_section.dts
new file mode 100644
index 00000000000..fd5f018c923
--- /dev/null
+++ b/tools/binman/test/095_fmap_x86_section.dts
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ end-at-4gb;
+ size = <0x180>;
+ u-boot {
+ };
+ section {
+ pad-byte = <0x62>;
+ intel-mrc {
+ };
+ fmap {
+ offset = <0x20>;
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/096_elf.dts b/tools/binman/test/096_elf.dts
new file mode 100644
index 00000000000..8e3f3f15ef0
--- /dev/null
+++ b/tools/binman/test/096_elf.dts
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot-elf {
+ };
+ u-boot-spl-elf {
+ };
+ u-boot-tpl-elf {
+ };
+ };
+};
diff --git a/tools/binman/test/097_elf_strip.dts b/tools/binman/test/097_elf_strip.dts
new file mode 100644
index 00000000000..6f3c66fd705
--- /dev/null
+++ b/tools/binman/test/097_elf_strip.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot-elf {
+ strip;
+ };
+ u-boot-spl-elf {
+ };
+ };
+};
diff --git a/tools/binman/test/098_4gb_and_skip_at_start_together.dts b/tools/binman/test/098_4gb_and_skip_at_start_together.dts
new file mode 100644
index 00000000000..90c467d910b
--- /dev/null
+++ b/tools/binman/test/098_4gb_and_skip_at_start_together.dts
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2018 NXP
+ */
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <32>;
+ sort-by-offset;
+ end-at-4gb;
+ skip-at-start = <0xffffffe0>;
+ u-boot {
+ offset = <0xffffffe0>;
+ };
+ };
+};
diff --git a/tools/binman/test/099_hash_section.dts b/tools/binman/test/099_hash_section.dts
new file mode 100644
index 00000000000..dcd8683d642
--- /dev/null
+++ b/tools/binman/test/099_hash_section.dts
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ binman {
+ section {
+ u-boot {
+ };
+ fill {
+ size = <0x10>;
+ fill-byte = [61];
+ };
+ hash {
+ algo = "sha256";
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/100_intel_refcode.dts b/tools/binman/test/100_intel_refcode.dts
new file mode 100644
index 00000000000..0a1a0270e5f
--- /dev/null
+++ b/tools/binman/test/100_intel_refcode.dts
@@ -0,0 +1,14 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <16>;
+
+ intel-refcode {
+ filename = "refcode.bin";
+ };
+ };
+};
diff --git a/tools/binman/test/101_sections_offset.dts b/tools/binman/test/101_sections_offset.dts
new file mode 100644
index 00000000000..46708ff9b6b
--- /dev/null
+++ b/tools/binman/test/101_sections_offset.dts
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ pad-byte = <0x26>;
+ size = <0x38>;
+ section@0 {
+ read-only;
+ offset = <0x4>;
+ size = <0x10>;
+ pad-byte = <0x21>;
+
+ u-boot {
+ };
+ };
+ section@1 {
+ size = <0x10>;
+ pad-byte = <0x61>;
+ offset = <0x18>;
+
+ u-boot {
+ };
+ };
+ section@2 {
+ offset = <0x2c>;
+ u-boot {
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/102_cbfs_raw.dts b/tools/binman/test/102_cbfs_raw.dts
new file mode 100644
index 00000000000..779cbc121ad
--- /dev/null
+++ b/tools/binman/test/102_cbfs_raw.dts
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ cbfs {
+ size = <0xb0>;
+ u-boot {
+ cbfs-type = "raw";
+ };
+ u-boot-dtb {
+ cbfs-type = "raw";
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/103_cbfs_raw_ppc.dts b/tools/binman/test/103_cbfs_raw_ppc.dts
new file mode 100644
index 00000000000..df1caf092f4
--- /dev/null
+++ b/tools/binman/test/103_cbfs_raw_ppc.dts
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ cbfs {
+ size = <0x100>;
+ cbfs-arch = "ppc64";
+ u-boot {
+ cbfs-type = "raw";
+ };
+ u-boot-dtb {
+ cbfs-type = "raw";
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/104_cbfs_stage.dts b/tools/binman/test/104_cbfs_stage.dts
new file mode 100644
index 00000000000..215e2f287a4
--- /dev/null
+++ b/tools/binman/test/104_cbfs_stage.dts
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ cbfs {
+ size = <0xb0>;
+ u-boot {
+ type = "blob";
+ filename = "cbfs-stage.elf";
+ cbfs-type = "stage";
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/105_cbfs_raw_compress.dts b/tools/binman/test/105_cbfs_raw_compress.dts
new file mode 100644
index 00000000000..646168d84b4
--- /dev/null
+++ b/tools/binman/test/105_cbfs_raw_compress.dts
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ cbfs {
+ size = <0x140>;
+ u-boot {
+ type = "text";
+ text = "compress xxxxxxxxxxxxxxxxxxxxxx data";
+ cbfs-type = "raw";
+ cbfs-compress = "lz4";
+ };
+ u-boot-dtb {
+ type = "text";
+ text = "compress xxxxxxxxxxxxxxxxxxxxxx data";
+ cbfs-type = "raw";
+ cbfs-compress = "lzma";
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/106_cbfs_bad_arch.dts b/tools/binman/test/106_cbfs_bad_arch.dts
new file mode 100644
index 00000000000..4318d45a7d4
--- /dev/null
+++ b/tools/binman/test/106_cbfs_bad_arch.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ cbfs {
+ size = <0x100>;
+ cbfs-arch = "bad-arch";
+ };
+ };
+};
diff --git a/tools/binman/test/107_cbfs_no_size.dts b/tools/binman/test/107_cbfs_no_size.dts
new file mode 100644
index 00000000000..3592f62f7e6
--- /dev/null
+++ b/tools/binman/test/107_cbfs_no_size.dts
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ cbfs {
+ };
+ };
+};
diff --git a/tools/binman/test/108_cbfs_no_contents.dts b/tools/binman/test/108_cbfs_no_contents.dts
new file mode 100644
index 00000000000..623346760d2
--- /dev/null
+++ b/tools/binman/test/108_cbfs_no_contents.dts
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ cbfs {
+ size = <0x100>;
+ _testing {
+ return-unknown-contents;
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/109_cbfs_bad_compress.dts b/tools/binman/test/109_cbfs_bad_compress.dts
new file mode 100644
index 00000000000..9695024ee9b
--- /dev/null
+++ b/tools/binman/test/109_cbfs_bad_compress.dts
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ cbfs {
+ size = <0xb0>;
+ u-boot {
+ cbfs-type = "raw";
+ cbfs-compress = "invalid-algo";
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/110_cbfs_name.dts b/tools/binman/test/110_cbfs_name.dts
new file mode 100644
index 00000000000..98c16f30b41
--- /dev/null
+++ b/tools/binman/test/110_cbfs_name.dts
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ cbfs {
+ size = <0x100>;
+ u-boot {
+ cbfs-name = "FRED";
+ cbfs-type = "raw";
+ };
+
+ hello {
+ type = "blob";
+ filename = "u-boot.dtb";
+ cbfs-type = "raw";
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/111_x86_rom_ifwi.dts b/tools/binman/test/111_x86_rom_ifwi.dts
new file mode 100644
index 00000000000..c0ba4f2ea42
--- /dev/null
+++ b/tools/binman/test/111_x86_rom_ifwi.dts
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ sort-by-offset;
+ end-at-4gb;
+ size = <0x800000>;
+ intel-descriptor {
+ filename = "descriptor.bin";
+ };
+
+ intel-ifwi {
+ offset-unset;
+ filename = "fitimage.bin";
+ convert-fit;
+
+ u-boot-tpl {
+ ifwi-replace;
+ ifwi-subpart = "IBBP";
+ ifwi-entry = "IBBL";
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/112_x86_rom_ifwi_nodesc.dts b/tools/binman/test/112_x86_rom_ifwi_nodesc.dts
new file mode 100644
index 00000000000..0874440ab53
--- /dev/null
+++ b/tools/binman/test/112_x86_rom_ifwi_nodesc.dts
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ sort-by-offset;
+ end-at-4gb;
+ size = <0x800000>;
+ intel-descriptor {
+ filename = "descriptor.bin";
+ };
+
+ intel-ifwi {
+ offset-unset;
+ filename = "ifwi.bin";
+
+ u-boot-tpl {
+ ifwi-replace;
+ ifwi-subpart = "IBBP";
+ ifwi-entry = "IBBL";
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/113_x86_rom_ifwi_nodata.dts b/tools/binman/test/113_x86_rom_ifwi_nodata.dts
new file mode 100644
index 00000000000..82a4bc8cdd5
--- /dev/null
+++ b/tools/binman/test/113_x86_rom_ifwi_nodata.dts
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ sort-by-offset;
+ end-at-4gb;
+ size = <0x800000>;
+ intel-descriptor {
+ filename = "descriptor.bin";
+ };
+
+ intel-ifwi {
+ offset-unset;
+ filename = "ifwi.bin";
+
+ _testing {
+ return-unknown-contents;
+ ifwi-replace;
+ ifwi-subpart = "IBBP";
+ ifwi-entry = "IBBL";
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/114_cbfs_offset.dts b/tools/binman/test/114_cbfs_offset.dts
new file mode 100644
index 00000000000..7aa9d9d4bf3
--- /dev/null
+++ b/tools/binman/test/114_cbfs_offset.dts
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ sort-by-offset;
+ end-at-4gb;
+ size = <0x200>;
+ cbfs {
+ size = <0x200>;
+ offset = <0xfffffe00>;
+ u-boot {
+ cbfs-offset = <0x40>;
+ cbfs-type = "raw";
+ };
+ u-boot-dtb {
+ cbfs-offset = <0x140>;
+ cbfs-type = "raw";
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/115_fdtmap.dts b/tools/binman/test/115_fdtmap.dts
new file mode 100644
index 00000000000..2450c41f200
--- /dev/null
+++ b/tools/binman/test/115_fdtmap.dts
@@ -0,0 +1,13 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot {
+ };
+ fdtmap {
+ };
+ };
+};
diff --git a/tools/binman/test/116_fdtmap_hdr.dts b/tools/binman/test/116_fdtmap_hdr.dts
new file mode 100644
index 00000000000..77a2194b394
--- /dev/null
+++ b/tools/binman/test/116_fdtmap_hdr.dts
@@ -0,0 +1,17 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <0x400>;
+ u-boot {
+ };
+ fdtmap {
+ };
+ image-header {
+ location = "end";
+ };
+ };
+};
diff --git a/tools/binman/test/117_fdtmap_hdr_start.dts b/tools/binman/test/117_fdtmap_hdr_start.dts
new file mode 100644
index 00000000000..17b6be00470
--- /dev/null
+++ b/tools/binman/test/117_fdtmap_hdr_start.dts
@@ -0,0 +1,19 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <0x400>;
+ sort-by-offset;
+ u-boot {
+ offset = <0x100>;
+ };
+ fdtmap {
+ };
+ image-header {
+ location = "start";
+ };
+ };
+};
diff --git a/tools/binman/test/118_fdtmap_hdr_pos.dts b/tools/binman/test/118_fdtmap_hdr_pos.dts
new file mode 100644
index 00000000000..fd803f57fba
--- /dev/null
+++ b/tools/binman/test/118_fdtmap_hdr_pos.dts
@@ -0,0 +1,19 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <0x400>;
+ sort-by-offset;
+ u-boot {
+ offset = <0x100>;
+ };
+ fdtmap {
+ };
+ image-header {
+ offset = <0x80>;
+ };
+ };
+};
diff --git a/tools/binman/test/119_fdtmap_hdr_missing.dts b/tools/binman/test/119_fdtmap_hdr_missing.dts
new file mode 100644
index 00000000000..41bb680f08f
--- /dev/null
+++ b/tools/binman/test/119_fdtmap_hdr_missing.dts
@@ -0,0 +1,16 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ sort-by-offset;
+ u-boot {
+ };
+ image-header {
+ offset = <0x80>;
+ location = "start";
+ };
+ };
+};
diff --git a/tools/binman/test/120_hdr_no_location.dts b/tools/binman/test/120_hdr_no_location.dts
new file mode 100644
index 00000000000..585e21f456b
--- /dev/null
+++ b/tools/binman/test/120_hdr_no_location.dts
@@ -0,0 +1,16 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ sort-by-offset;
+ u-boot {
+ };
+ fdtmap {
+ };
+ image-header {
+ };
+ };
+};
diff --git a/tools/binman/test/121_entry_extend.dts b/tools/binman/test/121_entry_extend.dts
new file mode 100644
index 00000000000..ebb7816db90
--- /dev/null
+++ b/tools/binman/test/121_entry_extend.dts
@@ -0,0 +1,20 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ _testing {
+ bad-update-contents;
+ };
+
+ u-boot {
+ };
+
+ _testing2 {
+ type = "_testing";
+ bad-update-contents;
+ };
+ };
+};
diff --git a/tools/binman/test/122_entry_extend_twice.dts b/tools/binman/test/122_entry_extend_twice.dts
new file mode 100644
index 00000000000..258cf859f4b
--- /dev/null
+++ b/tools/binman/test/122_entry_extend_twice.dts
@@ -0,0 +1,21 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ _testing {
+ bad-update-contents;
+ bad-update-contents-twice;
+ };
+
+ u-boot {
+ };
+
+ _testing2 {
+ type = "_testing";
+ bad-update-contents;
+ };
+ };
+};
diff --git a/tools/binman/test/123_entry_extend_section.dts b/tools/binman/test/123_entry_extend_section.dts
new file mode 100644
index 00000000000..046f7234348
--- /dev/null
+++ b/tools/binman/test/123_entry_extend_section.dts
@@ -0,0 +1,22 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ _testing {
+ bad-update-contents;
+ };
+
+ u-boot {
+ };
+
+ section {
+ _testing2 {
+ type = "_testing";
+ bad-update-contents;
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/124_compress_dtb.dts b/tools/binman/test/124_compress_dtb.dts
new file mode 100644
index 00000000000..46bfd8b265f
--- /dev/null
+++ b/tools/binman/test/124_compress_dtb.dts
@@ -0,0 +1,14 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot {
+ };
+ u-boot-dtb {
+ compress = "lz4";
+ };
+ };
+};
diff --git a/tools/binman/test/125_cbfs_update.dts b/tools/binman/test/125_cbfs_update.dts
new file mode 100644
index 00000000000..6d2e8a0b8ff
--- /dev/null
+++ b/tools/binman/test/125_cbfs_update.dts
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ cbfs {
+ size = <0x100>;
+ u-boot {
+ cbfs-type = "raw";
+ cbfs-compress = "lz4";
+ };
+ u-boot-dtb {
+ cbfs-type = "raw";
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/126_cbfs_bad_type.dts b/tools/binman/test/126_cbfs_bad_type.dts
new file mode 100644
index 00000000000..2cd6fc6d52d
--- /dev/null
+++ b/tools/binman/test/126_cbfs_bad_type.dts
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ cbfs {
+ size = <0x100>;
+ u-boot {
+ cbfs-type = "badtype";
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/127_list.dts b/tools/binman/test/127_list.dts
new file mode 100644
index 00000000000..c1d6fce3f9e
--- /dev/null
+++ b/tools/binman/test/127_list.dts
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot {
+ };
+ section {
+ align = <0x100>;
+ cbfs {
+ size = <0x400>;
+ u-boot {
+ cbfs-type = "raw";
+ cbfs-offset = <0x38>;
+ };
+ u-boot-dtb {
+ type = "text";
+ text = "compress xxxxxxxxxxxxxxxxxxxxxx data";
+ cbfs-type = "raw";
+ cbfs-compress = "lzma";
+ cbfs-offset = <0x78>;
+ };
+ };
+ u-boot-dtb {
+ compress = "lz4";
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/128_decode_image.dts b/tools/binman/test/128_decode_image.dts
new file mode 100644
index 00000000000..449fccc41df
--- /dev/null
+++ b/tools/binman/test/128_decode_image.dts
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <0xc00>;
+ u-boot {
+ };
+ section {
+ align = <0x100>;
+ cbfs {
+ size = <0x400>;
+ u-boot {
+ cbfs-type = "raw";
+ };
+ u-boot-dtb {
+ cbfs-type = "raw";
+ cbfs-compress = "lzma";
+ cbfs-offset = <0x80>;
+ };
+ };
+ u-boot-dtb {
+ compress = "lz4";
+ };
+ };
+ fdtmap {
+ };
+ image-header {
+ location = "end";
+ };
+ };
+};
diff --git a/tools/binman/test/129_decode_image_nohdr.dts b/tools/binman/test/129_decode_image_nohdr.dts
new file mode 100644
index 00000000000..90fdd8820ca
--- /dev/null
+++ b/tools/binman/test/129_decode_image_nohdr.dts
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <0xc00>;
+ u-boot {
+ };
+ section {
+ align = <0x100>;
+ cbfs {
+ size = <0x400>;
+ u-boot {
+ cbfs-type = "raw";
+ };
+ u-boot-dtb {
+ cbfs-type = "raw";
+ cbfs-compress = "lzma";
+ cbfs-offset = <0x80>;
+ };
+ };
+ u-boot-dtb {
+ compress = "lz4";
+ };
+ };
+ fdtmap {
+ };
+ };
+};
diff --git a/tools/binman/test/130_list_fdtmap.dts b/tools/binman/test/130_list_fdtmap.dts
new file mode 100644
index 00000000000..449fccc41df
--- /dev/null
+++ b/tools/binman/test/130_list_fdtmap.dts
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <0xc00>;
+ u-boot {
+ };
+ section {
+ align = <0x100>;
+ cbfs {
+ size = <0x400>;
+ u-boot {
+ cbfs-type = "raw";
+ };
+ u-boot-dtb {
+ cbfs-type = "raw";
+ cbfs-compress = "lzma";
+ cbfs-offset = <0x80>;
+ };
+ };
+ u-boot-dtb {
+ compress = "lz4";
+ };
+ };
+ fdtmap {
+ };
+ image-header {
+ location = "end";
+ };
+ };
+};
diff --git a/tools/binman/test/131_pack_align_section.dts b/tools/binman/test/131_pack_align_section.dts
new file mode 100644
index 00000000000..44478855b09
--- /dev/null
+++ b/tools/binman/test/131_pack_align_section.dts
@@ -0,0 +1,28 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot {
+ };
+ section0 {
+ type = "section";
+ align = <0x10>;
+ u-boot {
+ };
+ };
+ section1 {
+ type = "section";
+ align-size = <0x20>;
+ u-boot {
+ };
+ section2 {
+ type = "section";
+ u-boot {
+ };
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/132_replace.dts b/tools/binman/test/132_replace.dts
new file mode 100644
index 00000000000..6ebdcda45c5
--- /dev/null
+++ b/tools/binman/test/132_replace.dts
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <0xc00>;
+ u-boot {
+ };
+ fdtmap {
+ };
+ u-boot-dtb {
+ };
+ image-header {
+ location = "end";
+ };
+ };
+};
diff --git a/tools/binman/test/133_replace_multi.dts b/tools/binman/test/133_replace_multi.dts
new file mode 100644
index 00000000000..38b2f39d020
--- /dev/null
+++ b/tools/binman/test/133_replace_multi.dts
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ multiple-images;
+ first-image {
+ size = <0xc00>;
+ u-boot {
+ };
+ fdtmap {
+ };
+ u-boot-dtb {
+ };
+ image-header {
+ location = "end";
+ };
+ };
+
+ image {
+ fdtmap {
+ };
+ u-boot {
+ };
+ u-boot-dtb {
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/134_fdt_update_all_repack.dts b/tools/binman/test/134_fdt_update_all_repack.dts
new file mode 100644
index 00000000000..625d37673bd
--- /dev/null
+++ b/tools/binman/test/134_fdt_update_all_repack.dts
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ allow-repack;
+ section {
+ size = <0x300>;
+ u-boot-dtb {
+ offset = <4>;
+ };
+ };
+ u-boot-spl-dtb {
+ };
+ u-boot-tpl-dtb {
+ };
+ fdtmap {
+ };
+ };
+};
diff --git a/tools/binman/test/135_fdtmap_hdr_middle.dts b/tools/binman/test/135_fdtmap_hdr_middle.dts
new file mode 100644
index 00000000000..d6211da8ae3
--- /dev/null
+++ b/tools/binman/test/135_fdtmap_hdr_middle.dts
@@ -0,0 +1,16 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot {
+ };
+ image-header {
+ location = "end";
+ };
+ fdtmap {
+ };
+ };
+};
diff --git a/tools/binman/test/136_fdtmap_hdr_startbad.dts b/tools/binman/test/136_fdtmap_hdr_startbad.dts
new file mode 100644
index 00000000000..ec5f4bc7e3a
--- /dev/null
+++ b/tools/binman/test/136_fdtmap_hdr_startbad.dts
@@ -0,0 +1,16 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot {
+ };
+ fdtmap {
+ };
+ image-header {
+ location = "start";
+ };
+ };
+};
diff --git a/tools/binman/test/137_fdtmap_hdr_endbad.dts b/tools/binman/test/137_fdtmap_hdr_endbad.dts
new file mode 100644
index 00000000000..ebacd71eb23
--- /dev/null
+++ b/tools/binman/test/137_fdtmap_hdr_endbad.dts
@@ -0,0 +1,16 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ image-header {
+ location = "end";
+ };
+ u-boot {
+ };
+ fdtmap {
+ };
+ };
+};
diff --git a/tools/binman/test/138_fdtmap_hdr_nosize.dts b/tools/binman/test/138_fdtmap_hdr_nosize.dts
new file mode 100644
index 00000000000..c362f8fdffb
--- /dev/null
+++ b/tools/binman/test/138_fdtmap_hdr_nosize.dts
@@ -0,0 +1,16 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot {
+ };
+ fdtmap {
+ };
+ image-header {
+ location = "end";
+ };
+ };
+};
diff --git a/tools/binman/test/139_replace_repack.dts b/tools/binman/test/139_replace_repack.dts
new file mode 100644
index 00000000000..a3daf6f9b46
--- /dev/null
+++ b/tools/binman/test/139_replace_repack.dts
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <0xc00>;
+ allow-repack;
+ u-boot {
+ };
+ fdtmap {
+ };
+ u-boot-dtb {
+ };
+ image-header {
+ location = "end";
+ };
+ };
+};
diff --git a/tools/binman/test/140_entry_shrink.dts b/tools/binman/test/140_entry_shrink.dts
new file mode 100644
index 00000000000..b750d638986
--- /dev/null
+++ b/tools/binman/test/140_entry_shrink.dts
@@ -0,0 +1,20 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ _testing {
+ bad-shrink-contents;
+ };
+
+ u-boot {
+ };
+
+ _testing2 {
+ type = "_testing";
+ bad-shrink-contents;
+ };
+ };
+};
diff --git a/tools/binman/test/141_descriptor_offset.dts b/tools/binman/test/141_descriptor_offset.dts
new file mode 100644
index 00000000000..f9bff016aa8
--- /dev/null
+++ b/tools/binman/test/141_descriptor_offset.dts
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ sort-by-offset;
+ end-at-4gb;
+ size = <0x800000>;
+ u-boot {
+ offset = <0xffff0000>;
+ };
+ intel-descriptor {
+ filename = "descriptor.bin";
+ };
+ };
+};
diff --git a/tools/binman/test/142_replace_cbfs.dts b/tools/binman/test/142_replace_cbfs.dts
new file mode 100644
index 00000000000..d64142f9d5c
--- /dev/null
+++ b/tools/binman/test/142_replace_cbfs.dts
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <0xe00>;
+ allow-repack;
+ u-boot {
+ };
+ section {
+ align = <0x100>;
+ cbfs {
+ size = <0x400>;
+ u-boot {
+ cbfs-type = "raw";
+ };
+ u-boot-dtb {
+ cbfs-type = "raw";
+ cbfs-compress = "lzma";
+ cbfs-offset = <0x80>;
+ };
+ };
+ u-boot-dtb {
+ compress = "lz4";
+ };
+ };
+ fdtmap {
+ };
+ image-header {
+ location = "end";
+ };
+ };
+};
diff --git a/tools/binman/test/143_replace_all.dts b/tools/binman/test/143_replace_all.dts
new file mode 100644
index 00000000000..c5744a3c1c8
--- /dev/null
+++ b/tools/binman/test/143_replace_all.dts
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <0xc00>;
+ allow-repack;
+ u-boot {
+ };
+ fdtmap {
+ };
+ u-boot2 {
+ type = "u-boot";
+ };
+ text {
+ text = "some text";
+ };
+ u-boot-dtb {
+ };
+ image-header {
+ location = "end";
+ };
+ };
+};
diff --git a/tools/binman/test/144_x86_reset16.dts b/tools/binman/test/144_x86_reset16.dts
new file mode 100644
index 00000000000..ba90333b27b
--- /dev/null
+++ b/tools/binman/test/144_x86_reset16.dts
@@ -0,0 +1,13 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <16>;
+
+ x86-reset16 {
+ };
+ };
+};
diff --git a/tools/binman/test/145_x86_reset16_spl.dts b/tools/binman/test/145_x86_reset16_spl.dts
new file mode 100644
index 00000000000..cc8d97a7e66
--- /dev/null
+++ b/tools/binman/test/145_x86_reset16_spl.dts
@@ -0,0 +1,13 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <16>;
+
+ x86-reset16-spl {
+ };
+ };
+};
diff --git a/tools/binman/test/146_x86_reset16_tpl.dts b/tools/binman/test/146_x86_reset16_tpl.dts
new file mode 100644
index 00000000000..041b16f3de1
--- /dev/null
+++ b/tools/binman/test/146_x86_reset16_tpl.dts
@@ -0,0 +1,13 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <16>;
+
+ x86-reset16-tpl {
+ };
+ };
+};
diff --git a/tools/binman/test/147_intel_fit.dts b/tools/binman/test/147_intel_fit.dts
new file mode 100644
index 00000000000..01ec40e5c72
--- /dev/null
+++ b/tools/binman/test/147_intel_fit.dts
@@ -0,0 +1,20 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ end-at-4gb;
+ size = <0x80>;
+
+ u-boot {
+ };
+
+ intel-fit {
+ };
+
+ intel-fit-ptr {
+ };
+ };
+};
diff --git a/tools/binman/test/148_intel_fit_missing.dts b/tools/binman/test/148_intel_fit_missing.dts
new file mode 100644
index 00000000000..388c76b1ab5
--- /dev/null
+++ b/tools/binman/test/148_intel_fit_missing.dts
@@ -0,0 +1,17 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ end-at-4gb;
+ size = <0x80>;
+
+ u-boot {
+ };
+
+ intel-fit-ptr {
+ };
+ };
+};
diff --git a/tools/binman/test/149_symbols_tpl.dts b/tools/binman/test/149_symbols_tpl.dts
new file mode 100644
index 00000000000..4e649c45978
--- /dev/null
+++ b/tools/binman/test/149_symbols_tpl.dts
@@ -0,0 +1,27 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ pad-byte = <0xff>;
+ u-boot-spl {
+ offset = <4>;
+ };
+
+ u-boot-spl2 {
+ offset = <0x20>;
+ type = "u-boot-spl";
+ };
+
+ u-boot {
+ offset = <0x3c>;
+ };
+
+ section {
+ u-boot-tpl {
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/150_powerpc_mpc85xx_bootpg_resetvec.dts b/tools/binman/test/150_powerpc_mpc85xx_bootpg_resetvec.dts
new file mode 100644
index 00000000000..8f4b16c399c
--- /dev/null
+++ b/tools/binman/test/150_powerpc_mpc85xx_bootpg_resetvec.dts
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2018 NXP
+ */
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ powerpc-mpc85xx-bootpg-resetvec {
+ };
+ };
+};
diff --git a/tools/binman/test/151_x86_rom_ifwi_section.dts b/tools/binman/test/151_x86_rom_ifwi_section.dts
new file mode 100644
index 00000000000..7e455c3a4b7
--- /dev/null
+++ b/tools/binman/test/151_x86_rom_ifwi_section.dts
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ sort-by-offset;
+ end-at-4gb;
+ size = <0x800000>;
+ intel-descriptor {
+ filename = "descriptor.bin";
+ };
+
+ intel-ifwi {
+ offset-unset;
+ filename = "fitimage.bin";
+ convert-fit;
+
+ section {
+ ifwi-replace;
+ ifwi-subpart = "IBBP";
+ ifwi-entry = "IBBL";
+ u-boot-tpl {
+ };
+ u-boot-dtb {
+ };
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/152_intel_fsp_m.dts b/tools/binman/test/152_intel_fsp_m.dts
new file mode 100644
index 00000000000..b6010f31c2b
--- /dev/null
+++ b/tools/binman/test/152_intel_fsp_m.dts
@@ -0,0 +1,14 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <16>;
+
+ intel-fsp-m {
+ filename = "fsp_m.bin";
+ };
+ };
+};
diff --git a/tools/binman/test/153_intel_fsp_s.dts b/tools/binman/test/153_intel_fsp_s.dts
new file mode 100644
index 00000000000..579618a8fa3
--- /dev/null
+++ b/tools/binman/test/153_intel_fsp_s.dts
@@ -0,0 +1,14 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <16>;
+
+ intel-fsp-s {
+ filename = "fsp_s.bin";
+ };
+ };
+};
diff --git a/tools/binman/test/154_intel_fsp_t.dts b/tools/binman/test/154_intel_fsp_t.dts
new file mode 100644
index 00000000000..8da749c1574
--- /dev/null
+++ b/tools/binman/test/154_intel_fsp_t.dts
@@ -0,0 +1,14 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <16>;
+
+ intel-fsp-t {
+ filename = "fsp_t.bin";
+ };
+ };
+};
diff --git a/tools/binman/test/155_symbols_tpl_x86.dts b/tools/binman/test/155_symbols_tpl_x86.dts
new file mode 100644
index 00000000000..e1ce33e67fb
--- /dev/null
+++ b/tools/binman/test/155_symbols_tpl_x86.dts
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ pad-byte = <0xff>;
+ end-at-4gb;
+ size = <0x100>;
+ u-boot-spl {
+ offset = <0xffffff04>;
+ };
+
+ u-boot-spl2 {
+ offset = <0xffffff20>;
+ type = "u-boot-spl";
+ };
+
+ u-boot {
+ offset = <0xffffff3c>;
+ };
+
+ section {
+ u-boot-tpl {
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/156_mkimage.dts b/tools/binman/test/156_mkimage.dts
new file mode 100644
index 00000000000..933b13143a8
--- /dev/null
+++ b/tools/binman/test/156_mkimage.dts
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <0x80>;
+
+ mkimage {
+ args = "-n test -T script";
+
+ u-boot-spl {
+ };
+
+ _testing {
+ return-contents-later;
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/157_blob_ext.dts b/tools/binman/test/157_blob_ext.dts
new file mode 100644
index 00000000000..8afdd5339e5
--- /dev/null
+++ b/tools/binman/test/157_blob_ext.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ blob-ext {
+ filename = "refcode.bin";
+ };
+ };
+};
diff --git a/tools/binman/test/158_blob_ext_missing.dts b/tools/binman/test/158_blob_ext_missing.dts
new file mode 100644
index 00000000000..d315e5592e1
--- /dev/null
+++ b/tools/binman/test/158_blob_ext_missing.dts
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <0x80>;
+
+ blob-ext {
+ filename = "missing-file";
+ };
+ };
+};
diff --git a/tools/binman/test/159_blob_ext_missing_sect.dts b/tools/binman/test/159_blob_ext_missing_sect.dts
new file mode 100644
index 00000000000..5f14c541381
--- /dev/null
+++ b/tools/binman/test/159_blob_ext_missing_sect.dts
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <0x80>;
+
+ section {
+ blob-ext {
+ filename = "missing-file";
+ };
+ };
+
+ blob-ext2 {
+ type = "blob-ext";
+ filename = "missing-file2";
+ };
+ };
+};
diff --git a/tools/binman/test/160_pack_overlap_zero.dts b/tools/binman/test/160_pack_overlap_zero.dts
new file mode 100644
index 00000000000..731aa1cbe6d
--- /dev/null
+++ b/tools/binman/test/160_pack_overlap_zero.dts
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot {
+ };
+
+ fill {
+ size = <0>;
+ offset = <3>;
+ };
+ };
+};
diff --git a/tools/binman/test/161_fit.dts b/tools/binman/test/161_fit.dts
new file mode 100644
index 00000000000..c52d760b735
--- /dev/null
+++ b/tools/binman/test/161_fit.dts
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot {
+ };
+ fit {
+ description = "test-desc";
+ #address-cells = <1>;
+
+ images {
+ kernel {
+ description = "Vanilla Linux kernel";
+ type = "kernel";
+ arch = "ppc";
+ os = "linux";
+ compression = "gzip";
+ load = <00000000>;
+ entry = <00000000>;
+ hash-1 {
+ algo = "crc32";
+ };
+ hash-2 {
+ algo = "sha1";
+ };
+ u-boot {
+ };
+ };
+ fdt-1 {
+ description = "Flattened Device Tree blob";
+ type = "flat_dt";
+ arch = "ppc";
+ compression = "none";
+ hash-1 {
+ algo = "crc32";
+ };
+ hash-2 {
+ algo = "sha1";
+ };
+ u-boot-spl-dtb {
+ };
+ };
+ };
+
+ configurations {
+ default = "conf-1";
+ conf-1 {
+ description = "Boot Linux kernel with FDT blob";
+ kernel = "kernel";
+ fdt = "fdt-1";
+ };
+ };
+ };
+ u-boot-nodtb {
+ };
+ };
+};
diff --git a/tools/binman/test/162_fit_external.dts b/tools/binman/test/162_fit_external.dts
new file mode 100644
index 00000000000..6f2a629a985
--- /dev/null
+++ b/tools/binman/test/162_fit_external.dts
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot {
+ };
+ fit {
+ fit,external-offset = <0x400>;
+ description = "test-desc";
+ #address-cells = <1>;
+
+ images {
+ kernel {
+ description = "Vanilla Linux kernel";
+ type = "kernel";
+ arch = "ppc";
+ os = "linux";
+ compression = "gzip";
+ load = <00000000>;
+ entry = <00000000>;
+ hash-1 {
+ algo = "crc32";
+ };
+ hash-2 {
+ algo = "sha1";
+ };
+ u-boot {
+ };
+ };
+ fdt-1 {
+ description = "Flattened Device Tree blob";
+ type = "flat_dt";
+ arch = "ppc";
+ compression = "none";
+ hash-1 {
+ algo = "crc32";
+ };
+ hash-2 {
+ algo = "sha1";
+ };
+ _testing {
+ return-contents-later;
+ };
+ };
+ };
+
+ configurations {
+ default = "conf-1";
+ conf-1 {
+ description = "Boot Linux kernel with FDT blob";
+ kernel = "kernel";
+ fdt = "fdt-1";
+ };
+ };
+ };
+ u-boot-nodtb {
+ };
+ };
+};
diff --git a/tools/binman/test/163_x86_rom_me_empty.dts b/tools/binman/test/163_x86_rom_me_empty.dts
new file mode 100644
index 00000000000..9349d2d7245
--- /dev/null
+++ b/tools/binman/test/163_x86_rom_me_empty.dts
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ sort-by-offset;
+ end-at-4gb;
+ size = <0x800000>;
+ intel-descriptor {
+ filename = "descriptor-empty.bin";
+ };
+
+ intel-me {
+ filename = "me.bin";
+ offset-unset;
+ };
+ };
+};
diff --git a/tools/binman/test/164_x86_rom_me_missing.dts b/tools/binman/test/164_x86_rom_me_missing.dts
new file mode 100644
index 00000000000..dce3be5e057
--- /dev/null
+++ b/tools/binman/test/164_x86_rom_me_missing.dts
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ sort-by-offset;
+ end-at-4gb;
+ size = <0x800000>;
+ intel-descriptor {
+ filename = "descriptor-missing.bin";
+ };
+
+ intel-me {
+ filename = "me.bin";
+ offset-unset;
+ };
+ };
+};
diff --git a/tools/binman/test/165_section_ignore_hash_signature.dts b/tools/binman/test/165_section_ignore_hash_signature.dts
new file mode 100644
index 00000000000..8adbe25512a
--- /dev/null
+++ b/tools/binman/test/165_section_ignore_hash_signature.dts
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ section@0 {
+ u-boot {
+ };
+ hash {
+ algo = "sha256";
+ };
+ signature {
+ algo = "sha256,rsa2048";
+ key-name-hint = "dev";
+ };
+ };
+ section@1 {
+ u-boot {
+ };
+ hash-1 {
+ algo = "sha1";
+ };
+ hash-2 {
+ algo = "sha256";
+ };
+ signature-1 {
+ algo = "sha1,rsa2048";
+ key-name-hint = "dev";
+ };
+ signature-2 {
+ algo = "sha256,rsa2048";
+ key-name-hint = "dev";
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/166_pad_in_sections.dts b/tools/binman/test/166_pad_in_sections.dts
new file mode 100644
index 00000000000..f2b327ff9f3
--- /dev/null
+++ b/tools/binman/test/166_pad_in_sections.dts
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ pad-byte = <0x26>;
+ section {
+ pad-byte = <0x21>;
+
+ before {
+ type = "u-boot";
+ };
+ u-boot {
+ pad-before = <12>;
+ pad-after = <6>;
+ };
+ after {
+ type = "u-boot";
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/167_fit_image_subentry_alignment.dts b/tools/binman/test/167_fit_image_subentry_alignment.dts
new file mode 100644
index 00000000000..360cac52661
--- /dev/null
+++ b/tools/binman/test/167_fit_image_subentry_alignment.dts
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ fit {
+ description = "test-desc";
+ #address-cells = <1>;
+
+ images {
+ kernel {
+ description = "Offset-Align Test";
+ type = "kernel";
+ arch = "arm64";
+ os = "linux";
+ compression = "none";
+ load = <00000000>;
+ entry = <00000000>;
+ u-boot-spl {
+ offset = <0x20>;
+ };
+ u-boot {
+ align = <0x10>;
+ };
+ };
+ fdt-1 {
+ description = "Pad-Before-After Test";
+ type = "flat_dt";
+ arch = "arm64";
+ compression = "none";
+ u-boot-spl-dtb {
+ };
+ text {
+ text-label = "test-id";
+ pad-before = <20>;
+ pad-after = <30>;
+ };
+ u-boot-dtb {
+ };
+ };
+ };
+
+ configurations {
+ default = "conf-1";
+ conf-1 {
+ description = "Kernel with FDT blob";
+ kernel = "kernel";
+ fdt = "fdt-1";
+ };
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/168_fit_missing_blob.dts b/tools/binman/test/168_fit_missing_blob.dts
new file mode 100644
index 00000000000..15f6cc07e5d
--- /dev/null
+++ b/tools/binman/test/168_fit_missing_blob.dts
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot {
+ };
+ fit {
+ description = "test-desc";
+ #address-cells = <1>;
+ fit,fdt-list = "of-list";
+
+ images {
+ kernel {
+ description = "ATF BL31";
+ type = "kernel";
+ arch = "ppc";
+ os = "linux";
+ compression = "gzip";
+ load = <00000000>;
+ entry = <00000000>;
+ hash-1 {
+ algo = "crc32";
+ };
+ hash-2 {
+ algo = "sha1";
+ };
+ atf-bl31 {
+ filename = "missing";
+ };
+ cros-ec-rw {
+ type = "atf-bl31";
+ missing-msg = "wibble";
+ };
+ another {
+ type = "atf-bl31";
+ };
+ };
+ };
+ };
+ u-boot-nodtb {
+ };
+ };
+};
diff --git a/tools/binman/test/169_atf_bl31.dts b/tools/binman/test/169_atf_bl31.dts
new file mode 100644
index 00000000000..2b7547d70f9
--- /dev/null
+++ b/tools/binman/test/169_atf_bl31.dts
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <16>;
+
+ atf-bl31 {
+ filename = "bl31.bin";
+ };
+ };
+};
diff --git a/tools/binman/test/170_fit_fdt.dts b/tools/binman/test/170_fit_fdt.dts
new file mode 100644
index 00000000000..0197ffd1597
--- /dev/null
+++ b/tools/binman/test/170_fit_fdt.dts
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot {
+ };
+ fit {
+ description = "test-desc";
+ #address-cells = <1>;
+ fit,fdt-list = "of-list";
+
+ images {
+ kernel {
+ description = "Vanilla Linux kernel";
+ type = "kernel";
+ arch = "ppc";
+ os = "linux";
+ compression = "gzip";
+ load = <00000000>;
+ entry = <00000000>;
+ hash-1 {
+ algo = "crc32";
+ };
+ hash-2 {
+ algo = "sha1";
+ };
+ u-boot {
+ };
+ };
+ @fdt-SEQ {
+ description = "fdt-NAME.dtb";
+ type = "flat_dt";
+ compression = "none";
+ hash {
+ algo = "sha256";
+ };
+ };
+ };
+
+ configurations {
+ default = "@config-DEFAULT-SEQ";
+ @config-SEQ {
+ description = "conf-NAME.dtb";
+ firmware = "uboot";
+ loadables = "atf";
+ fdt = "fdt-SEQ";
+ };
+ };
+ };
+ u-boot-nodtb {
+ };
+ };
+};
diff --git a/tools/binman/test/171_fit_fdt_missing_prop.dts b/tools/binman/test/171_fit_fdt_missing_prop.dts
new file mode 100644
index 00000000000..c36134715c6
--- /dev/null
+++ b/tools/binman/test/171_fit_fdt_missing_prop.dts
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot {
+ };
+ fit {
+ description = "test-desc";
+ #address-cells = <1>;
+
+ images {
+ kernel {
+ description = "Vanilla Linux kernel";
+ type = "kernel";
+ arch = "ppc";
+ os = "linux";
+ compression = "gzip";
+ load = <00000000>;
+ entry = <00000000>;
+ hash-1 {
+ algo = "crc32";
+ };
+ hash-2 {
+ algo = "sha1";
+ };
+ u-boot {
+ };
+ };
+ @fdt-SEQ {
+ description = "fdt-NAME.dtb";
+ type = "flat_dt";
+ compression = "none";
+ };
+ };
+
+ configurations {
+ default = "config-1";
+ @config-SEQ {
+ description = "conf-NAME.dtb";
+ firmware = "uboot";
+ loadables = "atf";
+ fdt = "fdt-SEQ";
+ };
+ };
+ };
+ u-boot-nodtb {
+ };
+ };
+};
diff --git a/tools/binman/test/172_scp.dts b/tools/binman/test/172_scp.dts
new file mode 100644
index 00000000000..354e4ef17df
--- /dev/null
+++ b/tools/binman/test/172_scp.dts
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <16>;
+
+ scp {
+ filename = "scp.bin";
+ };
+ };
+};
diff --git a/tools/binman/test/173_missing_blob.dts b/tools/binman/test/173_missing_blob.dts
new file mode 100644
index 00000000000..ffb655a1cb4
--- /dev/null
+++ b/tools/binman/test/173_missing_blob.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ blob {
+ filename = "missing";
+ };
+ };
+};
diff --git a/tools/binman/test/174_env.dts b/tools/binman/test/174_env.dts
new file mode 100644
index 00000000000..d1393d2db93
--- /dev/null
+++ b/tools/binman/test/174_env.dts
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot {
+ };
+ u-boot-env {
+ filename = "env.txt";
+ size = <0x18>;
+ fill-byte = [ff];
+ };
+ u-boot-nodtb {
+ };
+ };
+};
diff --git a/tools/binman/test/175_env_no_size.dts b/tools/binman/test/175_env_no_size.dts
new file mode 100644
index 00000000000..267acd15491
--- /dev/null
+++ b/tools/binman/test/175_env_no_size.dts
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot {
+ };
+ u-boot-env {
+ filename = "env.txt";
+ fill-byte = [ff];
+ };
+ u-boot-nodtb {
+ };
+ };
+};
diff --git a/tools/binman/test/176_env_too_small.dts b/tools/binman/test/176_env_too_small.dts
new file mode 100644
index 00000000000..2db8d054639
--- /dev/null
+++ b/tools/binman/test/176_env_too_small.dts
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot {
+ };
+ u-boot-env {
+ filename = "env.txt";
+ size = <0x8>;
+ fill-byte = [ff];
+ };
+ u-boot-nodtb {
+ };
+ };
+};
diff --git a/tools/binman/test/177_skip_at_start.dts b/tools/binman/test/177_skip_at_start.dts
new file mode 100644
index 00000000000..021460b1a04
--- /dev/null
+++ b/tools/binman/test/177_skip_at_start.dts
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2018 NXP
+ */
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ section {
+ skip-at-start = <16>;
+ u-boot {
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/178_skip_at_start_pad.dts b/tools/binman/test/178_skip_at_start_pad.dts
new file mode 100644
index 00000000000..deda3c862e9
--- /dev/null
+++ b/tools/binman/test/178_skip_at_start_pad.dts
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2018 NXP
+ */
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ section {
+ skip-at-start = <16>;
+ u-boot {
+ pad-before = <8>;
+ pad-after = <4>;
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/179_skip_at_start_section_pad.dts b/tools/binman/test/179_skip_at_start_section_pad.dts
new file mode 100644
index 00000000000..bf2f8f69b4d
--- /dev/null
+++ b/tools/binman/test/179_skip_at_start_section_pad.dts
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2018 NXP
+ */
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ section {
+ skip-at-start = <16>;
+ pad-before = <8>;
+ pad-after = <4>;
+
+ u-boot {
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/180_section_pad.dts b/tools/binman/test/180_section_pad.dts
new file mode 100644
index 00000000000..7e4ebf257b8
--- /dev/null
+++ b/tools/binman/test/180_section_pad.dts
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ pad-byte = <0x26>;
+ section@0 {
+ read-only;
+
+ /* Padding for the section uses the 0x26 pad byte */
+ pad-before = <3>;
+ pad-after = <2>;
+
+ /* Set the padding byte for entries, i.e. u-boot */
+ pad-byte = <0x21>;
+
+ u-boot {
+ pad-before = <5>;
+ pad-after = <1>;
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/181_section_align.dts b/tools/binman/test/181_section_align.dts
new file mode 100644
index 00000000000..90795d131b0
--- /dev/null
+++ b/tools/binman/test/181_section_align.dts
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ pad-byte = <0x26>;
+ fill {
+ size = <1>;
+ };
+ section@1 {
+ read-only;
+
+ /* Padding for the section uses the 0x26 pad byte */
+ align = <2>;
+ align-size = <0x10>;
+
+ /* Set the padding byte for entries, i.e. u-boot */
+ pad-byte = <0x21>;
+
+ fill {
+ size = <1>;
+ };
+
+ u-boot {
+ align = <4>;
+ align-size = <8>;
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/182_compress_image.dts b/tools/binman/test/182_compress_image.dts
new file mode 100644
index 00000000000..4176b7f2e62
--- /dev/null
+++ b/tools/binman/test/182_compress_image.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ binman {
+ compress = "lz4";
+ blob {
+ filename = "compress";
+ };
+
+ u-boot {
+ };
+ };
+};
diff --git a/tools/binman/test/183_compress_image_less.dts b/tools/binman/test/183_compress_image_less.dts
new file mode 100644
index 00000000000..1d9d57b78c9
--- /dev/null
+++ b/tools/binman/test/183_compress_image_less.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ binman {
+ compress = "lz4";
+ blob {
+ filename = "compress_big";
+ };
+
+ u-boot {
+ };
+ };
+};
diff --git a/tools/binman/test/184_compress_section_size.dts b/tools/binman/test/184_compress_section_size.dts
new file mode 100644
index 00000000000..95ed30add1a
--- /dev/null
+++ b/tools/binman/test/184_compress_section_size.dts
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ binman {
+ section {
+ size = <0x30>;
+ compress = "lz4";
+ blob {
+ filename = "compress";
+ };
+
+ u-boot {
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/185_compress_section.dts b/tools/binman/test/185_compress_section.dts
new file mode 100644
index 00000000000..dc3e340c5d6
--- /dev/null
+++ b/tools/binman/test/185_compress_section.dts
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ binman {
+ section {
+ compress = "lz4";
+ blob {
+ filename = "compress";
+ };
+
+ u-boot {
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/186_compress_extra.dts b/tools/binman/test/186_compress_extra.dts
new file mode 100644
index 00000000000..59aae822638
--- /dev/null
+++ b/tools/binman/test/186_compress_extra.dts
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ binman {
+ u-boot {
+ };
+ base {
+ type = "section";
+ u-boot {
+ };
+ section {
+ compress = "lz4";
+ blob {
+ filename = "compress";
+ };
+
+ u-boot {
+ };
+ };
+ section2 {
+ type = "section";
+ compress = "lz4";
+ blob {
+ filename = "compress";
+ };
+ blob2 {
+ type = "blob";
+ filename = "compress";
+ };
+ };
+ u-boot2 {
+ type = "u-boot";
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/187_symbols_sub.dts b/tools/binman/test/187_symbols_sub.dts
new file mode 100644
index 00000000000..3ab62d37215
--- /dev/null
+++ b/tools/binman/test/187_symbols_sub.dts
@@ -0,0 +1,22 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ section {
+ pad-byte = <0xff>;
+ u-boot-spl {
+ };
+
+ u-boot {
+ offset = <28>;
+ };
+ };
+
+ u-boot-spl2 {
+ type = "u-boot-spl";
+ };
+ };
+};
diff --git a/tools/binman/test/188_image_entryarg.dts b/tools/binman/test/188_image_entryarg.dts
new file mode 100644
index 00000000000..29d81491623
--- /dev/null
+++ b/tools/binman/test/188_image_entryarg.dts
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <0xc00>;
+ u-boot {
+ };
+ cros-ec-rw {
+ };
+ fdtmap {
+ };
+ image-header {
+ location = "end";
+ };
+ };
+};
diff --git a/tools/binman/test/189_vblock_content.dts b/tools/binman/test/189_vblock_content.dts
new file mode 100644
index 00000000000..dcc74449c17
--- /dev/null
+++ b/tools/binman/test/189_vblock_content.dts
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u_boot: u-boot {
+ };
+
+ dtb: u-boot-dtb {
+ };
+
+ /*
+ * Put the vblock after the dtb so that the dtb is updated
+ * before the vblock reads its data. At present binman does not
+ * understand dependencies between entries, but simply
+ * iterates again when it thinks something needs to be
+ * recalculated.
+ */
+ vblock {
+ content = <&u_boot &dtb>;
+ keyblock = "firmware.keyblock";
+ signprivate = "firmware_data_key.vbprivk";
+ version = <1>;
+ kernelkey = "kernel_subkey.vbpubk";
+ preamble-flags = <1>;
+ };
+ };
+};
diff --git a/tools/binman/test/190_files_align.dts b/tools/binman/test/190_files_align.dts
new file mode 100644
index 00000000000..213ba966d35
--- /dev/null
+++ b/tools/binman/test/190_files_align.dts
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ binman {
+ files {
+ pattern = "files/*.dat";
+ files-compress = "none";
+ files-align = <4>;
+ };
+ };
+};
diff --git a/tools/binman/test/191_read_image_skip.dts b/tools/binman/test/191_read_image_skip.dts
new file mode 100644
index 00000000000..31df518fae6
--- /dev/null
+++ b/tools/binman/test/191_read_image_skip.dts
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ end-at-4gb;
+ size = <0x400>;
+ section {
+ size = <0x10>;
+ u-boot {
+ };
+ };
+ fdtmap {
+ };
+ image-header {
+ location = "end";
+ };
+ };
+};
diff --git a/tools/binman/test/192_u_boot_tpl_nodtb.dts b/tools/binman/test/192_u_boot_tpl_nodtb.dts
new file mode 100644
index 00000000000..94cef395e89
--- /dev/null
+++ b/tools/binman/test/192_u_boot_tpl_nodtb.dts
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot-tpl-nodtb {
+ };
+ };
+};
diff --git a/tools/binman/test/193_tpl_bss_pad.dts b/tools/binman/test/193_tpl_bss_pad.dts
new file mode 100644
index 00000000000..f5c2db0646c
--- /dev/null
+++ b/tools/binman/test/193_tpl_bss_pad.dts
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot-tpl {
+ };
+
+ u-boot-tpl-bss-pad {
+ };
+
+ u-boot {
+ };
+ };
+};
diff --git a/tools/binman/test/194_fdt_incl.dts b/tools/binman/test/194_fdt_incl.dts
new file mode 100644
index 00000000000..b14c8ff8f52
--- /dev/null
+++ b/tools/binman/test/194_fdt_incl.dts
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot {
+ };
+ u-boot-spl {
+ };
+ u-boot-tpl {
+ };
+ };
+};
diff --git a/tools/binman/test/195_fdt_incl_tpl.dts b/tools/binman/test/195_fdt_incl_tpl.dts
new file mode 100644
index 00000000000..3756ac4fc47
--- /dev/null
+++ b/tools/binman/test/195_fdt_incl_tpl.dts
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot-tpl {
+ };
+ };
+};
diff --git a/tools/binman/test/196_symbols_nodtb.dts b/tools/binman/test/196_symbols_nodtb.dts
new file mode 100644
index 00000000000..5c900d60709
--- /dev/null
+++ b/tools/binman/test/196_symbols_nodtb.dts
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ pad-byte = <0xff>;
+ u-boot-spl-nodtb {
+ };
+ u-boot-spl-dtb {
+ };
+
+ u-boot {
+ offset = <0x38>;
+ };
+
+ u-boot-spl2 {
+ type = "u-boot-spl-nodtb";
+ };
+ u-boot-spl-dtb2 {
+ type = "u-boot-spl-dtb";
+ };
+ };
+};
diff --git a/tools/binman/test/197_symbols_expand.dts b/tools/binman/test/197_symbols_expand.dts
new file mode 100644
index 00000000000..8aee76dc755
--- /dev/null
+++ b/tools/binman/test/197_symbols_expand.dts
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ pad-byte = <0xff>;
+ u-boot-spl {
+ };
+
+ u-boot {
+ offset = <0x38>;
+ no-expanded;
+ };
+
+ u-boot-spl2 {
+ type = "u-boot-spl";
+ };
+ };
+};
diff --git a/tools/binman/test/198_collection.dts b/tools/binman/test/198_collection.dts
new file mode 100644
index 00000000000..484a1b0050d
--- /dev/null
+++ b/tools/binman/test/198_collection.dts
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ collection {
+ content = <&u_boot_nodtb &dtb>;
+ };
+ fill {
+ size = <2>;
+ fill-byte = [ff];
+ };
+ u_boot_nodtb: u-boot-nodtb {
+ };
+ fill2 {
+ type = "fill";
+ size = <3>;
+ fill-byte = [fe];
+ };
+ dtb: u-boot-dtb {
+ };
+ };
+};
diff --git a/tools/binman/test/199_collection_section.dts b/tools/binman/test/199_collection_section.dts
new file mode 100644
index 00000000000..03a73194c3f
--- /dev/null
+++ b/tools/binman/test/199_collection_section.dts
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ collection {
+ content = <&section &u_boot>;
+ };
+ fill {
+ size = <2>;
+ fill-byte = [ff];
+ };
+ section: section {
+ u-boot-nodtb {
+ };
+ u-boot-dtb {
+ };
+ };
+ fill2 {
+ type = "fill";
+ size = <3>;
+ fill-byte = [fe];
+ };
+ u_boot: u-boot {
+ no-expanded;
+ };
+ };
+};
diff --git a/tools/binman/test/200_align_default.dts b/tools/binman/test/200_align_default.dts
new file mode 100644
index 00000000000..1b155770d4c
--- /dev/null
+++ b/tools/binman/test/200_align_default.dts
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ align-default = <8>;
+ u-boot {
+ };
+
+ u-boot-align {
+ type = "u-boot";
+ };
+
+ section {
+ align = <32>;
+ u-boot {
+ };
+
+ u-boot-nodtb {
+ };
+ };
+
+ u-boot-nodtb {
+ };
+ };
+};
diff --git a/tools/binman/test/201_opensbi.dts b/tools/binman/test/201_opensbi.dts
new file mode 100644
index 00000000000..942183f9900
--- /dev/null
+++ b/tools/binman/test/201_opensbi.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ opensbi {
+ filename = "fw_dynamic.bin";
+ };
+ };
+};
diff --git a/tools/binman/test/202_section_timeout.dts b/tools/binman/test/202_section_timeout.dts
new file mode 100644
index 00000000000..1481450367a
--- /dev/null
+++ b/tools/binman/test/202_section_timeout.dts
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ pad-byte = <0x26>;
+ size = <0x28>;
+ section@0 {
+ read-only;
+ size = <0x10>;
+ pad-byte = <0x21>;
+
+ u-boot {
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/203_fip.dts b/tools/binman/test/203_fip.dts
new file mode 100644
index 00000000000..08973373240
--- /dev/null
+++ b/tools/binman/test/203_fip.dts
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ atf-fip {
+ fip-hdr-flags = /bits/ 64 <0x123>;
+ soc-fw {
+ fip-flags = /bits/ 64 <0x123456789abcdef>;
+ filename = "bl31.bin";
+ };
+
+ scp-fwu-cfg {
+ filename = "bl2u.bin";
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/204_fip_other.dts b/tools/binman/test/204_fip_other.dts
new file mode 100644
index 00000000000..65039410986
--- /dev/null
+++ b/tools/binman/test/204_fip_other.dts
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ atf-fip {
+ fip-hdr-flags = /bits/ 64 <0x123>;
+ soc-fw {
+ fip-flags = /bits/ 64 <0x123456789abcdef>;
+ filename = "bl31.bin";
+ };
+
+ _testing {
+ fip-type = "rot-cert";
+ return-contents-later;
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/205_fip_no_type.dts b/tools/binman/test/205_fip_no_type.dts
new file mode 100644
index 00000000000..23c8c3bc37e
--- /dev/null
+++ b/tools/binman/test/205_fip_no_type.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ atf-fip {
+ fip-hdr-flags = /bits/ 64 <0x123>;
+ u-boot {
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/206_fip_uuid.dts b/tools/binman/test/206_fip_uuid.dts
new file mode 100644
index 00000000000..c9bd44f9c31
--- /dev/null
+++ b/tools/binman/test/206_fip_uuid.dts
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ atf-fip {
+ fip-hdr-flags = /bits/ 64 <0x123>;
+ soc-fw {
+ fip-flags = /bits/ 64 <0x123456789abcdef>;
+ filename = "bl31.bin";
+ };
+
+ u-boot {
+ fip-uuid = [fc 65 13 92 4a 5b 11 ec
+ 94 35 ff 2d 1c fc 79 9c];
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/207_fip_ls.dts b/tools/binman/test/207_fip_ls.dts
new file mode 100644
index 00000000000..630fca15024
--- /dev/null
+++ b/tools/binman/test/207_fip_ls.dts
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ atf-fip {
+ fip-hdr-flags = /bits/ 64 <0x123>;
+ soc-fw {
+ fip-flags = /bits/ 64 <0x123456789abcdef>;
+ filename = "bl31.bin";
+ };
+
+ u-boot {
+ fip-uuid = [fc 65 13 92 4a 5b 11 ec
+ 94 35 ff 2d 1c fc 79 9c];
+ };
+ };
+
+ fdtmap {
+ };
+ };
+};
diff --git a/tools/binman/test/208_fip_replace.dts b/tools/binman/test/208_fip_replace.dts
new file mode 100644
index 00000000000..432c12474df
--- /dev/null
+++ b/tools/binman/test/208_fip_replace.dts
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ allow-repack;
+ atf-fip {
+ fip-hdr-flags = /bits/ 64 <0x123>;
+ soc-fw {
+ fip-flags = /bits/ 64 <0x123456789abcdef>;
+ filename = "bl31.bin";
+ };
+
+ u-boot {
+ fip-uuid = [fc 65 13 92 4a 5b 11 ec
+ 94 35 ff 2d 1c fc 79 9c];
+ };
+
+ };
+
+ u-boot {
+ };
+
+ u-boot-dtb {
+ };
+
+ fdtmap {
+ };
+ };
+};
diff --git a/tools/binman/test/209_fip_missing.dts b/tools/binman/test/209_fip_missing.dts
new file mode 100644
index 00000000000..43bb600d047
--- /dev/null
+++ b/tools/binman/test/209_fip_missing.dts
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ atf-fip {
+ soc-fw {
+ filename = "bl31.bin";
+ };
+
+ rmm-fw {
+ filename = "rmm.bin";
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/210_fip_size.dts b/tools/binman/test/210_fip_size.dts
new file mode 100644
index 00000000000..9dfee796459
--- /dev/null
+++ b/tools/binman/test/210_fip_size.dts
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ atf-fip {
+ size = <0x100>;
+ pad-byte = <0xff>;
+ soc-fw {
+ filename = "bl31.bin";
+ };
+ };
+ u-boot {
+ };
+ };
+};
diff --git a/tools/binman/test/211_fip_bad_align.dts b/tools/binman/test/211_fip_bad_align.dts
new file mode 100644
index 00000000000..a0901496d80
--- /dev/null
+++ b/tools/binman/test/211_fip_bad_align.dts
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ atf-fip {
+ fip-align = <31>;
+ size = <0x100>;
+ pad-byte = <0xff>;
+ soc-fw {
+ filename = "bl31.bin";
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/212_fip_collection.dts b/tools/binman/test/212_fip_collection.dts
new file mode 100644
index 00000000000..332c023af87
--- /dev/null
+++ b/tools/binman/test/212_fip_collection.dts
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ collection {
+ content = <&fip &u_boot>;
+ };
+ fip: atf-fip {
+ soc-fw {
+ filename = "bl31.bin";
+ };
+
+ scp-fwu-cfg {
+ filename = "bl2u.bin";
+ };
+ };
+ u_boot: u-boot {
+ };
+ };
+};
diff --git a/tools/binman/test/213_fdtmap_alt_format.dts b/tools/binman/test/213_fdtmap_alt_format.dts
new file mode 100644
index 00000000000..d9aef04bcf6
--- /dev/null
+++ b/tools/binman/test/213_fdtmap_alt_format.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot {
+ };
+ fdtmap {
+ };
+ };
+};
diff --git a/tools/binman/test/214_no_alt_format.dts b/tools/binman/test/214_no_alt_format.dts
new file mode 100644
index 00000000000..f00bcdd5764
--- /dev/null
+++ b/tools/binman/test/214_no_alt_format.dts
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot {
+ };
+ };
+};
diff --git a/tools/binman/test/215_blob_ext_list.dts b/tools/binman/test/215_blob_ext_list.dts
new file mode 100644
index 00000000000..aad2f0300d3
--- /dev/null
+++ b/tools/binman/test/215_blob_ext_list.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ blob-ext-list {
+ filenames = "refcode.bin", "fsp_m.bin";
+ };
+ };
+};
diff --git a/tools/binman/test/216_blob_ext_list_missing.dts b/tools/binman/test/216_blob_ext_list_missing.dts
new file mode 100644
index 00000000000..c02c335c760
--- /dev/null
+++ b/tools/binman/test/216_blob_ext_list_missing.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ blob-ext-list {
+ filenames = "refcode.bin", "missing-file";
+ };
+ };
+};
diff --git a/tools/binman/test/217_fake_blob.dts b/tools/binman/test/217_fake_blob.dts
new file mode 100644
index 00000000000..22cf67f4f83
--- /dev/null
+++ b/tools/binman/test/217_fake_blob.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ blob-ext {
+ filename = "binman_faking_test_blob";
+ };
+ };
+};
diff --git a/tools/binman/test/218_blob_ext_list_fake.dts b/tools/binman/test/218_blob_ext_list_fake.dts
new file mode 100644
index 00000000000..54ee54fdaab
--- /dev/null
+++ b/tools/binman/test/218_blob_ext_list_fake.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ blob-ext-list {
+ filenames = "refcode.bin", "fake-file";
+ };
+ };
+};
diff --git a/tools/binman/test/219_fit_gennode.dts b/tools/binman/test/219_fit_gennode.dts
new file mode 100644
index 00000000000..e9eda29983a
--- /dev/null
+++ b/tools/binman/test/219_fit_gennode.dts
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ fit {
+ description = "test-desc";
+ #address-cells = <1>;
+ fit,fdt-list = "of-list";
+
+ images {
+ @fdt-SEQ {
+ description = "fdt-NAME.dtb";
+ type = "flat_dt";
+ compression = "none";
+ };
+ };
+ };
+ fdtmap {
+ };
+ };
+};
diff --git a/tools/binman/test/220_fit_subentry_bintool.dts b/tools/binman/test/220_fit_subentry_bintool.dts
new file mode 100644
index 00000000000..6e29d41eeb3
--- /dev/null
+++ b/tools/binman/test/220_fit_subentry_bintool.dts
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ fit {
+ description = "test-desc";
+ #address-cells = <1>;
+
+ images {
+ test {
+ description = "Something using a bintool";
+ type = "kernel";
+ arch = "arm";
+ os = "linux";
+ compression = "gzip";
+ load = <00000000>;
+ entry = <00000000>;
+
+ gbb {
+ size = <0x2180>;
+ };
+ };
+ };
+
+ configurations {
+ default = "conf-1";
+ conf-1 {
+ description = "Boot bintool output";
+ kernel = "kernel";
+ };
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/221_fit_subentry_hash.dts b/tools/binman/test/221_fit_subentry_hash.dts
new file mode 100644
index 00000000000..2cb04f96d08
--- /dev/null
+++ b/tools/binman/test/221_fit_subentry_hash.dts
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ fit {
+ description = "test-desc";
+ #address-cells = <1>;
+
+ images {
+ kernel {
+ description = "Vanilla Linux kernel";
+ type = "kernel";
+ arch = "ppc";
+ os = "linux";
+ compression = "gzip";
+ load = <00000000>;
+ entry = <00000000>;
+ hash {
+ algo = "sha1";
+ };
+ u-boot {
+ };
+ };
+ fdt-1 {
+ description = "Flattened Device Tree blob";
+ type = "flat_dt";
+ arch = "ppc";
+ compression = "none";
+ hash {
+ algo = "crc32";
+ };
+ u-boot-spl-dtb {
+ };
+ };
+ };
+
+ configurations {
+ default = "conf-1";
+ conf-1 {
+ description = "Boot Linux kernel with FDT blob";
+ kernel = "kernel";
+ fdt = "fdt-1";
+ };
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/222_tee_os.dts b/tools/binman/test/222_tee_os.dts
new file mode 100644
index 00000000000..68854972942
--- /dev/null
+++ b/tools/binman/test/222_tee_os.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ tee-os {
+ filename = "tee-pager.bin";
+ };
+ };
+};
diff --git a/tools/binman/test/223_fit_fdt_oper.dts b/tools/binman/test/223_fit_fdt_oper.dts
new file mode 100644
index 00000000000..e630165acf4
--- /dev/null
+++ b/tools/binman/test/223_fit_fdt_oper.dts
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot {
+ };
+ fit {
+ description = "test-desc";
+ #address-cells = <1>;
+ fit,fdt-list = "of-list";
+
+ images {
+ kernel {
+ description = "Vanilla Linux kernel";
+ type = "kernel";
+ arch = "ppc";
+ os = "linux";
+ compression = "gzip";
+ load = <00000000>;
+ entry = <00000000>;
+ hash-1 {
+ algo = "crc32";
+ };
+ hash-2 {
+ algo = "sha1";
+ };
+ u-boot {
+ };
+ };
+ @fdt-SEQ {
+ fit,operation = "gen-fdt-nodes";
+ description = "fdt-NAME.dtb";
+ type = "flat_dt";
+ compression = "none";
+ };
+ };
+
+ configurations {
+ default = "@config-DEFAULT-SEQ";
+ @config-SEQ {
+ description = "conf-NAME.dtb";
+ firmware = "uboot";
+ loadables = "atf";
+ fdt = "fdt-SEQ";
+ };
+ };
+ };
+ u-boot-nodtb {
+ };
+ };
+};
diff --git a/tools/binman/test/224_fit_bad_oper.dts b/tools/binman/test/224_fit_bad_oper.dts
new file mode 100644
index 00000000000..8a8014ea33d
--- /dev/null
+++ b/tools/binman/test/224_fit_bad_oper.dts
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ fit {
+ description = "test-desc";
+ #address-cells = <1>;
+ fit,fdt-list = "of-list";
+
+ images {
+ @fdt-SEQ {
+ fit,operation = "unknown";
+ description = "fdt-NAME.dtb";
+ type = "flat_dt";
+ compression = "none";
+ };
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/225_expand_size_bad.dts b/tools/binman/test/225_expand_size_bad.dts
new file mode 100644
index 00000000000..d4ad9a6a1ae
--- /dev/null
+++ b/tools/binman/test/225_expand_size_bad.dts
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ binman {
+ u-boot {
+ expand-size;
+ };
+ };
+};
diff --git a/tools/binman/test/225_ti_dm.dts b/tools/binman/test/225_ti_dm.dts
new file mode 100644
index 00000000000..3ab754131e9
--- /dev/null
+++ b/tools/binman/test/225_ti_dm.dts
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ binman {
+ ti-dm {
+ filename = "dm.bin";
+ };
+ };
+};
diff --git a/tools/binman/test/226_fit_split_elf.dts b/tools/binman/test/226_fit_split_elf.dts
new file mode 100644
index 00000000000..22c453e6037
--- /dev/null
+++ b/tools/binman/test/226_fit_split_elf.dts
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot {
+ };
+ fit {
+ description = "test-desc";
+ #address-cells = <1>;
+ fit,fdt-list = "of-list";
+
+ images {
+ @fdt-SEQ {
+ description = "fdt-NAME.dtb";
+ type = "flat_dt";
+ compression = "none";
+ };
+ atf: @atf-SEQ {
+ fit,operation = "split-elf";
+ description = "ARM Trusted Firmware";
+ type = "firmware";
+ arch = "arm64";
+ os = "arm-trusted-firmware";
+ compression = "none";
+ fit,load;
+ fit,entry;
+ fit,data;
+
+ atf-bl31 {
+ };
+ hash {
+ algo = "sha256";
+ };
+ };
+
+ @tee-SEQ {
+ fit,operation = "split-elf";
+ description = "TEE";
+ type = "tee";
+ arch = "arm64";
+ os = "tee";
+ compression = "none";
+ fit,load;
+ fit,entry;
+ fit,data;
+
+ tee-os {
+ };
+ hash-1 {
+ algo = "sha256";
+ };
+ };
+ };
+
+ configurations {
+ default = "@config-DEFAULT-SEQ";
+ config: @config-SEQ {
+ description = "conf-NAME.dtb";
+ fdt = "fdt-SEQ";
+ fit,loadables;
+ };
+ };
+ };
+
+ u-boot-nodtb {
+ };
+ };
+};
diff --git a/tools/binman/test/227_fit_bad_dir.dts b/tools/binman/test/227_fit_bad_dir.dts
new file mode 100644
index 00000000000..51f4816c4c2
--- /dev/null
+++ b/tools/binman/test/227_fit_bad_dir.dts
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+#include "226_fit_split_elf.dts"
+
+&atf {
+ fit,something = "bad";
+};
diff --git a/tools/binman/test/228_fit_bad_dir_config.dts b/tools/binman/test/228_fit_bad_dir_config.dts
new file mode 100644
index 00000000000..825a346c3e6
--- /dev/null
+++ b/tools/binman/test/228_fit_bad_dir_config.dts
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+#include "226_fit_split_elf.dts"
+
+&config {
+ fit,config = "bad";
+};
diff --git a/tools/binman/test/229_mkimage_missing.dts b/tools/binman/test/229_mkimage_missing.dts
new file mode 100644
index 00000000000..54a5a6c571a
--- /dev/null
+++ b/tools/binman/test/229_mkimage_missing.dts
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ mkimage {
+ args = "-n test -T script";
+
+ blob-ext {
+ filename = "missing.bin";
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/230_pre_load.dts b/tools/binman/test/230_pre_load.dts
new file mode 100644
index 00000000000..e6d9ef40c6c
--- /dev/null
+++ b/tools/binman/test/230_pre_load.dts
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ pre-load {
+ content = <&image>;
+ algo-name = "sha256,rsa2048";
+ key-name = "dev.key";
+ header-size = <4096>;
+ version = <0x11223344>;
+ };
+
+ image: blob-ext {
+ filename = "refcode.bin";
+ };
+ };
+};
diff --git a/tools/binman/test/231_pre_load_pkcs.dts b/tools/binman/test/231_pre_load_pkcs.dts
new file mode 100644
index 00000000000..66268cdb212
--- /dev/null
+++ b/tools/binman/test/231_pre_load_pkcs.dts
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ pre-load {
+ content = <&image>;
+ algo-name = "sha256,rsa2048";
+ padding-name = "pkcs-1.5";
+ key-name = "dev.key";
+ header-size = <4096>;
+ version = <0x11223344>;
+ };
+
+ image: blob-ext {
+ filename = "refcode.bin";
+ };
+ };
+};
diff --git a/tools/binman/test/232_pre_load_pss.dts b/tools/binman/test/232_pre_load_pss.dts
new file mode 100644
index 00000000000..3008d3f4649
--- /dev/null
+++ b/tools/binman/test/232_pre_load_pss.dts
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ pre-load {
+ content = <&image>;
+ algo-name = "sha256,rsa2048";
+ padding-name = "pss";
+ key-name = "dev.key";
+ header-size = <4096>;
+ version = <0x11223344>;
+ };
+
+ image: blob-ext {
+ filename = "refcode.bin";
+ };
+ };
+};
diff --git a/tools/binman/test/233_pre_load_invalid_padding.dts b/tools/binman/test/233_pre_load_invalid_padding.dts
new file mode 100644
index 00000000000..bbe2d1ba869
--- /dev/null
+++ b/tools/binman/test/233_pre_load_invalid_padding.dts
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ pre-load {
+ content = <&image>;
+ algo-name = "sha256,rsa2048";
+ padding-name = "padding";
+ key-name = "dev.key";
+ header-size = <4096>;
+ version = <1>;
+ };
+
+ image: blob-ext {
+ filename = "refcode.bin";
+ };
+ };
+};
diff --git a/tools/binman/test/234_pre_load_invalid_sha.dts b/tools/binman/test/234_pre_load_invalid_sha.dts
new file mode 100644
index 00000000000..29afd2e37e4
--- /dev/null
+++ b/tools/binman/test/234_pre_load_invalid_sha.dts
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ pre-load {
+ content = <&image>;
+ algo-name = "sha2560,rsa2048";
+ padding-name = "pkcs-1.5";
+ key-name = "dev.key";
+ header-size = <4096>;
+ version = <1>;
+ };
+
+ image: blob-ext {
+ filename = "refcode.bin";
+ };
+ };
+};
diff --git a/tools/binman/test/235_pre_load_invalid_algo.dts b/tools/binman/test/235_pre_load_invalid_algo.dts
new file mode 100644
index 00000000000..d6f6dd20cd9
--- /dev/null
+++ b/tools/binman/test/235_pre_load_invalid_algo.dts
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ pre-load {
+ content = <&image>;
+ algo-name = "sha256,rsa20480";
+ padding-name = "pkcs-1.5";
+ key-name = "dev.key";
+ header-size = <4096>;
+ version = <1>;
+ };
+
+ image: blob-ext {
+ filename = "refcode.bin";
+ };
+ };
+};
diff --git a/tools/binman/test/236_pre_load_invalid_key.dts b/tools/binman/test/236_pre_load_invalid_key.dts
new file mode 100644
index 00000000000..f93bc9792cd
--- /dev/null
+++ b/tools/binman/test/236_pre_load_invalid_key.dts
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ pre-load {
+ content = <&image>;
+ algo-name = "sha256,rsa4096";
+ padding-name = "pkcs-1.5";
+ key-name = "dev.key";
+ header-size = <4096>;
+ version = <1>;
+ };
+
+ image: blob-ext {
+ filename = "refcode.bin";
+ };
+ };
+};
diff --git a/tools/binman/test/237_unique_names.dts b/tools/binman/test/237_unique_names.dts
new file mode 100644
index 00000000000..6780d37f71f
--- /dev/null
+++ b/tools/binman/test/237_unique_names.dts
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <0xc00>;
+ allow-repack;
+
+ u-boot {
+ };
+
+ fdtmap {
+ };
+
+ u-boot2 {
+ type = "u-boot";
+ };
+
+ text {
+ text = "some text";
+ };
+
+ u-boot-dtb {
+ };
+
+ image-header {
+ location = "end";
+ };
+ };
+};
diff --git a/tools/binman/test/238_unique_names_multi.dts b/tools/binman/test/238_unique_names_multi.dts
new file mode 100644
index 00000000000..db63afb445e
--- /dev/null
+++ b/tools/binman/test/238_unique_names_multi.dts
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ multiple-images;
+
+ image {
+ size = <0xc00>;
+ allow-repack;
+
+ u-boot {
+ };
+
+ fdtmap {
+ };
+
+ u-boot2 {
+ type = "u-boot";
+ };
+
+ text {
+ text = "some text";
+ };
+
+ u-boot-dtb {
+ };
+
+ image-header {
+ location = "end";
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/239_replace_with_bintool.dts b/tools/binman/test/239_replace_with_bintool.dts
new file mode 100644
index 00000000000..d7fabd2cd83
--- /dev/null
+++ b/tools/binman/test/239_replace_with_bintool.dts
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <0xc00>;
+ allow-repack;
+
+ u-boot {
+ };
+
+ _testing {
+ require-bintool-for-contents;
+ require-bintool-for-pack;
+ };
+
+ fdtmap {
+ };
+
+ u-boot2 {
+ type = "u-boot";
+ };
+
+ text {
+ text = "some text";
+ };
+
+ u-boot-dtb {
+ };
+
+ image-header {
+ location = "end";
+ };
+ };
+};
diff --git a/tools/binman/test/240_fit_extract_replace.dts b/tools/binman/test/240_fit_extract_replace.dts
new file mode 100644
index 00000000000..b44d05afe1a
--- /dev/null
+++ b/tools/binman/test/240_fit_extract_replace.dts
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ allow-repack;
+
+ fill {
+ size = <0x1000>;
+ fill-byte = [77];
+ };
+
+ fit {
+ description = "test-desc";
+ #address-cells = <1>;
+
+ images {
+ kernel {
+ description = "test u-boot";
+ type = "kernel";
+ arch = "arm64";
+ os = "linux";
+ compression = "none";
+ load = <00000000>;
+ entry = <00000000>;
+
+ u-boot {
+ };
+ };
+
+ fdt-1 {
+ description = "test u-boot-nodtb";
+ type = "flat_dt";
+ arch = "arm64";
+ compression = "none";
+
+ u-boot-nodtb {
+ };
+ };
+
+ scr-1 {
+ description = "test blob";
+ type = "script";
+ arch = "arm64";
+ compression = "none";
+
+ blob {
+ filename = "compress";
+ };
+ };
+ };
+
+ configurations {
+ default = "conf-1";
+
+ conf-1 {
+ description = "Kernel with FDT blob";
+ kernel = "kernel";
+ fdt = "fdt-1";
+ };
+ };
+ };
+
+ u-boot-dtb {
+ };
+
+ fdtmap {
+ };
+ };
+};
diff --git a/tools/binman/test/241_replace_section_simple.dts b/tools/binman/test/241_replace_section_simple.dts
new file mode 100644
index 00000000000..c9d5c328561
--- /dev/null
+++ b/tools/binman/test/241_replace_section_simple.dts
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ binman {
+ allow-repack;
+
+ u-boot-dtb {
+ };
+
+ section {
+ blob {
+ filename = "compress";
+ };
+
+ u-boot {
+ };
+ };
+
+ fdtmap {
+ };
+ };
+};
diff --git a/tools/binman/test/242_mkimage_name.dts b/tools/binman/test/242_mkimage_name.dts
new file mode 100644
index 00000000000..fbc82f1f8d6
--- /dev/null
+++ b/tools/binman/test/242_mkimage_name.dts
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ mkimage {
+ args = "-T script";
+ data-to-imagename;
+
+ u-boot-spl {
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/243_mkimage_image.dts b/tools/binman/test/243_mkimage_image.dts
new file mode 100644
index 00000000000..6b8f4a4a401
--- /dev/null
+++ b/tools/binman/test/243_mkimage_image.dts
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ mkimage {
+ args = "-T script";
+
+ imagename {
+ type = "u-boot";
+ };
+
+ u-boot-spl {
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/244_mkimage_image_no_content.dts b/tools/binman/test/244_mkimage_image_no_content.dts
new file mode 100644
index 00000000000..7306c06af45
--- /dev/null
+++ b/tools/binman/test/244_mkimage_image_no_content.dts
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ mkimage {
+ args = "-T script";
+
+ imagename {
+ type = "_testing";
+ return-unknown-contents;
+ };
+
+ u-boot-spl {
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/245_mkimage_image_bad.dts b/tools/binman/test/245_mkimage_image_bad.dts
new file mode 100644
index 00000000000..54d2c99d628
--- /dev/null
+++ b/tools/binman/test/245_mkimage_image_bad.dts
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ mkimage {
+ args = "-T script";
+ data-to-imagename;
+
+ imagename {
+ type = "u-boot";
+ };
+
+ u-boot-spl {
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/246_collection_other.dts b/tools/binman/test/246_collection_other.dts
new file mode 100644
index 00000000000..09de20e5bca
--- /dev/null
+++ b/tools/binman/test/246_collection_other.dts
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ collection {
+ content = <&u_boot_nodtb &dtb>;
+ };
+ section {
+ fill {
+ size = <2>;
+ fill-byte = [ff];
+ };
+ u_boot_nodtb: u-boot-nodtb {
+ };
+ fill2 {
+ type = "fill";
+ size = <3>;
+ fill-byte = [fe];
+ };
+ };
+ dtb: u-boot-dtb {
+ };
+ };
+};
diff --git a/tools/binman/test/247_mkimage_coll.dts b/tools/binman/test/247_mkimage_coll.dts
new file mode 100644
index 00000000000..30860118860
--- /dev/null
+++ b/tools/binman/test/247_mkimage_coll.dts
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ collection {
+ content = <&spl &u_boot>;
+ };
+ mkimage {
+ args = "-T script";
+
+ spl: u-boot-spl {
+ };
+
+ imagename {
+ type = "section";
+
+ u_boot: u-boot {
+ };
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/248_compress_dtb_prepend_invalid.dts b/tools/binman/test/248_compress_dtb_prepend_invalid.dts
new file mode 100644
index 00000000000..ee32670a913
--- /dev/null
+++ b/tools/binman/test/248_compress_dtb_prepend_invalid.dts
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot {
+ };
+ u-boot-dtb {
+ compress = "lz4";
+ prepend = "invalid";
+ };
+ };
+};
diff --git a/tools/binman/test/249_compress_dtb_prepend_length.dts b/tools/binman/test/249_compress_dtb_prepend_length.dts
new file mode 100644
index 00000000000..1570233637a
--- /dev/null
+++ b/tools/binman/test/249_compress_dtb_prepend_length.dts
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot {
+ };
+ u-boot-dtb {
+ compress = "lz4";
+ prepend = "length";
+ };
+ fdtmap {
+ };
+ };
+};
diff --git a/tools/binman/test/250_compress_dtb_invalid.dts b/tools/binman/test/250_compress_dtb_invalid.dts
new file mode 100644
index 00000000000..228139060bc
--- /dev/null
+++ b/tools/binman/test/250_compress_dtb_invalid.dts
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot {
+ };
+ u-boot-dtb {
+ compress = "invalid";
+ };
+ };
+};
diff --git a/tools/binman/test/251_compress_dtb_zstd.dts b/tools/binman/test/251_compress_dtb_zstd.dts
new file mode 100644
index 00000000000..90cf85d1e2c
--- /dev/null
+++ b/tools/binman/test/251_compress_dtb_zstd.dts
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot {
+ };
+ u-boot-dtb {
+ compress = "zstd";
+ };
+ };
+};
diff --git a/tools/binman/test/252_mkimage_mult_data.dts b/tools/binman/test/252_mkimage_mult_data.dts
new file mode 100644
index 00000000000..a092bc39bf3
--- /dev/null
+++ b/tools/binman/test/252_mkimage_mult_data.dts
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ mkimage {
+ args = "-T script";
+ multiple-data-files;
+
+ u-boot-tpl {
+ };
+
+ u-boot-spl {
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/253_mkimage_mult_no_content.dts b/tools/binman/test/253_mkimage_mult_no_content.dts
new file mode 100644
index 00000000000..dd65666c62e
--- /dev/null
+++ b/tools/binman/test/253_mkimage_mult_no_content.dts
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ mkimage {
+ args = "-T script";
+ multiple-data-files;
+
+ _testing {
+ return-unknown-contents;
+ };
+
+ u-boot-spl {
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/254_mkimage_filename.dts b/tools/binman/test/254_mkimage_filename.dts
new file mode 100644
index 00000000000..4483790ae86
--- /dev/null
+++ b/tools/binman/test/254_mkimage_filename.dts
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ mkimage {
+ filename = "mkimage-test.bin";
+ args = "-T script";
+
+ u-boot-spl {
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/255_u_boot_vpl.dts b/tools/binman/test/255_u_boot_vpl.dts
new file mode 100644
index 00000000000..a3a281a91e0
--- /dev/null
+++ b/tools/binman/test/255_u_boot_vpl.dts
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ binman {
+ u-boot-vpl {
+ };
+ u-boot-vpl-dtb {
+ };
+ };
+};
diff --git a/tools/binman/test/256_u_boot_vpl_nodtb.dts b/tools/binman/test/256_u_boot_vpl_nodtb.dts
new file mode 100644
index 00000000000..055016badd5
--- /dev/null
+++ b/tools/binman/test/256_u_boot_vpl_nodtb.dts
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot-vpl-nodtb {
+ };
+ };
+};
diff --git a/tools/binman/test/257_fdt_incl_vpl.dts b/tools/binman/test/257_fdt_incl_vpl.dts
new file mode 100644
index 00000000000..435256fe317
--- /dev/null
+++ b/tools/binman/test/257_fdt_incl_vpl.dts
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot-vpl {
+ };
+ };
+};
diff --git a/tools/binman/test/258_vpl_bss_pad.dts b/tools/binman/test/258_vpl_bss_pad.dts
new file mode 100644
index 00000000000..d308dcade17
--- /dev/null
+++ b/tools/binman/test/258_vpl_bss_pad.dts
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot-vpl {
+ };
+
+ u-boot-vpl-bss-pad {
+ };
+
+ u-boot {
+ };
+ };
+};
diff --git a/tools/binman/test/259_symlink.dts b/tools/binman/test/259_symlink.dts
new file mode 100644
index 00000000000..2ee1f7f05e2
--- /dev/null
+++ b/tools/binman/test/259_symlink.dts
@@ -0,0 +1,16 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ multiple-images;
+ test_image {
+ filename = "test_image.bin";
+ symlink = "symlink_to_test.bin";
+ u-boot {
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/260_symbols_elf.dts b/tools/binman/test/260_symbols_elf.dts
new file mode 100644
index 00000000000..0fae118fc12
--- /dev/null
+++ b/tools/binman/test/260_symbols_elf.dts
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ pad-byte = <0xff>;
+ u-boot-spl-elf {
+ };
+
+ u-boot {
+ };
+
+ u-boot-spl2 {
+ type = "u-boot-spl-elf";
+ };
+
+ u-boot-tpl-elf {
+ };
+
+ u-boot-vpl-elf {
+ };
+ };
+};
diff --git a/tools/binman/test/261_section_fname.dts b/tools/binman/test/261_section_fname.dts
new file mode 100644
index 00000000000..790381e7301
--- /dev/null
+++ b/tools/binman/test/261_section_fname.dts
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ pad-byte = <0x26>;
+ size = <0x20>;
+ section@0 {
+ size = <0x10>;
+ pad-byte = <0x21>;
+ pad-before = <2>;
+ pad-after = <3>;
+
+ section {
+ filename = "outfile.bin";
+ u-boot {
+ };
+ };
+ };
+ section@1 {
+ u-boot {
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/262_absent.dts b/tools/binman/test/262_absent.dts
new file mode 100644
index 00000000000..2ab8766c878
--- /dev/null
+++ b/tools/binman/test/262_absent.dts
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot {
+ };
+
+ _testing {
+ set-to-absent;
+ };
+
+ u-boot-img {
+ };
+ };
+};
diff --git a/tools/binman/test/263_tee_os_opt.dts b/tools/binman/test/263_tee_os_opt.dts
new file mode 100644
index 00000000000..2e4ec24ac2c
--- /dev/null
+++ b/tools/binman/test/263_tee_os_opt.dts
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot {
+ };
+ tee-os {
+ /*
+ * This results in nothing being added, since this
+ * etype only supports the .bin format unless it is
+ * part of a FIT
+ */
+ };
+ u-boot-img {
+ };
+ };
+};
diff --git a/tools/binman/test/264_tee_os_opt_fit.dts b/tools/binman/test/264_tee_os_opt_fit.dts
new file mode 100644
index 00000000000..e9634d3ccdc
--- /dev/null
+++ b/tools/binman/test/264_tee_os_opt_fit.dts
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ fit {
+ description = "test-desc";
+ #address-cells = <1>;
+ fit,fdt-list = "of-list";
+
+ images {
+ @tee-SEQ {
+ fit,operation = "split-elf";
+ description = "TEE";
+ type = "tee";
+ arch = "arm64";
+ os = "tee";
+ compression = "none";
+ fit,load;
+ fit,entry;
+ fit,data;
+
+ tee-os {
+ optional;
+ };
+ };
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/265_tee_os_opt_fit_bad.dts b/tools/binman/test/265_tee_os_opt_fit_bad.dts
new file mode 100644
index 00000000000..7fa363cc199
--- /dev/null
+++ b/tools/binman/test/265_tee_os_opt_fit_bad.dts
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ fit {
+ description = "test-desc";
+ #address-cells = <1>;
+ fit,fdt-list = "of-list";
+
+ images {
+ @tee-SEQ {
+ fit,operation = "split-elf";
+ description = "TEE";
+ type = "tee";
+ arch = "arm64";
+ os = "tee";
+ compression = "none";
+ fit,load;
+ fit,entry;
+ fit,data;
+
+ tee-os {
+ };
+
+ /*
+ * Mess up the ELF data by adding
+ * another piece of data at the end
+ */
+ u-boot {
+ };
+ };
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/266_blob_ext_opt.dts b/tools/binman/test/266_blob_ext_opt.dts
new file mode 100644
index 00000000000..717153152ce
--- /dev/null
+++ b/tools/binman/test/266_blob_ext_opt.dts
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ ok {
+ type = "blob-ext";
+ filename = "refcode.bin";
+ };
+
+ missing {
+ type = "blob-ext";
+ filename = "missing.bin";
+ optional;
+ };
+ };
+};
diff --git a/tools/binman/test/267_section_inner.dts b/tools/binman/test/267_section_inner.dts
new file mode 100644
index 00000000000..f6faab3d2f9
--- /dev/null
+++ b/tools/binman/test/267_section_inner.dts
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ section {
+ size = <0x10>;
+ u-boot {
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/268_null.dts b/tools/binman/test/268_null.dts
new file mode 100644
index 00000000000..3824ba85090
--- /dev/null
+++ b/tools/binman/test/268_null.dts
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ pad-byte = <0xff>;
+ u-boot {
+ };
+ null {
+ size = <4>;
+ };
+ u-boot-img {
+ };
+ };
+};
diff --git a/tools/binman/test/269_overlap.dts b/tools/binman/test/269_overlap.dts
new file mode 100644
index 00000000000..f949b8b359f
--- /dev/null
+++ b/tools/binman/test/269_overlap.dts
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot {
+ };
+
+ inset {
+ type = "fill";
+ fill-byte = [61];
+ offset = <1>;
+ size = <2>;
+ overlap;
+ };
+ };
+};
diff --git a/tools/binman/test/270_overlap_null.dts b/tools/binman/test/270_overlap_null.dts
new file mode 100644
index 00000000000..feed9ec8920
--- /dev/null
+++ b/tools/binman/test/270_overlap_null.dts
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ section {
+ u-boot {
+ };
+
+ null {
+ offset = <1>;
+ size = <2>;
+ overlap;
+ };
+ };
+
+ fmap {
+ };
+ };
+};
diff --git a/tools/binman/test/271_overlap_bad.dts b/tools/binman/test/271_overlap_bad.dts
new file mode 100644
index 00000000000..f2818021144
--- /dev/null
+++ b/tools/binman/test/271_overlap_bad.dts
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot {
+ };
+
+ inset {
+ type = "fill";
+ fill-byte = [61];
+ offset = <0x10>;
+ size = <2>;
+ overlap;
+ };
+ };
+};
diff --git a/tools/binman/test/272_overlap_no_size.dts b/tools/binman/test/272_overlap_no_size.dts
new file mode 100644
index 00000000000..4517536f2e6
--- /dev/null
+++ b/tools/binman/test/272_overlap_no_size.dts
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot {
+ };
+
+ inset {
+ type = "fill";
+ fill-byte = [61];
+ overlap;
+ };
+ };
+};
diff --git a/tools/binman/test/273_blob_symbol.dts b/tools/binman/test/273_blob_symbol.dts
new file mode 100644
index 00000000000..87b0aba2120
--- /dev/null
+++ b/tools/binman/test/273_blob_symbol.dts
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ blob {
+ filename = "blob_syms.bin";
+ write-symbols;
+ elf-filename = "blob_syms";
+ elf-base-sym = "__my_start_sym";
+ };
+
+ inset {
+ type = "null";
+ offset = <4>;
+ size = <8>;
+ overlap;
+ };
+ };
+};
diff --git a/tools/binman/test/274_offset_from_elf.dts b/tools/binman/test/274_offset_from_elf.dts
new file mode 100644
index 00000000000..e3372fc7c3d
--- /dev/null
+++ b/tools/binman/test/274_offset_from_elf.dts
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ blob: blob {
+ filename = "blob_syms.bin";
+ elf-filename = "blob_syms";
+ elf-base-sym = "__my_start_sym";
+ };
+
+ inset {
+ type = "null";
+ offset-from-elf = <&blob>, "val3", <0>;
+ size = <4>;
+ overlap;
+ };
+
+ inset2 {
+ type = "null";
+ offset-from-elf = <&blob>, "val3", <4>;
+ size = <4>;
+ overlap;
+ };
+ };
+};
diff --git a/tools/binman/test/275_fit_align.dts b/tools/binman/test/275_fit_align.dts
new file mode 100644
index 00000000000..c7b06e390fa
--- /dev/null
+++ b/tools/binman/test/275_fit_align.dts
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ fit {
+ description = "test desc";
+ #address-cells = <1>;
+ fit,external-offset = <1024>;
+ fit,align = <1024>;
+
+ images {
+ u-boot {
+ description = "test u-boot";
+ type = "standalone";
+ arch = "arm64";
+ os = "u-boot";
+ compression = "none";
+ load = <00000000>;
+ entry = <00000000>;
+
+ u-boot-nodtb {
+ };
+ };
+
+ fdt-1 {
+ description = "test fdt";
+ type = "flat_dt";
+ compression = "none";
+
+ u-boot-dtb {
+ };
+ };
+
+ fdt-2 {
+ description = "test fdt";
+ type = "flat_dt";
+ compression = "none";
+
+ u-boot-dtb {
+ };
+ };
+ };
+
+ configurations {
+ default = "config-1";
+ config-1 {
+ description = "test config";
+ fdt = "fdt-1";
+ firmware = "u-boot";
+ };
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/276_fit_firmware_loadables.dts b/tools/binman/test/276_fit_firmware_loadables.dts
new file mode 100644
index 00000000000..2f79cdc9bb8
--- /dev/null
+++ b/tools/binman/test/276_fit_firmware_loadables.dts
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ fit {
+ description = "test desc";
+ #address-cells = <1>;
+ fit,fdt-list = "of-list";
+
+ images {
+ u-boot {
+ description = "test u-boot";
+ type = "standalone";
+ arch = "arm64";
+ os = "u-boot";
+ compression = "none";
+ load = <0x00000000>;
+ entry = <0x00000000>;
+
+ u-boot-nodtb {
+ };
+ };
+ tee {
+ description = "test tee";
+ type = "tee";
+ arch = "arm64";
+ os = "tee";
+ compression = "none";
+ load = <0x00200000>;
+
+ tee-os {
+ optional;
+ };
+ };
+ @atf-SEQ {
+ fit,operation = "split-elf";
+ description = "test tf-a";
+ type = "firmware";
+ arch = "arm64";
+ os = "arm-trusted-firmware";
+ compression = "none";
+ fit,load;
+ fit,entry;
+ fit,data;
+
+ atf-bl31 {
+ };
+ };
+ @fdt-SEQ {
+ description = "test fdt";
+ type = "flat_dt";
+ compression = "none";
+ };
+ };
+
+ configurations {
+ default = "@conf-uboot-DEFAULT-SEQ";
+ @conf-uboot-SEQ {
+ description = "uboot config";
+ fdt = "fdt-SEQ";
+ fit,firmware = "u-boot";
+ fit,loadables;
+ };
+ @conf-atf-SEQ {
+ description = "atf config";
+ fdt = "fdt-SEQ";
+ fit,firmware = "atf-1", "u-boot";
+ fit,loadables;
+ };
+ @conf-missing-uboot-SEQ {
+ description = "missing uboot config";
+ fdt = "fdt-SEQ";
+ fit,firmware = "missing-1", "u-boot";
+ fit,loadables;
+ };
+ @conf-missing-atf-SEQ {
+ description = "missing atf config";
+ fdt = "fdt-SEQ";
+ fit,firmware = "missing-1", "atf-1", "u-boot";
+ fit,loadables;
+ };
+ @conf-missing-tee-SEQ {
+ description = "missing tee config";
+ fdt = "fdt-SEQ";
+ fit,firmware = "atf-1", "u-boot", "tee";
+ fit,loadables;
+ };
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/277_replace_fit_sibling.dts b/tools/binman/test/277_replace_fit_sibling.dts
new file mode 100644
index 00000000000..fc941a80816
--- /dev/null
+++ b/tools/binman/test/277_replace_fit_sibling.dts
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ binman {
+ allow-repack;
+
+ u-boot {
+ };
+
+ blob {
+ filename = "compress";
+ };
+
+ fit {
+ description = "test-desc";
+ #address-cells = <1>;
+
+ images {
+ kernel {
+ description = "Vanilla Linux kernel";
+ type = "kernel";
+ arch = "ppc";
+ os = "linux";
+ compression = "gzip";
+ load = <00000000>;
+ entry = <00000000>;
+ hash-1 {
+ algo = "crc32";
+ };
+ blob-ext {
+ filename = "once";
+ };
+ };
+ fdt-1 {
+ description = "Flattened Device Tree blob";
+ type = "flat_dt";
+ arch = "ppc";
+ compression = "none";
+ hash-1 {
+ algo = "crc32";
+ };
+ u-boot-spl-dtb {
+ };
+ };
+ };
+
+ configurations {
+ default = "conf-1";
+ conf-1 {
+ description = "Boot Linux kernel with FDT blob";
+ kernel = "kernel";
+ fdt = "fdt-1";
+ };
+ };
+ };
+
+ fdtmap {
+ };
+ };
+};
diff --git a/tools/binman/test/278_replace_section_deep.dts b/tools/binman/test/278_replace_section_deep.dts
new file mode 100644
index 00000000000..fba2d7dcf28
--- /dev/null
+++ b/tools/binman/test/278_replace_section_deep.dts
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ binman {
+ allow-repack;
+
+ u-boot-dtb {
+ };
+
+ section {
+ section {
+ blob {
+ filename = "compress";
+ };
+ };
+
+ u-boot {
+ };
+ };
+
+ fdtmap {
+ };
+ };
+};
diff --git a/tools/binman/test/279_x509_cert.dts b/tools/binman/test/279_x509_cert.dts
new file mode 100644
index 00000000000..71238172717
--- /dev/null
+++ b/tools/binman/test/279_x509_cert.dts
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ x509-cert {
+ cert-ca = "IOT2050 Firmware Signature";
+ cert-revision-int = <0>;
+ content = <&u_boot>;
+ };
+
+ u_boot: u-boot {
+ };
+ };
+};
diff --git a/tools/binman/test/280_fit_sign.dts b/tools/binman/test/280_fit_sign.dts
new file mode 100644
index 00000000000..b9f17dc5c0b
--- /dev/null
+++ b/tools/binman/test/280_fit_sign.dts
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <0x100000>;
+ allow-repack;
+
+ fit {
+ description = "U-Boot";
+ offset = <0x10000>;
+ images {
+ u-boot-1 {
+ description = "U-Boot";
+ type = "standalone";
+ arch = "arm64";
+ os = "u-boot";
+ compression = "none";
+ hash-1 {
+ algo = "sha256";
+ };
+ u-boot {
+ };
+ };
+
+ fdt-1 {
+ description = "test.dtb";
+ type = "flat_dt";
+ arch = "arm64";
+ compression = "none";
+ hash-1 {
+ algo = "sha256";
+ };
+ u-boot-spl-dtb {
+ };
+ };
+
+ };
+
+ configurations {
+ default = "conf-1";
+ conf-1 {
+ description = "u-boot with fdt";
+ firmware = "u-boot-1";
+ fdt = "fdt-1";
+ signature-1 {
+ algo = "sha256,rsa4096";
+ key-name-hint = "test_key";
+ sign-images = "firmware", "fdt";
+ };
+
+ };
+ };
+ };
+
+ fdtmap {
+ };
+ };
+};
diff --git a/tools/binman/test/281_sign_non_fit.dts b/tools/binman/test/281_sign_non_fit.dts
new file mode 100644
index 00000000000..e16c954246d
--- /dev/null
+++ b/tools/binman/test/281_sign_non_fit.dts
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <0x100000>;
+ allow-repack;
+
+ u-boot {
+ };
+ fit {
+ description = "U-Boot";
+ offset = <0x10000>;
+ images {
+ u-boot-1 {
+ description = "U-Boot";
+ type = "standalone";
+ arch = "arm64";
+ os = "u-boot";
+ compression = "none";
+ hash-1 {
+ algo = "sha256";
+ };
+ u-boot {
+ };
+ };
+
+ fdt-1 {
+ description = "test.dtb";
+ type = "flat_dt";
+ arch = "arm64";
+ compression = "none";
+ hash-1 {
+ algo = "sha256";
+ };
+ u-boot-spl-dtb {
+ };
+ };
+
+ };
+
+ configurations {
+ default = "conf-1";
+ conf-1 {
+ description = "u-boot with fdt";
+ firmware = "u-boot-1";
+ fdt = "fdt-1";
+ signature-1 {
+ algo = "sha256,rsa4096";
+ key-name-hint = "test_key";
+ sign-images = "firmware", "fdt";
+ };
+
+ };
+ };
+ };
+
+ fdtmap {
+ };
+ };
+};
diff --git a/tools/binman/test/282_symbols_disable.dts b/tools/binman/test/282_symbols_disable.dts
new file mode 100644
index 00000000000..6efa9335041
--- /dev/null
+++ b/tools/binman/test/282_symbols_disable.dts
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ pad-byte = <0xff>;
+ u-boot-spl {
+ no-write-symbols;
+ };
+
+ u-boot {
+ offset = <0x38>;
+ no-expanded;
+ };
+
+ u-boot-spl2 {
+ type = "u-boot-spl";
+ no-write-symbols;
+ };
+ };
+};
diff --git a/tools/binman/test/283_mkimage_special.dts b/tools/binman/test/283_mkimage_special.dts
new file mode 100644
index 00000000000..c234093e6ec
--- /dev/null
+++ b/tools/binman/test/283_mkimage_special.dts
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ mkimage {
+ args = "-T script";
+
+ u-boot {
+ };
+
+ hash {
+ };
+
+ imagename {
+ type = "u-boot";
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/284_fit_fdt_list.dts b/tools/binman/test/284_fit_fdt_list.dts
new file mode 100644
index 00000000000..8885313f5b8
--- /dev/null
+++ b/tools/binman/test/284_fit_fdt_list.dts
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot {
+ };
+ fit {
+ description = "test-desc";
+ #address-cells = <1>;
+ fit,fdt-list-val = "test-fdt1", "test-fdt2";
+
+ images {
+ kernel {
+ description = "Vanilla Linux kernel";
+ type = "kernel";
+ arch = "ppc";
+ os = "linux";
+ compression = "gzip";
+ load = <00000000>;
+ entry = <00000000>;
+ hash-1 {
+ algo = "crc32";
+ };
+ hash-2 {
+ algo = "sha1";
+ };
+ u-boot {
+ };
+ };
+ @fdt-SEQ {
+ description = "fdt-NAME.dtb";
+ type = "flat_dt";
+ compression = "none";
+ hash {
+ algo = "sha256";
+ };
+ };
+ };
+
+ configurations {
+ default = "@config-DEFAULT-SEQ";
+ @config-SEQ {
+ description = "conf-NAME.dtb";
+ firmware = "uboot";
+ loadables = "atf";
+ fdt = "fdt-SEQ";
+ };
+ };
+ };
+ u-boot-nodtb {
+ };
+ };
+};
diff --git a/tools/binman/test/285_spl_expand.dts b/tools/binman/test/285_spl_expand.dts
new file mode 100644
index 00000000000..9c88ccb287b
--- /dev/null
+++ b/tools/binman/test/285_spl_expand.dts
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot-spl {
+ };
+ };
+};
diff --git a/tools/binman/test/286_template.dts b/tools/binman/test/286_template.dts
new file mode 100644
index 00000000000..6980dbfafcc
--- /dev/null
+++ b/tools/binman/test/286_template.dts
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot-img {
+ };
+
+ common_part: template {
+ u-boot {
+ };
+
+ intel-vga {
+ filename = "vga.bin";
+ };
+ };
+
+ first {
+ type = "section";
+ insert-template = <&common_part>;
+
+ u-boot-dtb {
+ };
+ };
+
+ second {
+ type = "section";
+ insert-template = <&common_part>;
+
+ u-boot-dtb {
+ };
+
+ intel-vga {
+ filename = "vga2.bin";
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/287_template_multi.dts b/tools/binman/test/287_template_multi.dts
new file mode 100644
index 00000000000..122bfccd565
--- /dev/null
+++ b/tools/binman/test/287_template_multi.dts
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+/ {
+ binman: binman {
+ multiple-images;
+
+ my_template: template {
+ blob-ext@0 {
+ filename = "my-blob.bin";
+ offset = <0>;
+ };
+ blob-ext@8 {
+ offset = <8>;
+ };
+ };
+
+ image {
+ pad-byte = <0x40>;
+ filename = "my-image.bin";
+ insert-template = <&my_template>;
+ blob-ext@8 {
+ filename = "my-blob2.bin";
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/288_template_fit.dts b/tools/binman/test/288_template_fit.dts
new file mode 100644
index 00000000000..d84dca4ea41
--- /dev/null
+++ b/tools/binman/test/288_template_fit.dts
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ binman: binman {
+ multiple-images;
+
+ my_template: template {
+ fit@0 {
+ images {
+ kernel-1 {
+ };
+ kernel-2 {
+ };
+ };
+ };
+ };
+
+ image {
+ filename = "image.bin";
+ insert-template = <&my_template>;
+
+ fit@0 {
+ description = "desc";
+ configurations {
+ };
+ images {
+ kernel-3 {
+ };
+ kernel-4 {
+ };
+ };
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/289_template_section.dts b/tools/binman/test/289_template_section.dts
new file mode 100644
index 00000000000..8a744a0cf68
--- /dev/null
+++ b/tools/binman/test/289_template_section.dts
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot-img {
+ };
+
+ common_part: template {
+ u-boot {
+ };
+
+ intel-vga {
+ filename = "vga.bin";
+ };
+ };
+
+ first {
+ type = "section";
+ insert-template = <&common_part>;
+
+ u-boot-dtb {
+ };
+ };
+
+ section {
+ second {
+ type = "section";
+ insert-template = <&common_part>;
+
+ u-boot-dtb {
+ };
+
+ intel-vga {
+ filename = "vga2.bin";
+ };
+ };
+ };
+
+ second {
+ type = "section";
+ insert-template = <&common_part>;
+
+ u-boot-dtb {
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/290_mkimage_sym.dts b/tools/binman/test/290_mkimage_sym.dts
new file mode 100644
index 00000000000..2dfd286ad44
--- /dev/null
+++ b/tools/binman/test/290_mkimage_sym.dts
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot-dtb {
+ };
+
+ mkimage {
+ args = "-n test -T script";
+
+ u-boot-spl {
+ };
+
+ u-boot-spl2 {
+ type = "u-boot-spl";
+ };
+ };
+
+ u-boot {
+ };
+ };
+};
diff --git a/tools/binman/test/291_rockchip_tpl.dts b/tools/binman/test/291_rockchip_tpl.dts
new file mode 100644
index 00000000000..269f56e2545
--- /dev/null
+++ b/tools/binman/test/291_rockchip_tpl.dts
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ size = <16>;
+
+ rockchip-tpl {
+ filename = "rockchip-tpl.bin";
+ };
+ };
+};
diff --git a/tools/binman/test/292_mkimage_missing_multiple.dts b/tools/binman/test/292_mkimage_missing_multiple.dts
new file mode 100644
index 00000000000..f84aea49ead
--- /dev/null
+++ b/tools/binman/test/292_mkimage_missing_multiple.dts
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ mkimage {
+ args = "-n test -T script";
+ multiple-data-files;
+
+ blob-ext {
+ filename = "missing.bin";
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/293_ti_board_cfg.dts b/tools/binman/test/293_ti_board_cfg.dts
new file mode 100644
index 00000000000..cda024c1b8c
--- /dev/null
+++ b/tools/binman/test/293_ti_board_cfg.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ ti-board-config {
+ config = "yaml/config.yaml";
+ schema = "yaml/schema.yaml";
+ };
+ };
+};
diff --git a/tools/binman/test/294_ti_board_cfg_combined.dts b/tools/binman/test/294_ti_board_cfg_combined.dts
new file mode 100644
index 00000000000..95ef449cbf4
--- /dev/null
+++ b/tools/binman/test/294_ti_board_cfg_combined.dts
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ binman {
+ ti-board-config {
+ board-cfg {
+ config = "yaml/config.yaml";
+ schema = "yaml/schema.yaml";
+ };
+ sec-cfg {
+ config = "yaml/config.yaml";
+ schema = "yaml/schema.yaml";
+ };
+ rm-cfg {
+ config = "yaml/config.yaml";
+ schema = "yaml/schema.yaml";
+ };
+ pm-cfg {
+ config = "yaml/config.yaml";
+ schema = "yaml/schema.yaml";
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/295_ti_board_cfg_no_type.dts b/tools/binman/test/295_ti_board_cfg_no_type.dts
new file mode 100644
index 00000000000..584b7acc5a4
--- /dev/null
+++ b/tools/binman/test/295_ti_board_cfg_no_type.dts
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ binman {
+ ti-board-config {
+ config = "yaml/config.yaml";
+ schema = "yaml/schema_notype.yaml";
+ };
+ };
+};
diff --git a/tools/binman/test/296_ti_secure.dts b/tools/binman/test/296_ti_secure.dts
new file mode 100644
index 00000000000..941d0ab4ca3
--- /dev/null
+++ b/tools/binman/test/296_ti_secure.dts
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ ti-secure {
+ content = <&unsecure_binary>;
+ };
+ unsecure_binary: blob-ext {
+ filename = "ti_unsecure.bin";
+ };
+ };
+};
diff --git a/tools/binman/test/297_ti_secure_rom.dts b/tools/binman/test/297_ti_secure_rom.dts
new file mode 100644
index 00000000000..1a3eca94255
--- /dev/null
+++ b/tools/binman/test/297_ti_secure_rom.dts
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ ti-secure-rom {
+ content = <&unsecure_binary>;
+ core-opts = <2>;
+ };
+ unsecure_binary: blob-ext {
+ filename = "ti_unsecure.bin";
+ };
+ };
+};
diff --git a/tools/binman/test/298_ti_secure_rom_combined.dts b/tools/binman/test/298_ti_secure_rom_combined.dts
new file mode 100644
index 00000000000..bf872739bc1
--- /dev/null
+++ b/tools/binman/test/298_ti_secure_rom_combined.dts
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ ti-secure-rom {
+ content = <&unsecure_binary>;
+ content-sbl = <&unsecure_binary>;
+ content-sysfw = <&unsecure_binary>;
+ content-sysfw-data = <&unsecure_binary>;
+ content-sysfw-inner-cert = <&unsecure_binary>;
+ content-dm-data = <&unsecure_binary>;
+ combined;
+ sysfw-inner-cert;
+ dm-data;
+ };
+ unsecure_binary: blob-ext {
+ filename = "ti_unsecure.bin";
+ };
+ };
+};
diff --git a/tools/binman/test/299_ti_secure_rom_a.dts b/tools/binman/test/299_ti_secure_rom_a.dts
new file mode 100644
index 00000000000..887138f0e4b
--- /dev/null
+++ b/tools/binman/test/299_ti_secure_rom_a.dts
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ ti-secure-rom {
+ content = <&unsecure_binary>;
+ core = "secure";
+ countersign;
+ };
+ unsecure_binary: blob-ext {
+ filename = "ti_unsecure.bin";
+ };
+ };
+};
diff --git a/tools/binman/test/300_ti_secure_rom_b.dts b/tools/binman/test/300_ti_secure_rom_b.dts
new file mode 100644
index 00000000000..c6d6182158c
--- /dev/null
+++ b/tools/binman/test/300_ti_secure_rom_b.dts
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ ti-secure-rom {
+ content = <&unsecure_binary>;
+ core = "public";
+ };
+ unsecure_binary: blob-ext {
+ filename = "ti_unsecure.bin";
+ };
+ };
+};
diff --git a/tools/binman/test/301_encrypted_no_algo.dts b/tools/binman/test/301_encrypted_no_algo.dts
new file mode 100644
index 00000000000..03f7ffee90f
--- /dev/null
+++ b/tools/binman/test/301_encrypted_no_algo.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ binman {
+ fit {
+ images {
+ u-boot {
+ encrypted {
+ };
+ };
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/302_encrypted_invalid_iv_file.dts b/tools/binman/test/302_encrypted_invalid_iv_file.dts
new file mode 100644
index 00000000000..388a0a6ad90
--- /dev/null
+++ b/tools/binman/test/302_encrypted_invalid_iv_file.dts
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ binman {
+ fit {
+ images {
+ u-boot {
+ encrypted {
+ algo = "some-algo";
+ key-source = "key";
+ iv-filename = "invalid-iv-file";
+ };
+ };
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/303_encrypted_missing_key.dts b/tools/binman/test/303_encrypted_missing_key.dts
new file mode 100644
index 00000000000..d1daaa08851
--- /dev/null
+++ b/tools/binman/test/303_encrypted_missing_key.dts
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ fit {
+ description = "test desc";
+
+ images {
+ u-boot {
+ encrypted {
+ algo = "algo-name";
+ iv-filename = "encrypted-file.iv";
+ };
+ };
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/304_encrypted_key_source.dts b/tools/binman/test/304_encrypted_key_source.dts
new file mode 100644
index 00000000000..884ec508db8
--- /dev/null
+++ b/tools/binman/test/304_encrypted_key_source.dts
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ fit {
+ description = "test desc";
+
+ images {
+ u-boot {
+ encrypted {
+ algo = "algo-name";
+ key-source = "key-source-value";
+ iv-filename = "encrypted-file.iv";
+ };
+ };
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/305_encrypted_key_file.dts b/tools/binman/test/305_encrypted_key_file.dts
new file mode 100644
index 00000000000..efd7ee5f35a
--- /dev/null
+++ b/tools/binman/test/305_encrypted_key_file.dts
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ fit {
+ description = "test desc";
+
+ images {
+ u-boot {
+ encrypted {
+ algo = "algo-name";
+ iv-filename = "encrypted-file.iv";
+ key-filename = "encrypted-file.key";
+ };
+ };
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/306_spl_pubkey_dtb.dts b/tools/binman/test/306_spl_pubkey_dtb.dts
new file mode 100644
index 00000000000..3256ff970cd
--- /dev/null
+++ b/tools/binman/test/306_spl_pubkey_dtb.dts
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ u-boot-spl-pubkey-dtb {
+ algo = "sha384,rsa4096";
+ required = "conf";
+ key-name-hint = "key";
+ };
+ };
+};
diff --git a/tools/binman/test/307_xilinx_bootgen_sign.dts b/tools/binman/test/307_xilinx_bootgen_sign.dts
new file mode 100644
index 00000000000..02acf8652a5
--- /dev/null
+++ b/tools/binman/test/307_xilinx_bootgen_sign.dts
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ xilinx-bootgen {
+ auth-params = "ppk_select=0", "spk_id=0x00000000";
+ pmufw-filename = "pmu-firmware.elf";
+ psk-key-name-hint = "psk";
+ ssk-key-name-hint = "ssk";
+
+ u-boot-spl-nodtb {
+ };
+ u-boot-spl-dtb {
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/308_xilinx_bootgen_sign_enc.dts b/tools/binman/test/308_xilinx_bootgen_sign_enc.dts
new file mode 100644
index 00000000000..5d7ce4c1f5a
--- /dev/null
+++ b/tools/binman/test/308_xilinx_bootgen_sign_enc.dts
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ xilinx-bootgen {
+ auth-params = "ppk_select=0", "spk_id=0x00000000";
+ fsbl-config = "auth_only";
+ keysrc-enc = "efuse_red_key";
+ pmufw-filename = "pmu-firmware.elf";
+ psk-key-name-hint = "psk";
+ ssk-key-name-hint = "ssk";
+
+ u-boot-spl-nodtb {
+ };
+ u-boot-spl-dtb {
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/309_template_phandle.dts b/tools/binman/test/309_template_phandle.dts
new file mode 100644
index 00000000000..c4ec1dd41be
--- /dev/null
+++ b/tools/binman/test/309_template_phandle.dts
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ multiple-images;
+
+ ti_spl_template: template-1 {
+ fit {
+ description = "test-desc";
+ #address-cells = <1>;
+ images {
+ atf {
+ description = "atf";
+ ti-secure {
+ type = "collection";
+ content = <&atf>;
+ keyfile = "key.pem";
+ };
+ atf: atf-bl31 {
+ description = "atf";
+ };
+ };
+ };
+ };
+ };
+
+ image {
+ insert-template = <&ti_spl_template>;
+ fit {
+ images {
+ fdt-0 {
+ description = "fdt";
+ ti-secure {
+ type = "collection";
+ content = <&foo_dtb>;
+ keyfile = "key.pem";
+ };
+ foo_dtb: blob-ext {
+ filename = "vga.bin";
+ };
+ };
+ };
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/310_template_phandle_dup.dts b/tools/binman/test/310_template_phandle_dup.dts
new file mode 100644
index 00000000000..dc86f06463b
--- /dev/null
+++ b/tools/binman/test/310_template_phandle_dup.dts
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ multiple-images;
+
+ ti_spl_template: template-1 {
+ fit {
+ description = "test-desc";
+ #address-cells = <1>;
+ images {
+ atf {
+ description = "atf";
+ ti-secure {
+ type = "collection";
+ content = <&atf>;
+ keyfile = "key.pem";
+ };
+ atf: atf-bl31 {
+ description = "atf";
+ };
+ };
+ };
+ };
+ };
+
+ image {
+ insert-template = <&ti_spl_template>;
+ fit {
+ images {
+ fdt-0 {
+ description = "fdt";
+ ti-secure {
+ type = "collection";
+ content = <&foo_dtb>;
+ keyfile = "key.pem";
+ };
+ foo_dtb: blob-ext {
+ filename = "vga.bin";
+ };
+ };
+ };
+ };
+ };
+
+ image-2 {
+ insert-template = <&ti_spl_template>;
+ fit {
+ images {
+ fdt-0 {
+ description = "fdt";
+ blob-ext {
+ filename = "vga.bin";
+ };
+ };
+ };
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/311_capsule.dts b/tools/binman/test/311_capsule.dts
new file mode 100644
index 00000000000..0a62ef81dd2
--- /dev/null
+++ b/tools/binman/test/311_capsule.dts
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ binman {
+ efi-capsule {
+ image-index = <0x1>;
+ /* Image GUID for testing capsule update */
+ image-guid = "binman-test";
+ hardware-instance = <0x0>;
+
+ blob {
+ filename = "capsule_input.bin";
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/312_capsule_signed.dts b/tools/binman/test/312_capsule_signed.dts
new file mode 100644
index 00000000000..4ab838efedd
--- /dev/null
+++ b/tools/binman/test/312_capsule_signed.dts
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ binman {
+ efi-capsule {
+ image-index = <0x1>;
+ /* Image GUID for testing capsule update */
+ image-guid = "binman-test";
+ hardware-instance = <0x0>;
+ private-key = "key.key";
+ public-key-cert = "key.crt";
+
+ blob {
+ filename = "capsule_input.bin";
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/313_capsule_version.dts b/tools/binman/test/313_capsule_version.dts
new file mode 100644
index 00000000000..19e7e833480
--- /dev/null
+++ b/tools/binman/test/313_capsule_version.dts
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ binman {
+ efi-capsule {
+ image-index = <0x1>;
+ fw-version = <0x2>;
+ /* Image GUID for testing capsule update */
+ image-guid = "binman-test";
+ hardware-instance = <0x0>;
+
+ blob {
+ filename = "capsule_input.bin";
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/314_capsule_signed_ver.dts b/tools/binman/test/314_capsule_signed_ver.dts
new file mode 100644
index 00000000000..649b8ccb2df
--- /dev/null
+++ b/tools/binman/test/314_capsule_signed_ver.dts
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ binman {
+ efi-capsule {
+ image-index = <0x1>;
+ fw-version = <0x2>;
+ /* Image GUID for testing capsule update */
+ image-guid = "binman-test";
+ hardware-instance = <0x0>;
+ private-key = "key.key";
+ public-key-cert = "key.crt";
+
+ blob {
+ filename = "capsule_input.bin";
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/315_capsule_oemflags.dts b/tools/binman/test/315_capsule_oemflags.dts
new file mode 100644
index 00000000000..45853f69c31
--- /dev/null
+++ b/tools/binman/test/315_capsule_oemflags.dts
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ binman {
+ efi-capsule {
+ image-index = <0x1>;
+ /* Image GUID for testing capsule update */
+ image-guid = "binman-test";
+ hardware-instance = <0x0>;
+ oem-flags = <0x8000>;
+
+ blob {
+ filename = "capsule_input.bin";
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/316_capsule_missing_key.dts b/tools/binman/test/316_capsule_missing_key.dts
new file mode 100644
index 00000000000..a14a74ee779
--- /dev/null
+++ b/tools/binman/test/316_capsule_missing_key.dts
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ binman {
+ efi-capsule {
+ image-index = <0x1>;
+ /* Image GUID for testing capsule update */
+ image-guid = "binman-test";
+ hardware-instance = <0x0>;
+ private-key = "tools/binman/test/key.key";
+
+ blob {
+ filename = "capsule_input.bin";
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/317_capsule_missing_index.dts b/tools/binman/test/317_capsule_missing_index.dts
new file mode 100644
index 00000000000..99a54d55c33
--- /dev/null
+++ b/tools/binman/test/317_capsule_missing_index.dts
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ binman {
+ efi-capsule {
+ /* Image GUID for testing capsule update */
+ image-guid = "binman-test";
+ hardware-instance = <0x0>;
+
+ blob {
+ filename = "capsule_input.bin";
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/318_capsule_missing_guid.dts b/tools/binman/test/318_capsule_missing_guid.dts
new file mode 100644
index 00000000000..85d3317ecb5
--- /dev/null
+++ b/tools/binman/test/318_capsule_missing_guid.dts
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ binman {
+ efi-capsule {
+ image-index = <0x1>;
+ hardware-instance = <0x0>;
+
+ blob {
+ filename = "capsule_input.bin";
+ };
+ };
+ };
+};
diff --git a/tools/binman/test/319_capsule_accept.dts b/tools/binman/test/319_capsule_accept.dts
new file mode 100644
index 00000000000..d48e59f859b
--- /dev/null
+++ b/tools/binman/test/319_capsule_accept.dts
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ binman {
+ efi-empty-capsule {
+ /* Image GUID for testing capsule update */
+ image-guid = "binman-test";
+ capsule-type = "accept";
+ };
+ };
+};
diff --git a/tools/binman/test/320_capsule_revert.dts b/tools/binman/test/320_capsule_revert.dts
new file mode 100644
index 00000000000..bd141ef2924
--- /dev/null
+++ b/tools/binman/test/320_capsule_revert.dts
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ binman {
+ efi-empty-capsule {
+ capsule-type = "revert";
+ };
+ };
+};
diff --git a/tools/binman/test/321_capsule_accept_missing_guid.dts b/tools/binman/test/321_capsule_accept_missing_guid.dts
new file mode 100644
index 00000000000..a0088b174c5
--- /dev/null
+++ b/tools/binman/test/321_capsule_accept_missing_guid.dts
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ binman {
+ efi-empty-capsule {
+ capsule-type = "accept";
+ };
+ };
+};
diff --git a/tools/binman/test/322_empty_capsule_type_missing.dts b/tools/binman/test/322_empty_capsule_type_missing.dts
new file mode 100644
index 00000000000..d356168e775
--- /dev/null
+++ b/tools/binman/test/322_empty_capsule_type_missing.dts
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ binman {
+ efi-empty-capsule {
+ /* Image GUID for testing capsule update */
+ image-guid = "binman-test";
+ };
+ };
+};
diff --git a/tools/binman/test/323_capsule_accept_revert_missing.dts b/tools/binman/test/323_capsule_accept_revert_missing.dts
new file mode 100644
index 00000000000..31268b20b88
--- /dev/null
+++ b/tools/binman/test/323_capsule_accept_revert_missing.dts
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ binman {
+ efi-empty-capsule {
+ /* Image GUID for testing capsule update */
+ image-guid = "binman-test";
+ capsule-type = "foo";
+ };
+ };
+};
diff --git a/tools/binman/test/323_ti_board_cfg_phony.dts b/tools/binman/test/323_ti_board_cfg_phony.dts
new file mode 100644
index 00000000000..441296de4fd
--- /dev/null
+++ b/tools/binman/test/323_ti_board_cfg_phony.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ ti-board-config {
+ config = "yaml/config_phony.yaml";
+ schema = "yaml/schema.yaml";
+ };
+ };
+};
diff --git a/tools/binman/test/324_ti_secure_firewall.dts b/tools/binman/test/324_ti_secure_firewall.dts
new file mode 100644
index 00000000000..7ec407fa67b
--- /dev/null
+++ b/tools/binman/test/324_ti_secure_firewall.dts
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ ti-secure {
+ content = <&unsecure_binary>;
+ auth-in-place = <0xa02>;
+
+ firewall-0-2 {
+ id = <0>;
+ region = <2>;
+ control = <0x31a>;
+ permissions = <0xc3ffff>;
+ start_address = <0x0 0x9e800000>;
+ end_address = <0x0 0x9fffffff>;
+ };
+
+ };
+ unsecure_binary: blob-ext {
+ filename = "ti_unsecure.bin";
+ };
+ };
+};
diff --git a/tools/binman/test/325_ti_secure_firewall_missing_property.dts b/tools/binman/test/325_ti_secure_firewall_missing_property.dts
new file mode 100644
index 00000000000..24a0a996250
--- /dev/null
+++ b/tools/binman/test/325_ti_secure_firewall_missing_property.dts
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ binman {
+ ti-secure {
+ content = <&unsecure_binary>;
+ auth-in-place = <0xa02>;
+
+ firewall-0-2 {
+ // id = <0>;
+ // region = <2>;
+ control = <0x31a>;
+ permissions = <0xc3ffff>;
+ start_address = <0x0 0x9e800000>;
+ end_address = <0x0 0x9fffffff>;
+ };
+
+ };
+ unsecure_binary: blob-ext {
+ filename = "ti_unsecure.bin";
+ };
+ };
+};
diff --git a/tools/binman/test/Makefile b/tools/binman/test/Makefile
new file mode 100644
index 00000000000..4d152eee9c0
--- /dev/null
+++ b/tools/binman/test/Makefile
@@ -0,0 +1,99 @@
+#
+# Builds test programs. This is launched from elf_test.BuildElfTestFiles()
+#
+# Copyright (C) 2017 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# SPDX-License-Identifier: GPL-2.0+
+#
+
+HOSTARCH := $(shell uname -m | sed -e s/i.86/x86/ )
+ifeq ($(findstring $(HOSTARCH),"x86" "x86_64"),)
+ifeq ($(findstring $(MAKECMDGOALS),"help" "clean"),)
+ifndef CROSS_COMPILE
+$(error Binman tests need to compile to x86, but the CPU arch of your \
+ machine is $(HOSTARCH). Set CROSS_COMPILE to a suitable cross compiler)
+endif
+endif
+endif
+
+CC = $(CROSS_COMPILE)gcc
+OBJCOPY = $(CROSS_COMPILE)objcopy
+
+VPATH := $(SRC)
+CFLAGS := -march=i386 -m32 -nostdlib -I $(SRC)../../../include -I $(SRC) \
+ -Wl,--no-dynamic-linker
+
+LDS_UCODE := -T $(SRC)u_boot_ucode_ptr.lds
+LDS_BINMAN := -T $(SRC)u_boot_binman_syms.lds
+LDS_BINMAN_BAD := -T $(SRC)u_boot_binman_syms_bad.lds
+LDS_BINMAN_X86 := -T $(SRC)u_boot_binman_syms_x86.lds
+LDS_BINMAN_EMBED := -T $(SRC)u_boot_binman_embed.lds
+LDS_ELF_SECTIONS := -T $(SRC)elf_sections.lds
+LDS_BLOB := -T $(SRC)blob_syms.lds
+
+TARGETS = u_boot_ucode_ptr u_boot_no_ucode_ptr bss_data bss_data_zero \
+ u_boot_binman_syms u_boot_binman_syms.bin u_boot_binman_syms_bad \
+ u_boot_binman_syms_size u_boot_binman_syms_x86 embed_data \
+ u_boot_binman_embed u_boot_binman_embed_sm elf_sections blob_syms.bin
+
+all: $(TARGETS)
+
+u_boot_no_ucode_ptr: CFLAGS += $(LDS_UCODE)
+u_boot_no_ucode_ptr: u_boot_no_ucode_ptr.c
+
+u_boot_ucode_ptr: CFLAGS += $(LDS_UCODE)
+u_boot_ucode_ptr: u_boot_ucode_ptr.c
+
+bss_data: CFLAGS += $(SRC)bss_data.lds
+bss_data: bss_data.c
+
+bss_data_zero: CFLAGS += $(SRC)bss_data_zero.lds
+bss_data_zero: bss_data_zero.c
+
+embed_data: CFLAGS += $(SRC)embed_data.lds
+embed_data: embed_data.c
+
+u_boot_binman_syms.bin: u_boot_binman_syms
+ $(OBJCOPY) -O binary $< -R .note.gnu.build-id $@
+
+u_boot_binman_syms: CFLAGS += $(LDS_BINMAN)
+u_boot_binman_syms: u_boot_binman_syms.c
+
+u_boot_binman_syms_x86: CFLAGS += $(LDS_BINMAN_X86)
+u_boot_binman_syms_x86: u_boot_binman_syms_x86.c
+
+u_boot_binman_syms_bad: CFLAGS += $(LDS_BINMAN_BAD)
+u_boot_binman_syms_bad: u_boot_binman_syms_bad.c
+
+u_boot_binman_syms_size: CFLAGS += $(LDS_BINMAN)
+u_boot_binman_syms_size: u_boot_binman_syms_size.c
+
+u_boot_binman_embed: CFLAGS += $(LDS_BINMAN_EMBED)
+u_boot_binman_embed: u_boot_binman_embed.c
+
+u_boot_binman_embed_sm: CFLAGS += $(LDS_BINMAN_EMBED)
+u_boot_binman_embed_sm: u_boot_binman_embed_sm.c
+
+blob_syms.bin: blob_syms
+ $(OBJCOPY) -O binary $< -R .note.gnu.build-id $@
+
+blob_syms: CFLAGS += $(LDS_BLOB)
+blob_syms: blob_syms.c
+
+elf_sections: CFLAGS += $(LDS_ELF_SECTIONS)
+elf_sections: elf_sections.c
+
+clean:
+ rm -f $(TARGETS)
+
+help:
+ @echo "Makefile for binman test programs"
+ @echo
+ @echo "Intended for use on x86 hosts"
+ @echo
+ @echo "Targets:"
+ @echo
+ @echo -e "\thelp - Print help (this is it!)"
+ @echo -e "\tall - Builds test programs (default targget)"
+ @echo -e "\tclean - Delete output files"
diff --git a/tools/binman/test/blob_syms.c b/tools/binman/test/blob_syms.c
new file mode 100644
index 00000000000..1df8d64353f
--- /dev/null
+++ b/tools/binman/test/blob_syms.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2017 Google, Inc
+ *
+ * Simple program to create some binman symbols. This is used by binman tests.
+ */
+
+#include <linux/kconfig.h>
+#include <binman_sym.h>
+
+DECLARE_BINMAN_MAGIC_SYM;
+
+unsigned long val1 = 123;
+unsigned long val2 = 456;
+binman_sym_declare(unsigned long, inset, offset);
+unsigned long val3 = 789;
+unsigned long val4 = 999;
+binman_sym_declare(unsigned long, inset, size);
diff --git a/tools/binman/test/blob_syms.lds b/tools/binman/test/blob_syms.lds
new file mode 100644
index 00000000000..787e38dd853
--- /dev/null
+++ b/tools/binman/test/blob_syms.lds
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) 2016 Google, Inc
+ */
+
+OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
+OUTPUT_ARCH(i386)
+ENTRY(_start)
+
+SECTIONS
+{
+ . = 0x00000010;
+ _start = .;
+
+ . = ALIGN(4);
+ .text :
+ {
+ __my_start_sym = .;
+ *(.text*)
+ }
+
+ . = ALIGN(4);
+ .binman_sym_table : {
+ __binman_sym_start = .;
+ KEEP(*(SORT(.binman_sym*)));
+ __binman_sym_end = .;
+ }
+ .interp : { *(.interp*) }
+
+}
diff --git a/tools/binman/test/bss_data.c b/tools/binman/test/bss_data.c
new file mode 100644
index 00000000000..7047a3bb014
--- /dev/null
+++ b/tools/binman/test/bss_data.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2016 Google, Inc
+ *
+ * Simple program to create a bss_data region so the symbol can be read
+ * by binutils. This is used by binman tests.
+ */
+
+int bss_data[10];
+
+int main(void)
+{
+ bss_data[2] = 2;
+
+ return 0;
+}
diff --git a/tools/binman/test/bss_data.lds b/tools/binman/test/bss_data.lds
new file mode 100644
index 00000000000..306dab50430
--- /dev/null
+++ b/tools/binman/test/bss_data.lds
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) 2016 Google, Inc
+ */
+
+OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
+OUTPUT_ARCH(i386)
+ENTRY(_start)
+
+SECTIONS
+{
+ . = 0xfffffdf0;
+ _start = .;
+ __bss_size = 10;
+}
diff --git a/tools/binman/test/bss_data_zero.c b/tools/binman/test/bss_data_zero.c
new file mode 100644
index 00000000000..7047a3bb014
--- /dev/null
+++ b/tools/binman/test/bss_data_zero.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2016 Google, Inc
+ *
+ * Simple program to create a bss_data region so the symbol can be read
+ * by binutils. This is used by binman tests.
+ */
+
+int bss_data[10];
+
+int main(void)
+{
+ bss_data[2] = 2;
+
+ return 0;
+}
diff --git a/tools/binman/test/bss_data_zero.lds b/tools/binman/test/bss_data_zero.lds
new file mode 100644
index 00000000000..8fa0210a8f4
--- /dev/null
+++ b/tools/binman/test/bss_data_zero.lds
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) 2016 Google, Inc
+ */
+
+OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
+OUTPUT_ARCH(i386)
+ENTRY(_start)
+
+SECTIONS
+{
+ . = 0xfffffdf0;
+ _start = .;
+ __bss_size = 0;
+}
diff --git a/tools/binman/test/descriptor.bin b/tools/binman/test/descriptor.bin
new file mode 100644
index 00000000000..3d549436c27
--- /dev/null
+++ b/tools/binman/test/descriptor.bin
Binary files differ
diff --git a/tools/binman/test/dev.key b/tools/binman/test/dev.key
new file mode 100644
index 00000000000..b36bad2cfb3
--- /dev/null
+++ b/tools/binman/test/dev.key
@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDYngNWUvXYRXX/
+WEUI7k164fcpv1srXz+u+5Y3Yhouw3kPs+ffvYyHAPfjF7aUIAgezKk/4o7AvsxE
+Rdih3T+0deAd/q/yuqN4Adzt6ImnsO/EqdtYl3Yh+Vck9xWhLd3SAw1++GfSmNMT
+gxlcc/z6z+bIh2tJNtPtRSNNHMmvYYOkBmkfwcjbMXD+fe4vBwYjVrIize+l7Yuv
+1qN2nFlq56pFi8Lj5vOvFyNhZHRvwcpWdUdkx39beNUfwrGhgewOeWngTcY75n7S
+FY45TBR1G2PR90CQvyDinCi9Mm0u5s+1WASQWPblovfD6CPbHQu4GZm+FAs7yUvr
+hA7VCyNxAgMBAAECggEAUbq0uaJNfc8faTtNuMPo2d9eGRNI+8FRTt0/3R+Xj2NT
+TvhrGUD0P4++96Df012OkshXZ3I8uD6E5ZGQ3emTeqwq5kZM7oE64jGZwO3G2k1o
++cO4reFfwgvItHrBX3HlyrI6KljhG1Vr9mW1cOuWXK+KfMiTUylrpo86dYLSGeg3
+7ZlsOPArr4eof/A0iPryQZX6X5POf7k/e9qRFYsOkoRQO8pBL3J4rIKwBl3uBN3K
++FY40vCkd8JyTo2DNfHeIe1XYA9fG2ahjD2qMsw10TUsRRMd5yhonEcJ7VzGzy8m
+MnuMDAr7CwbbLkKi4UfZUl6YDkojqerwLOrxikBqkQKBgQD6sS6asDgwiq5MtstE
+4/PxMrVEsCdkrU+jjQN749qIt/41a6lbp0Pr6aUKKKGs0QbcnCtlpp7qmhvymBcW
+hlqxk2wokKMChv4WLXjZS3DGcOdMglc81y2F+252bToN8vwUfm6DPp9/GKtejA0a
+GP57GeHxoVO7vfDX1F/vZRogRQKBgQDdNCLWOlGWvnKjfgNZHgX+Ou6ZgTSAzy+/
+hRsZPlY5nwO5iD7YkIKvqBdOmfyjlUpHWk2uAcT9pfgzYygvyBRaoQhAYBGkHItt
+slaMxnLd+09wWufoCbgJvFn+wVQxBLcA5PXB98ws0Dq8ZYuo6AOuoRivsSO4lblK
+MW0guBJXPQKBgQDGjf0ukbH/aGfC5Oi8SJvWhuYhYC/jQo2YKUEAKCjXLnuOThZW
+PHXEbUrFcAcVfH0l0B9jJIQrpiHKlAF9Wq6MhQoeWuhxQQAQCrXzzRemZJgd9gIo
+cvlgbBNCgyJ/F9vmU3kuRDRJkv1wJhbee7tbPtXA7pkGUttl5pSRZI87zQKBgQC/
+0ZkwCox72xTQP9MpcYai6nnDta5Q0NnIC+Xu4wakmwcA2WweIlqhdnMXnyLcu/YY
+n+9iqHgpuMXd0eukW62C1cexA13o4TPrYU36b5BmfKprdPlLVzo3fxTPfNjEVSFY
+7jNLC9YLOlrkym3sf53Jzjr5B/RA+d0ewHOwfs6wxQKBgFSyfjx5wtdHK4fO+Z1+
+q3bxouZryM/4CiPCFuw4+aZmRHPmufuNCvfXdF+IH8dM0E9ObwKZAe/aMP/Y+Abx
+Wz9Vm4CP6g7k3DU3INEygyjmIQQDKQ9lFdDnsP9ESzrPbaGxZhc4x2lo7qmeW1BR
+/RuiAofleFkT4s+EhLrfE/v5
+-----END PRIVATE KEY-----
diff --git a/tools/binman/test/elf_sections.c b/tools/binman/test/elf_sections.c
new file mode 100644
index 00000000000..9bcce9af021
--- /dev/null
+++ b/tools/binman/test/elf_sections.c
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Program containing two text sections
+ */
+
+int __attribute__((section(".sram_data"))) data[29];
+
+int __attribute__((section(".sram_code"))) calculate(int x)
+{
+ data[0] = x;
+
+ return x * x;
+}
+
+int main(void)
+{
+ return calculate(123);
+}
diff --git a/tools/binman/test/elf_sections.lds b/tools/binman/test/elf_sections.lds
new file mode 100644
index 00000000000..7b6e932592f
--- /dev/null
+++ b/tools/binman/test/elf_sections.lds
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) 2016 Google, Inc
+ */
+
+OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
+OUTPUT_ARCH(i386)
+ENTRY(_start)
+
+SECTIONS
+{
+ . = 0x00000010;
+ _start = .;
+
+ . = ALIGN(4);
+ .text :
+ {
+ *(.text*)
+ }
+
+ . = 0x00001000;
+ .sram :
+ {
+ *(.sram*)
+ }
+
+ /DISCARD/ : {
+ *(.comment)
+ *(.dyn*)
+ }
+}
diff --git a/tools/binman/test/embed_data.c b/tools/binman/test/embed_data.c
new file mode 100644
index 00000000000..08b68c550f6
--- /dev/null
+++ b/tools/binman/test/embed_data.c
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Simple program including some embedded data that can be accessed by binman.
+ * This is used by binman tests.
+ */
+
+int first[10] = {1};
+int before[2] __attribute__((section(".embed"))) = {2, 3};
+int embed[3] __attribute__((section(".embed"))) = {0x1234, 0x5678};
+int second[10] = {1};
+
+int main(void)
+{
+ return 0;
+}
diff --git a/tools/binman/test/embed_data.lds b/tools/binman/test/embed_data.lds
new file mode 100644
index 00000000000..d416cb21110
--- /dev/null
+++ b/tools/binman/test/embed_data.lds
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2021 Google LLC
+ */
+
+OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
+OUTPUT_ARCH(i386)
+ENTRY(_start)
+
+SECTIONS
+{
+ _start = .;
+ __data_start = .;
+ .data :
+ {
+ . = ALIGN(32);
+ embed_start = .;
+ *(.embed*)
+ embed_end = .;
+ region_size = 0;
+ . = ALIGN(32);
+ *(.data*)
+ }
+}
diff --git a/tools/binman/test/files/1.dat b/tools/binman/test/files/1.dat
new file mode 100644
index 00000000000..a9524706171
--- /dev/null
+++ b/tools/binman/test/files/1.dat
@@ -0,0 +1 @@
+sorry I'm late
diff --git a/tools/binman/test/files/2.dat b/tools/binman/test/files/2.dat
new file mode 100644
index 00000000000..687ea52730d
--- /dev/null
+++ b/tools/binman/test/files/2.dat
@@ -0,0 +1 @@
+Oh, don't bother apologising, I'm sorry you're alive
diff --git a/tools/binman/test/files/ignored_dir.dat/ignore b/tools/binman/test/files/ignored_dir.dat/ignore
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/tools/binman/test/files/ignored_dir.dat/ignore
diff --git a/tools/binman/test/files/not-this-one b/tools/binman/test/files/not-this-one
new file mode 100644
index 00000000000..e71c2250f96
--- /dev/null
+++ b/tools/binman/test/files/not-this-one
@@ -0,0 +1 @@
+this does not have a .dat extension
diff --git a/tools/binman/test/fitimage.bin.gz b/tools/binman/test/fitimage.bin.gz
new file mode 100644
index 00000000000..0a9dcfc4248
--- /dev/null
+++ b/tools/binman/test/fitimage.bin.gz
Binary files differ
diff --git a/tools/binman/test/generated/autoconf.h b/tools/binman/test/generated/autoconf.h
new file mode 100644
index 00000000000..6a23039f469
--- /dev/null
+++ b/tools/binman/test/generated/autoconf.h
@@ -0,0 +1,3 @@
+#define CONFIG_BINMAN 1
+#define CONFIG_SPL_BUILD 1
+#define CONFIG_SPL_BINMAN_SYMBOLS 1
diff --git a/tools/binman/test/ifwi.bin.gz b/tools/binman/test/ifwi.bin.gz
new file mode 100644
index 00000000000..25d72892944
--- /dev/null
+++ b/tools/binman/test/ifwi.bin.gz
Binary files differ
diff --git a/tools/binman/test/key.key b/tools/binman/test/key.key
new file mode 100644
index 00000000000..9de3be14da8
--- /dev/null
+++ b/tools/binman/test/key.key
@@ -0,0 +1,52 @@
+-----BEGIN PRIVATE KEY-----
+MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQCSDLMHq1Jw3U+G
+H2wutSGrT4Xhs5Yy7uhR/rDOiuKTW3zkVdfSIliye3Nnwrl/nNUFzEJ+4t/AiDaJ
+Qk5KddTAJnOkw5SYBvFsTDhMR4HH6AyfzaaVl+AAGOg4LXwZzGYKncgOY5u6ZyMB
+SzHxozJmmoqYaCIi4Iv2VZRZw1YPBoT6sv38RQSET5ci/g+89Sfb85ZPHPu6PLlz
+ZTufG+yzAhIDsIvNpt2YlCnQ1TqoZxXsztxN1bKIP68xvlAQHSAB8+x4y0tYPE1I
+UT1DK22FMgz5iyBp6ksFaqI06fITtJjPKG13z8sXXgb4/rJ5I0lhsn1ySsHQ0zLw
+/CX4La2/VMA0Bw6GLFRhu/rOycqKfmwLm25bExV8xL6lwFohxbzBQgYr93ujGFyQ
+AXBDOphvZcdXP3CHAcEViVRjrsBWNz8wyf7X8h2FIU16kAd30WuspjmnGuvRZ6Gn
+SNDVO2tbEKvwkg6liYWy4IXtWcvooMtkhYyvFudcxRPgxEUTQ00biYfJ59ukqD7I
+hyT7pq1bZDCVnAt6dUUPWZutrbBacsyITs01hyiPxvAAQ7XRoInmW1DLqHZ+gCJU
+YJ0TaiAI8AmnjypMWRUo19l0zIgPdva8EJ+mz+kKFsZszo1nwuxQL7oUSUCb0hfB
+2k3WxNthBi3QpspUKPtKweIg9ITtIwIDAQABAoICAA9dS6ZGZTVfauLKwnhFcOXT
+R1vfpzDzhjg+CX6pCL4E1WY2C67dEySvrQvg5d/hcV2bR/GOT4izK72T3qWhsMCI
+KwlN0/+MV3CTsiaALUyJAm77VQeOwy9vb1qdml0ibie2wpmU7AiXmgykSvxHNWGq
+52KyLckqgz7mcOVikdah0nKHSwXzgs6iit1RCfnQdqGChjELdQX6Jm5X24ZZCzUn
+xhpiQ8reP5iyGZYRIIsf0SQo/O8pSI9h173tbgHL9paOATYR+Pqu2Vh+x2meE3b8
+NXY5Jy9NSRgoSCk15VQiXyMH90Av+YcbSrN+I+tvhWREQUM5Txt3ZHgKprntoEYE
+XLHAr9cvmIzLNeApt2z/g4t80xFBpIvTG3+SV/rthmq0KGCLW2kPkdujOiIwdNFF
+6fJ6ikphKAbx2NgUY+6AM5AoOh5QPMqvCdsPwO21YG1WoxmiUpNTaYMlR1fDofr/
+A/z2bFH4SiJPkHXRT2KBiJh4ZZWNzP6hOqGy+jreOpWh5IAyn7cKx6t3I28Q9df0
+tK/1PLgR8WWu6G4uHtF5lKL+LgqFCTbSu9JtLQVQntD7Qyd98sF5o23QQWyA19uU
+TVGxtkVaP1y7v+gtC+xMTW9MbGIeJiqMZuZ3xXJVvUNg1/2BDd+VAfPCOq6xGHC7
+s9MFqwUsLCAFFebXC8oVAoIBAQDKGc/o21Ags2t61IJaJjU7YwrsRywhZR+vUz5F
+xtqH4jt9AkdWpDkKbO7xNMQ2OFdnobq5mkM+iW6Jvc1fi4gm1HDyP296nPKZdFrJ
+UgGfTxOhxFLp7gsJ2F0GX5eDJYvqUTBeYB3wrQkCc+t7fLg2oS+gKGIIn2CP07Mx
+Bist3eCcDvL6QIxYS43u+ptTyAItyUYn8KwvCxlIEfjxowsxfhRWuU+Mr4A4hfGB
+64xSI1YU1AYZLMucOtK/mmlscfO8isdcyfea0GJn4VLRnNvAKL5g627IdErWHs3u
+KgYWAXtVKzHrf4hO8dpVgIzO69wAsqZEvKYGmTJhfyvBN9DdAoIBAQC5AA7s2XOX
+raVymhPwEy4I/2w9NuMFmTavOREBp/gA9uaWBdqAWn1rRJiJ5plgdcnOBFPSGBnc
+thkuWBRqkklQ0YPKhNBT48CZGBN7VUsvyTZD1+IXLW1TmY5UGT0p6/dAYkoJHnvX
+TAHl1tfmeHxVCJWV6Shf5LfJJwsAiykxzetkzmeaycy2s9GKCnkc2uFxyhKnfM0/
+SLwTuXQIJvHuErTYA4jjVOG9EGYW2/uKScPBLpB1YTliAUIvByDy6suCN5pVZGT8
+xVLTYec9lXjhfyhysOAjhD3w77Jh7Exft91fEK50k2ZkqYYnh+mYZcnR52msVSBS
+3YL9kK/9dNX/AoIBABcEaZFzqOSQiqUqns31nApvdUcDtBr5kWo+aNE5nJntQiky
+oT1U5soxLeV6xP4H3KyI1uNcllwA+v3lCAbhtVf2ygZNAz1LsrWXct+K33RtZSb/
+XRIXclpksfOP34moNQ8yv/d/qulGS8hju2YNBk3yfaIX91JUFINM8ROcSD6pDnO3
+oCSwRUupDzkwgZBBLz5Xtg3Gc1XIRdDXeyrKDvRMD7Tw1gaH1mqZlq/dS9XvAFbO
+7wLe/zGD4YzA4VDgiYnnpF0FA5Y2NX7vQqds3fo8qbIQHkXmOL+6Mmn1j0viT1Gb
+4cuYcsXK9brXMTI/2oaZ0iXx9la6C+reuPUAjmECggEBAInEvlips0hgW1ZV4cUm
+M2El/dA0YKoZqDyjDcQi9zCYra1JXKe7O603XzVK0iugbBGM7XMG2bOgtG3r0ABx
+QkH6VN/rOk1OzW31HQT6xswmVs/9I/TIsqLQNsrwJLlkbTO4PpQ97FGv27Xy4cNT
+NJwKkYMbKCMJa8hT2ACmoZ3iUIs4nrUJ1Pa2QLRBCmJvqfYYWv35lcur+cvijsNH
+ZWE68wvuzfEllBo87RnW5qLcPfhOGewf5CDU+RmWgHYGXllx2PAAnKgUtpKOVStq
+daPQEyoeCDzKzWnwxvHfjBy4CxYxkQllf5o1GJ+1ukLwgnRbljltB25OYa89IaJp
+cLcCggEAa5vbegzMKYPjR3zcVjnvhRsLXQi1vMtbUqOQ5wYMwGIef4v3QHNoF7EA
+aNpWQ/qgCTQUzl3qoQCkRiVmVBBr60Fs5y7sfA92eBxQIV5hxJftH3vmiKqeWeqm
+ila9DNw84MNAIqI2u6R3K/ur9fkSswDr3nzvFjuheW5V/M/6zAUtJZXr4iUih929
+uhf2dn6pSLR+epJ5023CVaI2zwz+U6PDEATKy9HjeKab3tQMHxQkT/5IWcLqrVTs
+0rMobIgONzQqYDi2sO05YvgNBxvX3pUvqNlthcOtauT8BoE6wxLYm7ZcWYLPn15A
+wR0+2mDpx+HDyu76q3M+KxXG2U8sJg==
+-----END PRIVATE KEY-----
diff --git a/tools/binman/test/key.pem b/tools/binman/test/key.pem
new file mode 100644
index 00000000000..7a7b84a8bba
--- /dev/null
+++ b/tools/binman/test/key.pem
@@ -0,0 +1,32 @@
+-----BEGIN CERTIFICATE-----
+MIIFcTCCA1kCFB/17qhcvpyKhG+jfS2c0qG1yjruMA0GCSqGSIb3DQEBCwUAMHUx
+CzAJBgNVBAYTAk5aMRMwEQYDVQQIDApDYW50ZXJidXJ5MRUwEwYDVQQHDAxDaHJp
+c3RjaHVyY2gxITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEXMBUG
+A1UEAwwOTXkgQ29tbW9uIE5hbWUwHhcNMjMwMjEzMDM1MzMzWhcNMjQwMjEzMDM1
+MzMzWjB1MQswCQYDVQQGEwJOWjETMBEGA1UECAwKQ2FudGVyYnVyeTEVMBMGA1UE
+BwwMQ2hyaXN0Y2h1cmNoMSEwHwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBM
+dGQxFzAVBgNVBAMMDk15IENvbW1vbiBOYW1lMIICIjANBgkqhkiG9w0BAQEFAAOC
+Ag8AMIICCgKCAgEAkgyzB6tScN1Phh9sLrUhq0+F4bOWMu7oUf6wzorik1t85FXX
+0iJYsntzZ8K5f5zVBcxCfuLfwIg2iUJOSnXUwCZzpMOUmAbxbEw4TEeBx+gMn82m
+lZfgABjoOC18GcxmCp3IDmObumcjAUsx8aMyZpqKmGgiIuCL9lWUWcNWDwaE+rL9
+/EUEhE+XIv4PvPUn2/OWTxz7ujy5c2U7nxvsswISA7CLzabdmJQp0NU6qGcV7M7c
+TdWyiD+vMb5QEB0gAfPseMtLWDxNSFE9QytthTIM+YsgaepLBWqiNOnyE7SYzyht
+d8/LF14G+P6yeSNJYbJ9ckrB0NMy8Pwl+C2tv1TANAcOhixUYbv6zsnKin5sC5tu
+WxMVfMS+pcBaIcW8wUIGK/d7oxhckAFwQzqYb2XHVz9whwHBFYlUY67AVjc/MMn+
+1/IdhSFNepAHd9FrrKY5pxrr0Wehp0jQ1TtrWxCr8JIOpYmFsuCF7VnL6KDLZIWM
+rxbnXMUT4MRFE0NNG4mHyefbpKg+yIck+6atW2QwlZwLenVFD1mbra2wWnLMiE7N
+NYcoj8bwAEO10aCJ5ltQy6h2foAiVGCdE2ogCPAJp48qTFkVKNfZdMyID3b2vBCf
+ps/pChbGbM6NZ8LsUC+6FElAm9IXwdpN1sTbYQYt0KbKVCj7SsHiIPSE7SMCAwEA
+ATANBgkqhkiG9w0BAQsFAAOCAgEAJAJoia6Vq4vXP/0bCgW3o9TOMmFYhI/xPxoh
+Gd7was9R7BOrMGO+/3E7DZtjycZYL0r9nOtr9S/BBreuZ4vkk/PSoGaSnG8ST4jC
+Ajk7ew/32RGOgA/oIzgKj1SPkBtvW+x+76sjUkGKsxmABBUhycIY7K0U8McTTfJ7
+gJ164VXmdG7qFMWmRy4Ry9QGXkDsbMSOZ485X7zbphjK5OZXEujP7GMUgg1lP479
+NqC1g+1m/A3PIB767lVYA7APQsrckHdRqOTkK9TYRQ3mvyE2wruhqE6lx8G/UyFh
+RZjZ3lh2bx07UWIlyMabnGDMrM4FCnesqVyVAc8VAbkdXkeJI9r6DdFw+dzIY0P1
+il+MlYpZNwRyNv2W5SCPilyuhuPOSrSnsSHx64puCIvwG/4xA30Jw8nviJuyGSef
+7uE+W7SD9E/hQHi/S9KRsYVoo7a6X9ADiwNsRNzVnuqc7K3mv/C5E9s6uFTNoObe
+fUBA7pL3Fmvc5pYatxTFI85ajBpe/la6AA+7HX/8PXEphmp6GhFCcfsq+DL03vTM
+DqIJL1i/JXggwqvvdcfaSeMDIOIzO89yUGGwwuj9rqMeEY99qDtljgy1EljjrB5i
+0j4Jg4O0OEd2KIOD7nz4do1tLNlRcpysDZeXIiwAI7Dd3wWMsgpOQxs0zqWyqDVq
+mCKa5Tw=
+-----END CERTIFICATE-----
diff --git a/tools/binman/test/u_boot_binman_embed.c b/tools/binman/test/u_boot_binman_embed.c
new file mode 100644
index 00000000000..75874bb6e23
--- /dev/null
+++ b/tools/binman/test/u_boot_binman_embed.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Simple program to embed a devicetree. This is used by binman tests.
+ */
+
+int __attribute__((section(".mydtb"))) dtb_data[4096];
+
+int main(void)
+{
+ return 0;
+}
diff --git a/tools/binman/test/u_boot_binman_embed.lds b/tools/binman/test/u_boot_binman_embed.lds
new file mode 100644
index 00000000000..e213fa8a84c
--- /dev/null
+++ b/tools/binman/test/u_boot_binman_embed.lds
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) 2016 Google, Inc
+ */
+
+OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
+OUTPUT_ARCH(i386)
+ENTRY(_start)
+
+SECTIONS
+{
+ . = 0x00000000;
+ _start = .;
+
+ . = ALIGN(4);
+ .text :
+ {
+ *(.text*)
+ }
+
+ . = ALIGN(4);
+ .data : {
+ dtb_embed_begin = .;
+ KEEP(*(.mydtb));
+ dtb_embed_end = .;
+ }
+ .interp : { *(.interp*) }
+
+}
diff --git a/tools/binman/test/u_boot_binman_embed_sm.c b/tools/binman/test/u_boot_binman_embed_sm.c
new file mode 100644
index 00000000000..ae245d78a6a
--- /dev/null
+++ b/tools/binman/test/u_boot_binman_embed_sm.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Simple program to embed a devicetree. This is used by binman tests.
+ */
+
+int __attribute__((section(".mydtb"))) dtb_data[16];
+
+int main(void)
+{
+ return 0;
+}
diff --git a/tools/binman/test/u_boot_binman_syms.c b/tools/binman/test/u_boot_binman_syms.c
new file mode 100644
index 00000000000..147c90230f8
--- /dev/null
+++ b/tools/binman/test/u_boot_binman_syms.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2017 Google, Inc
+ *
+ * Simple program to create some binman symbols. This is used by binman tests.
+ */
+
+#include <linux/kconfig.h>
+#include <binman_sym.h>
+
+DECLARE_BINMAN_MAGIC_SYM;
+
+binman_sym_declare(unsigned long, u_boot_spl_any, offset);
+binman_sym_declare(unsigned long long, u_boot_spl2, offset);
+binman_sym_declare(unsigned long, u_boot_any, image_pos);
+binman_sym_declare(unsigned long, u_boot_any, size);
diff --git a/tools/binman/test/u_boot_binman_syms.lds b/tools/binman/test/u_boot_binman_syms.lds
new file mode 100644
index 00000000000..825fc3f649f
--- /dev/null
+++ b/tools/binman/test/u_boot_binman_syms.lds
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) 2016 Google, Inc
+ */
+
+OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
+OUTPUT_ARCH(i386)
+ENTRY(_start)
+
+SECTIONS
+{
+ . = 0x00000010;
+ _start = .;
+
+ . = ALIGN(4);
+ .text :
+ {
+ __image_copy_start = .;
+ *(.text*)
+ }
+
+ . = ALIGN(4);
+ .binman_sym_table : {
+ __binman_sym_start = .;
+ KEEP(*(SORT(.binman_sym*)));
+ __binman_sym_end = .;
+ }
+ .interp : { *(.interp*) }
+
+}
diff --git a/tools/binman/test/u_boot_binman_syms_bad.c b/tools/binman/test/u_boot_binman_syms_bad.c
new file mode 120000
index 00000000000..939b2e965f8
--- /dev/null
+++ b/tools/binman/test/u_boot_binman_syms_bad.c
@@ -0,0 +1 @@
+u_boot_binman_syms.c \ No newline at end of file
diff --git a/tools/binman/test/u_boot_binman_syms_bad.lds b/tools/binman/test/u_boot_binman_syms_bad.lds
new file mode 100644
index 00000000000..849d158ac86
--- /dev/null
+++ b/tools/binman/test/u_boot_binman_syms_bad.lds
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) 2016 Google, Inc
+ */
+
+OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
+OUTPUT_ARCH(i386)
+ENTRY(_start)
+
+SECTIONS
+{
+ . = 0x00000000;
+ _start = .;
+
+ . = ALIGN(4);
+ .text :
+ {
+ *(.text*)
+ }
+
+ . = ALIGN(4);
+ .binman_sym_table : {
+ __binman_sym_start = .;
+ KEEP(*(SORT(.binman_sym*)));
+ __binman_sym_end = .;
+ }
+
+}
diff --git a/tools/binman/test/u_boot_binman_syms_size.c b/tools/binman/test/u_boot_binman_syms_size.c
new file mode 100644
index 00000000000..f686892a4da
--- /dev/null
+++ b/tools/binman/test/u_boot_binman_syms_size.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2017 Google, Inc
+ *
+ * Simple program to create some binman symbols. This is used by binman tests.
+ */
+
+#include <linux/kconfig.h>
+#include <binman_sym.h>
+
+DECLARE_BINMAN_MAGIC_SYM;
+
+binman_sym_declare(char, u_boot_spl, pos);
diff --git a/tools/binman/test/u_boot_binman_syms_x86.c b/tools/binman/test/u_boot_binman_syms_x86.c
new file mode 120000
index 00000000000..939b2e965f8
--- /dev/null
+++ b/tools/binman/test/u_boot_binman_syms_x86.c
@@ -0,0 +1 @@
+u_boot_binman_syms.c \ No newline at end of file
diff --git a/tools/binman/test/u_boot_binman_syms_x86.lds b/tools/binman/test/u_boot_binman_syms_x86.lds
new file mode 100644
index 00000000000..9daf86f8338
--- /dev/null
+++ b/tools/binman/test/u_boot_binman_syms_x86.lds
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) 2016 Google, Inc
+ */
+
+OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
+OUTPUT_ARCH(i386)
+ENTRY(_start)
+
+SECTIONS
+{
+ . = 0xffffff00;
+ _start = .;
+
+ . = ALIGN(4);
+ .text :
+ {
+ __image_copy_start = .;
+ *(.text*)
+ }
+
+ . = ALIGN(4);
+ .binman_sym_table : {
+ __binman_sym_start = .;
+ KEEP(*(SORT(.binman_sym*)));
+ __binman_sym_end = .;
+ }
+ .interp : { *(.interp*) }
+
+}
diff --git a/tools/binman/test/u_boot_no_ucode_ptr.c b/tools/binman/test/u_boot_no_ucode_ptr.c
new file mode 100644
index 00000000000..24cdb909d00
--- /dev/null
+++ b/tools/binman/test/u_boot_no_ucode_ptr.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2016 Google, Inc
+ *
+ * Simple program to create a bad _dt_ucode_base_size symbol so that an
+ * error occurs when it is used. This is used by binman tests.
+ */
+
+static unsigned long not__dt_ucode_base_size[2]
+ __attribute__((section(".ucode"))) = {1, 2};
diff --git a/tools/binman/test/u_boot_ucode_ptr.c b/tools/binman/test/u_boot_ucode_ptr.c
new file mode 100644
index 00000000000..243c8e9e1a1
--- /dev/null
+++ b/tools/binman/test/u_boot_ucode_ptr.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2016 Google, Inc
+ *
+ * Simple program to create a _dt_ucode_base_size symbol which can be read
+ * by binutils. This is used by binman tests.
+ */
+
+static unsigned long _dt_ucode_base_size[2]
+ __attribute__((section(".ucode"))) = {1, 2};
diff --git a/tools/binman/test/u_boot_ucode_ptr.lds b/tools/binman/test/u_boot_ucode_ptr.lds
new file mode 100644
index 00000000000..cf4d1b8bbda
--- /dev/null
+++ b/tools/binman/test/u_boot_ucode_ptr.lds
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) 2016 Google, Inc
+ */
+
+OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
+OUTPUT_ARCH(i386)
+ENTRY(_start)
+
+SECTIONS
+{
+ . = 0xfffffe14;
+ _start = .;
+ .ucode : {
+ *(.ucode)
+ }
+ .interp : { *(.interp*) }
+}
diff --git a/tools/binman/test/yaml/config.yaml b/tools/binman/test/yaml/config.yaml
new file mode 100644
index 00000000000..c2be32128bb
--- /dev/null
+++ b/tools/binman/test/yaml/config.yaml
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Test config
+#
+---
+
+main-branch:
+ obj:
+ a: 0x0
+ b: 0
+ arr: [0, 0, 0, 0]
+ another-arr:
+ - # 1
+ c: 0
+ d: 0
+ - # 2
+ c: 0
+ d: 0
diff --git a/tools/binman/test/yaml/config_phony.yaml b/tools/binman/test/yaml/config_phony.yaml
new file mode 100644
index 00000000000..d76fcb3b821
--- /dev/null
+++ b/tools/binman/test/yaml/config_phony.yaml
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Test config
+#
+---
+
+main-branch :
+ obj :
+ a : 0x0
+ b: 0
+ arr: [0, 0, 0, 0]
+ another-arr:
+ - # 1
+ c: 0
+ d: 0
+ - # 2
+ c: 0
+ d: 0
diff --git a/tools/binman/test/yaml/schema.yaml b/tools/binman/test/yaml/schema.yaml
new file mode 100644
index 00000000000..8aa03f3c8ec
--- /dev/null
+++ b/tools/binman/test/yaml/schema.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Test schema
+#
+---
+
+definitions:
+ u8:
+ type: integer
+ minimum: 0
+ maximum: 0xff
+ u16:
+ type: integer
+ minimum: 0
+ maximum: 0xffff
+ u32:
+ type: integer
+ minimum: 0
+ maximum: 0xffffffff
+
+type: object
+properties:
+ main-branch:
+ type: object
+ properties:
+ obj:
+ type: object
+ properties:
+ a:
+ $ref: "#/definitions/u32"
+ b:
+ $ref: "#/definitions/u16"
+ arr:
+ type: array
+ minItems: 4
+ maxItems: 4
+ items:
+ $ref: "#/definitions/u8"
+ another-arr:
+ type: array
+ minItems: 2
+ maxItems: 2
+ items:
+ type: object
+ properties:
+ c:
+ $ref: "#/definitions/u8"
+ d:
+ $ref: "#/definitions/u8"
diff --git a/tools/binman/test/yaml/schema_notype.yaml b/tools/binman/test/yaml/schema_notype.yaml
new file mode 100644
index 00000000000..6b4d98ffa18
--- /dev/null
+++ b/tools/binman/test/yaml/schema_notype.yaml
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Test schema
+#
+---
+
+definitions:
+ u8:
+ type: integer
+ minimum: 0
+ maximum: 0xff
+ u16:
+ type: integer
+ minimum: 0
+ maximum: 0xffff
+ u32:
+ type: integer
+ minimum: 0
+ maximum: 0xffffffff
+
+type: object
+properties:
+ main-branch:
+ type: object
+ properties:
+ obj:
+ type: object
+ properties:
+ a:
+ $ref: "#/definitions/u4"
+ b:
+ $ref: "#/definitions/u16"
+ arr:
+ type: array
+ minItems: 4
+ maxItems: 4
+ items:
+ $ref: "#/definitions/u8"