[coreboot-gerrit] Patch set updated for coreboot: a33c070 cbfstool: If compression fails, warn and use the uncompressed data.

Isaac Christensen (isaac.christensen@se-eng.com) gerrit at coreboot.org
Wed Sep 24 19:18:19 CEST 2014


Isaac Christensen (isaac.christensen at se-eng.com) just uploaded a new patch set to gerrit, which you can find at http://review.coreboot.org/6958

-gerrit

commit a33c070cbe2c9f165e97a1d9f5dd8b2d7c6232b0
Author: Gabe Black <gabeblack at google.com>
Date:   Fri Feb 21 01:01:06 2014 -0800

    cbfstool: If compression fails, warn and use the uncompressed data.
    
    The LZMA compression algorithm, currently the only one available, will fail
    if you ask it to write more data to the output than you've given it space for.
    The code that calls into LZMA allocates an output buffer the same size as the
    input, so if compression increases the size of the output the call will fail.
    The caller(s) were written to assume that the call succeeded and check the
    returned length to see if the size would have increased, but that will never
    happen with LZMA.
    
    Rather than try to rework the LZMA library to dynamically resize the output
    buffer or try to guess the maximal size the data could expand to, this
    change makes the caller simply print a warning and disable compression if the
    call failed for some reason.
    
    This may lead to images that are larger than necessary if compression fails
    for some other reason and the user doesn't notice, but since compression
    errors were ignored entirely until very recently that will hopefully not be
    a problem in practice, and we should be guaranteed to at least produce a
    correct image.
    
    Change-Id: I5f59529c2d48e9c4c2e011018b40ec336c4fcca8
    Signed-off-by: Gabe Black <gabeblack at google.com>
    Reviewed-on: https://chromium-review.googlesource.com/187365
    Reviewed-by: David Hendricks <dhendrix at chromium.org>
    Tested-by: Gabe Black <gabeblack at chromium.org>
    Commit-Queue: Gabe Black <gabeblack at chromium.org>
    (cherry picked from commit b9f622a554d5fb9a9aff839c64e11acb27785f13)
    Signed-off-by: Isaac Christensen <isaac.christensen at se-eng.com>
---
 util/cbfstool/cbfs-mkpayload.c | 53 +++++++++++++++++++-----------------------
 util/cbfstool/cbfs-mkstage.c   | 11 ++++++---
 2 files changed, 32 insertions(+), 32 deletions(-)

diff --git a/util/cbfstool/cbfs-mkpayload.c b/util/cbfstool/cbfs-mkpayload.c
index dabe322..08da954 100644
--- a/util/cbfstool/cbfs-mkpayload.c
+++ b/util/cbfstool/cbfs-mkpayload.c
@@ -201,25 +201,24 @@ int parse_elf_to_payload(const struct buffer *input,
 			segs[segments].type = PAYLOAD_SEGMENT_DATA;
 		segs[segments].load_addr = phdr[i].p_paddr;
 		segs[segments].mem_len = phdr[i].p_memsz;
-		segs[segments].compression = algo;
 		segs[segments].offset = doffset;
 
+		/* If compression failed or made the section larger,
+		   use the original data. */
+
 		int len;
 		if (compress((char *)&header[phdr[i].p_offset],
-			     phdr[i].p_filesz, output->data + doffset, &len)) {
-			buffer_delete(output);
-			return -1;
-		}
-		segs[segments].len = len;
-
-		/* If the compressed section is larger, then use the
-		   original stuff */
-
-		if ((unsigned int)len > phdr[i].p_filesz) {
+			     phdr[i].p_filesz, output->data + doffset, &len) ||
+		    (unsigned int)len > phdr[i].p_filesz) {
+			WARN("Compression failed or would make the data bigger "
+			     "- disabled.\n");
 			segs[segments].compression = 0;
 			segs[segments].len = phdr[i].p_filesz;
 			memcpy(output->data + doffset,
 			       &header[phdr[i].p_offset], phdr[i].p_filesz);
+		} else {
+			segs[segments].compression = algo;
+			segs[segments].len = len;
 		}
 
 		doffset += segs[segments].len;
@@ -264,15 +263,13 @@ int parse_flat_binary_to_payload(const struct buffer *input,
 	segs[0].mem_len = input->size;
 	segs[0].offset = doffset;
 
-	if (compress(input->data, input->size, output->data + doffset, &len)) {
-		buffer_delete(output);
-		return -1;
-	}
-	segs[0].compression = algo;
-	segs[0].len = len;
-
-	if ((unsigned int)len >= input->size) {
-		WARN("Compressing data would make it bigger - disabled.\n");
+	if (!compress(input->data, input->size, output->data + doffset, &len) &&
+	    (unsigned int)len < input->size) {
+		segs[0].compression = algo;
+		segs[0].len = len;
+	} else {
+		WARN("Compression failed or would make the data bigger "
+		     "- disabled.\n");
 		segs[0].compression = 0;
 		segs[0].len = input->size;
 		memcpy(output->data + doffset, input->data, input->size);
@@ -393,15 +390,13 @@ int parse_fv_to_payload(const struct buffer *input,
 	segs[0].mem_len = input->size;
 	segs[0].offset = doffset;
 
-	if (compress(input->data, input->size, output->data + doffset, &len)) {
-		buffer_delete(output);
-		return -1;
-	}
-	segs[0].compression = algo;
-	segs[0].len = len;
-
-	if ((unsigned int)len >= input->size) {
-		WARN("Compressing data would make it bigger - disabled.\n");
+	if (!compress(input->data, input->size, output->data + doffset, &len) &&
+	    (unsigned int)len < input->size) {
+		segs[0].compression = algo;
+		segs[0].len = len;
+	} else {
+		WARN("Compression failed or would make the data bigger "
+		     "- disabled.\n");
 		segs[0].compression = 0;
 		segs[0].len = input->size;
 		memcpy(output->data + doffset, input->data, input->size);
diff --git a/util/cbfstool/cbfs-mkstage.c b/util/cbfstool/cbfs-mkstage.c
index 8c77ee5..4a2f4d8 100644
--- a/util/cbfstool/cbfs-mkstage.c
+++ b/util/cbfstool/cbfs-mkstage.c
@@ -155,12 +155,17 @@ int parse_elf_to_stage(const struct buffer *input, struct buffer *output,
 	 * to fill out the header. This seems backward but it works because
 	 * - the output header is a known size (not always true in many xdr's)
 	 * - we do need to know the compressed output size first
+	 * If compression fails or makes the data bigger, we'll warn about it
+	 * and use the original data.
 	 */
 	if (compress(buffer, data_end - data_start,
 		     (output->data + sizeof(struct cbfs_stage)),
-		     &outlen) < 0) {
-		free(buffer);
-		return -1;
+		     &outlen) < 0 || outlen > data_end - data_start) {
+		WARN("Compression failed or would make the data bigger "
+		     "- disabled.\n");
+		memcpy(output->data + sizeof(struct cbfs_stage),
+		       buffer, data_end - data_start);
+		algo = CBFS_COMPRESS_NONE;
 	}
 	free(buffer);
 
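For reference, the fallback pattern the patch applies at all three call sites can be sketched as a small standalone C program. The try_compress() and emit_segment() names below are invented for illustration, and the toy compressor only mimics the size check; the real cbfstool compression callback and segment bookkeeping are richer, but the control flow is the same: attempt compression into a buffer sized to the input, and on failure or growth fall back to a plain copy with compression disabled.

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the compression callback: like the LZMA path
 * described above, it refuses to emit more bytes than the input occupies.
 * Returns 0 on success, nonzero on failure. */
static int try_compress(const char *in, int in_len, char *out, int *out_len)
{
	/* Pretend "compression" costs a fixed 8-byte header plus half the
	 * input, so tiny inputs would grow and trigger the fallback. */
	int produced = in_len / 2 + 8;
	if (produced > in_len)
		return -1;	/* would overflow the input-sized buffer */
	memset(out, 0, produced);
	*out_len = produced;
	return 0;
}

/* The fallback the patch introduces: on failure, or if the result is not
 * smaller, warn, copy the raw bytes and mark the segment uncompressed
 * instead of aborting the build. */
static void emit_segment(const char *in, int in_len, char *out,
			 int *stored_len, int *compression, int algo)
{
	int len;

	if (try_compress(in, in_len, out, &len) || len >= in_len) {
		fprintf(stderr, "W: Compression failed or would make the "
			"data bigger - disabled.\n");
		memcpy(out, in, in_len);
		*stored_len = in_len;
		*compression = 0;	/* i.e. no compression */
	} else {
		*stored_len = len;
		*compression = algo;
	}
}

int main(void)
{
	char small[4] = "abc", big[4096] = { 0 };
	char out[4096];
	int len, comp;

	emit_segment(small, sizeof(small), out, &len, &comp, 1);
	printf("small: stored %d bytes, compression=%d\n", len, comp);

	emit_segment(big, sizeof(big), out, &len, &comp, 1);
	printf("big:   stored %d bytes, compression=%d\n", len, comp);
	return 0;
}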


