From 4ae2d70521f2a53c5feb9d9f77598c9f7f1654e1 Mon Sep 17 00:00:00 2001 From: Michal Bloch Date: Tue, 18 May 2021 21:40:36 +0200 Subject: Fix some incorrect printf specifiers Change-Id: Iae43060cd82d73c71c36dd9580e2d1110cb916a4 (cherry picked from commit 9b46484c08c05a91997034f503d9a9d1df0a6202) --- ss_engine/SS_UPI.c | 5 +++-- ss_engine/fota_tar.c | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/ss_engine/SS_UPI.c b/ss_engine/SS_UPI.c index 2606414..f534a5d 100755 --- a/ss_engine/SS_UPI.c +++ b/ss_engine/SS_UPI.c @@ -28,6 +28,7 @@ Function Prototypes Mandatory #include #include #include +#include #include #include #include @@ -1736,7 +1737,7 @@ int SS_FSUpdateFile(int ubFileType, ua_dataSS_t * ua_dataSS, int ulPatchCount, f ulResult = SS_Link(NULL, pFsNode->file_new_path, pFsNode->patch_name); if (ulResult != S_SS_SUCCESS) { LOGE("SS_Link Failed, Linkname:[%s], reference file name, index = [%d]:[%s]\n", - pFsNode->file_new_path, pFsNode->patch_name, ulFileIndex); + pFsNode->file_new_path, ulFileIndex, pFsNode->patch_name); } } else { LOGE("Unlink Failed, result = [%d], index = [%d]\n", ulResult, ulFileIndex); @@ -2075,7 +2076,7 @@ size_t SS_FSAvailiableFreeSpace(char *block_name) while (NULL != (ent = getmntent(aFile))) { if (strcmp(ent->mnt_fsname, block_name) == 0) { if (statfs(ent->mnt_dir, &sb) == 0) - LOGL(LOG_SSENGINE, "Total free space = %ld, blocks free = %ld\n", sb.f_bsize * sb.f_bavail, sb.f_bfree); + LOGL(LOG_SSENGINE, "Total free space = %" PRIu64 ", blocks free = %" PRIu64 "\n", sb.f_bsize * sb.f_bavail, sb.f_bfree); } } endmntent(aFile); diff --git a/ss_engine/fota_tar.c b/ss_engine/fota_tar.c index 6b617a5..f37ef1e 100755 --- a/ss_engine/fota_tar.c +++ b/ss_engine/fota_tar.c @@ -1008,7 +1008,7 @@ int tar_extract_folder(char *tar, char *item, char *path) snprintf(name + strlen(name), sizeof(name) - strlen(name), "%s", buff + PREFIX_INDICATOR_BYTE); snprintf(dirPath, sizeof(dirPath), "%s/%s", path, name + folderpathlen); - LOG(" File Name is longer than 100 bytes -Remaining Str [%s]\n Full Str[%s]\n", dirPath); + LOG(" File Name is longer than 100 bytes -Remaining Str [%s]\n Full Str[%s]\n", dirPath, fullname); } else { //LOG(" Extracting file %s\n", fullname); memset(dirPath, 0, sizeof(dirPath)); -- cgit v1.2.3 From 10935a9128964e629372b8108af0514c402bb0e3 Mon Sep 17 00:00:00 2001 From: Michal Bloch Date: Mon, 24 May 2021 21:47:34 +0200 Subject: Compilation quickfix Change-Id: Iff6d71df986d07ada33535134cf8f510d3d60ab7 Signed-off-by: Michal Bloch --- ss_engine/SS_UPI.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ss_engine/SS_UPI.c b/ss_engine/SS_UPI.c index f534a5d..71d10a1 100755 --- a/ss_engine/SS_UPI.c +++ b/ss_engine/SS_UPI.c @@ -28,7 +28,7 @@ Function Prototypes Mandatory #include #include #include -#include +#include #include #include #include -- cgit v1.2.3 From c6895ab6faf1c178613baba53e558db56b1c6030 Mon Sep 17 00:00:00 2001 From: Mateusz Moscicki Date: Mon, 31 Jan 2022 12:46:26 +0100 Subject: Optimize SHA1 calculation during image verification There is no need to read the entire partition into memory when calculating the checksum. In the case of e.g. rootfs this may not event be possible. 
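In other words, the partition is now streamed through the hash context in bounded chunks rather than loaded whole, so peak memory depends on the chunk size instead of the image size. A rough sketch of that loop, using the existing sha1_* helpers (the buffer size and the fp/filesize/digest variables are illustrative; the actual helper is SS_CalculateFileSha, whose 20 MiB malloc'd buffer is shown in a later patch in this series):

    sha1_ctx_t ctx;
    unsigned char buf[64 * 1024];   /* illustrative; the real helper uses a 20 MiB heap buffer */
    long remaining = filesize;

    sha1_init(&ctx);
    while (remaining > 0) {
        size_t n = remaining < (long)sizeof(buf) ? (size_t)remaining : sizeof(buf);
        if (fread(buf, 1, n, fp) != n)
            return E_SS_FAILURE;    /* short read */
        sha1_update(&ctx, buf, n);
        remaining -= (long)n;
    }
    sha1_final(&ctx, (uint32_t *)digest);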
Change-Id: I1165024ac8795cedc8ed9904ed35bea626d6601b --- ss_engine/SS_UPI.c | 28 ++++++++++++---------------- 1 file changed, 12 insertions(+), 16 deletions(-) diff --git a/ss_engine/SS_UPI.c b/ss_engine/SS_UPI.c index 71d10a1..87e3054 100755 --- a/ss_engine/SS_UPI.c +++ b/ss_engine/SS_UPI.c @@ -2256,10 +2256,10 @@ Cleanup: int SS_IMGVerfiyPartition(ua_dataSS_t * ua_dataSS) { - FileInfo source_file; int ulResult = S_SS_SUCCESS; uint8_t source_sha1[SHA_DIGEST_SIZE]; uint8_t target_sha1[SHA_DIGEST_SIZE]; + uint8_t calculated_sha1[SHA_DIGEST_SIZE]; size_t free_space = 0; if (!(ua_dataSS && ua_dataSS->update_cfg && ua_dataSS->parti_info && ua_dataSS->parti_info->ua_blk_name)) { @@ -2289,22 +2289,18 @@ int SS_IMGVerfiyPartition(ua_dataSS_t * ua_dataSS) return E_SS_FAILURE; } - source_file.size = ua_dataSS->update_cfg->soure_img_size; - source_file.data = NULL; - if (SS_LoadPartition(ua_dataSS->parti_info->ua_blk_name, &source_file) == 0) { - if (memcmp(source_file.sha1, source_sha1, SHA_DIGEST_SIZE) == 0) { - LOGL(LOG_SSENGINE, "SS_IMGVerfiyPartition - SHA matches with source [%s] \n", - ua_dataSS->parti_info->ua_blk_name); - } else { // Need not compare with Target sha as once upgraded, it should NOT verify same partition again. - unsigned char actualShaBuffer[41] = { 0, }; - hex_digest(source_file.sha1, actualShaBuffer, SHA_DIGEST_SIZE); - LOGE("SS_IMGVerfiyPartition - SHA mismatch with SRC [%s] Expected [%s] Actual [%s]\n", - ua_dataSS->parti_info->ua_blk_name, ua_dataSS->update_cfg->soure_sha1, actualShaBuffer); - SS_SetUpgradeState(E_SS_IMGSRCCURRUPTED); - ulResult = E_SS_FAILURE; - } + SS_CalculateFileSha(ua_dataSS->parti_info->ua_blk_name, ua_dataSS->update_cfg->soure_img_size, calculated_sha1); + if (memcmp(calculated_sha1, source_sha1, SHA_DIGEST_SIZE) == 0) { + LOGL(LOG_SSENGINE, "SS_IMGVerfiyPartition - SHA matches with source [%s] \n", + ua_dataSS->parti_info->ua_blk_name); + } else { // Need not compare with Target sha as once upgraded, it should NOT verify same partition again. + unsigned char actualShaBuffer[41] = { 0, }; + hex_digest(calculated_sha1, actualShaBuffer, SHA_DIGEST_SIZE); + LOGE("SS_IMGVerfiyPartition - SHA mismatch with SRC [%s] Expected [%s] Actual [%s]\n", + ua_dataSS->parti_info->ua_blk_name, ua_dataSS->update_cfg->soure_sha1, actualShaBuffer); + SS_SetUpgradeState(E_SS_IMGSRCCURRUPTED); + ulResult = E_SS_FAILURE; } - SS_Free(source_file.data); if (ulResult == S_SS_SUCCESS) { if (ua_dataSS->ui_progress) ua_dataSS->ui_progress(ua_dataSS, 100); -- cgit v1.2.3 From 7df2990519bff101d428d24f822cecb47a0904e3 Mon Sep 17 00:00:00 2001 From: Mateusz Moscicki Date: Mon, 31 Jan 2022 12:49:30 +0100 Subject: Add option to skip free space checking With DELTA_IMG_AB update type data is written directly to the target partition, so there is no need to check for available space. 
Change-Id: Id377e850281590176e831b27e24ade6a8fcc1fd5 --- ss_engine/SS_UPI.c | 32 +++++++++++++++++++------------- ss_engine/SS_UPI.h | 3 ++- 2 files changed, 21 insertions(+), 14 deletions(-) diff --git a/ss_engine/SS_UPI.c b/ss_engine/SS_UPI.c index 87e3054..5224ea6 100755 --- a/ss_engine/SS_UPI.c +++ b/ss_engine/SS_UPI.c @@ -29,6 +29,7 @@ Function Prototypes Mandatory #include #include #include +#include #include #include #include @@ -2254,7 +2255,7 @@ Cleanup: return ulResult; } -int SS_IMGVerfiyPartition(ua_dataSS_t * ua_dataSS) +int SS_IMGVerfiyPartition(ua_dataSS_t * ua_dataSS, const char *src_blk_name, bool check_free_space) { int ulResult = S_SS_SUCCESS; uint8_t source_sha1[SHA_DIGEST_SIZE]; @@ -2262,19 +2263,21 @@ int SS_IMGVerfiyPartition(ua_dataSS_t * ua_dataSS) uint8_t calculated_sha1[SHA_DIGEST_SIZE]; size_t free_space = 0; - if (!(ua_dataSS && ua_dataSS->update_cfg && ua_dataSS->parti_info && ua_dataSS->parti_info->ua_blk_name)) { + if (!(ua_dataSS && ua_dataSS->update_cfg && ua_dataSS->parti_info && src_blk_name)) { LOGE("Bad structure or members\n"); SS_SetUpgradeState(E_SS_BAD_PARAMS); return E_SS_FAILURE; } - //We verify twice the image size for BACKUP source, not on Partition. As Patch will be created on RAM - SS_GetAvailableFreeSpace(SS_COMMON_WORKSPACE, &free_space); - if ((free_space) < (2 * ua_dataSS->update_cfg->target_img_size)) { - LOGE("Not enough free space [%d] for twice max size [%d]\n", free_space, - (2 * ua_dataSS->update_cfg->target_img_size)); - SS_SetUpgradeState(E_SS_FSMEMORYERROR); - return E_SS_FAILURE; + if (check_free_space) { + //We verify twice the image size for BACKUP source, not on Partition. As Patch will be created on RAM + SS_GetAvailableFreeSpace(SS_COMMON_WORKSPACE, &free_space); + if ((free_space) < (2 * ua_dataSS->update_cfg->target_img_size)) { + LOGE("Not enough free space [%d] for twice max size [%d]\n", free_space, + (2 * ua_dataSS->update_cfg->target_img_size)); + SS_SetUpgradeState(E_SS_FSMEMORYERROR); + return E_SS_FAILURE; + } } if (ParseSha1(ua_dataSS->update_cfg->soure_sha1, source_sha1) != 0) { @@ -2289,15 +2292,18 @@ int SS_IMGVerfiyPartition(ua_dataSS_t * ua_dataSS) return E_SS_FAILURE; } - SS_CalculateFileSha(ua_dataSS->parti_info->ua_blk_name, ua_dataSS->update_cfg->soure_img_size, calculated_sha1); + /* + * If ab_update is set it means it is a DELTA_IMG_AB update, where the patch + * is applied to data directly read from a partition in the previous slot + */ + SS_CalculateFileSha(src_blk_name, ua_dataSS->update_cfg->soure_img_size, calculated_sha1); if (memcmp(calculated_sha1, source_sha1, SHA_DIGEST_SIZE) == 0) { - LOGL(LOG_SSENGINE, "SS_IMGVerfiyPartition - SHA matches with source [%s] \n", - ua_dataSS->parti_info->ua_blk_name); + LOGL(LOG_SSENGINE, "SS_IMGVerfiyPartition - SHA matches with source [%s] \n", src_blk_name); } else { // Need not compare with Target sha as once upgraded, it should NOT verify same partition again. 
unsigned char actualShaBuffer[41] = { 0, }; hex_digest(calculated_sha1, actualShaBuffer, SHA_DIGEST_SIZE); LOGE("SS_IMGVerfiyPartition - SHA mismatch with SRC [%s] Expected [%s] Actual [%s]\n", - ua_dataSS->parti_info->ua_blk_name, ua_dataSS->update_cfg->soure_sha1, actualShaBuffer); + src_blk_name, ua_dataSS->update_cfg->soure_sha1, actualShaBuffer); SS_SetUpgradeState(E_SS_IMGSRCCURRUPTED); ulResult = E_SS_FAILURE; } diff --git a/ss_engine/SS_UPI.h b/ss_engine/SS_UPI.h index d82c8e6..e24c2f5 100755 --- a/ss_engine/SS_UPI.h +++ b/ss_engine/SS_UPI.h @@ -18,6 +18,7 @@ #ifndef _SS_UPI_H_ #define _SS_UPI_H_ +#include #define DISPLAYRESOLUTION_SIZE 50 @@ -60,7 +61,7 @@ int SS_AppendNode(const char *ubDeltaPath, fs_params ** headparam, fs_params ** const char *new_path, const char *patchname, const char *sha1src, const char *sha1trg, int type, char *patchpath_name); extern int SS_IMGUpdatemain(ua_dataSS_t * ua_dataSS, int update_type); -extern int SS_IMGVerfiyPartition(ua_dataSS_t * ua_dataSS); +extern int SS_IMGVerfiyPartition(ua_dataSS_t * ua_dataSS, const char *src_blk_name, bool check_free_space); extern int SS_FSUpdatemain(ua_dataSS_t * ua_dataSS, int part_idx); extern int SS_FSVerifyPartition(ua_dataSS_t * ua_dataSS, int part_idx); extern int SS_UpdateDeltaIMG(ua_dataSS_t * ua_dataSS, int (*write_to_blkdev) (char *, int, int, char *)); -- cgit v1.2.3 From 2b0a9620607a0bea83a86ffb45e582d2e9b12ee3 Mon Sep 17 00:00:00 2001 From: Mateusz Moscicki Date: Fri, 11 Feb 2022 14:27:18 +0100 Subject: ss_bsdiff: Refactoring Change-Id: I8c5adaacf740f3e43057068c00a5c33f6900736f --- bsdiff/ss_bsdiff.c | 121 ++++++++++++++++++++++++++++++++++++++++------------- 1 file changed, 92 insertions(+), 29 deletions(-) diff --git a/bsdiff/ss_bsdiff.c b/bsdiff/ss_bsdiff.c index 848e7ff..13ef124 100755 --- a/bsdiff/ss_bsdiff.c +++ b/bsdiff/ss_bsdiff.c @@ -29,6 +29,7 @@ // search function modification and the error handling. #define _CRT_SECURE_NO_WARNINGS +#include #include #include #include @@ -37,6 +38,7 @@ #include #include #include +#include #include #include <7zFile.h> @@ -52,6 +54,7 @@ #define PATCH_FILE_FORMAT_MOD // no accumulation of diff and extra in db and eb; immediate write; also write all 3 parts of control stmt at same time #define MULTI_THREADING 1 // only with #define CONST_MEMORY_USAGE or #define MAX_MATCH_SIZE #define TIME_LIMIT_CHECK 300 +#define TEMP_PATCH_NAME "temp_patch" /* Take care : 1) Use either (MAX_MATCH_SIZE or CONST_MEMORY_USAGE) or (none of both). 
@@ -105,6 +108,17 @@ struct data_thread { FILE * pfbz2; }; +enum compression_method { + CM_LZMA, +}; + +struct bsdiff_info { + const char *old_file; + const char *new_file; + const char *patch_file; + enum compression_method comp_method; +}; + struct data_thread data; int Function(int); @@ -181,32 +195,33 @@ static void offtout(off_t x, u_char *buf) buf[7] |= 0x80; } -int create_patch(int argc, const char *argv[], int offset_oldscore) +int create_patch(const char *old_file, const char *new_file, const char *temp_patch, int offset_oldscore) { + assert(old_file); + assert(new_file); + data.num_threads = MULTI_THREADING; data.new = (u_char **)malloc(sizeof(u_char *)*data.num_threads); - if (argc != 4) - errx(1, "usage: %s oldfile newfile patchfile\n", argv[0]); /* Allocate oldsize+1 bytes instead of oldsize bytes to ensure that we never try to malloc(0) and get a NULL pointer */ - if (((data.fd = open(argv[1], O_RDONLY, 0)) < 0) || + if (((data.fd = open(old_file, O_RDONLY, 0)) < 0) || ((data.oldsize = lseek(data.fd, 0, SEEK_END)) == -1) || ((data.old = malloc(data.oldsize + 1)) == NULL) || (lseek(data.fd, 0, SEEK_SET) != 0) || (read(data.fd, data.old, data.oldsize) != data.oldsize) || (close(data.fd) == -1)) - err(1, "%s", argv[1]); + err(1, "%s", old_file); data.I = malloc((data.oldsize + 1) * sizeof(saidx_t)); divsufsort(data.old, data.I, data.oldsize); /* Allocate newsize+1 bytes instead of newsize bytes to ensure that we never try to malloc(0) and get a NULL pointer */ - if (((data.fd = open(argv[2], O_RDONLY, 0)) < 0) || + if (((data.fd = open(new_file, O_RDONLY, 0)) < 0) || ((data.newsize = lseek(data.fd, 0, SEEK_END)) == -1) || (lseek(data.fd, 0, SEEK_SET) != 0)) - err(1, "%s", argv[2]); + err(1, "%s", new_file); data.size_thread = (data.newsize / data.num_threads); unsigned int j; @@ -215,21 +230,21 @@ int create_patch(int argc, const char *argv[], int offset_oldscore) if (((data.new[j] = (u_char *)malloc(sizeof(u_char) * (data.size_thread + 1))) == NULL) || (lseek(data.fd, 0, SEEK_CUR) != j * data.size_thread) || (read(data.fd, data.new[j], data.size_thread) != data.size_thread)) - err(1, "%s", argv[2]); + err(1, "%s", new_file); } else { if (((data.new[j] = (u_char *)malloc(sizeof(u_char) * (data.newsize - (j * data.size_thread) + 1))) == NULL) || (lseek(data.fd, 0, SEEK_CUR) != j * data.size_thread) || (read(data.fd, data.new[j], data.newsize - (j * data.size_thread)) != data.newsize - (j * data.size_thread))) - err(1, "here %s", argv[2]); + err(1, "here %s", new_file); } } if ((close(data.fd) == -1)) - err(1, "%s", argv[2]); + err(1, "%s", new_file); /* Create the patch file */ - if ((data.pf = fopen("temp_patch", "w")) == NULL) - err(1, "%s", "temp_patch"); + if ((data.pf = fopen(temp_patch, "w")) == NULL) + err(1, "%s", temp_patch); /* Header is 0 8 "BSDIFF40" @@ -245,7 +260,7 @@ int create_patch(int argc, const char *argv[], int offset_oldscore) offtout(data.newsize, data.header + 8); if (fwrite(data.header, 16, 1, data.pf) != 1) - err(1, "fwrite(%s)", "temp_patch"); + err(1, "fwrite(%s)", temp_patch); /* Compute the differences, writing ctrl as we go */ data.pfbz2 = data.pf; @@ -261,7 +276,7 @@ int create_patch(int argc, const char *argv[], int offset_oldscore) #ifdef TIME_LIMIT_CHECK if (ret != 0) { printf("bsdiff fails to create delta with offset score %d\n", offset_oldscore); - printf("Old: [%s] -> New: [%s]\n", argv[1], argv[2]); + printf("Old: [%s] -> New: [%s]\n", old_file, new_file); } #endif /* Seek to the beginning, write the header, and close the file */ @@ -269,7 
+284,7 @@ int create_patch(int argc, const char *argv[], int offset_oldscore) err(1, "fseeko"); if (fwrite(data.header, 16, 1, data.pf) != 1) - err(1, "fwrite(%s)", "temp_patch"); + err(1, "fwrite(%s)", temp_patch); if (fclose(data.pf)) err(1, "fclose"); /* Free the memory we used */ @@ -475,7 +490,7 @@ int PrintUserError(char *buffer, int buf_size) #define OUT_BUF_SIZE (1 << 16) -static SRes Encode(ISeqOutStream *outStream, ISeqInStream *inStream, UInt64 fileSize, char *rs) +static SRes lzma_encode(ISeqOutStream *outStream, ISeqInStream *inStream, UInt64 fileSize, char *rs) { CLzmaEncHandle enc; SRes res; @@ -509,8 +524,11 @@ static SRes Encode(ISeqOutStream *outStream, ISeqInStream *inStream, UInt64 file return res; } -int main2(int numArgs, const char *args[], char *rs, int rs_size) +int lzma_compress(const char *input_file, const char *output_file, char *rs, int rs_size) { + assert(patch_file); + assert(rs); + CFileSeqInStream inStream; CFileOutStream outStream; int res; @@ -526,17 +544,17 @@ int main2(int numArgs, const char *args[], char *rs, int rs_size) if (t4 != 4 || t8 != 8) return PrintError(rs, "Incorrect UInt32 or UInt64", rs_size); - if (InFile_Open(&inStream.file, "temp_patch") != 0) + if (InFile_Open(&inStream.file, input_file) != 0) return PrintError(rs, "Can not open input file", rs_size); - if (OutFile_Open(&outStream.file, args[3]) != 0) + if (OutFile_Open(&outStream.file, output_file) != 0) return PrintError(rs, "Can not open output file", rs_size); UInt64 fileSize; File_GetLength(&inStream.file, &fileSize); - res = Encode(&outStream.s, &inStream.s, fileSize, rs); + res = lzma_encode(&outStream.s, &inStream.s, fileSize, rs); File_Close(&outStream.file); File_Close(&inStream.file); @@ -555,28 +573,73 @@ int main2(int numArgs, const char *args[], char *rs, int rs_size) return 0; } -int MY_CDECL main(int numArgs, const char *args[]) +void print_help(const char *arg0) +{ + assert(arg0); + errx(1, "ss_bsdiff Version 5.0\nUsage: %s oldfile newfile patchfile\n", arg0); +} + +int parse_args(struct bsdiff_info *info, int argc, char *argv[]) +{ + assert(info); + assert(argv); + + struct option long_options[] = { + {"compression", optional_argument, NULL, 'c'}, + {0, 0 , 0, 0} + }; + + int opt; + + while ((opt = getopt_long(argc, argv, "c:", long_options, NULL)) != -1) { + switch (opt) { + case 'c': + if (strcmp("lzma", optarg) == 0) + info->comp_method = CM_LZMA; + else { + err(1, "Unknown compression method: %s", optarg); + return -1; + } + } + } + + if (optind + 2 >= argc) { + err(1, "Not enough parameters"); + print_help(argv[0]); + return -1; + } + + info->old_file = argv[optind]; + info->new_file = argv[optind+1]; + info->patch_file = argv[optind+2]; + + return 0; +} + +int MY_CDECL main(int argc, char *argv[]) { char rs[800] = { 0 }; - if (numArgs != 4) - errx(1, "ss_bsdiff Version 5.0\nUsage: ss_bsdiff oldfile newfile patchfile\n"); - int ret = create_patch(numArgs, args, 8); + struct bsdiff_info info; + if (parse_args(&info, argc, argv) != 0) + return 1; + + int ret = create_patch(info.old_file, info.new_file, TEMP_PATCH_NAME, 8); #ifdef TIME_LIMIT_CHECK if (ret != 0) { printf("Trying with offset score 2\n"); - ret = create_patch(numArgs, args, 2); + ret = create_patch(info.old_file, info.new_file, TEMP_PATCH_NAME, 2); } if (ret != 0) { printf("Trying with offset score 0\n"); - ret = create_patch(numArgs, args, 0); + ret = create_patch(info.old_file, info.new_file, TEMP_PATCH_NAME, 0); } if (ret != 0) err(1, "bsdiff fails to create delta within timelimit"); #endif - int 
res = main2(numArgs, args, rs, sizeof(rs)); - if (remove("temp_patch") < 0) - printf("Failed to remove temp_patch\n"); + int res = lzma_compress(TEMP_PATCH_NAME, info.patch_file, rs, sizeof(rs)); + if (remove(TEMP_PATCH_NAME) < 0) + printf("Failed to remove %s\n", TEMP_PATCH_NAME); fputs(rs, stdout); return res; } -- cgit v1.2.3 From 8f93f088bdc02e5261eac99e2c7b6409fce5605d Mon Sep 17 00:00:00 2001 From: Mateusz Moscicki Date: Mon, 14 Feb 2022 18:14:32 +0100 Subject: ss_bsdiff: Add Brotli compression method ss_bsdiff can produce patches compressed with both LZMA (default) and Brotli method. Use the '-c ' to select appropriate method. Change-Id: I05b6c1cf22826530f823b72940af8d8ff7f602c9 --- bsdiff/CMakeLists.txt | 2 +- bsdiff/ss_bsdiff.c | 116 ++++++++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 114 insertions(+), 4 deletions(-) diff --git a/bsdiff/CMakeLists.txt b/bsdiff/CMakeLists.txt index 497299c..ecd7205 100755 --- a/bsdiff/CMakeLists.txt +++ b/bsdiff/CMakeLists.txt @@ -10,7 +10,7 @@ SET(ss_bspatch_SRCS INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/bsdiff) INCLUDE(FindPkgConfig) -pkg_check_modules(${PROJECT_NAME}_pkgs REQUIRED liblzma-tool libdivsufsort) +pkg_check_modules(${PROJECT_NAME}_pkgs REQUIRED liblzma-tool libdivsufsort libbrotlienc) FOREACH(flag ${${PROJECT_NAME}_pkgs_CFLAGS}) SET(EXTRA_CFLAGS "${EXTRA_CFLAGS} ${flag}") diff --git a/bsdiff/ss_bsdiff.c b/bsdiff/ss_bsdiff.c index 13ef124..209f2e9 100755 --- a/bsdiff/ss_bsdiff.c +++ b/bsdiff/ss_bsdiff.c @@ -31,7 +31,10 @@ #define _CRT_SECURE_NO_WARNINGS #include #include +#include +#include #include +#include #include #include #include @@ -45,6 +48,7 @@ #include <7zVersion.h> #include #include +#include #define SUFSORT_MOD // Change suffix sorting algorithm from Qsufsort to Divsufsort //#define ZLIB_MOD // Change compression algorithm @@ -55,6 +59,7 @@ #define MULTI_THREADING 1 // only with #define CONST_MEMORY_USAGE or #define MAX_MATCH_SIZE #define TIME_LIMIT_CHECK 300 #define TEMP_PATCH_NAME "temp_patch" +#define BROTLI_COMPRESSION_QUALITY 9 /* Take care : 1) Use either (MAX_MATCH_SIZE or CONST_MEMORY_USAGE) or (none of both). 
@@ -110,6 +115,7 @@ struct data_thread { enum compression_method { CM_LZMA, + CM_BROTLI, }; struct bsdiff_info { @@ -526,7 +532,8 @@ static SRes lzma_encode(ISeqOutStream *outStream, ISeqInStream *inStream, UInt64 int lzma_compress(const char *input_file, const char *output_file, char *rs, int rs_size) { - assert(patch_file); + assert(input_file); + assert(output_file); assert(rs); CFileSeqInStream inStream; @@ -573,10 +580,96 @@ int lzma_compress(const char *input_file, const char *output_file, char *rs, int return 0; } +int brotli_compress_internal(int input_fd, int output_fd, int quality) +{ + int res = -1; + size_t input_size = lseek(input_fd, 0, SEEK_END); + lseek(input_fd, 0, SEEK_SET); + void *input_file_ptr = mmap(NULL, input_size, PROT_READ, MAP_PRIVATE, input_fd, 0); + + if (input_file_ptr == MAP_FAILED) { + printf("Can not mmap input file: %d - %m\n", errno); + goto exit; + } + + BrotliEncoderState *bstate = BrotliEncoderCreateInstance(NULL, NULL, NULL); + if (bstate == 0) { + printf("Can not create BrotliEncoder instance\n"); + goto exit; + } + size_t max_output_size = BrotliEncoderMaxCompressedSize(input_size); + + if (max_output_size == 0) { + printf("Brotli engine error\n"); + goto exit; + } + + if (ftruncate(output_fd, max_output_size) == -1) { + printf("Can not truncate output file: %d - %m\n", errno); + goto exit; + } + + void *output_file_ptr = mmap(NULL, max_output_size, PROT_WRITE, MAP_SHARED, output_fd, 0); + if (output_file_ptr == MAP_FAILED) { + printf("Can not mmap output file: %d - %m\n", errno); + goto exit; + } + + if(!BrotliEncoderCompress(quality, + BROTLI_DEFAULT_WINDOW, + BROTLI_DEFAULT_MODE, + input_size, + input_file_ptr, + &max_output_size, + output_file_ptr)) { + printf("Compression error\n"); + goto exit; + } + if (ftruncate(output_fd, max_output_size) == -1) { + printf("Can not truncate output file after compression: %d - %m\n", errno); + goto exit; + } + + res = 0; +exit: + if (input_file_ptr) + munmap(input_file_ptr, input_size); + if (output_file_ptr) + munmap(output_file_ptr, max_output_size); + + return res; +} + +int brotli_compress(const char *input_file, const char *output_file, int quality) +{ + assert(input_file); + assert(output_file); + int res = -1; + + int input_fd = open(input_file, O_RDONLY); + if (input_fd < 0) { + printf("Can not open file: %s for read\n", input_file); + return res; + } + int output_fd = open(output_file, O_RDWR | O_CREAT, S_IWUSR | S_IRUSR); + if (output_fd < 0) { + printf("Can not open file: %s for write (%d: %m)\n", output_file, errno); + close(input_fd); + return res; + } + + res = brotli_compress_internal(input_fd, output_fd, quality); + + close(input_fd); + close(output_fd); + + return res; +} + void print_help(const char *arg0) { assert(arg0); - errx(1, "ss_bsdiff Version 5.0\nUsage: %s oldfile newfile patchfile\n", arg0); + errx(1, "ss_bsdiff Version 5.0\nUsage: %s [-c ] oldfile newfile patchfile\n", arg0); } int parse_args(struct bsdiff_info *info, int argc, char *argv[]) @@ -584,6 +677,8 @@ int parse_args(struct bsdiff_info *info, int argc, char *argv[]) assert(info); assert(argv); + info->comp_method = CM_LZMA; // default compression method + struct option long_options[] = { {"compression", optional_argument, NULL, 'c'}, {0, 0 , 0, 0} @@ -596,6 +691,8 @@ int parse_args(struct bsdiff_info *info, int argc, char *argv[]) case 'c': if (strcmp("lzma", optarg) == 0) info->comp_method = CM_LZMA; + else if (strcmp("brotli", optarg) == 0) + info->comp_method = CM_BROTLI; else { err(1, "Unknown compression method: 
%s", optarg); return -1; @@ -637,7 +734,20 @@ int MY_CDECL main(int argc, char *argv[]) if (ret != 0) err(1, "bsdiff fails to create delta within timelimit"); #endif - int res = lzma_compress(TEMP_PATCH_NAME, info.patch_file, rs, sizeof(rs)); + int res = 0; + switch(info.comp_method) { + case CM_LZMA: + res = lzma_compress(TEMP_PATCH_NAME, info.patch_file, rs, sizeof(rs)); + break; + case CM_BROTLI: + res = brotli_compress(TEMP_PATCH_NAME, info.patch_file, BROTLI_COMPRESSION_QUALITY); + break; + default: + printf("Unknown compression method\n"); + res = -1; + break; + } + if (remove(TEMP_PATCH_NAME) < 0) printf("Failed to remove %s\n", TEMP_PATCH_NAME); fputs(rs, stdout); -- cgit v1.2.3 From f403ec8c5981d842b9b118e531ae92ea0081a8c2 Mon Sep 17 00:00:00 2001 From: Mateusz Moscicki Date: Tue, 1 Feb 2022 16:21:10 +0100 Subject: Move SS_CalculateShaFile() to the SS_Common.c file Change-Id: Ice1492bc24ef1c62e43991b0784018c311602def --- ss_engine/SS_Common.c | 64 +++++++++++++++++++++++++++++++++++++++++++++++++++ ss_engine/SS_Common.h | 5 ++++ ss_engine/SS_UPI.c | 63 ++------------------------------------------------ 3 files changed, 71 insertions(+), 61 deletions(-) diff --git a/ss_engine/SS_Common.c b/ss_engine/SS_Common.c index 42a444e..0dc38af 100755 --- a/ss_engine/SS_Common.c +++ b/ss_engine/SS_Common.c @@ -24,6 +24,7 @@ #include #include #include +#include #include "SS_ImageUpdate.h" #include "SS_Engine_Errors.h" @@ -32,6 +33,7 @@ #include "ua_types.h" #include "fota_tar.h" #include "fota_common.h" +#include "sha1.h" void SS_Progress(void *pbUserData, SS_UINT32 uPercent) { @@ -114,3 +116,65 @@ long SS_GetDelta(void *pbUserData, unsigned char *pbBuffer, SS_UINT32 dwStartAdd return ret; } + +int SS_CalculateFileSha(char *filename, long int filesize, unsigned char calculated_sha1[SHA_DIGEST_SIZE]) +{ + + FILE *fp = NULL; + int ulResult = S_SS_SUCCESS; + long int chunk = 20*1024*1024; + char buf[256]; + uint8_t *buffer = NULL; + + fp = fopen(filename, "rb"); + if (fp == NULL) { + strerror_r(errno, buf, sizeof(buf)); + LOGE("failed to open \"%s\": %s\n", filename, buf); + ulResult = E_SS_FAILURE; + goto Cleanup; + } + + buffer = SS_Malloc(chunk); + if (!buffer) { + strerror_r(errno, buf, sizeof(buf)); + LOGE("failed to allocate memory for \"%s\": %s\n", filename, buf); + ulResult = E_SS_FAILURE; + goto Cleanup; + } + + ssize_t bytes_read = 0; + sha1_ctx_t sha_ctx; + sha1_init(&sha_ctx); + + while (filesize > 0) { + if (filesize < chunk) { + bytes_read = fread(buffer, 1, filesize, fp); + if (bytes_read != filesize) { + LOGE("short read of \"%s\" (%ld bytes of %ld)\n", filename, (long)bytes_read, filesize); + ulResult = E_SS_FAILURE; + goto Cleanup; + } + sha1_update(&sha_ctx, buffer, filesize); + break; + } else { + bytes_read = fread(buffer, 1, chunk, fp); + if (bytes_read != chunk) { + LOGE("short read of \"%s\" (%ld bytes of %ld)\n", filename, (long)bytes_read, filesize); + ulResult = E_SS_FAILURE; + goto Cleanup; + } + sha1_update(&sha_ctx, buffer, chunk); + filesize -= chunk; + } + } + + sha1_final(&sha_ctx, (uint32_t *) &calculated_sha1[0]); + +Cleanup: + if (fp) + fclose(fp); + if (buffer) + SS_Free(buffer); + return ulResult; +} + diff --git a/ss_engine/SS_Common.h b/ss_engine/SS_Common.h index 7fb5311..50868b7 100755 --- a/ss_engine/SS_Common.h +++ b/ss_engine/SS_Common.h @@ -63,3 +63,8 @@ struct status_header_page { }; void SS_unicode_to_char(const char *src, char *dest, int size); + +#ifndef SHA_DIGEST_SIZE +#define SHA_DIGEST_SIZE 20 // To avoid creating dependencies on sha1.h 
+#endif +int SS_CalculateFileSha(char *filename, long int filesize, unsigned char calculated_sha1[SHA_DIGEST_SIZE]); diff --git a/ss_engine/SS_UPI.c b/ss_engine/SS_UPI.c index 5224ea6..b2baf5e 100755 --- a/ss_engine/SS_UPI.c +++ b/ss_engine/SS_UPI.c @@ -241,66 +241,6 @@ long SS_GetUPIVersion(unsigned char *ver_str) return E_SS_FAILURE; } -int SS_CalculateFileSha(char *filename, long int filesize, FileInfo * file) -{ - - FILE *fp = NULL; - int ulResult = S_SS_SUCCESS; - long int chunk = 20*1024*1024; - char buf[256]; - - fp = fopen(filename, "rb"); - if (fp == NULL) { - strerror_r(errno, buf, sizeof(buf)); - LOGE("failed to open \"%s\": %s\n", filename, buf); - ulResult = E_SS_FAILURE; - goto Cleanup; - } - - file->data = SS_Malloc(chunk); - if (!file->data) { - strerror_r(errno, buf, sizeof(buf)); - LOGE("failed to allocate memory for \"%s\": %s\n", filename, buf); - ulResult = E_SS_FAILURE; - goto Cleanup; - } - - ssize_t bytes_read = 0; - sha1_ctx_t sha_ctx; - sha1_init(&sha_ctx); - - while (filesize > 0) { - if (filesize < chunk) { - bytes_read = fread(file->data, 1, filesize, fp); - if (bytes_read != filesize) { - LOGE("short read of \"%s\" (%ld bytes of %ld)\n", filename, (long)bytes_read, (long)file->size); - ulResult = E_SS_FAILURE; - goto Cleanup; - } - sha1_update(&sha_ctx, file->data, filesize); - break; - } else { - bytes_read = fread(file->data, 1, chunk, fp); - if (bytes_read != chunk) { - LOGE("short read of \"%s\" (%ld bytes of %ld)\n", filename, (long)bytes_read, (long)file->size); - ulResult = E_SS_FAILURE; - goto Cleanup; - } - sha1_update(&sha_ctx, file->data, chunk); - filesize -= chunk; - } - } - - sha1_final(&sha_ctx, (uint32_t *) &file->sha1); - -Cleanup: - if (fp) - fclose(fp); - if (file->data) - SS_Free(file->data); - return ulResult; -} - int SS_verify_DELTA_image(char *filename) { @@ -379,7 +319,8 @@ int SS_verify_DELTA_image(char *filename) goto Cleanup; } - ulResult = SS_CalculateFileSha(filename, udelta_size, &file); + unsigned char calcualted_sha1[SHA_DIGEST_SIZE]; + ulResult = SS_CalculateFileSha(filename, udelta_size, calcualted_sha1); if (ulResult != S_SS_SUCCESS) goto Cleanup; -- cgit v1.2.3 From 4ebae86e6edd8cc77809fe4d69a8055d846cea28 Mon Sep 17 00:00:00 2001 From: Mateusz Moscicki Date: Tue, 1 Feb 2022 11:37:09 +0100 Subject: Add support for ssdiff patches compressed using Brotli algorithm Add new partition upgrade type - DELTA_IMG_AB A patch is a binary diff between two partition that is compressed with the Brotli algorithm. Brotli has a good compression ratio and allows decompression using a small amount of memory (during tests, it did not exceed 17MB). 
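For reference, a patch in this format can be produced with the ss_bsdiff tool as extended earlier in this series; going by its option parsing, the invocation is along the lines of (file names illustrative):

    ss_bsdiff -c brotli rootfs_old.img rootfs_new.img rootfs.delta

(the long form --compression=brotli works as well). On the device, apply_patch_brotli() then reads the source data from the previous slot's partition (ua_blk_name_previous) and writes the patched image directly to the target partition (ua_blk_name), which is why no intermediate workspace is needed.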
Change-Id: I7f251054f07f7860749dd95a2b90ca018ca5a899 --- CMakeLists.txt | 1 + bsdiff/ss_brotli_patch.c | 347 ++++++++++++++++++++++++++++++++++++++++++++++ bsdiff/ss_brotli_patch.h | 22 +++ packaging/libtota.spec | 1 + ss_engine/SS_Common.c | 6 + ss_engine/SS_Common.h | 1 + ss_engine/SS_PatchDelta.c | 51 +++++++ ss_engine/SS_UPI.c | 14 +- ss_engine/SS_UPI.h | 1 + ss_engine/ua_types.h | 2 + 10 files changed, 437 insertions(+), 9 deletions(-) create mode 100644 bsdiff/ss_brotli_patch.c create mode 100644 bsdiff/ss_brotli_patch.h diff --git a/CMakeLists.txt b/CMakeLists.txt index 325ee98..f63f3c4 100755 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -12,6 +12,7 @@ SET(SRCS ss_engine/fota_log.c ss_engine/fota_tar.c bsdiff/ss_bspatch_common.c + bsdiff/ss_brotli_patch.c ) SET(HEADERS ss_engine/fota_common.h diff --git a/bsdiff/ss_brotli_patch.c b/bsdiff/ss_brotli_patch.c new file mode 100644 index 0000000..dffad65 --- /dev/null +++ b/bsdiff/ss_brotli_patch.c @@ -0,0 +1,347 @@ +/* + * libtota + * + * Copyright (c) 2022 Samsung Electronics Co., Ltd. + * + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "fota_log.h" + +#define PF_OK 0 +#define PF_ERROR_OPEN_FILE 1 +#define PF_ERROR_MMAP 2 +#define PF_ERROR_INVALID_PATCH_FILE 3 +#define PF_ERROR_DECOMPRESSION 4 + +#define BUFF_IN_LEN 4096 +#define BUFF_OUT_LEN 4096 +#define SSINT_LEN 8 + +const char SSDIFF_MAGIC[] = "SSDIFF40"; + +struct bs_data { + int src_fd, dest_fd, patch_fd; + void *src_ptr, *dest_ptr, *patch_ptr; + size_t src_len, dest_len, patch_len; + unsigned char buff_in[BUFF_IN_LEN]; + unsigned char buff_out[BUFF_IN_LEN]; + uint8_t *dest_pos; + uint8_t *src_pos; + size_t available_in, available_out; + const uint8_t *compressed_pos; + uint8_t *decompressed_pos; + size_t total_size; + BrotliDecoderState *bstate; +}; + +static void free_data(struct bs_data *data) +{ + if (data == NULL) + return; + + if (data->src_ptr) munmap(data->src_ptr, data->src_len); + if (data->dest_ptr) munmap(data->dest_ptr, data->dest_len); + if (data->patch_ptr) munmap(data->patch_ptr, data->patch_len); + + if (data->src_fd) close(data->src_fd); + if (data->patch_fd) close(data->patch_fd); + if (data->dest_fd) close(data->dest_fd); +} + +static int open_file(char *file_name, int mode) +{ + assert(file_name); + int fd = open(file_name, mode, S_IWUSR | S_IRUSR); + if (fd < 0) + LOGE("Open file %s error: %m (%d)\n", file_name, errno); + return fd; +} + +static size_t get_file_len(int fd) +{ + assert(fd >= 0); + size_t result = lseek(fd, 0, SEEK_END); + lseek(fd, 0, SEEK_SET); + return result; +} + + +static size_t decompress_bytes(struct bs_data *data, size_t keep_offset) +{ + assert(data); + if (keep_offset > 0) { + memcpy(data->buff_out, data->buff_out + sizeof(data->buff_out) - keep_offset, keep_offset); + } + data->decompressed_pos = data->buff_out + keep_offset; + data->available_out = sizeof(data->buff_out) - keep_offset; + + 
BrotliDecoderResult result = BROTLI_DECODER_RESULT_NEEDS_MORE_OUTPUT; + + result = BrotliDecoderDecompressStream(data->bstate, + &data->available_in, + &data->compressed_pos, + &data->available_out, + &data->decompressed_pos, + &data->total_size); + + if (result == BROTLI_DECODER_RESULT_ERROR) { + LOGE("Decoder error\n"); + return PF_ERROR_DECOMPRESSION; + } else if (result == BROTLI_DECODER_RESULT_NEEDS_MORE_INPUT) { + LOGE("Invalid source file\n"); + return PF_ERROR_DECOMPRESSION; + } + + return PF_OK; +} + +static int open_files(struct bs_data *data, char *source_file, size_t source_size, char *dest_file, size_t dest_size, char *patch_file) +{ + assert(data); + assert(source_file); + assert(dest_file); + assert(patch_file); + + data->src_fd = open_file(source_file, O_RDONLY); + data->patch_fd = open_file(patch_file, O_RDONLY); + data->dest_fd = open_file(dest_file, O_RDWR); + if (data->src_fd < 0 || + data->patch_fd < 0 || + data->dest_fd < 0) + return PF_ERROR_OPEN_FILE; + + data->src_len = source_size; + data->patch_len = get_file_len(data->patch_fd); + data->dest_len = dest_size; + + data->src_ptr = mmap(NULL, data->src_len, PROT_READ, MAP_PRIVATE, data->src_fd, 0); + if (data->src_ptr == MAP_FAILED) { + LOGE("mmap source file error: %m (%d)", errno); + return PF_ERROR_MMAP; + } + + data->patch_ptr = mmap(NULL, data->patch_len, PROT_READ, MAP_PRIVATE, data->patch_fd, 0); + if (data->patch_ptr == MAP_FAILED) { + LOGE("mmap patch file error: %m (%d)", errno); + return PF_ERROR_MMAP; + } + + data->dest_ptr = mmap(NULL, data->dest_len, PROT_WRITE, MAP_SHARED, data->dest_fd, 0); + if (data->dest_ptr == MAP_FAILED) { + LOGE("mmap destination error: %m (%d)\n", errno); + return PF_ERROR_MMAP; + } + + data->compressed_pos = data->patch_ptr; + data->available_in = data->patch_len; + + return PF_OK; +} + +static void init_data(struct bs_data *data) +{ + assert(data); + + data->src_fd = -1; + data->patch_fd = -1; + data->dest_fd = -1; + data->src_ptr = NULL; + data->dest_ptr = NULL; + data->patch_ptr = NULL; + data->src_len = 0; + data->dest_len = 0; + data->patch_len = 0; + data->available_in = 0; + data->compressed_pos = 0; + data->available_out = 0; + data->decompressed_pos = 0; + data->bstate = BrotliDecoderCreateInstance(NULL, NULL, NULL); +} + +static int64_t parse_ssint(unsigned char *buff) +{ + assert(buff); + /* + * From bsdiff 4.0 documentation: + * + * INTEGER type: + * + * offset size data type value + * 0 1 byte x0 + * 1 1 byte x1 + * 2 1 byte x2 + * 3 1 byte x3 + * 4 1 byte x4 + * 5 1 byte x5 + * 6 1 byte x6 + * 7 1 byte x7 + 128 * s + * + * The values x0, x2, x2, x3, x4, x5, x6 are between 0 and 255 (inclusive). + * The value x7 is between 0 and 127 (inclusive). The value s is 0 or 1. + * + * The INTEGER is parsed as: + * (x0 + x1 * 256 + x2 * 256^2 + x3 * 256^3 + x4 * 256^4 + + * x5 * 256^5 + x6 * 256^6 + x7 * 256^7) * (-1)^s + * + * (In other words, an INTEGER is a 64-byte signed integer in sign-magnitude + * format, stored in little-endian byte order.) 
+ */ + int64_t result = *(int64_t*)buff & 0x7fffffff; + if ((buff[7] & 0x80) != 0) + result = -result; + + return result; +} + +int read_header(struct bs_data *data, uint8_t **buff_out_pos) +{ + assert(data); + assert(buff_out_pos); + + *buff_out_pos = data->buff_out; + + if (*buff_out_pos + sizeof(SSDIFF_MAGIC) > data->decompressed_pos || + memcmp(data->buff_out, SSDIFF_MAGIC, sizeof(SSDIFF_MAGIC) - 1) != 0) { + LOGE("Invalid patch file\n"); + return PF_ERROR_INVALID_PATCH_FILE; + } else { + LOGL(LOG_SSENGINE, "Looks like SSDIFF\n"); + } + + *buff_out_pos += sizeof(SSDIFF_MAGIC) - 1; + + if (*buff_out_pos + SSINT_LEN > data->decompressed_pos) { + decompress_bytes(data, data->decompressed_pos - *buff_out_pos); + *buff_out_pos = data->buff_out; + } + + size_t target_size = parse_ssint(*buff_out_pos); + LOGL(LOG_SSENGINE, "target_size: 0x%lx (%ld)\n", target_size, target_size); + + if (target_size != data->dest_len) { + LOGE("Declared target size differs from that read from the patch\n"); + return PF_ERROR_INVALID_PATCH_FILE; + } + + *buff_out_pos += SSINT_LEN; + + return PF_OK; +} + +int apply_patch_brotli(char *source_file, size_t source_size, char *dest_file, size_t dest_size, char *patch_file) +{ + assert(source_file); + assert(dest_file); + assert(patch_file); + + int result; + struct bs_data data; + + init_data(&data); + + if ((result = open_files(&data, source_file, source_size, dest_file, dest_size, patch_file)) != PF_OK) + goto exit; + + if ((result = decompress_bytes(&data, 0)) != PF_OK) + goto exit; + + uint8_t *buff_out_pos; + + if ((result = read_header(&data, &buff_out_pos)) != PF_OK) + goto exit; + + uint64_t total_write = 0; + + while (total_write < data.dest_len) { + /* + * Make sure we can read the block header + */ + if (buff_out_pos + 4*8 > data.decompressed_pos) { + if ((result = decompress_bytes(&data, data.decompressed_pos - buff_out_pos)) != PF_OK) + goto exit; + buff_out_pos = data.buff_out; + } + + /* + * Read the block header + */ + int64_t diff_len = parse_ssint(buff_out_pos+0*8); + int64_t extra_len = parse_ssint(buff_out_pos+1*8); + int64_t old_pos = parse_ssint(buff_out_pos+2*8); + int64_t new_pos = parse_ssint(buff_out_pos+3*8); + buff_out_pos += 4*8; + + /* + * Prepare pointers + */ + data.dest_pos = data.dest_ptr + new_pos; + data.src_pos = data.src_ptr + old_pos; + /* + * Read diff data + */ + int64_t write = 0; + while (write < diff_len) { + if (buff_out_pos >= data.decompressed_pos) { + if ((result = decompress_bytes(&data, 0)) != PF_OK) + goto exit; + buff_out_pos = data.buff_out; + } + while (write < diff_len && buff_out_pos < data.decompressed_pos) { + *data.dest_pos = *(uint8_t*)(data.src_pos) + *(uint8_t*)buff_out_pos; + data.dest_pos++; + data.src_pos++; + buff_out_pos++; + write++; + } + } + total_write += write; + /* + * Read extra data + */ + write = 0; + while (write < extra_len) { + if (buff_out_pos >= data.decompressed_pos) { + if ((result = decompress_bytes(&data, 0)) != PF_OK) + goto exit; + buff_out_pos = data.buff_out; + } + int64_t chunk_size = extra_len - write; + if (buff_out_pos + chunk_size > data.decompressed_pos) { + chunk_size = data.decompressed_pos - buff_out_pos; + } + memcpy(data.dest_pos, buff_out_pos, chunk_size); + data.dest_pos += chunk_size; + buff_out_pos += chunk_size; + write += chunk_size; + } + total_write += write; + } + + result = PF_OK; + +exit: + free_data(&data); + return result; +} diff --git a/bsdiff/ss_brotli_patch.h b/bsdiff/ss_brotli_patch.h new file mode 100644 index 0000000..47694b9 --- /dev/null +++ 
b/bsdiff/ss_brotli_patch.h @@ -0,0 +1,22 @@ +/* + * libtota + * + * Copyright (c) 2022 Samsung Electronics Co., Ltd. + * + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#pragma once + +#include + +extern int apply_patch_brotli(char *source_file, size_t source_size, char *dest_file, size_t dest_size, char *patch_file); diff --git a/packaging/libtota.spec b/packaging/libtota.spec index 9e34d36..424ed84 100755 --- a/packaging/libtota.spec +++ b/packaging/libtota.spec @@ -8,6 +8,7 @@ Source0: %{name}-%{version}.tar.gz BuildRequires: cmake BuildRequires: pkgconfig(liblzma-tool) +BuildRequires: libbrotli-devel %description Fota update agent which update firmware using delta files diff --git a/ss_engine/SS_Common.c b/ss_engine/SS_Common.c index 0dc38af..6e31854 100755 --- a/ss_engine/SS_Common.c +++ b/ss_engine/SS_Common.c @@ -178,3 +178,9 @@ Cleanup: return ulResult; } +void hex_digest(char * sha1, char *buffer, int size) +{ + for (int i = 0; i < size; i++) { + snprintf(&buffer[i * 2], (size * 2) - (i * 2) + 1, "%02x", sha1[i]); + } +} diff --git a/ss_engine/SS_Common.h b/ss_engine/SS_Common.h index 50868b7..6e75a46 100755 --- a/ss_engine/SS_Common.h +++ b/ss_engine/SS_Common.h @@ -68,3 +68,4 @@ void SS_unicode_to_char(const char *src, char *dest, int size); #define SHA_DIGEST_SIZE 20 // To avoid creating dependencies on sha1.h #endif int SS_CalculateFileSha(char *filename, long int filesize, unsigned char calculated_sha1[SHA_DIGEST_SIZE]); +void hex_digest(char * sha1, char *buffer, int size); diff --git a/ss_engine/SS_PatchDelta.c b/ss_engine/SS_PatchDelta.c index ce78f69..2c85344 100755 --- a/ss_engine/SS_PatchDelta.c +++ b/ss_engine/SS_PatchDelta.c @@ -31,6 +31,8 @@ #include "SS_PatchDelta.h" #include "fota_common.h" #include "SS_Engine_Errors.h" +#include "ss_brotli_patch.h" +#include "SS_Common.h" extern void *SS_Malloc(unsigned int size); @@ -980,3 +982,52 @@ Cleanup: return result; } + +int SS_UpdateDeltaIMGAB(ua_dataSS_t * ua_dataSS, int (*write_to_blkdev) (char *, int, int, char *)) +{ + int result = S_SS_SUCCESS; + uint8_t target_sha1[SHA_DIGEST_SIZE]; + uint8_t source_sha1[SHA_DIGEST_SIZE]; + uint8_t current_target_sha1[SHA_DIGEST_SIZE]; + + if (ParseSha1(ua_dataSS->update_cfg->target_sha1, target_sha1) != 0) { + LOGE("failed to parse tgt-sha1 \"%s\"\n", ua_dataSS->update_cfg->target_sha1); + return E_SS_FAILURE; + } + + if (ParseSha1(ua_dataSS->update_cfg->soure_sha1, source_sha1) != 0) { + LOGE("failed to parse Src-sha1 \"%s\"\n", ua_dataSS->update_cfg->soure_sha1); + return E_SS_FAILURE; + } + + SS_CalculateFileSha(ua_dataSS->parti_info->ua_blk_name, + ua_dataSS->update_cfg->target_img_size, + current_target_sha1); + + /* source_file.size = ua_dataSS->update_cfg->soure_img_size; */ + /* source_file.data = NULL; */ + if (memcmp(target_sha1, current_target_sha1, SHA_DIGEST_SIZE) == 0) { + LOGL(LOG_SSENGINE, "SS_UpdateDeltaIMGAB - Patch already applied\n"); + return S_SS_SUCCESS; + } + + SS_CalculateFileSha(ua_dataSS->parti_info->ua_blk_name_previous, + 
ua_dataSS->update_cfg->soure_img_size, + current_target_sha1); + + if (memcmp(source_sha1, current_target_sha1, SHA_DIGEST_SIZE) != 0) { + unsigned char actualShaBuffer[41] = { 0, }; + hex_digest(current_target_sha1, actualShaBuffer, SHA_DIGEST_SIZE); + LOGL(LOG_SSENGINE, "SS_UpdateDeltaIMGAB - Source partition was corrupted. SRC: [%s] Expected [%s] Actual [%s]\n", + ua_dataSS->parti_info->ua_blk_name_previous, ua_dataSS->update_cfg->soure_sha1, actualShaBuffer); + return E_SS_FAILURE; + } + + apply_patch_brotli(ua_dataSS->parti_info->ua_blk_name_previous, + ua_dataSS->update_cfg->soure_img_size, + ua_dataSS->parti_info->ua_blk_name, + ua_dataSS->update_cfg->target_img_size, + SS_PATCHFILE_SOURCE); + + return result; +} diff --git a/ss_engine/SS_UPI.c b/ss_engine/SS_UPI.c index b2baf5e..4c6b19c 100755 --- a/ss_engine/SS_UPI.c +++ b/ss_engine/SS_UPI.c @@ -41,6 +41,7 @@ Function Prototypes Mandatory #include "SS_PatchDelta.h" #include "SS_Engine_Errors.h" #include "SS_FSUpdate.h" +#include "ss_bspatch_common.h" int gtotalFSCnt = 0; int FS_UpgradeState = E_SS_FAILURE; @@ -96,13 +97,6 @@ int SS_Do_Memory_Profiling() } } #endif -static void hex_digest(char * sha1, char *buffer, int size) -{ - int i = 0; - for ( i = 0; i < size; i++){ - snprintf(&buffer[i * 2], (size * 2) - (i * 2) + 1, "%02x", sha1[i]); - } -} #ifdef TIME_PROFILING static char ts1[256]; static double ts2; @@ -2320,7 +2314,7 @@ int SS_IMGUpdatemain(ua_dataSS_t * ua_dataSS, int update_type) //SS_FSUpdatePar if (update_type == FULL_IMG && ua_dataSS->update_data->ua_temp_path) ulResult = SS_MoveFile(SS_PATCHFILE_SOURCE, ua_dataSS->update_data->ua_temp_path); else if ((ua_dataSS->update_cfg->update_type == DELTA_IMG && ua_dataSS->write_data_to_blkdev) - || ua_dataSS->update_cfg->update_type == EXTRA) { + || ua_dataSS->update_cfg->update_type == EXTRA || ua_dataSS->update_cfg->update_type == DELTA_IMG_AB) { FILE *fp = NULL; char buf[14] = { 0, }; //to store zImage-delta magic keyword @@ -2338,7 +2332,9 @@ int SS_IMGUpdatemain(ua_dataSS_t * ua_dataSS, int update_type) //SS_FSUpdatePar LOGL(LOG_SSENGINE, "short read of \"%s\" (%ld bytes of %ld)\n", SS_PATCHFILE_SOURCE, (long)bytes_read, (long)13); fclose(fp); - if (strncmp(buf, SS_KERNEL_MAGIC, sizeof(buf) / sizeof(char)) == 0) + if (update_type == DELTA_IMG_AB) + ulResult = SS_UpdateDeltaIMGAB(ua_dataSS); + else if (strncmp(buf, SS_KERNEL_MAGIC, sizeof(buf) / sizeof(char)) == 0) ulResult = SS_UpdateDeltaKernel(ua_dataSS, ua_dataSS->write_data_to_blkdev); else ulResult = SS_UpdateDeltaIMG(ua_dataSS, ua_dataSS->write_data_to_blkdev); diff --git a/ss_engine/SS_UPI.h b/ss_engine/SS_UPI.h index e24c2f5..9e6fa1d 100755 --- a/ss_engine/SS_UPI.h +++ b/ss_engine/SS_UPI.h @@ -66,6 +66,7 @@ extern int SS_FSUpdatemain(ua_dataSS_t * ua_dataSS, int part_idx); extern int SS_FSVerifyPartition(ua_dataSS_t * ua_dataSS, int part_idx); extern int SS_UpdateDeltaIMG(ua_dataSS_t * ua_dataSS, int (*write_to_blkdev) (char *, int, int, char *)); extern int SS_UpdateDeltaKernel(ua_dataSS_t * ua_dataSS, int (*write_to_blkdev) (char *, int, int, char *)); +extern int SS_UpdateDeltaIMGAB(ua_dataSS_t * ua_dataSS); //extra functions extern void *SS_Malloc(unsigned int size); diff --git a/ss_engine/ua_types.h b/ss_engine/ua_types.h index c7d70b1..859f49d 100755 --- a/ss_engine/ua_types.h +++ b/ss_engine/ua_types.h @@ -36,6 +36,7 @@ typedef enum { FULL_IMG, DELTA_IMG, + DELTA_IMG_AB, DELTA_FS, EXTRA } UA_DATA_FORMAT; @@ -73,6 +74,7 @@ typedef struct _ua_part_info_t { char *ua_parti_name; char *ua_subject_name; char 
*ua_blk_name; + char *ua_blk_name_previous; int ua_blk_offset; } ua_part_info_t; -- cgit v1.2.3 From c374696c698f0e3368ec6ab4433f2534e9f4ceaa Mon Sep 17 00:00:00 2001 From: Mateusz Moscicki Date: Thu, 3 Feb 2022 13:15:07 +0100 Subject: Add partition patching validation Change-Id: I50beec2214e7e606741eb740dc3421e3b5fb259d --- ss_engine/SS_PatchDelta.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/ss_engine/SS_PatchDelta.c b/ss_engine/SS_PatchDelta.c index 2c85344..f3d6a5f 100755 --- a/ss_engine/SS_PatchDelta.c +++ b/ss_engine/SS_PatchDelta.c @@ -1011,6 +1011,7 @@ int SS_UpdateDeltaIMGAB(ua_dataSS_t * ua_dataSS, int (*write_to_blkdev) (char *, return S_SS_SUCCESS; } + LOGL(LOG_SSENGINE, "SS_UpdateDeltaIMGAB - Checksum calculation of the source partition\n"); SS_CalculateFileSha(ua_dataSS->parti_info->ua_blk_name_previous, ua_dataSS->update_cfg->soure_img_size, current_target_sha1); @@ -1023,11 +1024,25 @@ int SS_UpdateDeltaIMGAB(ua_dataSS_t * ua_dataSS, int (*write_to_blkdev) (char *, return E_SS_FAILURE; } + LOGL(LOG_SSENGINE, "SS_UpdateDeltaIMGAB - Applying the patch\n"); apply_patch_brotli(ua_dataSS->parti_info->ua_blk_name_previous, ua_dataSS->update_cfg->soure_img_size, ua_dataSS->parti_info->ua_blk_name, ua_dataSS->update_cfg->target_img_size, SS_PATCHFILE_SOURCE); + LOGL(LOG_SSENGINE, "SS_UpdateDeltaIMGAB - Checksum calculation of the target partition\n"); + SS_CalculateFileSha(ua_dataSS->parti_info->ua_blk_name, + ua_dataSS->update_cfg->target_img_size, + current_target_sha1); + + if (memcmp(target_sha1, current_target_sha1, SHA_DIGEST_SIZE) != 0) { + unsigned char actualShaBuffer[41] = { 0, }; + hex_digest(current_target_sha1, actualShaBuffer, SHA_DIGEST_SIZE); + LOGL(LOG_SSENGINE, "SS_UpdateDeltaIMGAB - Target partition was corrupted. SRC: [%s] Expected [%s] Actual [%s]\n", + ua_dataSS->parti_info->ua_blk_name, ua_dataSS->update_cfg->target_sha1, actualShaBuffer); + return E_SS_FAILURE; + } + return result; } -- cgit v1.2.3 From 1eefca6e1170dc7e07b24859ffcf20b6becd277d Mon Sep 17 00:00:00 2001 From: Mateusz Moscicki Date: Fri, 4 Feb 2022 16:09:43 +0100 Subject: Replace SHA1 library The previous SHA1 implementation was unable to correctly calculate hashes for images larger than 0x1fffffff bytes. 
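The limit is consistent with a 32-bit bit-length counter: 0x1fffffff bytes is 2^29 - 1 bytes, and one byte more makes the message length 2^29 * 8 = 2^32 bits, which no longer fits in 32 bits. The public-domain Steve Reid implementation used from this commit on keeps the length in a pair of 32-bit words (effectively a 64-bit bit count), so images of this size and larger hash correctly.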
Change-Id: Ic98f8cb4355e4522bb8154aa3285a60f20c42f4a --- ss_engine/SS_ApplyPatch.c | 4 +- ss_engine/SS_Common.c | 10 +- ss_engine/SS_PatchDelta.c | 29 ++- ss_engine/SS_PatchDelta.h | 6 +- ss_engine/sha1.c | 639 +++++++++++++++++++--------------------------- ss_engine/sha1.h | 142 +++-------- 6 files changed, 323 insertions(+), 507 deletions(-) diff --git a/ss_engine/SS_ApplyPatch.c b/ss_engine/SS_ApplyPatch.c index 2057ac3..bc9e86b 100755 --- a/ss_engine/SS_ApplyPatch.c +++ b/ss_engine/SS_ApplyPatch.c @@ -44,7 +44,7 @@ #include -int SS_ApplyBsdiff(char *oldfile, char *newfile, char *patch, SinkFn sink, void *token, sha1_ctx_t * ctx1) +int SS_ApplyBsdiff(char *oldfile, char *newfile, char *patch, SinkFn sink, void *token, SHA1_CTX * ctx1) { UInt64 unpackSize = 0; CFileSeqInStream inStream; @@ -103,7 +103,7 @@ int SS_ApplyBsdiff(char *oldfile, char *newfile, char *patch, SinkFn sink, void } if (ctx1) - sha1_update(ctx1, new_data, new_size); + SHA1Update(ctx1, new_data, new_size); Cleanup: if (new_data) SS_Free(new_data); diff --git a/ss_engine/SS_Common.c b/ss_engine/SS_Common.c index 6e31854..b0815ea 100755 --- a/ss_engine/SS_Common.c +++ b/ss_engine/SS_Common.c @@ -143,8 +143,8 @@ int SS_CalculateFileSha(char *filename, long int filesize, unsigned char calcula } ssize_t bytes_read = 0; - sha1_ctx_t sha_ctx; - sha1_init(&sha_ctx); + SHA1_CTX sha_ctx; + SHA1Init(&sha_ctx); while (filesize > 0) { if (filesize < chunk) { @@ -154,7 +154,7 @@ int SS_CalculateFileSha(char *filename, long int filesize, unsigned char calcula ulResult = E_SS_FAILURE; goto Cleanup; } - sha1_update(&sha_ctx, buffer, filesize); + SHA1Update(&sha_ctx, buffer, filesize); break; } else { bytes_read = fread(buffer, 1, chunk, fp); @@ -163,12 +163,12 @@ int SS_CalculateFileSha(char *filename, long int filesize, unsigned char calcula ulResult = E_SS_FAILURE; goto Cleanup; } - sha1_update(&sha_ctx, buffer, chunk); + SHA1Update(&sha_ctx, buffer, chunk); filesize -= chunk; } } - sha1_final(&sha_ctx, (uint32_t *) &calculated_sha1[0]); + SHA1Final(calculated_sha1, &sha_ctx); Cleanup: if (fp) diff --git a/ss_engine/SS_PatchDelta.c b/ss_engine/SS_PatchDelta.c index f3d6a5f..9f2d94c 100755 --- a/ss_engine/SS_PatchDelta.c +++ b/ss_engine/SS_PatchDelta.c @@ -146,19 +146,19 @@ int SS_LoadPartition(const char *filename, FileInfo * file) return -1; } - sha1_ctx_t sha_ctx; - sha1_init(&sha_ctx); + SHA1_CTX sha_ctx; + SHA1Init(&sha_ctx); file->data = SS_Malloc(file->size); if (file->data) { read = fread(file->data, 1, file->size, dev); LOGL(LOG_SSENGINE, "Partition size read %d\n", read); - sha1_update(&sha_ctx, file->data, read); + SHA1Update(&sha_ctx, file->data, read); file->size = read; } - const uint8_t sha_final[SHA_DIGEST_SIZE] = { 0, }; - sha1_final(&sha_ctx, (uint32_t *) & sha_final); + unsigned char sha_final[SHA_DIGEST_SIZE] = { 0, }; + SHA1Final(sha_final, &sha_ctx); for (i = 0; i < SHA_DIGEST_SIZE; ++i) file->sha1[i] = sha_final[i]; //LOGL(LOG_SSENGINE, "Final SHA of Source (%s)\n", sha_final); @@ -212,7 +212,7 @@ int SS_LoadFile(const char *filename, FileInfo * file) } fclose(f); //LOGL(LOG_SSENGINE,"SS_LoadFile --- [bytes_read %d]\n",bytes_read); - sha1(file->data, file->size, (uint32_t *) file->sha1); + SHA1(file->sha1, file->data, file->size); return 0; } @@ -241,7 +241,7 @@ int SS_UpdateDeltaFS(const char *source_filename, const char *target_filename, { uint8_t target_sha1[SHA_DIGEST_SIZE] = { 0, }; uint8_t source_sha1[SHA_DIGEST_SIZE] = { 0, }; - sha1_ctx_t ctx1; + SHA1_CTX ctx1; int output; int retry = 1; int use_backup 
= 0; @@ -413,7 +413,7 @@ int SS_UpdateDeltaFS(const char *source_filename, const char *target_filename, sink = ss_fileSink; token = &output; } - sha1_init(&ctx1); + SHA1Init(&ctx1); if (use_backup) result = SS_ApplyBsdiff(SS_BACKUP_SOURCE, outname, SS_PATCHFILE_SOURCE, sink, token, &ctx1); else @@ -439,8 +439,8 @@ int SS_UpdateDeltaFS(const char *source_filename, const char *target_filename, } } while (retry-- > 0); - const uint8_t current_target_sha1[SHA_DIGEST_SIZE] = { 0, }; - sha1_final(&ctx1, (uint32_t *) & current_target_sha1); + unsigned char current_target_sha1[SHA_DIGEST_SIZE] = { 0, }; + SHA1Final(current_target_sha1, &ctx1); if (memcmp(current_target_sha1, target_sha1, SHA_DIGEST_SIZE) != 0) { LOGE("patch did not produce expected sha1\n"); SS_SetUpgradeState(E_SS_FSSHA_MISMATCH); @@ -842,9 +842,9 @@ int SS_UpdateDeltaIMG(ua_dataSS_t * ua_dataSS, int (*write_to_blkdev) (char *, i { uint8_t target_sha1[SHA_DIGEST_SIZE]; uint8_t source_sha1[SHA_DIGEST_SIZE]; - const uint8_t current_target_sha1[SHA_DIGEST_SIZE]; + unsigned char current_target_sha1[SHA_DIGEST_SIZE]; FileInfo source_file; - sha1_ctx_t ctx1; + SHA1_CTX ctx1; MemorySinkInfo msi; int result = S_SS_SUCCESS; int blk_cnt; @@ -918,7 +918,7 @@ int SS_UpdateDeltaIMG(ua_dataSS_t * ua_dataSS, int (*write_to_blkdev) (char *, i sink = ss_memorySink; token = &msi; - sha1_init(&ctx1); + SHA1Init(&ctx1); //if souce was corrupted, use backup to apply diff if (use_backup_img == -1) result = @@ -931,7 +931,7 @@ int SS_UpdateDeltaIMG(ua_dataSS_t * ua_dataSS, int (*write_to_blkdev) (char *, i goto Cleanup; } - sha1_final(&ctx1, (uint32_t *) & current_target_sha1); + SHA1Final(current_target_sha1, &ctx1); result = memcmp(current_target_sha1, target_sha1, SHA_DIGEST_SIZE); if (result != S_SS_SUCCESS) { LOGE("patch did not produce expected sha1 \n"); @@ -983,6 +983,7 @@ Cleanup: } + int SS_UpdateDeltaIMGAB(ua_dataSS_t * ua_dataSS, int (*write_to_blkdev) (char *, int, int, char *)) { int result = S_SS_SUCCESS; diff --git a/ss_engine/SS_PatchDelta.h b/ss_engine/SS_PatchDelta.h index d789419..edbe182 100755 --- a/ss_engine/SS_PatchDelta.h +++ b/ss_engine/SS_PatchDelta.h @@ -29,7 +29,7 @@ //#define ENHANCED_BSDIFF #define SS_UPDATE_FS 0 #define SS_UPDATE_IMG 1 -//#define SHA_DIGEST_SIZE 20 +#define SHA_DIGEST_SIZE 20 typedef struct { int type; ssize_t size; @@ -53,7 +53,7 @@ int ParseSha1(const char *str, uint8_t * digest); void ShowBSDiffLicense(); int ApplyBSDiffPatch(const unsigned char *old_data, ssize_t old_size, - const Value * patch, ssize_t patch_offset, SinkFn sink, void *token, sha1_ctx_t * ctx1); + const Value * patch, ssize_t patch_offset, SinkFn sink, void *token, SHA1_CTX * ctx1); int ApplyBSDiffPatchMem(const unsigned char *old_data, ssize_t old_size, const Value * patch, ssize_t patch_offset, unsigned char **new_data, ssize_t * new_size); //int ApplyOptimizedBSDiffPatch(const unsigned char* old_data,void* token, @@ -64,7 +64,7 @@ int SS_LoadFile(const char *filename, FileInfo * file); extern void SS_SetUpgradeState(int Val); extern long SS_GetAvailableFreeSpace(const char *partition_name, SS_UINT32 * available_flash_size); extern int SS_BackupSource(const char *source_filename); -extern int SS_ApplyBsdiff(char *oldfile, char *newfile, char *patch, SinkFn sink, void *token, sha1_ctx_t * ctx1); +extern int SS_ApplyBsdiff(char *oldfile, char *newfile, char *patch, SinkFn sink, void *token, SHA1_CTX * ctx1); extern int SS_BackupSourceClear(); extern int SS_PatchSourceClear(); extern long SS_WriteFile(long wHandle, SS_UINT32 
dwPosition, unsigned char *pbBuffer, SS_UINT32 dwSize); diff --git a/ss_engine/sha1.c b/ss_engine/sha1.c index 54d75a2..fe8da83 100755 --- a/ss_engine/sha1.c +++ b/ss_engine/sha1.c @@ -1,410 +1,295 @@ /* - * sha1.c - * - * an implementation of the Secure Hash Algorithm v.1 (SHA-1), - * specified in FIPS 180-1 - * - * David A. McGrew - * Cisco Systems, Inc. - */ +SHA-1 in C +By Steve Reid +100% Public Domain -/* - * - * Copyright (c) 2001-2006, Cisco Systems, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * Neither the name of the Cisco Systems, Inc. nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS - * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE - * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, - * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED - * OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - - -#include "sha1.h" +Test Vectors (from FIPS PUB 180-1) +"abc" + A9993E36 4706816A BA3E2571 7850C26C 9CD0D89D +"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq" + 84983E44 1C3BD26E BAAE4AA1 F95129E5 E54670F1 +A million repetitions of "a" + 34AA973C D4C4DAA4 F61EEB2B DBAD2731 6534016F +*/ +/* #define LITTLE_ENDIAN * This should be #define'd already, if true. */ +/* #define SHA1HANDSOFF * Copies data before messing with it. 
*/ +#define SHA1HANDSOFF -/* SN == Rotate left N bits */ -#define S1(X) ((X << 1) | (X >> 31)) -#define S5(X) ((X << 5) | (X >> 27)) -#define S30(X) ((X << 30) | (X >> 2)) +#include +#include -#define f0(B,C,D) ((B & C) | (~B & D)) -#define f1(B,C,D) (B ^ C ^ D) -#define f2(B,C,D) ((B & C) | (B & D) | (C & D)) -#define f3(B,C,D) (B ^ C ^ D) +/* for uint32_t */ +#include +#include "sha1.h" -/* - * nota bene: the variable K0 appears in the curses library, so we - * give longer names to these variables to avoid spurious warnings - * on systems that uses curses - */ - -uint32_t SHA_K0 = 0x5A827999; /* Kt for 0 <= t <= 19 */ -uint32_t SHA_K1 = 0x6ED9EBA1; /* Kt for 20 <= t <= 39 */ -uint32_t SHA_K2 = 0x8F1BBCDC; /* Kt for 40 <= t <= 59 */ -uint32_t SHA_K3 = 0xCA62C1D6; /* Kt for 60 <= t <= 79 */ - -void -sha1(const uint8_t *msg, int octets_in_msg, uint32_t hash_value[5]) { - sha1_ctx_t ctx; - - sha1_init(&ctx); - sha1_update(&ctx, msg, octets_in_msg); - sha1_final(&ctx, hash_value); - -} - -/* - * sha1_core(M, H) computes the core compression function, where M is - * the next part of the message (in network byte order) and H is the - * intermediate state { H0, H1, ...} (in host byte order) - * - * this function does not do any of the padding required in the - * complete SHA1 function - * - * this function is used in the SEAL 3.0 key setup routines - * (crypto/cipher/seal.c) - */ - -void -sha1_core(const uint32_t M[16], uint32_t hash_value[5]) { - uint32_t H0; - uint32_t H1; - uint32_t H2; - uint32_t H3; - uint32_t H4; - uint32_t W[80]; - uint32_t A, B, C, D, E, TEMP; - int t; - - /* copy hash_value into H0, H1, H2, H3, H4 */ - H0 = hash_value[0]; - H1 = hash_value[1]; - H2 = hash_value[2]; - H3 = hash_value[3]; - H4 = hash_value[4]; - - /* copy/xor message into array */ - - W[0] = be32_to_cpu(M[0]); - W[1] = be32_to_cpu(M[1]); - W[2] = be32_to_cpu(M[2]); - W[3] = be32_to_cpu(M[3]); - W[4] = be32_to_cpu(M[4]); - W[5] = be32_to_cpu(M[5]); - W[6] = be32_to_cpu(M[6]); - W[7] = be32_to_cpu(M[7]); - W[8] = be32_to_cpu(M[8]); - W[9] = be32_to_cpu(M[9]); - W[10] = be32_to_cpu(M[10]); - W[11] = be32_to_cpu(M[11]); - W[12] = be32_to_cpu(M[12]); - W[13] = be32_to_cpu(M[13]); - W[14] = be32_to_cpu(M[14]); - W[15] = be32_to_cpu(M[15]); - TEMP = W[13] ^ W[8] ^ W[2] ^ W[0]; W[16] = S1(TEMP); - TEMP = W[14] ^ W[9] ^ W[3] ^ W[1]; W[17] = S1(TEMP); - TEMP = W[15] ^ W[10] ^ W[4] ^ W[2]; W[18] = S1(TEMP); - TEMP = W[16] ^ W[11] ^ W[5] ^ W[3]; W[19] = S1(TEMP); - TEMP = W[17] ^ W[12] ^ W[6] ^ W[4]; W[20] = S1(TEMP); - TEMP = W[18] ^ W[13] ^ W[7] ^ W[5]; W[21] = S1(TEMP); - TEMP = W[19] ^ W[14] ^ W[8] ^ W[6]; W[22] = S1(TEMP); - TEMP = W[20] ^ W[15] ^ W[9] ^ W[7]; W[23] = S1(TEMP); - TEMP = W[21] ^ W[16] ^ W[10] ^ W[8]; W[24] = S1(TEMP); - TEMP = W[22] ^ W[17] ^ W[11] ^ W[9]; W[25] = S1(TEMP); - TEMP = W[23] ^ W[18] ^ W[12] ^ W[10]; W[26] = S1(TEMP); - TEMP = W[24] ^ W[19] ^ W[13] ^ W[11]; W[27] = S1(TEMP); - TEMP = W[25] ^ W[20] ^ W[14] ^ W[12]; W[28] = S1(TEMP); - TEMP = W[26] ^ W[21] ^ W[15] ^ W[13]; W[29] = S1(TEMP); - TEMP = W[27] ^ W[22] ^ W[16] ^ W[14]; W[30] = S1(TEMP); - TEMP = W[28] ^ W[23] ^ W[17] ^ W[15]; W[31] = S1(TEMP); - - /* process the remainder of the array */ - for (t=32; t < 80; t++) { - TEMP = W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16]; - W[t] = S1(TEMP); - } - - A = H0; B = H1; C = H2; D = H3; E = H4; - - for (t=0; t < 20; t++) { - TEMP = S5(A) + f0(B,C,D) + E + W[t] + SHA_K0; - E = D; D = C; C = S30(B); B = A; A = TEMP; - } - for ( ; t < 40; t++) { - TEMP = S5(A) + f1(B,C,D) + E + W[t] + SHA_K1; - E 
= D; D = C; C = S30(B); B = A; A = TEMP; - } - for ( ; t < 60; t++) { - TEMP = S5(A) + f2(B,C,D) + E + W[t] + SHA_K2; - E = D; D = C; C = S30(B); B = A; A = TEMP; - } - for ( ; t < 80; t++) { - TEMP = S5(A) + f3(B,C,D) + E + W[t] + SHA_K3; - E = D; D = C; C = S30(B); B = A; A = TEMP; - } - - hash_value[0] = H0 + A; - hash_value[1] = H1 + B; - hash_value[2] = H2 + C; - hash_value[3] = H3 + D; - hash_value[4] = H4 + E; - - return; +#define rol(value, bits) (((value) << (bits)) | ((value) >> (32 - (bits)))) + +/* blk0() and blk() perform the initial expand. */ +/* I got the idea of expanding during the round function from SSLeay */ +#if BYTE_ORDER == LITTLE_ENDIAN +#define blk0(i) (block->l[i] = (rol(block->l[i],24)&0xFF00FF00) \ + |(rol(block->l[i],8)&0x00FF00FF)) +#elif BYTE_ORDER == BIG_ENDIAN +#define blk0(i) block->l[i] +#else +#error "Endianness not defined!" +#endif +#define blk(i) (block->l[i&15] = rol(block->l[(i+13)&15]^block->l[(i+8)&15] \ + ^block->l[(i+2)&15]^block->l[i&15],1)) + +/* (R0+R1), R2, R3, R4 are the different operations used in SHA1 */ +#define R0(v,w,x,y,z,i) z+=((w&(x^y))^y)+blk0(i)+0x5A827999+rol(v,5);w=rol(w,30); +#define R1(v,w,x,y,z,i) z+=((w&(x^y))^y)+blk(i)+0x5A827999+rol(v,5);w=rol(w,30); +#define R2(v,w,x,y,z,i) z+=(w^x^y)+blk(i)+0x6ED9EBA1+rol(v,5);w=rol(w,30); +#define R3(v,w,x,y,z,i) z+=(((w|x)&y)|(w&x))+blk(i)+0x8F1BBCDC+rol(v,5);w=rol(w,30); +#define R4(v,w,x,y,z,i) z+=(w^x^y)+blk(i)+0xCA62C1D6+rol(v,5);w=rol(w,30); + + +/* Hash a single 512-bit block. This is the core of the algorithm. */ + +void SHA1Transform( + uint32_t state[5], + const unsigned char buffer[64] +) +{ + uint32_t a, b, c, d, e; + + typedef union + { + unsigned char c[64]; + uint32_t l[16]; + } CHAR64LONG16; + +#ifdef SHA1HANDSOFF + CHAR64LONG16 block[1]; /* use array to appear as a pointer */ + + memcpy(block, buffer, 64); +#else + /* The following had better never be used because it causes the + * pointer-to-const buffer to be cast into a pointer to non-const. + * And the result is written through. I threw a "const" in, hoping + * this will cause a diagnostic. + */ + CHAR64LONG16 *block = (const CHAR64LONG16 *) buffer; +#endif + /* Copy context->state[] to working vars */ + a = state[0]; + b = state[1]; + c = state[2]; + d = state[3]; + e = state[4]; + /* 4 rounds of 20 operations each. Loop unrolled. 
*/ + R0(a, b, c, d, e, 0); + R0(e, a, b, c, d, 1); + R0(d, e, a, b, c, 2); + R0(c, d, e, a, b, 3); + R0(b, c, d, e, a, 4); + R0(a, b, c, d, e, 5); + R0(e, a, b, c, d, 6); + R0(d, e, a, b, c, 7); + R0(c, d, e, a, b, 8); + R0(b, c, d, e, a, 9); + R0(a, b, c, d, e, 10); + R0(e, a, b, c, d, 11); + R0(d, e, a, b, c, 12); + R0(c, d, e, a, b, 13); + R0(b, c, d, e, a, 14); + R0(a, b, c, d, e, 15); + R1(e, a, b, c, d, 16); + R1(d, e, a, b, c, 17); + R1(c, d, e, a, b, 18); + R1(b, c, d, e, a, 19); + R2(a, b, c, d, e, 20); + R2(e, a, b, c, d, 21); + R2(d, e, a, b, c, 22); + R2(c, d, e, a, b, 23); + R2(b, c, d, e, a, 24); + R2(a, b, c, d, e, 25); + R2(e, a, b, c, d, 26); + R2(d, e, a, b, c, 27); + R2(c, d, e, a, b, 28); + R2(b, c, d, e, a, 29); + R2(a, b, c, d, e, 30); + R2(e, a, b, c, d, 31); + R2(d, e, a, b, c, 32); + R2(c, d, e, a, b, 33); + R2(b, c, d, e, a, 34); + R2(a, b, c, d, e, 35); + R2(e, a, b, c, d, 36); + R2(d, e, a, b, c, 37); + R2(c, d, e, a, b, 38); + R2(b, c, d, e, a, 39); + R3(a, b, c, d, e, 40); + R3(e, a, b, c, d, 41); + R3(d, e, a, b, c, 42); + R3(c, d, e, a, b, 43); + R3(b, c, d, e, a, 44); + R3(a, b, c, d, e, 45); + R3(e, a, b, c, d, 46); + R3(d, e, a, b, c, 47); + R3(c, d, e, a, b, 48); + R3(b, c, d, e, a, 49); + R3(a, b, c, d, e, 50); + R3(e, a, b, c, d, 51); + R3(d, e, a, b, c, 52); + R3(c, d, e, a, b, 53); + R3(b, c, d, e, a, 54); + R3(a, b, c, d, e, 55); + R3(e, a, b, c, d, 56); + R3(d, e, a, b, c, 57); + R3(c, d, e, a, b, 58); + R3(b, c, d, e, a, 59); + R4(a, b, c, d, e, 60); + R4(e, a, b, c, d, 61); + R4(d, e, a, b, c, 62); + R4(c, d, e, a, b, 63); + R4(b, c, d, e, a, 64); + R4(a, b, c, d, e, 65); + R4(e, a, b, c, d, 66); + R4(d, e, a, b, c, 67); + R4(c, d, e, a, b, 68); + R4(b, c, d, e, a, 69); + R4(a, b, c, d, e, 70); + R4(e, a, b, c, d, 71); + R4(d, e, a, b, c, 72); + R4(c, d, e, a, b, 73); + R4(b, c, d, e, a, 74); + R4(a, b, c, d, e, 75); + R4(e, a, b, c, d, 76); + R4(d, e, a, b, c, 77); + R4(c, d, e, a, b, 78); + R4(b, c, d, e, a, 79); + /* Add the working vars back into context.state[] */ + state[0] += a; + state[1] += b; + state[2] += c; + state[3] += d; + state[4] += e; + /* Wipe variables */ + a = b = c = d = e = 0; +#ifdef SHA1HANDSOFF + memset(block, '\0', sizeof(block)); +#endif } -void -sha1_init(sha1_ctx_t *ctx) { - int i; - - /* initialize state vector */ - ctx->H[0] = 0x67452301; - ctx->H[1] = 0xefcdab89; - ctx->H[2] = 0x98badcfe; - ctx->H[3] = 0x10325476; - ctx->H[4] = 0xc3d2e1f0; - - for(i = 0; i < 16; i++) { - ctx->M[i] = 0; - } - - /* indicate that message buffer is empty */ - ctx->octets_in_buffer = 0; - /* reset message bit-count to zero */ - ctx->num_bits_in_msg = 0; +/* SHA1Init - Initialize new context */ +void SHA1Init( + SHA1_CTX * context +) +{ + /* SHA1 initialization constants */ + context->state[0] = 0x67452301; + context->state[1] = 0xEFCDAB89; + context->state[2] = 0x98BADCFE; + context->state[3] = 0x10325476; + context->state[4] = 0xC3D2E1F0; + context->count[0] = context->count[1] = 0; } -void -sha1_update(sha1_ctx_t *ctx, const uint8_t *msg, int octets_in_msg) { - int i; - uint8_t *buf = (uint8_t *)ctx->M; - - /* update message bit-count */ - ctx->num_bits_in_msg += octets_in_msg * 8; - - /* loop over 16-word blocks of M */ - while (octets_in_msg > 0) { - - if (octets_in_msg + ctx->octets_in_buffer >= 64) { - - /* - * copy words of M into msg buffer until that buffer is full, - * converting them into host byte order as needed - */ - octets_in_msg -= (64 - ctx->octets_in_buffer); - for (i=ctx->octets_in_buffer; i < 64; i++) - buf[i] = 
*msg++; - ctx->octets_in_buffer = 0; - - /* process a whole block */ - - //debug_print(mod_sha1, "(update) running sha1_core()", NULL); - - sha1_core(ctx->M, ctx->H); - - } else { - - //debug_print(mod_sha1, "(update) not running sha1_core()", NULL); - for (i=ctx->octets_in_buffer; - i < (ctx->octets_in_buffer + octets_in_msg); i++) - buf[i] = *msg++; - ctx->octets_in_buffer += octets_in_msg; - octets_in_msg = 0; +/* Run your data through this. */ + +void SHA1Update( + SHA1_CTX * context, + const unsigned char *data, + uint32_t len +) +{ + uint32_t i; + + uint32_t j; + + j = context->count[0]; + if ((context->count[0] += len << 3) < j) + context->count[1]++; + context->count[1] += (len >> 29); + j = (j >> 3) & 63; + if ((j + len) > 63) + { + memcpy(&context->buffer[j], data, (i = 64 - j)); + SHA1Transform(context->state, context->buffer); + for (; i + 63 < len; i += 64) + { + SHA1Transform(context->state, &data[i]); + } + j = 0; } - - } - + else + i = 0; + memcpy(&context->buffer[j], &data[i], len - i); } -/* - * sha1_final(ctx, output) computes the result for ctx and copies it - * into the twenty octets located at *output - */ - -void -sha1_final(sha1_ctx_t *ctx, uint32_t *output) { - uint32_t A, B, C, D, E, TEMP; - uint32_t W[80]; - int i, t; - - /* - * process the remaining octets_in_buffer, padding and terminating as - * necessary - */ - { - int tail = ctx->octets_in_buffer % 4; - - /* copy/xor message into array */ - for (i=0; i < (ctx->octets_in_buffer+3)/4; i++) - W[i] = be32_to_cpu(ctx->M[i]); - - /* set the high bit of the octet immediately following the message */ - switch (tail) { - case (3): - W[i-1] = (be32_to_cpu(ctx->M[i-1]) & 0xffffff00) | 0x80; - W[i] = 0x0; - break; - case (2): - W[i-1] = (be32_to_cpu(ctx->M[i-1]) & 0xffff0000) | 0x8000; - W[i] = 0x0; - break; - case (1): - W[i-1] = (be32_to_cpu(ctx->M[i-1]) & 0xff000000) | 0x800000; - W[i] = 0x0; - break; - case (0): - W[i] = 0x80000000; - break; - } - - /* zeroize remaining words */ - for (i++ ; i < 15; i++) - W[i] = 0x0; - - /* - * if there is room at the end of the word array, then set the - * last word to the bit-length of the message; otherwise, set that - * word to zero and then we need to do one more run of the - * compression algo. - */ - if (ctx->octets_in_buffer < 56) - W[15] = ctx->num_bits_in_msg; - else if (ctx->octets_in_buffer < 60) - W[15] = 0x0; - - /* process the word array */ - for (t=16; t < 80; t++) { - TEMP = W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16]; - W[t] = S1(TEMP); - } - - A = ctx->H[0]; - B = ctx->H[1]; - C = ctx->H[2]; - D = ctx->H[3]; - E = ctx->H[4]; - - for (t=0; t < 20; t++) { - TEMP = S5(A) + f0(B,C,D) + E + W[t] + SHA_K0; - E = D; D = C; C = S30(B); B = A; A = TEMP; - } - for ( ; t < 40; t++) { - TEMP = S5(A) + f1(B,C,D) + E + W[t] + SHA_K1; - E = D; D = C; C = S30(B); B = A; A = TEMP; - } - for ( ; t < 60; t++) { - TEMP = S5(A) + f2(B,C,D) + E + W[t] + SHA_K2; - E = D; D = C; C = S30(B); B = A; A = TEMP; - } - for ( ; t < 80; t++) { - TEMP = S5(A) + f3(B,C,D) + E + W[t] + SHA_K3; - E = D; D = C; C = S30(B); B = A; A = TEMP; - } - - ctx->H[0] += A; - ctx->H[1] += B; - ctx->H[2] += C; - ctx->H[3] += D; - ctx->H[4] += E; - - } - - //debug_print(mod_sha1, "(final) running sha1_core()", NULL); - if (ctx->octets_in_buffer >= 56) { +/* Add padding and return the message digest. 
*/ +void SHA1Final( + unsigned char digest[20], + SHA1_CTX * context +) +{ + unsigned i; - //debug_print(mod_sha1, "(final) running sha1_core() again", NULL); + unsigned char finalcount[8]; - /* we need to do one final run of the compression algo */ + unsigned char c; - /* - * set initial part of word array to zeros, and set the - * final part to the number of bits in the message +#if 0 /* untested "improvement" by DHR */ + /* Convert context->count to a sequence of bytes + * in finalcount. Second element first, but + * big-endian order within element. + * But we do it all backwards. */ - for (i=0; i < 15; i++) - W[i] = 0x0; - W[15] = ctx->num_bits_in_msg; - - /* process the word array */ - for (t=16; t < 80; t++) { - TEMP = W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16]; - W[t] = S1(TEMP); - } + unsigned char *fcp = &finalcount[8]; - A = ctx->H[0]; - B = ctx->H[1]; - C = ctx->H[2]; - D = ctx->H[3]; - E = ctx->H[4]; + for (i = 0; i < 2; i++) + { + uint32_t t = context->count[i]; - for (t=0; t < 20; t++) { - TEMP = S5(A) + f0(B,C,D) + E + W[t] + SHA_K0; - E = D; D = C; C = S30(B); B = A; A = TEMP; - } - for ( ; t < 40; t++) { - TEMP = S5(A) + f1(B,C,D) + E + W[t] + SHA_K1; - E = D; D = C; C = S30(B); B = A; A = TEMP; + int j; + + for (j = 0; j < 4; t >>= 8, j++) + *--fcp = (unsigned char) t} +#else + for (i = 0; i < 8; i++) + { + finalcount[i] = (unsigned char) ((context->count[(i >= 4 ? 0 : 1)] >> ((3 - (i & 3)) * 8)) & 255); /* Endian independent */ } - for ( ; t < 60; t++) { - TEMP = S5(A) + f2(B,C,D) + E + W[t] + SHA_K2; - E = D; D = C; C = S30(B); B = A; A = TEMP; +#endif + c = 0200; + SHA1Update(context, &c, 1); + while ((context->count[0] & 504) != 448) + { + c = 0000; + SHA1Update(context, &c, 1); } - for ( ; t < 80; t++) { - TEMP = S5(A) + f3(B,C,D) + E + W[t] + SHA_K3; - E = D; D = C; C = S30(B); B = A; A = TEMP; + SHA1Update(context, finalcount, 8); /* Should cause a SHA1Transform() */ + for (i = 0; i < 20; i++) + { + digest[i] = (unsigned char) + ((context->state[i >> 2] >> ((3 - (i & 3)) * 8)) & 255); } - - ctx->H[0] += A; - ctx->H[1] += B; - ctx->H[2] += C; - ctx->H[3] += D; - ctx->H[4] += E; - } - - /* copy result into output buffer */ - output[0] = be32_to_cpu(ctx->H[0]); - output[1] = be32_to_cpu(ctx->H[1]); - output[2] = be32_to_cpu(ctx->H[2]); - output[3] = be32_to_cpu(ctx->H[3]); - output[4] = be32_to_cpu(ctx->H[4]); - - /* indicate that message buffer in context is empty */ - ctx->octets_in_buffer = 0; - - return; + /* Wipe variables */ + memset(context, '\0', sizeof(*context)); + memset(&finalcount, '\0', sizeof(finalcount)); } - +void SHA1( + char *hash_out, + const char *str, + int len) +{ + SHA1_CTX ctx; + unsigned int ii; + + SHA1Init(&ctx); + for (ii=0; ii> 24) | \ - (((uint32_t)(x) & 0x00ff0000u) >> 8) | \ - (((uint32_t)(x) & 0x0000ff00u) << 8) | \ - (((uint32_t)(x) & 0x000000ffu) << 24)) - - -typedef unsigned int uint32_t; -typedef unsigned char uint8_t; -typedef struct { - uint32_t H[5]; /* state vector */ - uint32_t M[16]; /* message buffer */ - int octets_in_buffer; /* octets of message in buffer */ - uint32_t num_bits_in_msg; /* total number of bits in message */ -} sha1_ctx_t; - -/* - * sha1(&ctx, msg, len, output) hashes the len octets starting at msg - * into the SHA1 context, then writes the result to the 20 octets at - * output - * - */ - -void -sha1(const uint8_t *message, int octets_in_msg, uint32_t output[5]); - -/* - * sha1_init(&ctx) initializes the SHA1 context ctx - * - * sha1_update(&ctx, msg, len) hashes the len octets starting at msg - * into the SHA1 
context - * - * sha1_final(&ctx, output) performs the final processing of the SHA1 - * context and writes the result to the 20 octets at output - * - */ - -void -sha1_init(sha1_ctx_t *ctx); - -void -sha1_update(sha1_ctx_t *ctx, const uint8_t *M, int octets_in_msg); - -void -sha1_final(sha1_ctx_t *ctx, uint32_t output[5]); - -/* - * The sha1_core function is INTERNAL to SHA-1, but it is declared - * here because it is also used by the cipher SEAL 3.0 in its key - * setup algorithm. - */ /* - * sha1_core(M, H) computes the core sha1 compression function, where M is - * the next part of the message and H is the intermediate state {H0, - * H1, ...} - * - * this function does not do any of the padding required in the - * complete sha1 function + SHA-1 in C + By Steve Reid + 100% Public Domain */ -void -sha1_core(const uint32_t M[16], uint32_t hash_value[5]); +#include "stdint.h" + +typedef struct +{ + uint32_t state[5]; + uint32_t count[2]; + unsigned char buffer[64]; +} SHA1_CTX; + +void SHA1Transform( + uint32_t state[5], + const unsigned char buffer[64] + ); + +void SHA1Init( + SHA1_CTX * context + ); + +void SHA1Update( + SHA1_CTX * context, + const unsigned char *data, + uint32_t len + ); + +void SHA1Final( + unsigned char digest[20], + SHA1_CTX * context + ); + +void SHA1( + char *hash_out, + const char *str, + int len); #endif /* SHA1_H */ -- cgit v1.2.3
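Below is a minimal, illustrative self-test for the Steve Reid SHA-1 API that this series switches to (SHA1Init/SHA1Update/SHA1Final from ss_engine/sha1.h). It is not part of the patches; the file name sha1_selftest.c and the main() harness are assumptions made here for demonstration. It streams the input in small pieces, the same pattern the patched SS_CalculateFileSha() uses for partition data, and compares the 20-byte digest against the FIPS PUB 180-1 "abc" test vector quoted in the header comment of the new sha1.c.

/* sha1_selftest.c - illustrative sketch only, not part of the patch series.
 * Drives the SHA1_CTX streaming API the same way the patched
 * SS_CalculateFileSha() does: feed the data in chunks, then compare the
 * 20-byte digest with the FIPS PUB 180-1 test vector for "abc".
 */
#include <stdio.h>
#include <string.h>
#include "sha1.h"

int main(void)
{
    /* Expected SHA-1 of "abc": A9993E36 4706816A BA3E2571 7850C26C 9CD0D89D */
    static const unsigned char expected[20] = {
        0xA9, 0x99, 0x3E, 0x36, 0x47, 0x06, 0x81, 0x6A, 0xBA, 0x3E,
        0x25, 0x71, 0x78, 0x50, 0xC2, 0x6C, 0x9C, 0xD0, 0xD8, 0x9D
    };
    const unsigned char msg[] = "abc";
    unsigned char digest[20];
    SHA1_CTX ctx;
    size_t i;

    /* Stream the message one byte at a time, mimicking chunked hashing. */
    SHA1Init(&ctx);
    for (i = 0; i < sizeof(msg) - 1; i++)
        SHA1Update(&ctx, &msg[i], 1);
    SHA1Final(digest, &ctx);

    if (memcmp(digest, expected, sizeof(expected)) != 0) {
        fprintf(stderr, "SHA-1 self-test failed\n");
        return 1;
    }
    printf("SHA-1 self-test passed\n");
    return 0;
}

Assuming the patched tree layout, something like "gcc sha1_selftest.c ss_engine/sha1.c -o sha1_selftest" should build and run it. The one-shot SHA1(hash_out, str, len) helper declared in the new sha1.h would give the same digest in a single call; the streaming form is shown because it is what the engine code above relies on to avoid loading whole partitions into memory.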