diff --git a/espReader/ESPReader.h b/espReader/ESPReader.h
index 3f41784..a955ca3 100644
--- a/espReader/ESPReader.h
+++ b/espReader/ESPReader.h
@@ -175,6 +175,12 @@ extern "C" {
         uint8_t day;
     };
 
+    struct walker_callbacks {
+        void (*pre)(Node n, void *data, void **carry_out);
+        void (*post)(Node n, void *data, void **carry_in);
+        void *data;
+    };
+
     //
     // === BINARY DATA OVERLAYS ===
     //
@@ -260,7 +266,7 @@ extern "C" {
      * Data is walked sequentially. Nodes passed to `cb` will be strictly increasing
      * in terms of memory location within the buffer.
      */
-    void espr_walk(char *data, size_t size, void (*cb)(Node n, void *pt), void *pt);
+    void espr_walk(char *data, size_t size, struct walker_callbacks cb);
 
     /* `espr_print` prints the header of every group and record in the given
      * esp/esm binary data.
diff --git a/espReader/Reader.c b/espReader/Reader.c
index bbc99dc..88ae704 100644
--- a/espReader/Reader.c
+++ b/espReader/Reader.c
@@ -26,9 +26,9 @@ const int year_offset = 9;
 void asserts(void);
 
 // Tree walkers
-char *walk_concat(char *data, size_t size, void (*cb)(Node n, void *pt), void *pt);
-char *walk_group(char *data, void (*cb)(Node n, void *pt), void *pt);
-char *walk_record(char *data, void (*cb)(Node n, void *pt), void *pt);
+char *walk_concat(char *data, size_t size, struct walker_callbacks cb);
+char *walk_group(char *data, struct walker_callbacks cb);
+char *walk_record(char *data, struct walker_callbacks cb);
 
 // Header printers
 void print_group_header(Group *header);
@@ -43,10 +43,11 @@ void print_type4(Type4 val);
 
 // Utilities
 Timestamp convert_ts(uint16_t ts);
-void print_callback(Node n, void *_);
-void dc_size_cb(Node n, void *dc_size_ptr);
-void formid_count_cb(Node n, void *count_ptr);
-void decompress_cb(Node n, void *decom_ptr);
+void print_callback(Node n, void *data, void **carry_out);
+void dc_size_cb(Node n, void *data, void **carry_out);
+void formid_count_cb(Node n, void *data, void **carry_out);
+void decompress_pre(Node n, void *data, void **carry_out);
+void decompress_post(Node n, void *data, void **carry_in);
 //
 // === FUNCTIONS ===
 //
@@ -60,7 +61,7 @@ void asserts(void) {
     assert(sizeof(Field) == 6); // Field struct incorrect size
 }
 
-void espr_walk(char *data, size_t size, void (*cb)(Node n, void *pt), void *pt) {
+void espr_walk(char *data, size_t size, struct walker_callbacks cb) {
     // check assertions that cannot be checked at compile time
     asserts();
 
@@ -70,16 +71,18 @@ void espr_walk(char *data, size_t size, void (*cb)(Node n, void *pt), void *pt)
 
     const Type4 type = *(const Type4 *)data;
     assert(type.uint == rt[TES4]);
-    data = walk_concat(data, size, cb, pt);
+    data = walk_concat(data, size, cb);
     assert(data == data_start + size);
 }
 
 void espr_print(char *data, size_t size) {
-    espr_walk(data, size, print_callback, NULL);
+    struct walker_callbacks cb = { .pre = print_callback };
+    espr_walk(data, size, cb);
 }
 
-void print_callback(Node n, void *pt) {
-    (void)pt;
+void print_callback(Node n, void *data, void **carry_out) {
+    (void)data;
+    (void)carry_out;
     switch (n.type) {
     case NT_GROUP:
         print_group_header(n.header.group);
@@ -94,26 +97,28 @@ void print_callback(Node n, void *pt) {
 
 size_t espr_decompressed_size(char *data, size_t size) {
     size_t dc_size = 0;
-    espr_walk(data, size, dc_size_cb, &dc_size);
+    struct walker_callbacks cb = { .pre = dc_size_cb, .data = &dc_size };
+    espr_walk(data, size, cb);
     return dc_size;
 }
 
 // Adds the size of every node up, reading decompressed size from compressed records.
-void dc_size_cb(Node n, void *dc_size_ptr) {
-    size_t *dcsp = dc_size_ptr;
+void dc_size_cb(Node n, void *data, void **carry_out) {
+    (void)carry_out;
+    size_t *dc_size = data;
     switch (n.type) {
     case NT_GROUP:
         // Only add header size for groups, internals will be walked
-        *dcsp += sizeof(Group);
+        *dc_size += sizeof(Group);
         break;
     case NT_RECORD:
         // Add the whole record and header, records are leaf-ish
-        *dcsp += sizeof(Record);
+        *dc_size += sizeof(Record);
         if (n.header.record->flags & COMPRESSED_FLAG) {
             // Read decompressed size
-            *dcsp += *((uint32_t *)n.data);
+            *dc_size += *((uint32_t *)n.data);
         } else
-            *dcsp += n.header.record->size;
+            *dc_size += n.header.record->size;
         break;
     default:
         assert(false); // invalid node type
@@ -122,7 +127,8 @@ void dc_size_cb(Node n, void *dc_size_ptr) {
 
 size_t espr_formid_count(char *data, size_t size) {
     size_t count = 0;
-    espr_walk(data, size, formid_count_cb, &count);
+    struct walker_callbacks cb = { .pre = formid_count_cb, .data = &count };
+    espr_walk(data, size, cb);
     return count;
 }
 
@@ -130,67 +136,97 @@ size_t espr_formid_count(char *data, size_t size) {
  * groups have formids, and every record should have a unique formid,
  * otherwise there would be clashes in the id space.
  */
-void formid_count_cb(Node n, void *count_ptr) {
-    size_t *c = count_ptr;
+void formid_count_cb(Node n, void *data, void **carry_out) {
+    (void)carry_out;
+    size_t *count = data;
     if (n.type == NT_RECORD) {
-        (*c)++;
+        (*count)++;
     }
 }
 
 struct decom {
     char *buf;
-    char *start;
     size_t remaining;
 };
 
 void espr_decompress(char *data, size_t size, char *buf, size_t buf_size) {
-    struct decom s = { .buf = buf, .start = data, .remaining = buf_size };
-    espr_walk(data, size, decompress_cb, &s);
-
-    // handle final segment
-    size_t remaining = buf_size - (s.buf - buf);
-    assert(remaining == s.remaining);
-    memcpy(s.buf, s.start, remaining);
+    struct decom s = { .buf = buf, .remaining = buf_size };
+    struct walker_callbacks cb = { .pre = decompress_pre, .post = decompress_post, .data = &s };
+    espr_walk(data, size, cb);
 }
 
-void decompress_cb(Node n, void *decom_ptr) {
+void decompress_pre(Node n, void *decom_ptr, void **carry_out) {
     struct decom *d = decom_ptr;
 
-    // only need to do anything when we find a compressed flag
-    if (n.type == NT_RECORD && n.header.record->flags & COMPRESSED_FLAG) {
-        // uncompressed segment copy
-        size_t size = n.data - d->start;
-        assert(size < d->remaining);
-        memcpy(d->buf, d->start, size);
+    switch (n.type) {
+    case NT_RECORD:
+        // compressed record
+        if (n.header.record->flags & COMPRESSED_FLAG) {
+            // copy header
+            memcpy(d->buf, n.header.record, sizeof(Record));
 
-        // update decom struct
-        d->remaining -= size;
-        d->buf += size;
+            // copied header reference
+            Record *header = (Record *)d->buf;
 
-        // copied header
-        Record *header = (Record *)(d->buf) - 1;
+            // update decom struct
+            d->remaining -= sizeof(Record);
+            d->buf += sizeof(Record);
 
-        // decompress directly into buffer
-        const size_t dc_size = *((uint32_t *)n.data);
-        size_t to_copy = dc_size;
-        size_t cur_size = n.header.record->size - sizeof(uint32_t);
-        char *data_start = n.data + sizeof(uint32_t);
-        int ret = uncompress(d->buf, &to_copy, data_start, cur_size);
-        assert(ret == Z_OK);
-        assert(to_copy == dc_size);
-
-        // update decom struct
-        d->remaining -= dc_size;
-        d->buf += dc_size;
+            // decompress directly into buffer
+            // first 4 bytes are the decompressed size
+            const size_t dc_size = *((uint32_t *)n.data);
+            size_t to_copy = dc_size;
+            size_t cur_size = n.header.record->size - sizeof(uint32_t);
+            char *data_start = n.data + sizeof(uint32_t);
+            int ret = uncompress(d->buf, &to_copy, data_start, cur_size);
+            assert(ret == Z_OK);
+            assert(to_copy == dc_size);
 
-        // update start to start of next record/group
-        d->start = n.data + n.header.record->size;
+            // update decom struct
+            d->remaining -= dc_size;
+            d->buf += dc_size;
 
-        // update header data size
-        header->size = dc_size;
+            // update header data size
+            header->size = dc_size;
 
-        // unset compressed flag
-        header->flags &= ~COMPRESSED_FLAG;
+            // unset compressed flag
+            header->flags &= ~COMPRESSED_FLAG;
+        }
+        else {
+            // copy record
+            size_t record_size = sizeof(Record) + n.header.record->size;
+            memcpy(d->buf, n.header.record, record_size);
+
+            // update decom
+            d->remaining -= record_size;
+            d->buf += record_size;
+        }
+        break;
+    case NT_GROUP:
+        // copy header, contents will be copied while walking
+        memcpy(d->buf, n.header.group, sizeof(Group));
+
+        // save copied header location for post-walk group size recalc
+        *carry_out = (void *)d->buf;
+
+        // update decom
+        d->buf += sizeof(Group);
+        d->remaining -= sizeof(Group);
+
+        break;
+    default:
+        assert(false); // invalid node type
+    }
+}
+
+void decompress_post(Node n, void *decom_ptr, void **carry_in) {
+    struct decom *d = decom_ptr;
+
+    // only need to handle group resize
+    if (n.type == NT_GROUP) {
+        Group *g = (Group *)(*carry_in);
+        size_t new_size = (char *)d->buf - (char *)g;
+        g->size = new_size;
    }
 }
 
@@ -199,7 +235,7 @@ void decompress_cb(Node n, void *decom_ptr) {
  * `walk_concat` will call the appropriate walking function
  * for each segment of unknown data in this concatenation.
  */
-char *walk_concat(char *data, size_t size, void (*cb)(Node n, void *pt), void *pt) {
+char *walk_concat(char *data, size_t size, struct walker_callbacks cb) {
     const char *end = data + size;
     while (data != end) {
         assert(data < end);
@@ -211,9 +247,9 @@ char *walk_concat(char *data, size_t size, void (*cb)(Node n, void *pt), void *p
 
         // only need to distinguish between groups and records
         if (type->uint == rt[GRUP])
-            data = walk_group(data, cb, pt);
+            data = walk_group(data, cb);
         else
-            data = walk_record(data, cb, pt);
+            data = walk_record(data, cb);
     }
     return data;
 }
@@ -223,7 +259,7 @@ char *walk_concat(char *data, size_t size, void (*cb)(Node n, void *pt), void *p
  *
  * This function will also call `cb` with the node constructed from this group record.
  */
-char *walk_group(char *data, void (*cb)(Node n, void *pt), void *pt) {
+char *walk_group(char *data, struct walker_callbacks cb) {
     Group *const header = (Group *const)data;
 
     // The size in the group header includes the size of the header
@@ -231,29 +267,49 @@ char *walk_group(char *data, void (*cb)(Node n, void *pt), void *pt) {
     char *data_end = data + header->size;
     size_t data_size = data_end - data_start;
 
-    // Callback
     Node n = { .header.group = header, .data = data_start, .type = NT_GROUP };
-    cb(n, pt);
+    void *carry;
+
+    // Pre-walk callback
+    if (cb.pre)
+        cb.pre(n, cb.data, &carry);
 
     // Walk through the concatenation of data inside the group.
-    data = walk_concat(data_start, data_size, cb, pt);
+    data = walk_concat(data_start, data_size, cb);
     assert(data == data_end);
 
+    // Post-walk callback
+    if (cb.post)
+        cb.post(n, cb.data, &carry);
+
     return data;
 }
 
-char *walk_record(char *data, void (*cb)(Node n, void *pt), void *pt) {
+char *walk_record(char *data, struct walker_callbacks cb) {
     Record *header = (Record *)data;
     assert(header->type.uint != rt[GRUP]);
 
     char *data_start = data + sizeof(Record);
 
-    // Callback
     Node n = { .header.record = header, .data = data_start, .type = NT_RECORD };
-    cb(n, pt);
+    void *carry;
+
+    /* Pre- and post-walk callbacks make less sense for record walking, as records
+     * are leaf-ish; both are still called here for now, since field walking may be
+     * added in the future.
+     */
+
+    // Pre-walk callback
+    if (cb.pre)
+        cb.pre(n, cb.data, &carry);
 
     // Update data ptr based on record size.
    data += sizeof(Record) + header->size;
+
+    // Post-walk callback
+    if (cb.post)
+        cb.post(n, cb.data, &carry);
+
     return data;
 }
 
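Usage sketch (not part of the diff): the new `struct walker_callbacks` pairs a `pre` callback, fired when a node is first reached, with an optional `post` callback fired after a group's contents have been walked, and the per-node `carry` slot passes a value from a group's `pre` call to its matching `post` call, as `decompress_pre`/`decompress_post` do above. The snippet below only assumes the names visible in this diff (`Node`, `NT_GROUP`, `struct walker_callbacks`, `espr_walk`); the include path and the depth-tracking helpers are hypothetical.

#include <stddef.h>

#include "ESPReader.h"

// Hypothetical caller state, shared with the callbacks through the .data pointer.
struct depth_state {
    size_t depth;     // current group nesting depth
    size_t max_depth; // deepest nesting seen so far
};

// Pre-walk: entering a group increases the depth.
static void depth_pre(Node n, void *data, void **carry_out) {
    (void)carry_out;
    struct depth_state *s = data;
    if (n.type == NT_GROUP) {
        s->depth++;
        if (s->depth > s->max_depth)
            s->max_depth = s->depth;
    }
}

// Post-walk: called after a group's contents have been walked; leaving restores the depth.
static void depth_post(Node n, void *data, void **carry_in) {
    (void)carry_in;
    struct depth_state *s = data;
    if (n.type == NT_GROUP)
        s->depth--;
}

// Hypothetical helper: deepest group nesting in an esp/esm buffer.
static size_t max_group_depth(char *data, size_t size) {
    struct depth_state s = { 0, 0 };
    struct walker_callbacks cb = { .pre = depth_pre, .post = depth_post, .data = &s };
    espr_walk(data, size, cb);
    return s.max_depth;
}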