From 9f254f0c7b03236be615b1235cf3fc765d6000ea Mon Sep 17 00:00:00 2001 From: 3gg <3gg@shellblade.net> Date: Thu, 13 Jul 2023 08:22:18 -0700 Subject: Add mem allocator, remove listpool. --- mem/CMakeLists.txt | 26 ++++++ mem/include/mem.h | 149 +++++++++++++++++++++++++++++++++ mem/src/mem.c | 183 +++++++++++++++++++++++++++++++++++++++++ mem/test/mem_test.c | 232 ++++++++++++++++++++++++++++++++++++++++++++++++++++ mem/test/test.h | 185 +++++++++++++++++++++++++++++++++++++++++ 5 files changed, 775 insertions(+) create mode 100644 mem/CMakeLists.txt create mode 100644 mem/include/mem.h create mode 100644 mem/src/mem.c create mode 100644 mem/test/mem_test.c create mode 100644 mem/test/test.h (limited to 'mem') diff --git a/mem/CMakeLists.txt b/mem/CMakeLists.txt new file mode 100644 index 0000000..233d2be --- /dev/null +++ b/mem/CMakeLists.txt @@ -0,0 +1,26 @@ +cmake_minimum_required(VERSION 3.0) + +project(mem) + +# Library + +add_library(mem + src/mem.c) + +target_include_directories(mem PUBLIC + include) + +target_link_libraries(mem + list) + +target_compile_options(mem PRIVATE -Wall -Wextra) + +# Test + +add_executable(mem_test + test/mem_test.c) + +target_link_libraries(mem_test + mem) + +target_compile_options(mem_test PRIVATE -DUNIT_TEST -DNDEBUG -Wall -Wextra) diff --git a/mem/include/mem.h b/mem/include/mem.h new file mode 100644 index 0000000..30c24fc --- /dev/null +++ b/mem/include/mem.h @@ -0,0 +1,149 @@ +/* + * Block-based Memory Allocator. + * + * Clients should use the macros to define and use allocators. They make the API + * type-safe. + * + * Like a pool/block-based allocator, this allocator stores data in fixed-size + * blocks. However, this allocator also supports allocation of contiguous chunks + * of a variable number of blocks. + * + * Chunk information is stored in a separate array so that client data is + * contiguous in the main pool of memory and better cached. + */ +#pragma once + +#include +#include +#include +#include + +/// Define a typed memory allocator backed by a statically-allocated array. +#define DEF_MEM(MEM, TYPE, NUM_BLOCKS) \ + typedef struct MEM { \ + Memory mem; \ + Chunk chunks[NUM_BLOCKS]; \ + TYPE blocks[NUM_BLOCKS]; \ + } MEM; + +/// Define a typed memory allocator backed by a dynamically-allocated array. +#define DEF_MEM_DYN(MEM, TYPE) \ + typedef struct MEM { \ + Memory mem; \ + Chunk* chunks; \ + TYPE* blocks; \ + } MEM; + +/// Initialize a statically-backed memory allocator. +#define mem_make(MEM) \ + { \ + assert(MEM); \ + const size_t block_size = sizeof((MEM)->blocks[0]); \ + const size_t num_blocks = sizeof((MEM)->blocks) / block_size; \ + mem_make_( \ + &(MEM)->mem, (MEM)->chunks, (MEM)->blocks, num_blocks, block_size); \ + } + +/// Initialize a dynamically-backed memory allocator. +#define mem_make_dyn(MEM, num_blocks, block_size) \ + mem_make_(&(MEM)->mem, 0, 0, num_blocks, block_size) + +/// Destroy the allocator. +/// +/// If the allocator is dynamically-backed, then this function frees the +/// underlying memory. +#define mem_del(MEM) mem_del_(&(MEM)->mem) + +/// Clear the allocator. +/// +/// This function frees all of the allocator's blocks. The resulting allocator +/// is as if it were newly created. +#define mem_clear(MEM) mem_clear_(&(MEM)->mem) + +/// Allocate a new chunk of N blocks. +/// Return a pointer to the first block of the chunk, or 0 if there is no memory +/// left. +/// New chunks are conveniently zeroed out. +#define mem_alloc(MEM, num_blocks) mem_alloc_(&(MEM)->mem, num_blocks) + +/// Free the chunk. 
+/// The chunk pointer is conveniently set to 0. +#define mem_free(MEM, CHUNK) mem_free_(&(MEM)->mem, (void**)CHUNK) + +/// Return a pointer to a chunk given the chunk's handle. +/// The chunk must have been allocated. +#define mem_get_chunk(MEM, HANDLE) \ + ((__typeof__((MEM)->blocks[0])*)mem_get_chunk_(&(MEM)->mem, HANDLE)) + +/// Get the handle to the given chunk. +#define mem_get_chunk_handle(MEM, CHUNK_PTR) \ + mem_get_chunk_handle_(&(MEM)->mem, CHUNK_PTR) + +/// Iterate over the used chunks of the allocator. +/// +/// The caller can use 'i' as the index of the current chunk. +/// +/// It is valid to mem_free() the chunk at each step of the iteration. +#define mem_foreach(MEM, ITER, BODY) \ + size_t i = 0; \ + do { \ + if ((MEM)->chunks[i].used) { \ + __typeof__((MEM)->blocks[0])* ITER = &(MEM)->blocks[i]; \ + (void)ITER; \ + BODY; \ + } \ + i = (MEM)->chunks[i].next; \ + } while (i); + +// ----------------------------------------------------------------------------- + +/// Chunk information. +/// +/// Every chunk represents a contiguous array of some number of blocks. The +/// allocator begins as one big unused chunk. +/// +/// Allocation looks for a free chunk large enough to hold to requested number +/// of blocks. If the free chunk is larger than the requested chunk size, then +/// the requested chunk is carved out of the larger block. +/// +/// Deallocation frees the chunk back and merges it with free neighbouring +/// chunks. Two free chunks are never contiguous in memory. +/// +/// 'next' and 'prev' always point to a valid chunk (e.g., 0). Allocation stops +/// looking for free chunks when it loops over. +typedef struct Chunk { + size_t num_blocks; + size_t prev; + size_t next; + bool used; +} Chunk; + +typedef struct Memory { + size_t block_size_bytes; + size_t num_blocks; + size_t next_free_chunk; + bool dynamic; /// True if blocks and chunks are dynamically-allocated. + Chunk* chunks; /// Array of chunk information. + uint8_t* blocks; /// Array of blocks; +} Memory; + +/// Create a memory allocator. +/// +/// 'chunks' and 'blocks' may be user-provided (statically-backed allocator) or +/// null (dynamically-backed allocator). +/// - If null, the allocator malloc()s the memory for them. +/// - If given: +/// - `chunks` must be at least `num_blocks` chunks. +/// - `blocks` must be at least `num_blocks` * `block_size_bytes` bytes. +/// +/// All blocks are zeroed out for convenience. +bool mem_make_( + Memory* mem, Chunk* chunks, void* blocks, size_t num_blocks, + size_t block_size_bytes); + +void mem_del_(Memory*); +void mem_clear_(Memory*); +void* mem_alloc_(Memory*, size_t num_blocks); +void mem_free_(Memory*, void** chunk_ptr); +void* mem_get_chunk_(const Memory*, size_t chunk_handle); +size_t mem_get_chunk_handle_(const Memory*, const void* chunk); diff --git a/mem/src/mem.c b/mem/src/mem.c new file mode 100644 index 0000000..ff97f0f --- /dev/null +++ b/mem/src/mem.c @@ -0,0 +1,183 @@ +#include "mem.h" + +#include +#include + +bool mem_make_( + Memory* mem, Chunk* chunks, void* blocks, size_t num_blocks, + size_t block_size_bytes) { + assert(mem); + assert((chunks && blocks) || (!chunks && !blocks)); + assert(num_blocks >= 1); + + mem->block_size_bytes = block_size_bytes; + mem->num_blocks = num_blocks; + mem->next_free_chunk = 0; + + // Allocate chunks and blocks if necessary and zero them out. 
+ if (!chunks) { + chunks = calloc(num_blocks, sizeof(Chunk)); + blocks = calloc(num_blocks, block_size_bytes); + mem->dynamic = true; + if (!chunks || !blocks) { + return false; + } + } else { + memset(blocks, 0, num_blocks * block_size_bytes); + memset(chunks, 0, num_blocks * sizeof(Chunk)); + mem->dynamic = false; + } + mem->chunks = chunks; + mem->blocks = blocks; + + // Initialize the head as one large free chunk. + Chunk* head = &mem->chunks[0]; + head->num_blocks = num_blocks; + + return true; +} + +void mem_del_(Memory* mem) { + assert(mem); + if (mem->dynamic) { + if (mem->chunks) { + free(mem->chunks); + mem->chunks = 0; + } + if (mem->blocks) { + free(mem->blocks); + mem->blocks = 0; + } + } +} + +void mem_clear_(Memory* mem) { + assert(mem); + mem->next_free_chunk = 0; + memset(mem->blocks, 0, mem->num_blocks * mem->block_size_bytes); + memset(mem->chunks, 0, mem->num_blocks * sizeof(Chunk)); + + // Initialize the head as one large free chunk. + Chunk* head = &mem->chunks[0]; + head->num_blocks = mem->num_blocks; +} + +void* mem_alloc_(Memory* mem, size_t num_blocks) { + assert(mem); + assert(num_blocks >= 1); + + // Search for the first free chunk that can accommodate num_blocks. + const size_t start = mem->next_free_chunk; + size_t chunk_idx = start; + bool found = false; + do { + Chunk* chunk = &mem->chunks[chunk_idx]; + if (!chunk->used) { + if (chunk->num_blocks > num_blocks) { + // Carve out a smaller chunk when the found chunk is larger than + // requested. + // [prev] <--> [chunk] <--> [new next] <--> [next] + const size_t new_next_idx = chunk_idx + num_blocks; + Chunk* new_next = &mem->chunks[new_next_idx]; + if (chunk->next) { + mem->chunks[chunk->next].prev = new_next_idx; + } + new_next->prev = chunk_idx; + new_next->next = chunk->next; + chunk->next = new_next_idx; + + new_next->num_blocks = chunk->num_blocks - num_blocks; + chunk->num_blocks = num_blocks; + + chunk->used = true; + found = true; + break; + } else if (chunk->num_blocks == num_blocks) { + chunk->used = true; + found = true; + break; + } + } + chunk_idx = chunk->next; // Last chunk points back to 0, which is always the + // start of some chunk. 'next' and 'prev' are + // always valid pointers. + } while (chunk_idx != start); + + if (found) { + mem->next_free_chunk = mem->chunks[chunk_idx].next; + return &mem->blocks[chunk_idx * mem->block_size_bytes]; + } else { + return 0; // Large-enough free chunk not found. + } +} + +// The given pointer is a pointer to this first block of the chunk. +void mem_free_(Memory* mem, void** chunk_ptr) { + assert(mem); + assert(chunk_ptr); + + const size_t chunk_idx = + ((uint8_t*)*chunk_ptr - mem->blocks) / mem->block_size_bytes; + assert(chunk_idx < mem->num_blocks); + Chunk* chunk = &mem->chunks[chunk_idx]; + + // Disallow double-frees. + assert(chunk->used); + + // Zero out the chunk so that we don't get stray values the next time it is + // allocated. + memset(&mem->blocks[chunk_idx], 0, chunk->num_blocks * mem->block_size_bytes); + + // Free the chunk. If it is contiguous with other free chunks, then merge. + // We only need to look at the chunk's immediate neighbours because no two + // free chunks are left contiguous after merging. 
+ chunk->used = false; + if (chunk->next) { + Chunk* next = &mem->chunks[chunk->next]; + if (!next->used) { + // Pre: [chunk] <--> [next] <--> [next next] + // Post: [ chunk + next ] <--> [next next] + chunk->num_blocks += mem->chunks[chunk->next].num_blocks; + chunk->next = next->next; + if (next->next) { + Chunk* next_next = &mem->chunks[next->next]; + next_next->prev = chunk_idx; + } + next->prev = next->next = next->num_blocks = 0; + } + } + if (chunk->prev) { + Chunk* prev = &mem->chunks[chunk->prev]; + if (!prev->used) { + // Pre: [prev] <--> [chunk] <--> [next] + // Post: [ prev + chunk ] <--> [next] + prev->num_blocks += chunk->num_blocks; + prev->next = chunk->next; + if (chunk->next) { + Chunk* next = &mem->chunks[chunk->next]; + next->prev = chunk->prev; + } + chunk->prev = chunk->next = chunk->num_blocks = 0; + } + } + + *chunk_ptr = 0; +} + +// The handle is the chunk's index. We don't call it an index in the public API +// because from the user's perspective, two chunks allocated back-to-back need +// not be +1 away (the offset depends on how large the first chunk is). +void* mem_get_chunk_(const Memory* mem, size_t chunk_handle) { + assert(mem); + assert(chunk_handle < mem->num_blocks); + assert(mem->chunks[chunk_handle].used); + return &mem->blocks[chunk_handle * mem->block_size_bytes]; +} + +// The given chunk pointer is a pointer to the blocks array. +size_t mem_get_chunk_handle_(const Memory* mem, const void* chunk) { + assert(mem); + const size_t block_byte_index = (const uint8_t*)chunk - mem->blocks; + assert(block_byte_index % mem->block_size_bytes == 0); + return block_byte_index / mem->block_size_bytes; +} diff --git a/mem/test/mem_test.c b/mem/test/mem_test.c new file mode 100644 index 0000000..6ab4c7c --- /dev/null +++ b/mem/test/mem_test.c @@ -0,0 +1,232 @@ +#include "mem.h" + +#include "test.h" + +#define NUM_BLOCKS 10 + +DEF_MEM(test_mem, int, NUM_BLOCKS); + +static int count(test_mem* mem) { + int count = 0; + mem_foreach(mem, n, { count++; }); + return count; +} + +static int sum(test_mem* mem) { + int sum = 0; + mem_foreach(mem, n, { sum += *n; }); + return sum; +} + +// Create a statically-backed allocator. +TEST_CASE(mem_create) { + test_mem mem; + mem_make(&mem); +} + +// Create a dynamically-backed allocator. +TEST_CASE(mem_create_dyn) { + DEF_MEM_DYN(dyn_mem, int); + + dyn_mem mem; + mem_make_dyn(&mem, NUM_BLOCKS, sizeof(int)); +} + +// Allocate N chunks of 1 block each. +TEST_CASE(mem_fully_allocate) { + test_mem mem; + mem_make(&mem); + + for (int i = 0; i < NUM_BLOCKS; ++i) { + const int* block = mem_alloc(&mem, 1); + TEST_TRUE(block != 0); + } +} + +// Allocate N chunks of 1 block each, then free them. +TEST_CASE(mem_fill_then_free) { + test_mem mem; + mem_make(&mem); + + int* blocks[NUM_BLOCKS] = {0}; + for (int i = 0; i < NUM_BLOCKS; i++) { + blocks[i] = mem_alloc(&mem, 1); + TEST_TRUE(blocks[i] != 0); + } + + for (int i = 0; i < NUM_BLOCKS; i++) { + mem_free(&mem, &blocks[i]); + TEST_EQUAL(blocks[i], 0); // Pointer should be set to 0 on free. + } + + TEST_EQUAL(count(&mem), 0); +} + +// Attempt to allocate blocks past the maximum allocator size. +// The allocator should handle the failed allocations gracefully. +TEST_CASE(mem_allocate_beyond_max_size) { + test_mem mem; + mem_make(&mem); + + // Fully allocate the mem. + for (int i = 0; i < NUM_BLOCKS; ++i) { + TEST_TRUE(mem_alloc(&mem, 1) != 0); + } + + // Past the end. 
+ for (int i = 0; i < NUM_BLOCKS; ++i) { + TEST_EQUAL(mem_alloc(&mem, 1), 0); + } +} + +// Free blocks should always remain zeroed out. +// This tests the invariant right after creating the allocator. +TEST_CASE(mem_zero_free_blocks_after_creation) { + test_mem mem; + mem_make(&mem); + + const int zero = 0; + for (int i = 0; i < NUM_BLOCKS; ++i) { + const int* block = (const int*)(mem.blocks) + i; + TEST_EQUAL(memcmp(block, &zero, sizeof(int)), 0); + } +} + +// Free blocks should always remain zeroed out. +// This tests the invariant after freeing a block. +TEST_CASE(mem_zero_free_block_after_free) { + test_mem mem; + mem_make(&mem); + + int* val = mem_alloc(&mem, 1); + TEST_TRUE(val != 0); + *val = 177; + + int* old_val = val; + mem_free(&mem, &val); // val pointer is set to 0. + TEST_EQUAL(*old_val, 0); // Block is zeroed out after free. +} + +// Traverse an empty allocator. +TEST_CASE(mem_traverse_empty) { + test_mem mem; + mem_make(&mem); + + TEST_EQUAL(count(&mem), 0); +} + +// Traverse a partially full allocator. +TEST_CASE(mem_traverse_partially_full) { + const int N = NUM_BLOCKS / 2; + + test_mem mem; + mem_make(&mem); + + for (int i = 0; i < N; ++i) { + int* val = mem_alloc(&mem, 1); + TEST_TRUE(val != 0); + *val = i + 1; + } + + TEST_EQUAL(sum(&mem), (N) * (N + 1) / 2); +} + +// Traverse a full allocator. +TEST_CASE(mem_traverse_full) { + test_mem mem; + mem_make(&mem); + + for (int i = 0; i < NUM_BLOCKS; ++i) { + int* val = mem_alloc(&mem, 1); + TEST_TRUE(val != 0); + *val = i + 1; + } + + TEST_EQUAL(sum(&mem), (NUM_BLOCKS) * (NUM_BLOCKS + 1) / 2); +} + +// Get the ith (allocated) chunk. +TEST_CASE(mem_get_block) { + test_mem mem; + mem_make(&mem); + + for (int i = 0; i < NUM_BLOCKS; ++i) { + int* block = mem_alloc(&mem, 1); + TEST_TRUE(block != 0); + *block = i; + TEST_EQUAL(mem_get_chunk_handle(&mem, block), (size_t)i); + } + + for (int i = 0; i < NUM_BLOCKS; ++i) { + TEST_EQUAL(*mem_get_chunk(&mem, i), i); + } +} + +// Test merging. +// 1. Allocate chunks of variable sizes. +// 2. Free them in a different order. +// 3. Then we should be able to allocate 1 chunk of N blocks. +TEST_CASE(mem_fragmentation) { + test_mem mem; + mem_make(&mem); + + int* blocks[NUM_BLOCKS] = {0}; + int next_block = 0; + +#define ALLOC(num_blocks) \ + blocks[next_block] = mem_alloc(&mem, num_blocks); \ + TEST_TRUE(blocks[next_block] != 0); \ + next_block++; + +#define FREE(block_idx) mem_free(&mem, &blocks[block_idx]) + + // 5 total allocations of variable chunk sizes. + ALLOC(2); // 2; idx = 0 + ALLOC(3); // 5; idx = 1 + ALLOC(1); // 6; idx = 2 + ALLOC(3); // 9; idx = 3 + ALLOC(1); // 10; idx = 4 + + // Free the 5 allocations in a different order. + FREE(1); + FREE(3); + FREE(4); + FREE(2); + FREE(0); + + // Should be able to allocate 1 chunk of N blocks. + const void* chunk = mem_alloc(&mem, NUM_BLOCKS); + TEST_TRUE(chunk != 0); +} + +// Clear and re-use an allocator. +TEST_CASE(mem_clear_then_reuse) { + test_mem mem; + mem_make(&mem); + + // Allocate chunks, contents not important. + for (int i = 0; i < NUM_BLOCKS; ++i) { + int* chunk = mem_alloc(&mem, 1); + TEST_TRUE(chunk != 0); + } + + mem_clear(&mem); + + // Allocate chunks and assign values 0..N. + for (int i = 0; i < NUM_BLOCKS; ++i) { + int* chunk = mem_alloc(&mem, 1); + TEST_TRUE(chunk != 0); + *chunk = i + 1; + } + + TEST_EQUAL(sum(&mem), NUM_BLOCKS * (NUM_BLOCKS + 1) / 2); +} + +// Stress test. +// +// 1. Allocate the mem, either fully or partially. If fully, attempt to +// allocate some items past the end. +// +// 2. 
Free all allocated items in some random order. + +int main() { return 0; } diff --git a/mem/test/test.h b/mem/test/test.h new file mode 100644 index 0000000..fd8dc22 --- /dev/null +++ b/mem/test/test.h @@ -0,0 +1,185 @@ +// SPDX-License-Identifier: MIT +#pragma once + +#ifdef UNIT_TEST + +#include +#include +#include +#include + +#if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || \ + defined(__NetBSD__) || defined(__OpenBSD__) +#define USE_SYSCTL_FOR_ARGS 1 +// clang-format off +#include +#include +// clang-format on +#include // getpid +#endif + +struct test_file_metadata; + +struct test_failure { + bool present; + const char *message; + const char *file; + int line; +}; + +struct test_case_metadata { + void (*fn)(struct test_case_metadata *, struct test_file_metadata *); + struct test_failure failure; + const char *name; + struct test_case_metadata *next; +}; + +struct test_file_metadata { + bool registered; + const char *name; + struct test_file_metadata *next; + struct test_case_metadata *tests; +}; + +struct test_file_metadata __attribute__((weak)) * test_file_head; + +#define SET_FAILURE(_message) \ + metadata->failure = (struct test_failure) { \ + .message = _message, .file = __FILE__, .line = __LINE__, .present = true, \ + } + +#define TEST_EQUAL(a, b) \ + do { \ + if ((a) != (b)) { \ + SET_FAILURE(#a " != " #b); \ + return; \ + } \ + } while (0) + +#define TEST_TRUE(a) \ + do { \ + if (!(a)) { \ + SET_FAILURE(#a " is not true"); \ + return; \ + } \ + } while (0) + +#define TEST_STREQUAL(a, b) \ + do { \ + if (strcmp(a, b) != 0) { \ + SET_FAILURE(#a " != " #b); \ + return; \ + } \ + } while (0) + +#define TEST_CASE(_name) \ + static void __test_h_##_name(struct test_case_metadata *, \ + struct test_file_metadata *); \ + static struct test_file_metadata __test_h_file; \ + static struct test_case_metadata __test_h_meta_##_name = { \ + .name = #_name, \ + .fn = __test_h_##_name, \ + }; \ + static void __attribute__((constructor(101))) __test_h_##_name##_register(void) { \ + __test_h_meta_##_name.next = __test_h_file.tests; \ + __test_h_file.tests = &__test_h_meta_##_name; \ + if (!__test_h_file.registered) { \ + __test_h_file.name = __FILE__; \ + __test_h_file.next = test_file_head; \ + test_file_head = &__test_h_file; \ + __test_h_file.registered = true; \ + } \ + } \ + static void __test_h_##_name( \ + struct test_case_metadata *metadata __attribute__((unused)), \ + struct test_file_metadata *file_metadata __attribute__((unused))) + +extern void __attribute__((weak)) (*test_h_unittest_setup)(void); +/// Run defined tests, return true if all tests succeeds +/// @param[out] tests_run if not NULL, set to whether tests were run +static inline void __attribute__((constructor(102))) run_tests(void) { + bool should_run = false; +#ifdef USE_SYSCTL_FOR_ARGS + int mib[] = { + CTL_KERN, +#if defined(__NetBSD__) || defined(__OpenBSD__) + KERN_PROC_ARGS, + getpid(), + KERN_PROC_ARGV, +#else + KERN_PROC, + KERN_PROC_ARGS, + getpid(), +#endif + }; + char *arg = NULL; + size_t arglen; + sysctl(mib, sizeof(mib) / sizeof(mib[0]), NULL, &arglen, NULL, 0); + arg = malloc(arglen); + sysctl(mib, sizeof(mib) / sizeof(mib[0]), arg, &arglen, NULL, 0); +#else + FILE *cmdlinef = fopen("/proc/self/cmdline", "r"); + char *arg = NULL; + int arglen; + fscanf(cmdlinef, "%ms%n", &arg, &arglen); + fclose(cmdlinef); +#endif + for (char *pos = arg; pos < arg + arglen; pos += strlen(pos) + 1) { + if (strcmp(pos, "--unittest") == 0) { + should_run = true; + break; + } + } + free(arg); + 
+ if (!should_run) { + return; + } + + if (&test_h_unittest_setup) { + test_h_unittest_setup(); + } + + struct test_file_metadata *i = test_file_head; + int failed = 0, success = 0; + while (i) { + fprintf(stderr, "Running tests from %s:\n", i->name); + struct test_case_metadata *j = i->tests; + while (j) { + fprintf(stderr, "\t%s ... ", j->name); + j->failure.present = false; + j->fn(j, i); + if (j->failure.present) { + fprintf(stderr, "failed (%s at %s:%d)\n", j->failure.message, + j->failure.file, j->failure.line); + failed++; + } else { + fprintf(stderr, "passed\n"); + success++; + } + j = j->next; + } + fprintf(stderr, "\n"); + i = i->next; + } + int total = failed + success; + fprintf(stderr, "Test results: passed %d/%d, failed %d/%d\n", success, total, + failed, total); + exit(failed == 0 ? EXIT_SUCCESS : EXIT_FAILURE); +} + +#else + +#include + +#define TEST_CASE(name) static void __attribute__((unused)) __test_h_##name(void) + +#define TEST_EQUAL(a, b) \ + (void)(a); \ + (void)(b) +#define TEST_TRUE(a) (void)(a) +#define TEST_STREQUAL(a, b) \ + (void)(a); \ + (void)(b) + +#endif -- cgit v1.2.3
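Usage sketch (not part of the commit): a minimal example of how a client might drive the allocator through the typed macros added in mem/include/mem.h. The Particle type, the ParticleMem allocator name, and the 64-block pool size are assumptions made purely for illustration; the calls themselves follow the API defined by this patch.

  #include "mem.h"

  #include <stdio.h>

  typedef struct Particle {
    float x, y;
  } Particle;

  // Typed allocator backed by a statically-allocated array of 64 blocks.
  DEF_MEM(ParticleMem, Particle, 64);

  int main(void) {
    ParticleMem pool;
    mem_make(&pool);

    // Allocate one contiguous chunk of 8 blocks; new chunks come back zeroed.
    Particle* batch = mem_alloc(&pool, 8);
    if (!batch) {
      return 1;  // No contiguous run of 8 free blocks.
    }
    for (int i = 0; i < 8; ++i) {
      batch[i].x = (float)i;
    }

    // A handle can stand in for the pointer, e.g. when serializing.
    const size_t handle = mem_get_chunk_handle(&pool, batch);
    Particle* same = mem_get_chunk(&pool, handle);
    printf("chunk %zu starts at x = %f\n", handle, (double)same->x);

    // Visit the first block of every used chunk.
    mem_foreach(&pool, p, { p->y += 1.0f; });

    // Free the chunk; the macro also sets 'batch' back to 0.
    mem_free(&pool, &batch);

    mem_del(&pool);  // No heap to release for a statically-backed allocator.
    return 0;
  }

A heap-backed pool is declared with DEF_MEM_DYN and created with mem_make_dyn(&pool, num_blocks, sizeof(Particle)) instead, in which case mem_del() frees the dynamically-allocated chunk and block arrays.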