-rw-r--r--  CMakeLists.txt                                       |   2
-rw-r--r--  listpool/CMakeLists.txt                              |  26
-rw-r--r--  listpool/README.md                                   |  14
-rw-r--r--  listpool/include/listpool.h                          | 100
-rw-r--r--  listpool/src/listpool.c                              |  92
-rw-r--r--  listpool/test/listpool_test.c                        | 183
-rw-r--r--  mem/CMakeLists.txt                                   |  26
-rw-r--r--  mem/include/mem.h                                    | 149
-rw-r--r--  mem/src/mem.c                                        | 183
-rw-r--r--  mem/test/mem_test.c                                  | 232
-rw-r--r--  mem/test/test.h (renamed from listpool/test/test.h)  |   0
-rw-r--r--  mempool/include/mempool.h                            |   2
12 files changed, 592 insertions(+), 417 deletions(-)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 498b771..868268d 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -6,7 +6,7 @@ add_subdirectory(cstring)
 add_subdirectory(error)
 add_subdirectory(filesystem)
 add_subdirectory(list)
-add_subdirectory(listpool)
+add_subdirectory(mem)
 add_subdirectory(log)
 add_subdirectory(mempool)
 add_subdirectory(plugin)
diff --git a/listpool/CMakeLists.txt b/listpool/CMakeLists.txt
deleted file mode 100644
index 2dcd983..0000000
--- a/listpool/CMakeLists.txt
+++ /dev/null
@@ -1,26 +0,0 @@
-cmake_minimum_required(VERSION 3.0)
-
-project(listpool)
-
-# Library
-
-add_library(listpool
-  src/listpool.c)
-
-target_include_directories(listpool PUBLIC
-  include)
-
-target_link_libraries(listpool
-  list)
-
-target_compile_options(listpool PRIVATE -Wall -Wextra)
-
-# Test
-
-add_executable(listpool_test
-  test/listpool_test.c)
-
-target_link_libraries(listpool_test
-  listpool)
-
-target_compile_options(listpool_test PRIVATE -DUNIT_TEST -DNDEBUG -Wall -Wextra)
diff --git a/listpool/README.md b/listpool/README.md
deleted file mode 100644
index ed38980..0000000
--- a/listpool/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-# Listpool
-
-A block allocator built from a single, contiguous array of memory that maintains
-free and used blocks in doubly linked lists.
-
-A `listpool` is similar to a `mempool`, but the additional structure allows it
-to:
-
-- Allocate and free blocks in constant time.
-- Traverse used blocks in linear time in the number of used blocks, as opposed
-  to the total number of blocks like in a `mempool`.
-
-A `listpool` otherwise provides the same guarantees and characteristics as a
-`mempool`.
diff --git a/listpool/include/listpool.h b/listpool/include/listpool.h
deleted file mode 100644
index 85a3b27..0000000
--- a/listpool/include/listpool.h
+++ /dev/null
@@ -1,100 +0,0 @@
-#pragma once
-
-#include "list.h"
-
-#include <assert.h>
-#include <stddef.h>
-#include <stdint.h>
-
-/// Define a typed listpool of a given size.
-#define DEF_LISTPOOL(POOL, TYPE, NUM_BLOCKS) \
-  typedef struct POOL { \
-    listpool pool; \
-    list nodes[NUM_BLOCKS]; \
-    TYPE blocks[NUM_BLOCKS]; \
-  } POOL;
-
-/// Creates a new listpool.
-#define listpool_make(POOL) \
-  { \
-    assert(POOL); \
-    const size_t block_size = sizeof((POOL)->blocks[0]); \
-    const size_t num_blocks = sizeof((POOL)->blocks) / block_size; \
-    listpool_make_( \
-        &(POOL)->pool, (POOL)->nodes, (POOL)->blocks, num_blocks, block_size); \
-  }
-
-/// Allocate a new block.
-/// Return 0 if there is no memory left.
-#define listpool_alloc(POOL) listpool_alloc_(&(POOL)->pool)
-
-/// Free the block.
-/// The block pointer is conveniently set to 0.
-#define listpool_free(POOL, ITEM) listpool_free_(&(POOL)->pool, (void**)ITEM)
-
-/// Remove a value from the list.
-/// Defined here instead of DEF_LISTPOOL_IMPL() because not all types may have
-/// an operator==.
-#define listpool_remove(POOL, VAL) \
-  { \
-    listpool_foreach(POOL, iter, { \
-      if (*iter == VAL) { \
-        listpool_free(POOL, &iter); \
-        break; \
-      } \
-    }); \
-  }
-
-/// Return the ith block.
-/// The block must have been allocated.
-#define listpool_get_block(POOL, INDEX) \
-  ((typeof((POOL)->blocks[0])*)listpool_get_block_(&(POOL)->pool, INDEX))
-
-/// Get the index to the given block.
-#define listpool_get_block_index(POOL, BLOCK_PTR) \
-  listpool_get_block_index_(&(POOL)->pool, BLOCK_PTR)
-
-/// Iterate over the used items of the pool.
-///
-/// The caller can use 'i' as the index of the current block.
-///
-/// It is valid to mempool_free() the object at each step of the iteration.
-#define listpool_foreach(POOL, ITER, BODY) \
-  for (list* it_ = (POOL)->pool.used; it_; it_ = it_->next) { \
-    const size_t i = it_ - (POOL)->pool.nodes; \
-    typeof((POOL)->blocks[0])* ITER = &(POOL)->blocks[i]; \
-    (void)ITER; \
-    BODY; \
-  }
-
-typedef struct listpool {
-  size_t block_size_bytes;
-  size_t num_blocks;
-  list* free; // Head of the free list.
-  list* used; // Head of the used list.
-  list* nodes; // Array of nodes.
-  uint8_t* blocks; // Array of blocks;
-} listpool;
-
-/// Create a new list pool from a user-provided array of memory.
-/// `nodes` must have at least `num_blocks` nodes.
-/// `blocks` must be at least `num_blocks` * `block_size_bytes` bytes.
-/// All blocks are zeroed out for convenience.
-void listpool_make_(
-    listpool* pool, list* nodes, void* blocks, size_t num_blocks,
-    size_t block_size_bytes);
-
-/// Allocate a new block.
-/// Return 0 if there is no memory left.
-void* listpool_alloc_(listpool* pool);
-
-/// Free the block.
-/// The block pointer is conveniently set to 0.
-void listpool_free_(listpool* pool, void** block_ptr);
-
-/// Return the ith block.
-/// The block must have been allocated.
-void* listpool_get_block_(const listpool*, size_t block_index);
-
-/// Get the index to the given block.
-size_t listpool_get_block_index_(const listpool*, const void* block);
diff --git a/listpool/src/listpool.c b/listpool/src/listpool.c
deleted file mode 100644
index 758062c..0000000
--- a/listpool/src/listpool.c
+++ /dev/null
@@ -1,92 +0,0 @@
-#include "listpool.h"
-
-#include <string.h>
-
-void listpool_make_(
-    listpool* pool, list* nodes, void* blocks, size_t num_blocks,
-    size_t block_size_bytes) {
-  assert(pool);
-  pool->block_size_bytes = block_size_bytes;
-  pool->num_blocks = num_blocks;
-  pool->free = &nodes[0];
-  pool->used = 0;
-  pool->nodes = nodes;
-  pool->blocks = blocks;
-  list_make(nodes, num_blocks);
-  memset(blocks, 0, num_blocks * block_size_bytes);
-}
-
-void* listpool_alloc_(listpool* pool) {
-  assert(pool);
-  if (!pool->free) {
-    return 0;
-  }
-
-  const size_t index = pool->free - pool->nodes;
-  assert(index < pool->num_blocks);
-
-  list* free = pool->free;
-  pool->free = pool->free->next;
-
-  // pool->used is always the head of the used list, so prepend the new item to
-  // the list.
-  list* new_used = free;
-  new_used->prev = 0;
-  new_used->next = pool->used;
-  if (pool->used) {
-    pool->used->prev = new_used;
-  }
-  pool->used = new_used;
-
-  return pool->blocks + index * pool->block_size_bytes;
-}
-
-void listpool_free_(listpool* pool, void** block_ptr) {
-  assert(pool);
-  assert(block_ptr);
-
-  memset(*block_ptr, 0, pool->block_size_bytes);
-
-  const size_t index =
-      ((uint8_t*)*block_ptr - pool->blocks) / pool->block_size_bytes;
-  assert(index < pool->num_blocks);
-
-  list* item = &pool->nodes[index];
-
-  // We must remove the item from the used list first.
-  if (item->prev) {
-    item->prev->next = item->next;
-  }
-  if (item->next) {
-    item->next->prev = item->prev;
-  }
-  if (item == pool->used) {
-    pool->used = item->next;
-  }
-
-  // pool->free is always the head of the free list, so prepend the new item to
-  // the list. The item is now free to wire after removing it from the used
-  // list.
-  if (!pool->free) {
-    pool->free = item;
-  } else {
-    item->next = pool->free;
-    pool->free->prev = item;
-    pool->free = item;
-  }
-
-  *block_ptr = 0;
-}
-
-void* listpool_get_block_(const listpool* pool, size_t block_index) {
-  assert(pool);
-  assert(block_index < pool->num_blocks);
-  return pool->blocks + block_index * pool->block_size_bytes;
-}
-
-size_t listpool_get_block_index_(const listpool* pool, const void* block) {
-  assert(pool);
-  const size_t block_byte_index = (const uint8_t*)block - pool->blocks;
-  assert(block_byte_index % pool->block_size_bytes == 0);
-  return block_byte_index / pool->block_size_bytes;
-}
diff --git a/listpool/test/listpool_test.c b/listpool/test/listpool_test.c
deleted file mode 100644
index 7a7b0cf..0000000
--- a/listpool/test/listpool_test.c
+++ /dev/null
@@ -1,183 +0,0 @@
-#include "listpool.h"
-
-#include "test.h"
-
-#define NUM_BLOCKS 10
-
-DEF_LISTPOOL(test_pool, int, NUM_BLOCKS);
-
-static int count(test_pool* pool) {
-  int count = 0;
-  listpool_foreach(pool, n, { count++; });
-  return count;
-}
-
-static int sum(test_pool* pool) {
-  int sum = 0;
-  listpool_foreach(pool, n, { sum += *n; });
-  return sum;
-}
-
-// Create a pool.
-TEST_CASE(listpool_create) {
-  test_pool pool;
-  listpool_make(&pool);
-}
-
-// Allocate all N blocks.
-TEST_CASE(listpool_fully_allocate) {
-  test_pool pool;
-  listpool_make(&pool);
-
-  for (int i = 0; i < NUM_BLOCKS; ++i) {
-    const int* block = listpool_alloc(&pool);
-    TEST_TRUE(block != 0);
-  }
-}
-
-// Allocate all N blocks, then free them.
-TEST_CASE(listpool_fill_then_free) {
-  test_pool pool;
-  listpool_make(&pool);
-
-  int* blocks[NUM_BLOCKS] = {0};
-  for (int i = 0; i < NUM_BLOCKS; i++) {
-    blocks[i] = listpool_alloc(&pool);
-    TEST_TRUE(blocks[i] != 0);
-  }
-
-  for (int i = 0; i < NUM_BLOCKS; i++) {
-    listpool_free(&pool, &blocks[i]);
-    TEST_EQUAL(blocks[i], 0); // Pointer should be set to 0 on free.
-  }
-
-  TEST_EQUAL(count(&pool), 0);
-}
-
-// Attempt to allocate blocks past the maximum pool size.
-// The pool should handle the failed allocations gracefully.
-TEST_CASE(listpool_allocate_beyond_max_size) {
-  test_pool pool;
-  listpool_make(&pool);
-
-  // Fully allocate the pool.
-  for (int i = 0; i < NUM_BLOCKS; ++i) {
-    TEST_TRUE(listpool_alloc(&pool) != 0);
-  }
-
-  // Past the end.
-  for (int i = 0; i < NUM_BLOCKS; ++i) {
-    TEST_EQUAL(listpool_alloc(&pool), 0);
-  }
-}
-
-// Free blocks should always remain zeroed out.
-// This tests the invariant right after creating the pool.
-TEST_CASE(listpool_zero_free_blocks_after_creation) {
-  test_pool pool;
-  listpool_make(&pool);
-
-  const int zero = 0;
-  for (int i = 0; i < NUM_BLOCKS; ++i) {
-    const int* block = (const int*)(pool.blocks) + i;
-    TEST_EQUAL(memcmp(block, &zero, sizeof(int)), 0);
-  }
-}
-
-// Free blocks should always remain zeroed out.
-// This tests the invariant after freeing a block.
-TEST_CASE(listpool_zero_free_block_after_free) {
-  test_pool pool;
-  listpool_make(&pool);
-
-  int* val = listpool_alloc(&pool);
-  TEST_TRUE(val != 0);
-  *val = 177;
-
-  int* old_val = val;
-  listpool_free(&pool, &val); // val pointer is set to 0.
-  TEST_EQUAL(*old_val, 0);    // Block is zeroed out after free.
-}
-
-// Traverse an empty pool.
-TEST_CASE(listpool_traverse_empty) {
-  test_pool pool;
-  listpool_make(&pool);
-
-  TEST_EQUAL(count(&pool), 0);
-}
-
-// Traverse a partially full pool.
-TEST_CASE(listpool_traverse_partially_full) {
-  const int N = NUM_BLOCKS / 2;
-
-  test_pool pool;
-  listpool_make(&pool);
-
-  for (int i = 0; i < N; ++i) {
-    int* val = listpool_alloc(&pool);
-    TEST_TRUE(val != 0);
-    *val = i + 1;
-  }
-
-  TEST_EQUAL(sum(&pool), (N) * (N + 1) / 2);
-}
-
-// Traverse a full pool.
-TEST_CASE(listpool_traverse_full) {
-  test_pool pool;
-  listpool_make(&pool);
-
-  for (int i = 0; i < NUM_BLOCKS; ++i) {
-    int* val = listpool_alloc(&pool);
-    TEST_TRUE(val != 0);
-    *val = i + 1;
-  }
-
-  TEST_EQUAL(sum(&pool), (NUM_BLOCKS) * (NUM_BLOCKS + 1) / 2);
-}
-
-// Get the ith (allocated) block.
-TEST_CASE(listpool_get_block) {
-  test_pool pool;
-  listpool_make(&pool);
-
-  for (int i = 0; i < NUM_BLOCKS; ++i) {
-    int* block = listpool_alloc(&pool);
-    TEST_TRUE(block != 0);
-    *block = i;
-    TEST_EQUAL(listpool_get_block_index(&pool, block), (size_t)i);
-  }
-
-  for (int i = 0; i < NUM_BLOCKS; ++i) {
-    TEST_EQUAL(*listpool_get_block(&pool, i), i);
-  }
-}
-
-// Remove a value from the list.
-TEST_CASE(listpool_remove_value) {
-  test_pool pool;
-  listpool_make(&pool);
-
-  int* x = listpool_alloc(&pool);
-  int* y = listpool_alloc(&pool);
-  TEST_TRUE(x != 0);
-  TEST_TRUE(y != 0);
-
-  *x = 155;
-  *y = 177;
-
-  listpool_remove(&pool, 155); // x
-
-  TEST_EQUAL(count(&pool), 1);
-  TEST_EQUAL(sum(&pool), *y);
-}
-
-// Stress test.
-//
-// 1. Allocate the pool, either fully or partially. If fully, attempt to
-// allocate some items past the end.
-//
-// 2. Free all allocated items in some random order.
-
-int main() { return 0; }
diff --git a/mem/CMakeLists.txt b/mem/CMakeLists.txt
new file mode 100644
index 0000000..233d2be
--- /dev/null
+++ b/mem/CMakeLists.txt
@@ -0,0 +1,26 @@
+cmake_minimum_required(VERSION 3.0)
+
+project(mem)
+
+# Library
+
+add_library(mem
+  src/mem.c)
+
+target_include_directories(mem PUBLIC
+  include)
+
+target_link_libraries(mem
+  list)
+
+target_compile_options(mem PRIVATE -Wall -Wextra)
+
+# Test
+
+add_executable(mem_test
+  test/mem_test.c)
+
+target_link_libraries(mem_test
+  mem)
+
+target_compile_options(mem_test PRIVATE -DUNIT_TEST -DNDEBUG -Wall -Wextra)
diff --git a/mem/include/mem.h b/mem/include/mem.h
new file mode 100644
index 0000000..30c24fc
--- /dev/null
+++ b/mem/include/mem.h
@@ -0,0 +1,149 @@
+/*
+ * Block-based Memory Allocator.
+ *
+ * Clients should use the macros to define and use allocators. They make the API
+ * type-safe.
+ *
+ * Like a pool/block-based allocator, this allocator stores data in fixed-size
+ * blocks. However, this allocator also supports allocation of contiguous chunks
+ * of a variable number of blocks.
+ *
+ * Chunk information is stored in a separate array so that client data is
+ * contiguous in the main pool of memory and better cached.
+ */
+#pragma once
+
+#include <assert.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+/// Define a typed memory allocator backed by a statically-allocated array.
+#define DEF_MEM(MEM, TYPE, NUM_BLOCKS) \
+  typedef struct MEM { \
+    Memory mem; \
+    Chunk chunks[NUM_BLOCKS]; \
+    TYPE blocks[NUM_BLOCKS]; \
+  } MEM;
+
+/// Define a typed memory allocator backed by a dynamically-allocated array.
+#define DEF_MEM_DYN(MEM, TYPE) \
+  typedef struct MEM { \
+    Memory mem; \
+    Chunk* chunks; \
+    TYPE* blocks; \
+  } MEM;
+
+/// Initialize a statically-backed memory allocator.
+#define mem_make(MEM) \
+  { \
+    assert(MEM); \
+    const size_t block_size = sizeof((MEM)->blocks[0]); \
+    const size_t num_blocks = sizeof((MEM)->blocks) / block_size; \
+    mem_make_( \
+        &(MEM)->mem, (MEM)->chunks, (MEM)->blocks, num_blocks, block_size); \
+  }
+
+/// Initialize a dynamically-backed memory allocator.
+#define mem_make_dyn(MEM, num_blocks, block_size) \
+  mem_make_(&(MEM)->mem, 0, 0, num_blocks, block_size)
+
+/// Destroy the allocator.
+///
+/// If the allocator is dynamically-backed, then this function frees the
+/// underlying memory.
+#define mem_del(MEM) mem_del_(&(MEM)->mem)
+
+/// Clear the allocator.
+///
+/// This function frees all of the allocator's blocks. The resulting allocator
+/// is as if it were newly created.
+#define mem_clear(MEM) mem_clear_(&(MEM)->mem)
+
+/// Allocate a new chunk of N blocks.
+/// Return a pointer to the first block of the chunk, or 0 if there is no memory
+/// left.
+/// New chunks are conveniently zeroed out.
+#define mem_alloc(MEM, num_blocks) mem_alloc_(&(MEM)->mem, num_blocks)
+
+/// Free the chunk.
+/// The chunk pointer is conveniently set to 0.
+#define mem_free(MEM, CHUNK) mem_free_(&(MEM)->mem, (void**)CHUNK)
+
+/// Return a pointer to a chunk given the chunk's handle.
+/// The chunk must have been allocated.
+#define mem_get_chunk(MEM, HANDLE) \
+  ((__typeof__((MEM)->blocks[0])*)mem_get_chunk_(&(MEM)->mem, HANDLE))
+
+/// Get the handle to the given chunk.
+#define mem_get_chunk_handle(MEM, CHUNK_PTR) \
+  mem_get_chunk_handle_(&(MEM)->mem, CHUNK_PTR)
+
+/// Iterate over the used chunks of the allocator.
+///
+/// The caller can use 'i' as the index of the current chunk.
+///
+/// It is valid to mem_free() the chunk at each step of the iteration.
+#define mem_foreach(MEM, ITER, BODY) \
+  size_t i = 0; \
+  do { \
+    if ((MEM)->chunks[i].used) { \
+      __typeof__((MEM)->blocks[0])* ITER = &(MEM)->blocks[i]; \
+      (void)ITER; \
+      BODY; \
+    } \
+    i = (MEM)->chunks[i].next; \
+  } while (i);
+
+// -----------------------------------------------------------------------------
+
+/// Chunk information.
+///
+/// Every chunk represents a contiguous array of some number of blocks. The
+/// allocator begins as one big unused chunk.
+///
+/// Allocation looks for a free chunk large enough to hold the requested number
+/// of blocks. If the free chunk is larger than the requested chunk size, then
+/// the requested chunk is carved out of the larger chunk.
+///
+/// Deallocation frees the chunk back and merges it with free neighbouring
+/// chunks. Two free chunks are never contiguous in memory.
+///
+/// 'next' and 'prev' always point to a valid chunk (e.g., 0). Allocation stops
+/// looking for free chunks when it loops back around to its starting point.
+typedef struct Chunk {
+  size_t num_blocks;
+  size_t prev;
+  size_t next;
+  bool used;
+} Chunk;
+
+typedef struct Memory {
+  size_t block_size_bytes;
+  size_t num_blocks;
+  size_t next_free_chunk;
+  bool dynamic; /// True if blocks and chunks are dynamically-allocated.
+  Chunk* chunks; /// Array of chunk information.
+  uint8_t* blocks; /// Array of blocks.
+} Memory;
+
+/// Create a memory allocator.
+///
+/// 'chunks' and 'blocks' may be user-provided (statically-backed allocator) or
+/// null (dynamically-backed allocator).
+/// - If null, the allocator malloc()s the memory for them.
+/// - If given:
+///   - `chunks` must be at least `num_blocks` chunks.
+///   - `blocks` must be at least `num_blocks` * `block_size_bytes` bytes.
+///
+/// All blocks are zeroed out for convenience.
+bool mem_make_(
+    Memory* mem, Chunk* chunks, void* blocks, size_t num_blocks,
+    size_t block_size_bytes);
+
+void mem_del_(Memory*);
+void mem_clear_(Memory*);
+void* mem_alloc_(Memory*, size_t num_blocks);
+void mem_free_(Memory*, void** chunk_ptr);
+void* mem_get_chunk_(const Memory*, size_t chunk_handle);
+size_t mem_get_chunk_handle_(const Memory*, const void* chunk);
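
Note: the header above only declares the API. As a quick orientation, the following sketch shows how a client might use the macros (DEF_MEM, mem_make, mem_alloc, mem_foreach, mem_free, mem_del). It is not part of the commit; the Vertex type, the pool size of 64, and the printed output are illustrative assumptions.

    #include "mem.h"
    #include <stdio.h>

    typedef struct Vertex { float x, y, z; } Vertex;

    // A pool of up to 64 vertices, backed by a statically-allocated array.
    DEF_MEM(VertexPool, Vertex, 64);

    int main(void) {
      VertexPool pool;
      mem_make(&pool);

      // Allocate one chunk of 3 contiguous vertices (a triangle).
      Vertex* tri = mem_alloc(&pool, 3);
      if (!tri) {
        return 1;  // Allocator full.
      }
      tri[0] = (Vertex){0.f, 0.f, 0.f};
      tri[1] = (Vertex){1.f, 0.f, 0.f};
      tri[2] = (Vertex){0.f, 1.f, 0.f};

      // Iterate over used chunks; the iterator points at the first block of
      // each chunk and 'i' is the chunk's starting block index.
      mem_foreach(&pool, v, { printf("chunk at block %zu: x=%f\n", i, v->x); });

      // Free the chunk; 'tri' is set to 0 by mem_free().
      mem_free(&pool, &tri);
      mem_del(&pool);
      return 0;
    }
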
diff --git a/mem/src/mem.c b/mem/src/mem.c
new file mode 100644
index 0000000..ff97f0f
--- /dev/null
+++ b/mem/src/mem.c
@@ -0,0 +1,183 @@
+#include "mem.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+bool mem_make_(
+    Memory* mem, Chunk* chunks, void* blocks, size_t num_blocks,
+    size_t block_size_bytes) {
+  assert(mem);
+  assert((chunks && blocks) || (!chunks && !blocks));
+  assert(num_blocks >= 1);
+
+  mem->block_size_bytes = block_size_bytes;
+  mem->num_blocks = num_blocks;
+  mem->next_free_chunk = 0;
+
+  // Allocate chunks and blocks if necessary and zero them out.
+  if (!chunks) {
+    chunks = calloc(num_blocks, sizeof(Chunk));
+    blocks = calloc(num_blocks, block_size_bytes);
+    mem->dynamic = true;
+    if (!chunks || !blocks) {
+      return false;
+    }
+  } else {
+    memset(blocks, 0, num_blocks * block_size_bytes);
+    memset(chunks, 0, num_blocks * sizeof(Chunk));
+    mem->dynamic = false;
+  }
+  mem->chunks = chunks;
+  mem->blocks = blocks;
+
+  // Initialize the head as one large free chunk.
+  Chunk* head = &mem->chunks[0];
+  head->num_blocks = num_blocks;
+
+  return true;
+}
+
+void mem_del_(Memory* mem) {
+  assert(mem);
+  if (mem->dynamic) {
+    if (mem->chunks) {
+      free(mem->chunks);
+      mem->chunks = 0;
+    }
+    if (mem->blocks) {
+      free(mem->blocks);
+      mem->blocks = 0;
+    }
+  }
+}
+
+void mem_clear_(Memory* mem) {
+  assert(mem);
+  mem->next_free_chunk = 0;
+  memset(mem->blocks, 0, mem->num_blocks * mem->block_size_bytes);
+  memset(mem->chunks, 0, mem->num_blocks * sizeof(Chunk));
+
+  // Initialize the head as one large free chunk.
+  Chunk* head = &mem->chunks[0];
+  head->num_blocks = mem->num_blocks;
+}
+
+void* mem_alloc_(Memory* mem, size_t num_blocks) {
+  assert(mem);
+  assert(num_blocks >= 1);
+
+  // Search for the first free chunk that can accommodate num_blocks.
+  const size_t start = mem->next_free_chunk;
+  size_t chunk_idx = start;
+  bool found = false;
+  do {
+    Chunk* chunk = &mem->chunks[chunk_idx];
+    if (!chunk->used) {
+      if (chunk->num_blocks > num_blocks) {
+        // Carve out a smaller chunk when the found chunk is larger than
+        // requested.
+        // [prev] <--> [chunk] <--> [new next] <--> [next]
+        const size_t new_next_idx = chunk_idx + num_blocks;
+        Chunk* new_next = &mem->chunks[new_next_idx];
+        if (chunk->next) {
+          mem->chunks[chunk->next].prev = new_next_idx;
+        }
+        new_next->prev = chunk_idx;
+        new_next->next = chunk->next;
+        chunk->next = new_next_idx;
+
+        new_next->num_blocks = chunk->num_blocks - num_blocks;
+        chunk->num_blocks = num_blocks;
+
+        chunk->used = true;
+        found = true;
+        break;
+      } else if (chunk->num_blocks == num_blocks) {
+        chunk->used = true;
+        found = true;
+        break;
+      }
+    }
+    chunk_idx = chunk->next;  // Last chunk points back to 0, which is always the
+                              // start of some chunk. 'next' and 'prev' are
+                              // always valid pointers.
+  } while (chunk_idx != start);
+
+  if (found) {
+    mem->next_free_chunk = mem->chunks[chunk_idx].next;
+    return &mem->blocks[chunk_idx * mem->block_size_bytes];
+  } else {
+    return 0;  // Large-enough free chunk not found.
+  }
+}
+
+// The given pointer is a pointer to the first block of the chunk.
+void mem_free_(Memory* mem, void** chunk_ptr) {
+  assert(mem);
+  assert(chunk_ptr);
+
+  const size_t chunk_idx =
+      ((uint8_t*)*chunk_ptr - mem->blocks) / mem->block_size_bytes;
+  assert(chunk_idx < mem->num_blocks);
+  Chunk* chunk = &mem->chunks[chunk_idx];
+
+  // Disallow double-frees.
+  assert(chunk->used);
+
+  // Zero out the chunk so that we don't get stray values the next time it is
+  // allocated.
+  memset(&mem->blocks[chunk_idx * mem->block_size_bytes], 0, chunk->num_blocks * mem->block_size_bytes);
+
+  // Free the chunk. If it is contiguous with other free chunks, then merge.
+  // We only need to look at the chunk's immediate neighbours because no two
+  // free chunks are left contiguous after merging.
+  chunk->used = false;
+  if (chunk->next) {
+    Chunk* next = &mem->chunks[chunk->next];
+    if (!next->used) {
+      // Pre:  [chunk] <--> [next] <--> [next next]
+      // Post: [ chunk + next ] <--> [next next]
+      chunk->num_blocks += mem->chunks[chunk->next].num_blocks;
+      chunk->next = next->next;
+      if (next->next) {
+        Chunk* next_next = &mem->chunks[next->next];
+        next_next->prev = chunk_idx;
+      }
+      next->prev = next->next = next->num_blocks = 0;
+    }
+  }
+  if (chunk->prev) {
+    Chunk* prev = &mem->chunks[chunk->prev];
+    if (!prev->used) {
+      // Pre:  [prev] <--> [chunk] <--> [next]
+      // Post: [ prev + chunk ] <--> [next]
+      prev->num_blocks += chunk->num_blocks;
+      prev->next = chunk->next;
+      if (chunk->next) {
+        Chunk* next = &mem->chunks[chunk->next];
+        next->prev = chunk->prev;
+      }
+      chunk->prev = chunk->next = chunk->num_blocks = 0;
+    }
+  }
+
+  *chunk_ptr = 0;
+}
+
+// The handle is the chunk's index. We don't call it an index in the public API
+// because from the user's perspective, two chunks allocated back-to-back need
+// not be +1 away (the offset depends on how large the first chunk is).
+void* mem_get_chunk_(const Memory* mem, size_t chunk_handle) {
+  assert(mem);
+  assert(chunk_handle < mem->num_blocks);
+  assert(mem->chunks[chunk_handle].used);
+  return &mem->blocks[chunk_handle * mem->block_size_bytes];
+}
+
+// The given chunk pointer is a pointer to the blocks array.
+size_t mem_get_chunk_handle_(const Memory* mem, const void* chunk) {
+  assert(mem);
+  const size_t block_byte_index = (const uint8_t*)chunk - mem->blocks;
+  assert(block_byte_index % mem->block_size_bytes == 0);
+  return block_byte_index / mem->block_size_bytes;
+}
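
The handle-versus-index remark above mem_get_chunk_ is easier to see with concrete numbers. A small sketch, not part of the commit, using only the public macros; the IntMem pool and its size are assumptions for illustration:

    #include "mem.h"

    DEF_MEM(IntMem, int, 10);  // 10 int-sized blocks (illustrative).

    void handle_example(void) {
      IntMem m;
      mem_make(&m);

      int* a = mem_alloc(&m, 3);  // Chunk 'a' occupies blocks 0..2.
      int* b = mem_alloc(&m, 1);  // Chunk 'b' starts at block 3.

      // Handles are starting block indices into the backing array, so two
      // back-to-back allocations are not handle+1 apart unless every chunk
      // is exactly one block long.
      const size_t handle_a = mem_get_chunk_handle(&m, a);  // == 0
      const size_t handle_b = mem_get_chunk_handle(&m, b);  // == 3, not 1
      assert(mem_get_chunk(&m, handle_b) == b);
      (void)handle_a;
    }
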
diff --git a/mem/test/mem_test.c b/mem/test/mem_test.c
new file mode 100644
index 0000000..6ab4c7c
--- /dev/null
+++ b/mem/test/mem_test.c
@@ -0,0 +1,232 @@
+#include "mem.h"
+
+#include "test.h"
+
+#define NUM_BLOCKS 10
+
+DEF_MEM(test_mem, int, NUM_BLOCKS);
+
+static int count(test_mem* mem) {
+  int count = 0;
+  mem_foreach(mem, n, { count++; });
+  return count;
+}
+
+static int sum(test_mem* mem) {
+  int sum = 0;
+  mem_foreach(mem, n, { sum += *n; });
+  return sum;
+}
+
+// Create a statically-backed allocator.
+TEST_CASE(mem_create) {
+  test_mem mem;
+  mem_make(&mem);
+}
+
+// Create a dynamically-backed allocator.
+TEST_CASE(mem_create_dyn) {
+  DEF_MEM_DYN(dyn_mem, int);
+
+  dyn_mem mem;
+  mem_make_dyn(&mem, NUM_BLOCKS, sizeof(int));
+}
+
+// Allocate N chunks of 1 block each.
+TEST_CASE(mem_fully_allocate) {
+  test_mem mem;
+  mem_make(&mem);
+
+  for (int i = 0; i < NUM_BLOCKS; ++i) {
+    const int* block = mem_alloc(&mem, 1);
+    TEST_TRUE(block != 0);
+  }
+}
+
+// Allocate N chunks of 1 block each, then free them.
+TEST_CASE(mem_fill_then_free) {
+  test_mem mem;
+  mem_make(&mem);
+
+  int* blocks[NUM_BLOCKS] = {0};
+  for (int i = 0; i < NUM_BLOCKS; i++) {
+    blocks[i] = mem_alloc(&mem, 1);
+    TEST_TRUE(blocks[i] != 0);
+  }
+
+  for (int i = 0; i < NUM_BLOCKS; i++) {
+    mem_free(&mem, &blocks[i]);
+    TEST_EQUAL(blocks[i], 0); // Pointer should be set to 0 on free.
+  }
+
+  TEST_EQUAL(count(&mem), 0);
+}
+
+// Attempt to allocate blocks past the maximum allocator size.
+// The allocator should handle the failed allocations gracefully.
+TEST_CASE(mem_allocate_beyond_max_size) {
+  test_mem mem;
+  mem_make(&mem);
+
+  // Fully allocate the mem.
+  for (int i = 0; i < NUM_BLOCKS; ++i) {
+    TEST_TRUE(mem_alloc(&mem, 1) != 0);
+  }
+
+  // Past the end.
+  for (int i = 0; i < NUM_BLOCKS; ++i) {
+    TEST_EQUAL(mem_alloc(&mem, 1), 0);
+  }
+}
+
+// Free blocks should always remain zeroed out.
+// This tests the invariant right after creating the allocator.
+TEST_CASE(mem_zero_free_blocks_after_creation) {
+  test_mem mem;
+  mem_make(&mem);
+
+  const int zero = 0;
+  for (int i = 0; i < NUM_BLOCKS; ++i) {
+    const int* block = (const int*)(mem.blocks) + i;
+    TEST_EQUAL(memcmp(block, &zero, sizeof(int)), 0);
+  }
+}
+
+// Free blocks should always remain zeroed out.
+// This tests the invariant after freeing a block.
+TEST_CASE(mem_zero_free_block_after_free) {
+  test_mem mem;
+  mem_make(&mem);
+
+  int* val = mem_alloc(&mem, 1);
+  TEST_TRUE(val != 0);
+  *val = 177;
+
+  int* old_val = val;
+  mem_free(&mem, &val);    // val pointer is set to 0.
+  TEST_EQUAL(*old_val, 0); // Block is zeroed out after free.
+}
+
+// Traverse an empty allocator.
+TEST_CASE(mem_traverse_empty) {
+  test_mem mem;
+  mem_make(&mem);
+
+  TEST_EQUAL(count(&mem), 0);
+}
+
+// Traverse a partially full allocator.
+TEST_CASE(mem_traverse_partially_full) {
+  const int N = NUM_BLOCKS / 2;
+
+  test_mem mem;
+  mem_make(&mem);
+
+  for (int i = 0; i < N; ++i) {
+    int* val = mem_alloc(&mem, 1);
+    TEST_TRUE(val != 0);
+    *val = i + 1;
+  }
+
+  TEST_EQUAL(sum(&mem), (N) * (N + 1) / 2);
+}
+
+// Traverse a full allocator.
+TEST_CASE(mem_traverse_full) {
+  test_mem mem;
+  mem_make(&mem);
+
+  for (int i = 0; i < NUM_BLOCKS; ++i) {
+    int* val = mem_alloc(&mem, 1);
+    TEST_TRUE(val != 0);
+    *val = i + 1;
+  }
+
+  TEST_EQUAL(sum(&mem), (NUM_BLOCKS) * (NUM_BLOCKS + 1) / 2);
+}
+
+// Get the ith (allocated) chunk.
+TEST_CASE(mem_get_block) {
+  test_mem mem;
+  mem_make(&mem);
+
+  for (int i = 0; i < NUM_BLOCKS; ++i) {
+    int* block = mem_alloc(&mem, 1);
+    TEST_TRUE(block != 0);
+    *block = i;
+    TEST_EQUAL(mem_get_chunk_handle(&mem, block), (size_t)i);
+  }
+
+  for (int i = 0; i < NUM_BLOCKS; ++i) {
+    TEST_EQUAL(*mem_get_chunk(&mem, i), i);
+  }
+}
+
+// Test merging.
+// 1. Allocate chunks of variable sizes.
+// 2. Free them in a different order.
+// 3. Then we should be able to allocate 1 chunk of N blocks.
+TEST_CASE(mem_fragmentation) {
+  test_mem mem;
+  mem_make(&mem);
+
+  int* blocks[NUM_BLOCKS] = {0};
+  int next_block = 0;
+
+#define ALLOC(num_blocks) \
+  blocks[next_block] = mem_alloc(&mem, num_blocks); \
+  TEST_TRUE(blocks[next_block] != 0); \
+  next_block++;
+
+#define FREE(block_idx) mem_free(&mem, &blocks[block_idx])
+
+  // 5 total allocations of variable chunk sizes.
+  ALLOC(2); // 2;  idx = 0
+  ALLOC(3); // 5;  idx = 1
+  ALLOC(1); // 6;  idx = 2
+  ALLOC(3); // 9;  idx = 3
+  ALLOC(1); // 10; idx = 4
+
+  // Free the 5 allocations in a different order.
+  FREE(1);
+  FREE(3);
+  FREE(4);
+  FREE(2);
+  FREE(0);
+
+  // Should be able to allocate 1 chunk of N blocks.
+  const void* chunk = mem_alloc(&mem, NUM_BLOCKS);
+  TEST_TRUE(chunk != 0);
+}
+
+// Clear and re-use an allocator.
+TEST_CASE(mem_clear_then_reuse) {
+  test_mem mem;
+  mem_make(&mem);
+
+  // Allocate chunks, contents not important.
+  for (int i = 0; i < NUM_BLOCKS; ++i) {
+    int* chunk = mem_alloc(&mem, 1);
+    TEST_TRUE(chunk != 0);
+  }
+
+  mem_clear(&mem);
+
+  // Allocate chunks and assign values 0..N.
+  for (int i = 0; i < NUM_BLOCKS; ++i) {
+    int* chunk = mem_alloc(&mem, 1);
+    TEST_TRUE(chunk != 0);
+    *chunk = i + 1;
+  }
+
+  TEST_EQUAL(sum(&mem), NUM_BLOCKS * (NUM_BLOCKS + 1) / 2);
+}
+
+// Stress test.
+//
+// 1. Allocate the mem, either fully or partially. If fully, attempt to
+// allocate some items past the end.
+//
+// 2. Free all allocated items in some random order.
+
+int main() { return 0; }
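
The stress test described in the closing comment is not implemented in this commit (main() simply returns 0, as in the old listpool test). A possible sketch, assuming test.h's TEST_CASE/TEST_TRUE/TEST_EQUAL macros behave as in the cases above; the round count, the single-block chunk sizes, and the use of rand() are illustrative:

    // Hypothetical sketch; it would live in mem_test.c next to the other tests.
    // rand()/srand() need <stdlib.h>, which mem_test.c does not currently include.
    #include <stdlib.h>

    TEST_CASE(mem_stress) {
      test_mem mem;
      mem_make(&mem);

      srand(0);  // Deterministic run.
      for (int round = 0; round < 1000; ++round) {
        int* blocks[NUM_BLOCKS] = {0};
        int allocated = 0;

        // Allocate single-block chunks until the allocator runs out.
        while (allocated < NUM_BLOCKS && (blocks[allocated] = mem_alloc(&mem, 1))) {
          allocated++;
        }

        // Past-the-end allocations must fail gracefully.
        TEST_EQUAL(mem_alloc(&mem, 1), 0);

        // Free the allocated chunks in a random order.
        while (allocated > 0) {
          const int pick = rand() % allocated;
          mem_free(&mem, &blocks[pick]);         // Sets blocks[pick] to 0.
          blocks[pick] = blocks[allocated - 1];  // Compact the live pointers.
          blocks[allocated - 1] = 0;
          allocated--;
        }

        // After freeing everything, one chunk of N blocks must fit again.
        int* all = mem_alloc(&mem, NUM_BLOCKS);
        TEST_TRUE(all != 0);
        mem_free(&mem, &all);
      }
    }
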
diff --git a/listpool/test/test.h b/mem/test/test.h
index fd8dc22..fd8dc22 100644
--- a/listpool/test/test.h
+++ b/mem/test/test.h
diff --git a/mempool/include/mempool.h b/mempool/include/mempool.h
index 2447884..994e25a 100644
--- a/mempool/include/mempool.h
+++ b/mempool/include/mempool.h
@@ -108,7 +108,7 @@ typedef struct mempool {
   size_t num_blocks;
   size_t next_free_block;
   bool full;
-  bool dynamic; // True if blocks and info are dynamically-allocated.
+  bool dynamic; /// True if blocks and info are dynamically-allocated.
   BlockInfo* block_info;
   uint8_t* blocks;
 } mempool;