Diffstat (limited to 'mempool/src/mempool.c')
-rw-r--r--   mempool/src/mempool.c   85
1 files changed, 85 insertions, 0 deletions
diff --git a/mempool/src/mempool.c b/mempool/src/mempool.c
new file mode 100644
index 0000000..b7e8705
--- /dev/null
+++ b/mempool/src/mempool.c
@@ -0,0 +1,85 @@
1 | #include "mempool.h" | ||
2 | |||
3 | #include <string.h> | ||
4 | |||
5 | static inline size_t min(size_t a, size_t b) { return a < b ? a : b; } | ||
6 | |||
7 | void mempool_make_(mempool* pool, BlockInfo* block_info, void* blocks, | ||
8 | size_t num_blocks, size_t block_size_bytes) { | ||
9 | assert(pool); | ||
10 | assert(block_info); | ||
11 | assert(blocks); | ||
12 | assert(num_blocks >= 1); | ||
13 | pool->block_size_bytes = block_size_bytes; | ||
14 | pool->num_blocks = num_blocks; | ||
15 | pool->next_free_block = 0; | ||
16 | pool->full = false; | ||
17 | pool->block_info = block_info; | ||
18 | pool->blocks = blocks; | ||
19 | memset(blocks, 0, num_blocks * block_size_bytes); | ||
20 | } | ||
21 | |||
22 | void* mempool_alloc_(mempool* pool) { | ||
23 | assert(pool); | ||
24 | |||
25 | if (pool->full) { | ||
26 | return 0; | ||
27 | } | ||
28 | |||
29 | // Allocate the block. | ||
30 | void* block = &pool->blocks[pool->next_free_block * pool->block_size_bytes]; | ||
31 | pool->block_info[pool->next_free_block].used = true; | ||
32 | |||
33 | // Search for the next free block. If it does not exist, flag the pool full. | ||
34 | pool->full = true; | ||
35 | for (size_t i = 1; i < pool->num_blocks && i != 0; i++) { | ||
36 | pool->next_free_block = (pool->next_free_block + 1) % pool->num_blocks; | ||
37 | if (!pool->block_info[pool->next_free_block].used) { | ||
38 | pool->full = false; | ||
39 | break; | ||
40 | } | ||
41 | } | ||
42 | |||
43 | return block; | ||
44 | } | ||
45 | |||
46 | void mempool_free_(mempool* pool, void** block_ptr) { | ||
47 | assert(pool); | ||
48 | assert(block_ptr); | ||
49 | |||
50 | memset(*block_ptr, 0, pool->block_size_bytes); | ||
51 | |||
52 | const size_t block_index = | ||
53 | ((uint8_t*)*block_ptr - pool->blocks) / pool->block_size_bytes; | ||
54 | assert(block_index < pool->num_blocks); | ||
55 | |||
56 | // Disallow double-frees. | ||
57 | assert(pool->block_info[block_index].used); | ||
58 | |||
59 | pool->block_info[block_index].used = false; | ||
60 | if (pool->full) { | ||
61 | pool->next_free_block = block_index; | ||
62 | pool->full = false; | ||
63 | } else { | ||
64 | // Prefer to allocate blocks towards the start of the pool. This way, blocks | ||
65 | // should cluster around this area and the pool should offer better memory | ||
66 | // locality for used blocks. | ||
67 | pool->next_free_block = min(pool->next_free_block, block_index); | ||
68 | } | ||
69 | |||
70 | *block_ptr = 0; | ||
71 | } | ||
72 | |||
73 | void* mempool_get_block_(const mempool* pool, size_t block_index) { | ||
74 | assert(pool); | ||
75 | assert(block_index < pool->num_blocks); | ||
76 | assert(pool->block_info[block_index].used); | ||
77 | return pool->blocks + block_index * pool->block_size_bytes; | ||
78 | } | ||
79 | |||
80 | size_t mempool_get_block_index_(const mempool* pool, const void* block) { | ||
81 | assert(pool); | ||
82 | const size_t block_byte_index = (const uint8_t*)block - pool->blocks; | ||
83 | assert(block_byte_index % pool->block_size_bytes == 0); | ||
84 | return block_byte_index / pool->block_size_bytes; | ||
85 | } | ||
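
The header mempool.h is not part of this change, so the public interface it exposes is not visible here. As a rough usage sketch, the snippet below calls the underscore-suffixed functions directly, assuming the header declares them together with the mempool struct and a BlockInfo type carrying the used flag that this file relies on; the element type vec3, the pool size, and main() are made up for illustration.

#include <assert.h>
#include <stdio.h>

#include "mempool.h"

// Hypothetical element type for this example.
typedef struct { float x, y, z; } vec3;

enum { kNumBlocks = 4 };

int main(void) {
  // Caller-provided storage, as mempool_make_ expects: one BlockInfo per
  // block plus a raw byte buffer large enough to hold every block.
  static BlockInfo info[kNumBlocks];
  static unsigned char storage[kNumBlocks * sizeof(vec3)];

  mempool pool;
  mempool_make_(&pool, info, storage, kNumBlocks, sizeof(vec3));

  // Blocks come back zeroed because mempool_make_ clears the whole buffer.
  vec3* a = mempool_alloc_(&pool);
  vec3* b = mempool_alloc_(&pool);
  assert(a && b);
  a->x = 1.0f;

  // A block can be round-tripped through its index.
  const size_t b_index = mempool_get_block_index_(&pool, b);
  assert(mempool_get_block_(&pool, b_index) == (void*)b);

  // mempool_free_ zeroes the block and nulls the caller's pointer.
  void* a_erased = a;
  mempool_free_(&pool, &a_erased);
  assert(a_erased == 0);

  printf("kept block %zu, freed the other\n", b_index);
  return 0;
}

The caller owns both the BlockInfo array and the block storage, which is what lets the pool operate without any dynamic allocation of its own; the trailing underscores on the function names suggest the header wraps them in a friendlier macro layer, though that is not shown in this diff.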