#include "mempool.h"
#include <string.h>
static inline size_t min(size_t a, size_t b) { return a < b ? a : b; }
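
// Initializes the pool in place. All storage is caller-provided: `block_info`
// must hold `num_blocks` entries and `blocks` must hold
// `num_blocks * block_size_bytes` bytes. Both arrays are zeroed.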
void mempool_make_(
    mempool* pool, BlockInfo* block_info, void* blocks, size_t num_blocks,
    size_t block_size_bytes) {
  assert(pool);
  assert(block_info);
  assert(blocks);
  assert(num_blocks >= 1);
  pool->block_size_bytes = block_size_bytes;
  pool->num_blocks = num_blocks;
  pool->next_free_block = 0;
  pool->full = false;
  pool->block_info = block_info;
  pool->blocks = blocks;
  memset(blocks, 0, num_blocks * block_size_bytes);
  memset(block_info, 0, num_blocks * sizeof(BlockInfo));
}
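
// Returns a pointer to a free block, or 0 if the pool is full. Blocks are
// zeroed on pool creation and again on free, so the returned block is always
// zero-filled.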
void* mempool_alloc_(mempool* pool) {
  assert(pool);
  if (pool->full) {
    return 0;
  }
  // Allocate the block.
  void* block = &pool->blocks[pool->next_free_block * pool->block_size_bytes];
  pool->block_info[pool->next_free_block].used = true;
  // Search for the next free block and, if none exists, flag the pool as
  // full.
  //
  // The search starts near the block just allocated, where free blocks are
  // more likely to be found. It begins at i=1 because only the other N-1
  // blocks in the pool need to be tested.
  pool->full = true;
  for (size_t i = 1; i < pool->num_blocks; i++) {
    pool->next_free_block = (pool->next_free_block + 1) % pool->num_blocks;
    if (!pool->block_info[pool->next_free_block].used) {
      pool->full = false;
      break;
    }
  }
  return block;
}
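
// Returns a block to the pool and nulls out the caller's pointer. *block_ptr
// must be a live block previously returned by mempool_alloc_().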
void mempool_free_(mempool* pool, void** block_ptr) {
  assert(pool);
  assert(block_ptr);
  assert(*block_ptr);
  const size_t block_index =
      ((uint8_t*)*block_ptr - pool->blocks) / pool->block_size_bytes;
  assert(block_index < pool->num_blocks);
  // Disallow double-frees.
  assert(pool->block_info[block_index].used);
  // Zero out the block so that we don't get stray values the next time it is
  // allocated. Do this only after the pointer has been validated above.
  memset(*block_ptr, 0, pool->block_size_bytes);
  pool->block_info[block_index].used = false;
  if (pool->full) {
    pool->next_free_block = block_index;
    pool->full = false;
  } else {
    // Prefer to allocate blocks towards the start of the pool. This way,
    // blocks should cluster around this area and the pool should offer better
    // memory locality for used blocks.
    pool->next_free_block = min(pool->next_free_block, block_index);
  }
  *block_ptr = 0;
}
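
// Returns the block at the given index, which must refer to a block that is
// currently in use.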
void* mempool_get_block_(const mempool* pool, size_t block_index) {
  assert(pool);
  assert(block_index < pool->num_blocks);
  assert(pool->block_info[block_index].used);
  return pool->blocks + block_index * pool->block_size_bytes;
}
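
// Maps a block pointer back to its index in the pool.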
size_t mempool_get_block_index_(const mempool* pool, const void* block) {
  assert(pool);
  const size_t block_byte_index = (const uint8_t*)block - pool->blocks;
  assert(block_byte_index % pool->block_size_bytes == 0);
  return block_byte_index / pool->block_size_bytes;
}
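
#ifdef MEMPOOL_EXAMPLE
// Usage sketch (not part of the library; compile with -DMEMPOOL_EXAMPLE).
// This exercises the raw `_`-suffixed functions above directly with
// caller-provided storage; any typed convenience wrappers that "mempool.h"
// might define on top of these are deliberately not assumed here. The vec3
// type, kNumBlocks, and MEMPOOL_EXAMPLE guard are all hypothetical.
#include <stdio.h>

typedef struct vec3 {
  float x, y, z;
} vec3;

int main(void) {
  enum { kNumBlocks = 4 };
  mempool pool;
  BlockInfo info[kNumBlocks];
  vec3 storage[kNumBlocks];
  mempool_make_(&pool, info, storage, kNumBlocks, sizeof(vec3));

  // Blocks come back zero-filled; alloc returns 0 once the pool is full.
  vec3* a = (vec3*)mempool_alloc_(&pool);
  vec3* b = (vec3*)mempool_alloc_(&pool);
  a->x = 1.0f;
  b->y = 2.0f;
  printf("a is block %zu\n", mempool_get_block_index_(&pool, a));

  // Freeing takes the address of the pointer so the pool can null it out.
  // Since block 0 is now the lowest free index, it becomes the preferred
  // slot for the next allocation.
  mempool_free_(&pool, (void**)&a);
  assert(a == 0);
  return 0;
}
#endif  // MEMPOOL_EXAMPLE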