memory_arena.c
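/*
 * Compile-support sketch (not part of the original file): this file refers
 * to project-wide helpers that are defined elsewhere in the repo. The
 * definitions below are assumptions that let the file build standalone;
 * in particular, mapping 'print' to fputs/stderr is a stand-in for
 * whatever diagnostic helper the project actually uses.
 */
#include <stdio.h>  // fputs, stderr (stand-in for 'print')
#include <stdint.h> // uint8_t, uint32_t, uint64_t
#include <stddef.h> // size_t
#include <stdlib.h> // _exit
#include <string.h> // memset
#include <assert.h> // assert

typedef uint8_t  u8;
typedef uint32_t u32;
typedef uint64_t u64;

#define mega_bytes(amount) ((u64)(amount) * 1024 * 1024)
#define giga_bytes(amount) ((u64)(amount) * 1024 * 1024 * 1024)
#define print(message)     fputs((message), stderr)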
// Hand-declared Win32 API (normally pulled in via <windows.h>).
__declspec(dllimport) void *VirtualAlloc(void *Address, size_t Size, u32 AllocationType, u32 Protect);
struct memory_arena{
    u8 *base;      // Start of the reserved virtual address range.
    u64 allocated; // Bytes handed out to callers so far.
    u64 committed; // Bytes backed by committed pages.
    u64 reserved;  // Total size of the reserved address range.
};
struct memory_arena create_memory_arena(u64 size_to_reserve){
    struct memory_arena ret = {0};
    ret.reserved = size_to_reserve;

    // Reserve address space only; pages get committed on demand in 'grow_arena'.
    ret.base = VirtualAlloc(/*DesiredBase*/0, size_to_reserve, /*MEM_RESERVE*/0x00002000, /*PAGE_READWRITE*/4);
    if(!ret.base){
        print("Error: Allocation failure.");
        _exit(1);
    }
    return ret;
}
static __declspec(noinline) void grow_arena(struct memory_arena *arena, u64 grow_to){
    if(!arena->base){
        //
        // If the 'memory_arena' is not yet initialized, initialize it
        // with a 'reserved' size of 64 GiB.
        //
        *arena = create_memory_arena(/*size_to_reserve*/giga_bytes(64));
    }

    //
    // If we can fit 'grow_to', we should always succeed.
    // Otherwise, panic.
    //
    if(grow_to > arena->reserved){
        print("Error: Ran out of memory.");
        _exit(1);
    }

    //
    // If we can fit an additional megabyte, we should.
    //
    if(grow_to + mega_bytes(1) <= arena->reserved){
        grow_to += mega_bytes(1);
    }

    //
    // Page-align 'grow_to' (4-KiB pages).
    //
    grow_to = (grow_to + 0xfff) & ~0xfff;

    //
    // If this was called manually, we might not have to commit anything.
    //
    if(grow_to <= arena->committed) return;

    //
    // Commit the bytes.
    //
    void *Success = VirtualAlloc(arena->base, grow_to, /*MEM_COMMIT*/0x00001000, /*PAGE_READWRITE*/4);
    if(Success != arena->base){
        print("Error: Allocation failure.");
        _exit(1);
    }
    arena->committed = grow_to;
}
void *memory_arena_allocate_bytes(struct memory_arena *arena, u64 size, u64 alignment){
    // Make sure the alignment is a power of two.
    assert(alignment && ((alignment & (alignment - 1)) == 0));

    //
    // Align the current offset up to 'alignment'.
    //
    u64 allocated = arena->allocated;
    allocated = (allocated + (alignment - 1)) & ~(alignment - 1);
    u64 allocation_base = allocated;

    //
    // Allocate 'size' bytes.
    //
    allocated += size;

    assert(arena->allocated <= allocated); // Guard against overflow.

    if(allocated > arena->committed){
        grow_arena(arena, allocated);
    }

    arena->allocated = allocated;
    return arena->base + allocation_base;
}
#define push_struct(arena, type) ((type *)memory_arena_allocate_bytes((arena), sizeof(type), _Alignof(type)))
#define push_array(arena, type, count) ((type *)memory_arena_allocate_bytes((arena), sizeof(type) * (count), _Alignof(type)))
u8 *arena_current(struct memory_arena *arena){
    return arena->base + arena->allocated;
}
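/*
 * Usage sketch (not part of the original file; 'v3' and 'example_usage'
 * are hypothetical names). Allocations come back aligned for their type
 * and zero-initialized: freshly committed pages are zero-filled, and the
 * temporary-memory code below re-zeroes anything it releases.
 */
#if 0
typedef struct { float x, y, z; } v3;

static void example_usage(struct memory_arena *arena){
    v3  *point = push_struct(arena, v3);      // One aligned, zeroed struct.
    u32 *table = push_array(arena, u32, 256); // 256 contiguous zeroed u32s.
    point->x = 1.0f;
    table[0] = 42;
}
#endif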
//_____________________________________________________________________________________________________________________
// Temporary memory.
struct temporary_memory{
    struct memory_arena *arena;
    u64 saved_allocated; // 'arena->allocated' at the time of 'begin_temporary_memory'.
};
struct temporary_memory begin_temporary_memory(struct memory_arena *arena){
    struct temporary_memory ret = {
        .arena = arena,
        .saved_allocated = arena->allocated,
    };
    return ret;
}
void end_temporary_memory(struct temporary_memory *temp){
    struct memory_arena *arena = temp->arena;
    u8 *reset_to = arena->base + temp->saved_allocated;
    u64 reset_size = arena->allocated - temp->saved_allocated;

    // Re-zero the released range, so allocations from the arena always
    // return zero-initialized memory (freshly committed pages from
    // VirtualAlloc are zero-filled as well).
    memset(reset_to, 0, reset_size);

    arena->allocated = temp->saved_allocated;
}
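/*
 * Usage sketch for temporary memory (not part of the original file;
 * 'with_scratch_buffer' is a hypothetical name). Everything allocated
 * after 'begin_temporary_memory' is released in one step by
 * 'end_temporary_memory', which rewinds 'arena->allocated'.
 */
#if 0
static void with_scratch_buffer(struct memory_arena *arena){
    struct temporary_memory temp = begin_temporary_memory(arena);

    char *scratch = push_array(arena, char, 1024); // Transient workspace.
    // ... use 'scratch' ...

    end_temporary_memory(&temp); // 'scratch' must not be used past this point.
}
#endif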