/// TODO: delete this once we've upgraded Zig and https://github.com/ziglang/zig/pull/15985 is merged.
const std = @import("std");
const assert = std.debug.assert;
const mem = std.mem;
const Allocator = std.mem.Allocator;

/// This allocator takes an existing allocator, wraps it, and provides an interface
/// where you can allocate without freeing, and then free it all together.
pub const ArenaAllocator = struct {
    child_allocator: Allocator,
    state: State,

    /// Inner state of ArenaAllocator. Can be stored rather than the entire ArenaAllocator
    /// as a memory-saving optimization.
    pub const State = struct {
        buffer_list: std.SinglyLinkedList(usize) = .{},
        end_index: usize = 0,

        pub fn promote(self: State, child_allocator: Allocator) ArenaAllocator {
            return .{
                .child_allocator = child_allocator,
                .state = self,
            };
        }
    };

    pub fn allocator(self: *ArenaAllocator) Allocator {
        return .{
            .ptr = self,
            .vtable = &.{
                .alloc = alloc,
                .resize = resize,
                .free = free,
            },
        };
    }

    const BufNode = std.SinglyLinkedList(usize).Node;

    pub fn init(child_allocator: Allocator) ArenaAllocator {
        return (State{}).promote(child_allocator);
    }

    pub fn deinit(self: ArenaAllocator) void {
        // NOTE: When changing this, make sure `reset()` is adjusted accordingly!
        var it = self.state.buffer_list.first;
        while (it) |node| {
            // this has to occur before the free because the free frees node
            const next_it = node.next;
            const align_bits = std.math.log2_int(usize, @alignOf(BufNode));
            const alloc_buf = @as([*]u8, @ptrCast(node))[0..node.data];
            self.child_allocator.rawFree(alloc_buf, align_bits, @returnAddress());
            it = next_it;
        }
    }

    pub const ResetMode = union(enum) {
        /// Releases all allocated memory in the arena.
        free_all,
        /// This will pre-heat the arena for future allocations by allocating a
        /// large enough buffer for all previously done allocations.
        /// Preheating will speed up the allocation process by invoking the backing allocator
        /// less often than before. If `reset()` is used in a loop, this means that after the
        /// biggest operation, no memory allocations are performed anymore.
        retain_capacity,
        /// This is the same as `retain_capacity`, but the memory will be shrunk to
        /// this value if it exceeds the limit.
        retain_with_limit: usize,
    };

    /// Queries the current memory use of this arena.
    /// This will **not** include the storage required for internal bookkeeping.
    pub fn queryCapacity(self: ArenaAllocator) usize {
        var size: usize = 0;
        var it = self.state.buffer_list.first;
        while (it) |node| : (it = node.next) {
            // Compute the actually allocated size excluding the
            // linked list node.
            size += node.data - @sizeOf(BufNode);
        }
        return size;
    }

    /// Resets the arena allocator and frees all allocated memory.
    ///
    /// `mode` defines how the currently allocated memory is handled.
    /// See the variant documentation for `ResetMode` for the effects of each mode.
    ///
    /// The function returns whether the reset operation was successful or not.
    /// If the reallocation fails, `false` is returned. The arena will still be fully
    /// functional in that case; all memory is released. Future allocations just might
    /// be slower.
    ///
    /// NOTE: If `mode` is `free_all`, the function will always return `true`.
    pub fn reset(self: *ArenaAllocator, mode: ResetMode) bool {
        // Some words on the implementation:
        // The reset function can be implemented with two basic approaches:
        // - Counting how many bytes were allocated since the last reset, and storing that
        //   information in State. This will make reset fast and alloc only a teeny tiny bit
        //   slower.
        // - Counting how many bytes were allocated by iterating the chunk linked list. This
        //   will make reset slower, but alloc() keeps the same speed as if reset() did not
        //   exist.
        //
        // The second variant was chosen for implementation, as with more and more calls to reset(),
        // the function will get faster and faster. At some point, the complexity of the function
        // will drop to amortized O(1), as we only ever have a single chunk that will not be
        // reallocated, and we're not even touching the backing allocator anymore.
        //
        // Thus, only the first handful of calls to reset() will actually need to iterate the linked
        // list; all future calls just take the first node and only reset the `end_index`
        // value.
        const requested_capacity = switch (mode) {
            .retain_capacity => self.queryCapacity(),
            .retain_with_limit => |limit| @min(limit, self.queryCapacity()),
            .free_all => 0,
        };
        if (requested_capacity == 0) {
            // just reset when we don't have anything to reallocate
            self.deinit();
            self.state = State{};
            return true;
        }
        const total_size = requested_capacity + @sizeOf(BufNode);
        const align_bits = std.math.log2_int(usize, @alignOf(BufNode));
        // Free all nodes except for the last one
        var it = self.state.buffer_list.first;
        const maybe_first_node = while (it) |node| {
            // this has to occur before the free because the free frees node
            const next_it = node.next;
            if (next_it == null)
                break node;
            const alloc_buf = @as([*]u8, @ptrCast(node))[0..node.data];
            self.child_allocator.rawFree(alloc_buf, align_bits, @returnAddress());
            it = next_it;
        } else null;
        std.debug.assert(maybe_first_node == null or maybe_first_node.?.next == null);
        // reset the state before we try resizing the buffers, so we definitely have reset the arena to 0.
        self.state.end_index = 0;
        if (maybe_first_node) |first_node| {
            self.state.buffer_list.first = first_node;
            // perfect, no need to invoke the child_allocator
            if (first_node.data == total_size)
                return true;
            const first_alloc_buf = @as([*]u8, @ptrCast(first_node))[0..first_node.data];
            if (self.child_allocator.rawResize(first_alloc_buf, align_bits, total_size, @returnAddress())) {
                // successful resize
                first_node.data = total_size;
            } else {
                // manual realloc
                const new_ptr = self.child_allocator.rawAlloc(total_size, align_bits, @returnAddress()) orelse {
                    // we failed to preheat the arena properly, signal this to the user.
                    return false;
                };
                self.child_allocator.rawFree(first_alloc_buf, align_bits, @returnAddress());
                const node = @as(*BufNode, @ptrCast(@alignCast(new_ptr)));
                node.* = .{ .data = total_size };
                self.state.buffer_list.first = node;
            }
        }
        return true;
    }

    fn createNode(self: *ArenaAllocator, prev_len: usize, minimum_size: usize) ?*BufNode {
        const actual_min_size = minimum_size + (@sizeOf(BufNode) + 16);
        const big_enough_len = prev_len + actual_min_size;
        const len = big_enough_len + big_enough_len / 2;
        const log2_align = comptime std.math.log2_int(usize, @alignOf(BufNode));
        const ptr = self.child_allocator.rawAlloc(len, log2_align, @returnAddress()) orelse
            return null;
        const buf_node = @as(*BufNode, @ptrCast(@alignCast(ptr)));
        buf_node.* = .{ .data = len };
        self.state.buffer_list.prepend(buf_node);
        self.state.end_index = 0;
        return buf_node;
    }

    fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 {
        const self = @as(*ArenaAllocator, @ptrCast(@alignCast(ctx)));
        _ = ra;
        const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align));
        var cur_node = if (self.state.buffer_list.first) |first_node|
            first_node
        else
            (self.createNode(0, n + ptr_align) orelse return null);
        while (true) {
            const cur_alloc_buf = @as([*]u8, @ptrCast(cur_node))[0..cur_node.data];
            const cur_buf = cur_alloc_buf[@sizeOf(BufNode)..];
            const addr = @intFromPtr(cur_buf.ptr) + self.state.end_index;
            const adjusted_addr = mem.alignForward(usize, addr, ptr_align);
            const adjusted_index = self.state.end_index + (adjusted_addr - addr);
            const new_end_index = adjusted_index + n;
            if (new_end_index <= cur_buf.len) {
                const result = cur_buf[adjusted_index..new_end_index];
                self.state.end_index = new_end_index;
                return result.ptr;
            }
            const bigger_buf_size = @sizeOf(BufNode) + new_end_index;
            const log2_align = comptime std.math.log2_int(usize, @alignOf(BufNode));
            if (self.child_allocator.rawResize(cur_alloc_buf, log2_align, bigger_buf_size, @returnAddress())) {
                cur_node.data = bigger_buf_size;
            } else {
                // Allocate a new node if that's not possible
                cur_node = self.createNode(cur_buf.len, n + ptr_align) orelse return null;
            }
        }
    }

    fn resize(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, new_len: usize, ret_addr: usize) bool {
        const self = @as(*ArenaAllocator, @ptrCast(@alignCast(ctx)));
        _ = log2_buf_align;
        _ = ret_addr;
        const cur_node = self.state.buffer_list.first orelse return false;
        const cur_buf = @as([*]u8, @ptrCast(cur_node))[@sizeOf(BufNode)..cur_node.data];
        if (@intFromPtr(cur_buf.ptr) + self.state.end_index != @intFromPtr(buf.ptr) + buf.len) {
            // It's not the most recent allocation, so it cannot be expanded,
            // but it's fine if they want to make it smaller.
            return new_len <= buf.len;
        }
        if (buf.len >= new_len) {
            self.state.end_index -= buf.len - new_len;
            return true;
        } else if (cur_buf.len - self.state.end_index >= new_len - buf.len) {
            self.state.end_index += new_len - buf.len;
            return true;
        } else {
            return false;
        }
    }

    fn free(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, ret_addr: usize) void {
        _ = log2_buf_align;
        _ = ret_addr;
        const self = @as(*ArenaAllocator, @ptrCast(@alignCast(ctx)));
        const cur_node = self.state.buffer_list.first orelse return;
        const cur_buf = @as([*]u8, @ptrCast(cur_node))[@sizeOf(BufNode)..cur_node.data];
        if (@intFromPtr(cur_buf.ptr) + self.state.end_index == @intFromPtr(buf.ptr) + buf.len) {
            self.state.end_index -= buf.len;
        }
    }
};
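
// Illustrative usage sketch (not part of the upstream file) of the pattern described
// in the doc comment on `ArenaAllocator`: allocate repeatedly without freeing, then
// release everything at once with `deinit()`. Sizes and types are arbitrary examples.
test "ArenaAllocator (basic usage sketch)" {
    var arena = ArenaAllocator.init(std.testing.allocator);
    // A single deinit() walks the chunk list and returns every buffer to the child allocator.
    defer arena.deinit();
    const a = arena.allocator();
    _ = try a.alloc(u8, 64);
    _ = try a.alloc(u32, 16);
    _ = try a.create(u64);
}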
test "ArenaAllocator (reset with preheating)" {
var arena_allocator = ArenaAllocator.init(std.testing.allocator);
defer arena_allocator.deinit();
// provides some variance in the allocated data
var rng_src = std.rand.DefaultPrng.init(19930913);
const random = rng_src.random();
var rounds: usize = 25;
while (rounds > 0) {
rounds -= 1;
_ = arena_allocator.reset(.retain_capacity);
var alloced_bytes: usize = 0;
var total_size: usize = random.intRangeAtMost(usize, 256, 16384);
while (alloced_bytes < total_size) {
const size = random.intRangeAtMost(usize, 16, 256);
const alignment = 32;
const slice = try arena_allocator.allocator().alignedAlloc(u8, alignment, size);
try std.testing.expect(std.mem.isAligned(@intFromPtr(slice.ptr), alignment));
try std.testing.expectEqual(size, slice.len);
alloced_bytes += slice.len;
}
}
}
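
// Illustrative sketch (not part of the upstream file) for `queryCapacity()`: the
// reported size excludes the per-chunk `BufNode` header, so it is zero for a fresh
// arena and at least covers whatever has been allocated. 256 is an arbitrary size.
test "ArenaAllocator (queryCapacity sketch)" {
    var arena = ArenaAllocator.init(std.testing.allocator);
    defer arena.deinit();
    try std.testing.expectEqual(@as(usize, 0), arena.queryCapacity());
    _ = try arena.allocator().alloc(u8, 256);
    // Chunks are over-allocated (see createNode), so capacity is at least the requested bytes.
    try std.testing.expect(arena.queryCapacity() >= 256);
}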
test "ArenaAllocator (reset while retaining a buffer)" {
var arena_allocator = ArenaAllocator.init(std.testing.allocator);
defer arena_allocator.deinit();
const a = arena_allocator.allocator();
// Create two internal buffers
_ = try a.alloc(u8, 1);
_ = try a.alloc(u8, 1000);
// Check that we have at least two buffers
try std.testing.expect(arena_allocator.state.buffer_list.first.?.next != null);
// This retains the first allocated buffer
try std.testing.expect(arena_allocator.reset(.{ .retain_with_limit = 1 }));
}
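
// Illustrative sketch (not part of the upstream file) of the memory-saving pattern
// mentioned on `State`: store only the compact `State` and `promote()` it with the
// child allocator whenever the full allocator interface is needed.
test "ArenaAllocator (State promote sketch)" {
    var state = ArenaAllocator.State{};
    // Promote on demand; in longer-lived code the stored state could be promoted
    // again later instead of keeping the full ArenaAllocator around.
    var arena = state.promote(std.testing.allocator);
    defer arena.deinit();
    _ = try arena.allocator().alloc(u8, 128);
    // Persist the updated bookkeeping back into the compact state.
    state = arena.state;
    try std.testing.expect(state.buffer_list.first != null);
}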
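
// Illustrative sketch (not part of the upstream file) for the note on `reset()`:
// with `.free_all` nothing has to be reallocated, so the call always reports success
// and the arena's capacity drops back to zero. 1024 is an arbitrary example size.
test "ArenaAllocator (reset free_all sketch)" {
    var arena = ArenaAllocator.init(std.testing.allocator);
    defer arena.deinit();
    _ = try arena.allocator().alloc(u8, 1024);
    try std.testing.expect(arena.reset(.free_all));
    try std.testing.expectEqual(@as(usize, 0), arena.queryCapacity());
}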