Barretenberg
The ZK-SNARK library at the core of Aztec
slab_allocator.cpp
#include "slab_allocator.hpp"

#include <cstddef>
#include <map>
#include <memory>
#include <mutex>
#include <numeric>
#include <unordered_map>
#include <vector>

#define LOGGING 0

namespace {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
bool allocator_destroyed = false;

// Slabs that are being manually managed by the user.
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
std::unordered_map<void*, std::shared_ptr<void>> manual_slabs;
#ifndef NO_MULTITHREADING
// The manual slabs unordered map is not thread-safe, so we need to manage access to it when multithreaded.
std::mutex manual_slabs_mutex;
#endif
template <typename... Args> inline void dbg_info(Args... args)
{
#if LOGGING == 1
    info(args...);
#else
    // Suppress unused-parameter warning when logging is disabled.
    (void)(sizeof...(args));
#endif
}

class SlabAllocator {
  private:
    size_t circuit_size_hint_;
    std::map<size_t, std::vector<void*>> memory_store;
#ifndef NO_MULTITHREADING
    std::mutex memory_store_mutex;
#endif

  public:
    ~SlabAllocator();
    SlabAllocator() = default;
    SlabAllocator(const SlabAllocator& other) = delete;
    SlabAllocator(SlabAllocator&& other) = delete;
    SlabAllocator& operator=(const SlabAllocator& other) = delete;
    SlabAllocator& operator=(SlabAllocator&& other) = delete;

    void init(size_t circuit_size_hint);

    std::shared_ptr<void> get(size_t size);

    size_t get_total_size();

  private:
    void release(void* ptr, size_t size);
};

SlabAllocator::~SlabAllocator()
{
    allocator_destroyed = true;
    for (auto& e : memory_store) {
        for (auto& p : e.second) {
            aligned_free(p);
        }
    }
}

void SlabAllocator::init(size_t circuit_size_hint)
{
    if (circuit_size_hint <= circuit_size_hint_) {
        return;
    }

    circuit_size_hint_ = circuit_size_hint;

    // Free any existing slabs.
    for (auto& e : memory_store) {
        for (auto& p : e.second) {
            aligned_free(p);
        }
    }
    memory_store.clear();

    dbg_info("slab allocator initing for size: ", circuit_size_hint);

    if (circuit_size_hint == 0ULL) {
        return;
    }

    // Over-allocate because we know there are requests for circuit_size + n. (somewhat arbitrary n = 512)
    size_t overalloc = 512;
    size_t base_size = circuit_size_hint + overalloc;

    std::map<size_t, size_t> prealloc_num;

    // Size comments below assume a base (circuit) size of 2^19, 524288 bytes.

    // /* 0.5 MiB */ prealloc_num[base_size * 1] = 2;  // Batch invert skipped temporary.
    // /* 2 MiB */   prealloc_num[base_size * 4] = 4 + // Composer base wire vectors.
    //                                             1;  // Miscellaneous.
    // /* 6 MiB */   prealloc_num[base_size * 12] = 2 + // next_var_index, prev_var_index
    //                                              2;  // real_variable_index, real_variable_tags
    /* 16 MiB */ prealloc_num[base_size * 32] = 11;    // Composer base selector vectors.
    /* 32 MiB */ prealloc_num[base_size * 32 * 2] = 1; // Miscellaneous.
    /* 50 MiB */ prealloc_num[base_size * 32 * 3] = 1; // Variables.
    /* 64 MiB */ prealloc_num[base_size * 32 * 4] = 1 +  // SRS monomial points.
                                                    4 +  // Coset-fft wires.
                                                    15 + // Coset-fft constraint selectors.
                                                    8 +  // Coset-fft perm selectors.
                                                    1 +  // Coset-fft sorted poly.
                                                    1 +  // Pippenger point_schedule.
                                                    4;   // Miscellaneous.
    /* 128 MiB */ prealloc_num[base_size * 32 * 8] = 1 + // Proving key evaluation domain roots.
                                                     2;  // Pippenger point_pairs.

    for (auto& e : prealloc_num) {
        for (size_t i = 0; i < e.second; ++i) {
            auto size = e.first;
            memory_store[size].push_back(aligned_alloc(32, size));
            dbg_info("Allocated memory slab of size: ", size, " total: ", get_total_size());
        }
    }
}

std::shared_ptr<void> SlabAllocator::get(size_t req_size)
{
#ifndef NO_MULTITHREADING
    std::unique_lock<std::mutex> lock(memory_store_mutex);
#endif

    auto it = memory_store.lower_bound(req_size);

    // Can use a preallocated slab that is less than 2 times the requested size.
    if (it != memory_store.end() && it->first < req_size * 2) {
        size_t size = it->first;
        auto* ptr = it->second.back();
        it->second.pop_back();

        if (it->second.empty()) {
            memory_store.erase(it);
        }

        if (req_size >= circuit_size_hint_ && size > req_size + req_size / 10) {
            dbg_info("WARNING: Using memory slab of size: ",
                     size,
                     " for requested ",
                     req_size,
                     " total: ",
                     get_total_size());
        } else {
            dbg_info("Reusing memory slab of size: ", size, " for requested ", req_size, " total: ", get_total_size());
        }

        return { ptr, [this, size](void* p) {
                    if (allocator_destroyed) {
                        aligned_free(p);
                        return;
                    }
                    this->release(p, size);
                } };
    }

    if (req_size > static_cast<size_t>(1024 * 1024)) {
        dbg_info("WARNING: Allocating unmanaged memory slab of size: ", req_size);
    }
    if (req_size % 32 == 0) {
        return { aligned_alloc(32, req_size), aligned_free };
    }
    // NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
    return { tracy_malloc(req_size), tracy_free };
}

size_t SlabAllocator::get_total_size()
{
    return std::accumulate(memory_store.begin(), memory_store.end(), size_t{ 0 }, [](size_t acc, const auto& kv) {
        return acc + kv.first * kv.second.size();
    });
}

void SlabAllocator::release(void* ptr, size_t size)
{
#ifndef NO_MULTITHREADING
    std::unique_lock<std::mutex> lock(memory_store_mutex);
#endif
    memory_store[size].push_back(ptr);
    // dbg_info("Pooled poly memory of size: ", size, " total: ", get_total_size());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
SlabAllocator allocator;
} // namespace

namespace bb {
void init_slab_allocator(size_t circuit_subgroup_size)
{
    allocator.init(circuit_subgroup_size);
}

std::shared_ptr<void> get_mem_slab(size_t size)
{
    return allocator.get(size);
}

void* get_mem_slab_raw(size_t size)
{
    auto slab = get_mem_slab(size);
#ifndef NO_MULTITHREADING
    std::unique_lock<std::mutex> lock(manual_slabs_mutex);
#endif
    manual_slabs[slab.get()] = slab;
    return slab.get();
}

void free_mem_slab_raw(void* p)
{
    if (allocator_destroyed) {
        aligned_free(p);
        return;
    }
#ifndef NO_MULTITHREADING
    std::unique_lock<std::mutex> lock(manual_slabs_mutex);
#endif
    manual_slabs.erase(p);
}
} // namespace bb
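
Below is a minimal usage sketch of the public API defined in this file (init_slab_allocator, get_mem_slab, get_mem_slab_raw, free_mem_slab_raw). The circuit subgroup size and allocation sizes chosen here are illustrative assumptions, not values required by the library.

#include "slab_allocator.hpp"

#include <cstddef>
#include <cstdint>
#include <memory>

void slab_usage_example()
{
    // Prime the pool once for the expected circuit subgroup size (2^19 is an assumed example).
    bb::init_slab_allocator(size_t{ 1 } << 19);

    {
        // Managed use: the shared_ptr's custom deleter returns the slab to the pool
        // (or frees it outright if the allocator has already been destroyed).
        std::shared_ptr<void> coeffs = bb::get_mem_slab((size_t{ 1 } << 19) * 32);
        auto* bytes = static_cast<std::uint8_t*>(coeffs.get());
        bytes[0] = 1; // ... write polynomial data ...
    } // Slab is returned to the pool here.

    // Manual use: the raw pointer is kept alive via manual_slabs until explicitly freed.
    void* raw = bb::get_mem_slab_raw(1024);
    bb::free_mem_slab_raw(raw);
}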