alloc.h
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
#ifndef _ALLOC_H_
#define _ALLOC_H_

#if !defined(_HOST_H_)
#include "host.h"
#endif // defined(_HOST_H_)

// CompMemKind values are used to tag memory allocations performed via
// the compiler's allocator so that the memory usage of various compiler
// components can be tracked separately (when MEASURE_MEM_ALLOC is defined).
enum CompMemKind
{
#define CompMemKindMacro(kind) CMK_##kind,
#include "compmemkind.h"
    CMK_Count
};
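
// As an illustration of the expansion (the real kind names live in
// compmemkind.h; the two shown here are placeholders only): if that file
// contains
//
//   CompMemKindMacro(Generic)
//   CompMemKindMacro(ASTNode)
//
// the enum above becomes `enum CompMemKind { CMK_Generic, CMK_ASTNode, CMK_Count };`,
// so CMK_Count automatically equals the number of kinds.
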
class ArenaAllocator
{
private:
    ArenaAllocator(const ArenaAllocator& other) = delete;
    ArenaAllocator& operator=(const ArenaAllocator& other) = delete;
    ArenaAllocator& operator=(ArenaAllocator&& other) = delete;

    struct PageDescriptor
    {
        PageDescriptor* m_next;

        size_t m_pageBytes; // # of bytes allocated
        size_t m_usedBytes; // # of bytes actually used. (This is only valid when we've allocated a new page.)
                            // See ArenaAllocator::allocateNewPage.

        BYTE m_contents[];
    };

    enum
    {
        DEFAULT_PAGE_SIZE = 0x10000,
    };

    PageDescriptor* m_firstPage;
    PageDescriptor* m_lastPage;

    // These two pointers (when non-null) will always point into 'm_lastPage'.
    BYTE* m_nextFreeByte;
    BYTE* m_lastFreeByte;

    void* allocateNewPage(size_t size);

    static void* allocateHostMemory(size_t size, size_t* pActualSize);
    static void freeHostMemory(void* block, size_t size);
#if MEASURE_MEM_ALLOC
    struct MemStats
    {
        unsigned allocCnt;                 // # of allocs
        UINT64   allocSz;                  // total size of those allocs.
        UINT64   allocSzMax;               // Maximum single allocation.
        UINT64   allocSzByKind[CMK_Count]; // Classified by "kind".
        UINT64   nraTotalSizeAlloc;
        UINT64   nraTotalSizeUsed;

        static const char* s_CompMemKindNames[]; // Names of the kinds.

        void AddAlloc(size_t sz, CompMemKind cmk)
        {
            allocCnt += 1;
            allocSz += sz;
            if (sz > allocSzMax)
            {
                allocSzMax = sz;
            }
            allocSzByKind[cmk] += sz;
        }

        void Print(FILE* f);       // Print these stats to file.
        void PrintByKind(FILE* f); // Do just the by-kind histogram part.
    };

    struct AggregateMemStats : public MemStats
    {
        unsigned nMethods;

        void Add(const MemStats& ms)
        {
            nMethods++;
            allocCnt += ms.allocCnt;
            allocSz += ms.allocSz;
            allocSzMax = max(allocSzMax, ms.allocSzMax);
            for (int i = 0; i < CMK_Count; i++)
            {
                allocSzByKind[i] += ms.allocSzByKind[i];
            }
            nraTotalSizeAlloc += ms.nraTotalSizeAlloc;
            nraTotalSizeUsed += ms.nraTotalSizeUsed;
        }

        void Print(FILE* f); // Print these stats to file.
    };

public:
    struct MemStatsAllocator
    {
        ArenaAllocator* m_arena;
        CompMemKind     m_kind;

        void* allocateMemory(size_t sz)
        {
            m_arena->m_stats.AddAlloc(sz, m_kind);
            return m_arena->allocateMemory(sz);
        }
    };

private:
    static CritSecObject     s_statsLock; // This lock protects the data structures below.
    static MemStats          s_maxStats;  // Stats for the allocator with the largest amount allocated.
    static AggregateMemStats s_aggStats;  // Aggregates statistics for all allocators.

    MemStats          m_stats;
    MemStatsAllocator m_statsAllocators[CMK_Count];

public:
    MemStatsAllocator* getMemStatsAllocator(CompMemKind kind);
    void finishMemStats();
    void dumpMemStats(FILE* file);

    static void dumpMaxMemStats(FILE* file);
    static void dumpAggregateMemStats(FILE* file);
#endif // MEASURE_MEM_ALLOC
public:
    ArenaAllocator();

    // NOTE: it would be nice to have a destructor on this type to ensure that any value that
    // goes out of scope is either uninitialized or has been torn down via a call to
    // destroy(), but this interacts badly in methods that use SEH. #3058 tracks
    // revisiting EH in the JIT; such a destructor could be added if SEH is removed
    // as part of that work.
    void destroy();

    inline void* allocateMemory(size_t sz);

    size_t getTotalBytesAllocated();
    size_t getTotalBytesUsed();

    static bool bypassHostAllocator();
    static size_t getDefaultPageSize();
};
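
// A minimal lifecycle sketch (illustrative, not part of this header): pages
// are carved out with a bump pointer and are only returned to the host in
// bulk, via destroy(); there is no per-allocation free.
//
//   ArenaAllocator arena;
//   void* buffer = arena.allocateMemory(256); // bump-pointer fast path
//   // ... use buffer ...
//   arena.destroy();                          // releases every page at once
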
//------------------------------------------------------------------------
// ArenaAllocator::allocateMemory:
//    Allocates memory using an `ArenaAllocator`.
//
// Arguments:
//    size - The number of bytes to allocate.
//
// Return Value:
//    A pointer to the allocated memory.
//
// Note:
//    The DEBUG version of the method has some abilities that the release
//    version does not: it may inject faults into the allocator, and it seeds
//    all allocations with a specified pattern to help catch
//    use-before-init problems.
//
inline void* ArenaAllocator::allocateMemory(size_t size)
{
    assert(size != 0);

    // Ensure that we always allocate in pointer sized increments.
    size = roundUp(size, sizeof(size_t));

#if defined(DEBUG)
    if (JitConfig.ShouldInjectFault() != 0)
    {
        // Force the underlying memory allocator (either the OS or the CLR host)
        // to allocate the memory. Any fault injection will kick in.
        void* p = ClrAllocInProcessHeap(0, S_SIZE_T(1));
        if (p != nullptr)
        {
            ClrFreeInProcessHeap(0, p);
        }
        else
        {
            NOMEM(); // Throw!
        }
    }
#endif

    void* block = m_nextFreeByte;
    m_nextFreeByte += size;

    if (m_nextFreeByte > m_lastFreeByte)
    {
        block = allocateNewPage(size);
    }

#if defined(DEBUG)
    memset(block, UninitializedWord<char>(nullptr), size);
#endif

    return block;
}
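
// Worked example of the fast path above, assuming sizeof(size_t) == 8: a
// 13-byte request is rounded up to 16; `block` captures m_nextFreeByte, which
// is then bumped by 16. Only when the bumped pointer passes m_lastFreeByte
// does allocateNewPage(16) run, so the common case costs one addition and one
// comparison.
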
// Allows general purpose code (e.g. collection classes) to allocate
// memory of a pre-determined kind via an arena allocator.
class CompAllocator
{
#if MEASURE_MEM_ALLOC
    ArenaAllocator::MemStatsAllocator* m_arena;
#else
    ArenaAllocator* m_arena;
#endif

public:
    CompAllocator(ArenaAllocator* arena, CompMemKind cmk)
#if MEASURE_MEM_ALLOC
        : m_arena(arena->getMemStatsAllocator(cmk))
#else
        : m_arena(arena)
#endif
    {
    }

    // Allocate a block of memory suitable to store `count` objects of type `T`.
    // Zero-length allocations are not allowed.
    template <typename T>
    T* allocate(size_t count)
    {
        // Ensure that count * sizeof(T) does not overflow.
        if (count > (SIZE_MAX / sizeof(T)))
        {
            NOMEM();
        }

        void* p = m_arena->allocateMemory(count * sizeof(T));

        // Ensure that the allocator returned sizeof(size_t) aligned memory.
        assert((size_t(p) & (sizeof(size_t) - 1)) == 0);

        return static_cast<T*>(p);
    }

    // Deallocate a block of memory previously allocated by `allocate`.
    // The arena allocator does not release memory, so this does nothing.
    void deallocate(void* p)
    {
    }
};
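
// A minimal usage sketch. `CMK_Generic` is assumed here to be one of the
// kinds expanded from compmemkind.h:
//
//   CompAllocator alloc(&arena, CMK_Generic);
//   unsigned* table = alloc.allocate<unsigned>(64); // 64 * sizeof(unsigned) bytes
//   alloc.deallocate(table); // a no-op; the memory lives until arena.destroy()
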
// Global operator new overloads that work with CompAllocator
inline void* __cdecl operator new(size_t n, CompAllocator alloc)
{
    return alloc.allocate<char>(n);
}

inline void* __cdecl operator new[](size_t n, CompAllocator alloc)
{
    return alloc.allocate<char>(n);
}
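
// These overloads enable placement-new syntax over a CompAllocator; `Node` is
// a hypothetical payload type used only for illustration:
//
//   Node* n   = new (alloc) Node();    // single object
//   char* buf = new (alloc) char[len]; // raw array
//
// This header declares no matching operator delete: objects allocated this
// way are reclaimed wholesale when the owning arena is destroyed.
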
// A CompAllocator wrapper that implements IAllocator and allows zero-length
// memory allocations (the arena allocator does not support zero-length
// allocations).
class CompIAllocator : public IAllocator
{
    CompAllocator m_alloc;
    char          m_zeroLenAllocTarg;

public:
    CompIAllocator(CompAllocator alloc) : m_alloc(alloc)
    {
    }

    // Allocates a block of memory at least `sz` in size.
    virtual void* Alloc(size_t sz) override
    {
        if (sz == 0)
        {
            return &m_zeroLenAllocTarg;
        }
        else
        {
            return m_alloc.allocate<char>(sz);
        }
    }

    // Allocates a block of memory at least `elems * elemSize` in size.
    virtual void* ArrayAlloc(size_t elems, size_t elemSize) override
    {
        if ((elems == 0) || (elemSize == 0))
        {
            return &m_zeroLenAllocTarg;
        }
        else
        {
            // Ensure that elems * elemSize does not overflow.
            if (elems > (SIZE_MAX / elemSize))
            {
                NOMEM();
            }

            return m_alloc.allocate<char>(elems * elemSize);
        }
    }

    // Frees the block of memory pointed to by p.
    virtual void Free(void* p) override
    {
        m_alloc.deallocate(p);
    }
};
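
// A minimal sketch of the wrapper in use; code written against the IAllocator
// interface can run over an arena unchanged:
//
//   CompIAllocator iAlloc(alloc);
//   void* p = iAlloc.Alloc(0); // legal here: returns the address of the dummy byte
//   iAlloc.Free(p);            // forwards to deallocate(), i.e. a no-op
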
#endif // _ALLOC_H_