// (Scrape artifact removed: GitHub page chrome and the line-number gutter
//  for this file — codeCache.hpp, 451 lines (384 loc), 18.8 KB.)
/*
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_CODE_CODECACHE_HPP
#define SHARE_CODE_CODECACHE_HPP

#include "code/codeBlob.hpp"
#include "code/nmethod.hpp"
#include "gc/shared/gcBehaviours.hpp"
#include "memory/allocation.hpp"
#include "memory/heap.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/oopsHierarchy.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/numberSeq.hpp"
// The CodeCache implements the code cache for various pieces of generated
// code, e.g., compiled java methods, runtime stubs, transition frames, etc.
// The entries in the CodeCache are all CodeBlob's.
// -- Implementation --
// The CodeCache consists of one or more CodeHeaps, each of which contains
// CodeBlobs of a specific CodeBlobType. Currently heaps for the following
// types are available:
// - Non-nmethods: Non-nmethods like Buffers, Adapters and Runtime Stubs
// - Profiled nmethods: nmethods that are profiled, i.e., those
// executed at level 2 or 3
// - Non-Profiled nmethods: nmethods that are not profiled, i.e., those
// executed at level 1 or 4 and native methods
// - All: Used for code of all types if code cache segmentation is disabled.
//
// In the rare case of the non-nmethod code heap getting full, non-nmethod code
// will be stored in the non-profiled code heap as a fallback solution.
//
// Depending on the availability of compilers and compilation mode there
// may be fewer heaps. The size of the code heaps depends on the values of
// ReservedCodeCacheSize, NonProfiledCodeHeapSize and ProfiledCodeHeapSize
// (see CodeCache::heap_available(..) and CodeCache::initialize_heaps(..)
// for details).
//
// Code cache segmentation is controlled by the flag SegmentedCodeCache.
// If turned off, all code types are stored in a single code heap. By default
// code cache segmentation is turned on if tiered mode is enabled and
// ReservedCodeCacheSize >= 240 MB.
//
// All methods of the CodeCache accepting a CodeBlobType only apply to
// CodeBlobs of the given type. For example, iteration over the
// CodeBlobs of a specific type can be done by using CodeCache::first_blob(..)
// and CodeCache::next_blob(..) and providing the corresponding CodeBlobType.
//
// IMPORTANT: If you add new CodeHeaps to the code cache or change the
// existing ones, make sure to adapt the dtrace scripts (jhelper.d) for
// Solaris and BSD.
// Forward declarations for types that are referenced below only by
// pointer or reference, so their full definitions are not needed here.
class ExceptionCache;
class KlassDepChange;
class OopClosure;
class ShenandoahParallelCodeHeapIterator;
class NativePostCallNop;
class DeoptimizationScope;
class ReservedSpace;

#ifdef LINUX
// Default output path for the perf(1) JIT symbol map; "%p" expands to the VM pid.
#define DEFAULT_PERFMAP_FILENAME "/tmp/perf-%p.map"
#endif
classCodeCache : AllStatic {
friendclassVMStructs;
friendclassJVMCIVMStructs;
template <classT, classFilter, bool is_relaxed> friendclassCodeBlobIterator;
friendclassWhiteBox;
friendclassShenandoahParallelCodeHeapIterator;
private:
// CodeHeaps of the cache
static GrowableArray<CodeHeap*>* _heaps;
static GrowableArray<CodeHeap*>* _nmethod_heaps;
static GrowableArray<CodeHeap*>* _allocable_heaps;
static address _low_bound; // Lower bound of CodeHeap addresses
static address _high_bound; // Upper bound of CodeHeap addresses
staticvolatileint _number_of_nmethods_with_dependencies; // Total number of nmethods with dependencies
staticuint8_t _unloading_cycle; // Global state for recognizing old nmethods that need to be unloaded
staticuint64_t _gc_epoch; // Global state for tracking when nmethods were found to be on-stack
staticuint64_t _cold_gc_count; // Global state for determining how many GCs are needed before an nmethod is cold
staticsize_t _last_unloading_used;
staticdouble _last_unloading_time;
static TruncatedSeq _unloading_gc_intervals;
static TruncatedSeq _unloading_allocation_rates;
staticvolatilebool _unloading_threshold_gc_requested;
static ExceptionCache* volatile _exception_cache_purge_list;
// CodeHeap management
staticvoidinitialize_heaps(); // Initializes the CodeHeaps
// Creates a new heap with the given name and size, containing CodeBlobs of the given type
staticvoidadd_heap(ReservedSpace rs, constchar* name, CodeBlobType code_blob_type);
static CodeHeap* get_code_heap_containing(void* p); // Returns the CodeHeap containing the given pointer, or nullptr
static CodeHeap* get_code_heap(constvoid* cb); // Returns the CodeHeap for the given CodeBlob
static CodeHeap* get_code_heap(CodeBlobType code_blob_type); // Returns the CodeHeap for the given CodeBlobType
// Returns the name of the VM option to set the size of the corresponding CodeHeap
staticconstchar* get_code_heap_flag_name(CodeBlobType code_blob_type);
static ReservedSpace reserve_heap_memory(size_t size, size_t rs_ps); // Reserves one continuous chunk of memory for the CodeHeaps
// Iteration
static CodeBlob* first_blob(CodeHeap* heap); // Returns the first CodeBlob on the given CodeHeap
static CodeBlob* first_blob(CodeBlobType code_blob_type); // Returns the first CodeBlob of the given type
static CodeBlob* next_blob(CodeHeap* heap, CodeBlob* cb); // Returns the next CodeBlob on the given CodeHeap
private:
staticsize_tbytes_allocated_in_freelists();
staticintallocated_segments();
staticsize_tfreelists_length();
// Make private to prevent unsafe calls. Not all CodeBlob*'s are embedded in a CodeHeap.
staticboolcontains(CodeBlob *p) { fatal("don't call me!"); returnfalse; }
public:
// Initialization
staticvoidinitialize();
staticsize_tpage_size(bool aligned = true, size_t min_pages = 1); // Returns the page size used by the CodeCache
staticintcode_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs);
staticvoidadd_heap(CodeHeap* heap);
staticconst GrowableArray<CodeHeap*>* heaps() { return _heaps; }
staticconst GrowableArray<CodeHeap*>* nmethod_heaps() { return _nmethod_heaps; }
// Allocation/administration
static CodeBlob* allocate(uint size, CodeBlobType code_blob_type, bool handle_alloc_failure = true, CodeBlobType orig_code_blob_type = CodeBlobType::All); // allocates a new CodeBlob
staticvoidcommit(CodeBlob* cb); // called when the allocated CodeBlob has been filled
staticvoidfree(CodeBlob* cb); // frees a CodeBlob
staticvoidfree_unused_tail(CodeBlob* cb, size_t used); // frees the unused tail of a CodeBlob (only used by TemplateInterpreter::initialize())
staticboolcontains(void *p); // returns whether p is included
staticboolcontains(nmethod* nm); // returns whether nm is included
staticvoidblobs_do(voidf(CodeBlob* cb)); // iterates over all CodeBlobs
staticvoidnmethods_do(voidf(nmethod* nm)); // iterates over all nmethods
staticvoidnmethods_do(NMethodClosure* cl); // iterates over all nmethods
staticvoidmetadata_do(MetadataClosure* f); // iterates over metadata in alive nmethods
// Lookup
static CodeBlob* find_blob(void* start); // Returns the CodeBlob containing the given address
static CodeBlob* find_blob_fast(void* start); // Returns the CodeBlob containing the given address
static CodeBlob* find_blob_and_oopmap(void* start, int& slot); // Returns the CodeBlob containing the given address
staticintfind_oopmap_slot_fast(void* start); // Returns a fast oopmap slot if there is any; -1 otherwise
static nmethod* find_nmethod(void* start); // Returns the nmethod containing the given address
staticintblob_count(); // Returns the total number of CodeBlobs in the cache
staticintblob_count(CodeBlobType code_blob_type);
staticintadapter_count(); // Returns the total number of Adapters in the cache
staticintadapter_count(CodeBlobType code_blob_type);
staticintnmethod_count(); // Returns the total number of nmethods in the cache
staticintnmethod_count(CodeBlobType code_blob_type);
// GC support
staticvoidverify_oops();
// Helper scope object managing code cache unlinking behavior, i.e. sets and
// restores the closure that determines which nmethods are going to be removed
// during the unlinking part of code cache unloading.
classUnlinkingScope : StackObj {
ClosureIsUnloadingBehaviour _is_unloading_behaviour;
IsUnloadingBehaviour* _saved_behaviour;
public:
UnlinkingScope(BoolObjectClosure* is_alive);
~UnlinkingScope();
};
// Code cache unloading heuristics
staticuint64_tcold_gc_count();
staticvoidupdate_cold_gc_count();
staticvoidgc_on_allocation();
// The GC epoch and marking_cycle code below is there to support sweeping
// nmethods in loom stack chunks.
staticuint64_tgc_epoch();
staticboolis_gc_marking_cycle_active();
staticuint64_tprevious_completed_gc_marking_cycle();
staticvoidon_gc_marking_cycle_start();
staticvoidon_gc_marking_cycle_finish();
// Arm nmethods so that special actions are taken (nmethod_entry_barrier) for
// on-stack nmethods. It's used in two places:
// 1. Used before the start of concurrent marking so that oops inside
// on-stack nmethods are visited.
// 2. Used at the end of (stw/concurrent) marking so that nmethod::_gc_epoch
// is up-to-date, which provides more accurate estimate of
// nmethod::is_cold.
staticvoidarm_all_nmethods();
staticvoidmaybe_restart_compiler(size_t freed_memory);
staticvoiddo_unloading(bool unloading_occurred);
staticuint8_tunloading_cycle() { return _unloading_cycle; }
staticvoidincrement_unloading_cycle();
staticvoidrelease_exception_cache(ExceptionCache* entry);
staticvoidpurge_exception_caches();
// Printing/debugging
staticvoidprint(); // prints summary
staticvoidprint_internals();
staticvoidprint_memory_overhead();
staticvoidverify(); // verifies the code cache
staticvoidprint_trace(constchar* event, CodeBlob* cb, uint size = 0) PRODUCT_RETURN;
staticvoidprint_summary(outputStream* st, bool detailed = true); // Prints a summary of the code cache usage
staticvoidlog_state(outputStream* st);
LINUX_ONLY(staticvoidwrite_perf_map(constchar* filename, outputStream* st);) // Prints warnings and error messages to outputStream
static const char* get_code_heap_name(CodeBlobType code_blob_type) { return (heap_available(code_blob_type) ? get_code_heap(code_blob_type)->name() : "Unused"); }
staticvoidreport_codemem_full(CodeBlobType code_blob_type, bool print);
// Dcmd (Diagnostic commands)
staticvoidprint_codelist(outputStream* st);
staticvoidprint_layout(outputStream* st);
// The full limits of the codeCache
static address low_bound() { return _low_bound; }
static address low_bound(CodeBlobType code_blob_type);
static address high_bound() { return _high_bound; }
static address high_bound(CodeBlobType code_blob_type);
// Profiling
staticsize_tcapacity();
staticsize_tunallocated_capacity(CodeBlobType code_blob_type);
staticsize_tunallocated_capacity();
staticsize_tmax_capacity();
staticdoublereverse_free_ratio();
staticsize_tmax_distance_to_non_nmethod();
staticboolis_non_nmethod(address addr);
staticvoidclear_inline_caches(); // clear all inline caches
staticvoidcleanup_inline_caches_whitebox(); // clean bad nmethods from inline caches
// Returns true if an own CodeHeap for the given CodeBlobType is available
staticboolheap_available(CodeBlobType code_blob_type);
// Returns the CodeBlobType for the given nmethod
static CodeBlobType get_code_blob_type(nmethod* nm) {
returnget_code_heap(nm)->code_blob_type();
}
staticboolcode_blob_type_accepts_nmethod(CodeBlobType type) {
return type == CodeBlobType::All || type <= CodeBlobType::MethodProfiled;
}
staticboolcode_blob_type_accepts_allocable(CodeBlobType type) {
return type <= CodeBlobType::All;
}
// Returns the CodeBlobType for the given compilation level
static CodeBlobType get_code_blob_type(int comp_level) {
if (comp_level == CompLevel_none ||
comp_level == CompLevel_simple ||
comp_level == CompLevel_full_optimization) {
// Non profiled methods
return CodeBlobType::MethodNonProfiled;
} elseif (comp_level == CompLevel_limited_profile ||
comp_level == CompLevel_full_profile) {
// Profiled methods
return CodeBlobType::MethodProfiled;
}
ShouldNotReachHere();
returnstatic_cast<CodeBlobType>(0);
}
staticvoidverify_clean_inline_caches();
// Deoptimization
private:
staticvoidmark_for_deoptimization(DeoptimizationScope* deopt_scope, KlassDepChange& changes);
public:
staticvoidmark_all_nmethods_for_deoptimization(DeoptimizationScope* deopt_scope);
staticvoidmark_for_deoptimization(DeoptimizationScope* deopt_scope, Method* dependee);
staticvoidmake_marked_nmethods_deoptimized();
// Marks dependents during classloading
staticvoidmark_dependents_on(DeoptimizationScope* deopt_scope, InstanceKlass* dependee);
// RedefineClasses support
// Marks in case of evolution
staticvoidmark_dependents_for_evol_deoptimization(DeoptimizationScope* deopt_scope);
staticvoidmark_all_nmethods_for_evol_deoptimization(DeoptimizationScope* deopt_scope);
staticvoidold_nmethods_do(MetadataClosure* f) NOT_JVMTI_RETURN;
staticvoidunregister_old_nmethod(nmethod* c) NOT_JVMTI_RETURN;
// Support for fullspeed debugging
staticvoidmark_dependents_on_method_for_breakpoint(const methodHandle& dependee);
// tells if there are nmethods with dependencies
staticboolhas_nmethods_with_dependencies();
staticintget_codemem_full_count(CodeBlobType code_blob_type) {
CodeHeap* heap = get_code_heap(code_blob_type);
return (heap != nullptr) ? heap->full_count() : 0;
}
// CodeHeap State Analytics.
// interface methods for CodeHeap printing, called by CompileBroker
staticvoidaggregate(outputStream *out, size_t granularity);
staticvoiddiscard(outputStream *out);
staticvoidprint_usedSpace(outputStream *out);
staticvoidprint_freeSpace(outputStream *out);
staticvoidprint_count(outputStream *out);
staticvoidprint_space(outputStream *out);
staticvoidprint_age(outputStream *out);
staticvoidprint_names(outputStream *out);
};
// Iterator to iterate over code blobs in the CodeCache.
// The relaxed iterators only hold the CodeCache_lock across next calls
template <classT, classFilter, bool is_relaxed> classCodeBlobIterator : publicStackObj {
public:
enum LivenessFilter { all, not_unloading };
private:
CodeBlob* _code_blob; // Current CodeBlob
GrowableArrayIterator<CodeHeap*> _heap;
GrowableArrayIterator<CodeHeap*> _end;
bool _not_unloading; // Those nmethods that are not unloading
voidinitialize_iteration(T* nm) {
}
boolnext_impl() {
for (;;) {
// Walk through heaps as required
if (!next_blob()) {
if (_heap == _end) {
returnfalse;
}
++_heap;
continue;
}
// Filter is_unloading as required
if (_not_unloading) {
nmethod* nm = _code_blob->as_nmethod_or_null();
if (nm != nullptr && nm->is_unloading()) {
continue;
}
}
returntrue;
}
}
public:
CodeBlobIterator(LivenessFilter filter, T* nm = nullptr)
: _not_unloading(filter == not_unloading)
{
if (Filter::heaps() == nullptr) {
// The iterator is supposed to shortcut since we have
// _heap == _end, but make sure we do not have garbage
// in other fields as well.
_code_blob = nullptr;
return;
}
_heap = Filter::heaps()->begin();
_end = Filter::heaps()->end();
// If set to nullptr, initialized by first call to next()
_code_blob = nm;
if (nm != nullptr) {
while(!(*_heap)->contains(_code_blob)) {
++_heap;
}
assert((*_heap)->contains(_code_blob), "match not found");
}
}
// Advance iterator to next blob
boolnext() {
if (is_relaxed) {
MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag);
returnnext_impl();
} else {
assert_locked_or_safepoint(CodeCache_lock);
returnnext_impl();
}
}
boolend() const { return _code_blob == nullptr; }
T* method() const { return (T*)_code_blob; }
private:
// Advance iterator to the next blob in the current code heap
boolnext_blob() {
if (_heap == _end) {
returnfalse;
}
CodeHeap *heap = *_heap;
// Get first method CodeBlob
if (_code_blob == nullptr) {
_code_blob = CodeCache::first_blob(heap);
if (_code_blob == nullptr) {
returnfalse;
} elseif (Filter::apply(_code_blob)) {
returntrue;
}
}
// Search for next method CodeBlob
_code_blob = CodeCache::next_blob(heap, _code_blob);
while (_code_blob != nullptr && !Filter::apply(_code_blob)) {
_code_blob = CodeCache::next_blob(heap, _code_blob);
}
return _code_blob != nullptr;
}
};
structNMethodFilter {
staticboolapply(CodeBlob* cb) { return cb->is_nmethod(); }
staticconst GrowableArray<CodeHeap*>* heaps() { returnCodeCache::nmethod_heaps(); }
};
structAllCodeBlobsFilter {
staticboolapply(CodeBlob* cb) { returntrue; }
staticconst GrowableArray<CodeHeap*>* heaps() { returnCodeCache::heaps(); }
};
// Convenience iterator instantiations; the bool parameter is is_relaxed
// (relaxed iterators take CodeCache_lock only around each next() call).
typedef CodeBlobIterator<nmethod, NMethodFilter, false /* is_relaxed */> NMethodIterator;
typedef CodeBlobIterator<nmethod, NMethodFilter, true /* is_relaxed */> RelaxedNMethodIterator;
typedef CodeBlobIterator<CodeBlob, AllCodeBlobsFilter, false /* is_relaxed */> AllCodeBlobsIterator;

#endif // SHARE_CODE_CODECACHE_HPP