File:    jdk/src/hotspot/share/code/codeCache.hpp
Warning: line 305, column 40: Assigned value is garbage or undefined
File: jdk/src/hotspot/share/runtime/sweeper.cpp

  1  /*
  2   * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
  3   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4   *
  5   * This code is free software; you can redistribute it and/or modify it
  6   * under the terms of the GNU General Public License version 2 only, as
  7   * published by the Free Software Foundation.
  8   *
  9   * This code is distributed in the hope that it will be useful, but WITHOUT
 10   * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11   * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 12   * version 2 for more details (a copy is included in the LICENSE file that
 13   * accompanied this code).
 14   *
 15   * You should have received a copy of the GNU General Public License version
 16   * 2 along with this work; if not, write to the Free Software Foundation,
 17   * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18   *
 19   * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20   * or visit www.oracle.com if you need additional information or have any
 21   * questions.
 22   *
 23   */
 24
 25  #include "precompiled.hpp"
 26  #include "code/codeCache.hpp"
 27  #include "code/compiledIC.hpp"
 28  #include "code/icBuffer.hpp"
 29  #include "code/nmethod.hpp"
 30  #include "compiler/compileBroker.hpp"
 31  #include "gc/shared/collectedHeap.hpp"
 32  #include "gc/shared/workerThread.hpp"
 33  #include "jfr/jfrEvents.hpp"
 34  #include "logging/log.hpp"
 35  #include "logging/logStream.hpp"
 36  #include "memory/allocation.inline.hpp"
 37  #include "memory/resourceArea.hpp"
 38  #include "memory/universe.hpp"
 39  #include "oops/method.hpp"
 40  #include "runtime/interfaceSupport.inline.hpp"
 41  #include "runtime/handshake.hpp"
 42  #include "runtime/mutexLocker.hpp"
 43  #include "runtime/orderAccess.hpp"
 44  #include "runtime/os.hpp"
 45  #include "runtime/sweeper.hpp"
 46  #include "runtime/thread.inline.hpp"
 47  #include "runtime/vmOperations.hpp"
 48  #include "runtime/vmThread.hpp"
 49  #include "utilities/events.hpp"
 50  #include "utilities/xmlstream.hpp"
 51
 52  #ifdef ASSERT
 53
 54  #define SWEEP(nm) record_sweep(nm, __LINE__)
 55  // Sweeper logging code
 56  class SweeperRecord {
 57   public:
 58    int traversal;
 59    int compile_id;
 60    long traversal_mark;
 61    int state;
 62    const char* kind;
 63    address vep;
 64    address uep;
 65    int line;
 66
 67    void print() {
 68      tty->print_cr("traversal = %d compile_id = %d %s uep = " PTR_FORMAT " vep = "
 69                    PTR_FORMAT " state = %d traversal_mark %ld line = %d",
 70                    traversal,
 71                    compile_id,
 72                    kind == NULL ? "" : kind,
 73                    p2i(uep),
 74                    p2i(vep),
 75                    state,
 76                    traversal_mark,
 77                    line);
 78    }
 79  };
 80
 81  static int _sweep_index = 0;
 82  static SweeperRecord* _records = NULL;
 83
 84  void NMethodSweeper::record_sweep(CompiledMethod* nm, int line) {
 85    if (_records != NULL) {
 86      _records[_sweep_index].traversal = _traversals;
 87      _records[_sweep_index].traversal_mark = nm->is_nmethod() ? ((nmethod*)nm)->stack_traversal_mark() : 0;
 88      _records[_sweep_index].compile_id = nm->compile_id();
 89      _records[_sweep_index].kind = nm->compile_kind();
 90      _records[_sweep_index].state = nm->get_state();
 91      _records[_sweep_index].vep = nm->verified_entry_point();
 92      _records[_sweep_index].uep = nm->entry_point();
 93      _records[_sweep_index].line = line;
 94      _sweep_index = (_sweep_index + 1) % SweeperLogEntries;
 95    }
 96  }
 97
 98  void NMethodSweeper::init_sweeper_log() {
 99    if (LogSweeper && _records == NULL) {
100      // Create the ring buffer for the logging code
101      _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries, mtGC);
102      memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
103    }
104  }
105  #else
106  #define SWEEP(nm)
107  #endif
108
109  CompiledMethodIterator NMethodSweeper::_current(CompiledMethodIterator::all_blobs); // Current compiled method
110  long   NMethodSweeper::_traversals                  = 0;     // Stack scan count, also sweep ID.
111  long   NMethodSweeper::_total_nof_code_cache_sweeps = 0;     // Total number of full sweeps of the code cache
112  int    NMethodSweeper::_seen                        = 0;     // Number of nmethods processed so far in the current pass of the CodeCache
113  size_t NMethodSweeper::_sweep_threshold_bytes       = 0;     // Threshold for when to sweep. Updated after ergonomics
114
115  volatile bool NMethodSweeper::_should_sweep         = false; // Indicates if a normal sweep will be done
116  volatile bool NMethodSweeper::_force_sweep          = false; // Indicates if a forced sweep will be done
117  volatile size_t NMethodSweeper::_bytes_changed      = 0;     // Counts the total nmethod size if the nmethod changed from:
118                                                               //   1) alive       -> not_entrant
119                                                               //   2) not_entrant -> zombie
120  int    NMethodSweeper::_hotness_counter_reset_val   = 0;
121
122  long   NMethodSweeper::_total_nof_methods_reclaimed    = 0;  // Accumulated number of methods flushed
123  long   NMethodSweeper::_total_nof_c2_methods_reclaimed = 0;  // Accumulated number of C2 methods flushed
124  size_t NMethodSweeper::_total_flushed_size             = 0;  // Total number of bytes flushed from the code cache
125  Tickspan NMethodSweeper::_total_time_sweeping;               // Accumulated time sweeping
126  Tickspan NMethodSweeper::_total_time_this_sweep;             // Total time this sweep
127  Tickspan NMethodSweeper::_peak_sweep_time;                   // Peak time for a full sweep
128  Tickspan NMethodSweeper::_peak_sweep_fraction_time;          // Peak time sweeping one fraction
129
130  class MarkActivationClosure: public CodeBlobClosure {
131   public:
132    virtual void do_code_blob(CodeBlob* cb) {
133      assert(cb->is_nmethod(), "CodeBlob should be nmethod");
134      nmethod* nm = (nmethod*)cb;
135      nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
136      // If we see an activation belonging to a non_entrant nmethod, we mark it.
137      if (nm->is_not_entrant()) {
138        nm->mark_as_seen_on_stack();
139      }
140    }
141  };
142  static MarkActivationClosure mark_activation_closure;
143
144  int NMethodSweeper::hotness_counter_reset_val() {
145    if (_hotness_counter_reset_val == 0) {
146      _hotness_counter_reset_val = (ReservedCodeCacheSize < M) ? 1 : (ReservedCodeCacheSize / M) * 2;
147    }
148    return _hotness_counter_reset_val;
149  }
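
[Editor's example, not part of the original file] The reset value scales with
the reserved code cache size. A minimal standalone sketch of the same
arithmetic (the helper name and sample sizes are assumptions; HotSpot's M
constant is spelled out as MB here):

    #include <cstdio>

    // Mirrors: (ReservedCodeCacheSize < M) ? 1 : (ReservedCodeCacheSize / M) * 2
    static int hotness_reset_val_sketch(long long reserved_bytes) {
      const long long MB = 1024 * 1024;
      return (reserved_bytes < MB) ? 1 : (int)(reserved_bytes / MB) * 2;
    }

    int main() {
      printf("%d\n", hotness_reset_val_sketch(240LL * 1024 * 1024)); // 480 for a 240 MB cache
      printf("%d\n", hotness_reset_val_sketch(512 * 1024));          // 1 for caches below 1 MB
      return 0;
    }
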
150  bool NMethodSweeper::wait_for_stack_scanning() {
151    return _current.end();
152  }
153
154  class NMethodMarkingClosure : public HandshakeClosure {
155   private:
156    CodeBlobClosure* _cl;
157   public:
158    NMethodMarkingClosure(CodeBlobClosure* cl) : HandshakeClosure("NMethodMarking"), _cl(cl) {}
159    void do_thread(Thread* thread) {
160      if (thread->is_Java_thread() && !thread->is_Code_cache_sweeper_thread()) {
161        JavaThread::cast(thread)->nmethods_do(_cl);
162      }
163    }
164  };
165
166  CodeBlobClosure* NMethodSweeper::prepare_mark_active_nmethods() {
167  #ifdef ASSERT
168    assert(Thread::current()->is_Code_cache_sweeper_thread(), "must be executed under CodeCache_lock and in sweeper thread");
169    assert_lock_strong(CodeCache_lock);
170  #endif
171
172    // If we do not want to reclaim not-entrant or zombie methods there is no need
173    // to scan stacks.
174    if (!MethodFlushing) {
175      return NULL;
176    }
177
178    // Check for restart
179    assert(_current.method() == NULL, "should only happen between sweeper cycles");
180    assert(wait_for_stack_scanning(), "should only happen between sweeper cycles");
181
182    _seen = 0;
183    _current = CompiledMethodIterator(CompiledMethodIterator::all_blobs);
184    // Initialize to first nmethod
185    _current.next();
186    _traversals += 1;
187    _total_time_this_sweep = Tickspan();
188
189    if (PrintMethodFlushing) {
190      tty->print_cr("### Sweep: stack traversal %ld", _traversals);
191    }
192    return &mark_activation_closure;
193  }
194
195  /**
196   * This function triggers a VM operation that does stack scanning of active
197   * methods. Stack scanning is mandatory for the sweeper to make progress.
198   */
199  void NMethodSweeper::do_stack_scanning() {
200    assert(!CodeCache_lock->owned_by_self(), "just checking");
201    if (wait_for_stack_scanning()) {
202      CodeBlobClosure* code_cl;
203      {
204        MutexLocker ccl(CodeCache_lock, Mutex::_no_safepoint_check_flag);
205        code_cl = prepare_mark_active_nmethods();
206      }
207      if (code_cl != NULL) {
208        NMethodMarkingClosure nm_cl(code_cl);
209        Handshake::execute(&nm_cl);
210      }
211    }
212  }
213
214  void NMethodSweeper::sweeper_loop() {
215    bool timeout;
216    while (true) {
217      {
218        ThreadBlockInVM tbivm(JavaThread::current());
219        MonitorLocker waiter(CodeSweeper_lock, Mutex::_no_safepoint_check_flag);
220        const long wait_time = 60*60*24 * 1000;
221        timeout = waiter.wait(wait_time);
222      }
223      if (!timeout && (_should_sweep || _force_sweep)) {
224        sweep();
225      }
226    }
227  }
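
[Editor's example, not part of the original file] The loop above parks the
sweeper on CodeSweeper_lock with a 24-hour timeout and only sweeps when it is
woken with a pending request. A hedged standalone sketch of the same
wait/notify shape using the C++ standard library (all names hypothetical):

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    static std::mutex              sweeper_mutex;
    static std::condition_variable sweeper_cv;
    static bool should_sweep = false;  // set by producers, cleared by the sweeper

    static void sweeper_loop_sketch() {
      for (;;) {
        std::unique_lock<std::mutex> lock(sweeper_mutex);
        // Wake early when notified with work pending; otherwise time out after 24 h.
        bool has_work = sweeper_cv.wait_for(lock, std::chrono::hours(24),
                                            [] { return should_sweep; });
        if (has_work) {
          should_sweep = false;
          lock.unlock();
          // sweep();  // do the actual work outside the lock
        }
      }
    }
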
228
229  /**
230   * Wakes up the sweeper thread to sweep if code cache space runs low
231   */
232  void NMethodSweeper::report_allocation(int code_blob_type) {
233    if (should_start_aggressive_sweep(code_blob_type)) {
234      MonitorLocker waiter(CodeSweeper_lock, Mutex::_no_safepoint_check_flag);
235      _should_sweep = true;
236      CodeSweeper_lock->notify();
237    }
238  }
239
240  bool NMethodSweeper::should_start_aggressive_sweep(int code_blob_type) {
241    // Makes sure that we do not invoke the sweeper too often during startup.
242    double start_threshold = 100.0 / (double)StartAggressiveSweepingAt;
243    double aggressive_sweep_threshold = MAX2(start_threshold, 1.1);
244    return (CodeCache::reverse_free_ratio(code_blob_type) >= aggressive_sweep_threshold);
245  }
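
[Editor's example, not part of the original file] With the default
StartAggressiveSweepingAt of 10 (an assumption; the default can differ between
releases), start_threshold is 100.0 / 10 = 10.0, so an aggressive sweep starts
once reverse_free_ratio() (roughly capacity / free space) reaches 10, i.e. when
no more than about a tenth of the code heap is still free:

    #include <algorithm>
    #include <cstdio>

    int main() {
      const double start_aggressive_sweeping_at = 10.0;  // assumed flag value
      double start_threshold = 100.0 / start_aggressive_sweeping_at;  // 10.0
      double aggressive_threshold = std::max(start_threshold, 1.1);   // 10.0
      printf("sweep when reverse_free_ratio >= %.1f\n", aggressive_threshold);
      return 0;
    }
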
246
247  /**
248   * Wakes up the sweeper thread and forces a sweep. Blocks until it has finished.
249   */
250  void NMethodSweeper::force_sweep() {
251    ThreadBlockInVM tbivm(JavaThread::current());
252    MonitorLocker waiter(CodeSweeper_lock, Mutex::_no_safepoint_check_flag);
253    // Request forced sweep
254    _force_sweep = true;
255    while (_force_sweep) {
256      // Notify sweeper that we want to force a sweep and wait for completion.
257      // In case a sweep is currently taking place we time out and try again,
258      // because we want to enforce a full sweep.
259      CodeSweeper_lock->notify();
260      waiter.wait(1000);
261    }
262  }
263
264  /**
265   * Handle a safepoint request
266   */
267  void NMethodSweeper::handle_safepoint_request() {
268    JavaThread* thread = JavaThread::current();
269    if (SafepointMechanism::should_process(thread)) {
270      if (PrintMethodFlushing && Verbose) {
271        tty->print_cr("### Sweep at %d out of %d, yielding to safepoint", _seen, CodeCache::nmethod_count());
272      }
273      MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
274
275      ThreadBlockInVM tbivm(thread);
276    }
277  }
278
279  void NMethodSweeper::sweep() {
280    assert(_should_sweep || _force_sweep, "must have been set");
281    assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
282    Atomic::store(&_bytes_changed, static_cast<size_t>(0)); // reset regardless of sleep reason
283    if (_should_sweep) {
284      MutexLocker mu(CodeSweeper_lock, Mutex::_no_safepoint_check_flag);
285      _should_sweep = false;
286    }
287
288    do_stack_scanning();
289
290    init_sweeper_log();
291    sweep_code_cache();
292
293    // We are done with sweeping the code cache once.
294    _total_nof_code_cache_sweeps++;
295
296    if (_force_sweep) {
297      // Notify requester that forced sweep finished
298      MutexLocker mu(CodeSweeper_lock, Mutex::_no_safepoint_check_flag);
299      _force_sweep = false;
300      CodeSweeper_lock->notify();
301    }
302  }
303
304  static void post_sweep_event(EventSweepCodeCache* event,
305                               const Ticks& start,
306                               const Ticks& end,
307                               s4 traversals,
308                               int swept,
309                               int flushed,
310                               int zombified) {
311    assert(event != NULL, "invariant");
312    assert(event->should_commit(), "invariant");
313    event->set_starttime(start);
314    event->set_endtime(end);
315    event->set_sweepId(traversals);
316    event->set_sweptCount(swept);
317    event->set_flushedCount(flushed);
318    event->set_zombifiedCount(zombified);
319    event->commit();
320  }
321
322  void NMethodSweeper::sweep_code_cache() {
323    ResourceMark rm;
324    Ticks sweep_start_counter = Ticks::now();
325
326    log_debug(codecache, sweep, start)("CodeCache flushing");
327
328    int flushed_count    = 0;
329    int zombified_count  = 0;
330    int flushed_c2_count = 0;
331
332    if (PrintMethodFlushing && Verbose) {
333      tty->print_cr("### Sweep at %d out of %d", _seen, CodeCache::nmethod_count());
334    }
335
336    int swept_count = 0;
337    assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
338    assert(!CodeCache_lock->owned_by_self(), "just checking");
339
340    int freed_memory = 0;
341    {
342      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
343
344      while (!_current.end()) {
345        swept_count++;
346        // Since we will give up the CodeCache_lock, always skip ahead
347        // to the next nmethod. Other blobs can be deleted by other
348        // threads but nmethods are only reclaimed by the sweeper.
349        CompiledMethod* nm = _current.method();
350        _current.next();
351
352        // Now ready to process nmethod and give up CodeCache_lock
353        {
354          MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
355          // Save information before potentially flushing the nmethod
356          // Only flushing nmethods so size only matters for them.
357          int size = nm->is_nmethod() ? ((nmethod*)nm)->total_size() : 0;
358          bool is_c2_method = nm->is_compiled_by_c2();
359          bool is_osr = nm->is_osr_method();
360          int compile_id = nm->compile_id();
361          intptr_t address = p2i(nm);
362          const char* state_before = nm->state();
363          const char* state_after  = "";
364
365          MethodStateChange type = process_compiled_method(nm);
366          switch (type) {
367            case Flushed:
368              state_after = "flushed";
369              freed_memory += size;
370              ++flushed_count;
371              if (is_c2_method) {
372                ++flushed_c2_count;
373              }
374              break;
375            case MadeZombie:
376              state_after = "made zombie";
377              ++zombified_count;
378              break;
379            case None:
380              break;
381            default:
382              ShouldNotReachHere();
383          }
384          if (PrintMethodFlushing && Verbose && type != None) {
385            tty->print_cr("### %s nmethod %3d/" PTR_FORMAT " (%s) %s", is_osr ? "osr" : "", compile_id, address, state_before, state_after);
386          }
387        }
388
389        _seen++;
390        handle_safepoint_request();
391      }
392    }
393
394    assert(_current.end(), "must have scanned the whole cache");
395
396    const Ticks sweep_end_counter = Ticks::now();
397    const Tickspan sweep_time = sweep_end_counter - sweep_start_counter;
398    {
399      MutexLocker mu(NMethodSweeperStats_lock, Mutex::_no_safepoint_check_flag);
400      _total_time_sweeping  += sweep_time;
401      _total_time_this_sweep += sweep_time;
402      _peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time);
403      _total_flushed_size += freed_memory;
404      _total_nof_methods_reclaimed += flushed_count;
405      _total_nof_c2_methods_reclaimed += flushed_c2_count;
406      _peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep);
407    }
408
409    EventSweepCodeCache event(UNTIMED);
410    if (event.should_commit()) {
411      post_sweep_event(&event, sweep_start_counter, sweep_end_counter, (s4)_traversals, swept_count, flushed_count, zombified_count);
412    }
413
414  #ifdef ASSERT
415    if (PrintMethodFlushing) {
416      tty->print_cr("### sweeper: sweep time(" JLONG_FORMAT "): ", sweep_time.value());
417    }
418  #endif
419
420    Log(codecache, sweep) log;
421    if (log.is_debug()) {
422      LogStream ls(log.debug());
423      CodeCache::print_summary(&ls, false);
424    }
425    log_sweep("finished");
426
427    // The sweeper is the only place where memory is released, so check here
428    // whether it is time to restart the compiler. Checking only for a certain
429    // amount of free memory in the code cache might re-enable compilation even
430    // though no memory was actually released. For example, compilation can stay
431    // disabled although there are 4 MB (or more) free in the code cache, because
432    // of code cache fragmentation. Therefore, it only makes sense to re-enable
433    // compilation if we have actually freed memory. Note that typically several
434    // kB are released when sweeping 16 MB of the code cache, so 'freed_memory'
435    // > 0 is required to restart the compiler.
436    if (!CompileBroker::should_compile_new_jobs() && (freed_memory > 0)) {
437      CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
438      log.debug("restart compiler");
439      log_sweep("restart_compiler");
440    }
441  }
442
443  // This function updates the sweeper statistics that keep track of nmethod
444  // state changes. If there is 'enough' state change, the sweeper is invoked
445  // as soon as possible. Also, we are guaranteed to invoke the sweeper if
446  // the code cache gets full.
447  void NMethodSweeper::report_state_change(nmethod* nm) {
448    Atomic::add(&_bytes_changed, (size_t)nm->total_size());
449    if (Atomic::load(&_bytes_changed) > _sweep_threshold_bytes) {
450      MutexLocker mu(CodeSweeper_lock, Mutex::_no_safepoint_check_flag);
451      _should_sweep = true;
452      CodeSweeper_lock->notify(); // Wake up sweeper.
453    }
454  }
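
[Editor's example, not part of the original file] A hedged standalone sketch of
the same accumulate-then-notify pattern with std::atomic (all names
hypothetical; this pairs with the sweeper-loop sketch above):

    #include <atomic>
    #include <condition_variable>
    #include <mutex>

    static std::atomic<size_t>     bytes_changed{0};  // reset by the sweeper before each sweep
    static std::mutex              sweeper_mutex;
    static std::condition_variable sweeper_cv;
    static bool should_sweep = false;
    static const size_t sweep_threshold_bytes = 512 * 1024;  // assumed threshold

    // Called on every nmethod state change; wakes the sweeper past the threshold.
    static void report_state_change_sketch(size_t nmethod_size) {
      size_t total = bytes_changed.fetch_add(nmethod_size) + nmethod_size;
      if (total > sweep_threshold_bytes) {
        std::lock_guard<std::mutex> lock(sweeper_mutex);
        should_sweep = true;
        sweeper_cv.notify_one();
      }
    }
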
455
456  class CompiledMethodMarker: public StackObj {
457   private:
458    CodeCacheSweeperThread* _thread;
459   public:
460    CompiledMethodMarker(CompiledMethod* cm) {
461      JavaThread* current = JavaThread::current();
462      assert(current->is_Code_cache_sweeper_thread(), "Must be");
463      _thread = (CodeCacheSweeperThread*)current;
464      if (!cm->is_zombie() && !cm->is_unloading()) {
465        // Only expose live nmethods for scanning
466        _thread->set_scanned_compiled_method(cm);
467      }
468    }
469    ~CompiledMethodMarker() {
470      _thread->set_scanned_compiled_method(NULL);
471    }
472  };
473
474  NMethodSweeper::MethodStateChange NMethodSweeper::process_compiled_method(CompiledMethod* cm) {
475    assert(cm != NULL, "sanity");
476    assert(!CodeCache_lock->owned_by_self(), "just checking");
477
478    MethodStateChange result = None;
479    // Make sure this nmethod doesn't get unloaded during the scan,
480    // since safepoints may happen during the locks acquired below.
481    CompiledMethodMarker nmm(cm);
482    SWEEP(cm);
483
484    // Skip methods that are currently referenced by the VM
485    if (cm->is_locked_by_vm()) {
486      // But still remember to clean up inline caches for alive nmethods
487      if (cm->is_alive()) {
488        // Clean inline caches that point to zombie/non-entrant/unloaded nmethods
489        cm->cleanup_inline_caches(false);
490        SWEEP(cm);
491      }
492      return result;
493    }
494
495    if (cm->is_zombie()) {
496      // All inline caches that referred to this nmethod were cleaned in the
497      // previous sweeper cycle. Now flush the nmethod from the code cache.
498      assert(!cm->is_locked_by_vm(), "must not flush locked Compiled Methods");
499      cm->flush();
500      assert(result == None, "sanity");
501      result = Flushed;
502    } else if (cm->is_not_entrant()) {
503      // If there are no current activations of this method on the
504      // stack we can safely convert it to a zombie method
505      OrderAccess::loadload(); // _stack_traversal_mark and _state
506      if (cm->can_convert_to_zombie()) {
507        // Code cache state change is tracked in make_zombie()
508        cm->make_zombie();
509        SWEEP(cm);
510        assert(result == None, "sanity");
511        result = MadeZombie;
512        assert(cm->is_zombie(), "nmethod must be zombie");
513      } else {
514        // Still alive, clean up its inline caches
515        cm->cleanup_inline_caches(false);
516        SWEEP(cm);
517      }
518    } else if (cm->is_unloaded()) {
519      // Code is unloaded, so there are no activations on the stack.
520      // Convert the nmethod to zombie.
521      // Code cache state change is tracked in make_zombie()
522      cm->make_zombie();
523      SWEEP(cm);
524      assert(result == None, "sanity");
525      result = MadeZombie;
526    } else {
527      if (cm->is_nmethod()) {
528        possibly_flush((nmethod*)cm);
529      }
530      // Clean inline caches that point to zombie/non-entrant/unloaded nmethods
531      cm->cleanup_inline_caches(false);
532      SWEEP(cm);
533    }
534    return result;
535  }
536
537
538  void NMethodSweeper::possibly_flush(nmethod* nm) {
539    if (UseCodeCacheFlushing) {
540      if (!nm->is_locked_by_vm() && !nm->is_native_method() && !nm->is_not_installed() && !nm->is_unloading()) {
541        bool make_not_entrant = false;
542
543        // Do not make native methods not-entrant
544        nm->dec_hotness_counter();
545        // Get the initial value of the hotness counter. This value depends on the
546        // ReservedCodeCacheSize
547        int reset_val = hotness_counter_reset_val();
548        int time_since_reset = reset_val - nm->hotness_counter();
549        int code_blob_type = CodeCache::get_code_blob_type(nm);
550        double threshold = -reset_val + (CodeCache::reverse_free_ratio(code_blob_type) * NmethodSweepActivity);
551        // The less free space in the code cache we have - the bigger reverse_free_ratio() is.
552        // I.e., 'threshold' increases with lower available space in the code cache and a higher
553        // NmethodSweepActivity. If the current hotness counter - which decreases from its initial
554        // value until it is reset by stack walking - is smaller than the computed threshold, the
555        // corresponding nmethod is considered for removal.
556        if ((NmethodSweepActivity > 0) && (nm->hotness_counter() < threshold) && (time_since_reset > MinPassesBeforeFlush)) {
557          // A method is marked as not-entrant if the method is
558          // 1) 'old enough': nm->hotness_counter() < threshold
559          // 2) The method was in_use for a minimum amount of time: (time_since_reset > MinPassesBeforeFlush)
560          //    The second condition is necessary if we are dealing with very small code cache
561          //    sizes (e.g., <10m) and the code cache size is too small to hold all hot methods.
562          //    The second condition ensures that methods are not immediately made not-entrant
563          //    after compilation.
564          make_not_entrant = true;
565        }
566
567        // The stack-scanning low-cost detection may not see the method was used (which can happen for
568        // flat profiles). Check the age counter for possible data.
569        if (UseCodeAging && make_not_entrant && (nm->is_compiled_by_c2() || nm->is_compiled_by_c1())) {
570          MethodCounters* mc = nm->method()->get_method_counters(Thread::current());
571          if (mc != NULL) {
572            // Snapshot the value as it's changed concurrently
573            int age = mc->nmethod_age();
574            if (MethodCounters::is_nmethod_hot(age)) {
575              // The method has gone through flushing, and it became hot enough that it deopted
576              // before we could take a look at it. Give it more time to appear in the stack traces,
577              // proportional to the number of deopts.
578              MethodData* md = nm->method()->method_data();
579              if (md != NULL && time_since_reset > (int)(MinPassesBeforeFlush * (md->tenure_traps() + 1))) {
580                // It's been long enough and we still haven't seen it on the stack.
581                // Try to flush it, but enable counters the next time.
582                mc->reset_nmethod_age();
583              } else {
584                make_not_entrant = false;
585              }
586            } else if (MethodCounters::is_nmethod_warm(age)) {
587              // Method has counters enabled, and the method was used within
588              // previous MinPassesBeforeFlush sweeps. Reset the counter. Stay in the existing
589              // compiled state.
590              mc->reset_nmethod_age();
591              // delay the next check
592              nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
593              make_not_entrant = false;
594            } else if (MethodCounters::is_nmethod_age_unset(age)) {
595              // No counters were used before. Set the counters to the detection
596              // limit value. If the method is going to be used again it will be compiled
597              // with counters that we're going to use for analysis the next time.
598              mc->reset_nmethod_age();
599            } else {
600              // Method was totally idle for 10 sweeps.
601              // The counter already has the initial value; flush it and maybe recompile
602              // later with counters.
603            }
604          }
605        }
606
607        if (make_not_entrant) {
608          nm->make_not_entrant();
609
610          // Code cache state change is tracked in make_not_entrant()
611          if (PrintMethodFlushing && Verbose) {
612            tty->print_cr("### Nmethod %d/" PTR_FORMAT " made not-entrant: hotness counter %d/%d threshold %f",
613                          nm->compile_id(), p2i(nm), nm->hotness_counter(), reset_val, threshold);
614          }
615        }
616      }
617    }
618  }
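
[Editor's note, a hedged worked example of the flush heuristic above; the
numbers are assumptions, not measurements] With a 240 MB code cache, reset_val
is 480. If reverse_free_ratio() is 8 and NmethodSweepActivity has its default
value of 10, then threshold = -480 + 8 * 10 = -400, so only an nmethod whose
hotness counter has decayed below -400 without being reset by stack scanning
(and that has survived more than MinPassesBeforeFlush sweeps) becomes a
candidate for the not-entrant transition.
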
619
620  // Print out some state information about the current sweep and the
621  // state of the code cache if it's requested.
622  void NMethodSweeper::log_sweep(const char* msg, const char* format, ...) {
623    if (PrintMethodFlushing) {
624      ResourceMark rm;
625      stringStream s;
626      // Dump code cache state into a buffer before locking the tty,
627      // because log_state() will use locks causing lock conflicts.
628      CodeCache::log_state(&s);
629
630      ttyLocker ttyl;
631      tty->print("### sweeper: %s ", msg);
632      if (format != NULL) {
633        va_list ap;
634        va_start(ap, format);
635        tty->vprint(format, ap);
636        va_end(ap);
637      }
638      tty->print_cr("%s", s.as_string());
639    }
640
641    if (LogCompilation && (xtty != NULL)) {
642      ResourceMark rm;
643      stringStream s;
644      // Dump code cache state into a buffer before locking the tty,
645      // because log_state() will use locks causing lock conflicts.
646      CodeCache::log_state(&s);
647
648      ttyLocker ttyl;
649      xtty->begin_elem("sweeper state='%s' traversals='" INTX_FORMAT "' ", msg, (intx)traversal_count());
650      if (format != NULL) {
651        va_list ap;
652        va_start(ap, format);
653        xtty->vprint(format, ap);
654        va_end(ap);
655      }
656      xtty->print("%s", s.as_string());
657      xtty->stamp();
658      xtty->end_elem();
659    }
660  }
661
662  void NMethodSweeper::print(outputStream* out) {
663    ttyLocker ttyl;
664    out = (out == NULL) ? tty : out;
665    out->print_cr("Code cache sweeper statistics:");
666    out->print_cr("  Total sweep time:                %1.0lf ms", (double)_total_time_sweeping.value()/1000000);
667    out->print_cr("  Total number of full sweeps:     %ld", _total_nof_code_cache_sweeps);
668    out->print_cr("  Total number of flushed methods: %ld (thereof %ld C2 methods)", _total_nof_methods_reclaimed,
669                  _total_nof_c2_methods_reclaimed);
670    out->print_cr("  Total size of flushed methods:   " SIZE_FORMAT " kB", _total_flushed_size/K);
671  }

File: jdk/src/hotspot/share/code/codeCache.hpp

  1  /*
  2   * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
  3   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4   *
  5   * This code is free software; you can redistribute it and/or modify it
  6   * under the terms of the GNU General Public License version 2 only, as
  7   * published by the Free Software Foundation.
  8   *
  9   * This code is distributed in the hope that it will be useful, but WITHOUT
 10   * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11   * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 12   * version 2 for more details (a copy is included in the LICENSE file that
 13   * accompanied this code).
 14   *
 15   * You should have received a copy of the GNU General Public License version
 16   * 2 along with this work; if not, write to the Free Software Foundation,
 17   * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18   *
 19   * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20   * or visit www.oracle.com if you need additional information or have any
 21   * questions.
 22   *
 23   */
 24
 25  #ifndef SHARE_CODE_CODECACHE_HPP
 26  #define SHARE_CODE_CODECACHE_HPP
 27
 28  #include "code/codeBlob.hpp"
 29  #include "code/nmethod.hpp"
 30  #include "gc/shared/gcBehaviours.hpp"
 31  #include "memory/allocation.hpp"
 32  #include "memory/heap.hpp"
 33  #include "oops/instanceKlass.hpp"
 34  #include "oops/oopsHierarchy.hpp"
 35  #include "runtime/mutexLocker.hpp"
 36
 37  // The CodeCache implements the code cache for various pieces of generated
 38  // code, e.g., compiled Java methods, runtime stubs, transition frames, etc.
 39  // The entries in the CodeCache are all CodeBlobs.
 40
 41  // -- Implementation --
 42  // The CodeCache consists of one or more CodeHeaps, each of which contains
 43  // CodeBlobs of a specific CodeBlobType. Currently heaps for the following
 44  // types are available:
 45  //  - Non-nmethods: non-nmethod code such as buffers, adapters and runtime stubs
 46  //  - Profiled nmethods: nmethods that are profiled, i.e., those
 47  //    executed at level 2 or 3
 48  //  - Non-Profiled nmethods: nmethods that are not profiled, i.e., those
 49  //    executed at level 1 or 4 and native methods
 50  //  - All: used for code of all types if code cache segmentation is disabled.
 51  //
 52  // In the rare case of the non-nmethod code heap getting full, non-nmethod code
 53  // will be stored in the non-profiled code heap as a fallback solution.
 54  //
 55  // Depending on the availability of compilers and the compilation mode there
 56  // may be fewer heaps. The size of the code heaps depends on the values of
 57  // ReservedCodeCacheSize, NonProfiledCodeHeapSize and ProfiledCodeHeapSize
 58  // (see CodeCache::heap_available(..) and CodeCache::initialize_heaps(..)
 59  // for details).
 60  //
 61  // Code cache segmentation is controlled by the flag SegmentedCodeCache.
 62  // If turned off, all code types are stored in a single code heap. By default
 63  // code cache segmentation is turned on if tiered mode is enabled and
 64  // ReservedCodeCacheSize >= 240 MB.
 65  //
 66  // All methods of the CodeCache accepting a CodeBlobType only apply to
 67  // CodeBlobs of the given type. For example, iteration over the
 68  // CodeBlobs of a specific type can be done by using CodeCache::first_blob(..)
 69  // and CodeCache::next_blob(..) and providing the corresponding CodeBlobType.
 70  //
 71  // IMPORTANT: If you add new CodeHeaps to the code cache or change the
 72  // existing ones, make sure to adapt the dtrace scripts (jhelper.d) for
 73  // Solaris and BSD.
 74
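
[Editor's example, not part of the original file] For reference, a hedged
command-line sketch of the flags named above (defaults and exact behavior vary
by release):

    java -XX:+SegmentedCodeCache -XX:ReservedCodeCacheSize=240m \
         -XX:+PrintCodeCache -version
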
 75  class ExceptionCache;
 76  class KlassDepChange;
 77  class OopClosure;
 78  class ShenandoahParallelCodeHeapIterator;
 79
 80  class CodeCache : AllStatic {
 81    friend class VMStructs;
 82    friend class JVMCIVMStructs;
 83    template <class T, class Filter> friend class CodeBlobIterator;
 84    friend class WhiteBox;
 85    friend class CodeCacheLoader;
 86    friend class ShenandoahParallelCodeHeapIterator;
 87   private:
 88    // CodeHeaps of the cache
 89    static GrowableArray<CodeHeap*>* _heaps;
 90    static GrowableArray<CodeHeap*>* _compiled_heaps;
 91    static GrowableArray<CodeHeap*>* _nmethod_heaps;
 92    static GrowableArray<CodeHeap*>* _allocable_heaps;
 93
 94    static address _low_bound;                        // Lower bound of CodeHeap addresses
 95    static address _high_bound;                       // Upper bound of CodeHeap addresses
 96    static int _number_of_nmethods_with_dependencies; // Total number of nmethods with dependencies
 97    static uint8_t _unloading_cycle;                  // Global state for recognizing old nmethods that need to be unloaded
 98
 99    static ExceptionCache* volatile _exception_cache_purge_list;
100
101    // CodeHeap management
102    static void initialize_heaps();                   // Initializes the CodeHeaps
103    // Check the code heap sizes set by the user via command line
104    static void check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set);
105    // Creates a new heap with the given name and size, containing CodeBlobs of the given type
106    static void add_heap(ReservedSpace rs, const char* name, int code_blob_type);
107    static CodeHeap* get_code_heap_containing(void* p); // Returns the CodeHeap containing the given pointer, or NULL
108    static CodeHeap* get_code_heap(const CodeBlob* cb); // Returns the CodeHeap for the given CodeBlob
109    static CodeHeap* get_code_heap(int code_blob_type); // Returns the CodeHeap for the given CodeBlobType
110    // Returns the name of the VM option to set the size of the corresponding CodeHeap
111    static const char* get_code_heap_flag_name(int code_blob_type);
112    static ReservedCodeSpace reserve_heap_memory(size_t size); // Reserves one continuous chunk of memory for the CodeHeaps
113
114    // Iteration
115    static CodeBlob* first_blob(CodeHeap* heap);      // Returns the first CodeBlob on the given CodeHeap
116    static CodeBlob* first_blob(int code_blob_type);  // Returns the first CodeBlob of the given type
117    static CodeBlob* next_blob(CodeHeap* heap, CodeBlob* cb); // Returns the next CodeBlob on the given CodeHeap
118
119    static size_t bytes_allocated_in_freelists();
120    static int    allocated_segments();
121    static size_t freelists_length();
122
123    // Make private to prevent unsafe calls. Not all CodeBlob*'s are embedded in a CodeHeap.
124    static bool contains(CodeBlob *p) { fatal("don't call me!"); return false; }
125
126   public:
127    // Initialization
128    static void initialize();
129    static size_t page_size(bool aligned = true, size_t min_pages = 1); // Returns the page size used by the CodeCache
130
131    static int code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs);
132
133    static void add_heap(CodeHeap* heap);
134    static const GrowableArray<CodeHeap*>* heaps()          { return _heaps; }
135    static const GrowableArray<CodeHeap*>* compiled_heaps() { return _compiled_heaps; }
136    static const GrowableArray<CodeHeap*>* nmethod_heaps()  { return _nmethod_heaps; }
137
138    // Allocation/administration
139    static CodeBlob* allocate(int size, int code_blob_type, bool handle_alloc_failure = true, int orig_code_blob_type = CodeBlobType::All); // allocates a new CodeBlob
140    static void commit(CodeBlob* cb);                 // called when the allocated CodeBlob has been filled
141    static int  alignment_unit();                     // guaranteed alignment of all CodeBlobs
142    static int  alignment_offset();                   // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)
143    static void free(CodeBlob* cb);                   // frees a CodeBlob
144    static void free_unused_tail(CodeBlob* cb, size_t used); // frees the unused tail of a CodeBlob (only used by TemplateInterpreter::initialize())
145    static bool contains(void *p);                    // returns whether p is included
146    static bool contains(nmethod* nm);                // returns whether nm is included
147    static void blobs_do(void f(CodeBlob* cb));       // iterates over all CodeBlobs
148    static void blobs_do(CodeBlobClosure* f);         // iterates over all CodeBlobs
149    static void nmethods_do(void f(nmethod* nm));     // iterates over all nmethods
150    static void metadata_do(MetadataClosure* f);      // iterates over metadata in alive nmethods
151
152    // Lookup
153    static CodeBlob* find_blob(void* start);          // Returns the CodeBlob containing the given address
154    static CodeBlob* find_blob_unsafe(void* start);   // Same as find_blob but does not fail if looking up a zombie method
155    static nmethod*  find_nmethod(void* start);       // Returns the nmethod containing the given address
156    static CompiledMethod* find_compiled(void* start);
157
158    static int blob_count();                          // Returns the total number of CodeBlobs in the cache
159    static int blob_count(int code_blob_type);
160    static int adapter_count();                       // Returns the total number of Adapters in the cache
161    static int adapter_count(int code_blob_type);
162    static int nmethod_count();                       // Returns the total number of nmethods in the cache
163    static int nmethod_count(int code_blob_type);
164
165    // GC support
166    static void verify_oops();
167    // If any oops are not marked this method unloads (i.e., breaks root links
168    // to) any unmarked codeBlobs in the cache. Sets "marked_for_unloading"
169    // to "true" iff some code got unloaded.
170    // "unloading_occurred" controls whether metadata should be cleaned because of class unloading.
171    class UnloadingScope: StackObj {
172      ClosureIsUnloadingBehaviour _is_unloading_behaviour;
173      IsUnloadingBehaviour*       _saved_behaviour;
174
175     public:
176      UnloadingScope(BoolObjectClosure* is_alive);
177      ~UnloadingScope();
178    };
179
180    static void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
181    static uint8_t unloading_cycle() { return _unloading_cycle; }
182    static void increment_unloading_cycle();
183    static void release_exception_cache(ExceptionCache* entry);
184    static void purge_exception_caches();
185
186    // Printing/debugging
187    static void print();                              // prints summary
188    static void print_internals();
189    static void print_memory_overhead();
190    static void verify();                             // verifies the code cache
191    static void print_trace(const char* event, CodeBlob* cb, int size = 0) PRODUCT_RETURN;
192    static void print_summary(outputStream* st, bool detailed = true); // Prints a summary of the code cache usage
193    static void log_state(outputStream* st);
194    LINUX_ONLY(static void write_perf_map();)
195    static const char* get_code_heap_name(int code_blob_type) { return (heap_available(code_blob_type) ? get_code_heap(code_blob_type)->name() : "Unused"); }
196    static void report_codemem_full(int code_blob_type, bool print);
197
198    // Dcmd (Diagnostic commands)
199    static void print_codelist(outputStream* st);
200    static void print_layout(outputStream* st);
201
202    // The full limits of the codeCache
203    static address low_bound()  { return _low_bound; }
204    static address low_bound(int code_blob_type);
205    static address high_bound() { return _high_bound; }
206    static address high_bound(int code_blob_type);
207
208    // Profiling
209    static size_t capacity();
210    static size_t unallocated_capacity(int code_blob_type);
211    static size_t unallocated_capacity();
212    static size_t max_capacity();
213
214    static double reverse_free_ratio(int code_blob_type);
215
216    static void clear_inline_caches();                // clear all inline caches
217    static void cleanup_inline_caches();              // clean unloaded/zombie nmethods from inline caches
218
219    // Returns true if an own CodeHeap for the given CodeBlobType is available
220    static bool heap_available(int code_blob_type);
221
222    // Returns the CodeBlobType for the given CompiledMethod
223    static int get_code_blob_type(CompiledMethod* cm) {
224      return get_code_heap(cm)->code_blob_type();
225    }
226
227    static bool code_blob_type_accepts_compiled(int type) {
228      bool result = type == CodeBlobType::All || type <= CodeBlobType::MethodProfiled;
229      return result;
230    }
231
232    static bool code_blob_type_accepts_nmethod(int type) {
233      return type == CodeBlobType::All || type <= CodeBlobType::MethodProfiled;
234    }
235
236    static bool code_blob_type_accepts_allocable(int type) {
237      return type <= CodeBlobType::All;
238    }
239
240
241    // Returns the CodeBlobType for the given compilation level
242    static int get_code_blob_type(int comp_level) {
243      if (comp_level == CompLevel_none ||
244          comp_level == CompLevel_simple ||
245          comp_level == CompLevel_full_optimization) {
246        // Non profiled methods
247        return CodeBlobType::MethodNonProfiled;
248      } else if (comp_level == CompLevel_limited_profile ||
249                 comp_level == CompLevel_full_profile) {
250        // Profiled methods
251        return CodeBlobType::MethodProfiled;
252      }
253      ShouldNotReachHere();
254      return 0;
255    }
256
257    static void verify_clean_inline_caches();
258    static void verify_icholder_relocations();
259
260    // Deoptimization
261   private:
262    static int  mark_for_deoptimization(KlassDepChange& changes);
263
264   public:
265    static void mark_all_nmethods_for_deoptimization();
266    static int  mark_for_deoptimization(Method* dependee);
267    static void make_marked_nmethods_not_entrant();
268
269    // Flushing and deoptimization
270    static void flush_dependents_on(InstanceKlass* dependee);
271
272    // RedefineClasses support
273    // Flushing and deoptimization in case of evolution
274    static int  mark_dependents_for_evol_deoptimization();
275    static void mark_all_nmethods_for_evol_deoptimization();
276    static void flush_evol_dependents();
277    static void old_nmethods_do(MetadataClosure* f) NOT_JVMTI_RETURN;
278    static void unregister_old_nmethod(CompiledMethod* c) NOT_JVMTI_RETURN;
279
280    // Support for fullspeed debugging
281    static void flush_dependents_on_method(const methodHandle& dependee);
282
283    // tells how many nmethods have dependencies
284    static int number_of_nmethods_with_dependencies();
285
286    static int get_codemem_full_count(int code_blob_type) {
287      CodeHeap* heap = get_code_heap(code_blob_type);
288      return (heap != NULL) ? heap->full_count() : 0;
289    }
290
291    // CodeHeap State Analytics.
292    // Interface methods for CodeHeap printing, called by the CompileBroker.
293    static void aggregate(outputStream *out, size_t granularity);
294    static void discard(outputStream *out);
295    static void print_usedSpace(outputStream *out);
296    static void print_freeSpace(outputStream *out);
297    static void print_count(outputStream *out);
298    static void print_space(outputStream *out);
299    static void print_age(outputStream *out);
300    static void print_names(outputStream *out);
301  };
302
303
304  // Iterator over CodeBlobs (e.g., nmethods) in the CodeCache.
305  template <class T, class Filter> class CodeBlobIterator : public StackObj {
306   public:
307    enum LivenessFilter { all_blobs, only_alive, only_alive_and_not_unloading };
308
309   private:
310    CodeBlob* _code_blob; // Current CodeBlob
311    GrowableArrayIterator<CodeHeap*> _heap;
312    GrowableArrayIterator<CodeHeap*> _end;
313    bool _only_alive;
314    bool _only_not_unloading;
315
316   public:
317    CodeBlobIterator(LivenessFilter filter, T* nm = NULL)
318      : _only_alive(filter == only_alive || filter == only_alive_and_not_unloading),
319        _only_not_unloading(filter == only_alive_and_not_unloading)
320    {
321      if (Filter::heaps() == NULL) {
322        return;
323      }
324      _heap = Filter::heaps()->begin();
325      _end  = Filter::heaps()->end();
326      // If set to NULL, initialized by first call to next()
327      _code_blob = (CodeBlob*)nm;
328      if (nm != NULL) {
329        while (!(*_heap)->contains_blob(_code_blob)) {
330          ++_heap;
331        }
332        assert((*_heap)->contains_blob(_code_blob), "match not found");
333      }
334    }
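     // [Editor's note, hedged interpretation] The warning at the top of this
     // report points at line 305 of this file, the declaration line of this
     // template. One plausible source: when Filter::heaps() is NULL the
     // constructor returns early, leaving _code_blob, _heap and _end
     // uninitialized, so a later read or copy of such an iterator would see
     // garbage or undefined values.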
335
336    // Advance iterator to next blob
337    bool next() {
338      assert_locked_or_safepoint(CodeCache_lock);
339
340      for (;;) {
341        // Walk through heaps as required
342        if (!next_blob()) {
343          if (_heap == _end) {
344            return false;
345          }
346          ++_heap;
347          continue;
348        }
349
350        // Filter is_alive as required
351        if (_only_alive && !_code_blob->is_alive()) {
352          continue;
353        }
354
355        // Filter is_unloading as required
356        if (_only_not_unloading) {
357          CompiledMethod* cm = _code_blob->as_compiled_method_or_null();
358          if (cm != NULL && cm->is_unloading()) {
359            continue;
360          }
361        }
362
363        return true;
364      }
365    }
366
367    bool end() const { return _code_blob == NULL; }
368    T* method() const { return (T*)_code_blob; }
369
370   private:
371
372    // Advance iterator to the next blob in the current code heap
373    bool next_blob() {
374      if (_heap == _end) {
375        return false;
376      }
377      CodeHeap* heap = *_heap;
378      // Get first method CodeBlob
379      if (_code_blob == NULL) {
380        _code_blob = CodeCache::first_blob(heap);
381        if (_code_blob == NULL) {
382          return false;
383        } else if (Filter::apply(_code_blob)) {
384          return true;
385        }
386      }
387      // Search for next method CodeBlob
388      _code_blob = CodeCache::next_blob(heap, _code_blob);
389      while (_code_blob != NULL && !Filter::apply(_code_blob)) {
390        _code_blob = CodeCache::next_blob(heap, _code_blob);
391      }
392      return _code_blob != NULL;
393    }
394  };
395
396
397  struct CompiledMethodFilter {
398    static bool apply(CodeBlob* cb) { return cb->is_compiled(); }
399    static const GrowableArray<CodeHeap*>* heaps() { return CodeCache::compiled_heaps(); }
400  };
401
402
403  struct NMethodFilter {
404    static bool apply(CodeBlob* cb) { return cb->is_nmethod(); }
405    static const GrowableArray<CodeHeap*>* heaps() { return CodeCache::nmethod_heaps(); }
406  };
407
408  struct AllCodeBlobsFilter {
409    static bool apply(CodeBlob* cb) { return true; }
410    static const GrowableArray<CodeHeap*>* heaps() { return CodeCache::heaps(); }
411  };
412
413  typedef CodeBlobIterator<CompiledMethod, CompiledMethodFilter> CompiledMethodIterator;
414  typedef CodeBlobIterator<nmethod, NMethodFilter> NMethodIterator;
415  typedef CodeBlobIterator<CodeBlob, AllCodeBlobsFilter> AllCodeBlobsIterator;
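
[Editor's example, not part of the original file] A hedged usage sketch of
these iterators, following the pattern the sweeper above uses (the caller must
hold the CodeCache_lock, as next() asserts):

    // Walk all alive, not-unloading nmethods in the code cache.
    NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
    while (iter.next()) {
      nmethod* nm = iter.method();
      // ... inspect or process nm ...
    }
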
416
417  #endif // SHARE_CODE_CODECACHE_HPP