| File: | jdk/src/hotspot/share/code/nmethod.cpp | 
| Warning: | line 3193, column 16 Value stored to 'out_preserve' during its initialization is never read  | 
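Note on the warning above: "value stored to 'X' during its initialization is never read" is the analyzer's dead-store check. It fires when a local variable's initializer computes a value that is overwritten (or abandoned) before anything reads it, so the initialization is wasted work and occasionally hides a logic slip. The flagged statement at line 3193 lies outside the excerpt below; the sketch here only illustrates the pattern, with the variable name taken from the warning and everything else hypothetical.

    // Illustrative sketch of the dead-store pattern (not the actual code at line 3193).
    static int out_preserve_slots_example(bool need_extra) {
      int out_preserve = 0;               // value stored at initialization is never read ...
      out_preserve = need_extra ? 4 : 2;  // ... because it is unconditionally overwritten here
      return out_preserve;
    }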
| 1 | /* | 
| 2 | * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. | 
| 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | 
| 4 | * | 
| 5 | * This code is free software; you can redistribute it and/or modify it | 
| 6 | * under the terms of the GNU General Public License version 2 only, as | 
| 7 | * published by the Free Software Foundation. | 
| 8 | * | 
| 9 | * This code is distributed in the hope that it will be useful, but WITHOUT | 
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | 
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | 
| 12 | * version 2 for more details (a copy is included in the LICENSE file that | 
| 13 | * accompanied this code). | 
| 14 | * | 
| 15 | * You should have received a copy of the GNU General Public License version | 
| 16 | * 2 along with this work; if not, write to the Free Software Foundation, | 
| 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | 
| 18 | * | 
| 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA | 
| 20 | * or visit www.oracle.com if you need additional information or have any | 
| 21 | * questions. | 
| 22 | * | 
| 23 | */ | 
| 24 | |
| 25 | #include "precompiled.hpp" | 
| 26 | #include "jvm.h" | 
| 27 | #include "asm/assembler.inline.hpp" | 
| 28 | #include "code/codeCache.hpp" | 
| 29 | #include "code/compiledIC.hpp" | 
| 30 | #include "code/compiledMethod.inline.hpp" | 
| 31 | #include "code/dependencies.hpp" | 
| 32 | #include "code/nativeInst.hpp" | 
| 33 | #include "code/nmethod.hpp" | 
| 34 | #include "code/scopeDesc.hpp" | 
| 35 | #include "compiler/abstractCompiler.hpp" | 
| 36 | #include "compiler/compileBroker.hpp" | 
| 37 | #include "compiler/compileLog.hpp" | 
| 38 | #include "compiler/compilerDirectives.hpp" | 
| 39 | #include "compiler/directivesParser.hpp" | 
| 40 | #include "compiler/disassembler.hpp" | 
| 41 | #include "compiler/oopMap.hpp" | 
| 42 | #include "gc/shared/collectedHeap.hpp" | 
| 43 | #include "interpreter/bytecode.hpp" | 
| 44 | #include "logging/log.hpp" | 
| 45 | #include "logging/logStream.hpp" | 
| 46 | #include "memory/allocation.inline.hpp" | 
| 47 | #include "memory/resourceArea.hpp" | 
| 48 | #include "memory/universe.hpp" | 
| 49 | #include "oops/access.inline.hpp" | 
| 50 | #include "oops/klass.inline.hpp" | 
| 51 | #include "oops/method.inline.hpp" | 
| 52 | #include "oops/methodData.hpp" | 
| 53 | #include "oops/oop.inline.hpp" | 
| 54 | #include "prims/jvmtiImpl.hpp" | 
| 55 | #include "prims/jvmtiThreadState.hpp" | 
| 56 | #include "prims/methodHandles.hpp" | 
| 57 | #include "runtime/atomic.hpp" | 
| 58 | #include "runtime/deoptimization.hpp" | 
| 59 | #include "runtime/flags/flagSetting.hpp" | 
| 60 | #include "runtime/frame.inline.hpp" | 
| 61 | #include "runtime/handles.inline.hpp" | 
| 62 | #include "runtime/jniHandles.inline.hpp" | 
| 63 | #include "runtime/orderAccess.hpp" | 
| 64 | #include "runtime/os.hpp" | 
| 65 | #include "runtime/safepointVerifiers.hpp" | 
| 66 | #include "runtime/serviceThread.hpp" | 
| 67 | #include "runtime/sharedRuntime.hpp" | 
| 68 | #include "runtime/signature.hpp" | 
| 69 | #include "runtime/sweeper.hpp" | 
| 70 | #include "runtime/threadWXSetters.inline.hpp" | 
| 71 | #include "runtime/vmThread.hpp" | 
| 72 | #include "utilities/align.hpp" | 
| 73 | #include "utilities/copy.hpp" | 
| 74 | #include "utilities/dtrace.hpp" | 
| 75 | #include "utilities/events.hpp" | 
| 76 | #include "utilities/globalDefinitions.hpp" | 
| 77 | #include "utilities/resourceHash.hpp" | 
| 78 | #include "utilities/xmlstream.hpp" | 
| 79 | #if INCLUDE_JVMCI | 
| 80 | #include "jvmci/jvmciRuntime.hpp" | 
| 81 | #endif | 
| 82 | |
| 83 | #ifdef DTRACE_ENABLED | 
| 84 | |
| 85 | // Only bother with this argument setup if dtrace is available | 
| 86 | |
| 87 | #define DTRACE_METHOD_UNLOAD_PROBE(method) \ | 
| 88 | { \ | 
| 89 | Method* m = (method); \ | 
| 90 | if (m != NULL) { \ | 
| 91 | Symbol* klass_name = m->klass_name(); \ | 
| 92 | Symbol* name = m->name(); \ | 
| 93 | Symbol* signature = m->signature(); \ | 
| 94 | HOTSPOT_COMPILED_METHOD_UNLOAD( \ | 
| 95 | (char *) klass_name->bytes(), klass_name->utf8_length(), \ | 
| 96 | (char *) name->bytes(), name->utf8_length(), \ | 
| 97 | (char *) signature->bytes(), signature->utf8_length()); \ | 
| 98 | } \ | 
| 99 | } | 
| 100 | |
| 101 | #else // ndef DTRACE_ENABLED | 
| 102 | |
| 103 | #define DTRACE_METHOD_UNLOAD_PROBE(method) | 
| 104 | |
| 105 | #endif | 
| 106 | |
| 107 | //--------------------------------------------------------------------------------- | 
| 108 | // NMethod statistics | 
| 109 | // They are printed under various flags, including: | 
| 110 | // PrintC1Statistics, PrintOptoStatistics, LogVMOutput, and LogCompilation. | 
| 111 | // (In the latter two cases, they like other stats are printed to the log only.) | 
| 112 | |
| 113 | #ifndef PRODUCT | 
| 114 | // These variables are put into one block to reduce relocations | 
| 115 | // and make it simpler to print from the debugger. | 
| 116 | struct java_nmethod_stats_struct { | 
| 117 | int nmethod_count; | 
| 118 | int total_size; | 
| 119 | int relocation_size; | 
| 120 | int consts_size; | 
| 121 | int insts_size; | 
| 122 | int stub_size; | 
| 123 | int scopes_data_size; | 
| 124 | int scopes_pcs_size; | 
| 125 | int dependencies_size; | 
| 126 | int handler_table_size; | 
| 127 | int nul_chk_table_size; | 
| 128 | #if INCLUDE_JVMCI | 
| 129 | int speculations_size; | 
| 130 | int jvmci_data_size; | 
| 131 | #endif | 
| 132 | int oops_size; | 
| 133 | int metadata_size; | 
| 134 | |
| 135 | void note_nmethod(nmethod* nm) { | 
| 136 | nmethod_count += 1; | 
| 137 | total_size += nm->size(); | 
| 138 | relocation_size += nm->relocation_size(); | 
| 139 | consts_size += nm->consts_size(); | 
| 140 | insts_size += nm->insts_size(); | 
| 141 | stub_size += nm->stub_size(); | 
| 142 | oops_size += nm->oops_size(); | 
| 143 | metadata_size += nm->metadata_size(); | 
| 144 | scopes_data_size += nm->scopes_data_size(); | 
| 145 | scopes_pcs_size += nm->scopes_pcs_size(); | 
| 146 | dependencies_size += nm->dependencies_size(); | 
| 147 | handler_table_size += nm->handler_table_size(); | 
| 148 | nul_chk_table_size += nm->nul_chk_table_size(); | 
| 149 | #if INCLUDE_JVMCI | 
| 150 | speculations_size += nm->speculations_size(); | 
| 151 | jvmci_data_size += nm->jvmci_data_size(); | 
| 152 | #endif | 
| 153 | } | 
| 154 | void print_nmethod_stats(const char* name) { | 
| 155 | if (nmethod_count == 0) return; | 
| 156 | tty->print_cr("Statistics for %d bytecoded nmethods for %s:", nmethod_count, name); | 
| 157 | if (total_size != 0) tty->print_cr(" total in heap = %d", total_size); | 
| 158 | if (nmethod_count != 0) tty->print_cr(" header = " SIZE_FORMAT, nmethod_count * sizeof(nmethod)); | 
| 159 | if (relocation_size != 0) tty->print_cr(" relocation = %d", relocation_size); | 
| 160 | if (consts_size != 0) tty->print_cr(" constants = %d", consts_size); | 
| 161 | if (insts_size != 0) tty->print_cr(" main code = %d", insts_size); | 
| 162 | if (stub_size != 0) tty->print_cr(" stub code = %d", stub_size); | 
| 163 | if (oops_size != 0) tty->print_cr(" oops = %d", oops_size); | 
| 164 | if (metadata_size != 0) tty->print_cr(" metadata = %d", metadata_size); | 
| 165 | if (scopes_data_size != 0) tty->print_cr(" scopes data = %d", scopes_data_size); | 
| 166 | if (scopes_pcs_size != 0) tty->print_cr(" scopes pcs = %d", scopes_pcs_size); | 
| 167 | if (dependencies_size != 0) tty->print_cr(" dependencies = %d", dependencies_size); | 
| 168 | if (handler_table_size != 0) tty->print_cr(" handler table = %d", handler_table_size); | 
| 169 | if (nul_chk_table_size != 0) tty->print_cr(" nul chk table = %d", nul_chk_table_size); | 
| 170 | #if INCLUDE_JVMCI | 
| 171 | if (speculations_size != 0) tty->print_cr(" speculations = %d", speculations_size); | 
| 172 | if (jvmci_data_size != 0) tty->print_cr(" JVMCI data = %d", jvmci_data_size); | 
| 173 | #endif | 
| 174 | } | 
| 175 | }; | 
| 176 | |
| 177 | struct native_nmethod_stats_struct { | 
| 178 | int native_nmethod_count; | 
| 179 | int native_total_size; | 
| 180 | int native_relocation_size; | 
| 181 | int native_insts_size; | 
| 182 | int native_oops_size; | 
| 183 | int native_metadata_size; | 
| 184 | void note_native_nmethod(nmethod* nm) { | 
| 185 | native_nmethod_count += 1; | 
| 186 | native_total_size += nm->size(); | 
| 187 | native_relocation_size += nm->relocation_size(); | 
| 188 | native_insts_size += nm->insts_size(); | 
| 189 | native_oops_size += nm->oops_size(); | 
| 190 | native_metadata_size += nm->metadata_size(); | 
| 191 | } | 
| 192 | void print_native_nmethod_stats() { | 
| 193 | if (native_nmethod_count == 0) return; | 
| 194 | tty->print_cr("Statistics for %d native nmethods:", native_nmethod_count); | 
| 195 | if (native_total_size != 0) tty->print_cr(" N. total size = %d", native_total_size); | 
| 196 | if (native_relocation_size != 0) tty->print_cr(" N. relocation = %d", native_relocation_size); | 
| 197 | if (native_insts_size != 0) tty->print_cr(" N. main code = %d", native_insts_size); | 
| 198 | if (native_oops_size != 0) tty->print_cr(" N. oops = %d", native_oops_size); | 
| 199 | if (native_metadata_size != 0) tty->print_cr(" N. metadata = %d", native_metadata_size); | 
| 200 | } | 
| 201 | }; | 
| 202 | |
| 203 | struct pc_nmethod_stats_struct { | 
| 204 | int pc_desc_resets; // number of resets (= number of caches) | 
| 205 | int pc_desc_queries; // queries to nmethod::find_pc_desc | 
| 206 | int pc_desc_approx; // number of those which have approximate true | 
| 207 | int pc_desc_repeats; // number of _pc_descs[0] hits | 
| 208 | int pc_desc_hits; // number of LRU cache hits | 
| 209 | int pc_desc_tests; // total number of PcDesc examinations | 
| 210 | int pc_desc_searches; // total number of quasi-binary search steps | 
| 211 | int pc_desc_adds; // number of LRU cache insertions | 
| 212 | |
| 213 | void print_pc_stats() { | 
| 214 | tty->print_cr("PcDesc Statistics: %d queries, %.2f comparisons per query", | 
| 215 | pc_desc_queries, | 
| 216 | (double)(pc_desc_tests + pc_desc_searches) | 
| 217 | / pc_desc_queries); | 
| 218 | tty->print_cr(" caches=%d queries=%d/%d, hits=%d+%d, tests=%d+%d, adds=%d", | 
| 219 | pc_desc_resets, | 
| 220 | pc_desc_queries, pc_desc_approx, | 
| 221 | pc_desc_repeats, pc_desc_hits, | 
| 222 | pc_desc_tests, pc_desc_searches, pc_desc_adds); | 
| 223 | } | 
| 224 | }; | 
| 225 | |
| 226 | #ifdef COMPILER1 | 
| 227 | static java_nmethod_stats_struct c1_java_nmethod_stats; | 
| 228 | #endif | 
| 229 | #ifdef COMPILER2 | 
| 230 | static java_nmethod_stats_struct c2_java_nmethod_stats; | 
| 231 | #endif | 
| 232 | #if INCLUDE_JVMCI | 
| 233 | static java_nmethod_stats_struct jvmci_java_nmethod_stats; | 
| 234 | #endif | 
| 235 | static java_nmethod_stats_struct unknown_java_nmethod_stats; | 
| 236 | |
| 237 | static native_nmethod_stats_struct native_nmethod_stats; | 
| 238 | static pc_nmethod_stats_struct pc_nmethod_stats; | 
| 239 | |
| 240 | static void note_java_nmethod(nmethod* nm) { | 
| 241 | #ifdef COMPILER1 | 
| 242 | if (nm->is_compiled_by_c1()) { | 
| 243 | c1_java_nmethod_stats.note_nmethod(nm); | 
| 244 | } else | 
| 245 | #endif | 
| 246 | #ifdef COMPILER2 | 
| 247 | if (nm->is_compiled_by_c2()) { | 
| 248 | c2_java_nmethod_stats.note_nmethod(nm); | 
| 249 | } else | 
| 250 | #endif | 
| 251 | #if INCLUDE_JVMCI | 
| 252 | if (nm->is_compiled_by_jvmci()) { | 
| 253 | jvmci_java_nmethod_stats.note_nmethod(nm); | 
| 254 | } else | 
| 255 | #endif | 
| 256 | { | 
| 257 | unknown_java_nmethod_stats.note_nmethod(nm); | 
| 258 | } | 
| 259 | } | 
| 260 | #endif // !PRODUCT | 
| 261 | |
| 262 | //--------------------------------------------------------------------------------- | 
| 263 | |
| 264 | |
| 265 | ExceptionCache::ExceptionCache(Handle exception, address pc, address handler) { | 
| 266 |   assert(pc != NULL, "Must be non null"); | 
| 267 |   assert(exception.not_null(), "Must be non null"); | 
| 268 |   assert(handler != NULL, "Must be non null"); | 
| 269 | |
| 270 | _count = 0; | 
| 271 | _exception_type = exception->klass(); | 
| 272 | _next = NULL; | 
| 273 | _purge_list_next = NULL; | 
| 274 | |
| 275 | add_address_and_handler(pc,handler); | 
| 276 | } | 
| 277 | |
| 278 | |
| 279 | address ExceptionCache::match(Handle exception, address pc) { | 
| 280 |   assert(pc != NULL,"Must be non null"); | 
| 281 |   assert(exception.not_null(),"Must be non null"); | 
| 282 | if (exception->klass() == exception_type()) { | 
| 283 | return (test_address(pc)); | 
| 284 | } | 
| 285 | |
| 286 | return NULL; | 
| 287 | } | 
| 288 | |
| 289 | |
| 290 | bool ExceptionCache::match_exception_with_space(Handle exception) { | 
| 291 |   assert(exception.not_null(),"Must be non null"); | 
| 292 | if (exception->klass() == exception_type() && count() < cache_size) { | 
| 293 | return true; | 
| 294 | } | 
| 295 | return false; | 
| 296 | } | 
| 297 | |
| 298 | |
| 299 | address ExceptionCache::test_address(address addr) { | 
| 300 | int limit = count(); | 
| 301 | for (int i = 0; i < limit; i++) { | 
| 302 | if (pc_at(i) == addr) { | 
| 303 | return handler_at(i); | 
| 304 | } | 
| 305 | } | 
| 306 | return NULL; | 
| 307 | } | 
| 308 | |
| 309 | |
| 310 | bool ExceptionCache::add_address_and_handler(address addr, address handler) { | 
| 311 | if (test_address(addr) == handler) return true; | 
| 312 | |
| 313 | int index = count(); | 
| 314 | if (index < cache_size) { | 
| 315 | set_pc_at(index, addr); | 
| 316 | set_handler_at(index, handler); | 
| 317 | increment_count(); | 
| 318 | return true; | 
| 319 | } | 
| 320 | return false; | 
| 321 | } | 
| 322 | |
| 323 | ExceptionCache* ExceptionCache::next() { | 
| 324 | return Atomic::load(&_next); | 
| 325 | } | 
| 326 | |
| 327 | void ExceptionCache::set_next(ExceptionCache *ec) { | 
| 328 | Atomic::store(&_next, ec); | 
| 329 | } | 
| 330 | |
| 331 | //----------------------------------------------------------------------------- | 
| 332 | |
| 333 | |
| 334 | // Helper used by both find_pc_desc methods. | 
| 335 | static inline bool match_desc(PcDesc* pc, int pc_offset, bool approximate) { | 
| 336 | NOT_PRODUCT(++pc_nmethod_stats.pc_desc_tests); | 
| 337 | if (!approximate) | 
| 338 | return pc->pc_offset() == pc_offset; | 
| 339 | else | 
| 340 | return (pc-1)->pc_offset() < pc_offset && pc_offset <= pc->pc_offset(); | 
| 341 | } | 
| 342 | |
| 343 | void PcDescCache::reset_to(PcDesc* initial_pc_desc) { | 
| 344 | if (initial_pc_desc == NULL) { | 
| 345 | _pc_descs[0] = NULL; // native method; no PcDescs at all | 
| 346 | return; | 
| 347 | } | 
| 348 | NOT_PRODUCT(++pc_nmethod_stats.pc_desc_resets); | 
| 349 | // reset the cache by filling it with benign (non-null) values | 
| 350 |   assert(initial_pc_desc->pc_offset() < 0, "must be sentinel"); | 
| 351 | for (int i = 0; i < cache_size; i++) | 
| 352 | _pc_descs[i] = initial_pc_desc; | 
| 353 | } | 
| 354 | |
| 355 | PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) { | 
| 356 | NOT_PRODUCT(++pc_nmethod_stats.pc_desc_queries); | 
| 357 | NOT_PRODUCT(if (approximate) ++pc_nmethod_stats.pc_desc_approx); | 
| 358 | |
| 359 | // Note: one might think that caching the most recently | 
| 360 | // read value separately would be a win, but one would be | 
| 361 | // wrong. When many threads are updating it, the cache | 
| 362 | // line it's in would bounce between caches, negating | 
| 363 | // any benefit. | 
| 364 | |
| 365 | // In order to prevent race conditions do not load cache elements | 
| 366 | // repeatedly, but use a local copy: | 
| 367 | PcDesc* res; | 
| 368 | |
| 369 | // Step one: Check the most recently added value. | 
| 370 | res = _pc_descs[0]; | 
| 371 | if (res == NULL) return NULL; // native method; no PcDescs at all | 
| 372 | if (match_desc(res, pc_offset, approximate)) { | 
| 373 | NOT_PRODUCT(++pc_nmethod_stats.pc_desc_repeats); | 
| 374 | return res; | 
| 375 | } | 
| 376 | |
| 377 | // Step two: Check the rest of the LRU cache. | 
| 378 | for (int i = 1; i < cache_size; ++i) { | 
| 379 | res = _pc_descs[i]; | 
| 380 | if (res->pc_offset() < 0) break; // optimization: skip empty cache | 
| 381 | if (match_desc(res, pc_offset, approximate)) { | 
| 382 | NOT_PRODUCT(++pc_nmethod_stats.pc_desc_hits); | 
| 383 | return res; | 
| 384 | } | 
| 385 | } | 
| 386 | |
| 387 | // Report failure. | 
| 388 | return NULL; | 
| 389 | } | 
| 390 | |
| 391 | void PcDescCache::add_pc_desc(PcDesc* pc_desc) { | 
| 392 | NOT_PRODUCT(++pc_nmethod_stats.pc_desc_adds); | 
| 393 | // Update the LRU cache by shifting pc_desc forward. | 
| 394 | for (int i = 0; i < cache_size; i++) { | 
| 395 | PcDesc* next = _pc_descs[i]; | 
| 396 | _pc_descs[i] = pc_desc; | 
| 397 | pc_desc = next; | 
| 398 | } | 
| 399 | } | 
| 400 | |
| 401 | // adjust pcs_size so that it is a multiple of both oopSize and | 
| 402 | // sizeof(PcDesc) (assumes that if sizeof(PcDesc) is not a multiple | 
| 403 | // of oopSize, then 2*sizeof(PcDesc) is) | 
| 404 | static int adjust_pcs_size(int pcs_size) { | 
| 405 | int nsize = align_up(pcs_size, oopSize); | 
| 406 | if ((nsize % sizeof(PcDesc)) != 0) { | 
| 407 | nsize = pcs_size + sizeof(PcDesc); | 
| 408 | } | 
| 409 |   assert((nsize % oopSize) == 0, "correct alignment"); | 
| 410 | return nsize; | 
| 411 | } | 
| 412 | |
| 413 | |
| 414 | int nmethod::total_size() const { | 
| 415 | return | 
| 416 | consts_size() + | 
| 417 | insts_size() + | 
| 418 | stub_size() + | 
| 419 | scopes_data_size() + | 
| 420 | scopes_pcs_size() + | 
| 421 | handler_table_size() + | 
| 422 | nul_chk_table_size(); | 
| 423 | } | 
| 424 | |
| 425 | address* nmethod::orig_pc_addr(const frame* fr) { | 
| 426 | return (address*) ((address)fr->unextended_sp() + _orig_pc_offset); | 
| 427 | } | 
| 428 | |
| 429 | const char* nmethod::compile_kind() const { | 
| 430 | if (is_osr_method()) return "osr"; | 
| 431 | if (method() != NULL && is_native_method()) return "c2n"; | 
| 432 | return NULL; | 
| 433 | } | 
| 434 | |
| 435 | // Fill in default values for various flag fields | 
| 436 | void nmethod::init_defaults() { | 
| 437 | _state = not_installed; | 
| 438 | _has_flushed_dependencies = 0; | 
| 439 | _lock_count = 0; | 
| 440 | _stack_traversal_mark = 0; | 
| 441 | _load_reported = false; // jvmti state | 
| 442 | _unload_reported = false; | 
| 443 | |
| 444 | #ifdef ASSERT | 
| 445 | _oops_are_stale = false; | 
| 446 | #endif | 
| 447 | |
| 448 | _oops_do_mark_link = NULL; | 
| 449 | _osr_link = NULL; | 
| 450 | #if INCLUDE_RTM_OPT | 
| 451 | _rtm_state = NoRTM; | 
| 452 | #endif | 
| 453 | } | 
| 454 | |
| 455 | nmethod* nmethod::new_native_nmethod(const methodHandle& method, | 
| 456 | int compile_id, | 
| 457 | CodeBuffer *code_buffer, | 
| 458 | int vep_offset, | 
| 459 | int frame_complete, | 
| 460 | int frame_size, | 
| 461 | ByteSize basic_lock_owner_sp_offset, | 
| 462 | ByteSize basic_lock_sp_offset, | 
| 463 | OopMapSet* oop_maps) { | 
| 464 | code_buffer->finalize_oop_references(method); | 
| 465 | // create nmethod | 
| 466 | nmethod* nm = NULL; | 
| 467 | { | 
| 468 | MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); | 
| 469 | int native_nmethod_size = CodeBlob::allocation_size(code_buffer, sizeof(nmethod)); | 
| 470 | |
| 471 | CodeOffsets offsets; | 
| 472 | offsets.set_value(CodeOffsets::Verified_Entry, vep_offset); | 
| 473 | offsets.set_value(CodeOffsets::Frame_Complete, frame_complete); | 
| 474 | nm = new (native_nmethod_size, CompLevel_none) | 
| 475 | nmethod(method(), compiler_none, native_nmethod_size, | 
| 476 | compile_id, &offsets, | 
| 477 | code_buffer, frame_size, | 
| 478 | basic_lock_owner_sp_offset, | 
| 479 | basic_lock_sp_offset, | 
| 480 | oop_maps); | 
| 481 |     NOT_PRODUCT(if (nm != NULL)  native_nmethod_stats.note_native_nmethod(nm)); | 
| 482 | } | 
| 483 | |
| 484 | if (nm != NULL) { | 
| 485 | // verify nmethod | 
| 486 | debug_only(nm->verify();) // might block | 
| 487 | |
| 488 | nm->log_new_nmethod(); | 
| 489 | } | 
| 490 | return nm; | 
| 491 | } | 
| 492 | |
| 493 | nmethod* nmethod::new_nmethod(const methodHandle& method, | 
| 494 | int compile_id, | 
| 495 | int entry_bci, | 
| 496 | CodeOffsets* offsets, | 
| 497 | int orig_pc_offset, | 
| 498 | DebugInformationRecorder* debug_info, | 
| 499 | Dependencies* dependencies, | 
| 500 | CodeBuffer* code_buffer, int frame_size, | 
| 501 | OopMapSet* oop_maps, | 
| 502 | ExceptionHandlerTable* handler_table, | 
| 503 | ImplicitExceptionTable* nul_chk_table, | 
| 504 | AbstractCompiler* compiler, | 
| 505 | int comp_level, | 
| 506 | const GrowableArrayView<RuntimeStub*>& native_invokers | 
| 507 | #if INCLUDE_JVMCI | 
| 508 | , char* speculations, | 
| 509 | int speculations_len, | 
| 510 | int nmethod_mirror_index, | 
| 511 | const char* nmethod_mirror_name, | 
| 512 | FailedSpeculation** failed_speculations | 
| 513 | #endif | 
| 514 | ) | 
| 515 | { | 
| 516 |   assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR"); | 
| 517 | code_buffer->finalize_oop_references(method); | 
| 518 | // create nmethod | 
| 519 | nmethod* nm = NULL; | 
| 520 | { MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); | 
| 521 | #if INCLUDE_JVMCI | 
| 522 | int jvmci_data_size = !compiler->is_jvmci() ? 0 : JVMCINMethodData::compute_size(nmethod_mirror_name); | 
| 523 | #endif | 
| 524 | int nmethod_size = | 
| 525 | CodeBlob::allocation_size(code_buffer, sizeof(nmethod)) | 
| 526 | + adjust_pcs_size(debug_info->pcs_size()) | 
| 527 | + align_up((int)dependencies->size_in_bytes(), oopSize) | 
| 528 | + align_up(checked_cast<int>(native_invokers.data_size_in_bytes()), oopSize) | 
| 529 | + align_up(handler_table->size_in_bytes() , oopSize) | 
| 530 | + align_up(nul_chk_table->size_in_bytes() , oopSize) | 
| 531 | #if INCLUDE_JVMCI | 
| 532 | + align_up(speculations_len , oopSize) | 
| 533 | + align_up(jvmci_data_size , oopSize) | 
| 534 | #endif | 
| 535 | + align_up(debug_info->data_size() , oopSize); | 
| 536 | |
| 537 | nm = new (nmethod_size, comp_level) | 
| 538 | nmethod(method(), compiler->type(), nmethod_size, compile_id, entry_bci, offsets, | 
| 539 | orig_pc_offset, debug_info, dependencies, code_buffer, frame_size, | 
| 540 | oop_maps, | 
| 541 | handler_table, | 
| 542 | nul_chk_table, | 
| 543 | compiler, | 
| 544 | comp_level, | 
| 545 | native_invokers | 
| 546 | #if INCLUDE_JVMCI | 
| 547 | , speculations, | 
| 548 | speculations_len, | 
| 549 | jvmci_data_size | 
| 550 | #endif | 
| 551 | ); | 
| 552 | |
| 553 | if (nm != NULL) { | 
| 554 | #if INCLUDE_JVMCI | 
| 555 | if (compiler->is_jvmci()) { | 
| 556 | // Initialize the JVMCINMethodData object inlined into nm | 
| 557 | nm->jvmci_nmethod_data()->initialize(nmethod_mirror_index, nmethod_mirror_name, failed_speculations); | 
| 558 | } | 
| 559 | #endif | 
| 560 | // To make dependency checking during class loading fast, record | 
| 561 | // the nmethod dependencies in the classes it is dependent on. | 
| 562 | // This allows the dependency checking code to simply walk the | 
| 563 | // class hierarchy above the loaded class, checking only nmethods | 
| 564 | // which are dependent on those classes. The slow way is to | 
| 565 | // check every nmethod for dependencies which makes it linear in | 
| 566 | // the number of methods compiled. For applications with a lot | 
| 567 | // of classes the slow way is too slow. | 
| 568 | for (Dependencies::DepStream deps(nm); deps.next(); ) { | 
| 569 | if (deps.type() == Dependencies::call_site_target_value) { | 
| 570 | // CallSite dependencies are managed on per-CallSite instance basis. | 
| 571 | oop call_site = deps.argument_oop(0); | 
| 572 | MethodHandles::add_dependent_nmethod(call_site, nm); | 
| 573 | } else { | 
| 574 | Klass* klass = deps.context_type(); | 
| 575 | if (klass == NULL) { | 
| 576 | continue; // ignore things like evol_method | 
| 577 | } | 
| 578 | // record this nmethod as dependent on this klass | 
| 579 | InstanceKlass::cast(klass)->add_dependent_nmethod(nm); | 
| 580 | } | 
| 581 | } | 
| 582 | NOT_PRODUCT(if (nm != NULL) note_java_nmethod(nm)); | 
| 583 | } | 
| 584 | } | 
| 585 | // Do verification and logging outside CodeCache_lock. | 
| 586 | if (nm != NULL) { | 
| 587 | // Safepoints in nmethod::verify aren't allowed because nm hasn't been installed yet. | 
| 588 | DEBUG_ONLY(nm->verify();) | 
| 589 | nm->log_new_nmethod(); | 
| 590 | } | 
| 591 | return nm; | 
| 592 | } | 
| 593 | |
| 594 | // For native wrappers | 
| 595 | nmethod::nmethod( | 
| 596 | Method* method, | 
| 597 | CompilerType type, | 
| 598 | int nmethod_size, | 
| 599 | int compile_id, | 
| 600 | CodeOffsets* offsets, | 
| 601 | CodeBuffer* code_buffer, | 
| 602 | int frame_size, | 
| 603 | ByteSize basic_lock_owner_sp_offset, | 
| 604 | ByteSize basic_lock_sp_offset, | 
| 605 | OopMapSet* oop_maps ) | 
| 606 | : CompiledMethod(method, "native nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false), | 
| 607 | _is_unloading_state(0), | 
| 608 | _native_receiver_sp_offset(basic_lock_owner_sp_offset), | 
| 609 | _native_basic_lock_sp_offset(basic_lock_sp_offset) | 
| 610 | { | 
| 611 | { | 
| 612 | int scopes_data_offset = 0; | 
| 613 | int deoptimize_offset = 0; | 
| 614 | int deoptimize_mh_offset = 0; | 
| 615 | |
| 616 | debug_only(NoSafepointVerifier nsv;) | 
| 617 | assert_locked_or_safepoint(CodeCache_lock); | 
| 618 | |
| 619 | init_defaults(); | 
| 620 | _entry_bci = InvocationEntryBci; | 
| 621 | // We have no exception handler or deopt handler make the | 
| 622 | // values something that will never match a pc like the nmethod vtable entry | 
| 623 | _exception_offset = 0; | 
| 624 | _orig_pc_offset = 0; | 
| 625 | |
| 626 | _consts_offset = data_offset(); | 
| 627 | _stub_offset = data_offset(); | 
| 628 | _oops_offset = data_offset(); | 
| 629 | _metadata_offset = _oops_offset + align_up(code_buffer->total_oop_size(), oopSize); | 
| 630 | scopes_data_offset = _metadata_offset + align_up(code_buffer->total_metadata_size(), wordSize); | 
| 631 | _scopes_pcs_offset = scopes_data_offset; | 
| 632 | _dependencies_offset = _scopes_pcs_offset; | 
| 633 | _native_invokers_offset = _dependencies_offset; | 
| 634 | _handler_table_offset = _native_invokers_offset; | 
| 635 | _nul_chk_table_offset = _handler_table_offset; | 
| 636 | #if INCLUDE_JVMCI | 
| 637 | _speculations_offset = _nul_chk_table_offset; | 
| 638 | _jvmci_data_offset = _speculations_offset; | 
| 639 | _nmethod_end_offset = _jvmci_data_offset; | 
| 640 | #else | 
| 641 | _nmethod_end_offset = _nul_chk_table_offset; | 
| 642 | #endif | 
| 643 | _compile_id = compile_id; | 
| 644 | _comp_level = CompLevel_none; | 
| 645 | _entry_point = code_begin() + offsets->value(CodeOffsets::Entry); | 
| 646 | _verified_entry_point = code_begin() + offsets->value(CodeOffsets::Verified_Entry); | 
| 647 | _osr_entry_point = NULL; | 
| 648 | _exception_cache = NULL; | 
| 649 | _pc_desc_container.reset_to(NULL); | 
| 650 | _hotness_counter = NMethodSweeper::hotness_counter_reset_val(); | 
| 651 | |
| 652 | _scopes_data_begin = (address) this + scopes_data_offset; | 
| 653 | _deopt_handler_begin = (address) this + deoptimize_offset; | 
| 654 | _deopt_mh_handler_begin = (address) this + deoptimize_mh_offset; | 
| 655 | |
| 656 | code_buffer->copy_code_and_locs_to(this); | 
| 657 | code_buffer->copy_values_to(this); | 
| 658 | |
| 659 | clear_unloading_state(); | 
| 660 | |
| 661 | Universe::heap()->register_nmethod(this); | 
| 662 | debug_only(Universe::heap()->verify_nmethod(this)); | 
| 663 | |
| 664 | CodeCache::commit(this); | 
| 665 | } | 
| 666 | |
| 667 | if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) { | 
| 668 | ttyLocker ttyl; // keep the following output all in one block | 
| 669 | // This output goes directly to the tty, not the compiler log. | 
| 670 | // To enable tools to match it up with the compilation activity, | 
| 671 | // be sure to tag this tty output with the compile ID. | 
| 672 | if (xtty != NULL) { | 
| 673 | xtty->begin_head("print_native_nmethod"); | 
| 674 | xtty->method(_method); | 
| 675 | xtty->stamp(); | 
| 676 | xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this); | 
| 677 | } | 
| 678 | // Print the header part, then print the requested information. | 
| 679 | // This is both handled in decode2(), called via print_code() -> decode() | 
| 680 | if (PrintNativeNMethods) { | 
| 681 | tty->print_cr("-------------------------- Assembly (native nmethod) ---------------------------"); | 
| 682 | print_code(); | 
| 683 | tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - "); | 
| 684 | #if defined(SUPPORT_DATA_STRUCTS) | 
| 685 | if (AbstractDisassembler::show_structs()) { | 
| 686 | if (oop_maps != NULL) { | 
| 687 | tty->print("oop maps:"); // oop_maps->print_on(tty) outputs a cr() at the beginning | 
| 688 | oop_maps->print_on(tty); | 
| 689 | tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - "); | 
| 690 | } | 
| 691 | } | 
| 692 | #endif | 
| 693 | } else { | 
| 694 | print(); // print the header part only. | 
| 695 | } | 
| 696 | #if defined(SUPPORT_DATA_STRUCTS) | 
| 697 | if (AbstractDisassembler::show_structs()) { | 
| 698 | if (PrintRelocations) { | 
| 699 | print_relocations(); | 
| 700 | tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - "); | 
| 701 | } | 
| 702 | } | 
| 703 | #endif | 
| 704 | if (xtty != NULL) { | 
| 705 | xtty->tail("print_native_nmethod"); | 
| 706 | } | 
| 707 | } | 
| 708 | } | 
| 709 | |
| 710 | void* nmethod::operator new(size_t size, int nmethod_size, int comp_level) throw () { | 
| 711 | return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level)); | 
| 712 | } | 
| 713 | |
| 714 | nmethod::nmethod( | 
| 715 | Method* method, | 
| 716 | CompilerType type, | 
| 717 | int nmethod_size, | 
| 718 | int compile_id, | 
| 719 | int entry_bci, | 
| 720 | CodeOffsets* offsets, | 
| 721 | int orig_pc_offset, | 
| 722 | DebugInformationRecorder* debug_info, | 
| 723 | Dependencies* dependencies, | 
| 724 | CodeBuffer *code_buffer, | 
| 725 | int frame_size, | 
| 726 | OopMapSet* oop_maps, | 
| 727 | ExceptionHandlerTable* handler_table, | 
| 728 | ImplicitExceptionTable* nul_chk_table, | 
| 729 | AbstractCompiler* compiler, | 
| 730 | int comp_level, | 
| 731 | const GrowableArrayView<RuntimeStub*>& native_invokers | 
| 732 | #if INCLUDE_JVMCI | 
| 733 | , char* speculations, | 
| 734 | int speculations_len, | 
| 735 | int jvmci_data_size | 
| 736 | #endif | 
| 737 | ) | 
| 738 | : CompiledMethod(method, "nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false), | 
| 739 | _is_unloading_state(0), | 
| 740 | _native_receiver_sp_offset(in_ByteSize(-1)), | 
| 741 | _native_basic_lock_sp_offset(in_ByteSize(-1)) | 
| 742 | { | 
| 743 |   assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR"); | 
| 744 | { | 
| 745 | debug_only(NoSafepointVerifier nsv;) | 
| 746 | assert_locked_or_safepoint(CodeCache_lock); | 
| 747 | |
| 748 | _deopt_handler_begin = (address) this; | 
| 749 | _deopt_mh_handler_begin = (address) this; | 
| 750 | |
| 751 | init_defaults(); | 
| 752 | _entry_bci = entry_bci; | 
| 753 | _compile_id = compile_id; | 
| 754 | _comp_level = comp_level; | 
| 755 | _orig_pc_offset = orig_pc_offset; | 
| 756 | _hotness_counter = NMethodSweeper::hotness_counter_reset_val(); | 
| 757 | |
| 758 | // Section offsets | 
| 759 | _consts_offset = content_offset() + code_buffer->total_offset_of(code_buffer->consts()); | 
| 760 | _stub_offset = content_offset() + code_buffer->total_offset_of(code_buffer->stubs()); | 
| 761 | set_ctable_begin(header_begin() + _consts_offset); | 
| 762 | |
| 763 | #if INCLUDE_JVMCI | 
| 764 | if (compiler->is_jvmci()) { | 
| 765 | // JVMCI might not produce any stub sections | 
| 766 | if (offsets->value(CodeOffsets::Exceptions) != -1) { | 
| 767 | _exception_offset = code_offset() + offsets->value(CodeOffsets::Exceptions); | 
| 768 | } else { | 
| 769 | _exception_offset = -1; | 
| 770 | } | 
| 771 | if (offsets->value(CodeOffsets::Deopt) != -1) { | 
| 772 | _deopt_handler_begin = (address) this + code_offset() + offsets->value(CodeOffsets::Deopt); | 
| 773 | } else { | 
| 774 | _deopt_handler_begin = NULL; | 
| 775 | } | 
| 776 | if (offsets->value(CodeOffsets::DeoptMH) != -1) { | 
| 777 | _deopt_mh_handler_begin = (address) this + code_offset() + offsets->value(CodeOffsets::DeoptMH); | 
| 778 | } else { | 
| 779 | _deopt_mh_handler_begin = NULL; | 
| 780 | } | 
| 781 | } else | 
| 782 | #endif | 
| 783 | { | 
| 784 | // Exception handler and deopt handler are in the stub section | 
| 785 |       assert(offsets->value(CodeOffsets::Exceptions) != -1, "must be set"); | 
| 786 |       assert(offsets->value(CodeOffsets::Deopt     ) != -1, "must be set"); | 
| 787 | |
| 788 | _exception_offset = _stub_offset + offsets->value(CodeOffsets::Exceptions); | 
| 789 | _deopt_handler_begin = (address) this + _stub_offset + offsets->value(CodeOffsets::Deopt); | 
| 790 | if (offsets->value(CodeOffsets::DeoptMH) != -1) { | 
| 791 | _deopt_mh_handler_begin = (address) this + _stub_offset + offsets->value(CodeOffsets::DeoptMH); | 
| 792 | } else { | 
| 793 | _deopt_mh_handler_begin = NULL; | 
| 794 | } | 
| 795 | } | 
| 796 | if (offsets->value(CodeOffsets::UnwindHandler) != -1) { | 
| 797 | _unwind_handler_offset = code_offset() + offsets->value(CodeOffsets::UnwindHandler); | 
| 798 | } else { | 
| 799 | _unwind_handler_offset = -1; | 
| 800 | } | 
| 801 | |
| 802 | _oops_offset = data_offset(); | 
| 803 | _metadata_offset = _oops_offset + align_up(code_buffer->total_oop_size(), oopSize); | 
| 804 | int scopes_data_offset = _metadata_offset + align_up(code_buffer->total_metadata_size(), wordSize); | 
| 805 | |
| 806 | _scopes_pcs_offset = scopes_data_offset + align_up(debug_info->data_size (), oopSize); | 
| 807 | _dependencies_offset = _scopes_pcs_offset + adjust_pcs_size(debug_info->pcs_size()); | 
| 808 | _native_invokers_offset = _dependencies_offset + align_up((int)dependencies->size_in_bytes(), oopSize); | 
| 809 | _handler_table_offset = _native_invokers_offset + align_up(checked_cast<int>(native_invokers.data_size_in_bytes()), oopSize); | 
| 810 | _nul_chk_table_offset = _handler_table_offset + align_up(handler_table->size_in_bytes(), oopSize); | 
| 811 | #if INCLUDE_JVMCI | 
| 812 | _speculations_offset = _nul_chk_table_offset + align_up(nul_chk_table->size_in_bytes(), oopSize); | 
| 813 | _jvmci_data_offset = _speculations_offset + align_up(speculations_len, oopSize); | 
| 814 | _nmethod_end_offset = _jvmci_data_offset + align_up(jvmci_data_size, oopSize); | 
| 815 | #else | 
| 816 | _nmethod_end_offset = _nul_chk_table_offset + align_up(nul_chk_table->size_in_bytes(), oopSize); | 
| 817 | #endif | 
| 818 | _entry_point = code_begin() + offsets->value(CodeOffsets::Entry); | 
| 819 | _verified_entry_point = code_begin() + offsets->value(CodeOffsets::Verified_Entry); | 
| 820 | _osr_entry_point = code_begin() + offsets->value(CodeOffsets::OSR_Entry); | 
| 821 | _exception_cache = NULL; | 
| 822 | _scopes_data_begin = (address) this + scopes_data_offset; | 
| 823 | |
| 824 | _pc_desc_container.reset_to(scopes_pcs_begin()); | 
| 825 | |
| 826 | code_buffer->copy_code_and_locs_to(this); | 
| 827 | // Copy contents of ScopeDescRecorder to nmethod | 
| 828 | code_buffer->copy_values_to(this); | 
| 829 | debug_info->copy_to(this); | 
| 830 | dependencies->copy_to(this); | 
| 831 | if (native_invokers.is_nonempty()) { // can not get address of zero-length array | 
| 832 | // Copy native stubs | 
| 833 | memcpy(native_invokers_begin(), native_invokers.adr_at(0), native_invokers.data_size_in_bytes()); | 
| 834 | } | 
| 835 | clear_unloading_state(); | 
| 836 | |
| 837 | Universe::heap()->register_nmethod(this); | 
| 838 | debug_only(Universe::heap()->verify_nmethod(this)); | 
| 839 | |
| 840 | CodeCache::commit(this); | 
| 841 | |
| 842 | // Copy contents of ExceptionHandlerTable to nmethod | 
| 843 | handler_table->copy_to(this); | 
| 844 | nul_chk_table->copy_to(this); | 
| 845 | |
| 846 | #if INCLUDE_JVMCI | 
| 847 | // Copy speculations to nmethod | 
| 848 | if (speculations_size() != 0) { | 
| 849 | memcpy(speculations_begin(), speculations, speculations_len); | 
| 850 | } | 
| 851 | #endif | 
| 852 | |
| 853 | // we use the information of entry points to find out if a method is | 
| 854 | // static or non static | 
| 855 |     assert(compiler->is_c2() || compiler->is_jvmci() || | 
| 856 |            _method->is_static() == (entry_point() == _verified_entry_point), | 
| 857 |            " entry points must be same for static methods and vice versa"); | 
| 858 | } | 
| 859 | } | 
| 860 | |
| 861 | // Print a short set of xml attributes to identify this nmethod. The | 
| 862 | // output should be embedded in some other element. | 
| 863 | void nmethod::log_identity(xmlStream* log) const { | 
| 864 | log->print(" compile_id='%d'", compile_id()); | 
| 865 | const char* nm_kind = compile_kind(); | 
| 866 | if (nm_kind != NULL) log->print(" compile_kind='%s'", nm_kind); | 
| 867 | log->print(" compiler='%s'", compiler_name()); | 
| 868 | if (TieredCompilation) { | 
| 869 | log->print(" level='%d'", comp_level()); | 
| 870 | } | 
| 871 | #if INCLUDE_JVMCI | 
| 872 | if (jvmci_nmethod_data() != NULL) { | 
| 873 | const char* jvmci_name = jvmci_nmethod_data()->name(); | 
| 874 | if (jvmci_name != NULL) { | 
| 875 | log->print(" jvmci_mirror_name='"); | 
| 876 | log->text("%s", jvmci_name); | 
| 877 | log->print("'"); | 
| 878 | } | 
| 879 | } | 
| 880 | #endif | 
| 881 | } | 
| 882 | |
| 883 | |
| 884 | #define LOG_OFFSET(log, name) \ | 
| 885 | if (p2i(name##_end()) - p2i(name##_begin())) \ | 
| 886 | log->print(" " XSTR(name) "_offset='" INTX_FORMAT "'" , \ | 
| 887 | p2i(name##_begin()) - p2i(this)) | 
| 888 | |
| 889 | |
| 890 | void nmethod::log_new_nmethod() const { | 
| 891 | if (LogCompilation && xtty != NULL) { | 
| 892 | ttyLocker ttyl; | 
| 893 | xtty->begin_elem("nmethod"); | 
| 894 | log_identity(xtty); | 
| 895 | xtty->print(" entry='" INTPTR_FORMAT "' size='%d'", p2i(code_begin()), size()); | 
| 896 | xtty->print(" address='" INTPTR_FORMAT "'", p2i(this)); | 
| 897 | |
| 898 | LOG_OFFSET(xtty, relocation); | 
| 899 | LOG_OFFSET(xtty, consts); | 
| 900 | LOG_OFFSET(xtty, insts); | 
| 901 | LOG_OFFSET(xtty, stub); | 
| 902 | LOG_OFFSET(xtty, scopes_data); | 
| 903 | LOG_OFFSET(xtty, scopes_pcs); | 
| 904 | LOG_OFFSET(xtty, dependencies); | 
| 905 | LOG_OFFSET(xtty, handler_table); | 
| 906 | LOG_OFFSET(xtty, nul_chk_table); | 
| 907 | LOG_OFFSET(xtty, oops); | 
| 908 | LOG_OFFSET(xtty, metadata); | 
| 909 | |
| 910 | xtty->method(method()); | 
| 911 | xtty->stamp(); | 
| 912 | xtty->end_elem(); | 
| 913 | } | 
| 914 | } | 
| 915 | |
| 916 | #undef LOG_OFFSET | 
| 917 | |
| 918 | |
| 919 | // Print out more verbose output usually for a newly created nmethod. | 
| 920 | void nmethod::print_on(outputStream* st, const char* msg) const { | 
| 921 | if (st != NULL) { | 
| 922 | ttyLocker ttyl; | 
| 923 | if (WizardMode) { | 
| 924 | CompileTask::print(st, this, msg, /*short_form:*/ true); | 
| 925 | st->print_cr(" (" INTPTR_FORMAT ")", p2i(this)); | 
| 926 | } else { | 
| 927 | CompileTask::print(st, this, msg, /*short_form:*/ false); | 
| 928 | } | 
| 929 | } | 
| 930 | } | 
| 931 | |
| 932 | void nmethod::maybe_print_nmethod(DirectiveSet* directive) { | 
| 933 | bool printnmethods = directive->PrintAssemblyOption || directive->PrintNMethodsOption; | 
| 934 | if (printnmethods || PrintDebugInfo || PrintRelocations || PrintDependencies || PrintExceptionHandlers) { | 
| 935 | print_nmethod(printnmethods); | 
| 936 | } | 
| 937 | } | 
| 938 | |
| 939 | void nmethod::print_nmethod(bool printmethod) { | 
| 940 | run_nmethod_entry_barrier(); // ensure all embedded OOPs are valid before printing | 
| 941 | |
| 942 | ttyLocker ttyl; // keep the following output all in one block | 
| 943 | if (xtty != NULL) { | 
| 944 | xtty->begin_head("print_nmethod"); | 
| 945 | log_identity(xtty); | 
| 946 | xtty->stamp(); | 
| 947 | xtty->end_head(); | 
| 948 | } | 
| 949 | // Print the header part, then print the requested information. | 
| 950 | // This is both handled in decode2(). | 
| 951 | if (printmethod) { | 
| 952 | ResourceMark m; | 
| 953 | if (is_compiled_by_c1()) { | 
| 954 | tty->cr(); | 
| 955 | tty->print_cr("============================= C1-compiled nmethod =============================="); | 
| 956 | } | 
| 957 | if (is_compiled_by_jvmci()) { | 
| 958 | tty->cr(); | 
| 959 | tty->print_cr("=========================== JVMCI-compiled nmethod ============================="); | 
| 960 | } | 
| 961 | tty->print_cr("----------------------------------- Assembly -----------------------------------"); | 
| 962 | decode2(tty); | 
| 963 | #if defined(SUPPORT_DATA_STRUCTS) | 
| 964 | if (AbstractDisassembler::show_structs()) { | 
| 965 | // Print the oops from the underlying CodeBlob as well. | 
| 966 | tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - "); | 
| 967 | print_oops(tty); | 
| 968 | tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - "); | 
| 969 | print_metadata(tty); | 
| 970 | tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - "); | 
| 971 | print_pcs(); | 
| 972 | tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - "); | 
| 973 | if (oop_maps() != NULL) { | 
| 974 | tty->print("oop maps:"); // oop_maps()->print_on(tty) outputs a cr() at the beginning | 
| 975 | oop_maps()->print_on(tty); | 
| 976 | tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - "); | 
| 977 | } | 
| 978 | } | 
| 979 | #endif | 
| 980 | } else { | 
| 981 | print(); // print the header part only. | 
| 982 | } | 
| 983 | |
| 984 | #if defined(SUPPORT_DATA_STRUCTS) | 
| 985 | if (AbstractDisassembler::show_structs()) { | 
| 986 | methodHandle mh(Thread::current(), _method); | 
| 987 | if (printmethod || PrintDebugInfo || CompilerOracle::has_option(mh, CompileCommand::PrintDebugInfo)) { | 
| 988 | print_scopes(); | 
| 989 | tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - "); | 
| 990 | } | 
| 991 | if (printmethod || PrintRelocations || CompilerOracle::has_option(mh, CompileCommand::PrintRelocations)) { | 
| 992 | print_relocations(); | 
| 993 | tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - "); | 
| 994 | } | 
| 995 | if (printmethod || PrintDependencies || CompilerOracle::has_option(mh, CompileCommand::PrintDependencies)) { | 
| 996 | print_dependencies(); | 
| 997 | tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - "); | 
| 998 | } | 
| 999 | if (printmethod && native_invokers_begin() < native_invokers_end()) { | 
| 1000 | print_native_invokers(); | 
| 1001 | tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - "); | 
| 1002 | } | 
| 1003 | if (printmethod || PrintExceptionHandlers) { | 
| 1004 | print_handler_table(); | 
| 1005 | tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - "); | 
| 1006 | print_nul_chk_table(); | 
| 1007 | tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - "); | 
| 1008 | } | 
| 1009 | |
| 1010 | if (printmethod) { | 
| 1011 | print_recorded_oops(); | 
| 1012 | tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - "); | 
| 1013 | print_recorded_metadata(); | 
| 1014 | tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - "); | 
| 1015 | } | 
| 1016 | } | 
| 1017 | #endif | 
| 1018 | |
| 1019 | if (xtty != NULL) { | 
| 1020 | xtty->tail("print_nmethod"); | 
| 1021 | } | 
| 1022 | } | 
| 1023 | |
| 1024 | |
| 1025 | // Promote one word from an assembly-time handle to a live embedded oop. | 
| 1026 | inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) { | 
| 1027 | if (handle == NULL || | 
| 1028 | // As a special case, IC oops are initialized to 1 or -1. | 
| 1029 | handle == (jobject) Universe::non_oop_word()) { | 
| 1030 | *(void**)dest = handle; | 
| 1031 | } else { | 
| 1032 | *dest = JNIHandles::resolve_non_null(handle); | 
| 1033 | } | 
| 1034 | } | 
| 1035 | |
| 1036 | |
| 1037 | // Have to have the same name because it's called by a template | 
| 1038 | void nmethod::copy_values(GrowableArray<jobject>* array) { | 
| 1039 | int length = array->length(); | 
| 1040 |   assert((address)(oops_begin() + length) <= (address)oops_end(), "oops big enough"); | 
| 1041 | oop* dest = oops_begin(); | 
| 1042 | for (int index = 0 ; index < length; index++) { | 
| 1043 | initialize_immediate_oop(&dest[index], array->at(index)); | 
| 1044 | } | 
| 1045 | |
| 1046 | // Now we can fix up all the oops in the code. We need to do this | 
| 1047 | // in the code because the assembler uses jobjects as placeholders. | 
| 1048 | // The code and relocations have already been initialized by the | 
| 1049 | // CodeBlob constructor, so it is valid even at this early point to | 
| 1050 | // iterate over relocations and patch the code. | 
| 1051 | fix_oop_relocations(NULL__null, NULL__null, /*initialize_immediates=*/ true); | 
| 1052 | } | 
| 1053 | |
| 1054 | void nmethod::copy_values(GrowableArray<Metadata*>* array) { | 
| 1055 | int length = array->length(); | 
| 1056 |   assert((address)(metadata_begin() + length) <= (address)metadata_end(), "big enough"); | 
| 1057 | Metadata** dest = metadata_begin(); | 
| 1058 | for (int index = 0 ; index < length; index++) { | 
| 1059 | dest[index] = array->at(index); | 
| 1060 | } | 
| 1061 | } | 
| 1062 | |
| 1063 | void nmethod::free_native_invokers() { | 
| 1064 | for (RuntimeStub** it = native_invokers_begin(); it < native_invokers_end(); it++) { | 
| 1065 | CodeCache::free(*it); | 
| 1066 | } | 
| 1067 | } | 
| 1068 | |
| 1069 | void nmethod::fix_oop_relocations(address begin, address end, bool initialize_immediates) { | 
| 1070 | // re-patch all oop-bearing instructions, just in case some oops moved | 
| 1071 | RelocIterator iter(this, begin, end); | 
| 1072 | while (iter.next()) { | 
| 1073 | if (iter.type() == relocInfo::oop_type) { | 
| 1074 | oop_Relocation* reloc = iter.oop_reloc(); | 
| 1075 | if (initialize_immediates && reloc->oop_is_immediate()) { | 
| 1076 | oop* dest = reloc->oop_addr(); | 
| 1077 | initialize_immediate_oop(dest, cast_from_oop<jobject>(*dest)); | 
| 1078 | } | 
| 1079 | // Refresh the oop-related bits of this instruction. | 
| 1080 | reloc->fix_oop_relocation(); | 
| 1081 | } else if (iter.type() == relocInfo::metadata_type) { | 
| 1082 | metadata_Relocation* reloc = iter.metadata_reloc(); | 
| 1083 | reloc->fix_metadata_relocation(); | 
| 1084 | } | 
| 1085 | } | 
| 1086 | } | 
| 1087 | |
| 1088 | |
| 1089 | void nmethod::verify_clean_inline_caches() { | 
| 1090 |   assert(CompiledICLocker::is_safe(this), "mt unsafe call"); | 
| 1091 | |
| 1092 | ResourceMark rm; | 
| 1093 | RelocIterator iter(this, oops_reloc_begin()); | 
| 1094 | while(iter.next()) { | 
| 1095 | switch(iter.type()) { | 
| 1096 | case relocInfo::virtual_call_type: | 
| 1097 | case relocInfo::opt_virtual_call_type: { | 
| 1098 | CompiledIC *ic = CompiledIC_at(&iter); | 
| 1099 | // Ok, to lookup references to zombies here | 
| 1100 | CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination()); | 
| 1101 |         assert(cb != NULL, "destination not in CodeBlob?"); | 
| 1102 | nmethod* nm = cb->as_nmethod_or_null(); | 
| 1103 | if( nm != NULL ) { | 
| 1104 | // Verify that inline caches pointing to both zombie and not_entrant methods are clean | 
| 1105 | if (!nm->is_in_use() || (nm->method()->code() != nm)) { | 
| 1106 |             assert(ic->is_clean(), "IC should be clean"); | 
| 1107 | } | 
| 1108 | } | 
| 1109 | break; | 
| 1110 | } | 
| 1111 | case relocInfo::static_call_type: { | 
| 1112 | CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc()); | 
| 1113 | CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination()); | 
| 1114 |         assert(cb != NULL, "destination not in CodeBlob?"); | 
| 1115 | nmethod* nm = cb->as_nmethod_or_null(); | 
| 1116 | if( nm != NULL ) { | 
| 1117 | // Verify that inline caches pointing to both zombie and not_entrant methods are clean | 
| 1118 | if (!nm->is_in_use() || (nm->method()->code() != nm)) { | 
| 1119 |             assert(csc->is_clean(), "IC should be clean"); | 
| 1120 | } | 
| 1121 | } | 
| 1122 | break; | 
| 1123 | } | 
| 1124 | default: | 
| 1125 | break; | 
| 1126 | } | 
| 1127 | } | 
| 1128 | } | 
| 1129 | |
| 1130 | // This is a private interface with the sweeper. | 
| 1131 | void nmethod::mark_as_seen_on_stack() { | 
| 1132 |   assert(is_alive(), "Must be an alive method"); | 
| 1133 | // Set the traversal mark to ensure that the sweeper does 2 | 
| 1134 | // cleaning passes before moving to zombie. | 
| 1135 | set_stack_traversal_mark(NMethodSweeper::traversal_count()); | 
| 1136 | } | 
| 1137 | |
| 1138 | // Tell if a non-entrant method can be converted to a zombie (i.e., | 
| 1139 | // there are no activations on the stack, not in use by the VM, | 
| 1140 | // and not in use by the ServiceThread) | 
| 1141 | bool nmethod::can_convert_to_zombie() { | 
| 1142 | // Note that this is called when the sweeper has observed the nmethod to be | 
| 1143 | // not_entrant. However, with concurrent code cache unloading, the state | 
| 1144 | // might have moved on to unloaded if it is_unloading(), due to racing | 
| 1145 | // concurrent GC threads. | 
| 1146 |   assert(is_not_entrant() || is_unloading() || | 
| 1147 |          !Thread::current()->is_Code_cache_sweeper_thread(), | 
| 1148 |          "must be a non-entrant method if called from sweeper"); | 
| 1149 | |
| 1150 | // Since the nmethod sweeper only does partial sweep the sweeper's traversal | 
| 1151 | // count can be greater than the stack traversal count before it hits the | 
| 1152 | // nmethod for the second time. | 
| 1153 | // If an is_unloading() nmethod is still not_entrant, then it is not safe to | 
| 1154 | // convert it to zombie due to GC unloading interactions. However, if it | 
| 1155 | // has become unloaded, then it is okay to convert such nmethods to zombie. | 
| 1156 | return stack_traversal_mark() + 1 < NMethodSweeper::traversal_count() && | 
| 1157 | !is_locked_by_vm() && (!is_unloading() || is_unloaded()); | 
| 1158 | } | 
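|  | // A minimal worked example (illustrative only, the numbers are hypothetical and | 
|  | // assume the sweeper bumps NMethodSweeper::traversal_count() once per sweep) of | 
|  | // the traversal-count check in can_convert_to_zombie() above: | 
|  | //   mark_as_seen_on_stack() stored stack_traversal_mark() == 10; | 
|  | //   at traversal_count() == 11, 10 + 1 < 11 is false -> not convertible yet; | 
|  | //   at traversal_count() == 12, 10 + 1 < 12 is true  -> two cleaning passes have | 
|  | //   elapsed, so conversion may proceed (if the lock/unloading checks also pass). | 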
| 1159 | |
| 1160 | void nmethod::inc_decompile_count() { | 
| 1161 | if (!is_compiled_by_c2() && !is_compiled_by_jvmci()) return; | 
| 1162 | // Could be gated by ProfileTraps, but do not bother... | 
| 1163 | Method* m = method(); | 
| 1164 | if (m == NULL) return; | 
| 1165 | MethodData* mdo = m->method_data(); | 
| 1166 | if (mdo == NULL) return; | 
| 1167 | // There is a benign race here. See comments in methodData.hpp. | 
| 1168 | mdo->inc_decompile_count(); | 
| 1169 | } | 
| 1170 | |
| 1171 | bool nmethod::try_transition(int new_state_int) { | 
| 1172 | signed char new_state = new_state_int; | 
| 1173 | #ifdef ASSERT | 
| 1174 | if (new_state != unloaded) { | 
| 1175 | assert_lock_strong(CompiledMethod_lock); | 
| 1176 | } | 
| 1177 | #endif | 
| 1178 | for (;;) { | 
| 1179 | signed char old_state = Atomic::load(&_state); | 
| 1180 | if (old_state >= new_state) { | 
| 1181 | // Ensure monotonicity of transitions. | 
| 1182 | return false; | 
| 1183 | } | 
| 1184 | if (Atomic::cmpxchg(&_state, old_state, new_state) == old_state) { | 
| 1185 | return true; | 
| 1186 | } | 
| 1187 | } | 
| 1188 | } | 
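|  | // A short illustration (not part of the source) of the monotonicity guarantee in | 
|  | // try_transition() above: the _state values are ordered so that a "more dead" state | 
|  | // compares greater than a "more alive" one. Suppose thread T1 calls | 
|  | // try_transition(S1) and thread T2 calls try_transition(S2) with S1 < S2. Whichever | 
|  | // CAS wins, the loser either still moves the state forward (old_state < its target) | 
|  | // or returns false (old_state >= target); the state never moves back toward a | 
|  | // more-alive value. | 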
| 1189 | |
| 1190 | void nmethod::make_unloaded() { | 
| 1191 | post_compiled_method_unload(); | 
| 1192 | |
| 1193 | // This nmethod is being unloaded, make sure that dependencies | 
| 1194 | // recorded in instanceKlasses get flushed. | 
| 1195 | // Since this work is being done during a GC, defer deleting dependencies from the | 
| 1196 | // InstanceKlass. | 
| 1197 |   assert(Universe::heap()->is_gc_active() || | 
| 1198 |          Thread::current()->is_ConcurrentGC_thread() || | 
| 1199 |          Thread::current()->is_Worker_thread(), | 
| 1200 |          "should only be called during gc"); | 
| 1201 | flush_dependencies(/*delete_immediately*/false); | 
| 1202 | |
| 1203 | // Break cycle between nmethod & method | 
| 1204 |   LogTarget(Trace, class, unload, nmethod) lt; | 
| 1205 | if (lt.is_enabled()) { | 
| 1206 | LogStream ls(lt); | 
| 1207 | ls.print("making nmethod " INTPTR_FORMAT | 
| 1208 | " unloadable, Method*(" INTPTR_FORMAT | 
| 1209 | ") ", | 
| 1210 | p2i(this), p2i(_method)); | 
| 1211 | ls.cr(); | 
| 1212 | } | 
| 1213 | // Unlink the osr method, so we do not look this up again | 
| 1214 | if (is_osr_method()) { | 
| 1215 | // Invalidate the osr nmethod only once. Note that with concurrent | 
| 1216 | // code cache unloading, OSR nmethods are invalidated before they | 
| 1217 | // are made unloaded. Therefore, this becomes a no-op then. | 
| 1218 | if (is_in_use()) { | 
| 1219 | invalidate_osr_method(); | 
| 1220 | } | 
| 1221 | #ifdef ASSERT | 
| 1222 | if (method() != NULL) { | 
| 1223 | // Make sure osr nmethod is invalidated, i.e. not on the list | 
| 1224 | bool found = method()->method_holder()->remove_osr_nmethod(this); | 
| 1225 |       assert(!found, "osr nmethod should have been invalidated"); | 
| 1226 | } | 
| 1227 | #endif | 
| 1228 | } | 
| 1229 | |
| 1230 | // If _method is already NULL the Method* is about to be unloaded, | 
| 1231 | // so we don't have to break the cycle. Note that it is possible to | 
| 1232 | // have the Method* live here, in case we unload the nmethod because | 
| 1233 | // it is pointing to some oop (other than the Method*) being unloaded. | 
| 1234 | if (_method != NULL) { | 
| 1235 | _method->unlink_code(this); | 
| 1236 | } | 
| 1237 | |
| 1238 | // Make the class unloaded - i.e., change state and notify sweeper | 
| 1239 |   assert(SafepointSynchronize::is_at_safepoint() || | 
| 1240 |          Thread::current()->is_ConcurrentGC_thread() || | 
| 1241 |          Thread::current()->is_Worker_thread(), | 
| 1242 |          "must be at safepoint"); | 
| 1243 | |
| 1244 | { | 
| 1245 | // Clear ICStubs and release any CompiledICHolders. | 
| 1246 | CompiledICLocker ml(this); | 
| 1247 | clear_ic_callsites(); | 
| 1248 | } | 
| 1249 | |
| 1250 | // Unregister must be done before the state change | 
| 1251 | { | 
| 1252 | MutexLocker ml(SafepointSynchronize::is_at_safepoint() ? NULL : CodeCache_lock, | 
| 1253 | Mutex::_no_safepoint_check_flag); | 
| 1254 | Universe::heap()->unregister_nmethod(this); | 
| 1255 | } | 
| 1256 | |
| 1257 | // Clear the method of this dead nmethod | 
| 1258 | set_method(NULL); | 
| 1259 | |
| 1260 | // Log the unloading. | 
| 1261 | log_state_change(); | 
| 1262 | |
| 1263 | // The Method* is gone at this point | 
| 1264 |   assert(_method == NULL, "Tautology"); | 
| 1265 | | 
| 1266 | set_osr_link(NULL); | 
| 1267 | NMethodSweeper::report_state_change(this); | 
| 1268 | |
| 1269 | bool transition_success = try_transition(unloaded); | 
| 1270 | |
| 1271 | // It is an important invariant that there exists no race between | 
| 1272 | // the sweeper and GC thread competing for making the same nmethod | 
| 1273 | // zombie and unloaded respectively. This is ensured by | 
| 1274 | // can_convert_to_zombie() returning false for any is_unloading() | 
| 1275 | // nmethod, informing the sweeper not to step on any GC toes. | 
| 1276 |   assert(transition_success, "Invalid nmethod transition to unloaded"); | 
| 1277 | | 
| 1278 | #if INCLUDE_JVMCI | 
| 1279 | // Clear the link between this nmethod and a HotSpotNmethod mirror | 
| 1280 | JVMCINMethodData* nmethod_data = jvmci_nmethod_data(); | 
| 1281 | if (nmethod_data != NULL) { | 
| 1282 | nmethod_data->invalidate_nmethod_mirror(this); | 
| 1283 | } | 
| 1284 | #endif | 
| 1285 | } | 
| 1286 | |
| 1287 | void nmethod::invalidate_osr_method() { | 
| 1288 |   assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod"); | 
| 1289 | // Remove from list of active nmethods | 
| 1290 | if (method() != NULL) { | 
| 1291 | method()->method_holder()->remove_osr_nmethod(this); | 
| 1292 | } | 
| 1293 | } | 
| 1294 | |
| 1295 | void nmethod::log_state_change() const { | 
| 1296 | if (LogCompilation) { | 
| 1297 | if (xtty != NULL) { | 
| 1298 | ttyLocker ttyl; // keep the following output all in one block | 
| 1299 | if (_state == unloaded) { | 
| 1300 | xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'", | 
| 1301 | os::current_thread_id()); | 
| 1302 | } else { | 
| 1303 | xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s", | 
| 1304 | os::current_thread_id(), | 
| 1305 | (_state == zombie ? " zombie='1'" : "")); | 
| 1306 | } | 
| 1307 | log_identity(xtty); | 
| 1308 | xtty->stamp(); | 
| 1309 | xtty->end_elem(); | 
| 1310 | } | 
| 1311 | } | 
| 1312 | |
| 1313 | const char *state_msg = _state == zombie ? "made zombie" : "made not entrant"; | 
| 1314 | CompileTask::print_ul(this, state_msg); | 
| 1315 | if (PrintCompilation && _state != unloaded) { | 
| 1316 | print_on(tty, state_msg); | 
| 1317 | } | 
| 1318 | } | 
| 1319 | |
| 1320 | void nmethod::unlink_from_method() { | 
| 1321 | if (method() != NULL) { | 
| 1322 | method()->unlink_code(this); | 
| 1323 | } | 
| 1324 | } | 
| 1325 | |
| 1326 | /** | 
| 1327 | * Common functionality for both make_not_entrant and make_zombie | 
| 1328 | */ | 
| 1329 | bool nmethod::make_not_entrant_or_zombie(int state) { | 
| 1330 |   assert(state == zombie || state == not_entrant, "must be zombie or not_entrant"); | 
| 1331 | |
| 1332 | if (Atomic::load(&_state) >= state) { | 
| 1333 | // Avoid taking the lock if already in required state. | 
| 1334 | // This is safe from races because the state is an end-state, | 
| 1335 | // which the nmethod cannot back out of once entered. | 
| 1336 | // No need for fencing either. | 
| 1337 | return false; | 
| 1338 | } | 
| 1339 | |
| 1340 | // Make sure the nmethod is not flushed. | 
| 1341 | nmethodLocker nml(this); | 
| 1342 | // This can be called while the system is already at a safepoint which is ok | 
| 1343 | NoSafepointVerifier nsv; | 
| 1344 | |
| 1345 | // during patching, depending on the nmethod state we must notify the GC that | 
| 1346 | // code has been unloaded, unregistering it. We cannot do this right while | 
| 1347 | // holding the CompiledMethod_lock because we need to use the CodeCache_lock. This | 
| 1348 | // would be prone to deadlocks. | 
| 1349 | // This flag is used to remember whether we need to later lock and unregister. | 
| 1350 | bool nmethod_needs_unregister = false; | 
| 1351 | |
| 1352 | { | 
| 1353 | // Enter critical section. Does not block for safepoint. | 
| 1354 | MutexLocker ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock, Mutex::_no_safepoint_check_flag); | 
| 1355 | |
| 1356 | // This logic is equivalent to the logic below for patching the | 
| 1357 | // verified entry point of regular methods. We check that the | 
| 1358 | // nmethod is in use to ensure that it is invalidated only once. | 
| 1359 | if (is_osr_method() && is_in_use()) { | 
| 1360 | // this effectively makes the osr nmethod not entrant | 
| 1361 | invalidate_osr_method(); | 
| 1362 | } | 
| 1363 | |
| 1364 | if (Atomic::load(&_state) >= state) { | 
| 1365 | // another thread already performed this transition so nothing | 
| 1366 | // to do, but return false to indicate this. | 
| 1367 | return false; | 
| 1368 | } | 
| 1369 | |
| 1370 | // The caller can be calling the method statically or through an inline | 
| 1371 | // cache call. | 
| 1372 | if (!is_osr_method() && !is_not_entrant()) { | 
| 1373 | NativeJump::patch_verified_entry(entry_point(), verified_entry_point(), | 
| 1374 | SharedRuntime::get_handle_wrong_method_stub()); | 
| 1375 | } | 
| 1376 | |
| 1377 | if (is_in_use() && update_recompile_counts()) { | 
| 1378 | // It's a true state change, so mark the method as decompiled. | 
| 1379 | // Do it only for transition from alive. | 
| 1380 | inc_decompile_count(); | 
| 1381 | } | 
| 1382 | |
| 1383 | // If the state is becoming a zombie, signal to unregister the nmethod with | 
| 1384 | // the heap. | 
| 1385 | // This nmethod may have already been unloaded during a full GC. | 
| 1386 | if ((state == zombie) && !is_unloaded()) { | 
| 1387 | nmethod_needs_unregister = true; | 
| 1388 | } | 
| 1389 | |
| 1390 | // Must happen before state change. Otherwise we have a race condition in | 
| 1391 | // nmethod::can_convert_to_zombie(). I.e., a method can immediately | 
| 1392 | // transition its state from 'not_entrant' to 'zombie' without having to wait | 
| 1393 | // for stack scanning. | 
| 1394 | if (state == not_entrant) { | 
| 1395 | mark_as_seen_on_stack(); | 
| 1396 | OrderAccess::storestore(); // _stack_traversal_mark and _state | 
| 1397 | } | 
| 1398 | |
| 1399 | // Change state | 
| 1400 | if (!try_transition(state)) { | 
| 1401 | // If the transition fails, it is due to another thread making the nmethod more | 
| 1402 | // dead. In particular, one thread might be making the nmethod unloaded concurrently. | 
| 1403 | // If so, having patched in the jump in the verified entry unnecessarily is fine. | 
| 1404 | // The nmethod is no longer possible to call by Java threads. | 
| 1405 | // Incrementing the decompile count is also fine as the caller of make_not_entrant() | 
| 1406 | // had a valid reason to deoptimize the nmethod. | 
| 1407 | // Marking the nmethod as seen on stack also has no effect, as the nmethod is now | 
| 1408 | // !is_alive(), and the seen on stack value is only used to convert not_entrant | 
| 1409 | // nmethods to zombie in can_convert_to_zombie(). | 
| 1410 | return false; | 
| 1411 | } | 
| 1412 | |
| 1413 | // Log the transition once | 
| 1414 | log_state_change(); | 
| 1415 | |
| 1416 | // Remove nmethod from method. | 
| 1417 | unlink_from_method(); | 
| 1418 | |
| 1419 | } // leave critical region under CompiledMethod_lock | 
| 1420 | |
| 1421 | #if INCLUDE_JVMCI | 
| 1422 | // Invalidate can't occur while holding the Patching lock | 
| 1423 | JVMCINMethodData* nmethod_data = jvmci_nmethod_data(); | 
| 1424 | if (nmethod_data != NULL) { | 
| 1425 | nmethod_data->invalidate_nmethod_mirror(this); | 
| 1426 | } | 
| 1427 | #endif | 
| 1428 | |
| 1429 | #ifdef ASSERT | 
| 1430 | if (is_osr_method() && method() != NULL) { | 
| 1431 | // Make sure osr nmethod is invalidated, i.e. not on the list | 
| 1432 | bool found = method()->method_holder()->remove_osr_nmethod(this); | 
| 1433 |     assert(!found, "osr nmethod should have been invalidated"); | 
| 1434 | } | 
| 1435 | #endif | 
| 1436 | |
| 1437 | // When the nmethod becomes zombie it is no longer alive so the | 
| 1438 | // dependencies must be flushed. nmethods in the not_entrant | 
| 1439 | // state will be flushed later when the transition to zombie | 
| 1440 | // happens or they get unloaded. | 
| 1441 | if (state == zombie) { | 
| 1442 | { | 
| 1443 | // Flushing dependencies must be done before any possible | 
| 1444 | // safepoint can sneak in, otherwise the oops used by the | 
| 1445 | // dependency logic could have become stale. | 
| 1446 | MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); | 
| 1447 | if (nmethod_needs_unregister) { | 
| 1448 | Universe::heap()->unregister_nmethod(this); | 
| 1449 | } | 
| 1450 | flush_dependencies(/*delete_immediately*/true); | 
| 1451 | } | 
| 1452 | |
| 1453 | #if INCLUDE_JVMCI | 
| 1454 | // Now that the nmethod has been unregistered, it's | 
| 1455 | // safe to clear the HotSpotNmethod mirror oop. | 
| 1456 | if (nmethod_data != NULL) { | 
| 1457 | nmethod_data->clear_nmethod_mirror(this); | 
| 1458 | } | 
| 1459 | #endif | 
| 1460 | |
| 1461 | // Clear ICStubs to prevent back patching stubs of zombie or flushed | 
| 1462 | // nmethods during the next safepoint (see ICStub::finalize), as well | 
| 1463 | // as to free up CompiledICHolder resources. | 
| 1464 | { | 
| 1465 | CompiledICLocker ml(this); | 
| 1466 | clear_ic_callsites(); | 
| 1467 | } | 
| 1468 | |
| 1469 | // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload | 
| 1470 | // event and it hasn't already been reported for this nmethod then | 
| 1471 | // report it now. The event may have been reported earlier if the GC | 
| 1472 | // marked it for unloading). JvmtiDeferredEventQueue support means | 
| 1473 | // we no longer go to a safepoint here. | 
| 1474 | post_compiled_method_unload(); | 
| 1475 | |
| 1476 | #ifdef ASSERT | 
| 1477 | // It's no longer safe to access the oops section since zombie | 
| 1478 | // nmethods aren't scanned for GC. | 
| 1479 | _oops_are_stale = true; | 
| 1480 | #endif | 
| 1481 | // the Method may be reclaimed by class unloading now that the | 
| 1482 | // nmethod is in zombie state | 
| 1483 | set_method(NULL); | 
| 1484 | } else { | 
| 1485 |     assert(state == not_entrant, "other cases may need to be handled differently"); | 
| 1486 | } | 
| 1487 | |
| 1488 | if (TraceCreateZombies && state == zombie) { | 
| 1489 | ResourceMark m; | 
| 1490 | tty->print_cr("nmethod <" INTPTR_FORMAT "> %s code made %s", p2i(this), this->method() ? this->method()->name_and_sig_as_C_string() : "null", (state == not_entrant) ? "not entrant" : "zombie"); | 
| 1491 | } | 
| 1492 | |
| 1493 | NMethodSweeper::report_state_change(this); | 
| 1494 | return true; | 
| 1495 | } | 
| 1496 | |
| 1497 | void nmethod::flush() { | 
| 1498 | MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); | 
| 1499 | // Note that there are no valid oops in the nmethod anymore. | 
| 1500 |   assert(!is_osr_method() || is_unloaded() || is_zombie(), | 
| 1501 |          "osr nmethod must be unloaded or zombie before flushing"); | 
| 1502 |   assert(is_zombie() || is_osr_method(), "must be a zombie method"); | 
| 1503 |   assert (!is_locked_by_vm(), "locked methods shouldn't be flushed"); | 
| 1504 | assert_locked_or_safepoint(CodeCache_lock); | 
| 1505 | |
| 1506 | // completely deallocate this method | 
| 1507 | Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, p2i(this)); | 
| 1508 | if (PrintMethodFlushing) { | 
| 1509 | tty->print_cr("*flushing %s nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT | 
| 1510 | "/Free CodeCache:" SIZE_FORMAT "Kb", | 
| 1511 | is_osr_method() ? "osr" : "",_compile_id, p2i(this), CodeCache::blob_count(), | 
| 1512 | CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(this))/1024); | 
| 1513 | } | 
| 1514 | |
| 1515 | // We need to deallocate any ExceptionCache data. | 
| 1516 | // Note that we do not need to grab the nmethod lock for this, it | 
| 1517 | // better be thread safe if we're disposing of it! | 
| 1518 | ExceptionCache* ec = exception_cache(); | 
| 1519 | set_exception_cache(NULL); | 
| 1520 | while(ec != NULL) { | 
| 1521 | ExceptionCache* next = ec->next(); | 
| 1522 | delete ec; | 
| 1523 | ec = next; | 
| 1524 | } | 
| 1525 | |
| 1526 | Universe::heap()->flush_nmethod(this); | 
| 1527 | CodeCache::unregister_old_nmethod(this); | 
| 1528 | |
| 1529 | CodeBlob::flush(); | 
| 1530 | CodeCache::free(this); | 
| 1531 | } | 
| 1532 | |
| 1533 | oop nmethod::oop_at(int index) const { | 
| 1534 | if (index == 0) { | 
| 1535 | return NULL; | 
| 1536 | } | 
| 1537 | return NativeAccess<AS_NO_KEEPALIVE>::oop_load(oop_addr_at(index)); | 
| 1538 | } | 
| 1539 | |
| 1540 | oop nmethod::oop_at_phantom(int index) const { | 
| 1541 | if (index == 0) { | 
| 1542 | return NULL; | 
| 1543 | } | 
| 1544 | return NativeAccess<ON_PHANTOM_OOP_REF>::oop_load(oop_addr_at(index)); | 
| 1545 | } | 
| 1546 | |
| 1547 | // | 
| 1548 | // Notify all classes this nmethod is dependent on that it is no | 
| 1549 | // longer dependent. This should only be called in two situations. | 
| 1550 | // First, when a nmethod transitions to a zombie all dependents need | 
| 1551 | // to be clear. Since zombification happens at a safepoint there's no | 
| 1552 | // synchronization issues. The second place is a little more tricky. | 
| 1553 | // During phase 1 of mark sweep class unloading may happen and as a | 
| 1554 | // result some nmethods may get unloaded. In this case the flushing | 
| 1555 | // of dependencies must happen during phase 1 since after GC any | 
| 1556 | // dependencies in the unloaded nmethod won't be updated, so | 
| 1557 | // traversing the dependency information is unsafe. In that case this | 
| 1558 | // function is called with a boolean argument and this function only | 
| 1559 | // notifies instanceKlasses that are reachable | 
| 1560 | |
| 1561 | void nmethod::flush_dependencies(bool delete_immediately) { | 
| 1562 |   DEBUG_ONLY(bool called_by_gc = Universe::heap()->is_gc_active() || | 
| 1563 |                                  Thread::current()->is_ConcurrentGC_thread() || | 
| 1564 |                                  Thread::current()->is_Worker_thread();) | 
| 1565 |   assert(called_by_gc != delete_immediately, | 
| 1566 |   "delete_immediately is false if and only if we are called during GC"); | 
| 1567 | if (!has_flushed_dependencies()) { | 
| 1568 | set_has_flushed_dependencies(); | 
| 1569 | for (Dependencies::DepStream deps(this); deps.next(); ) { | 
| 1570 | if (deps.type() == Dependencies::call_site_target_value) { | 
| 1571 | // CallSite dependencies are managed on per-CallSite instance basis. | 
| 1572 | oop call_site = deps.argument_oop(0); | 
| 1573 | if (delete_immediately) { | 
| 1574 | assert_locked_or_safepoint(CodeCache_lock); | 
| 1575 | MethodHandles::remove_dependent_nmethod(call_site, this); | 
| 1576 | } else { | 
| 1577 | MethodHandles::clean_dependency_context(call_site); | 
| 1578 | } | 
| 1579 | } else { | 
| 1580 | Klass* klass = deps.context_type(); | 
| 1581 | if (klass == NULL) { | 
| 1582 | continue; // ignore things like evol_method | 
| 1583 | } | 
| 1584 | // During GC delete_immediately is false, and liveness | 
| 1585 | // of dependee determines class that needs to be updated. | 
| 1586 | if (delete_immediately) { | 
| 1587 | assert_locked_or_safepoint(CodeCache_lock); | 
| 1588 | InstanceKlass::cast(klass)->remove_dependent_nmethod(this); | 
| 1589 | } else if (klass->is_loader_alive()) { | 
| 1590 | // The GC may clean dependency contexts concurrently and in parallel. | 
| 1591 | InstanceKlass::cast(klass)->clean_dependency_context(); | 
| 1592 | } | 
| 1593 | } | 
| 1594 | } | 
| 1595 | } | 
| 1596 | } | 
| 1597 | |
| 1598 | // ------------------------------------------------------------------ | 
| 1599 | // post_compiled_method_load_event | 
| 1600 | // new method for install_code() path | 
| 1601 | // Transfer information from compilation to jvmti | 
| 1602 | void nmethod::post_compiled_method_load_event(JvmtiThreadState* state) { | 
| 1603 | |
| 1604 | // Don't post this nmethod load event if it is already dying | 
| 1605 | // because the sweeper might already be deleting this nmethod. | 
| 1606 | { | 
| 1607 | MutexLocker ml(CompiledMethod_lock, Mutex::_no_safepoint_check_flag); | 
| 1608 | // When the nmethod is acquired from the CodeCache iterator, it can racingly become zombie | 
| 1609 | // before this code is called. Filter them out here under the CompiledMethod_lock. | 
| 1610 | if (!is_alive()) { | 
| 1611 | return; | 
| 1612 | } | 
| 1613 | // As for is_alive() nmethods, we also don't want them to racingly become zombie once we | 
| 1614 | // release this lock, so we check that this is not going to be the case. | 
| 1615 | if (is_not_entrant() && can_convert_to_zombie()) { | 
| 1616 | return; | 
| 1617 | } | 
| 1618 | // Ensure the sweeper can't collect this nmethod until it become "active" with JvmtiThreadState::nmethods_do. | 
| 1619 | mark_as_seen_on_stack(); | 
| 1620 | } | 
| 1621 | |
| 1622 | // This is a bad time for a safepoint. We don't want | 
| 1623 | // this nmethod to get unloaded while we're queueing the event. | 
| 1624 | NoSafepointVerifier nsv; | 
| 1625 | |
| 1626 | Method* m = method(); | 
| 1627 | HOTSPOT_COMPILED_METHOD_LOAD( | 
| 1628 | (char *) m->klass_name()->bytes(), | 
| 1629 | m->klass_name()->utf8_length(), | 
| 1630 | (char *) m->name()->bytes(), | 
| 1631 | m->name()->utf8_length(), | 
| 1632 | (char *) m->signature()->bytes(), | 
| 1633 | m->signature()->utf8_length(), | 
| 1634 | insts_begin(), insts_size()); | 
| 1635 | |
| 1636 | |
| 1637 | if (JvmtiExport::should_post_compiled_method_load()) { | 
| 1638 | // Only post unload events if load events are found. | 
| 1639 | set_load_reported(); | 
| 1640 | // If a JavaThread hasn't been passed in, let the Service thread | 
| 1641 | // (which is a real Java thread) post the event | 
| 1642 | JvmtiDeferredEvent event = JvmtiDeferredEvent::compiled_method_load_event(this); | 
| 1643 | if (state == NULL) { | 
| 1644 | // Execute any barrier code for this nmethod as if it's called, since | 
| 1645 | // keeping it alive looks like stack walking. | 
| 1646 | run_nmethod_entry_barrier(); | 
| 1647 | ServiceThread::enqueue_deferred_event(&event); | 
| 1648 | } else { | 
| 1649 | // This enters the nmethod barrier outside in the caller. | 
| 1650 | state->enqueue_event(&event); | 
| 1651 | } | 
| 1652 | } | 
| 1653 | } | 
| 1654 | |
| 1655 | void nmethod::post_compiled_method_unload() { | 
| 1656 | if (unload_reported()) { | 
| 1657 | // During unloading we transition to unloaded and then to zombie | 
| 1658 | // and the unloading is reported during the first transition. | 
| 1659 | return; | 
| 1660 | } | 
| 1661 | |
| 1662 |   assert(_method != NULL && !is_unloaded(), "just checking"); | 
| 1663 | DTRACE_METHOD_UNLOAD_PROBE(method()); | 
| 1664 | |
| 1665 | // If a JVMTI agent has enabled the CompiledMethodUnload event then | 
| 1666 | // post the event. Sometime later this nmethod will be made a zombie | 
| 1667 | // by the sweeper but the Method* will not be valid at that point. | 
| 1668 | // The jmethodID is a weak reference to the Method* so if | 
| 1669 | // it's being unloaded there's no way to look it up since the weak | 
| 1670 | // ref will have been cleared. | 
| 1671 | |
| 1672 | // Don't bother posting the unload if the load event wasn't posted. | 
| 1673 | if (load_reported() && JvmtiExport::should_post_compiled_method_unload()) { | 
| 1674 |     assert(!unload_reported(), "already unloaded"); | 
| 1675 | JvmtiDeferredEvent event = | 
| 1676 | JvmtiDeferredEvent::compiled_method_unload_event( | 
| 1677 | method()->jmethod_id(), insts_begin()); | 
| 1678 | ServiceThread::enqueue_deferred_event(&event); | 
| 1679 | } | 
| 1680 | |
| 1681 | // The JVMTI CompiledMethodUnload event can be enabled or disabled at | 
| 1682 | // any time. As the nmethod is being unloaded now we mark it as | 
| 1683 | // having the unload event reported - this will ensure that we don't | 
| 1684 | // attempt to report the event in the unlikely scenario where the | 
| 1685 | // event is enabled at the time the nmethod is made a zombie. | 
| 1686 | set_unload_reported(); | 
| 1687 | } | 
| 1688 | |
| 1689 | // Iterate over metadata calling this function. Used by RedefineClasses | 
| 1690 | void nmethod::metadata_do(MetadataClosure* f) { | 
| 1691 | { | 
| 1692 | // Visit all immediate references that are embedded in the instruction stream. | 
| 1693 | RelocIterator iter(this, oops_reloc_begin()); | 
| 1694 | while (iter.next()) { | 
| 1695 | if (iter.type() == relocInfo::metadata_type) { | 
| 1696 | metadata_Relocation* r = iter.metadata_reloc(); | 
| 1697 | // In this metadata, we must only follow those metadatas directly embedded in | 
| 1698 | // the code. Other metadatas (oop_index>0) are seen as part of | 
| 1699 | // the metadata section below. | 
| 1700 |         assert(1 == (r->metadata_is_immediate()) + | 
| 1701 |                (r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()), | 
| 1702 |                "metadata must be found in exactly one place"); | 
| 1703 | if (r->metadata_is_immediate() && r->metadata_value() != NULL) { | 
| 1704 | Metadata* md = r->metadata_value(); | 
| 1705 | if (md != _method) f->do_metadata(md); | 
| 1706 | } | 
| 1707 | } else if (iter.type() == relocInfo::virtual_call_type) { | 
| 1708 | // Check compiledIC holders associated with this nmethod | 
| 1709 | ResourceMark rm; | 
| 1710 | CompiledIC *ic = CompiledIC_at(&iter); | 
| 1711 | if (ic->is_icholder_call()) { | 
| 1712 | CompiledICHolder* cichk = ic->cached_icholder(); | 
| 1713 | f->do_metadata(cichk->holder_metadata()); | 
| 1714 | f->do_metadata(cichk->holder_klass()); | 
| 1715 | } else { | 
| 1716 | Metadata* ic_oop = ic->cached_metadata(); | 
| 1717 | if (ic_oop != NULL) { | 
| 1718 | f->do_metadata(ic_oop); | 
| 1719 | } | 
| 1720 | } | 
| 1721 | } | 
| 1722 | } | 
| 1723 | } | 
| 1724 | |
| 1725 | // Visit the metadata section | 
| 1726 | for (Metadata** p = metadata_begin(); p < metadata_end(); p++) { | 
| 1727 | if (*p == Universe::non_oop_word() || *p == NULL) continue; // skip non-oops | 
| 1728 | Metadata* md = *p; | 
| 1729 | f->do_metadata(md); | 
| 1730 | } | 
| 1731 | |
| 1732 | // Visit metadata not embedded in the other places. | 
| 1733 | if (_method != NULL) f->do_metadata(_method); | 
| 1734 | } | 
| 1735 | |
| 1736 | // The _is_unloading_state encodes a tuple comprising the unloading cycle | 
| 1737 | // and the result of IsUnloadingBehaviour::is_unloading() for that cycle. | 
| 1738 | // This is the bit layout of the _is_unloading_state byte: 00000CCU | 
| 1739 | // CC refers to the cycle, which has 2 bits, and U refers to the result of | 
| 1740 | // IsUnloadingBehaviour::is_unloading() for that unloading cycle. | 
| 1741 | |
| 1742 | class IsUnloadingState: public AllStatic { | 
| 1743 | static const uint8_t _is_unloading_mask = 1; | 
| 1744 | static const uint8_t _is_unloading_shift = 0; | 
| 1745 | static const uint8_t _unloading_cycle_mask = 6; | 
| 1746 | static const uint8_t _unloading_cycle_shift = 1; | 
| 1747 | |
| 1748 | static uint8_t set_is_unloading(uint8_t state, bool value) { | 
| 1749 | state &= ~_is_unloading_mask; | 
| 1750 | if (value) { | 
| 1751 | state |= 1 << _is_unloading_shift; | 
| 1752 | } | 
| 1753 |     assert(is_unloading(state) == value, "unexpected unloading cycle overflow"); | 
| 1754 | return state; | 
| 1755 | } | 
| 1756 | |
| 1757 | static uint8_t set_unloading_cycle(uint8_t state, uint8_t value) { | 
| 1758 | state &= ~_unloading_cycle_mask; | 
| 1759 | state |= value << _unloading_cycle_shift; | 
| 1760 |     assert(unloading_cycle(state) == value, "unexpected unloading cycle overflow"); | 
| 1761 | return state; | 
| 1762 | } | 
| 1763 | |
| 1764 | public: | 
| 1765 | static bool is_unloading(uint8_t state) { return (state & _is_unloading_mask) >> _is_unloading_shift == 1; } | 
| 1766 | static uint8_t unloading_cycle(uint8_t state) { return (state & _unloading_cycle_mask) >> _unloading_cycle_shift; } | 
| 1767 | |
| 1768 | static uint8_t create(bool is_unloading, uint8_t unloading_cycle) { | 
| 1769 | uint8_t state = 0; | 
| 1770 | state = set_is_unloading(state, is_unloading); | 
| 1771 | state = set_unloading_cycle(state, unloading_cycle); | 
| 1772 | return state; | 
| 1773 | } | 
| 1774 | }; | 
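|  | // A worked example of the 00000CCU layout documented above (derived directly from | 
|  | // the masks and shifts in IsUnloadingState; the concrete numbers are illustrative): | 
|  | //   create(true, 2) sets U = 1 and CC = 10, producing the byte 0b00000101 (0x5); | 
|  | //   is_unloading(0x5) == true and unloading_cycle(0x5) == 2. | 
|  | // Because CC is only 2 bits wide, a cycle value that does not fit would be silently | 
|  | // truncated, which is what the "unexpected unloading cycle overflow" asserts above | 
|  | // are there to catch. | 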
| 1775 | |
| 1776 | bool nmethod::is_unloading() { | 
| 1777 | uint8_t state = RawAccess<MO_RELAXED>::load(&_is_unloading_state); | 
| 1778 | bool state_is_unloading = IsUnloadingState::is_unloading(state); | 
| 1779 | if (state_is_unloading) { | 
| 1780 | return true; | 
| 1781 | } | 
| 1782 | uint8_t state_unloading_cycle = IsUnloadingState::unloading_cycle(state); | 
| 1783 | uint8_t current_cycle = CodeCache::unloading_cycle(); | 
| 1784 | if (state_unloading_cycle == current_cycle) { | 
| 1785 | return false; | 
| 1786 | } | 
| 1787 | |
| 1788 | // The IsUnloadingBehaviour is responsible for checking if there are any dead | 
| 1789 | // oops in the CompiledMethod, by calling oops_do on it. | 
| 1790 | state_unloading_cycle = current_cycle; | 
| 1791 | |
| 1792 | if (is_zombie()) { | 
| 1793 | // Zombies without calculated unloading epoch are never unloading due to GC. | 
| 1794 | |
| 1795 | // There are no races where a previously observed is_unloading() nmethod | 
| 1796 | // suddenly becomes not is_unloading() due to here being observed as zombie. | 
| 1797 | |
| 1798 | // With STW unloading, all is_alive() && is_unloading() nmethods are unlinked | 
| 1799 | // and unloaded in the safepoint. That makes races where an nmethod is first | 
| 1800 | // observed as is_alive() && is_unloading() and subsequently observed as | 
| 1801 | // is_zombie() impossible. | 
| 1802 | |
| 1803 | // With concurrent unloading, all references to is_unloading() nmethods are | 
| 1804 | // first unlinked (e.g. IC caches and dependency contexts). Then a global | 
| 1805 | // handshake operation is performed with all JavaThreads before finally | 
| 1806 | // unloading the nmethods. The sweeper never converts is_alive() && is_unloading() | 
| 1807 | // nmethods to zombies; it waits for them to become is_unloaded(). So before | 
| 1808 | // the global handshake, it is impossible for is_unloading() nmethods to | 
| 1809 | // racingly become is_zombie(). And is_unloading() is calculated for all is_alive() | 
| 1810 | // nmethods before taking that global handshake, meaning that it will never | 
| 1811 | // be recalculated after the handshake. | 
| 1812 | |
| 1813 | // After that global handshake, is_unloading() nmethods are only observable | 
| 1814 | // to the iterators, and they will never trigger recomputation of the cached | 
| 1815 | // is_unloading_state, and hence may not suffer from such races. | 
| 1816 | |
| 1817 | state_is_unloading = false; | 
| 1818 | } else { | 
| 1819 | state_is_unloading = IsUnloadingBehaviour::current()->is_unloading(this); | 
| 1820 | } | 
| 1821 | |
| 1822 | state = IsUnloadingState::create(state_is_unloading, state_unloading_cycle); | 
| 1823 | |
| 1824 | RawAccess<MO_RELAXED>::store(&_is_unloading_state, state); | 
| 1825 | |
| 1826 | return state_is_unloading; | 
| 1827 | } | 
| 1828 | |
| 1829 | void nmethod::clear_unloading_state() { | 
| 1830 | uint8_t state = IsUnloadingState::create(false, CodeCache::unloading_cycle()); | 
| 1831 | RawAccess<MO_RELAXED>::store(&_is_unloading_state, state); | 
| 1832 | } | 
| 1833 | |
| 1834 | |
| 1835 | // This is called at the end of the strong tracing/marking phase of a | 
| 1836 | // GC to unload an nmethod if it contains otherwise unreachable | 
| 1837 | // oops. | 
| 1838 | |
| 1839 | void nmethod::do_unloading(bool unloading_occurred) { | 
| 1840 | // Make sure the oop's ready to receive visitors | 
| 1841 |   assert(!is_zombie() && !is_unloaded(), | 
| 1842 |          "should not call follow on zombie or unloaded nmethod"); | 
| 1843 | |
| 1844 | if (is_unloading()) { | 
| 1845 | make_unloaded(); | 
| 1846 | } else { | 
| 1847 |     guarantee(unload_nmethod_caches(unloading_occurred), | 
| 1848 |               "Should not need transition stubs"); | 
| 1849 | } | 
| 1850 | } | 
| 1851 | |
| 1852 | void nmethod::oops_do(OopClosure* f, bool allow_dead) { | 
| 1853 | // make sure the oops ready to receive visitors | 
| 1854 |   assert(allow_dead || is_alive(), "should not call follow on dead nmethod"); | 
| 1855 | |
| 1856 | // Prevent extra code cache walk for platforms that don't have immediate oops. | 
| 1857 | if (relocInfo::mustIterateImmediateOopsInCode()) { | 
| 1858 | RelocIterator iter(this, oops_reloc_begin()); | 
| 1859 | |
| 1860 | while (iter.next()) { | 
| 1861 | if (iter.type() == relocInfo::oop_type ) { | 
| 1862 | oop_Relocation* r = iter.oop_reloc(); | 
| 1863 | // In this loop, we must only follow those oops directly embedded in | 
| 1864 | // the code. Other oops (oop_index>0) are seen as part of scopes_oops. | 
| 1865 |         assert(1 == (r->oop_is_immediate()) + | 
| 1866 |                (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()), | 
| 1867 |                "oop must be found in exactly one place"); | 
| 1868 | if (r->oop_is_immediate() && r->oop_value() != NULL) { | 
| 1869 | f->do_oop(r->oop_addr()); | 
| 1870 | } | 
| 1871 | } | 
| 1872 | } | 
| 1873 | } | 
| 1874 | |
| 1875 | // Scopes | 
| 1876 | // This includes oop constants not inlined in the code stream. | 
| 1877 | for (oop* p = oops_begin(); p < oops_end(); p++) { | 
| 1878 | if (*p == Universe::non_oop_word()) continue; // skip non-oops | 
| 1879 | f->do_oop(p); | 
| 1880 | } | 
| 1881 | } | 
| 1882 | |
| 1883 | nmethod* volatile nmethod::_oops_do_mark_nmethods; | 
| 1884 | |
| 1885 | void nmethod::oops_do_log_change(const char* state) { | 
| 1886 |   LogTarget(Trace, gc, nmethod) lt; | 
| 1887 | if (lt.is_enabled()) { | 
| 1888 | LogStream ls(lt); | 
| 1889 | CompileTask::print(&ls, this, state, true /* short_form */); | 
| 1890 | } | 
| 1891 | } | 
| 1892 | |
| 1893 | bool nmethod::oops_do_try_claim() { | 
| 1894 | if (oops_do_try_claim_weak_request()) { | 
| 1895 | nmethod* result = oops_do_try_add_to_list_as_weak_done(); | 
| 1896 |     assert(result == NULL, "adding to global list as weak done must always succeed."); | 
| 1897 | return true; | 
| 1898 | } | 
| 1899 | return false; | 
| 1900 | } | 
| 1901 | |
| 1902 | bool nmethod::oops_do_try_claim_weak_request() { | 
| 1903 |   assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint"); | 
| 1904 | | 
| 1905 | if ((_oops_do_mark_link == NULL) && | 
| 1906 | (Atomic::replace_if_null(&_oops_do_mark_link, mark_link(this, claim_weak_request_tag)))) { | 
| 1907 | oops_do_log_change("oops_do, mark weak request"); | 
| 1908 | return true; | 
| 1909 | } | 
| 1910 | return false; | 
| 1911 | } | 
| 1912 | |
| 1913 | void nmethod::oops_do_set_strong_done(nmethod* old_head) { | 
| 1914 | _oops_do_mark_link = mark_link(old_head, claim_strong_done_tag); | 
| 1915 | } | 
| 1916 | |
| 1917 | nmethod::oops_do_mark_link* nmethod::oops_do_try_claim_strong_done() { | 
| 1918 |   assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint"); | 
| 1919 | | 
| 1920 | oops_do_mark_link* old_next = Atomic::cmpxchg(&_oops_do_mark_link, mark_link(NULL, claim_weak_request_tag), mark_link(this, claim_strong_done_tag)); | 
| 1921 | if (old_next == NULL) { | 
| 1922 | oops_do_log_change("oops_do, mark strong done"); | 
| 1923 | } | 
| 1924 | return old_next; | 
| 1925 | } | 
| 1926 | |
| 1927 | nmethod::oops_do_mark_link* nmethod::oops_do_try_add_strong_request(nmethod::oops_do_mark_link* next) { | 
| 1928 |   assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint"); | 
| 1929 |   assert(next == mark_link(this, claim_weak_request_tag), "Should be claimed as weak"); | 
| 1930 | |
| 1931 | oops_do_mark_link* old_next = Atomic::cmpxchg(&_oops_do_mark_link, next, mark_link(this, claim_strong_request_tag)); | 
| 1932 | if (old_next == next) { | 
| 1933 | oops_do_log_change("oops_do, mark strong request"); | 
| 1934 | } | 
| 1935 | return old_next; | 
| 1936 | } | 
| 1937 | |
| 1938 | bool nmethod::oops_do_try_claim_weak_done_as_strong_done(nmethod::oops_do_mark_link* next) { | 
| 1939 |   assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint"); | 
| 1940 |   assert(extract_state(next) == claim_weak_done_tag, "Should be claimed as weak done"); | 
| 1941 | |
| 1942 | oops_do_mark_link* old_next = Atomic::cmpxchg(&_oops_do_mark_link, next, mark_link(extract_nmethod(next), claim_strong_done_tag)); | 
| 1943 | if (old_next == next) { | 
| 1944 | oops_do_log_change("oops_do, mark weak done -> mark strong done"); | 
| 1945 | return true; | 
| 1946 | } | 
| 1947 | return false; | 
| 1948 | } | 
| 1949 | |
| 1950 | nmethod* nmethod::oops_do_try_add_to_list_as_weak_done() { | 
| 1951 |   assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint"); | 
| 1952 | | 
| 1953 |   assert(extract_state(_oops_do_mark_link) == claim_weak_request_tag || | 
| 1954 |          extract_state(_oops_do_mark_link) == claim_strong_request_tag, | 
| 1955 |          "must be but is nmethod " PTR_FORMAT " %u", p2i(extract_nmethod(_oops_do_mark_link)), extract_state(_oops_do_mark_link)); | 
| 1956 | |
| 1957 | nmethod* old_head = Atomic::xchg(&_oops_do_mark_nmethods, this); | 
| 1958 | // Self-loop if needed. | 
| 1959 |   if (old_head == NULL) { | 
| 1960 | old_head = this; | 
| 1961 | } | 
| 1962 | // Try to install end of list and weak done tag. | 
| 1963 | if (Atomic::cmpxchg(&_oops_do_mark_link, mark_link(this, claim_weak_request_tag), mark_link(old_head, claim_weak_done_tag)) == mark_link(this, claim_weak_request_tag)) { | 
| 1964 | oops_do_log_change("oops_do, mark weak done"); | 
| 1965 |     return NULL; | 
| 1966 | } else { | 
| 1967 | return old_head; | 
| 1968 | } | 
| 1969 | } | 
| 1970 | |
| 1971 | void nmethod::oops_do_add_to_list_as_strong_done() { | 
| 1972 |   assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint"); | 
| 1973 | |
| 1974 | nmethod* old_head = Atomic::xchg(&_oops_do_mark_nmethods, this); | 
| 1975 | // Self-loop if needed. | 
| 1976 |   if (old_head == NULL) { | 
| 1977 | old_head = this; | 
| 1978 | } | 
| 1979 |   assert(_oops_do_mark_link == mark_link(this, claim_strong_done_tag), "must be but is nmethod " PTR_FORMAT " state %u", | 
| 1980 |          p2i(extract_nmethod(_oops_do_mark_link)), extract_state(_oops_do_mark_link)); | 
| 1981 | |
| 1982 | oops_do_set_strong_done(old_head); | 
| 1983 | } | 
| 1984 | |
| 1985 | void nmethod::oops_do_process_weak(OopsDoProcessor* p) { | 
| 1986 | if (!oops_do_try_claim_weak_request()) { | 
| 1987 | // Failed to claim for weak processing. | 
| 1988 | oops_do_log_change("oops_do, mark weak request fail"); | 
| 1989 | return; | 
| 1990 | } | 
| 1991 | |
| 1992 | p->do_regular_processing(this); | 
| 1993 | |
| 1994 | nmethod* old_head = oops_do_try_add_to_list_as_weak_done(); | 
| 1995 |   if (old_head == NULL) { | 
| 1996 | return; | 
| 1997 | } | 
| 1998 | oops_do_log_change("oops_do, mark weak done fail"); | 
| 1999 | // Adding to global list failed, another thread added a strong request. | 
| 2000 |   assert(extract_state(_oops_do_mark_link) == claim_strong_request_tag, | 
| 2001 |          "must be but is %u", extract_state(_oops_do_mark_link)); | 
| 2002 | |
| 2003 | oops_do_log_change("oops_do, mark weak request -> mark strong done"); | 
| 2004 | |
| 2005 | oops_do_set_strong_done(old_head); | 
| 2006 | // Do missing strong processing. | 
| 2007 | p->do_remaining_strong_processing(this); | 
| 2008 | } | 
| 2009 | |
| 2010 | void nmethod::oops_do_process_strong(OopsDoProcessor* p) { | 
| 2011 | oops_do_mark_link* next_raw = oops_do_try_claim_strong_done(); | 
| 2012 |   if (next_raw == NULL) { | 
| 2013 | p->do_regular_processing(this); | 
| 2014 | oops_do_add_to_list_as_strong_done(); | 
| 2015 | return; | 
| 2016 | } | 
| 2017 | // Claim failed. Figure out why and handle it. | 
| 2018 | if (oops_do_has_weak_request(next_raw)) { | 
| 2019 | oops_do_mark_link* old = next_raw; | 
| 2020 | // Claim failed because being weak processed (state == "weak request"). | 
| 2021 | // Try to request deferred strong processing. | 
| 2022 | next_raw = oops_do_try_add_strong_request(old); | 
| 2023 | if (next_raw == old) { | 
| 2024 | // Successfully requested deferred strong processing. | 
| 2025 | return; | 
| 2026 | } | 
| 2027 | // Failed because of a concurrent transition. No longer in "weak request" state. | 
| 2028 | } | 
| 2029 | if (oops_do_has_any_strong_state(next_raw)) { | 
| 2030 | // Already claimed for strong processing or requested for such. | 
| 2031 | return; | 
| 2032 | } | 
| 2033 | if (oops_do_try_claim_weak_done_as_strong_done(next_raw)) { | 
| 2034 | // Successfully claimed "weak done" as "strong done". Do the missing marking. | 
| 2035 | p->do_remaining_strong_processing(this); | 
| 2036 | return; | 
| 2037 | } | 
| 2038 | // Claim failed, some other thread got it. | 
| 2039 | } | 
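The weak/strong claiming above is a small state machine driven by compare-and-swap on a tagged link word. Below is a minimal, self-contained sketch of that idea; it is not HotSpot code, and the tag values, names and layout are simplified assumptions.

#include <atomic>
#include <cstdint>

// Hypothetical 2-bit claim tags stored in the low bits of a link word.
enum ClaimTag : uintptr_t { WeakRequest = 0, WeakDone = 1, StrongRequest = 2, StrongDone = 3 };

struct ClaimWord {
  std::atomic<uintptr_t> link{0};               // analogous to _oops_do_mark_link

  static uintptr_t make(void* next, ClaimTag t) { return reinterpret_cast<uintptr_t>(next) | t; }
  static ClaimTag  tag(uintptr_t v)             { return ClaimTag(v & 0x3); }

  // Like oops_do_try_claim_weak_done_as_strong_done: the upgrade only succeeds
  // if the word still holds the expected "weak done" value.
  bool try_upgrade(uintptr_t expected, ClaimTag to) {
    uintptr_t desired = (expected & ~uintptr_t(0x3)) | to;
    return link.compare_exchange_strong(expected, desired);
  }
};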
| 2040 | |
| 2041 | void nmethod::oops_do_marking_prologue() { | 
| 2042 |   assert_at_safepoint(); | 
| 2043 | |
| 2044 |   log_trace(gc, nmethod)("oops_do_marking_prologue"); | 
| 2045 |   assert(_oops_do_mark_nmethods == NULL, "must be empty"); | 
| 2046 | } | 
| 2047 | |
| 2048 | void nmethod::oops_do_marking_epilogue() { | 
| 2049 |   assert_at_safepoint(); | 
| 2050 | |
| 2051 | nmethod* next = _oops_do_mark_nmethods; | 
| 2052 |   _oops_do_mark_nmethods = NULL; | 
| 2053 |   if (next != NULL) { | 
| 2054 | nmethod* cur; | 
| 2055 | do { | 
| 2056 | cur = next; | 
| 2057 | next = extract_nmethod(cur->_oops_do_mark_link); | 
| 2058 |       cur->_oops_do_mark_link = NULL; | 
| 2059 |       DEBUG_ONLY(cur->verify_oop_relocations()); | 
| 2060 | |
| 2061 |       LogTarget(Trace, gc, nmethod) lt; | 
| 2062 | if (lt.is_enabled()) { | 
| 2063 | LogStream ls(lt); | 
| 2064 | CompileTask::print(&ls, cur, "oops_do, unmark", /*short_form:*/ true); | 
| 2065 | } | 
| 2066 | // End if self-loop has been detected. | 
| 2067 | } while (cur != next); | 
| 2068 | } | 
| 2069 |   log_trace(gc, nmethod)("oops_do_marking_epilogue"); | 
| 2070 | } | 
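The epilogue above walks a list whose last element is marked by pointing to itself rather than to NULL. A reduced, self-contained sketch of the same walk (illustrative only, not HotSpot code):

struct Node { Node* next; };

void unlink_all(Node*& head) {
  Node* next = head;
  head = nullptr;
  if (next == nullptr) return;
  Node* cur;
  do {
    cur  = next;
    next = cur->next;      // the final node links to itself
    cur->next = nullptr;   // clear the mark, as _oops_do_mark_link is cleared above
  } while (cur != next);   // stop once the self-loop is reached
}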
| 2071 | |
| 2072 | inline bool includes(void* p, void* from, void* to) { | 
| 2073 | return from <= p && p < to; | 
| 2074 | } | 
| 2075 | |
| 2076 | |
| 2077 | void nmethod::copy_scopes_pcs(PcDesc* pcs, int count) { | 
| 2078 |   assert(count >= 2, "must be sentinel values, at least"); | 
| 2079 | |
| 2080 | #ifdef ASSERT | 
| 2081 | // must be sorted and unique; we do a binary search in find_pc_desc() | 
| 2082 | int prev_offset = pcs[0].pc_offset(); | 
| 2083 |   assert(prev_offset == PcDesc::lower_offset_limit, | 
| 2084 |          "must start with a sentinel"); | 
| 2085 | for (int i = 1; i < count; i++) { | 
| 2086 | int this_offset = pcs[i].pc_offset(); | 
| 2087 |     assert(this_offset > prev_offset, "offsets must be sorted"); | 
| 2088 | prev_offset = this_offset; | 
| 2089 | } | 
| 2090 |   assert(prev_offset == PcDesc::upper_offset_limit, | 
| 2091 |          "must end with a sentinel"); | 
| 2092 | #endif //ASSERT | 
| 2093 | |
| 2094 | // Search for MethodHandle invokes and tag the nmethod. | 
| 2095 | for (int i = 0; i < count; i++) { | 
| 2096 | if (pcs[i].is_method_handle_invoke()) { | 
| 2097 | set_has_method_handle_invokes(true); | 
| 2098 | break; | 
| 2099 | } | 
| 2100 | } | 
| 2101 |   assert(has_method_handle_invokes() == (_deopt_mh_handler_begin != NULL), "must have deopt mh handler"); | 
| 2102 | |
| 2103 | int size = count * sizeof(PcDesc); | 
| 2104 |   assert(scopes_pcs_size() >= size, "oob"); | 
| 2105 | memcpy(scopes_pcs_begin(), pcs, size); | 
| 2106 | |
| 2107 | // Adjust the final sentinel downward. | 
| 2108 | PcDesc* last_pc = &scopes_pcs_begin()[count-1]; | 
| 2109 |   assert(last_pc->pc_offset() == PcDesc::upper_offset_limit, "sanity"); | 
| 2110 | last_pc->set_pc_offset(content_size() + 1); | 
| 2111 | for (; last_pc + 1 < scopes_pcs_end(); last_pc += 1) { | 
| 2112 | // Fill any rounding gaps with copies of the last record. | 
| 2113 | last_pc[1] = last_pc[0]; | 
| 2114 | } | 
| 2115 | // The following assert could fail if sizeof(PcDesc) is not | 
| 2116 | // an integral multiple of oopSize (the rounding term). | 
| 2117 | // If it fails, change the logic to always allocate a multiple | 
| 2118 | // of sizeof(PcDesc), and fill unused words with copies of *last_pc. | 
| 2119 |   assert(last_pc + 1 == scopes_pcs_end(), "must match exactly"); | 
| 2120 | } | 
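The gap-filling step above pads the copied array out to its allocated end by repeating the final (sentinel) record. A generic sketch of that pattern (illustrative only; this template helper is not part of HotSpot):

template <typename T>
void fill_tail_with_last(T* last, T* alloc_end) {
  // Duplicate the last record into any rounding gap between 'last' and 'alloc_end'.
  for (; last + 1 < alloc_end; ++last) {
    last[1] = last[0];
  }
}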
| 2121 | |
| 2122 | void nmethod::copy_scopes_data(u_char* buffer, int size) { | 
| 2123 |   assert(scopes_data_size() >= size, "oob"); | 
| 2124 | memcpy(scopes_data_begin(), buffer, size); | 
| 2125 | } | 
| 2126 | |
| 2127 | #ifdef ASSERT | 
| 2128 | static PcDesc* linear_search(const PcDescSearch& search, int pc_offset, bool approximate) { | 
| 2129 | PcDesc* lower = search.scopes_pcs_begin(); | 
| 2130 | PcDesc* upper = search.scopes_pcs_end(); | 
| 2131 | lower += 1; // exclude initial sentinel | 
| 2132 |   PcDesc* res = NULL; | 
| 2133 | for (PcDesc* p = lower; p < upper; p++) { | 
| 2134 |     NOT_PRODUCT(--pc_nmethod_stats.pc_desc_tests);  // don't count this call to match_desc | 
| 2135 | if (match_desc(p, pc_offset, approximate)) { | 
| 2136 |       if (res == NULL) | 
| 2137 | res = p; | 
| 2138 | else | 
| 2139 |         res = (PcDesc*) badAddress; | 
| 2140 | } | 
| 2141 | } | 
| 2142 | return res; | 
| 2143 | } | 
| 2144 | #endif | 
| 2145 | |
| 2146 | |
| 2147 | // Finds a PcDesc with real-pc equal to "pc" | 
| 2148 | PcDesc* PcDescContainer::find_pc_desc_internal(address pc, bool approximate, const PcDescSearch& search) { | 
| 2149 | address base_address = search.code_begin(); | 
| 2150 | if ((pc < base_address) || | 
| 2151 | (pc - base_address) >= (ptrdiff_t) PcDesc::upper_offset_limit) { | 
| 2152 |     return NULL;  // PC is wildly out of range | 
| 2153 | } | 
| 2154 | int pc_offset = (int) (pc - base_address); | 
| 2155 | |
| 2156 | // Check the PcDesc cache if it contains the desired PcDesc | 
| 2157 |   // (This has an almost 100% hit rate.) | 
| 2158 | PcDesc* res = _pc_desc_cache.find_pc_desc(pc_offset, approximate); | 
| 2159 |   if (res != NULL) { | 
| 2160 |     assert(res == linear_search(search, pc_offset, approximate), "cache ok"); | 
| 2161 | return res; | 
| 2162 | } | 
| 2163 | |
| 2164 | // Fallback algorithm: quasi-linear search for the PcDesc | 
| 2165 | // Find the last pc_offset less than the given offset. | 
| 2166 | // The successor must be the required match, if there is a match at all. | 
| 2167 | // (Use a fixed radix to avoid expensive affine pointer arithmetic.) | 
| 2168 | PcDesc* lower = search.scopes_pcs_begin(); | 
| 2169 | PcDesc* upper = search.scopes_pcs_end(); | 
| 2170 | upper -= 1; // exclude final sentinel | 
| 2171 |   if (lower >= upper)  return NULL;  // native method; no PcDescs at all | 
| 2172 | |
| 2173 | #define assert_LU_OK \ | 
| 2174 | /* invariant on lower..upper during the following search: */ \ | 
| 2175 |   assert(lower->pc_offset() <  pc_offset, "sanity"); \ | 
| 2176 |   assert(upper->pc_offset() >= pc_offset, "sanity") | 
| 2177 | assert_LU_OK; | 
| 2178 | |
| 2179 | // Use the last successful return as a split point. | 
| 2180 | PcDesc* mid = _pc_desc_cache.last_pc_desc(); | 
| 2181 |   NOT_PRODUCT(++pc_nmethod_stats.pc_desc_searches); | 
| 2182 | if (mid->pc_offset() < pc_offset) { | 
| 2183 | lower = mid; | 
| 2184 | } else { | 
| 2185 | upper = mid; | 
| 2186 | } | 
| 2187 | |
| 2188 | // Take giant steps at first (4096, then 256, then 16, then 1) | 
| 2189 |   const int LOG2_RADIX = 4 /*smaller steps in debug mode:*/ debug_only(-1); | 
| 2190 | const int RADIX = (1 << LOG2_RADIX); | 
| 2191 | for (int step = (1 << (LOG2_RADIX*3)); step > 1; step >>= LOG2_RADIX) { | 
| 2192 | while ((mid = lower + step) < upper) { | 
| 2193 | assert_LU_OK; | 
| 2194 |       NOT_PRODUCT(++pc_nmethod_stats.pc_desc_searches); | 
| 2195 | if (mid->pc_offset() < pc_offset) { | 
| 2196 | lower = mid; | 
| 2197 | } else { | 
| 2198 | upper = mid; | 
| 2199 | break; | 
| 2200 | } | 
| 2201 | } | 
| 2202 | assert_LU_OK; | 
| 2203 | } | 
| 2204 | |
| 2205 | // Sneak up on the value with a linear search of length ~16. | 
| 2206 | while (true) { | 
| 2207 | assert_LU_OK; | 
| 2208 | mid = lower + 1; | 
| 2209 |     NOT_PRODUCT(++pc_nmethod_stats.pc_desc_searches); | 
| 2210 | if (mid->pc_offset() < pc_offset) { | 
| 2211 | lower = mid; | 
| 2212 | } else { | 
| 2213 | upper = mid; | 
| 2214 | break; | 
| 2215 | } | 
| 2216 | } | 
| 2217 | #undef assert_LU_OK | 
| 2218 | |
| 2219 | if (match_desc(upper, pc_offset, approximate)) { | 
| 2220 |     assert(upper == linear_search(search, pc_offset, approximate), "search ok"); | 
| 2221 | _pc_desc_cache.add_pc_desc(upper); | 
| 2222 | return upper; | 
| 2223 | } else { | 
| 2224 |     assert(NULL == linear_search(search, pc_offset, approximate), "search ok"); | 
| 2225 |     return NULL; | 
| 2226 | } | 
| 2227 | } | 
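The fallback search above takes shrinking power-of-16 steps before finishing with unit steps. A standalone sketch of the same scheme on a sorted int array, assuming a[lo] < key and a[hi] >= key on entry (illustrative only, not the HotSpot routine):

int radix_step_search(const int* a, int lo, int hi, int key) {
  const int LOG2_RADIX = 4;                                   // steps of 4096, then 256, then 16
  for (int step = 1 << (LOG2_RADIX * 3); step > 1; step >>= LOG2_RADIX) {
    int mid;
    while ((mid = lo + step) < hi) {
      if (a[mid] < key) { lo = mid; } else { hi = mid; break; }
    }
  }
  while (true) {                                              // final linear steps of 1
    int mid = lo + 1;
    if (a[mid] < key) { lo = mid; } else { hi = mid; break; }
  }
  return hi;                                                  // first index with a[hi] >= key
}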
| 2228 | |
| 2229 | |
| 2230 | void nmethod::check_all_dependencies(DepChange& changes) { | 
| 2231 | // Checked dependencies are allocated into this ResourceMark | 
| 2232 | ResourceMark rm; | 
| 2233 | |
| 2234 | // Turn off dependency tracing while actually testing dependencies. | 
| 2235 |   NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) ); | 
| 2236 | |
| 2237 | typedef ResourceHashtable<DependencySignature, int, 11027, | 
| 2238 | ResourceObj::RESOURCE_AREA, mtInternal, | 
| 2239 | &DependencySignature::hash, | 
| 2240 | &DependencySignature::equals> DepTable; | 
| 2241 | |
| 2242 | DepTable* table = new DepTable(); | 
| 2243 | |
| 2244 | // Iterate over live nmethods and check dependencies of all nmethods that are not | 
| 2245 | // marked for deoptimization. A particular dependency is only checked once. | 
| 2246 | NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading); | 
| 2247 | while(iter.next()) { | 
| 2248 | nmethod* nm = iter.method(); | 
| 2249 | // Only notify for live nmethods | 
| 2250 | if (!nm->is_marked_for_deoptimization()) { | 
| 2251 | for (Dependencies::DepStream deps(nm); deps.next(); ) { | 
| 2252 | // Construct abstraction of a dependency. | 
| 2253 | DependencySignature* current_sig = new DependencySignature(deps); | 
| 2254 | |
| 2255 | // Determine if dependency is already checked. table->put(...) returns | 
| 2256 | // 'true' if the dependency is added (i.e., was not in the hashtable). | 
| 2257 | if (table->put(*current_sig, 1)) { | 
| 2258 |           if (deps.check_dependency() != NULL) { | 
| 2259 | // Dependency checking failed. Print out information about the failed | 
| 2260 | // dependency and finally fail with an assert. We can fail here, since | 
| 2261 | // dependency checking is never done in a product build. | 
| 2262 | tty->print_cr("Failed dependency:"); | 
| 2263 | changes.print(); | 
| 2264 | nm->print(); | 
| 2265 | nm->print_dependencies(); | 
| 2266 |             assert(false, "Should have been marked for deoptimization"); | 
| 2267 | } | 
| 2268 | } | 
| 2269 | } | 
| 2270 | } | 
| 2271 | } | 
| 2272 | } | 
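The DepTable above exists only so that each distinct dependency signature is checked once. A minimal sketch of the same de-duplication pattern using the standard library, with DependencySignature replaced by a plain string key (illustrative only):

#include <string>
#include <unordered_set>
#include <vector>

void check_each_once(const std::vector<std::string>& signatures) {
  std::unordered_set<std::string> seen;
  for (const std::string& sig : signatures) {
    if (seen.insert(sig).second) {     // true only the first time this signature is seen
      // ... run the expensive dependency check here ...
    }
  }
}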
| 2273 | |
| 2274 | bool nmethod::check_dependency_on(DepChange& changes) { | 
| 2275 | // What has happened: | 
| 2276 | // 1) a new class dependee has been added | 
| 2277 | // 2) dependee and all its super classes have been marked | 
| 2278 | bool found_check = false; // set true if we are upset | 
| 2279 | for (Dependencies::DepStream deps(this); deps.next(); ) { | 
| 2280 | // Evaluate only relevant dependencies. | 
| 2281 |     if (deps.spot_check_dependency_at(changes) != NULL) { | 
| 2282 | found_check = true; | 
| 2283 | NOT_DEBUG(break); | 
| 2284 | } | 
| 2285 | } | 
| 2286 | return found_check; | 
| 2287 | } | 
| 2288 | |
| 2289 | // Called from mark_for_deoptimization, when dependee is invalidated. | 
| 2290 | bool nmethod::is_dependent_on_method(Method* dependee) { | 
| 2291 | for (Dependencies::DepStream deps(this); deps.next(); ) { | 
| 2292 | if (deps.type() != Dependencies::evol_method) | 
| 2293 | continue; | 
| 2294 | Method* method = deps.method_argument(0); | 
| 2295 | if (method == dependee) return true; | 
| 2296 | } | 
| 2297 | return false; | 
| 2298 | } | 
| 2299 | |
| 2300 | |
| 2301 | bool nmethod::is_patchable_at(address instr_addr) { | 
| 2302 |   assert(insts_contains(instr_addr), "wrong nmethod used"); | 
| 2303 | if (is_zombie()) { | 
| 2304 | // a zombie may never be patched | 
| 2305 | return false; | 
| 2306 | } | 
| 2307 | return true; | 
| 2308 | } | 
| 2309 | |
| 2310 | |
| 2311 | void nmethod_init() { | 
| 2312 | // make sure you didn't forget to adjust the filler fields | 
| 2313 |   assert(sizeof(nmethod) % oopSize == 0, "nmethod size must be multiple of a word"); | 
| 2314 | } | 
| 2315 | |
| 2316 | |
| 2317 | //------------------------------------------------------------------------------------------- | 
| 2318 | |
| 2319 | |
| 2320 | // QQQ might we make this work from a frame?? | 
| 2321 | nmethodLocker::nmethodLocker(address pc) { | 
| 2322 | CodeBlob* cb = CodeCache::find_blob(pc); | 
| 2323 |   guarantee(cb != NULL && cb->is_compiled(), "bad pc for a nmethod found"); | 
| 2324 | _nm = cb->as_compiled_method(); | 
| 2325 | lock_nmethod(_nm); | 
| 2326 | } | 
| 2327 | |
| 2328 | // Only JvmtiDeferredEvent::compiled_method_unload_event() | 
| 2329 | // should pass zombie_ok == true. | 
| 2330 | void nmethodLocker::lock_nmethod(CompiledMethod* cm, bool zombie_ok) { | 
| 2331 |   if (cm == NULL)  return; | 
| 2332 | nmethod* nm = cm->as_nmethod(); | 
| 2333 | Atomic::inc(&nm->_lock_count); | 
| 2334 |   assert(zombie_ok || !nm->is_zombie(), "cannot lock a zombie method: %p", nm); | 
| 2335 | } | 
| 2336 | |
| 2337 | void nmethodLocker::unlock_nmethod(CompiledMethod* cm) { | 
| 2338 |   if (cm == NULL)  return; | 
| 2339 | nmethod* nm = cm->as_nmethod(); | 
| 2340 | Atomic::dec(&nm->_lock_count); | 
| 2341 |   assert(nm->_lock_count >= 0, "unmatched nmethod lock/unlock"); | 
| 2342 | } | 
| 2343 | |
| 2344 | |
| 2345 | // ----------------------------------------------------------------------------- | 
| 2346 | // Verification | 
| 2347 | |
| 2348 | class VerifyOopsClosure: public OopClosure { | 
| 2349 | nmethod* _nm; | 
| 2350 | bool _ok; | 
| 2351 | public: | 
| 2352 | VerifyOopsClosure(nmethod* nm) : _nm(nm), _ok(true) { } | 
| 2353 | bool ok() { return _ok; } | 
| 2354 | virtual void do_oop(oop* p) { | 
| 2355 | if (oopDesc::is_oop_or_null(*p)) return; | 
| 2356 | // Print diagnostic information before calling print_nmethod(). | 
| 2357 | // Assertions therein might prevent call from returning. | 
| 2358 |     tty->print_cr("*** non-oop " PTR_FORMAT " found at " PTR_FORMAT " (offset %d)", | 
| 2359 | p2i(*p), p2i(p), (int)((intptr_t)p - (intptr_t)_nm)); | 
| 2360 | if (_ok) { | 
| 2361 | _nm->print_nmethod(true); | 
| 2362 | _ok = false; | 
| 2363 | } | 
| 2364 | } | 
| 2365 |   virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); } | 
| 2366 | }; | 
| 2367 | |
| 2368 | class VerifyMetadataClosure: public MetadataClosure { | 
| 2369 | public: | 
| 2370 | void do_metadata(Metadata* md) { | 
| 2371 | if (md->is_method()) { | 
| 2372 | Method* method = (Method*)md; | 
| 2373 |       assert(!method->is_old(), "Should not be installing old methods"); | 
| 2374 | } | 
| 2375 | } | 
| 2376 | }; | 
| 2377 | |
| 2378 | |
| 2379 | void nmethod::verify() { | 
| 2380 | |
| 2381 | // Hmm. OSR methods can be deopted but not marked as zombie or not_entrant | 
| 2382 | // seems odd. | 
| 2383 | |
| 2384 | if (is_zombie() || is_not_entrant() || is_unloaded()) | 
| 2385 | return; | 
| 2386 | |
| 2387 | // Make sure all the entry points are correctly aligned for patching. | 
| 2388 | NativeJump::check_verified_entry_alignment(entry_point(), verified_entry_point()); | 
| 2389 | |
| 2390 | // assert(oopDesc::is_oop(method()), "must be valid"); | 
| 2391 | |
| 2392 | ResourceMark rm; | 
| 2393 | |
| 2394 | if (!CodeCache::contains(this)) { | 
| 2395 |     fatal("nmethod at " INTPTR_FORMAT " not in zone", p2i(this)); | 
| 2396 | } | 
| 2397 | |
| 2398 | if(is_native_method() ) | 
| 2399 | return; | 
| 2400 | |
| 2401 | nmethod* nm = CodeCache::find_nmethod(verified_entry_point()); | 
| 2402 | if (nm != this) { | 
| 2403 |     fatal("findNMethod did not find this nmethod (" INTPTR_FORMAT ")", p2i(this)); | 
| 2404 | } | 
| 2405 | |
| 2406 | for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) { | 
| 2407 | if (! p->verify(this)) { | 
| 2408 |       tty->print_cr("\t\tin nmethod at " INTPTR_FORMAT " (pcs)", p2i(this)); | 
| 2409 | } | 
| 2410 | } | 
| 2411 | |
| 2412 | #ifdef ASSERT | 
| 2413 | #if INCLUDE_JVMCI | 
| 2414 | { | 
| 2415 | // Verify that implicit exceptions that deoptimize have a PcDesc and OopMap | 
| 2416 | ImmutableOopMapSet* oms = oop_maps(); | 
| 2417 | ImplicitExceptionTable implicit_table(this); | 
| 2418 | for (uint i = 0; i < implicit_table.len(); i++) { | 
| 2419 | int exec_offset = (int) implicit_table.get_exec_offset(i); | 
| 2420 | if (implicit_table.get_exec_offset(i) == implicit_table.get_cont_offset(i)) { | 
| 2421 |         assert(pc_desc_at(code_begin() + exec_offset) != NULL, "missing PcDesc"); | 
| 2422 | bool found = false; | 
| 2423 | for (int i = 0, imax = oms->count(); i < imax; i++) { | 
| 2424 | if (oms->pair_at(i)->pc_offset() == exec_offset) { | 
| 2425 | found = true; | 
| 2426 | break; | 
| 2427 | } | 
| 2428 | } | 
| 2429 |         assert(found, "missing oopmap"); | 
| 2430 | } | 
| 2431 | } | 
| 2432 | } | 
| 2433 | #endif | 
| 2434 | #endif | 
| 2435 | |
| 2436 | VerifyOopsClosure voc(this); | 
| 2437 | oops_do(&voc); | 
| 2438 |   assert(voc.ok(), "embedded oops must be OK"); | 
| 2439 | Universe::heap()->verify_nmethod(this); | 
| 2440 | |
| 2441 |   assert(_oops_do_mark_link == NULL, "_oops_do_mark_link for %s should be NULL but is " PTR_FORMAT, | 
| 2442 |          nm->method()->external_name(), p2i(_oops_do_mark_link)); | 
| 2443 | verify_scopes(); | 
| 2444 | |
| 2445 | CompiledICLocker nm_verify(this); | 
| 2446 | VerifyMetadataClosure vmc; | 
| 2447 | metadata_do(&vmc); | 
| 2448 | } | 
| 2449 | |
| 2450 | |
| 2451 | void nmethod::verify_interrupt_point(address call_site) { | 
| 2452 | |
| 2453 | // Verify IC only when nmethod installation is finished. | 
| 2454 | if (!is_not_installed()) { | 
| 2455 | if (CompiledICLocker::is_safe(this)) { | 
| 2456 | CompiledIC_at(this, call_site); | 
| 2457 | } else { | 
| 2458 | CompiledICLocker ml_verify(this); | 
| 2459 | CompiledIC_at(this, call_site); | 
| 2460 | } | 
| 2461 | } | 
| 2462 | |
| 2463 | HandleMark hm(Thread::current()); | 
| 2464 | |
| 2465 | PcDesc* pd = pc_desc_at(nativeCall_at(call_site)->return_address()); | 
| 2466 |   assert(pd != NULL, "PcDesc must exist"); | 
| 2467 | for (ScopeDesc* sd = new ScopeDesc(this, pd); | 
| 2468 | !sd->is_top(); sd = sd->sender()) { | 
| 2469 | sd->verify(); | 
| 2470 | } | 
| 2471 | } | 
| 2472 | |
| 2473 | void nmethod::verify_scopes() { | 
| 2474 | if( !method() ) return; // Runtime stubs have no scope | 
| 2475 | if (method()->is_native()) return; // Ignore stub methods. | 
| 2476 |   // iterate through all interrupt points | 
| 2477 | // and verify the debug information is valid. | 
| 2478 | RelocIterator iter((nmethod*)this); | 
| 2479 | while (iter.next()) { | 
| 2480 |     address stub = NULL; | 
| 2481 | switch (iter.type()) { | 
| 2482 | case relocInfo::virtual_call_type: | 
| 2483 | verify_interrupt_point(iter.addr()); | 
| 2484 | break; | 
| 2485 | case relocInfo::opt_virtual_call_type: | 
| 2486 | stub = iter.opt_virtual_call_reloc()->static_stub(); | 
| 2487 | verify_interrupt_point(iter.addr()); | 
| 2488 | break; | 
| 2489 | case relocInfo::static_call_type: | 
| 2490 | stub = iter.static_call_reloc()->static_stub(); | 
| 2491 | //verify_interrupt_point(iter.addr()); | 
| 2492 | break; | 
| 2493 | case relocInfo::runtime_call_type: | 
| 2494 | case relocInfo::runtime_call_w_cp_type: { | 
| 2495 | address destination = iter.reloc()->value(); | 
| 2496 | // Right now there is no way to find out which entries support | 
| 2497 | // an interrupt point. It would be nice if we had this | 
| 2498 | // information in a table. | 
| 2499 | break; | 
| 2500 | } | 
| 2501 | default: | 
| 2502 | break; | 
| 2503 | } | 
| 2504 |     assert(stub == NULL || stub_contains(stub), "static call stub outside stub section"); | 
| 2505 | } | 
| 2506 | } | 
| 2507 | |
| 2508 | |
| 2509 | // ----------------------------------------------------------------------------- | 
| 2510 | // Printing operations | 
| 2511 | |
| 2512 | void nmethod::print() const { | 
| 2513 | ttyLocker ttyl; // keep the following output all in one block | 
| 2514 | print(tty); | 
| 2515 | } | 
| 2516 | |
| 2517 | void nmethod::print(outputStream* st) const { | 
| 2518 | ResourceMark rm; | 
| 2519 | |
| 2520 | st->print("Compiled method "); | 
| 2521 | |
| 2522 | if (is_compiled_by_c1()) { | 
| 2523 | st->print("(c1) "); | 
| 2524 | } else if (is_compiled_by_c2()) { | 
| 2525 | st->print("(c2) "); | 
| 2526 | } else if (is_compiled_by_jvmci()) { | 
| 2527 | st->print("(JVMCI) "); | 
| 2528 | } else { | 
| 2529 | st->print("(n/a) "); | 
| 2530 | } | 
| 2531 | |
| 2532 |   print_on(st, NULL); | 
| 2533 | |
| 2534 | if (WizardMode) { | 
| 2535 |     st->print("((nmethod*) " INTPTR_FORMAT ") ", p2i(this)); | 
| 2536 |     st->print(" for method " INTPTR_FORMAT , p2i(method())); | 
| 2537 | st->print(" { "); | 
| 2538 | st->print_cr("%s ", state()); | 
| 2539 | st->print_cr("}:"); | 
| 2540 | } | 
| 2541 |   if (size () > 0) st->print_cr(" total in heap [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", | 
| 2542 | p2i(this), | 
| 2543 | p2i(this) + size(), | 
| 2544 | size()); | 
| 2545 |   if (relocation_size () > 0) st->print_cr(" relocation [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", | 
| 2546 | p2i(relocation_begin()), | 
| 2547 | p2i(relocation_end()), | 
| 2548 | relocation_size()); | 
| 2549 |   if (consts_size () > 0) st->print_cr(" constants [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", | 
| 2550 | p2i(consts_begin()), | 
| 2551 | p2i(consts_end()), | 
| 2552 | consts_size()); | 
| 2553 |   if (insts_size () > 0) st->print_cr(" main code [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", | 
| 2554 | p2i(insts_begin()), | 
| 2555 | p2i(insts_end()), | 
| 2556 | insts_size()); | 
| 2557 |   if (stub_size () > 0) st->print_cr(" stub code [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", | 
| 2558 | p2i(stub_begin()), | 
| 2559 | p2i(stub_end()), | 
| 2560 | stub_size()); | 
| 2561 |   if (oops_size () > 0) st->print_cr(" oops [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", | 
| 2562 | p2i(oops_begin()), | 
| 2563 | p2i(oops_end()), | 
| 2564 | oops_size()); | 
| 2565 |   if (metadata_size () > 0) st->print_cr(" metadata [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", | 
| 2566 | p2i(metadata_begin()), | 
| 2567 | p2i(metadata_end()), | 
| 2568 | metadata_size()); | 
| 2569 |   if (scopes_data_size () > 0) st->print_cr(" scopes data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", | 
| 2570 | p2i(scopes_data_begin()), | 
| 2571 | p2i(scopes_data_end()), | 
| 2572 | scopes_data_size()); | 
| 2573 |   if (scopes_pcs_size () > 0) st->print_cr(" scopes pcs [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", | 
| 2574 | p2i(scopes_pcs_begin()), | 
| 2575 | p2i(scopes_pcs_end()), | 
| 2576 | scopes_pcs_size()); | 
| 2577 |   if (dependencies_size () > 0) st->print_cr(" dependencies [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", | 
| 2578 | p2i(dependencies_begin()), | 
| 2579 | p2i(dependencies_end()), | 
| 2580 | dependencies_size()); | 
| 2581 |   if (handler_table_size() > 0) st->print_cr(" handler table [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", | 
| 2582 | p2i(handler_table_begin()), | 
| 2583 | p2i(handler_table_end()), | 
| 2584 | handler_table_size()); | 
| 2585 |   if (nul_chk_table_size() > 0) st->print_cr(" nul chk table [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", | 
| 2586 | p2i(nul_chk_table_begin()), | 
| 2587 | p2i(nul_chk_table_end()), | 
| 2588 | nul_chk_table_size()); | 
| 2589 | #if INCLUDE_JVMCI | 
| 2590 |   if (speculations_size () > 0) st->print_cr(" speculations [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", | 
| 2591 | p2i(speculations_begin()), | 
| 2592 | p2i(speculations_end()), | 
| 2593 | speculations_size()); | 
| 2594 |   if (jvmci_data_size () > 0) st->print_cr(" JVMCI data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", | 
| 2595 | p2i(jvmci_data_begin()), | 
| 2596 | p2i(jvmci_data_end()), | 
| 2597 | jvmci_data_size()); | 
| 2598 | #endif | 
| 2599 | } | 
| 2600 | |
| 2601 | void nmethod::print_code() { | 
| 2602 | ResourceMark m; | 
| 2603 | ttyLocker ttyl; | 
| 2604 | // Call the specialized decode method of this class. | 
| 2605 | decode(tty); | 
| 2606 | } | 
| 2607 | |
| 2608 | #ifndef PRODUCT // called InstanceKlass methods are available only then. Declared as PRODUCT_RETURN | 
| 2609 | |
| 2610 | void nmethod::print_dependencies() { | 
| 2611 | ResourceMark rm; | 
| 2612 | ttyLocker ttyl; // keep the following output all in one block | 
| 2613 | tty->print_cr("Dependencies:"); | 
| 2614 | for (Dependencies::DepStream deps(this); deps.next(); ) { | 
| 2615 | deps.print_dependency(); | 
| 2616 | Klass* ctxk = deps.context_type(); | 
| 2617 |     if (ctxk != NULL) { | 
| 2618 | if (ctxk->is_instance_klass() && InstanceKlass::cast(ctxk)->is_dependent_nmethod(this)) { | 
| 2619 | tty->print_cr(" [nmethod<=klass]%s", ctxk->external_name()); | 
| 2620 | } | 
| 2621 | } | 
| 2622 | deps.log_dependency(); // put it into the xml log also | 
| 2623 | } | 
| 2624 | } | 
| 2625 | #endif | 
| 2626 | |
| 2627 | #if defined(SUPPORT_DATA_STRUCTS) | 
| 2628 | |
| 2629 | // Print the oops from the underlying CodeBlob. | 
| 2630 | void nmethod::print_oops(outputStream* st) { | 
| 2631 | ResourceMark m; | 
| 2632 | st->print("Oops:"); | 
| 2633 | if (oops_begin() < oops_end()) { | 
| 2634 | st->cr(); | 
| 2635 | for (oop* p = oops_begin(); p < oops_end(); p++) { | 
| 2636 | Disassembler::print_location((unsigned char*)p, (unsigned char*)oops_begin(), (unsigned char*)oops_end(), st, true, false); | 
| 2637 |       st->print(PTR_FORMAT " ", *((uintptr_t*)p)); | 
| 2638 | if (Universe::contains_non_oop_word(p)) { | 
| 2639 | st->print_cr("NON_OOP"); | 
| 2640 | continue; // skip non-oops | 
| 2641 | } | 
| 2642 |       if (*p == NULL) { | 
| 2643 | st->print_cr("NULL-oop"); | 
| 2644 | continue; // skip non-oops | 
| 2645 | } | 
| 2646 | (*p)->print_value_on(st); | 
| 2647 | st->cr(); | 
| 2648 | } | 
| 2649 | } else { | 
| 2650 | st->print_cr(" <list empty>"); | 
| 2651 | } | 
| 2652 | } | 
| 2653 | |
| 2654 | // Print metadata pool. | 
| 2655 | void nmethod::print_metadata(outputStream* st) { | 
| 2656 | ResourceMark m; | 
| 2657 | st->print("Metadata:"); | 
| 2658 | if (metadata_begin() < metadata_end()) { | 
| 2659 | st->cr(); | 
| 2660 | for (Metadata** p = metadata_begin(); p < metadata_end(); p++) { | 
| 2661 | Disassembler::print_location((unsigned char*)p, (unsigned char*)metadata_begin(), (unsigned char*)metadata_end(), st, true, false); | 
| 2662 |       st->print(PTR_FORMAT " ", *((uintptr_t*)p)); | 
| 2663 | if (*p && *p != Universe::non_oop_word()) { | 
| 2664 | (*p)->print_value_on(st); | 
| 2665 | } | 
| 2666 | st->cr(); | 
| 2667 | } | 
| 2668 | } else { | 
| 2669 | st->print_cr(" <list empty>"); | 
| 2670 | } | 
| 2671 | } | 
| 2672 | |
| 2673 | #ifndef PRODUCT // ScopeDesc::print_on() is available only then. Declared as PRODUCT_RETURN | 
| 2674 | void nmethod::print_scopes_on(outputStream* st) { | 
| 2675 | // Find the first pc desc for all scopes in the code and print it. | 
| 2676 | ResourceMark rm; | 
| 2677 | st->print("scopes:"); | 
| 2678 | if (scopes_pcs_begin() < scopes_pcs_end()) { | 
| 2679 | st->cr(); | 
| 2680 | for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) { | 
| 2681 | if (p->scope_decode_offset() == DebugInformationRecorder::serialized_null) | 
| 2682 | continue; | 
| 2683 | |
| 2684 | ScopeDesc* sd = scope_desc_at(p->real_pc(this)); | 
| 2685 |       while (sd != NULL) { | 
| 2686 | sd->print_on(st, p); // print output ends with a newline | 
| 2687 | sd = sd->sender(); | 
| 2688 | } | 
| 2689 | } | 
| 2690 | } else { | 
| 2691 | st->print_cr(" <list empty>"); | 
| 2692 | } | 
| 2693 | } | 
| 2694 | #endif | 
| 2695 | |
| 2696 | #ifndef PRODUCT // RelocIterator does support printing only then. | 
| 2697 | void nmethod::print_relocations() { | 
| 2698 | ResourceMark m; // in case methods get printed via the debugger | 
| 2699 | tty->print_cr("relocations:"); | 
| 2700 | RelocIterator iter(this); | 
| 2701 | iter.print(); | 
| 2702 | } | 
| 2703 | #endif | 
| 2704 | |
| 2705 | void nmethod::print_pcs_on(outputStream* st) { | 
| 2706 | ResourceMark m; // in case methods get printed via debugger | 
| 2707 | st->print("pc-bytecode offsets:"); | 
| 2708 | if (scopes_pcs_begin() < scopes_pcs_end()) { | 
| 2709 | st->cr(); | 
| 2710 | for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) { | 
| 2711 | p->print_on(st, this); // print output ends with a newline | 
| 2712 | } | 
| 2713 | } else { | 
| 2714 | st->print_cr(" <list empty>"); | 
| 2715 | } | 
| 2716 | } | 
| 2717 | |
| 2718 | void nmethod::print_native_invokers() { | 
| 2719 | ResourceMark m; // in case methods get printed via debugger | 
| 2720 | tty->print_cr("Native invokers:"); | 
| 2721 | for (RuntimeStub** itt = native_invokers_begin(); itt < native_invokers_end(); itt++) { | 
| 2722 | (*itt)->print_on(tty); | 
| 2723 | } | 
| 2724 | } | 
| 2725 | |
| 2726 | void nmethod::print_handler_table() { | 
| 2727 | ExceptionHandlerTable(this).print(code_begin()); | 
| 2728 | } | 
| 2729 | |
| 2730 | void nmethod::print_nul_chk_table() { | 
| 2731 | ImplicitExceptionTable(this).print(code_begin()); | 
| 2732 | } | 
| 2733 | |
| 2734 | void nmethod::print_recorded_oop(int log_n, int i) { | 
| 2735 | void* value; | 
| 2736 | |
| 2737 | if (i == 0) { | 
| 2738 |     value = NULL; | 
| 2739 | } else { | 
| 2740 | // Be careful around non-oop words. Don't create an oop | 
| 2741 | // with that value, or it will assert in verification code. | 
| 2742 | if (Universe::contains_non_oop_word(oop_addr_at(i))) { | 
| 2743 | value = Universe::non_oop_word(); | 
| 2744 | } else { | 
| 2745 | value = oop_at(i); | 
| 2746 | } | 
| 2747 | } | 
| 2748 | |
| 2749 |   tty->print("#%*d: " INTPTR_FORMAT " ", log_n, i, p2i(value)); | 
| 2750 | |
| 2751 | if (value == Universe::non_oop_word()) { | 
| 2752 | tty->print("non-oop word"); | 
| 2753 | } else { | 
| 2754 | if (value == 0) { | 
| 2755 | tty->print("NULL-oop"); | 
| 2756 | } else { | 
| 2757 | oop_at(i)->print_value_on(tty); | 
| 2758 | } | 
| 2759 | } | 
| 2760 | |
| 2761 | tty->cr(); | 
| 2762 | } | 
| 2763 | |
| 2764 | void nmethod::print_recorded_oops() { | 
| 2765 | const int n = oops_count(); | 
| 2766 | const int log_n = (n<10) ? 1 : (n<100) ? 2 : (n<1000) ? 3 : (n<10000) ? 4 : 6; | 
| 2767 | tty->print("Recorded oops:"); | 
| 2768 | if (n > 0) { | 
| 2769 | tty->cr(); | 
| 2770 | for (int i = 0; i < n; i++) { | 
| 2771 | print_recorded_oop(log_n, i); | 
| 2772 | } | 
| 2773 | } else { | 
| 2774 | tty->print_cr(" <list empty>"); | 
| 2775 | } | 
| 2776 | } | 
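The log_n value above is simply a printf field width chosen so that all indices line up in one column. A small sketch of that formatting trick (illustrative only, not HotSpot code):

#include <cstdio>

void print_indexed(int n) {
  const int log_n = (n < 10) ? 1 : (n < 100) ? 2 : (n < 1000) ? 3 : (n < 10000) ? 4 : 6;
  for (int i = 0; i < n; i++) {
    std::printf("#%*d: ...\n", log_n, i);   // right-align i in a log_n-wide column
  }
}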
| 2777 | |
| 2778 | void nmethod::print_recorded_metadata() { | 
| 2779 | const int n = metadata_count(); | 
| 2780 | const int log_n = (n<10) ? 1 : (n<100) ? 2 : (n<1000) ? 3 : (n<10000) ? 4 : 6; | 
| 2781 | tty->print("Recorded metadata:"); | 
| 2782 | if (n > 0) { | 
| 2783 | tty->cr(); | 
| 2784 | for (int i = 0; i < n; i++) { | 
| 2785 | Metadata* m = metadata_at(i); | 
| 2786 |       tty->print("#%*d: " INTPTR_FORMAT " ", log_n, i, p2i(m)); | 
| 2787 | if (m == (Metadata*)Universe::non_oop_word()) { | 
| 2788 | tty->print("non-metadata word"); | 
| 2789 |       } else if (m == NULL) { | 
| 2790 | tty->print("NULL-oop"); | 
| 2791 | } else { | 
| 2792 | Metadata::print_value_on_maybe_null(tty, m); | 
| 2793 | } | 
| 2794 | tty->cr(); | 
| 2795 | } | 
| 2796 | } else { | 
| 2797 | tty->print_cr(" <list empty>"); | 
| 2798 | } | 
| 2799 | } | 
| 2800 | #endif | 
| 2801 | |
| 2802 | #if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY) | 
| 2803 | |
| 2804 | void nmethod::print_constant_pool(outputStream* st) { | 
| 2805 | //----------------------------------- | 
| 2806 | //---< Print the constant pool >--- | 
| 2807 | //----------------------------------- | 
| 2808 | int consts_size = this->consts_size(); | 
| 2809 | if ( consts_size > 0 ) { | 
| 2810 | unsigned char* cstart = this->consts_begin(); | 
| 2811 | unsigned char* cp = cstart; | 
| 2812 | unsigned char* cend = cp + consts_size; | 
| 2813 | unsigned int bytes_per_line = 4; | 
| 2814 | unsigned int CP_alignment = 8; | 
| 2815 | unsigned int n; | 
| 2816 | |
| 2817 | st->cr(); | 
| 2818 | |
| 2819 | //---< print CP header to make clear what's printed >--- | 
| 2820 | if( ((uintptr_t)cp&(CP_alignment-1)) == 0 ) { | 
| 2821 | n = bytes_per_line; | 
| 2822 | st->print_cr("[Constant Pool]"); | 
| 2823 | Disassembler::print_location(cp, cstart, cend, st, true, true); | 
| 2824 | Disassembler::print_hexdata(cp, n, st, true); | 
| 2825 | st->cr(); | 
| 2826 | } else { | 
| 2827 | n = (uintptr_t)cp&(bytes_per_line-1); | 
| 2828 | st->print_cr("[Constant Pool (unaligned)]"); | 
| 2829 | } | 
| 2830 | |
| 2831 | //---< print CP contents, bytes_per_line at a time >--- | 
| 2832 | while (cp < cend) { | 
| 2833 | Disassembler::print_location(cp, cstart, cend, st, true, false); | 
| 2834 | Disassembler::print_hexdata(cp, n, st, false); | 
| 2835 | cp += n; | 
| 2836 | n = bytes_per_line; | 
| 2837 | st->cr(); | 
| 2838 | } | 
| 2839 | |
| 2840 | //---< Show potential alignment gap between constant pool and code >--- | 
| 2841 | cend = code_begin(); | 
| 2842 | if( cp < cend ) { | 
| 2843 | n = 4; | 
| 2844 | st->print_cr("[Code entry alignment]"); | 
| 2845 | while (cp < cend) { | 
| 2846 | Disassembler::print_location(cp, cstart, cend, st, false, false); | 
| 2847 | cp += n; | 
| 2848 | st->cr(); | 
| 2849 | } | 
| 2850 | } | 
| 2851 | } else { | 
| 2852 | st->print_cr("[Constant Pool (empty)]"); | 
| 2853 | } | 
| 2854 | st->cr(); | 
| 2855 | } | 
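The alignment check in print_constant_pool relies on the usual power-of-two mask trick. A standalone sketch, valid only when the alignment is a power of two (illustrative only):

#include <cstdint>

inline bool is_aligned_pow2(const void* p, uintptr_t alignment) {
  // Works because a power-of-two alignment minus one is a mask of the low bits.
  return (reinterpret_cast<uintptr_t>(p) & (alignment - 1)) == 0;
}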
| 2856 | |
| 2857 | #endif | 
| 2858 | |
| 2859 | // Disassemble this nmethod. | 
| 2860 | // Print additional debug information, if requested. This could be code | 
| 2861 | // comments, block comments, profiling counters, etc. | 
| 2862 | // The undisassembled format is useful when no disassembler library is available. | 
| 2863 | // The resulting hex dump (with markers) can be disassembled later, or on | 
| 2864 | // another system, when/where a disassembler library is available. | 
| 2865 | void nmethod::decode2(outputStream* ost) const { | 
| 2866 | |
| 2867 | // Called from frame::back_trace_with_decode without ResourceMark. | 
| 2868 | ResourceMark rm; | 
| 2869 | |
| 2870 | // Make sure we have a valid stream to print on. | 
| 2871 | outputStream* st = ost ? ost : tty; | 
| 2872 | |
| 2873 | #if defined(SUPPORT_ABSTRACT_ASSEMBLY) && ! defined(SUPPORT_ASSEMBLY) | 
| 2874 | const bool use_compressed_format = true; | 
| 2875 | const bool compressed_with_comments = use_compressed_format && (AbstractDisassembler::show_comment() || | 
| 2876 | AbstractDisassembler::show_block_comment()); | 
| 2877 | #else | 
| 2878 | const bool use_compressed_format = Disassembler::is_abstract(); | 
| 2879 | const bool compressed_with_comments = use_compressed_format && (AbstractDisassembler::show_comment() || | 
| 2880 | AbstractDisassembler::show_block_comment()); | 
| 2881 | #endif | 
| 2882 | |
| 2883 | // Decoding an nmethod can write to a PcDescCache (see PcDescCache::add_pc_desc) | 
| 2884 | MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, Thread::current());) | 
| 2885 | |
| 2886 | st->cr(); | 
| 2887 | this->print(st); | 
| 2888 | st->cr(); | 
| 2889 | |
| 2890 | #if defined(SUPPORT_ASSEMBLY) | 
| 2891 | //---------------------------------- | 
| 2892 | //---< Print real disassembly >--- | 
| 2893 | //---------------------------------- | 
| 2894 | if (! use_compressed_format) { | 
| 2895 | st->print_cr("[Disassembly]"); | 
| 2896 | Disassembler::decode(const_cast<nmethod*>(this), st); | 
| 2897 | st->bol(); | 
| 2898 | st->print_cr("[/Disassembly]"); | 
| 2899 | return; | 
| 2900 | } | 
| 2901 | #endif | 
| 2902 | |
| 2903 | #if defined(SUPPORT_ABSTRACT_ASSEMBLY) | 
| 2904 | |
| 2905 | // Compressed undisassembled disassembly format. | 
| 2906 |   // The following states are defined/supported: | 
| 2907 | // = 0 - currently at bol() position, nothing printed yet on current line. | 
| 2908 | // = 1 - currently at position after print_location(). | 
| 2909 | // > 1 - in the midst of printing instruction stream bytes. | 
| 2910 | int compressed_format_idx = 0; | 
| 2911 | int code_comment_column = 0; | 
| 2912 | const int instr_maxlen = Assembler::instr_maxlen(); | 
| 2913 | const uint tabspacing = 8; | 
| 2914 | unsigned char* start = this->code_begin(); | 
| 2915 | unsigned char* p = this->code_begin(); | 
| 2916 | unsigned char* end = this->code_end(); | 
| 2917 | unsigned char* pss = p; // start of a code section (used for offsets) | 
| 2918 | |
| 2919 |   if ((start == NULL) || (end == NULL)) { | 
| 2920 | st->print_cr("PrintAssembly not possible due to uninitialized section pointers"); | 
| 2921 | return; | 
| 2922 | } | 
| 2923 | #endif | 
| 2924 | |
| 2925 | #if defined(SUPPORT_ABSTRACT_ASSEMBLY) | 
| 2926 | //---< plain abstract disassembly, no comments or anything, just section headers >--- | 
| 2927 | if (use_compressed_format && ! compressed_with_comments) { | 
| 2928 | const_cast<nmethod*>(this)->print_constant_pool(st); | 
| 2929 | |
| 2930 | //---< Open the output (Marker for post-mortem disassembler) >--- | 
| 2931 | st->print_cr("[MachCode]"); | 
| 2932 |     const char* header = NULL; | 
| 2933 | address p0 = p; | 
| 2934 | while (p < end) { | 
| 2935 | address pp = p; | 
| 2936 |       while ((p < end) && (header == NULL)) { | 
| 2937 | header = nmethod_section_label(p); | 
| 2938 | pp = p; | 
| 2939 | p += Assembler::instr_len(p); | 
| 2940 | } | 
| 2941 | if (pp > p0) { | 
| 2942 | AbstractDisassembler::decode_range_abstract(p0, pp, start, end, st, Assembler::instr_maxlen()); | 
| 2943 | p0 = pp; | 
| 2944 | p = pp; | 
| 2945 |         header = NULL; | 
| 2946 |       } else if (header != NULL) { | 
| 2947 |         st->bol(); | 
| 2948 |         st->print_cr("%s", header); | 
| 2949 |         header = NULL; | 
| 2950 | } | 
| 2951 | } | 
| 2952 | //---< Close the output (Marker for post-mortem disassembler) >--- | 
| 2953 | st->bol(); | 
| 2954 | st->print_cr("[/MachCode]"); | 
| 2955 | return; | 
| 2956 | } | 
| 2957 | #endif | 
| 2958 | |
| 2959 | #if defined(SUPPORT_ABSTRACT_ASSEMBLY) | 
| 2960 | //---< abstract disassembly with comments and section headers merged in >--- | 
| 2961 | if (compressed_with_comments) { | 
| 2962 | const_cast<nmethod*>(this)->print_constant_pool(st); | 
| 2963 | |
| 2964 | //---< Open the output (Marker for post-mortem disassembler) >--- | 
| 2965 | st->print_cr("[MachCode]"); | 
| 2966 |     while ((p < end) && (p != NULL)) { | 
| 2967 | const int instruction_size_in_bytes = Assembler::instr_len(p); | 
| 2968 | |
| 2969 | //---< Block comments for nmethod. Interrupts instruction stream, if any. >--- | 
| 2970 | // Outputs a bol() before and a cr() after, but only if a comment is printed. | 
| 2971 | // Prints nmethod_section_label as well. | 
| 2972 | if (AbstractDisassembler::show_block_comment()) { | 
| 2973 | print_block_comment(st, p); | 
| 2974 | if (st->position() == 0) { | 
| 2975 | compressed_format_idx = 0; | 
| 2976 | } | 
| 2977 | } | 
| 2978 | |
| 2979 | //---< New location information after line break >--- | 
| 2980 | if (compressed_format_idx == 0) { | 
| 2981 | code_comment_column = Disassembler::print_location(p, pss, end, st, false, false); | 
| 2982 | compressed_format_idx = 1; | 
| 2983 | } | 
| 2984 | |
| 2985 | //---< Code comment for current instruction. Address range [p..(p+len)) >--- | 
| 2986 | unsigned char* p_end = p + (ssize_t)instruction_size_in_bytes; | 
| 2987 | S390_ONLY(if (p_end > end) p_end = end;) // avoid getting past the end | 
| 2988 | |
| 2989 | if (AbstractDisassembler::show_comment() && const_cast<nmethod*>(this)->has_code_comment(p, p_end)) { | 
| 2990 | //---< interrupt instruction byte stream for code comment >--- | 
| 2991 | if (compressed_format_idx > 1) { | 
| 2992 | st->cr(); // interrupt byte stream | 
| 2993 | st->cr(); // add an empty line | 
| 2994 | code_comment_column = Disassembler::print_location(p, pss, end, st, false, false); | 
| 2995 | } | 
| 2996 | const_cast<nmethod*>(this)->print_code_comment_on(st, code_comment_column, p, p_end ); | 
| 2997 | st->bol(); | 
| 2998 | compressed_format_idx = 0; | 
| 2999 | } | 
| 3000 | |
| 3001 | //---< New location information after line break >--- | 
| 3002 | if (compressed_format_idx == 0) { | 
| 3003 | code_comment_column = Disassembler::print_location(p, pss, end, st, false, false); | 
| 3004 | compressed_format_idx = 1; | 
| 3005 | } | 
| 3006 | |
| 3007 | //---< Nicely align instructions for readability >--- | 
| 3008 | if (compressed_format_idx > 1) { | 
| 3009 | Disassembler::print_delimiter(st); | 
| 3010 | } | 
| 3011 | |
| 3012 | //---< Now, finally, print the actual instruction bytes >--- | 
| 3013 | unsigned char* p0 = p; | 
| 3014 | p = Disassembler::decode_instruction_abstract(p, st, instruction_size_in_bytes, instr_maxlen); | 
| 3015 | compressed_format_idx += p - p0; | 
| 3016 | |
| 3017 | if (Disassembler::start_newline(compressed_format_idx-1)) { | 
| 3018 | st->cr(); | 
| 3019 | compressed_format_idx = 0; | 
| 3020 | } | 
| 3021 | } | 
| 3022 | //---< Close the output (Marker for post-mortem disassembler) >--- | 
| 3023 | st->bol(); | 
| 3024 | st->print_cr("[/MachCode]"); | 
| 3025 | return; | 
| 3026 | } | 
| 3027 | #endif | 
| 3028 | } | 
| 3029 | |
| 3030 | #if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY) | 
| 3031 | |
| 3032 | const char* nmethod::reloc_string_for(u_char* begin, u_char* end) { | 
| 3033 | RelocIterator iter(this, begin, end); | 
| 3034 | bool have_one = false; | 
| 3035 | while (iter.next()) { | 
| 3036 | have_one = true; | 
| 3037 | switch (iter.type()) { | 
| 3038 | case relocInfo::none: return "no_reloc"; | 
| 3039 | case relocInfo::oop_type: { | 
| 3040 | // Get a non-resizable resource-allocated stringStream. | 
| 3041 | // Our callees make use of (nested) ResourceMarks. | 
| 3042 | stringStream st(NEW_RESOURCE_ARRAY(char, 1024), 1024); | 
| 3043 | oop_Relocation* r = iter.oop_reloc(); | 
| 3044 | oop obj = r->oop_value(); | 
| 3045 | st.print("oop("); | 
| 3046 | if (obj == NULL) st.print("NULL"); | 
| 3047 | else obj->print_value_on(&st); | 
| 3048 | st.print(")"); | 
| 3049 | return st.as_string(); | 
| 3050 | } | 
| 3051 | case relocInfo::metadata_type: { | 
| 3052 | stringStream st; | 
| 3053 | metadata_Relocation* r = iter.metadata_reloc(); | 
| 3054 | Metadata* obj = r->metadata_value(); | 
| 3055 | st.print("metadata("); | 
| 3056 | if (obj == NULL) st.print("NULL"); | 
| 3057 | else obj->print_value_on(&st); | 
| 3058 | st.print(")"); | 
| 3059 | return st.as_string(); | 
| 3060 | } | 
| 3061 | case relocInfo::runtime_call_type: | 
| 3062 | case relocInfo::runtime_call_w_cp_type: { | 
| 3063 | stringStream st; | 
| 3064 | st.print("runtime_call"); | 
| 3065 | CallRelocation* r = (CallRelocation*)iter.reloc(); | 
| 3066 | address dest = r->destination(); | 
| 3067 | CodeBlob* cb = CodeCache::find_blob(dest); | 
| 3068 | if (cb != NULL) { | 
| 3069 | st.print(" %s", cb->name()); | 
| 3070 | } else { | 
| 3071 | ResourceMark rm; | 
| 3072 | const int buflen = 1024; | 
| 3073 | char* buf = NEW_RESOURCE_ARRAY(char, buflen); | 
| 3074 | int offset; | 
| 3075 | if (os::dll_address_to_function_name(dest, buf, buflen, &offset)) { | 
| 3076 | st.print(" %s", buf); | 
| 3077 | if (offset != 0) { | 
| 3078 | st.print("+%d", offset); | 
| 3079 | } | 
| 3080 | } | 
| 3081 | } | 
| 3082 | return st.as_string(); | 
| 3083 | } | 
| 3084 | case relocInfo::virtual_call_type: { | 
| 3085 | stringStream st; | 
| 3086 | st.print_raw("virtual_call"); | 
| 3087 | virtual_call_Relocation* r = iter.virtual_call_reloc(); | 
| 3088 | Method* m = r->method_value(); | 
| 3089 | if (m != NULL) { | 
| 3090 | assert(m->is_method(), ""); | 
| 3091 | m->print_short_name(&st); | 
| 3092 | } | 
| 3093 | return st.as_string(); | 
| 3094 | } | 
| 3095 | case relocInfo::opt_virtual_call_type: { | 
| 3096 | stringStream st; | 
| 3097 | st.print_raw("optimized virtual_call"); | 
| 3098 | opt_virtual_call_Relocation* r = iter.opt_virtual_call_reloc(); | 
| 3099 | Method* m = r->method_value(); | 
| 3100 | if (m != NULL) { | 
| 3101 | assert(m->is_method(), ""); | 
| 3102 | m->print_short_name(&st); | 
| 3103 | } | 
| 3104 | return st.as_string(); | 
| 3105 | } | 
| 3106 | case relocInfo::static_call_type: { | 
| 3107 | stringStream st; | 
| 3108 | st.print_raw("static_call"); | 
| 3109 | static_call_Relocation* r = iter.static_call_reloc(); | 
| 3110 | Method* m = r->method_value(); | 
| 3111 | if (m != NULL) { | 
| 3112 | assert(m->is_method(), ""); | 
| 3113 | m->print_short_name(&st); | 
| 3114 | } | 
| 3115 | return st.as_string(); | 
| 3116 | } | 
| 3117 | case relocInfo::static_stub_type: return "static_stub"; | 
| 3118 | case relocInfo::external_word_type: return "external_word"; | 
| 3119 | case relocInfo::internal_word_type: return "internal_word"; | 
| 3120 | case relocInfo::section_word_type: return "section_word"; | 
| 3121 | case relocInfo::poll_type: return "poll"; | 
| 3122 | case relocInfo::poll_return_type: return "poll_return"; | 
| 3123 | case relocInfo::trampoline_stub_type: return "trampoline_stub"; | 
| 3124 | case relocInfo::type_mask: return "type_bit_mask"; | 
| 3125 | |
| 3126 | default: | 
| 3127 | break; | 
| 3128 | } | 
| 3129 | } | 
| 3130 | return have_one ? "other" : NULL; | 
| 3131 | } | 
| 3132 | |
| 3133 | // Return the last scope in (begin..end] | 
| 3134 | ScopeDesc* nmethod::scope_desc_in(address begin, address end) { | 
| 3135 | PcDesc* p = pc_desc_near(begin+1); | 
| 3136 | if (p != NULL && p->real_pc(this) <= end) { | 
| 3137 | return new ScopeDesc(this, p); | 
| 3138 | } | 
| 3139 | return NULL; | 
| 3140 | } | 
| 3141 | |
| 3142 | const char* nmethod::nmethod_section_label(address pos) const { | 
| 3143 | const char* label = NULL; | 
| 3144 | if (pos == code_begin()) label = "[Instructions begin]"; | 
| 3145 | if (pos == entry_point()) label = "[Entry Point]"; | 
| 3146 | if (pos == verified_entry_point()) label = "[Verified Entry Point]"; | 
| 3147 | if (has_method_handle_invokes() && (pos == deopt_mh_handler_begin())) label = "[Deopt MH Handler Code]"; | 
| 3148 | if (pos == consts_begin() && pos != insts_begin()) label = "[Constants]"; | 
| 3149 | // Check stub_code before checking exception_handler or deopt_handler. | 
| 3150 | if (pos == this->stub_begin()) label = "[Stub Code]"; | 
| 3151 | if (JVMCI_ONLY(_exception_offset >= 0 &&) pos == exception_begin()) label = "[Exception Handler]"; | 
| 3152 | if (JVMCI_ONLY(_deopt_handler_begin != NULL &&) pos == deopt_handler_begin()) label = "[Deopt Handler Code]"; | 
| 3153 | return label; | 
| 3154 | } | 
| 3155 | |
| 3156 | void nmethod::print_nmethod_labels(outputStream* stream, address block_begin, bool print_section_labels) const { | 
| 3157 | if (print_section_labels) { | 
| 3158 | const char* label = nmethod_section_label(block_begin); | 
| 3159 | if (label != NULL) { | 
| 3160 | stream->bol(); | 
| 3161 | stream->print_cr("%s", label); | 
| 3162 | } | 
| 3163 | } | 
| 3164 | |
| 3165 | if (block_begin == entry_point()) { | 
| 3166 | Method* m = method(); | 
| 3167 | if (m != NULL) { | 
| 3168 | stream->print(" # "); | 
| 3169 | m->print_value_on(stream); | 
| 3170 | stream->cr(); | 
| 3171 | } | 
| 3172 | if (m != NULL && !is_osr_method()) { | 
| 3173 | ResourceMark rm; | 
| 3174 | int sizeargs = m->size_of_parameters(); | 
| 3175 | BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs); | 
| 3176 | VMRegPair* regs   = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs); | 
| 3177 | { | 
| 3178 | int sig_index = 0; | 
| 3179 | if (!m->is_static()) | 
| 3180 | sig_bt[sig_index++] = T_OBJECT; // 'this' | 
| 3181 | for (SignatureStream ss(m->signature()); !ss.at_return_type(); ss.next()) { | 
| 3182 | BasicType t = ss.type(); | 
| 3183 | sig_bt[sig_index++] = t; | 
| 3184 | if (type2size[t] == 2) { | 
| 3185 | sig_bt[sig_index++] = T_VOID; | 
| 3186 | } else { | 
| 3187 | assert(type2size[t] == 1, "size is 1 or 2"); | 
| 3188 | } | 
| 3189 | } | 
| 3190 | assert(sig_index == sizeargs, ""); | 
| 3191 | } | 
| 3192 | const char* spname = "sp"; // make arch-specific? | 
| 3193 | intptr_t out_preserve = SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs); | 
Value stored to 'out_preserve' during its initialization is never read  | |
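A possible remedy (a sketch, not part of the original source): the result of java_calling_convention() is never used in this printing code, so the call can be kept solely for its side effect of filling 'regs' and the dead variable dropped:

    // Assuming the preserve-area size really is not needed here,
    // discard the return value instead of storing it into 'out_preserve'.
    SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs);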
| 3194 | int stack_slot_offset = this->frame_size() * wordSize; | 
| 3195 | int tab1 = 14, tab2 = 24; | 
| 3196 | int sig_index = 0; | 
| 3197 | int arg_index = (m->is_static() ? 0 : -1); | 
| 3198 | bool did_old_sp = false; | 
| 3199 | for (SignatureStream ss(m->signature()); !ss.at_return_type(); ) { | 
| 3200 | bool at_this = (arg_index == -1); | 
| 3201 | bool at_old_sp = false; | 
| 3202 | BasicType t = (at_this ? T_OBJECT : ss.type()); | 
| 3203 | assert(t == sig_bt[sig_index], "sigs in sync"); | 
| 3204 | if (at_this) | 
| 3205 | stream->print(" # this: "); | 
| 3206 | else | 
| 3207 | stream->print(" # parm%d: ", arg_index); | 
| 3208 | stream->move_to(tab1); | 
| 3209 | VMReg fst = regs[sig_index].first(); | 
| 3210 | VMReg snd = regs[sig_index].second(); | 
| 3211 | if (fst->is_reg()) { | 
| 3212 | stream->print("%s", fst->name()); | 
| 3213 | if (snd->is_valid()) { | 
| 3214 | stream->print(":%s", snd->name()); | 
| 3215 | } | 
| 3216 | } else if (fst->is_stack()) { | 
| 3217 | int offset = fst->reg2stack() * VMRegImpl::stack_slot_size + stack_slot_offset; | 
| 3218 | if (offset == stack_slot_offset) at_old_sp = true; | 
| 3219 | stream->print("[%s+0x%x]", spname, offset); | 
| 3220 | } else { | 
| 3221 | stream->print("reg%d:%d??", (int)(intptr_t)fst, (int)(intptr_t)snd); | 
| 3222 | } | 
| 3223 | stream->print(" "); | 
| 3224 | stream->move_to(tab2); | 
| 3225 | stream->print("= "); | 
| 3226 | if (at_this) { | 
| 3227 | m->method_holder()->print_value_on(stream); | 
| 3228 | } else { | 
| 3229 | bool did_name = false; | 
| 3230 | if (!at_this && ss.is_reference()) { | 
| 3231 | Symbol* name = ss.as_symbol(); | 
| 3232 | name->print_value_on(stream); | 
| 3233 | did_name = true; | 
| 3234 | } | 
| 3235 | if (!did_name) | 
| 3236 | stream->print("%s", type2name(t)); | 
| 3237 | } | 
| 3238 | if (at_old_sp) { | 
| 3239 | stream->print(" (%s of caller)", spname); | 
| 3240 | did_old_sp = true; | 
| 3241 | } | 
| 3242 | stream->cr(); | 
| 3243 | sig_index += type2size[t]; | 
| 3244 | arg_index += 1; | 
| 3245 | if (!at_this) ss.next(); | 
| 3246 | } | 
| 3247 | if (!did_old_sp) { | 
| 3248 | stream->print(" # "); | 
| 3249 | stream->move_to(tab1); | 
| 3250 | stream->print("[%s+0x%x]", spname, stack_slot_offset); | 
| 3251 | stream->print(" (%s of caller)", spname); | 
| 3252 | stream->cr(); | 
| 3253 | } | 
| 3254 | } | 
| 3255 | } | 
| 3256 | } | 
| 3257 | |
| 3258 | // Returns whether this nmethod has code comments. | 
| 3259 | bool nmethod::has_code_comment(address begin, address end) { | 
| 3260 | // scopes? | 
| 3261 | ScopeDesc* sd = scope_desc_in(begin, end); | 
| 3262 | if (sd != NULL) return true; | 
| 3263 | |
| 3264 | // relocations? | 
| 3265 | const char* str = reloc_string_for(begin, end); | 
| 3266 | if (str != NULL) return true; | 
| 3267 | |
| 3268 | // implicit exceptions? | 
| 3269 | int cont_offset = ImplicitExceptionTable(this).continuation_offset(begin - code_begin()); | 
| 3270 | if (cont_offset != 0) return true; | 
| 3271 | |
| 3272 | return false; | 
| 3273 | } | 
| 3274 | |
| 3275 | void nmethod::print_code_comment_on(outputStream* st, int column, address begin, address end) { | 
| 3276 | ImplicitExceptionTable implicit_table(this); | 
| 3277 | int pc_offset = begin - code_begin(); | 
| 3278 | int cont_offset = implicit_table.continuation_offset(pc_offset); | 
| 3279 | bool oop_map_required = false; | 
| 3280 | if (cont_offset != 0) { | 
| 3281 | st->move_to(column, 6, 0); | 
| 3282 | if (pc_offset == cont_offset) { | 
| 3283 | st->print("; implicit exception: deoptimizes"); | 
| 3284 | oop_map_required = true; | 
| 3285 | } else { | 
| 3286 | st->print("; implicit exception: dispatches to " INTPTR_FORMAT, p2i(code_begin() + cont_offset)); | 
| 3287 | } | 
| 3288 | } | 
| 3289 | |
| 3290 | // Find an oopmap in (begin, end]. We use the odd half-closed | 
| 3291 | // interval so that oop maps and scope descs which are tied to the | 
| 3292 | // byte after a call are printed with the call itself. OopMaps | 
| 3293 | // associated with implicit exceptions are printed with the implicit | 
| 3294 | // instruction. | 
| 3295 | address base = code_begin(); | 
| 3296 | ImmutableOopMapSet* oms = oop_maps(); | 
| 3297 | if (oms != NULL) { | 
| 3298 | for (int i = 0, imax = oms->count(); i < imax; i++) { | 
| 3299 | const ImmutableOopMapPair* pair = oms->pair_at(i); | 
| 3300 | const ImmutableOopMap* om = pair->get_from(oms); | 
| 3301 | address pc = base + pair->pc_offset(); | 
| 3302 | if (pc >= begin) { | 
| 3303 | #if INCLUDE_JVMCI | 
| 3304 | bool is_implicit_deopt = implicit_table.continuation_offset(pair->pc_offset()) == (uint) pair->pc_offset(); | 
| 3305 | #else | 
| 3306 | bool is_implicit_deopt = false; | 
| 3307 | #endif | 
| 3308 | if (is_implicit_deopt ? pc == begin : pc > begin && pc <= end) { | 
| 3309 | st->move_to(column, 6, 0); | 
| 3310 | st->print("; "); | 
| 3311 | om->print_on(st); | 
| 3312 | oop_map_required = false; | 
| 3313 | } | 
| 3314 | } | 
| 3315 | if (pc > end) { | 
| 3316 | break; | 
| 3317 | } | 
| 3318 | } | 
| 3319 | } | 
| 3320 | assert(!oop_map_required, "missed oopmap"); | 
| 3321 | |
| 3322 | Thread* thread = Thread::current(); | 
| 3323 | |
| 3324 | // Print any debug info present at this pc. | 
| 3325 | ScopeDesc* sd = scope_desc_in(begin, end); | 
| 3326 | if (sd != NULL) { | 
| 3327 | st->move_to(column, 6, 0); | 
| 3328 | if (sd->bci() == SynchronizationEntryBCI) { | 
| 3329 | st->print(";*synchronization entry"); | 
| 3330 | } else if (sd->bci() == AfterBci) { | 
| 3331 | st->print(";* method exit (unlocked if synchronized)"); | 
| 3332 | } else if (sd->bci() == UnwindBci) { | 
| 3333 | st->print(";* unwind (locked if synchronized)"); | 
| 3334 | } else if (sd->bci() == AfterExceptionBci) { | 
| 3335 | st->print(";* unwind (unlocked if synchronized)"); | 
| 3336 | } else if (sd->bci() == UnknownBci) { | 
| 3337 | st->print(";* unknown"); | 
| 3338 | } else if (sd->bci() == InvalidFrameStateBci) { | 
| 3339 | st->print(";* invalid frame state"); | 
| 3340 | } else { | 
| 3341 | if (sd->method() == NULL) { | 
| 3342 | st->print("method is NULL"); | 
| 3343 | } else if (sd->method()->is_native()) { | 
| 3344 | st->print("method is native"); | 
| 3345 | } else { | 
| 3346 | Bytecodes::Code bc = sd->method()->java_code_at(sd->bci()); | 
| 3347 | st->print(";*%s", Bytecodes::name(bc)); | 
| 3348 | switch (bc) { | 
| 3349 | case Bytecodes::_invokevirtual: | 
| 3350 | case Bytecodes::_invokespecial: | 
| 3351 | case Bytecodes::_invokestatic: | 
| 3352 | case Bytecodes::_invokeinterface: | 
| 3353 | { | 
| 3354 | Bytecode_invoke invoke(methodHandle(thread, sd->method()), sd->bci()); | 
| 3355 | st->print(" "); | 
| 3356 | if (invoke.name() != NULL) | 
| 3357 | invoke.name()->print_symbol_on(st); | 
| 3358 | else | 
| 3359 | st->print("<UNKNOWN>"); | 
| 3360 | break; | 
| 3361 | } | 
| 3362 | case Bytecodes::_getfield: | 
| 3363 | case Bytecodes::_putfield: | 
| 3364 | case Bytecodes::_getstatic: | 
| 3365 | case Bytecodes::_putstatic: | 
| 3366 | { | 
| 3367 | Bytecode_field field(methodHandle(thread, sd->method()), sd->bci()); | 
| 3368 | st->print(" "); | 
| 3369 | if (field.name() != NULL) | 
| 3370 | field.name()->print_symbol_on(st); | 
| 3371 | else | 
| 3372 | st->print("<UNKNOWN>"); | 
| 3373 | } | 
| 3374 | default: | 
| 3375 | break; | 
| 3376 | } | 
| 3377 | } | 
| 3378 | st->print(" {reexecute=%d rethrow=%d return_oop=%d}", sd->should_reexecute(), sd->rethrow_exception(), sd->return_oop()); | 
| 3379 | } | 
| 3380 | |
| 3381 | // Print all scopes | 
| 3382 | for (;sd != NULL; sd = sd->sender()) { | 
| 3383 | st->move_to(column, 6, 0); | 
| 3384 | st->print("; -"); | 
| 3385 | if (sd->should_reexecute()) { | 
| 3386 | st->print(" (reexecute)"); | 
| 3387 | } | 
| 3388 | if (sd->method() == NULL) { | 
| 3389 | st->print("method is NULL"); | 
| 3390 | } else { | 
| 3391 | sd->method()->print_short_name(st); | 
| 3392 | } | 
| 3393 | int lineno = sd->method()->line_number_from_bci(sd->bci()); | 
| 3394 | if (lineno != -1) { | 
| 3395 | st->print("@%d (line %d)", sd->bci(), lineno); | 
| 3396 | } else { | 
| 3397 | st->print("@%d", sd->bci()); | 
| 3398 | } | 
| 3399 | st->cr(); | 
| 3400 | } | 
| 3401 | } | 
| 3402 | |
| 3403 | // Print relocation information | 
| 3404 | // Prevent memory leak: allocating without ResourceMark. | 
| 3405 | ResourceMark rm; | 
| 3406 | const char* str = reloc_string_for(begin, end); | 
| 3407 | if (str != NULL) { | 
| 3408 | if (sd != NULL) st->cr(); | 
| 3409 | st->move_to(column, 6, 0); | 
| 3410 | st->print("; {%s}", str); | 
| 3411 | } | 
| 3412 | } | 
| 3413 | |
| 3414 | #endif | 
| 3415 | |
| 3416 | class DirectNativeCallWrapper: public NativeCallWrapper { | 
| 3417 | private: | 
| 3418 | NativeCall* _call; | 
| 3419 | |
| 3420 | public: | 
| 3421 | DirectNativeCallWrapper(NativeCall* call) : _call(call) {} | 
| 3422 | |
| 3423 | virtual address destination() const { return _call->destination(); } | 
| 3424 | virtual address instruction_address() const { return _call->instruction_address(); } | 
| 3425 | virtual address next_instruction_address() const { return _call->next_instruction_address(); } | 
| 3426 | virtual address return_address() const { return _call->return_address(); } | 
| 3427 | |
| 3428 | virtual address get_resolve_call_stub(bool is_optimized) const { | 
| 3429 | if (is_optimized) { | 
| 3430 | return SharedRuntime::get_resolve_opt_virtual_call_stub(); | 
| 3431 | } | 
| 3432 | return SharedRuntime::get_resolve_virtual_call_stub(); | 
| 3433 | } | 
| 3434 | |
| 3435 | virtual void set_destination_mt_safe(address dest) { | 
| 3436 | _call->set_destination_mt_safe(dest); | 
| 3437 | } | 
| 3438 | |
| 3439 | virtual void set_to_interpreted(const methodHandle& method, CompiledICInfo& info) { | 
| 3440 | CompiledDirectStaticCall* csc = CompiledDirectStaticCall::at(instruction_address()); | 
| 3441 | { | 
| 3442 | csc->set_to_interpreted(method, info.entry()); | 
| 3443 | } | 
| 3444 | } | 
| 3445 | |
| 3446 | virtual void verify() const { | 
| 3447 | // make sure code pattern is actually a call imm32 instruction | 
| 3448 | _call->verify(); | 
| 3449 | _call->verify_alignment(); | 
| 3450 | } | 
| 3451 | |
| 3452 | virtual void verify_resolve_call(address dest) const { | 
| 3453 | CodeBlob* db = CodeCache::find_blob_unsafe(dest); | 
| 3454 | assert(db != NULL && !db->is_adapter_blob(), "must use stub!"); | 
| 3455 | } | 
| 3456 | |
| 3457 | virtual bool is_call_to_interpreted(address dest) const { | 
| 3458 | CodeBlob* cb = CodeCache::find_blob(_call->instruction_address()); | 
| 3459 | return cb->contains(dest); | 
| 3460 | } | 
| 3461 | |
| 3462 | virtual bool is_safe_for_patching() const { return false; } | 
| 3463 | |
| 3464 | virtual NativeInstruction* get_load_instruction(virtual_call_Relocation* r) const { | 
| 3465 | return nativeMovConstReg_at(r->cached_value()); | 
| 3466 | } | 
| 3467 | |
| 3468 | virtual void *get_data(NativeInstruction* instruction) const { | 
| 3469 | return (void*)((NativeMovConstReg*) instruction)->data(); | 
| 3470 | } | 
| 3471 | |
| 3472 | virtual void set_data(NativeInstruction* instruction, intptr_t data) { | 
| 3473 | ((NativeMovConstReg*) instruction)->set_data(data); | 
| 3474 | } | 
| 3475 | }; | 
| 3476 | |
| 3477 | NativeCallWrapper* nmethod::call_wrapper_at(address call) const { | 
| 3478 | return new DirectNativeCallWrapper((NativeCall*) call); | 
| 3479 | } | 
| 3480 | |
| 3481 | NativeCallWrapper* nmethod::call_wrapper_before(address return_pc) const { | 
| 3482 | return new DirectNativeCallWrapper(nativeCall_before(return_pc)); | 
| 3483 | } | 
| 3484 | |
| 3485 | address nmethod::call_instruction_address(address pc) const { | 
| 3486 | if (NativeCall::is_call_before(pc)) { | 
| 3487 | NativeCall *ncall = nativeCall_before(pc); | 
| 3488 | return ncall->instruction_address(); | 
| 3489 | } | 
| 3490 | return NULL; | 
| 3491 | } | 
| 3492 | |
| 3493 | CompiledStaticCall* nmethod::compiledStaticCall_at(Relocation* call_site) const { | 
| 3494 | return CompiledDirectStaticCall::at(call_site); | 
| 3495 | } | 
| 3496 | |
| 3497 | CompiledStaticCall* nmethod::compiledStaticCall_at(address call_site) const { | 
| 3498 | return CompiledDirectStaticCall::at(call_site); | 
| 3499 | } | 
| 3500 | |
| 3501 | CompiledStaticCall* nmethod::compiledStaticCall_before(address return_addr) const { | 
| 3502 | return CompiledDirectStaticCall::before(return_addr); | 
| 3503 | } | 
| 3504 | |
| 3505 | #if defined(SUPPORT_DATA_STRUCTS) | 
| 3506 | void nmethod::print_value_on(outputStream* st) const { | 
| 3507 | st->print("nmethod"); | 
| 3508 | print_on(st, NULL); | 
| 3509 | } | 
| 3510 | #endif | 
| 3511 | |
| 3512 | #ifndef PRODUCT | 
| 3513 | |
| 3514 | void nmethod::print_calls(outputStream* st) { | 
| 3515 | RelocIterator iter(this); | 
| 3516 | while (iter.next()) { | 
| 3517 | switch (iter.type()) { | 
| 3518 | case relocInfo::virtual_call_type: | 
| 3519 | case relocInfo::opt_virtual_call_type: { | 
| 3520 | CompiledICLocker ml_verify(this); | 
| 3521 | CompiledIC_at(&iter)->print(); | 
| 3522 | break; | 
| 3523 | } | 
| 3524 | case relocInfo::static_call_type: | 
| 3525 | st->print_cr("Static call at " INTPTR_FORMAT, p2i(iter.reloc()->addr())); | 
| 3526 | CompiledDirectStaticCall::at(iter.reloc())->print(); | 
| 3527 | break; | 
| 3528 | default: | 
| 3529 | break; | 
| 3530 | } | 
| 3531 | } | 
| 3532 | } | 
| 3533 | |
| 3534 | void nmethod::print_statistics() { | 
| 3535 | ttyLocker ttyl; | 
| 3536 | if (xtty != NULL) xtty->head("statistics type='nmethod'"); | 
| 3537 | native_nmethod_stats.print_native_nmethod_stats(); | 
| 3538 | #ifdef COMPILER1 | 
| 3539 | c1_java_nmethod_stats.print_nmethod_stats("C1"); | 
| 3540 | #endif | 
| 3541 | #ifdef COMPILER2 | 
| 3542 | c2_java_nmethod_stats.print_nmethod_stats("C2"); | 
| 3543 | #endif | 
| 3544 | #if INCLUDE_JVMCI | 
| 3545 | jvmci_java_nmethod_stats.print_nmethod_stats("JVMCI"); | 
| 3546 | #endif | 
| 3547 | unknown_java_nmethod_stats.print_nmethod_stats("Unknown"); | 
| 3548 | DebugInformationRecorder::print_statistics(); | 
| 3549 | #ifndef PRODUCT | 
| 3550 | pc_nmethod_stats.print_pc_stats(); | 
| 3551 | #endif | 
| 3552 | Dependencies::print_statistics(); | 
| 3553 | if (xtty != NULL) xtty->tail("statistics"); | 
| 3554 | } | 
| 3555 | |
| 3556 | #endif // !PRODUCT | 
| 3557 | |
| 3558 | #if INCLUDE_JVMCI | 
| 3559 | void nmethod::update_speculation(JavaThread* thread) { | 
| 3560 | jlong speculation = thread->pending_failed_speculation(); | 
| 3561 | if (speculation != 0) { | 
| 3562 | guarantee(jvmci_nmethod_data() != NULL, "failed speculation in nmethod without failed speculation list"); | 
| 3563 | jvmci_nmethod_data()->add_failed_speculation(this, speculation); | 
| 3564 | thread->set_pending_failed_speculation(0); | 
| 3565 | } | 
| 3566 | } | 
| 3567 | |
| 3568 | const char* nmethod::jvmci_name() { | 
| 3569 | if (jvmci_nmethod_data() != NULL) { | 
| 3570 | return jvmci_nmethod_data()->name(); | 
| 3571 | } | 
| 3572 | return NULL; | 
| 3573 | } | 
| 3574 | #endif |