File: jdk/src/hotspot/share/code/compiledMethod.cpp
Warning: line 447, column 5: Called C++ object pointer is null
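
Note: the flagged statement is the assert in CheckClass::do_metadata (marked in
the listing below), which dereferences klass via klass->is_loader_alive().
klass is assigned in all three is_klass()/is_method()/is_methodData() branches,
and the final else branch cannot fall through; the report most likely arises
because the analyzer does not model ShouldNotReachHere() as non-returning, so
it assumes klass may still be NULL at the dereference. A sketch of a possible
fix follows the listing.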
/*
 * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/compiledIC.hpp"
#include "code/compiledMethod.inline.hpp"
#include "code/exceptionHandlerTable.hpp"
#include "code/scopeDesc.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/gcBehaviours.hpp"
#include "interpreter/bytecode.inline.hpp"
#include "logging/log.hpp"
#include "logging/logTag.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/method.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/atomic.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/sharedRuntime.hpp"

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout,
                               int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps,
                               bool caller_must_gc_arguments)
  : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
    _mark_for_deoptimization_status(not_marked),
    _method(method),
    _gc_data(NULL)
{
  init_defaults();
}

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size,
                               int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size,
                               OopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb,
             frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
    _mark_for_deoptimization_status(not_marked),
    _method(method),
    _gc_data(NULL)
{
  init_defaults();
}

void CompiledMethod::init_defaults() {
  { // avoid uninitialized fields, even for short time periods
    _scopes_data_begin      = NULL;
    _deopt_handler_begin    = NULL;
    _deopt_mh_handler_begin = NULL;
    _exception_cache        = NULL;
  }
  _has_unsafe_access         = 0;
  _has_method_handle_invokes = 0;
  _has_wide_vectors          = 0;
}

bool CompiledMethod::is_method_handle_return(address return_pc) {
  if (!has_method_handle_invokes())  return false;
  PcDesc* pd = pc_desc_at(return_pc);
  if (pd == NULL)
    return false;
  return pd->is_method_handle_invoke();
}

// Returns a string version of the method state.
const char* CompiledMethod::state() const {
  int state = get_state();
  switch (state) {
  case not_installed:
    return "not installed";
  case in_use:
    return "in use";
  case not_used:
    return "not_used";
  case not_entrant:
    return "not_entrant";
  case zombie:
    return "zombie";
  case unloaded:
    return "unloaded";
  default:
    fatal("unexpected method state: %d", state);
    return NULL;
  }
}

//-----------------------------------------------------------------------------
void CompiledMethod::mark_for_deoptimization(bool inc_recompile_counts) {
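  // MutexLocker treats a NULL mutex as a no-op, so the lock is taken only
  // when the current thread does not already hold CompiledMethod_lock.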
  MutexLocker ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock,
                 Mutex::_no_safepoint_check_flag);
  _mark_for_deoptimization_status = (inc_recompile_counts ? deoptimize : deoptimize_noupdate);
}

//-----------------------------------------------------------------------------

ExceptionCache* CompiledMethod::exception_cache_acquire() const {
  return Atomic::load_acquire(&_exception_cache);
}

void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  assert(ExceptionCache_lock->owned_by_self(), "Must hold the ExceptionCache_lock");
  assert(new_entry != NULL, "Must be non null");
  assert(new_entry->next() == NULL, "Must be null");

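  // Lock-free insert into a singly linked list that is read concurrently:
  // unlink any head entries whose Klass* is dead, then CAS the new entry
  // in as the head.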
  for (;;) {
    ExceptionCache *ec = exception_cache();
    if (ec != NULL) {
      Klass* ex_klass = ec->exception_type();
      if (!ex_klass->is_loader_alive()) {
        // We must guarantee that entries are not inserted with new next pointer
        // edges to ExceptionCache entries with dead klasses, due to bad interactions
        // with concurrent ExceptionCache cleanup. Therefore, the inserts roll
        // the head pointer forward to the first live ExceptionCache, so that the new
        // next pointers always point at live ExceptionCaches, that are not removed due
        // to concurrent ExceptionCache cleanup.
        ExceptionCache* next = ec->next();
        if (Atomic::cmpxchg(&_exception_cache, ec, next) == ec) {
          CodeCache::release_exception_cache(ec);
        }
        continue;
      }
      ec = exception_cache();
      if (ec != NULL) {
        new_entry->set_next(ec);
      }
    }
    if (Atomic::cmpxchg(&_exception_cache, ec, new_entry) == ec) {
      return;
    }
  }
}

void CompiledMethod::clean_exception_cache() {
  // For each nmethod, only a single thread may call this cleanup function
  // at the same time, whether called in STW cleanup or concurrent cleanup.
  // Note that if the GC is processing exception cache cleaning in a concurrent phase,
  // then a single writer may contend with cleaning up the head pointer to the
  // first ExceptionCache node that has a Klass* that is alive. That is fine,
  // as long as there is no concurrent cleanup of next pointers from concurrent writers.
  // And the concurrent writers do not clean up next pointers, only the head.
  // Also note that concurrent readers will walk through Klass* pointers that are not
  // alive. That does not cause ABA problems, because Klass* is deleted after
  // a handshake with all threads, after all stale ExceptionCaches have been
  // unlinked. That is also when the CodeCache::exception_cache_purge_list()
  // is deleted, with all ExceptionCache entries that were cleaned concurrently.
  // That similarly implies that CAS operations on ExceptionCache entries do not
  // suffer from ABA problems as unlinking and deletion is separated by a global
  // handshake operation.
  ExceptionCache* prev = NULL;
  ExceptionCache* curr = exception_cache_acquire();

  while (curr != NULL) {
    ExceptionCache* next = curr->next();

    if (!curr->exception_type()->is_loader_alive()) {
      if (prev == NULL) {
        // Try to clean head; this is contended by concurrent inserts, that
        // both lazily clean the head, and insert entries at the head. If
        // the CAS fails, the operation is restarted.
        if (Atomic::cmpxchg(&_exception_cache, curr, next) != curr) {
          prev = NULL;
          curr = exception_cache_acquire();
          continue;
        }
      } else {
        // During cleanup it is impossible to connect the next pointer to an
        // ExceptionCache that has not been published before a safepoint
        // prior to the cleanup. Therefore, release is not required.
        prev->set_next(next);
      }
      // prev stays the same.

      CodeCache::release_exception_cache(curr);
    } else {
      prev = curr;
    }

    curr = next;
  }
}

// These are the public access methods for the exception cache.
address CompiledMethod::handler_for_exception_and_pc(Handle exception, address pc) {
  // We never grab a lock to read the exception cache, so we may
  // have false negatives. This is okay, as it can only happen during
  // the first few exception lookups for a given nmethod.
  ExceptionCache* ec = exception_cache_acquire();
  while (ec != NULL) {
    address ret_val;
    if ((ret_val = ec->match(exception, pc)) != NULL) {
      return ret_val;
    }
    ec = ec->next();
  }
  return NULL;
}

void CompiledMethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
  // There are potential race conditions during exception cache updates, so we
  // must own the ExceptionCache_lock before doing ANY modifications. Because
  // we don't lock during reads, it is possible to have several threads attempt
  // to update the cache with the same data. We need to check for already inserted
  // copies of the current data before adding it.

  MutexLocker ml(ExceptionCache_lock);
  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);

  if (target_entry == NULL || !target_entry->add_address_and_handler(pc, handler)) {
    target_entry = new ExceptionCache(exception, pc, handler);
    add_exception_cache_entry(target_entry);
  }
}

// This method is private, and used to manipulate the exception cache
// directly.
ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle exception) {
  ExceptionCache* ec = exception_cache_acquire();
  while (ec != NULL) {
    if (ec->match_exception_with_space(exception)) {
      return ec;
    }
    ec = ec->next();
  }
  return NULL;
}

//-------------end of code for ExceptionCache--------------

bool CompiledMethod::is_at_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    if (iter.type() == relocInfo::poll_return_type)
      return true;
  }
  return false;
}


bool CompiledMethod::is_at_poll_or_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    relocInfo::relocType t = iter.type();
    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
      return true;
  }
  return false;
}

void CompiledMethod::verify_oop_relocations() {
  // Ensure that the code matches the current oop values
  RelocIterator iter(this, NULL, NULL);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (!reloc->oop_is_immediate()) {
        reloc->verify_oop_relocation();
      }
    }
  }
}


ScopeDesc* CompiledMethod::scope_desc_at(address pc) {
  PcDesc* pd = pc_desc_at(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd);
}

ScopeDesc* CompiledMethod::scope_desc_near(address pc) {
  PcDesc* pd = pc_desc_near(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd);
}

address CompiledMethod::oops_reloc_begin() const {
  // If the method is not entrant or zombie then a JMP is plastered over the
  // first few bytes. If an oop in the old code was there, that oop
  // should not get GC'd. Skip the first few bytes of oops on
  // not-entrant methods.
  if (frame_complete_offset() != CodeOffsets::frame_never_safe &&
      code_begin() + frame_complete_offset() >
      verified_entry_point() + NativeJump::instruction_size)
  {
    // If we have a frame_complete_offset after the native jump, then there
    // is no point trying to look for oops before that. This is a requirement
    // for being allowed to scan oops concurrently.
    return code_begin() + frame_complete_offset();
  }

  // It is not safe to read oops concurrently using entry barriers, if their
  // location depends on whether the nmethod is entrant or not.
  assert(BarrierSet::barrier_set()->barrier_set_nmethod() == NULL, "Not safe oop scan");

  address low_boundary = verified_entry_point();
  if (!is_in_use() && is_nmethod()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }
  return low_boundary;
}

int CompiledMethod::verify_icholder_relocations() {
  ResourceMark rm;
  int count = 0;

  RelocIterator iter(this);
  while(iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc(), this)) {
        CompiledIC *ic = CompiledIC_at(&iter);
        if (TraceCompiledIC) {
          tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
          ic->print();
        }
        assert(ic->cached_icholder() != NULL, "must be non-NULL");
        count++;
      }
    }
  }

  return count;
}

// Method that knows how to preserve outgoing arguments at call. This method must be
// called with a frame corresponding to a Java invoke
void CompiledMethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
  if (method() != NULL && !method()->is_native()) {
    address pc = fr.pc();
    SimpleScopeDesc ssd(this, pc);
    if (ssd.is_optimized_linkToNative()) return; // call was replaced
    Bytecode_invoke call(methodHandle(Thread::current(), ssd.method()), ssd.bci());
    bool has_receiver = call.has_receiver();
    bool has_appendix = call.has_appendix();
    Symbol* signature = call.signature();

    // The method attached by JIT-compilers should be used, if present.
    // Bytecode can be inaccurate in such a case.
    Method* callee = attached_method_before_pc(pc);
    if (callee != NULL) {
      has_receiver = !(callee->access_flags().is_static());
      has_appendix = false;
      signature   = callee->signature();
    }

    fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
  }
}

Method* CompiledMethod::attached_method(address call_instr) {
  assert(code_contains(call_instr), "not part of the nmethod");
  RelocIterator iter(this, call_instr, call_instr + 1);
  while (iter.next()) {
    if (iter.addr() == call_instr) {
      switch(iter.type()) {
        case relocInfo::static_call_type:      return iter.static_call_reloc()->method_value();
        case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
        case relocInfo::virtual_call_type:     return iter.virtual_call_reloc()->method_value();
        default:                               break;
      }
    }
  }
  return NULL; // not found
}

Method* CompiledMethod::attached_method_before_pc(address pc) {
  if (NativeCall::is_call_before(pc)) {
    NativeCall* ncall = nativeCall_before(pc);
    return attached_method(ncall->instruction_address());
  }
  return NULL; // not a call
}

void CompiledMethod::clear_inline_caches() {
  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
  if (is_zombie()) {
    return;
  }

  RelocIterator iter(this);
  while (iter.next()) {
    iter.reloc()->clear_inline_cache();
  }
}

// Clear IC callsites, releasing ICStubs of all compiled ICs
// as well as any associated CompiledICHolders.
void CompiledMethod::clear_ic_callsites() {
  assert(CompiledICLocker::is_safe(this), "mt unsafe call");
  ResourceMark rm;
  RelocIterator iter(this);
  while(iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      CompiledIC* ic = CompiledIC_at(&iter);
      ic->set_to_clean(false);
    }
  }
}

#ifdef ASSERT
// Check class_loader is alive for this bit of metadata.
class CheckClass : public MetadataClosure {
  void do_metadata(Metadata* md) {
    Klass* klass = NULL;
    if (md->is_klass()) {
      klass = ((Klass*)md);
    } else if (md->is_method()) {
      klass = ((Method*)md)->method_holder();
    } else if (md->is_methodData()) {
      klass = ((MethodData*)md)->method()->method_holder();
    } else {
      md->print();
      ShouldNotReachHere();
    }
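    // Line 447 of the original file: the analyzer reports "Called C++ object
    // pointer is null" on the assert below. klass is assigned on every branch
    // above and the else branch cannot fall through; the report presumably
    // arises because ShouldNotReachHere() is not modeled as non-returning.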
    assert(klass->is_loader_alive(), "must be alive");
  }
};
#endif // ASSERT


bool CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic) {
  if (ic->is_clean()) {
    return true;
  }
  if (ic->is_icholder_call()) {
    // The only exception is compiledICHolder metadata, which may
    // yet be marked below (we check this further below).
    CompiledICHolder* cichk_metadata = ic->cached_icholder();

    if (cichk_metadata->is_loader_alive()) {
      return true;
    }
  } else {
    Metadata* ic_metadata = ic->cached_metadata();
    if (ic_metadata != NULL) {
      if (ic_metadata->is_klass()) {
        if (((Klass*)ic_metadata)->is_loader_alive()) {
          return true;
        }
      } else if (ic_metadata->is_method()) {
        Method* method = (Method*)ic_metadata;
        assert(!method->is_old(), "old method should have been cleaned");
        if (method->method_holder()->is_loader_alive()) {
          return true;
        }
      } else {
        ShouldNotReachHere();
      }
    } else {
      // This inline cache is a megamorphic vtable call. Those ICs never hold
      // any Metadata and should therefore never be cleaned by this function.
      return true;
    }
  }

  return ic->set_to_clean();
}

// Clean references to unloaded nmethods at addr from this one, which is not unloaded.
template <class CompiledICorStaticCall>
static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from,
                                         bool clean_all) {
  // OK to look up references to zombies here.
  CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
  CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
  if (nm != NULL) {
    // Clean inline caches pointing to both zombie and not_entrant methods
    if (clean_all || !nm->is_in_use() || nm->is_unloading() || (nm->method()->code() != nm)) {
      // Inline cache cleaning should only be initiated on CompiledMethods that have been
      // observed to be is_alive(). However, with concurrent code cache unloading, it is
      // possible that by now, the state has become !is_alive. This can happen in two ways:
      // 1) It can be racingly flipped to unloaded if the nmethod being cleaned (from the
      // sweeper) is_unloading(). This is fine, because if that happens, then the inline
      // caches have already been cleaned under the same CompiledICLocker that we now hold during
      // inline cache cleaning, and we will simply walk the inline caches again, and likely not
      // find much of interest to clean. However, this race prevents us from asserting that the
      // nmethod is_alive(). The is_unloading() function is completely monotonic; once set due
      // to an oop dying, it remains set forever until freed. Because of that, all unloaded
      // nmethods are is_unloading(), but notably, an unloaded nmethod may also subsequently
      // become zombie (when the sweeper converts it to zombie).
      // 2) It can be racingly flipped to zombie if the nmethod being cleaned (by the concurrent
      // GC) cleans a zombie nmethod that is concurrently made zombie by the sweeper. In this
      // scenario, the sweeper will first transition the nmethod to zombie, and then when
      // unregistering from the GC, it will wait until the GC is done. The GC will then clean
      // the inline caches *with IC stubs*, even though no IC stubs are needed. This is fine,
      // as long as the IC stubs are guaranteed to be released by the next safepoint, where
      // IC finalization requires live IC stubs to not be associated with zombie nmethods.
      // This is guaranteed, because the sweeper does not have a single safepoint check until
      // after it completes the whole transition function; it will wake up after the GC is
      // done with concurrent code cache cleaning (which blocks out safepoints using the
      // suspendible threads set), and then call clear_ic_callsites, which will release the
      // associated IC stubs, before a subsequent safepoint poll can be reached. This
      // guarantees that the spuriously created IC stubs are released appropriately before
      // IC finalization in a safepoint gets to run. Therefore, this race is fine. This is also
      // valid in a scenario where an inline cache of a zombie nmethod gets a spurious IC stub,
      // and then when cleaning another inline cache, fails to request an IC stub because we
      // exhausted the IC stub buffer. In this scenario, the GC will request a safepoint after
      // yielding the suspendible thread set, effectively unblocking safepoints. Before such
      // a safepoint can be reached, the sweeper similarly has to wake up, clear the IC stubs,
      // and reach the next safepoint poll, after the whole transition function has completed.
      // Due to the various races that can cause an nmethod to first be is_alive() and then
      // racingly become !is_alive(), it is unfortunately not possible to assert the nmethod
      // is_alive(), !is_unloaded() or !is_zombie() here.
      if (!ic->set_to_clean(!from->is_unloading())) {
        return false;
      }
      assert(ic->is_clean(), "nmethod " PTR_FORMAT " not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
    }
  }
  return true;
}

static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, CompiledMethod* from,
                                         bool clean_all) {
  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), from, clean_all);
}

static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, CompiledMethod* from,
                                         bool clean_all) {
  return clean_if_nmethod_is_unloaded(csc, csc->destination(), from, clean_all);
}

// Cleans caches in nmethods that point to either classes that are unloaded
// or nmethods that are unloaded.
//
// Can be called either in parallel by G1 currently or after all
// nmethods are unloaded. Return postponed=true in the parallel case for
// inline caches found that point to nmethods that are not yet visited during
// the do_unloading walk.
bool CompiledMethod::unload_nmethod_caches(bool unloading_occurred) {
  ResourceMark rm;

  // The exception cache only needs to be cleaned if unloading occurred
  if (unloading_occurred) {
    clean_exception_cache();
  }

  if (!cleanup_inline_caches_impl(unloading_occurred, false)) {
    return false;
  }

#ifdef ASSERT
  // Check that the metadata embedded in the nmethod is alive
  CheckClass check_class;
  metadata_do(&check_class);
#endif
  return true;
}

void CompiledMethod::run_nmethod_entry_barrier() {
  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
  if (bs_nm != NULL) {
    // We want to keep an invariant that nmethods found through iteration of a
    // Thread's nmethods at a safepoint have gone through an entry barrier and
    // are not armed. By calling this nmethod entry barrier, it plays along and
    // acts like any other nmethod found on the stack of a thread (fewer surprises).
    nmethod* nm = as_nmethod_or_null();
    if (nm != NULL) {
      bool alive = bs_nm->nmethod_entry_barrier(nm);
      assert(alive, "should be alive");
    }
  }
}

void CompiledMethod::cleanup_inline_caches(bool clean_all) {
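  // Cleaning may fail when the pool of inline-cache transition stubs is
  // exhausted; in that case refill the stubs and retry the whole pass.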
  for (;;) {
    ICRefillVerifier ic_refill_verifier;
    { CompiledICLocker ic_locker(this);
      if (cleanup_inline_caches_impl(false, clean_all)) {
        return;
      }
    }
    // Call this nmethod entry barrier from the sweeper.
    run_nmethod_entry_barrier();
    InlineCacheBuffer::refill_ic_stubs();
  }
}

// Called to clean up after class unloading for live nmethods and from the sweeper
// for all methods.
bool CompiledMethod::cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all) {
  assert(CompiledICLocker::is_safe(this), "mt unsafe call");
  ResourceMark rm;

  // Find all calls in an nmethod and clear the ones that point to non-entrant,
  // zombie and unloaded nmethods.
  RelocIterator iter(this, oops_reloc_begin());
  bool is_in_static_stub = false;
  while(iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      if (unloading_occurred) {
        // If class unloading occurred we first clear ICs where the cached metadata
        // is referring to an unloaded klass or method.
        if (!clean_ic_if_metadata_is_dead(CompiledIC_at(&iter))) {
          return false;
        }
      }

      if (!clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all)) {
        return false;
      }
      break;

    case relocInfo::opt_virtual_call_type:
      if (!clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all)) {
        return false;
      }
      break;

    case relocInfo::static_call_type:
      if (!clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, clean_all)) {
        return false;
      }
      break;

    case relocInfo::static_stub_type: {
      is_in_static_stub = true;
      break;
    }

    case relocInfo::metadata_type: {
      // Only the metadata relocations contained in static/opt virtual call stubs
      // contain the Method* passed to c2i adapters. It is the only metadata
      // relocation that needs to be walked, as it is the one metadata relocation
      // that violates the invariant that all metadata relocations have an oop
      // in the compiled method (due to deferred resolution and code patching).

      // This causes dead metadata to remain in compiled methods that are not
      // unloading. Unless these slippery metadata relocations of the static
      // stubs are at least cleared, subsequent class redefinition operations
      // will access potentially free memory, and JavaThread execution
      // concurrent to class unloading may call c2i adapters with dead methods.
      if (!is_in_static_stub) {
        // The first metadata relocation after a static stub relocation is the
        // metadata relocation of the static stub used to pass the Method* to
        // c2i adapters.
        continue;
      }
      is_in_static_stub = false;
      if (is_unloading()) {
        // If the nmethod itself is dying, then it may point at dead metadata.
        // Nobody should follow that metadata; it is strictly unsafe.
        continue;
      }
      metadata_Relocation* r = iter.metadata_reloc();
      Metadata* md = r->metadata_value();
      if (md != NULL && md->is_method()) {
        Method* method = static_cast<Method*>(md);
        if (!method->method_holder()->is_loader_alive()) {
          Atomic::store(r->metadata_addr(), (Method*)NULL);

          if (!r->metadata_is_immediate()) {
            r->fix_metadata_relocation();
          }
        }
      }
      break;
    }

    default:
      break;
    }
  }

  return true;
}

address CompiledMethod::continuation_for_implicit_exception(address pc, bool for_div0_check) {
  // Exception happened outside inline-cache check code => we are inside
  // an active nmethod => use cpc to determine a return address
  int exception_offset = pc - code_begin();
  int cont_offset = ImplicitExceptionTable(this).continuation_offset( exception_offset );
#ifdef ASSERT
  if (cont_offset == 0) {
    Thread* thread = Thread::current();
    ResourceMark rm(thread);
    CodeBlob* cb = CodeCache::find_blob(pc);
    assert(cb != NULL && cb == this, "");
    ttyLocker ttyl;
    tty->print_cr("implicit exception happened at " INTPTR_FORMAT, p2i(pc));
    print();
    method()->print_codes();
    print_code();
    print_pcs();
  }
#endif
  if (cont_offset == 0) {
    // Let the normal error handling report the exception
    return NULL;
  }
  if (cont_offset == exception_offset) {
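    // A continuation offset equal to the faulting offset marks an implicit
    // exception that should be handled by deoptimizing rather than by jumping
    // to an explicit continuation; the non-JVMCI branch below cannot be
    // reached, so only JVMCI-compiled code produces such entries.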
#if INCLUDE_JVMCI
    Deoptimization::DeoptReason deopt_reason = for_div0_check ? Deoptimization::Reason_div0_check : Deoptimization::Reason_null_check;
    JavaThread *thread = JavaThread::current();
    thread->set_jvmci_implicit_exception_pc(pc);
    thread->set_pending_deoptimization(Deoptimization::make_trap_request(deopt_reason,
                                                                         Deoptimization::Action_reinterpret));
    return (SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
#else
    ShouldNotReachHere();
#endif
  }
  return code_begin() + cont_offset;
}

class HasEvolDependency : public MetadataClosure {
  bool _has_evol_dependency;
 public:
  HasEvolDependency() : _has_evol_dependency(false) {}
  void do_metadata(Metadata* md) {
    if (md->is_method()) {
      Method* method = (Method*)md;
      if (method->is_old()) {
        _has_evol_dependency = true;
      }
    }
  }
  bool has_evol_dependency() const { return _has_evol_dependency; }
};

bool CompiledMethod::has_evol_metadata() {
  // Check the metadata in relocIter and CompiledIC and also deoptimize
  // any nmethod that has a reference to old methods.
  HasEvolDependency check_evol;
  metadata_do(&check_evol);
  if (check_evol.has_evol_dependency() && log_is_enabled(Debug, redefine, class, nmethod)) {
    ResourceMark rm;
    log_debug(redefine, class, nmethod)
            ("Found evol dependency of nmethod %s.%s(%s) compile_id=%d in nmethod metadata",
             _method->method_holder()->external_name(),
             _method->name()->as_C_string(),
             _method->signature()->as_C_string(),
             compile_id());
  }
  return check_evol.has_evol_dependency();
}
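
A possible way to address the report, assuming the diagnosis above is right
(the analyzer cannot see that the else branch never falls through), is to make
the non-NULL invariant explicit before the dereference. The sketch below is
illustrative only; the early return and the extra assert are additions, not
part of the original source:

#ifdef ASSERT
// Sketch of a CheckClass variant that makes the non-NULL invariant visible
// to the static analyzer. Same logic as the original closure above.
class CheckClass : public MetadataClosure {
  void do_metadata(Metadata* md) {
    Klass* klass = NULL;
    if (md->is_klass()) {
      klass = ((Klass*)md);
    } else if (md->is_method()) {
      klass = ((Method*)md)->method_holder();
    } else if (md->is_methodData()) {
      klass = ((MethodData*)md)->method()->method_holder();
    } else {
      md->print();
      ShouldNotReachHere();
      return; // unreachable in practice; lets the analyzer prove klass != NULL below
    }
    assert(klass != NULL, "must not be NULL"); // states the invariant explicitly
    assert(klass->is_loader_alive(), "must be alive");
  }
};
#endif // ASSERT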