Bug Summary

File: jdk/src/hotspot/share/runtime/synchronizer.cpp
Warning: line 112, column 11
Called C++ object pointer is null
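
The path the analyzer reports can be condensed as follows. This is a minimal, hypothetical sketch (std::atomic stands in for Atomic::cmpxchg, and the Node type and helper names are illustrative, not the HotSpot classes): 'prev' starts out null, the compare-and-swap on the list head fails, and the recovery loop that should re-establish 'prev' is assumed to execute zero times, so the call through 'prev' dereferences null.

  #include <atomic>

  struct Node {                        // stand-in for ObjectMonitor
    Node* next;
    void set_next(Node* n) { next = n; }
  };

  // Condensed shape of MonitorList::unlink_deflated's head-unlink branch.
  void unlink_front(std::atomic<Node*>& head_slot, Node* head, Node* m, Node* next) {
    Node* prev = nullptr;                                      // line 87: 'prev' initialized to null
    Node* expected = head;
    if (!head_slot.compare_exchange_strong(expected, next)) {  // line 106: cmpxchg fails
      for (Node* n = expected; n != m; n = n->next) {          // analyzer assumes n == m here,
        prev = n;                                              // so the body never runs
      }
      prev->set_next(next);                                    // line 112: null dereference on that path
    }
  }

Whether this path is reachable in the real code depends on an invariant the analyzer cannot see: when the cmpxchg fails, the new head was prepended by a concurrent MonitorList::add() and is therefore distinct from m, so the recovery loop should run at least once.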

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name synchronizer.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 2 -mthread-model posix -fno-delete-null-pointer-checks -mframe-pointer=all -relaxed-aliasing -fmath-errno -fno-rounding-math -masm-verbose -mconstructor-aliases -munwind-tables -target-cpu x86-64 -dwarf-column-info -fno-split-dwarf-inlining -debugger-tuning=gdb -resource-dir /usr/lib/llvm-10/lib/clang/10.0.0 -I /home/daniel/Projects/java/jdk/build/linux-x86_64-server-fastdebug/hotspot/variant-server/libjvm/objs/precompiled -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -D __STDC_CONSTANT_MACROS -D _GNU_SOURCE -D _REENTRANT -D LIBC=gnu -D LINUX -D VM_LITTLE_ENDIAN -D _LP64=1 -D ASSERT -D CHECK_UNHANDLED_OOPS -D TARGET_ARCH_x86 -D INCLUDE_SUFFIX_OS=_linux -D INCLUDE_SUFFIX_CPU=_x86 -D INCLUDE_SUFFIX_COMPILER=_gcc -D TARGET_COMPILER_gcc -D AMD64 -D HOTSPOT_LIB_ARCH="amd64" -D COMPILER1 -D COMPILER2 -I /home/daniel/Projects/java/jdk/build/linux-x86_64-server-fastdebug/hotspot/variant-server/gensrc/adfiles -I /home/daniel/Projects/java/jdk/src/hotspot/share -I /home/daniel/Projects/java/jdk/src/hotspot/os/linux -I /home/daniel/Projects/java/jdk/src/hotspot/os/posix -I /home/daniel/Projects/java/jdk/src/hotspot/cpu/x86 -I /home/daniel/Projects/java/jdk/src/hotspot/os_cpu/linux_x86 -I /home/daniel/Projects/java/jdk/build/linux-x86_64-server-fastdebug/hotspot/variant-server/gensrc -I /home/daniel/Projects/java/jdk/src/hotspot/share/precompiled -I /home/daniel/Projects/java/jdk/src/hotspot/share/include -I /home/daniel/Projects/java/jdk/src/hotspot/os/posix/include -I /home/daniel/Projects/java/jdk/build/linux-x86_64-server-fastdebug/support/modules_include/java.base -I /home/daniel/Projects/java/jdk/build/linux-x86_64-server-fastdebug/support/modules_include/java.base/linux -I /home/daniel/Projects/java/jdk/src/java.base/share/native/libjimage -I /home/daniel/Projects/java/jdk/build/linux-x86_64-server-fastdebug/hotspot/variant-server/gensrc/adfiles -I /home/daniel/Projects/java/jdk/src/hotspot/share -I /home/daniel/Projects/java/jdk/src/hotspot/os/linux -I /home/daniel/Projects/java/jdk/src/hotspot/os/posix -I /home/daniel/Projects/java/jdk/src/hotspot/cpu/x86 -I /home/daniel/Projects/java/jdk/src/hotspot/os_cpu/linux_x86 -I /home/daniel/Projects/java/jdk/build/linux-x86_64-server-fastdebug/hotspot/variant-server/gensrc -D _FORTIFY_SOURCE=2 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.5.0/../../../../include/c++/7.5.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.5.0/../../../../include/x86_64-linux-gnu/c++/7.5.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.5.0/../../../../include/x86_64-linux-gnu/c++/7.5.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.5.0/../../../../include/c++/7.5.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-10/lib/clang/10.0.0/include 
-internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -Wno-format-zero-length -Wno-unused-parameter -Wno-unused -Wno-parentheses -Wno-comment -Wno-unknown-pragmas -Wno-address -Wno-delete-non-virtual-dtor -Wno-char-subscripts -Wno-array-bounds -Wno-int-in-bool-context -Wno-ignored-qualifiers -Wno-missing-field-initializers -Wno-implicit-fallthrough -Wno-empty-body -Wno-strict-overflow -Wno-sequence-point -Wno-maybe-uninitialized -Wno-misleading-indentation -Wno-cast-function-type -Wno-shift-negative-value -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /home/daniel/Projects/java/jdk/make/hotspot -ferror-limit 19 -fmessage-length 0 -fvisibility hidden -stack-protector 1 -fno-rtti -fgnuc-version=4.2.1 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -faddrsig -o /home/daniel/Projects/java/scan/2021-12-21-193737-8510-1 -x c++ /home/daniel/Projects/java/jdk/src/hotspot/share/runtime/synchronizer.cpp

/home/daniel/Projects/java/jdk/src/hotspot/share/runtime/synchronizer.cpp

1/*
2 * Copyright (c) 1998, 2021, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#include "precompiled.hpp"
26#include "classfile/vmSymbols.hpp"
27#include "jfr/jfrEvents.hpp"
28#include "logging/log.hpp"
29#include "logging/logStream.hpp"
30#include "memory/allocation.inline.hpp"
31#include "memory/padded.hpp"
32#include "memory/resourceArea.hpp"
33#include "memory/universe.hpp"
34#include "oops/markWord.hpp"
35#include "oops/oop.inline.hpp"
36#include "runtime/atomic.hpp"
37#include "runtime/handles.inline.hpp"
38#include "runtime/handshake.hpp"
39#include "runtime/interfaceSupport.inline.hpp"
40#include "runtime/mutexLocker.hpp"
41#include "runtime/objectMonitor.hpp"
42#include "runtime/objectMonitor.inline.hpp"
43#include "runtime/os.inline.hpp"
44#include "runtime/osThread.hpp"
45#include "runtime/perfData.hpp"
46#include "runtime/safepointMechanism.inline.hpp"
47#include "runtime/safepointVerifiers.hpp"
48#include "runtime/sharedRuntime.hpp"
49#include "runtime/stubRoutines.hpp"
50#include "runtime/synchronizer.hpp"
51#include "runtime/thread.inline.hpp"
52#include "runtime/timer.hpp"
53#include "runtime/vframe.hpp"
54#include "runtime/vmThread.hpp"
55#include "utilities/align.hpp"
56#include "utilities/dtrace.hpp"
57#include "utilities/events.hpp"
58#include "utilities/preserveException.hpp"
59
60void MonitorList::add(ObjectMonitor* m) {
61 ObjectMonitor* head;
62 do {
63 head = Atomic::load(&_head);
64 m->set_next_om(head);
65 } while (Atomic::cmpxchg(&_head, head, m) != head);
66
67 size_t count = Atomic::add(&_count, 1u);
68 if (count > max()) {
69 Atomic::inc(&_max);
70 }
71}
72
73size_t MonitorList::count() const {
74 return Atomic::load(&_count);
75}
76
77size_t MonitorList::max() const {
78 return Atomic::load(&_max);
79}
80
81// Walk the in-use list and unlink (at most MonitorDeflationMax) deflated
82// ObjectMonitors. Returns the number of unlinked ObjectMonitors.
83size_t MonitorList::unlink_deflated(Thread* current, LogStream* ls,
84 elapsedTimer* timer_p,
85 GrowableArray<ObjectMonitor*>* unlinked_list) {
86 size_t unlinked_count = 0;
87 ObjectMonitor* prev = NULL;
19
'prev' initialized to a null pointer value
88 ObjectMonitor* head = Atomic::load_acquire(&_head);
89 ObjectMonitor* m = head;
90 // The in-use list head can be NULL during the final audit.
91 while (m != NULL) {
20
Assuming 'm' is not equal to NULL
21
Loop condition is true. Entering loop body
37
Assuming 'm' is not equal to NULL
38
Loop condition is true. Entering loop body
92 if (m->is_being_async_deflated()) {
22
Calling 'ObjectMonitor::is_being_async_deflated'
25
Returning from 'ObjectMonitor::is_being_async_deflated'
26
Taking true branch
39
Calling 'ObjectMonitor::is_being_async_deflated'
42
Returning from 'ObjectMonitor::is_being_async_deflated'
43
Taking true branch
93 // Find next live ObjectMonitor.
94 ObjectMonitor* next = m;
95 do {
96 ObjectMonitor* next_next = next->next_om();
97 unlinked_count++;
98 unlinked_list->append(next);
99 next = next_next;
100 if (unlinked_count >= (size_t)MonitorDeflationMax) {
27
Assuming 'unlinked_count' is >= 'MonitorDeflationMax'
28
Taking true branch
44
Assuming 'unlinked_count' is >= 'MonitorDeflationMax'
45
Taking true branch
101 // Reached the max so bail out on the gathering loop.
102 break;
29
Execution continues on line 105
46
Execution continues on line 105
103 }
104 } while (next != NULL && next->is_being_async_deflated());
105 if (prev == NULL) {
29.1
'prev' is equal to NULL
46.1
'prev' is equal to NULL
30
Taking true branch
47
Taking true branch
106 ObjectMonitor* prev_head = Atomic::cmpxchg(&_head, head, next);
107 if (prev_head != head) {
31
Assuming 'prev_head' is equal to 'head'
32
Taking false branch
48
Assuming 'prev_head' is not equal to 'head'
49
Taking true branch
108 // Find new prev ObjectMonitor that just got inserted.
109 for (ObjectMonitor* n = prev_head; n != m; n = n->next_om()) {
50
Assuming 'n' is equal to 'm'
51
Loop condition is false. Execution continues on line 112
110 prev = n;
111 }
112 prev->set_next_om(next);
52
Called C++ object pointer is null
113 }
114 } else {
115 prev->set_next_om(next);
116 }
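// [Editor's note, not part of synchronizer.cpp] The warning above assumes the loop guard
// 'n != m' at line 109 is false on entry, so 'prev' still holds the NULL assigned at
// line 87 when line 112 calls set_next_om() through it. On this path the cmpxchg at
// line 106 failed, i.e. a concurrent MonitorList::add() prepended a new monitor, so
// prev_head should differ from m and the loop should run at least once; that invariant
// is invisible to the analyzer. A defensive variant (hypothetical, not a maintainer fix)
// would make the assumption explicit, e.g.:
//
//   for (ObjectMonitor* n = prev_head; n != m; n = n->next_om()) {
//     prev = n;
//   }
//   assert(prev != NULL, "a failed cmpxchg implies a newly added head before m");
//   prev->set_next_om(next);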
117 if (unlinked_count >= (size_t)MonitorDeflationMax) {
33
Assuming 'unlinked_count' is < 'MonitorDeflationMax'
34
Taking false branch
118 // Reached the max so bail out on the searching loop.
119 break;
120 }
121 m = next;
122 } else {
123 prev = m;
124 m = m->next_om();
125 }
126
127 if (current->is_Java_thread()) {
35
Assuming the condition is false
36
Taking false branch
128 // A JavaThread must check for a safepoint/handshake and honor it.
129 ObjectSynchronizer::chk_for_block_req(JavaThread::cast(current), "unlinking",
130 "unlinked_count", unlinked_count,
131 ls, timer_p);
132 }
133 }
134 Atomic::sub(&_count, unlinked_count);
135 return unlinked_count;
136}
137
138MonitorList::Iterator MonitorList::iterator() const {
139 return Iterator(Atomic::load_acquire(&_head));
140}
141
142ObjectMonitor* MonitorList::Iterator::next() {
143 ObjectMonitor* current = _current;
144 _current = current->next_om();
145 return current;
146}
147
148// The "core" versions of monitor enter and exit reside in this file.
149// The interpreter and compilers contain specialized transliterated
150// variants of the enter-exit fast-path operations. See c2_MacroAssembler_x86.cpp
151// fast_lock(...) for instance. If you make changes here, make sure to modify the
152// interpreter, and both C1 and C2 fast-path inline locking code emission.
153//
154// -----------------------------------------------------------------------------
155
156#ifdef DTRACE_ENABLED
157
158// Only bother with this argument setup if dtrace is available
159// TODO-FIXME: probes should not fire when caller is _blocked. assert() accordingly.
160
161#define DTRACE_MONITOR_PROBE_COMMON(obj, thread) \
162 char* bytes = NULL; \
163 int len = 0; \
164 jlong jtid = SharedRuntime::get_java_tid(thread); \
165 Symbol* klassname = obj->klass()->name(); \
166 if (klassname != NULL) { \
167 bytes = (char*)klassname->bytes(); \
168 len = klassname->utf8_length(); \
169 }
170
171#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis) \
172 { \
173 if (DTraceMonitorProbes) { \
174 DTRACE_MONITOR_PROBE_COMMON(obj, thread); \
175 HOTSPOT_MONITOR_WAIT(jtid, \
176 (uintptr_t)(monitor), bytes, len, (millis)); \
177 } \
178 }
179
180#define HOTSPOT_MONITOR_PROBE_notify HOTSPOT_MONITOR_NOTIFY
181#define HOTSPOT_MONITOR_PROBE_notifyAll HOTSPOT_MONITOR_NOTIFYALL
182#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED
183
184#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread) \
185 { \
186 if (DTraceMonitorProbes) { \
187 DTRACE_MONITOR_PROBE_COMMON(obj, thread); \
188 HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */ \
189 (uintptr_t)(monitor), bytes, len); \
190 } \
191 }
192
193#else // ndef DTRACE_ENABLED
194
195#define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon) {;}
196#define DTRACE_MONITOR_PROBE(probe, obj, thread, mon) {;}
197
198#endif // ndef DTRACE_ENABLED
199
200// This exists only as a workaround of dtrace bug 6254741
201int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
202 DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
203 return 0;
204}
205
206static const int NINFLATIONLOCKS = 256;
207static os::PlatformMutex* gInflationLocks[NINFLATIONLOCKS];
208
209void ObjectSynchronizer::initialize() {
210 for (int i = 0; i < NINFLATIONLOCKS; i++) {
211 gInflationLocks[i] = new os::PlatformMutex();
212 }
213 // Start the ceiling with the estimate for one thread.
214 set_in_use_list_ceiling(AvgMonitorsPerThreadEstimate);
215}
216
217MonitorList ObjectSynchronizer::_in_use_list;
218// monitors_used_above_threshold() policy is as follows:
219//
220// The ratio of the current _in_use_list count to the ceiling is used
221// to determine if we are above MonitorUsedDeflationThreshold and need
222// to do an async monitor deflation cycle. The ceiling is increased by
223// AvgMonitorsPerThreadEstimate when a thread is added to the system
224// and is decreased by AvgMonitorsPerThreadEstimate when a thread is
225// removed from the system.
226//
227// Note: If the _in_use_list max exceeds the ceiling, then
228// monitors_used_above_threshold() will use the in_use_list max instead
229// of the thread count derived ceiling because we have used more
230// ObjectMonitors than the estimated average.
231//
232// Note: If deflate_idle_monitors() has NoAsyncDeflationProgressMax
233// no-progress async monitor deflation cycles in a row, then the ceiling
234// is adjusted upwards by monitors_used_above_threshold().
235//
236// Start the ceiling with the estimate for one thread in initialize()
237// which is called after cmd line options are processed.
238static size_t _in_use_list_ceiling = 0;
239bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
240bool volatile ObjectSynchronizer::_is_final_audit = false;
241jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
242static uintx _no_progress_cnt = 0;
243
244// =====================> Quick functions
245
246// The quick_* forms are special fast-path variants used to improve
247// performance. In the simplest case, a "quick_*" implementation could
248// simply return false, in which case the caller will perform the necessary
249// state transitions and call the slow-path form.
250// The fast-path is designed to handle frequently arising cases in an efficient
251// manner and is just a degenerate "optimistic" variant of the slow-path.
252// returns true -- to indicate the call was satisfied.
253// returns false -- to indicate the call needs the services of the slow-path.
254// A no-loitering ordinance is in effect for code in the quick_* family
255// operators: safepoints or indefinite blocking (blocking that might span a
256// safepoint) are forbidden. Generally the thread_state() is _in_Java upon
257// entry.
258//
259// Consider: An interesting optimization is to have the JIT recognize the
260// following common idiom:
261// synchronized (someobj) { .... ; notify(); }
262// That is, we find a notify() or notifyAll() call that immediately precedes
263// the monitorexit operation. In that case the JIT could fuse the operations
264// into a single notifyAndExit() runtime primitive.
265
266bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
267 assert(current->thread_state() == _thread_in_Java, "invariant");
268 NoSafepointVerifier nsv;
269 if (obj == NULL) return false; // slow-path for invalid obj
270 const markWord mark = obj->mark();
271
272 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
273 // Degenerate notify
274 // stack-locked by caller so by definition the implied waitset is empty.
275 return true;
276 }
277
278 if (mark.has_monitor()) {
279 ObjectMonitor* const mon = mark.monitor();
280 assert(mon->object() == oop(obj), "invariant");
281 if (mon->owner() != current) return false; // slow-path for IMS exception
282
283 if (mon->first_waiter() != NULL) {
284 // We have one or more waiters. Since this is an inflated monitor
285 // that we own, we can transfer one or more threads from the waitset
286 // to the entrylist here and now, avoiding the slow-path.
287 if (all) {
288 DTRACE_MONITOR_PROBE(notifyAll, mon, obj, current);
289 } else {
290 DTRACE_MONITOR_PROBE(notify, mon, obj, current);
291 }
292 int free_count = 0;
293 do {
294 mon->INotify(current);
295 ++free_count;
296 } while (mon->first_waiter() != NULL && all);
297 OM_PERFDATA_OP(Notifications, inc(free_count));
298 }
299 return true;
300 }
301
302 // other IMS exception states take the slow-path
303 return false;
304}
305
306
307// The LockNode emitted directly at the synchronization site would have
308// been too big if it were to have included support for the cases of inflated
309// recursive enter and exit, so they go here instead.
310// Note that we can't safely call AsyncPrintJavaStack() from within
311// quick_enter() as our thread state remains _in_Java.
312
313bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current,
314 BasicLock * lock) {
315 assert(current->thread_state() == _thread_in_Java, "invariant");
316 NoSafepointVerifier nsv;
317 if (obj == NULL) return false; // Need to throw NPE
318
319 if (obj->klass()->is_value_based()) {
320 return false;
321 }
322
323 const markWord mark = obj->mark();
324
325 if (mark.has_monitor()) {
326 ObjectMonitor* const m = mark.monitor();
327 // An async deflation or GC can race us before we manage to make
328 // the ObjectMonitor busy by setting the owner below. If we detect
329 // that race we just bail out to the slow-path here.
330 if (m->object_peek() == NULL) {
331 return false;
332 }
333 JavaThread* const owner = (JavaThread*) m->owner_raw();
334
335 // Lock contention and Transactional Lock Elision (TLE) diagnostics
336 // and observability
337 // Case: light contention possibly amenable to TLE
338 // Case: TLE inimical operations such as nested/recursive synchronization
339
340 if (owner == current) {
341 m->_recursions++;
342 return true;
343 }
344
345 // This Java Monitor is inflated so obj's header will never be
346 // displaced to this thread's BasicLock. Make the displaced header
347 // non-NULL so this BasicLock is not seen as recursive nor as
348 // being locked. We do this unconditionally so that this thread's
349 // BasicLock cannot be mis-interpreted by any stack walkers. For
350 // performance reasons, stack walkers generally first check for
351 // stack-locking in the object's header, the second check is for
352 // recursive stack-locking in the displaced header in the BasicLock,
353 // and last are the inflated Java Monitor (ObjectMonitor) checks.
354 lock->set_displaced_header(markWord::unused_mark());
355
356 if (owner == NULL && m->try_set_owner_from(NULL, current) == NULL) {
357 assert(m->_recursions == 0, "invariant");
358 return true;
359 }
360 }
361
362 // Note that we could inflate in quick_enter.
363 // This is likely a useful optimization
364 // Critically, in quick_enter() we must not:
365 // -- block indefinitely, or
366 // -- reach a safepoint
367
368 return false; // revert to slow-path
369}
370
371// Handle notifications when synchronizing on value based classes
372void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, JavaThread* current) {
373 frame last_frame = current->last_frame();
374 bool bcp_was_adjusted = false;
375 // Don't decrement bcp if it points to the frame's first instruction. This happens when
376 // handle_sync_on_value_based_class() is called because of a synchronized method. There
377 // is no actual monitorenter instruction in the byte code in this case.
378 if (last_frame.is_interpreted_frame() &&
379 (last_frame.interpreter_frame_method()->code_base() < last_frame.interpreter_frame_bcp())) {
380 // adjust bcp to point back to monitorenter so that we print the correct line numbers
381 last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() - 1);
382 bcp_was_adjusted = true;
383 }
384
385 if (DiagnoseSyncOnValueBasedClasses == FATAL_EXIT) {
386 ResourceMark rm(current);
387 stringStream ss;
388 current->print_stack_on(&ss);
389 char* base = (char*)strstr(ss.base(), "at");
390 char* newline = (char*)strchr(ss.base(), '\n');
391 if (newline != NULL) {
392 *newline = '\0';
393 }
394 fatal("Synchronizing on object " INTPTR_FORMAT " of klass %s %s", p2i(obj()), obj->klass()->external_name(), base)do { (*g_assert_poison) = 'X';; report_fatal(INTERNAL_ERROR, "/home/daniel/Projects/java/jdk/src/hotspot/share/runtime/synchronizer.cpp"
, 394, "Synchronizing on object " "0x%016" "l" "x" " of klass %s %s"
, p2i(obj()), obj->klass()->external_name(), base); ::breakpoint
(); } while (0)
;
395 } else {
396 assert(DiagnoseSyncOnValueBasedClasses == LOG_WARNING, "invalid value for DiagnoseSyncOnValueBasedClasses");
397 ResourceMark rm(current);
398 Log(valuebasedclasses) vblog;
399
400 vblog.info("Synchronizing on object " INTPTR_FORMAT " of klass %s", p2i(obj()), obj->klass()->external_name());
401 if (current->has_last_Java_frame()) {
402 LogStream info_stream(vblog.info());
403 current->print_stack_on(&info_stream);
404 } else {
405 vblog.info("Cannot find the last Java frame");
406 }
407
408 EventSyncOnValueBasedClass event;
409 if (event.should_commit()) {
410 event.set_valueBasedClass(obj->klass());
411 event.commit();
412 }
413 }
414
415 if (bcp_was_adjusted) {
416 last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
417 }
418}
419
420static bool useHeavyMonitors() {
421#if defined(X86) || defined(AARCH64) || defined(PPC64)
422 return UseHeavyMonitors;
423#else
424 return false;
425#endif
426}
427
428// -----------------------------------------------------------------------------
429// Monitor Enter/Exit
430// The interpreter and compiler assembly code tries to lock using the fast path
431// of this algorithm. Make sure to update that code if the following function is
432// changed. The implementation is extremely sensitive to race condition. Be careful.
433
434void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
435 if (obj->klass()->is_value_based()) {
436 handle_sync_on_value_based_class(obj, current);
437 }
438
439 if (!useHeavyMonitors()) {
440 markWord mark = obj->mark();
441 if (mark.is_neutral()) {
442 // Anticipate successful CAS -- the ST of the displaced mark must
443 // be visible <= the ST performed by the CAS.
444 lock->set_displaced_header(mark);
445 if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
446 return;
447 }
448 // Fall through to inflate() ...
449 } else if (mark.has_locker() &&
450 current->is_lock_owned((address)mark.locker())) {
451 assert(lock != mark.locker(), "must not re-lock the same lock");
452 assert(lock != (BasicLock*)obj->mark().value(), "don't relock with same BasicLock");
453 lock->set_displaced_header(markWord::from_pointer(NULL));
454 return;
455 }
456
457 // The object header will never be displaced to this lock,
458 // so it does not matter what the value is, except that it
459 // must be non-zero to avoid looking like a re-entrant lock,
460 // and must not look locked either.
461 lock->set_displaced_header(markWord::unused_mark());
462 } else if (VerifyHeavyMonitors) {
463 guarantee(!obj->mark().has_locker(), "must not be stack-locked");
464 }
465
466 // An async deflation can race after the inflate() call and before
467 // enter() can make the ObjectMonitor busy. enter() returns false if
468 // we have lost the race to async deflation and we simply try again.
469 while (true) {
470 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
471 if (monitor->enter(current)) {
472 return;
473 }
474 }
475}
476
477void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
478 if (!useHeavyMonitors()) {
479 markWord mark = object->mark();
480
481 markWord dhw = lock->displaced_header();
482 if (dhw.value() == 0) {
483 // If the displaced header is NULL, then this exit matches up with
484 // a recursive enter. No real work to do here except for diagnostics.
485#ifndef PRODUCT
486 if (mark != markWord::INFLATING()) {
487 // Only do diagnostics if we are not racing an inflation. Simply
488 // exiting a recursive enter of a Java Monitor that is being
489 // inflated is safe; see the has_monitor() comment below.
490 assert(!mark.is_neutral(), "invariant");
491 assert(!mark.has_locker() ||
492 current->is_lock_owned((address)mark.locker()), "invariant");
493 if (mark.has_monitor()) {
494 // The BasicLock's displaced_header is marked as a recursive
495 // enter and we have an inflated Java Monitor (ObjectMonitor).
496 // This is a special case where the Java Monitor was inflated
497 // after this thread entered the stack-lock recursively. When a
498 // Java Monitor is inflated, we cannot safely walk the Java
499 // Monitor owner's stack and update the BasicLocks because a
500 // Java Monitor can be asynchronously inflated by a thread that
501 // does not own the Java Monitor.
502 ObjectMonitor* m = mark.monitor();
503 assert(m->object()->mark() == mark, "invariant");
504 assert(m->is_entered(current), "invariant");
505 }
506 }
507#endif
508 return;
509 }
510
511 if (mark == markWord::from_pointer(lock)) {
512 // If the object is stack-locked by the current thread, try to
513 // swing the displaced header from the BasicLock back to the mark.
514 assert(dhw.is_neutral(), "invariant");
515 if (object->cas_set_mark(dhw, mark) == mark) {
516 return;
517 }
518 }
519 } else if (VerifyHeavyMonitors) {
520 guarantee(!object->mark().has_locker(), "must not be stack-locked");
521 }
522
523 // We have to take the slow-path of possible inflation and then exit.
524 // The ObjectMonitor* can't be async deflated until ownership is
525 // dropped inside exit() and the ObjectMonitor* must be !is_busy().
526 ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
527 monitor->exit(current);
528}
529
530// -----------------------------------------------------------------------------
531// Class Loader support to workaround deadlocks on the class loader lock objects
532// Also used by GC
533// complete_exit()/reenter() are used to wait on a nested lock
534// i.e. to give up an outer lock completely and then re-enter
535// Used when holding nested locks - lock acquisition order: lock1 then lock2
536// 1) complete_exit lock1 - saving recursion count
537// 2) wait on lock2
538// 3) when notified on lock2, unlock lock2
539// 4) reenter lock1 with original recursion count
540// 5) lock lock2
541// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
542intx ObjectSynchronizer::complete_exit(Handle obj, JavaThread* current) {
543 // The ObjectMonitor* can't be async deflated until ownership is
544 // dropped inside exit() and the ObjectMonitor* must be !is_busy().
545 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_vm_internal);
546 intptr_t ret_code = monitor->complete_exit(current);
547 return ret_code;
548}
549
550// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
551void ObjectSynchronizer::reenter(Handle obj, intx recursions, JavaThread* current) {
552 // An async deflation can race after the inflate() call and before
553 // reenter() -> enter() can make the ObjectMonitor busy. reenter() ->
554 // enter() returns false if we have lost the race to async deflation
555 // and we simply try again.
556 while (true) {
557 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_vm_internal);
558 if (monitor->reenter(recursions, current)) {
559 return;
560 }
561 }
562}
563
564// -----------------------------------------------------------------------------
565// JNI locks on java objects
566// NOTE: must use heavy weight monitor to handle jni monitor enter
567void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
568 if (obj->klass()->is_value_based()) {
569 handle_sync_on_value_based_class(obj, current);
570 }
571
572 // the current locking is from JNI instead of Java code
573 current->set_current_pending_monitor_is_from_java(false);
574 // An async deflation can race after the inflate() call and before
575 // enter() can make the ObjectMonitor busy. enter() returns false if
576 // we have lost the race to async deflation and we simply try again.
577 while (true) {
578 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_jni_enter);
579 if (monitor->enter(current)) {
580 break;
581 }
582 }
583 current->set_current_pending_monitor_is_from_java(true);
584}
585
586// NOTE: must use heavy weight monitor to handle jni monitor exit
587void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
588 JavaThread* current = THREAD;
589
590 // The ObjectMonitor* can't be async deflated until ownership is
591 // dropped inside exit() and the ObjectMonitor* must be !is_busy().
592 ObjectMonitor* monitor = inflate(current, obj, inflate_cause_jni_exit);
593 // If this thread has locked the object, exit the monitor. We
594 // intentionally do not use CHECK on check_owner because we must exit the
595 // monitor even if an exception was already pending.
596 if (monitor->check_owner(THREAD)) {
597 monitor->exit(current);
598 }
599}
600
601// -----------------------------------------------------------------------------
602// Internal VM locks on java objects
603// standard constructor, allows locking failures
604ObjectLocker::ObjectLocker(Handle obj, JavaThread* thread) {
605 _thread = thread;
606 _thread->check_for_valid_safepoint_state();
607 _obj = obj;
608
609 if (_obj() != NULL) {
610 ObjectSynchronizer::enter(_obj, &_lock, _thread);
611 }
612}
613
614ObjectLocker::~ObjectLocker() {
615 if (_obj() != NULL) {
616 ObjectSynchronizer::exit(_obj(), &_lock, _thread);
617 }
618}
619
620
621// -----------------------------------------------------------------------------
622// Wait/Notify/NotifyAll
623// NOTE: must use heavy weight monitor to handle wait()
624int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
625 JavaThread* current = THREAD;
626 if (millis < 0) {
627 THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
628 }
629 // The ObjectMonitor* can't be async deflated because the _waiters
630 // field is incremented before ownership is dropped and decremented
631 // after ownership is regained.
632 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_wait);
633
634 DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
635 monitor->wait(millis, true, THREAD); // Not CHECK as we need following code
636
637 // This dummy call is in place to get around dtrace bug 6254741. Once
638 // that's fixed we can uncomment the following line, remove the call
639 // and change this function back into a "void" func.
640 // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
641 int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
642 return ret_code;
643}
644
645// No exception are possible in this case as we only use this internally when locking is
646// correct and we have to wait until notified - so no interrupts or timeouts.
647void ObjectSynchronizer::wait_uninterruptibly(Handle obj, JavaThread* current) {
648 // The ObjectMonitor* can't be async deflated because the _waiters
649 // field is incremented before ownership is dropped and decremented
650 // after ownership is regained.
651 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_wait);
652 monitor->wait(0 /* wait-forever */, false /* not interruptible */, current);
653}
654
655void ObjectSynchronizer::notify(Handle obj, TRAPS) {
656 JavaThread* current = THREAD;
657
658 markWord mark = obj->mark();
659 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
660 // Not inflated so there can't be any waiters to notify.
661 return;
662 }
663 // The ObjectMonitor* can't be async deflated until ownership is
664 // dropped by the calling thread.
665 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
666 monitor->notify(CHECK);
667}
668
669// NOTE: see comment of notify()
670void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
671 JavaThread* current = THREAD;
672
673 markWord mark = obj->mark();
674 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
675 // Not inflated so there can't be any waiters to notify.
676 return;
677 }
678 // The ObjectMonitor* can't be async deflated until ownership is
679 // dropped by the calling thread.
680 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
681 monitor->notifyAll(CHECK);
682}
683
684// -----------------------------------------------------------------------------
685// Hash Code handling
686
687struct SharedGlobals {
688 char _pad_prefix[OM_CACHE_LINE_SIZE];
689 // This is a highly shared mostly-read variable.
690 // To avoid false-sharing it needs to be the sole occupant of a cache line.
691 volatile int stw_random;
692 DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(volatile int));
693 // Hot RW variable -- Sequester to avoid false-sharing
694 volatile int hc_sequence;
695 DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(volatile int));
696};
697
698static SharedGlobals GVars;
699
700static markWord read_stable_mark(oop obj) {
701 markWord mark = obj->mark_acquire();
702 if (!mark.is_being_inflated()) {
703 return mark; // normal fast-path return
704 }
705
706 int its = 0;
707 for (;;) {
708 markWord mark = obj->mark_acquire();
709 if (!mark.is_being_inflated()) {
710 return mark; // normal fast-path return
711 }
712
713 // The object is being inflated by some other thread.
714 // The caller of read_stable_mark() must wait for inflation to complete.
715 // Avoid live-lock.
716
717 ++its;
718 if (its > 10000 || !os::is_MP()) {
719 if (its & 1) {
720 os::naked_yield();
721 } else {
722 // Note that the following code attenuates the livelock problem but is not
723 // a complete remedy. A more complete solution would require that the inflating
724 // thread hold the associated inflation lock. The following code simply restricts
725 // the number of spinners to at most one. We'll have N-2 threads blocked
726 // on the inflationlock, 1 thread holding the inflation lock and using
727 // a yield/park strategy, and 1 thread in the midst of inflation.
728 // A more refined approach would be to change the encoding of INFLATING
729 // to allow encapsulation of a native thread pointer. Threads waiting for
730 // inflation to complete would use CAS to push themselves onto a singly linked
731 // list rooted at the markword. Once enqueued, they'd loop, checking a per-thread flag
732 // and calling park(). When inflation was complete the thread that accomplished inflation
733 // would detach the list and set the markword to inflated with a single CAS and
734 // then for each thread on the list, set the flag and unpark() the thread.
735
736 // Index into the lock array based on the current object address.
737 static_assert(is_power_of_2(NINFLATIONLOCKS), "must be");
738 int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1);
739 int YieldThenBlock = 0;
740 assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant");
741 gInflationLocks[ix]->lock();
742 while (obj->mark_acquire() == markWord::INFLATING()) {
743 // Beware: naked_yield() is advisory and has almost no effect on some platforms
744 // so we periodically call current->_ParkEvent->park(1).
745 // We use a mixed spin/yield/block mechanism.
746 if ((YieldThenBlock++) >= 16) {
747 Thread::current()->_ParkEvent->park(1);
748 } else {
749 os::naked_yield();
750 }
751 }
752 gInflationLocks[ix]->unlock();
753 }
754 } else {
755 SpinPause(); // SMP-polite spinning
756 }
757 }
758}
759
760// hashCode() generation :
761//
762// Possibilities:
763// * MD5Digest of {obj,stw_random}
764// * CRC32 of {obj,stw_random} or any linear-feedback shift register function.
765// * A DES- or AES-style SBox[] mechanism
766// * One of the Phi-based schemes, such as:
767// 2654435761 = 2^32 * Phi (golden ratio)
768// HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stw_random ;
769// * A variation of Marsaglia's shift-xor RNG scheme.
770// * (obj ^ stw_random) is appealing, but can result
771// in undesirable regularity in the hashCode values of adjacent objects
772// (objects allocated back-to-back, in particular). This could potentially
773// result in hashtable collisions and reduced hashtable efficiency.
774// There are simple ways to "diffuse" the middle address bits over the
775// generated hashCode values:
776
777static inline intptr_t get_next_hash(Thread* current, oop obj) {
778 intptr_t value = 0;
779 if (hashCode == 0) {
780 // This form uses global Park-Miller RNG.
781 // On MP system we'll have lots of RW access to a global, so the
782 // mechanism induces lots of coherency traffic.
783 value = os::random();
784 } else if (hashCode == 1) {
785 // This variation has the property of being stable (idempotent)
786 // between STW operations. This can be useful in some of the 1-0
787 // synchronization schemes.
788 intptr_t addr_bits = cast_from_oop<intptr_t>(obj) >> 3;
789 value = addr_bits ^ (addr_bits >> 5) ^ GVars.stw_random;
790 } else if (hashCode == 2) {
791 value = 1; // for sensitivity testing
792 } else if (hashCode == 3) {
793 value = ++GVars.hc_sequence;
794 } else if (hashCode == 4) {
795 value = cast_from_oop<intptr_t>(obj);
796 } else {
797 // Marsaglia's xor-shift scheme with thread-specific state
798 // This is probably the best overall implementation -- we'll
799 // likely make this the default in future releases.
800 unsigned t = current->_hashStateX;
801 t ^= (t << 11);
802 current->_hashStateX = current->_hashStateY;
803 current->_hashStateY = current->_hashStateZ;
804 current->_hashStateZ = current->_hashStateW;
805 unsigned v = current->_hashStateW;
806 v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
807 current->_hashStateW = v;
808 value = v;
809 }
810
811 value &= markWord::hash_mask;
812 if (value == 0) value = 0xBAD;
813 assert(value != markWord::no_hash, "invariant");
815}
816
817intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
818
819 while (true) {
820 ObjectMonitor* monitor = NULL;
821 markWord temp, test;
822 intptr_t hash;
823 markWord mark = read_stable_mark(obj);
824 if (VerifyHeavyMonitors) {
825 assert(UseHeavyMonitors, "+VerifyHeavyMonitors requires +UseHeavyMonitors");
826 guarantee(!mark.has_locker(), "must not be stack locked");
827 }
828 if (mark.is_neutral()) { // if this is a normal header
829 hash = mark.hash();
830 if (hash != 0) { // if it has a hash, just return it
831 return hash;
832 }
833 hash = get_next_hash(current, obj); // get a new hash
834 temp = mark.copy_set_hash(hash); // merge the hash into header
835 // try to install the hash
836 test = obj->cas_set_mark(temp, mark);
837 if (test == mark) { // if the hash was installed, return it
838 return hash;
839 }
840 // Failed to install the hash. It could be that another thread
841 // installed the hash just before our attempt or inflation has
842 // occurred or... so we fall thru to inflate the monitor for
843 // stability and then install the hash.
844 } else if (mark.has_monitor()) {
845 monitor = mark.monitor();
846 temp = monitor->header();
847 assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
848 hash = temp.hash();
849 if (hash != 0) {
850 // It has a hash.
851
852 // Separate load of dmw/header above from the loads in
853 // is_being_async_deflated().
854
855 // dmw/header and _contentions may get written by different threads.
856 // Make sure to observe them in the same order when having several observers.
857 OrderAccess::loadload_for_IRIW();
858
859 if (monitor->is_being_async_deflated()) {
860 // But we can't safely use the hash if we detect that async
861 // deflation has occurred. So we attempt to restore the
862 // header/dmw to the object's header so that we only retry
863 // once if the deflater thread happens to be slow.
864 monitor->install_displaced_markword_in_object(obj);
865 continue;
866 }
867 return hash;
868 }
869 // Fall thru so we only have one place that installs the hash in
870 // the ObjectMonitor.
871 } else if (current->is_lock_owned((address)mark.locker())) {
872 // This is a stack lock owned by the calling thread so fetch the
873 // displaced markWord from the BasicLock on the stack.
874 temp = mark.displaced_mark_helper();
875 assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
876 hash = temp.hash();
877 if (hash != 0) { // if it has a hash, just return it
878 return hash;
879 }
880 // WARNING:
881 // The displaced header in the BasicLock on a thread's stack
882 // is strictly immutable. It CANNOT be changed in ANY cases.
883 // So we have to inflate the stack lock into an ObjectMonitor
884 // even if the current thread owns the lock. The BasicLock on
885 // a thread's stack can be asynchronously read by other threads
886 // during an inflate() call so any change to that stack memory
887 // may not propagate to other threads correctly.
888 }
889
890 // Inflate the monitor to set the hash.
891
892 // An async deflation can race after the inflate() call and before we
893 // can update the ObjectMonitor's header with the hash value below.
894 monitor = inflate(current, obj, inflate_cause_hash_code);
895 // Load ObjectMonitor's header/dmw field and see if it has a hash.
896 mark = monitor->header();
897 assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
898 hash = mark.hash();
899 if (hash == 0) { // if it does not have a hash
900 hash = get_next_hash(current, obj); // get a new hash
901 temp = mark.copy_set_hash(hash) ; // merge the hash into header
902 assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
903 uintptr_t v = Atomic::cmpxchg((volatile uintptr_t*)monitor->header_addr(), mark.value(), temp.value());
904 test = markWord(v);
905 if (test != mark) {
906 // The attempt to update the ObjectMonitor's header/dmw field
907 // did not work. This can happen if another thread managed to
908 // merge in the hash just before our cmpxchg().
909 // If we add any new usages of the header/dmw field, this code
910 // will need to be updated.
911 hash = test.hash();
912 assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
913 assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
914 }
915 if (monitor->is_being_async_deflated()) {
916 // If we detect that async deflation has occurred, then we
917 // attempt to restore the header/dmw to the object's header
918 // so that we only retry once if the deflater thread happens
919 // to be slow.
920 monitor->install_displaced_markword_in_object(obj);
921 continue;
922 }
923 }
924 // We finally get the hash.
925 return hash;
926 }
927}
928
929// Deprecated -- use FastHashCode() instead.
930
931intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
932 return FastHashCode(Thread::current(), obj());
933}
934
935
936bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
937 Handle h_obj) {
938 assert(current == JavaThread::current(), "Can only be called on current thread");
939 oop obj = h_obj();
940
941 markWord mark = read_stable_mark(obj);
942
943 // Uncontended case, header points to stack
944 if (mark.has_locker()) {
945 return current->is_lock_owned((address)mark.locker());
946 }
947 // Contended case, header points to ObjectMonitor (tagged pointer)
948 if (mark.has_monitor()) {
949 // The first stage of async deflation does not affect any field
950 // used by this comparison so the ObjectMonitor* is usable here.
951 ObjectMonitor* monitor = mark.monitor();
952 return monitor->is_entered(current) != 0;
953 }
954 // Unlocked case, header in place
955 assert(mark.is_neutral(), "sanity check");
956 return false;
957}
958
959// FIXME: jvmti should call this
960JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
961 oop obj = h_obj();
962 address owner = NULL;
963
964 markWord mark = read_stable_mark(obj);
965
966 // Uncontended case, header points to stack
967 if (mark.has_locker()) {
968 owner = (address) mark.locker();
969 }
970
971 // Contended case, header points to ObjectMonitor (tagged pointer)
972 else if (mark.has_monitor()) {
973 // The first stage of async deflation does not affect any field
974 // used by this comparison so the ObjectMonitor* is usable here.
975 ObjectMonitor* monitor = mark.monitor();
976 assert(monitor != NULL, "monitor should be non-null");
977 owner = (address) monitor->owner();
978 }
979
980 if (owner != NULL) {
981 // owning_thread_from_monitor_owner() may also return NULL here
982 return Threads::owning_thread_from_monitor_owner(t_list, owner);
983 }
984
985 // Unlocked case, header in place
986 // Cannot have assertion since this object may have been
987 // locked by another thread when reaching here.
988 // assert(mark.is_neutral(), "sanity check");
989
990 return NULL;
991}
992
993// Visitors ...
994
995void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure, JavaThread* thread) {
996 MonitorList::Iterator iter = _in_use_list.iterator();
997 while (iter.has_next()) {
998 ObjectMonitor* mid = iter.next();
999 if (mid->owner() != thread) {
1000 continue;
1001 }
1002 if (!mid->is_being_async_deflated() && mid->object_peek() != NULL) {
1003 // Only process with closure if the object is set.
1004
1005 // monitors_iterate() is only called at a safepoint or when the
1006 // target thread is suspended or when the target thread is
1007 // operating on itself. The current closures in use today are
1008 // only interested in an owned ObjectMonitor and ownership
1009 // cannot be dropped under the calling contexts so the
1010 // ObjectMonitor cannot be async deflated.
1011 closure->do_monitor(mid);
1012 }
1013 }
1014}
1015
1016static bool monitors_used_above_threshold(MonitorList* list) {
1017 if (MonitorUsedDeflationThreshold == 0) { // disabled case is easy
1018 return false;
1019 }
1020 // Start with ceiling based on a per-thread estimate:
1021 size_t ceiling = ObjectSynchronizer::in_use_list_ceiling();
1022 size_t old_ceiling = ceiling;
1023 if (ceiling < list->max()) {
1024 // The max used by the system has exceeded the ceiling so use that:
1025 ceiling = list->max();
1026 }
1027 size_t monitors_used = list->count();
1028 if (monitors_used == 0) { // empty list is easy
1029 return false;
1030 }
1031 if (NoAsyncDeflationProgressMax != 0 &&
1032 _no_progress_cnt >= NoAsyncDeflationProgressMax) {
1033 float remainder = (100.0 - MonitorUsedDeflationThreshold) / 100.0;
1034 size_t new_ceiling = ceiling + (ceiling * remainder) + 1;
1035 ObjectSynchronizer::set_in_use_list_ceiling(new_ceiling);
1036 log_info(monitorinflation)("Too many deflations without progress; "
1037                            "bumping in_use_list_ceiling from " SIZE_FORMAT
1038                            " to " SIZE_FORMAT, old_ceiling, new_ceiling);
1039 _no_progress_cnt = 0;
1040 ceiling = new_ceiling;
1041 }
1042
1043 // Check if our monitor usage is above the threshold:
1044 size_t monitor_usage = (monitors_used * 100LL) / ceiling;
1045 return int(monitor_usage) > MonitorUsedDeflationThreshold;
1046}
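
As a worked illustration of the arithmetic above, the following standalone snippet (all values hypothetical, not taken from this report) shows how the no-progress ceiling bump and the usage percentage interact:

  // Standalone sketch of the monitors_used_above_threshold() arithmetic.
  // All constants and counts below are made-up example values.
  #include <cstdio>
  #include <cstddef>

  int main() {
    const int    MonitorUsedDeflationThreshold = 90;  // percent (hypothetical)
    size_t       ceiling       = 10000;               // current in-use-list ceiling
    const size_t monitors_used = 9500;                // current in-use count

    // No-progress case: grow the ceiling by the unused fraction plus one.
    double remainder   = (100.0 - MonitorUsedDeflationThreshold) / 100.0;   // 0.10
    size_t new_ceiling = ceiling + (size_t)(ceiling * remainder) + 1;       // 11001

    size_t usage_pct = (monitors_used * 100) / new_ceiling;                 // 86
    printf("usage=%zu%%, threshold=%d%%, deflate=%s\n", usage_pct,
           MonitorUsedDeflationThreshold,
           (int)usage_pct > MonitorUsedDeflationThreshold ? "yes" : "no");
    return 0;
  }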
1047
1048size_t ObjectSynchronizer::in_use_list_ceiling() {
1049 return _in_use_list_ceiling;
1050}
1051
1052void ObjectSynchronizer::dec_in_use_list_ceiling() {
1053 Atomic::sub(&_in_use_list_ceiling, AvgMonitorsPerThreadEstimate);
1054}
1055
1056void ObjectSynchronizer::inc_in_use_list_ceiling() {
1057 Atomic::add(&_in_use_list_ceiling, AvgMonitorsPerThreadEstimate);
1058}
1059
1060void ObjectSynchronizer::set_in_use_list_ceiling(size_t new_value) {
1061 _in_use_list_ceiling = new_value;
1062}
1063
1064bool ObjectSynchronizer::is_async_deflation_needed() {
1065 if (is_async_deflation_requested()) {
1066 // Async deflation request.
1067 return true;
1068 }
1069 if (AsyncDeflationInterval > 0 &&
1070 time_since_last_async_deflation_ms() > AsyncDeflationInterval &&
1071 monitors_used_above_threshold(&_in_use_list)) {
1072 // It's been longer than our specified deflate interval and there
1073 // are too many monitors in use. We don't deflate more frequently
1074 // than AsyncDeflationInterval (unless is_async_deflation_requested)
1075 // in order to not swamp the MonitorDeflationThread.
1076 return true;
1077 }
1078 return false;
1079}
1080
1081bool ObjectSynchronizer::request_deflate_idle_monitors() {
1082 JavaThread* current = JavaThread::current();
1083 bool ret_code = false;
1084
1085 jlong last_time = last_async_deflation_time_ns();
1086 set_is_async_deflation_requested(true);
1087 {
1088 MonitorLocker ml(MonitorDeflation_lock, Mutex::_no_safepoint_check_flag);
1089 ml.notify_all();
1090 }
1091 const int N_CHECKS = 5;
1092 for (int i = 0; i < N_CHECKS; i++) { // sleep for at most 5 seconds
1093 if (last_async_deflation_time_ns() > last_time) {
1094 log_info(monitorinflation)("Async Deflation happened after %d check(s).", i);
1095 ret_code = true;
1096 break;
1097 }
1098 {
1099 // JavaThread has to honor the blocking protocol.
1100 ThreadBlockInVM tbivm(current);
1101 os::naked_short_sleep(999); // sleep for almost 1 second
1102 }
1103 }
1104 if (!ret_code) {
1105 log_info(monitorinflation)("Async Deflation DID NOT happen after %d checks.", N_CHECKS);
1106 }
1107
1108 return ret_code;
1109}
1110
1111jlong ObjectSynchronizer::time_since_last_async_deflation_ms() {
1112 return (os::javaTimeNanos() - last_async_deflation_time_ns()) / (NANOUNITS / MILLIUNITS);
1113}
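
For reference, in HotSpot NANOUNITS is 1,000,000,000 and MILLIUNITS is 1,000, so the divisor above is simply nanoseconds-per-millisecond. A minimal standalone check of the conversion (the elapsed value is hypothetical):

  #include <cstdio>
  #include <cstdint>

  int main() {
    const int64_t NANOUNITS  = 1000000000;   // ns per second
    const int64_t MILLIUNITS = 1000;         // ms per second
    int64_t elapsed_ns = 2500000000LL;       // hypothetical elapsed time
    // 2,500,000,000 ns / 1,000,000 (ns per ms) = 2500 ms
    printf("%lld ms\n", (long long)(elapsed_ns / (NANOUNITS / MILLIUNITS)));
    return 0;
  }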
1114
1115static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
1116 const oop obj,
1117 ObjectSynchronizer::InflateCause cause) {
1118 assert(event != NULL, "invariant");
1119 assert(event->should_commit(), "invariant");
1120 event->set_monitorClass(obj->klass());
1121 event->set_address((uintptr_t)(void*)obj);
1122 event->set_cause((u1)cause);
1123 event->commit();
1124}
1125
1126// Fast path code shared by multiple functions
1127void ObjectSynchronizer::inflate_helper(oop obj) {
1128 markWord mark = obj->mark_acquire();
1129 if (mark.has_monitor()) {
1130 ObjectMonitor* monitor = mark.monitor();
1131 markWord dmw = monitor->header();
1132 assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
1133 return;
1134 }
1135 (void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
1136}
1137
1138ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object,
1139 const InflateCause cause) {
1140 EventJavaMonitorInflate event;
1141
1142 for (;;) {
1143 const markWord mark = object->mark_acquire();
1144
1145 // The mark can be in one of the following states:
1146 // * Inflated - just return
1147 // * Stack-locked - coerce it to inflated
1148 // * INFLATING - busy wait for conversion to complete
1149 // * Neutral - aggressively inflate the object.
1150
1151 // CASE: inflated
1152 if (mark.has_monitor()) {
1153 ObjectMonitor* inf = mark.monitor();
1154 markWord dmw = inf->header();
1155 assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1156 return inf;
1157 }
1158
1159 // CASE: inflation in progress - inflating over a stack-lock.
1160 // Some other thread is converting from stack-locked to inflated.
1161 // Only that thread can complete inflation -- other threads must wait.
1162 // The INFLATING value is transient.
1163 // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1164 // We could always eliminate polling by parking the thread on some auxiliary list.
1165 if (mark == markWord::INFLATING()) {
1166 read_stable_mark(object);
1167 continue;
1168 }
1169
1170 // CASE: stack-locked
1171 // Could be stack-locked either by this thread or by some other thread.
1172 //
1173 // Note that we allocate the ObjectMonitor speculatively, _before_ attempting
1174 // to install INFLATING into the mark word. We originally installed INFLATING,
1175 // allocated the ObjectMonitor, and then finally STed the address of the
1176 // ObjectMonitor into the mark. This was correct, but artificially lengthened
1177 // the interval in which INFLATING appeared in the mark, thus increasing
1178 // the odds of inflation contention.
1179
1180 LogStreamHandle(Trace, monitorinflation) lsh;
1181
1182 if (mark.has_locker()) {
1183 ObjectMonitor* m = new ObjectMonitor(object);
1184 // Optimistically prepare the ObjectMonitor - anticipate successful CAS
1185 // We do this before the CAS in order to minimize the length of time
1186 // in which INFLATING appears in the mark.
1187
1188 markWord cmp = object->cas_set_mark(markWord::INFLATING(), mark);
1189 if (cmp != mark) {
1190 delete m;
1191 continue; // Interference -- just retry
1192 }
1193
1194 // We've successfully installed INFLATING (0) into the mark-word.
1195 // This is the only case where 0 will appear in a mark-word.
1196 // Only the singular thread that successfully swings the mark-word
1197 // to 0 can perform (or more precisely, complete) inflation.
1198 //
1199 // Why do we CAS a 0 into the mark-word instead of just CASing the
1200 // mark-word from the stack-locked value directly to the new inflated state?
1201 // Consider what happens when a thread unlocks a stack-locked object.
1202 // It attempts to use CAS to swing the displaced header value from the
1203 // on-stack BasicLock back into the object header. Recall also that the
1204 // header value (hash code, etc) can reside in (a) the object header, or
1205 // (b) a displaced header associated with the stack-lock, or (c) a displaced
1206 // header in an ObjectMonitor. The inflate() routine must copy the header
1207 // value from the BasicLock on the owner's stack to the ObjectMonitor, all
1208 // the while preserving the hashCode stability invariants. If the owner
1209 // decides to release the lock while the value is 0, the unlock will fail
1210 // and control will eventually pass from slow_exit() to inflate. The owner
1211 // will then spin, waiting for the 0 value to disappear. Put another way,
1212 // the 0 causes the owner to stall if the owner happens to try to
1213 // drop the lock (restoring the header from the BasicLock to the object)
1214 // while inflation is in-progress. This protocol avoids races that
1215 // would otherwise permit hashCode values to change or "flicker" for an object.
1216 // Critically, while object->mark is 0 mark.displaced_mark_helper() is stable.
1217 // 0 serves as a "BUSY" inflate-in-progress indicator.
1218
1219
1220 // fetch the displaced mark from the owner's stack.
1221 // The owner can't die or unwind past the lock while our INFLATING
1222 // object is in the mark. Furthermore the owner can't complete
1223 // an unlock on the object, either.
1224 markWord dmw = mark.displaced_mark_helper();
1225 // Catch if the object's header is not neutral (not locked and
1226 // not marked is what we care about here).
1227 assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1228
1229 // Setup monitor fields to proper values -- prepare the monitor
1230 m->set_header(dmw);
1231
1232 // Optimization: if the mark.locker stack address is associated
1233 // with this thread we could simply set m->_owner = current.
1234 // Note that a thread can inflate an object
1235 // that it has stack-locked -- as might happen in wait() -- directly
1236 // with CAS. That is, we can avoid the xchg-NULL .... ST idiom.
1237 m->set_owner_from(NULL, mark.locker());
1238 // TODO-FIXME: assert BasicLock->dhw != 0.
1239
1240 // Must preserve store ordering. The monitor state must
1241 // be stable at the time of publishing the monitor address.
1242 guarantee(object->mark() == markWord::INFLATING(), "invariant");
1243 // Release semantics so that above set_object() is seen first.
1244 object->release_set_mark(markWord::encode(m));
1245
1246 // Once ObjectMonitor is configured and the object is associated
1247 // with the ObjectMonitor, it is safe to allow async deflation:
1248 _in_use_list.add(m);
1249
1250 // Hopefully the performance counters are allocated on distinct cache lines
1251 // to avoid false sharing on MP systems ...
1252 OM_PERFDATA_OP(Inflations, inc());
1253 if (log_is_enabled(Trace, monitorinflation)) {
1254 ResourceMark rm(current);
1255 lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT"0x%016" "l" "x" ", mark="
1256 INTPTR_FORMAT"0x%016" "l" "x" ", type='%s'", p2i(object),
1257 object->mark().value(), object->klass()->external_name());
1258 }
1259 if (event.should_commit()) {
1260 post_monitor_inflate_event(&event, object, cause);
1261 }
1262 return m;
1263 }
1264
1265 // CASE: neutral
1266 // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
1267 // If we know we're inflating for entry it's better to inflate by swinging a
1268 // pre-locked ObjectMonitor pointer into the object header. A successful
1269 // CAS inflates the object *and* confers ownership to the inflating thread.
1270 // In the current implementation we use a 2-step mechanism where we CAS()
1271 // to inflate and then CAS() again to try to swing _owner from NULL to current.
1272 // An inflateTry() method that we could call from enter() would be useful.
1273
1274 // Catch if the object's header is not neutral (not locked and
1275 // not marked is what we care about here).
1276 assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
1277 ObjectMonitor* m = new ObjectMonitor(object);
1278 // prepare m for installation - set monitor to initial state
1279 m->set_header(mark);
1280
1281 if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
1282 delete m;
1283 m = NULL;
1284 continue;
1285 // interference - the markword changed - just retry.
1286 // The state-transitions are one-way, so there's no chance of
1287 // live-lock -- "Inflated" is an absorbing state.
1288 }
1289
1290 // Once the ObjectMonitor is configured and object is associated
1291 // with the ObjectMonitor, it is safe to allow async deflation:
1292 _in_use_list.add(m);
1293
1294 // Hopefully the performance counters are allocated on distinct
1295 // cache lines to avoid false sharing on MP systems ...
1296 OM_PERFDATA_OP(Inflations, inc());
1297 if (log_is_enabled(Trace, monitorinflation)) {
1298 ResourceMark rm(current);
1299 lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT"0x%016" "l" "x" ", mark="
1300 INTPTR_FORMAT"0x%016" "l" "x" ", type='%s'", p2i(object),
1301 object->mark().value(), object->klass()->external_name());
1302 }
1303 if (event.should_commit()) {
1304 post_monitor_inflate_event(&event, object, cause);
1305 }
1306 return m;
1307 }
1308}
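
A condensed view of the mark-word dispatch that the loop above performs (this is a summary of the code just shown, not a separate implementation):

  //   mark state        action taken by inflate()
  //   ----------------  ----------------------------------------------------
  //   has_monitor()     return the existing ObjectMonitor
  //   INFLATING (0)     read_stable_mark() to wait for the inflating thread, retry
  //   has_locker()      CAS INFLATING into the mark, copy the displaced header
  //                     into a new ObjectMonitor, publish it with a releasing
  //                     store, add it to _in_use_list, return it
  //   neutral           CAS a new ObjectMonitor directly into the mark; on
  //                     interference delete it and retry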
1309
1310void ObjectSynchronizer::chk_for_block_req(JavaThread* current, const char* op_name,
1311 const char* cnt_name, size_t cnt,
1312 LogStream* ls, elapsedTimer* timer_p) {
1313 if (!SafepointMechanism::should_process(current)) {
1314 return;
1315 }
1316
1317 // A safepoint/handshake has started.
1318 if (ls != NULL) {
1319 timer_p->stop();
1320 ls->print_cr("pausing %s: %s=" SIZE_FORMAT"%" "l" "u" ", in_use_list stats: ceiling="
1321 SIZE_FORMAT"%" "l" "u" ", count=" SIZE_FORMAT"%" "l" "u" ", max=" SIZE_FORMAT"%" "l" "u",
1322 op_name, cnt_name, cnt, in_use_list_ceiling(),
1323 _in_use_list.count(), _in_use_list.max());
1324 }
1325
1326 {
1327 // Honor block request.
1328 ThreadBlockInVM tbivm(current);
1329 }
1330
1331 if (ls != NULL) {
1332   ls->print_cr("resuming %s: in_use_list stats: ceiling=" SIZE_FORMAT
1333                ", count=" SIZE_FORMAT ", max=" SIZE_FORMAT, op_name,
1334 in_use_list_ceiling(), _in_use_list.count(), _in_use_list.max());
1335 timer_p->start();
1336 }
1337}
1338
1339// Walk the in-use list and deflate (at most MonitorDeflationMax) idle
1340// ObjectMonitors. Returns the number of deflated ObjectMonitors.
1341size_t ObjectSynchronizer::deflate_monitor_list(Thread* current, LogStream* ls,
1342 elapsedTimer* timer_p) {
1343 MonitorList::Iterator iter = _in_use_list.iterator();
1344 size_t deflated_count = 0;
1345
1346 while (iter.has_next()) {
1347 if (deflated_count >= (size_t)MonitorDeflationMax) {
1348 break;
1349 }
1350 ObjectMonitor* mid = iter.next();
1351 if (mid->deflate_monitor()) {
1352 deflated_count++;
1353 }
1354
1355 if (current->is_Java_thread()) {
1356 // A JavaThread must check for a safepoint/handshake and honor it.
1357 chk_for_block_req(JavaThread::cast(current), "deflation", "deflated_count",
1358 deflated_count, ls, timer_p);
1359 }
1360 }
1361
1362 return deflated_count;
1363}
1364
1365class HandshakeForDeflation : public HandshakeClosure {
1366 public:
1367 HandshakeForDeflation() : HandshakeClosure("HandshakeForDeflation") {}
1368
1369 void do_thread(Thread* thread) {
1370 log_trace(monitorinflation)("HandshakeForDeflation::do_thread: thread="
1371                             INTPTR_FORMAT, p2i(thread));
1372 }
1373};
1374
1375// This function is called by the MonitorDeflationThread to deflate
1376// ObjectMonitors. It is also called via do_final_audit_and_print_stats()
1377// by the VMThread.
1378size_t ObjectSynchronizer::deflate_idle_monitors() {
1379 Thread* current = Thread::current();
1380 if (current->is_Java_thread()) {
  [9] Assuming the condition is false
  [10] Taking false branch
1381 // The async deflation request has been processed.
1382 _last_async_deflation_time_ns = os::javaTimeNanos();
1383 set_is_async_deflation_requested(false);
1384 }
1385
1386 LogStreamHandle(Debug, monitorinflation) lsh_debug;
1387 LogStreamHandle(Info, monitorinflation) lsh_info;
1388 LogStream* ls = NULL;
1389 if (log_is_enabled(Debug, monitorinflation)) {
  [11] Assuming the condition is false
  [12] Taking false branch
1390 ls = &lsh_debug;
1391 } else if (log_is_enabled(Info, monitorinflation)) {
  [13] Assuming the condition is false
  [14] Taking false branch
1392 ls = &lsh_info;
1393 }
1394
1395 elapsedTimer timer;
1396 if (ls != NULL) {
  [14.1] 'ls' is equal to NULL
  [15] Taking false branch
1397 ls->print_cr("begin deflating: in_use_list stats: ceiling=" SIZE_FORMAT"%" "l" "u" ", count=" SIZE_FORMAT"%" "l" "u" ", max=" SIZE_FORMAT"%" "l" "u",
1398 in_use_list_ceiling(), _in_use_list.count(), _in_use_list.max());
1399 timer.start();
1400 }
1401
1402 // Deflate some idle ObjectMonitors.
1403 size_t deflated_count = deflate_monitor_list(current, ls, &timer);
1404 if (deflated_count > 0 || is_final_audit()) {
  [15.1] 'deflated_count' is <= 0
  [16] Assuming the condition is true
  [17] Taking true branch
1405 // There are ObjectMonitors that have been deflated or this is the
1406 // final audit and all the remaining ObjectMonitors have been
1407 // deflated, BUT the MonitorDeflationThread blocked for the final
1408 // safepoint during unlinking.
1409
1410 // Unlink deflated ObjectMonitors from the in-use list.
1411 ResourceMark rm;
1412 GrowableArray<ObjectMonitor*> delete_list((int)deflated_count);
1413 size_t unlinked_count = _in_use_list.unlink_deflated(current, ls, &timer,
1414                                                       &delete_list);
  [18] Calling 'MonitorList::unlink_deflated'
1415 if (current->is_Java_thread()) {
1416 if (ls != NULL) {
1417 timer.stop();
1418 ls->print_cr("before handshaking: unlinked_count=" SIZE_FORMAT"%" "l" "u"
1419 ", in_use_list stats: ceiling=" SIZE_FORMAT"%" "l" "u" ", count="
1420 SIZE_FORMAT"%" "l" "u" ", max=" SIZE_FORMAT"%" "l" "u",
1421 unlinked_count, in_use_list_ceiling(),
1422 _in_use_list.count(), _in_use_list.max());
1423 }
1424
1425 // A JavaThread needs to handshake in order to safely free the
1426 // ObjectMonitors that were deflated in this cycle.
1427 HandshakeForDeflation hfd_hc;
1428 Handshake::execute(&hfd_hc);
1429
1430 if (ls != NULL) {
1431 ls->print_cr("after handshaking: in_use_list stats: ceiling="
1432              SIZE_FORMAT ", count=" SIZE_FORMAT ", max=" SIZE_FORMAT,
1433 in_use_list_ceiling(), _in_use_list.count(), _in_use_list.max());
1434 timer.start();
1435 }
1436 }
1437
1438 // After the handshake, safely free the ObjectMonitors that were
1439 // deflated in this cycle.
1440 size_t deleted_count = 0;
1441 for (ObjectMonitor* monitor: delete_list) {
1442 delete monitor;
1443 deleted_count++;
1444
1445 if (current->is_Java_thread()) {
1446 // A JavaThread must check for a safepoint/handshake and honor it.
1447 chk_for_block_req(JavaThread::cast(current), "deletion", "deleted_count",
1448 deleted_count, ls, &timer);
1449 }
1450 }
1451 }
1452
1453 if (ls != NULL) {
1454   timer.stop();
1455   if (deflated_count != 0 || log_is_enabled(Debug, monitorinflation)) {
1456     ls->print_cr("deflated " SIZE_FORMAT " monitors in %3.7f secs",
1457                  deflated_count, timer.seconds());
1458   }
1459   ls->print_cr("end deflating: in_use_list stats: ceiling=" SIZE_FORMAT ", count=" SIZE_FORMAT ", max=" SIZE_FORMAT,
1460 in_use_list_ceiling(), _in_use_list.count(), _in_use_list.max());
1461 }
1462
1463 OM_PERFDATA_OP(MonExtant, set_value(_in_use_list.count()));
1464 OM_PERFDATA_OP(Deflations, inc(deflated_count));
1465
1466 GVars.stw_random = os::random();
1467
1468 if (deflated_count != 0) {
1469 _no_progress_cnt = 0;
1470 } else {
1471 _no_progress_cnt++;
1472 }
1473
1474 return deflated_count;
1475}
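
In outline, one call to deflate_idle_monitors() performs the following phases (a summary of the code above):

  // 1. deflate_monitor_list()  -- deflate up to MonitorDeflationMax idle monitors
  // 2. unlink_deflated()       -- move deflated entries from _in_use_list to delete_list
  // 3. Handshake::execute()    -- (JavaThread only) ensure no thread still references them
  // 4. delete each ObjectMonitor on delete_list, honoring safepoint/handshake
  //    requests via chk_for_block_req() along the way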
1476
1477// Monitor cleanup on JavaThread::exit
1478
1479// Iterate through monitor cache and attempt to release thread's monitors
1480class ReleaseJavaMonitorsClosure: public MonitorClosure {
1481 private:
1482 JavaThread* _thread;
1483
1484 public:
1485 ReleaseJavaMonitorsClosure(JavaThread* thread) : _thread(thread) {}
1486 void do_monitor(ObjectMonitor* mid) {
1487 (void)mid->complete_exit(_thread);
1488 }
1489};
1490
1491// Release all inflated monitors owned by current thread. Lightweight monitors are
1492// ignored. This is meant to be called during JNI thread detach which assumes
1493// all remaining monitors are heavyweight. All exceptions are swallowed.
1494// Scanning the extant monitor list can be time consuming.
1495// A simple optimization is to add a per-thread flag that indicates a thread
1496// called jni_monitorenter() during its lifetime.
1497//
1498// Instead of NoSafepointVerifier it might be cheaper to
1499// use an idiom of the form:
1500// auto int tmp = SafepointSynchronize::_safepoint_counter ;
1501// <code that must not run at safepoint>
1502// guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
1503// Since the tests are extremely cheap we could leave them enabled
1504// for normal product builds.
1505
1506void ObjectSynchronizer::release_monitors_owned_by_thread(JavaThread* current) {
1507 assert(current == JavaThread::current(), "must be current Java thread");
1508 NoSafepointVerifier nsv;
1509 ReleaseJavaMonitorsClosure rjmc(current);
1510 ObjectSynchronizer::monitors_iterate(&rjmc, current);
1511 assert(!current->has_pending_exception(), "Should not be possible");
1512 current->clear_pending_exception();
1513}
1514
1515const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
1516 switch (cause) {
1517 case inflate_cause_vm_internal: return "VM Internal";
1518 case inflate_cause_monitor_enter: return "Monitor Enter";
1519 case inflate_cause_wait: return "Monitor Wait";
1520 case inflate_cause_notify: return "Monitor Notify";
1521 case inflate_cause_hash_code: return "Monitor Hash Code";
1522 case inflate_cause_jni_enter: return "JNI Monitor Enter";
1523 case inflate_cause_jni_exit: return "JNI Monitor Exit";
1524 default:
1525 ShouldNotReachHere();
1526 }
1527 return "Unknown";
1528}
1529
1530//------------------------------------------------------------------------------
1531// Debugging code
1532
1533u_char* ObjectSynchronizer::get_gvars_addr() {
1534 return (u_char*)&GVars;
1535}
1536
1537u_char* ObjectSynchronizer::get_gvars_hc_sequence_addr() {
1538 return (u_char*)&GVars.hc_sequence;
1539}
1540
1541size_t ObjectSynchronizer::get_gvars_size() {
1542 return sizeof(SharedGlobals);
1543}
1544
1545u_char* ObjectSynchronizer::get_gvars_stw_random_addr() {
1546 return (u_char*)&GVars.stw_random;
1547}
1548
1549// Do the final audit and print of ObjectMonitor stats; must be done
1550// by the VMThread at VM exit time.
1551void ObjectSynchronizer::do_final_audit_and_print_stats() {
1552 assert(Thread::current()->is_VM_thread(), "sanity check");
  [1] Assuming the condition is false
  [2] Taking false branch
  [3] Loop condition is false. Exiting loop
1553
1554 if (is_final_audit()) { // Only do the audit once.
  [4] Assuming the condition is false
  [5] Taking false branch
1555 return;
1556 }
1557 set_is_final_audit();
1558
1559 if (log_is_enabled(Info, monitorinflation)) {
  [6] Assuming the condition is true
  [7] Taking true branch
1560 // Do a deflation in order to reduce the in-use monitor population
1561 // that is reported by ObjectSynchronizer::log_in_use_monitor_details()
1562 // which is called by ObjectSynchronizer::audit_and_print_stats().
1563 while (ObjectSynchronizer::deflate_idle_monitors() != 0) {
  [8] Calling 'ObjectSynchronizer::deflate_idle_monitors'
1564 ; // empty
1565 }
1566 // The other audit_and_print_stats() call is done at the Debug
1567 // level at a safepoint in ObjectSynchronizer::do_safepoint_work().
1568 ObjectSynchronizer::audit_and_print_stats(true /* on_exit */);
1569 }
1570}
1571
1572// This function can be called at a safepoint or it can be called when
1573// we are trying to exit the VM. When we are trying to exit the VM, the
1574// list walker functions can run in parallel with the other list
1575// operations so spin-locking is used for safety.
1576//
1577// Calls to this function can be added in various places as a debugging
1578// aid; pass 'true' for the 'on_exit' parameter to have in-use monitor
1579// details logged at the Info level and 'false' for the 'on_exit'
1580// parameter to have in-use monitor details logged at the Trace level.
1581//
1582void ObjectSynchronizer::audit_and_print_stats(bool on_exit) {
1583 assert(on_exit || SafepointSynchronize::is_at_safepoint(), "invariant");
1584
1585 LogStreamHandle(Debug, monitorinflation) lsh_debug;
1586 LogStreamHandle(Info, monitorinflation) lsh_info;
1587 LogStreamHandle(Trace, monitorinflation) lsh_trace;
1588 LogStream* ls = NULL;
1589 if (log_is_enabled(Trace, monitorinflation)) {
1590   ls = &lsh_trace;
1591 } else if (log_is_enabled(Debug, monitorinflation)) {
1592   ls = &lsh_debug;
1593 } else if (log_is_enabled(Info, monitorinflation)) {
1594   ls = &lsh_info;
1595 }
1596 assert(ls != NULL, "sanity check");
1597
1598 int error_cnt = 0;
1599
1600 ls->print_cr("Checking in_use_list:");
1601 chk_in_use_list(ls, &error_cnt);
1602
1603 if (error_cnt == 0) {
1604 ls->print_cr("No errors found in in_use_list checks.");
1605 } else {
log_error(monitorinflation)("found in_use_list errors: error_cnt=%d", error_cnt);
1607 }
1608
1609 if ((on_exit && log_is_enabled(Info, monitorinflation)) ||
1610     (!on_exit && log_is_enabled(Trace, monitorinflation))) {
1611 // When exiting this log output is at the Info level. When called
1612 // at a safepoint, this log output is at the Trace level since
1613 // there can be a lot of it.
1614 log_in_use_monitor_details(ls);
1615 }
1616
1617 ls->flush();
1618
1619 guarantee(error_cnt == 0, "ERROR: found monitor list errors: error_cnt=%d", error_cnt);
1620}
1621
1622// Check the in_use_list; log the results of the checks.
1623void ObjectSynchronizer::chk_in_use_list(outputStream* out, int *error_cnt_p) {
1624 size_t l_in_use_count = _in_use_list.count();
1625 size_t l_in_use_max = _in_use_list.max();
1626 out->print_cr("count=" SIZE_FORMAT"%" "l" "u" ", max=" SIZE_FORMAT"%" "l" "u", l_in_use_count,
1627 l_in_use_max);
1628
1629 size_t ck_in_use_count = 0;
1630 MonitorList::Iterator iter = _in_use_list.iterator();
1631 while (iter.has_next()) {
1632 ObjectMonitor* mid = iter.next();
1633 chk_in_use_entry(mid, out, error_cnt_p);
1634 ck_in_use_count++;
1635 }
1636
1637 if (l_in_use_count == ck_in_use_count) {
1638 out->print_cr("in_use_count=" SIZE_FORMAT"%" "l" "u" " equals ck_in_use_count="
1639 SIZE_FORMAT"%" "l" "u", l_in_use_count, ck_in_use_count);
1640 } else {
1641 out->print_cr("WARNING: in_use_count=" SIZE_FORMAT"%" "l" "u" " is not equal to "
1642 "ck_in_use_count=" SIZE_FORMAT"%" "l" "u", l_in_use_count,
1643 ck_in_use_count);
1644 }
1645
1646 size_t ck_in_use_max = _in_use_list.max();
1647 if (l_in_use_max == ck_in_use_max) {
1648 out->print_cr("in_use_max=" SIZE_FORMAT"%" "l" "u" " equals ck_in_use_max="
1649 SIZE_FORMAT"%" "l" "u", l_in_use_max, ck_in_use_max);
1650 } else {
1651 out->print_cr("WARNING: in_use_max=" SIZE_FORMAT"%" "l" "u" " is not equal to "
1652 "ck_in_use_max=" SIZE_FORMAT"%" "l" "u", l_in_use_max, ck_in_use_max);
1653 }
1654}
1655
1656// Check an in-use monitor entry; log any errors.
1657void ObjectSynchronizer::chk_in_use_entry(ObjectMonitor* n, outputStream* out,
1658 int* error_cnt_p) {
1659 if (n->owner_is_DEFLATER_MARKER()) {
1660 // This should not happen, but if it does, it is not fatal.
1661 out->print_cr("WARNING: monitor=" INTPTR_FORMAT"0x%016" "l" "x" ": in-use monitor is "
1662 "deflated.", p2i(n));
1663 return;
1664 }
1665 if (n->header().value() == 0) {
1666 out->print_cr("ERROR: monitor=" INTPTR_FORMAT"0x%016" "l" "x" ": in-use monitor must "
1667 "have non-NULL _header field.", p2i(n));
1668 *error_cnt_p = *error_cnt_p + 1;
1669 }
1670 const oop obj = n->object_peek();
1671 if (obj != NULL) {
1672 const markWord mark = obj->mark();
1673 if (!mark.has_monitor()) {
1674 out->print_cr("ERROR: monitor=" INTPTR_FORMAT"0x%016" "l" "x" ": in-use monitor's "
1675 "object does not think it has a monitor: obj="
1676 INTPTR_FORMAT"0x%016" "l" "x" ", mark=" INTPTR_FORMAT"0x%016" "l" "x", p2i(n),
1677 p2i(obj), mark.value());
1678 *error_cnt_p = *error_cnt_p + 1;
1679 }
1680 ObjectMonitor* const obj_mon = mark.monitor();
1681 if (n != obj_mon) {
1682 out->print_cr("ERROR: monitor=" INTPTR_FORMAT"0x%016" "l" "x" ": in-use monitor's "
1683 "object does not refer to the same monitor: obj="
1684 INTPTR_FORMAT"0x%016" "l" "x" ", mark=" INTPTR_FORMAT"0x%016" "l" "x" ", obj_mon="
1685 INTPTR_FORMAT"0x%016" "l" "x", p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
1686 *error_cnt_p = *error_cnt_p + 1;
1687 }
1688 }
1689}
1690
1691// Log details about ObjectMonitors on the in_use_list. The 'BHL'
1692// flags indicate why the entry is in-use, 'object' and 'object type'
1693// indicate the associated object and its type.
1694void ObjectSynchronizer::log_in_use_monitor_details(outputStream* out) {
1695 stringStream ss;
1696 if (_in_use_list.count() > 0) {
1697 out->print_cr("In-use monitor info:");
1698 out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
1699 out->print_cr("%18s %s %18s %18s",
1700 "monitor", "BHL", "object", "object type");
1701 out->print_cr("================== === ================== ==================");
1702 MonitorList::Iterator iter = _in_use_list.iterator();
1703 while (iter.has_next()) {
1704 ObjectMonitor* mid = iter.next();
1705 const oop obj = mid->object_peek();
1706 const markWord mark = mid->header();
1707 ResourceMark rm;
1708 out->print(INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT " %s", p2i(mid),
1709            mid->is_busy(), mark.hash() != 0, mid->owner() != NULL,
1710            p2i(obj), obj == NULL ? "" : obj->klass()->external_name());
1711 if (mid->is_busy()) {
1712 out->print(" (%s)", mid->is_busy_to_string(&ss));
1713 ss.reset();
1714 }
1715 out->cr();
1716 }
1717 }
1718
1719 out->flush();
1720}

/home/daniel/Projects/java/jdk/src/hotspot/share/runtime/objectMonitor.inline.hpp

1/*
2 * Copyright (c) 1998, 2021, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#ifndef SHARE_RUNTIME_OBJECTMONITOR_INLINE_HPP
26#define SHARE_RUNTIME_OBJECTMONITOR_INLINE_HPP
27
28#include "runtime/objectMonitor.hpp"
29
30#include "logging/log.hpp"
31#include "oops/access.inline.hpp"
32#include "runtime/atomic.hpp"
33#include "runtime/synchronizer.hpp"
34
35inline intptr_t ObjectMonitor::is_entered(JavaThread* current) const {
36 void* owner = owner_raw();
37 if (current == owner || current->is_lock_owned((address)owner)) {
38 return 1;
39 }
40 return 0;
41}
42
43inline markWord ObjectMonitor::header() const {
44 return Atomic::load(&_header);
45}
46
47inline volatile markWord* ObjectMonitor::header_addr() {
48 return &_header;
49}
50
51inline void ObjectMonitor::set_header(markWord hdr) {
52 Atomic::store(&_header, hdr);
53}
54
55inline int ObjectMonitor::waiters() const {
56 return _waiters;
57}
58
59// Returns NULL if DEFLATER_MARKER is observed.
60inline void* ObjectMonitor::owner() const {
61 void* owner = owner_raw();
62 return owner != DEFLATER_MARKER ? owner : NULL;
63}
64
65inline void* ObjectMonitor::owner_raw() const {
66 return Atomic::load(&_owner);
67}
68
69// Returns true if owner field == DEFLATER_MARKER and false otherwise.
70// This accessor is called when we really need to know if the owner
71// field == DEFLATER_MARKER and any non-NULL value won't do the trick.
72inline bool ObjectMonitor::owner_is_DEFLATER_MARKER() const {
73 return owner_raw() == DEFLATER_MARKER;
74}
75
76// Returns true if 'this' is being async deflated and false otherwise.
77inline bool ObjectMonitor::is_being_async_deflated() {
78 return contentions() < 0;
  [23] Assuming the condition is true
  [24] Returning the value 1, which participates in a condition later
  [40] Assuming the condition is true
  [41] Returning the value 1, which participates in a condition later
79}
80
81// Return number of threads contending for this monitor.
82inline int ObjectMonitor::contentions() const {
83 return Atomic::load(&_contentions);
84}
85
86// Add value to the contentions field.
87inline void ObjectMonitor::add_to_contentions(int value) {
88 Atomic::add(&_contentions, value);
89}
90
91// Clear _owner field; current value must match old_value.
92inline void ObjectMonitor::release_clear_owner(void* old_value) {
93#ifdef ASSERT
94 void* prev = Atomic::load(&_owner);
95 assert(prev == old_value, "unexpected prev owner=" INTPTR_FORMAT
96        ", expected=" INTPTR_FORMAT, p2i(prev), p2i(old_value));
97#endif
98 Atomic::release_store(&_owner, (void*)NULL);
99 log_trace(monitorinflation, owner)("release_clear_owner(): mid="
100                                    INTPTR_FORMAT ", old_value=" INTPTR_FORMAT,
101                                    p2i(this), p2i(old_value));
102}
103
104// Simply set _owner field to new_value; current value must match old_value.
105// (Simple means no memory sync needed.)
106inline void ObjectMonitor::set_owner_from(void* old_value, void* new_value) {
107#ifdef ASSERT
108 void* prev = Atomic::load(&_owner);
109 assert(prev == old_value, "unexpected prev owner=" INTPTR_FORMAT
110        ", expected=" INTPTR_FORMAT, p2i(prev), p2i(old_value));
111#endif
112 Atomic::store(&_owner, new_value);
113 log_trace(monitorinflation, owner)("set_owner_from(): mid="
114                                    INTPTR_FORMAT ", old_value=" INTPTR_FORMAT
115                                    ", new_value=" INTPTR_FORMAT, p2i(this),
116                                    p2i(old_value), p2i(new_value));
117}
118
119// Simply set _owner field to self; current value must match basic_lock_p.
120inline void ObjectMonitor::set_owner_from_BasicLock(void* basic_lock_p, JavaThread* current) {
121#ifdef ASSERT
122 void* prev = Atomic::load(&_owner);
123 assert(prev == basic_lock_p, "unexpected prev owner=" INTPTR_FORMAT
124        ", expected=" INTPTR_FORMAT, p2i(prev), p2i(basic_lock_p));
125#endif
126 // Non-null owner field to non-null owner field is safe without
127 // cmpxchg() as long as all readers can tolerate either flavor.
128 Atomic::store(&_owner, current);
129 log_trace(monitorinflation, owner)("set_owner_from_BasicLock(): mid="
130                                    INTPTR_FORMAT ", basic_lock_p="
131                                    INTPTR_FORMAT ", new_value=" INTPTR_FORMAT,
132 p2i(this), p2i(basic_lock_p), p2i(current));
133}
134
135// Try to set _owner field to new_value if the current value matches
136// old_value. Otherwise, does not change the _owner field. Returns
137// the prior value of the _owner field.
138inline void* ObjectMonitor::try_set_owner_from(void* old_value, void* new_value) {
139 void* prev = Atomic::cmpxchg(&_owner, old_value, new_value);
140 if (prev == old_value) {
141 log_trace(monitorinflation, owner)("try_set_owner_from(): mid="
142                                    INTPTR_FORMAT ", prev=" INTPTR_FORMAT
143                                    ", new=" INTPTR_FORMAT, p2i(this),
144 p2i(prev), p2i(new_value));
145 }
146 return prev;
147}
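
An illustrative usage pattern for the CAS helper above (the caller code is hypothetical; only the documented return-value contract is relied upon):

  // A thread trying to claim an unowned monitor passes NULL as old_value and
  // itself as new_value; the claim succeeded iff the returned prior value is
  // the old_value it passed in.
  //   void* prior = mon->try_set_owner_from(NULL, current);
  //   if (prior == NULL) {
  //     // current now owns the monitor
  //   } else {
  //     // prior identifies the existing owner (or DEFLATER_MARKER)
  //   }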
148
149// The _next_om field can be concurrently read and modified so we
150// use Atomic operations to disable compiler optimizations that
151// might try to elide loading and/or storing this field.
152
153// Simply get _next_om field.
154inline ObjectMonitor* ObjectMonitor::next_om() const {
155 return Atomic::load(&_next_om);
156}
157
158// Get _next_om field with acquire semantics.
159inline ObjectMonitor* ObjectMonitor::next_om_acquire() const {
160 return Atomic::load_acquire(&_next_om);
161}
162
163// Simply set _next_om field to new_value.
164inline void ObjectMonitor::set_next_om(ObjectMonitor* new_value) {
165 Atomic::store(&_next_om, new_value);
166}
167
168// Set _next_om field to new_value with release semantics.
169inline void ObjectMonitor::release_set_next_om(ObjectMonitor* new_value) {
170 Atomic::release_store(&_next_om, new_value);
171}
172
173// Try to set _next_om field to new_value if the current value matches
174// old_value. Otherwise, does not change the _next_om field. Returns
175// the prior value of the _next_om field.
176inline ObjectMonitor* ObjectMonitor::try_set_next_om(ObjectMonitor* old_value, ObjectMonitor* new_value) {
177 return Atomic::cmpxchg(&_next_om, old_value, new_value);
178}
179
180#endif // SHARE_RUNTIME_OBJECTMONITOR_INLINE_HPP