Bug Summary

File: jdk/src/hotspot/share/cds/heapShared.cpp
Warning: line 1436, column 7
Called C++ object pointer is null
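
The "Called C++ object pointer is null" diagnostic is reported by the analyzer's core checks when a member function is invoked through a pointer that may be null on at least one execution path. A minimal, self-contained sketch of the defect pattern, with purely illustrative names (this is not code from heapShared.cpp):

    // Illustration of the defect class only; all names are hypothetical.
    #include <cstdio>

    struct Cache {
      int size() const { return 0; }
    };

    // Returns a null pointer on one path; the analyzer follows that path
    // to the call site below.
    static Cache* lookup_cache(bool available) {
      static Cache instance;
      return available ? &instance : nullptr;
    }

    int main() {
      Cache* cache = lookup_cache(false);
      printf("%d\n", cache->size());  // member call through a possibly-null pointer
      return 0;
    }

Guarding the call with a null check (or returning early when the pointer is null) removes the warning.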

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name heapShared.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 2 -mthread-model posix -fno-delete-null-pointer-checks -mframe-pointer=all -relaxed-aliasing -fmath-errno -fno-rounding-math -masm-verbose -mconstructor-aliases -munwind-tables -target-cpu x86-64 -dwarf-column-info -fno-split-dwarf-inlining -debugger-tuning=gdb -resource-dir /usr/lib/llvm-10/lib/clang/10.0.0 -I /home/daniel/Projects/java/jdk/build/linux-x86_64-server-fastdebug/hotspot/variant-server/libjvm/objs/precompiled -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -D __STDC_CONSTANT_MACROS -D _GNU_SOURCE -D _REENTRANT -D LIBC=gnu -D LINUX -D VM_LITTLE_ENDIAN -D _LP64=1 -D ASSERT -D CHECK_UNHANDLED_OOPS -D TARGET_ARCH_x86 -D INCLUDE_SUFFIX_OS=_linux -D INCLUDE_SUFFIX_CPU=_x86 -D INCLUDE_SUFFIX_COMPILER=_gcc -D TARGET_COMPILER_gcc -D AMD64 -D HOTSPOT_LIB_ARCH="amd64" -D COMPILER1 -D COMPILER2 -I /home/daniel/Projects/java/jdk/build/linux-x86_64-server-fastdebug/hotspot/variant-server/gensrc/adfiles -I /home/daniel/Projects/java/jdk/src/hotspot/share -I /home/daniel/Projects/java/jdk/src/hotspot/os/linux -I /home/daniel/Projects/java/jdk/src/hotspot/os/posix -I /home/daniel/Projects/java/jdk/src/hotspot/cpu/x86 -I /home/daniel/Projects/java/jdk/src/hotspot/os_cpu/linux_x86 -I /home/daniel/Projects/java/jdk/build/linux-x86_64-server-fastdebug/hotspot/variant-server/gensrc -I /home/daniel/Projects/java/jdk/src/hotspot/share/precompiled -I /home/daniel/Projects/java/jdk/src/hotspot/share/include -I /home/daniel/Projects/java/jdk/src/hotspot/os/posix/include -I /home/daniel/Projects/java/jdk/build/linux-x86_64-server-fastdebug/support/modules_include/java.base -I /home/daniel/Projects/java/jdk/build/linux-x86_64-server-fastdebug/support/modules_include/java.base/linux -I /home/daniel/Projects/java/jdk/src/java.base/share/native/libjimage -I /home/daniel/Projects/java/jdk/build/linux-x86_64-server-fastdebug/hotspot/variant-server/gensrc/adfiles -I /home/daniel/Projects/java/jdk/src/hotspot/share -I /home/daniel/Projects/java/jdk/src/hotspot/os/linux -I /home/daniel/Projects/java/jdk/src/hotspot/os/posix -I /home/daniel/Projects/java/jdk/src/hotspot/cpu/x86 -I /home/daniel/Projects/java/jdk/src/hotspot/os_cpu/linux_x86 -I /home/daniel/Projects/java/jdk/build/linux-x86_64-server-fastdebug/hotspot/variant-server/gensrc -D _FORTIFY_SOURCE=2 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.5.0/../../../../include/c++/7.5.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.5.0/../../../../include/x86_64-linux-gnu/c++/7.5.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.5.0/../../../../include/x86_64-linux-gnu/c++/7.5.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.5.0/../../../../include/c++/7.5.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-10/lib/clang/10.0.0/include 
-internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -Wno-format-zero-length -Wno-unused-parameter -Wno-unused -Wno-parentheses -Wno-comment -Wno-unknown-pragmas -Wno-address -Wno-delete-non-virtual-dtor -Wno-char-subscripts -Wno-array-bounds -Wno-int-in-bool-context -Wno-ignored-qualifiers -Wno-missing-field-initializers -Wno-implicit-fallthrough -Wno-empty-body -Wno-strict-overflow -Wno-sequence-point -Wno-maybe-uninitialized -Wno-misleading-indentation -Wno-cast-function-type -Wno-shift-negative-value -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /home/daniel/Projects/java/jdk/make/hotspot -ferror-limit 19 -fmessage-length 0 -fvisibility hidden -stack-protector 1 -fno-rtti -fgnuc-version=4.2.1 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -faddrsig -o /home/daniel/Projects/java/scan/2021-12-21-193737-8510-1 -x c++ /home/daniel/Projects/java/jdk/src/hotspot/share/cds/heapShared.cpp

/home/daniel/Projects/java/jdk/src/hotspot/share/cds/heapShared.cpp

1/*
2 * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#include "precompiled.hpp"
26#include "cds/archiveBuilder.hpp"
27#include "cds/archiveUtils.hpp"
28#include "cds/filemap.hpp"
29#include "cds/heapShared.inline.hpp"
30#include "cds/metaspaceShared.hpp"
31#include "classfile/classLoaderData.hpp"
32#include "classfile/classLoaderDataShared.hpp"
33#include "classfile/javaClasses.inline.hpp"
34#include "classfile/moduleEntry.hpp"
35#include "classfile/stringTable.hpp"
36#include "classfile/symbolTable.hpp"
37#include "classfile/systemDictionary.hpp"
38#include "classfile/systemDictionaryShared.hpp"
39#include "classfile/vmClasses.hpp"
40#include "classfile/vmSymbols.hpp"
41#include "gc/shared/collectedHeap.hpp"
42#include "gc/shared/gcLocker.hpp"
43#include "gc/shared/gcVMOperations.hpp"
44#include "logging/log.hpp"
45#include "logging/logMessage.hpp"
46#include "logging/logStream.hpp"
47#include "memory/iterator.inline.hpp"
48#include "memory/metadataFactory.hpp"
49#include "memory/metaspaceClosure.hpp"
50#include "memory/resourceArea.hpp"
51#include "memory/universe.hpp"
52#include "oops/compressedOops.inline.hpp"
53#include "oops/fieldStreams.inline.hpp"
54#include "oops/objArrayOop.hpp"
55#include "oops/oop.inline.hpp"
56#include "prims/jvmtiExport.hpp"
57#include "runtime/fieldDescriptor.inline.hpp"
58#include "runtime/globals_extension.hpp"
59#include "runtime/init.hpp"
60#include "runtime/java.hpp"
61#include "runtime/javaCalls.hpp"
62#include "runtime/safepointVerifiers.hpp"
63#include "utilities/bitMap.inline.hpp"
64#include "utilities/copy.hpp"
65#if INCLUDE_G1GC
66#include "gc/g1/g1CollectedHeap.hpp"
67#endif
68
69#if INCLUDE_CDS_JAVA_HEAP
70
71bool HeapShared::_closed_regions_mapped = false;
72bool HeapShared::_open_regions_mapped = false;
73bool HeapShared::_is_loaded = false;
74bool HeapShared::_disable_writing = false;
75address HeapShared::_narrow_oop_base;
76int HeapShared::_narrow_oop_shift;
77DumpedInternedStrings *HeapShared::_dumped_interned_strings = NULL;
78
79uintptr_t HeapShared::_loaded_heap_bottom = 0;
80uintptr_t HeapShared::_loaded_heap_top = 0;
81uintptr_t HeapShared::_dumptime_base_0 = UINTPTR_MAX;
82uintptr_t HeapShared::_dumptime_base_1 = UINTPTR_MAX;
83uintptr_t HeapShared::_dumptime_base_2 = UINTPTR_MAX;
84uintptr_t HeapShared::_dumptime_base_3 = UINTPTR_MAX;
85uintptr_t HeapShared::_dumptime_top = 0;
86intx HeapShared::_runtime_offset_0 = 0;
87intx HeapShared::_runtime_offset_1 = 0;
88intx HeapShared::_runtime_offset_2 = 0;
89intx HeapShared::_runtime_offset_3 = 0;
90bool HeapShared::_loading_failed = false;
91//
92// If you add new entries to the following tables, you should know what you're doing!
93//
94
95// Entry fields for shareable subgraphs archived in the closed archive heap
96// region. Warning: Objects in the subgraphs should not have reference fields
97// assigned at runtime.
98static ArchivableStaticFieldInfo closed_archive_subgraph_entry_fields[] = {
99 {"java/lang/Integer$IntegerCache", "archivedCache"},
100 {"java/lang/Long$LongCache", "archivedCache"},
101 {"java/lang/Byte$ByteCache", "archivedCache"},
102 {"java/lang/Short$ShortCache", "archivedCache"},
103 {"java/lang/Character$CharacterCache", "archivedCache"},
104 {"java/util/jar/Attributes$Name", "KNOWN_NAMES"},
105 {"sun/util/locale/BaseLocale", "constantBaseLocales"},
106};
107// Entry fields for subgraphs archived in the open archive heap region.
108static ArchivableStaticFieldInfo open_archive_subgraph_entry_fields[] = {
109 {"jdk/internal/module/ArchivedModuleGraph", "archivedModuleGraph"},
110 {"java/util/ImmutableCollections", "archivedObjects"},
111 {"java/lang/ModuleLayer", "EMPTY_LAYER"},
112 {"java/lang/module/Configuration", "EMPTY_CONFIGURATION"},
113 {"jdk/internal/math/FDBigInteger", "archivedCaches"},
114};
115
116// Entry fields for subgraphs archived in the open archive heap region (full module graph).
117static ArchivableStaticFieldInfo fmg_open_archive_subgraph_entry_fields[] = {
118 {"jdk/internal/loader/ArchivedClassLoaders", "archivedClassLoaders"},
119 {"jdk/internal/module/ArchivedBootLayer", "archivedBootLayer"},
120 {"java/lang/Module$ArchivedData", "archivedData"},
121};
122
123const static int num_closed_archive_subgraph_entry_fields =
124 sizeof(closed_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
125const static int num_open_archive_subgraph_entry_fields =
126 sizeof(open_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
127const static int num_fmg_open_archive_subgraph_entry_fields =
128 sizeof(fmg_open_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
129
130GrowableArrayCHeap<oop, mtClassShared>* HeapShared::_pending_roots = NULL;
131narrowOop HeapShared::_roots_narrow;
132OopHandle HeapShared::_roots;
133
134#ifdef ASSERT
135bool HeapShared::is_archived_object_during_dumptime(oop p) {
136 assert(HeapShared::can_write(), "must be");
137 assert(DumpSharedSpaces, "this function is only used with -Xshare:dump");
138 return Universe::heap()->is_archived_object(p);
139}
140#endif
141
142////////////////////////////////////////////////////////////////
143//
144// Java heap object archiving support
145//
146////////////////////////////////////////////////////////////////
147void HeapShared::fixup_regions() {
148 FileMapInfo* mapinfo = FileMapInfo::current_info();
149 if (is_mapped()) {
150 mapinfo->fixup_mapped_heap_regions();
151 } else if (_loading_failed) {
152 fill_failed_loaded_region();
153 }
154 if (is_fully_available()) {
155 _roots = OopHandle(Universe::vm_global(), decode_from_archive(_roots_narrow));
156 if (!MetaspaceShared::use_full_module_graph()) {
157 // Need to remove all the archived java.lang.Module objects from HeapShared::roots().
158 ClassLoaderDataShared::clear_archived_oops();
159 }
160 }
161 SystemDictionaryShared::update_archived_mirror_native_pointers();
162}
163
164unsigned HeapShared::oop_hash(oop const& p) {
165 unsigned hash = (unsigned)p->identity_hash();
166 return hash;
167}
168
169static void reset_states(oop obj, TRAPS) {
170 Handle h_obj(THREAD, obj);
171 InstanceKlass* klass = InstanceKlass::cast(obj->klass());
172 TempNewSymbol method_name = SymbolTable::new_symbol("resetArchivedStates");
173 Symbol* method_sig = vmSymbols::void_method_signature();
174
175 while (klass != NULL) {
176 Method* method = klass->find_method(method_name, method_sig);
177 if (method != NULL) {
178 assert(method->is_private(), "must be");
179 if (log_is_enabled(Debug, cds)) {
180 ResourceMark rm(THREAD);
181 log_debug(cds)(" calling %s", method->name_and_sig_as_C_string());
182 }
183 JavaValue result(T_VOID);
184 JavaCalls::call_special(&result, h_obj, klass,
185 method_name, method_sig, CHECK);
186 }
187 klass = klass->java_super();
188 }
189}
190
191void HeapShared::reset_archived_object_states(TRAPS) {
192 assert(DumpSharedSpaces, "dump-time only");
193 log_debug(cds)("Resetting platform loader");
194 reset_states(SystemDictionary::java_platform_loader(), CHECK);
195 log_debug(cds)("Resetting system loader");
196 reset_states(SystemDictionary::java_system_loader(), CHECK);
197}
198
199HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = NULL;
200oop HeapShared::find_archived_heap_object(oop obj) {
201 assert(DumpSharedSpaces, "dump-time only");
202 ArchivedObjectCache* cache = archived_object_cache();
203 oop* p = cache->get(obj);
204 if (p != NULL) {
205 return *p;
206 } else {
207 return NULL;
208 }
209}
210
211int HeapShared::append_root(oop obj) {
212 assert(DumpSharedSpaces, "dump-time only");
213
214 // No GC should happen since we aren't scanning _pending_roots.
215 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
216
217 if (_pending_roots == NULL) {
218 _pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500);
219 }
220
221 return _pending_roots->append(obj);
222}
223
224objArrayOop HeapShared::roots() {
225 if (DumpSharedSpaces) {
226 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
227 if (!HeapShared::can_write()) {
228 return NULL;
229 }
230 } else {
231 assert(UseSharedSpaces, "must be");
232 }
233
234 objArrayOop roots = (objArrayOop)_roots.resolve();
235 assert(roots != NULL, "should have been initialized");
236 return roots;
237}
238
239void HeapShared::set_roots(narrowOop roots) {
240 assert(UseSharedSpaces, "runtime only");
241 assert(is_fully_available(), "must be");
242 _roots_narrow = roots;
243}
244
245// Returns an objArray that contains all the roots of the archived objects
246oop HeapShared::get_root(int index, bool clear) {
247 assert(index >= 0, "sanity");
248 if (DumpSharedSpaces) {
249 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
250 assert(_pending_roots != NULL, "sanity");
251 return _pending_roots->at(index);
252 } else {
253 assert(UseSharedSpaces, "must be");
254 assert(!_roots.is_empty(), "must have loaded shared heap");
255 oop result = roots()->obj_at(index);
256 if (clear) {
257 clear_root(index);
258 }
259 return result;
260 }
261}
262
263void HeapShared::clear_root(int index) {
264 assert(index >= 0, "sanity");
265 assert(UseSharedSpaces, "must be");
266 if (is_fully_available()) {
267 if (log_is_enabled(Debug, cds, heap)) {
268 oop old = roots()->obj_at(index);
269 log_debug(cds, heap)("Clearing root %d: was " PTR_FORMAT, index, p2i(old));
270 }
271 roots()->obj_at_put(index, NULL);
272 }
273}
274
275oop HeapShared::archive_object(oop obj) {
276 assert(DumpSharedSpaces, "dump-time only");
277
278 oop ao = find_archived_heap_object(obj);
279 if (ao != NULL) {
280 // already archived
281 return ao;
282 }
283
284 int len = obj->size();
285 if (G1CollectedHeap::heap()->is_archive_alloc_too_large(len)) {
286 log_debug(cds, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: " SIZE_FORMAT,
287 p2i(obj), (size_t)obj->size());
288 return NULL;
289 }
290
291 oop archived_oop = cast_to_oop(G1CollectedHeap::heap()->archive_mem_allocate(len));
292 if (archived_oop != NULL) {
293 Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(obj), cast_from_oop<HeapWord*>(archived_oop), len);
294 // Reinitialize markword to remove age/marking/locking/etc.
295 //
296 // We need to retain the identity_hash, because it may have been used by some hashtables
297 // in the shared heap. This also has the side effect of pre-initializing the
298 // identity_hash for all shared objects, so they are less likely to be written
299 // into during run time, increasing the potential of memory sharing.
300 int hash_original = obj->identity_hash();
301 archived_oop->set_mark(markWord::prototype().copy_set_hash(hash_original));
302 assert(archived_oop->mark().is_unlocked(), "sanity");
303
304 DEBUG_ONLY(int hash_archived = archived_oop->identity_hash());
305 assert(hash_original == hash_archived, "Different hash codes: original %x, archived %x", hash_original, hash_archived);
306
307 ArchivedObjectCache* cache = archived_object_cache();
308 cache->put(obj, archived_oop);
309 if (log_is_enabled(Debug, cds, heap)) {
310 ResourceMark rm;
311 log_debug(cds, heap)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT " : %s",
312 p2i(obj), p2i(archived_oop), obj->klass()->external_name());
313 }
314 } else {
315 log_error(cds, heap)(
316 "Cannot allocate space for object " PTR_FORMAT " in archived heap region",
317 p2i(obj));
318 vm_direct_exit(-1,
319 err_msg("Out of memory. Please run with a larger Java heap, current MaxHeapSize = "
320 SIZE_FORMAT "M", MaxHeapSize/M));
321 }
322 return archived_oop;
323}
324
325void HeapShared::archive_klass_objects() {
326 GrowableArray<Klass*>* klasses = ArchiveBuilder::current()->klasses();
327 assert(klasses != NULL, "sanity");
328 for (int i = 0; i < klasses->length(); i++) {
329 Klass* k = ArchiveBuilder::get_relocated_klass(klasses->at(i));
330
331 // archive mirror object
332 java_lang_Class::archive_mirror(k);
333
334 // archive the resolved_referenes array
335 if (k->is_instance_klass()) {
336 InstanceKlass* ik = InstanceKlass::cast(k);
337 ik->constants()->archive_resolved_references();
338 }
339 }
340}
341
342void HeapShared::run_full_gc_in_vm_thread() {
343 if (HeapShared::can_write()) {
344 // Avoid fragmentation while archiving heap objects.
345 // We do this inside a safepoint, so that no further allocation can happen after GC
346 // has finished.
347 if (GCLocker::is_active()) {
348 // Just checking for safety ...
349 // This should not happen during -Xshare:dump. If you see this, probably the Java core lib
350 // has been modified such that JNI code is executed in some clean up threads after
351 // we have finished class loading.
352 log_warning(cds)("GC locker is held, unable to start extra compacting GC. This may produce suboptimal results.");
353 } else {
354 log_info(cds)("Run GC ...");
355 Universe::heap()->collect_as_vm_thread(GCCause::_archive_time_gc);
356 log_info(cds)("Run GC done");
357 }
358 }
359}
360
361void HeapShared::archive_objects(GrowableArray<MemRegion>* closed_regions,
362 GrowableArray<MemRegion>* open_regions) {
363
364 G1HeapVerifier::verify_ready_for_archiving();
365
366 {
367 NoSafepointVerifier nsv;
368
369 // Cache for recording where the archived objects are copied to
370 create_archived_object_cache();
371
372 log_info(cds)("Heap range = [" PTR_FORMAT " - " PTR_FORMAT "]",
373 p2i(CompressedOops::begin()), p2i(CompressedOops::end()));
374 log_info(cds)("Dumping objects to closed archive heap region ...");
375 copy_closed_objects(closed_regions);
376
377 log_info(cds)("Dumping objects to open archive heap region ...");
378 copy_open_objects(open_regions);
379
380 destroy_archived_object_cache();
381 }
382
383 G1HeapVerifier::verify_archive_regions();
384}
385
386void HeapShared::copy_closed_objects(GrowableArray<MemRegion>* closed_regions) {
387 assert(HeapShared::can_write(), "must be");
388
389 G1CollectedHeap::heap()->begin_archive_alloc_range();
390
391 // Archive interned string objects
392 StringTable::write_to_archive(_dumped_interned_strings);
393
394 archive_object_subgraphs(closed_archive_subgraph_entry_fields,
395 num_closed_archive_subgraph_entry_fields,
396 true /* is_closed_archive */,
397 false /* is_full_module_graph */);
398
399 G1CollectedHeap::heap()->end_archive_alloc_range(closed_regions,
400 os::vm_allocation_granularity());
401}
402
403void HeapShared::copy_open_objects(GrowableArray<MemRegion>* open_regions) {
404 assert(HeapShared::can_write(), "must be");
405
406 G1CollectedHeap::heap()->begin_archive_alloc_range(true /* open */);
407
408 java_lang_Class::archive_basic_type_mirrors();
409
410 archive_klass_objects();
411
412 archive_object_subgraphs(open_archive_subgraph_entry_fields,
413 num_open_archive_subgraph_entry_fields,
414 false /* is_closed_archive */,
415 false /* is_full_module_graph */);
416 if (MetaspaceShared::use_full_module_graph()) {
417 archive_object_subgraphs(fmg_open_archive_subgraph_entry_fields,
418 num_fmg_open_archive_subgraph_entry_fields,
419 false /* is_closed_archive */,
420 true /* is_full_module_graph */);
421 ClassLoaderDataShared::init_archived_oops();
422 }
423
424 copy_roots();
425
426 G1CollectedHeap::heap()->end_archive_alloc_range(open_regions,
427 os::vm_allocation_granularity());
428}
429
430// Copy _pending_archive_roots into an objArray
431void HeapShared::copy_roots() {
432 int length = _pending_roots != NULL ? _pending_roots->length() : 0;
433 size_t size = objArrayOopDesc::object_size(length);
434 Klass* k = Universe::objectArrayKlassObj(); // already relocated to point to archived klass
435 HeapWord* mem = G1CollectedHeap::heap()->archive_mem_allocate(size);
436
437 memset(mem, 0, size * BytesPerWord);
438 {
439 // This is copied from MemAllocator::finish
440 oopDesc::set_mark(mem, markWord::prototype());
441 oopDesc::release_set_klass(mem, k);
442 }
443 {
444 // This is copied from ObjArrayAllocator::initialize
445 arrayOopDesc::set_length(mem, length);
446 }
447
448 _roots = OopHandle(Universe::vm_global(), cast_to_oop(mem));
449 for (int i = 0; i < length; i++) {
450 roots()->obj_at_put(i, _pending_roots->at(i));
451 }
452 log_info(cds)("archived obj roots[%d] = " SIZE_FORMAT " words, klass = %p, obj = %p", length, size, k, mem);
453}
454
455void HeapShared::init_narrow_oop_decoding(address base, int shift) {
456 _narrow_oop_base = base;
457 _narrow_oop_shift = shift;
458}
459
460//
461// Subgraph archiving support
462//
463HeapShared::DumpTimeKlassSubGraphInfoTable* HeapShared::_dump_time_subgraph_info_table = NULL;
464HeapShared::RunTimeKlassSubGraphInfoTable HeapShared::_run_time_subgraph_info_table;
465
466// Get the subgraph_info for Klass k. A new subgraph_info is created if
467// there is no existing one for k. The subgraph_info records the relocated
468// Klass* of the original k.
469KlassSubGraphInfo* HeapShared::init_subgraph_info(Klass* k, bool is_full_module_graph) {
470 assert(DumpSharedSpaces, "dump time only");
471 bool created;
472 Klass* relocated_k = ArchiveBuilder::get_relocated_klass(k);
473 KlassSubGraphInfo* info =
474 _dump_time_subgraph_info_table->put_if_absent(relocated_k, KlassSubGraphInfo(relocated_k, is_full_module_graph),
475 &created);
476 assert(created, "must not initialize twice");
477 return info;
478}
479
480KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) {
481 assert(DumpSharedSpaces, "dump time only");
482 Klass* relocated_k = ArchiveBuilder::get_relocated_klass(k);
483 KlassSubGraphInfo* info = _dump_time_subgraph_info_table->get(relocated_k);
484 assert(info != NULL, "must have been initialized");
485 return info;
486}
487
488// Add an entry field to the current KlassSubGraphInfo.
489void KlassSubGraphInfo::add_subgraph_entry_field(
490 int static_field_offset, oop v, bool is_closed_archive) {
491 assert(DumpSharedSpaces, "dump time only");
492 if (_subgraph_entry_fields == NULL) {
493 _subgraph_entry_fields =
494 new(ResourceObj::C_HEAP, mtClass) GrowableArray<int>(10, mtClass);
495 }
496 _subgraph_entry_fields->append(static_field_offset);
497 _subgraph_entry_fields->append(HeapShared::append_root(v));
498}
499
500// Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs.
501// Only objects of boot classes can be included in sub-graph.
502void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k) {
503 assert(DumpSharedSpaces, "dump time only");
504 Klass* relocated_k = ArchiveBuilder::get_relocated_klass(orig_k);
505
506 if (_subgraph_object_klasses == NULL) {
507 _subgraph_object_klasses =
508 new(ResourceObj::C_HEAP, mtClass) GrowableArray<Klass*>(50, mtClass);
509 }
510
511 assert(ArchiveBuilder::current()->is_in_buffer_space(relocated_k), "must be a shared class");
512
513 if (_k == relocated_k) {
514 // Don't add the Klass containing the sub-graph to it's own klass
515 // initialization list.
516 return;
517 }
518
519 if (relocated_k->is_instance_klass()) {
520 assert(InstanceKlass::cast(relocated_k)->is_shared_boot_class(),
521 "must be boot class");
522 // vmClasses::xxx_klass() are not updated, need to check
523 // the original Klass*
524 if (orig_k == vmClasses::String_klass() ||
525 orig_k == vmClasses::Object_klass()) {
526 // Initialized early during VM initialization. No need to be added
527 // to the sub-graph object class list.
528 return;
529 }
530 } else if (relocated_k->is_objArray_klass()) {
531 Klass* abk = ObjArrayKlass::cast(relocated_k)->bottom_klass();
532 if (abk->is_instance_klass()) {
533 assert(InstanceKlass::cast(abk)->is_shared_boot_class(),
534 "must be boot class");
535 }
536 if (relocated_k == Universe::objectArrayKlassObj()) {
537 // Initialized early during Universe::genesis. No need to be added
538 // to the list.
539 return;
540 }
541 } else {
542 assert(relocated_k->is_typeArray_klass(), "must be");
543 // Primitive type arrays are created early during Universe::genesis.
544 return;
545 }
546
547 if (log_is_enabled(Debug, cds, heap)) {
548 if (!_subgraph_object_klasses->contains(relocated_k)) {
549 ResourceMark rm;
550 log_debug(cds, heap)("Adding klass %s", orig_k->external_name());
551 }
552 }
553
554 _subgraph_object_klasses->append_if_missing(relocated_k);
555 _has_non_early_klasses |= is_non_early_klass(orig_k);
556}
557
558bool KlassSubGraphInfo::is_non_early_klass(Klass* k) {
559 if (k->is_objArray_klass()) {
560 k = ObjArrayKlass::cast(k)->bottom_klass();
561 }
562 if (k->is_instance_klass()) {
563 if (!SystemDictionaryShared::is_early_klass(InstanceKlass::cast(k))) {
564 ResourceMark rm;
565 log_info(cds, heap)("non-early: %s", k->external_name());
566 return true;
567 } else {
568 return false;
569 }
570 } else {
571 return false;
572 }
573}
574
575// Initialize an archived subgraph_info_record from the given KlassSubGraphInfo.
576void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
577 _k = info->klass();
578 _entry_field_records = NULL;
579 _subgraph_object_klasses = NULL;
580 _is_full_module_graph = info->is_full_module_graph();
581
582 if (_is_full_module_graph) {
583 // Consider all classes referenced by the full module graph as early -- we will be
584 // allocating objects of these classes during JVMTI early phase, so they cannot
585 // be processed by (non-early) JVMTI ClassFileLoadHook
586 _has_non_early_klasses = false;
587 } else {
588 _has_non_early_klasses = info->has_non_early_klasses();
589 }
590
591 if (_has_non_early_klasses) {
592 ResourceMark rm;
593 log_info(cds, heap)(
594 "Subgraph of klass %s has non-early klasses and cannot be used when JVMTI ClassFileLoadHook is enabled",
595 _k->external_name());
596 }
597
598 // populate the entry fields
599 GrowableArray<int>* entry_fields = info->subgraph_entry_fields();
600 if (entry_fields != NULL) {
601 int num_entry_fields = entry_fields->length();
602 assert(num_entry_fields % 2 == 0, "sanity");
603 _entry_field_records =
604 ArchiveBuilder::new_ro_array<int>(num_entry_fields);
605 for (int i = 0 ; i < num_entry_fields; i++) {
606 _entry_field_records->at_put(i, entry_fields->at(i));
607 }
608 }
609
610 // the Klasses of the objects in the sub-graphs
611 GrowableArray<Klass*>* subgraph_object_klasses = info->subgraph_object_klasses();
612 if (subgraph_object_klasses != NULL) {
613 int num_subgraphs_klasses = subgraph_object_klasses->length();
614 _subgraph_object_klasses =
615 ArchiveBuilder::new_ro_array<Klass*>(num_subgraphs_klasses);
616 for (int i = 0; i < num_subgraphs_klasses; i++) {
617 Klass* subgraph_k = subgraph_object_klasses->at(i);
618 if (log_is_enabled(Info, cds, heap)) {
619 ResourceMark rm;
620 log_info(cds, heap)(
621 "Archived object klass %s (%2d) => %s",
622 _k->external_name(), i, subgraph_k->external_name());
623 }
624 _subgraph_object_klasses->at_put(i, subgraph_k);
625 ArchivePtrMarker::mark_pointer(_subgraph_object_klasses->adr_at(i));
626 }
627 }
628
629 ArchivePtrMarker::mark_pointer(&_k);
630 ArchivePtrMarker::mark_pointer(&_entry_field_records);
631 ArchivePtrMarker::mark_pointer(&_subgraph_object_klasses);
632}
633
634struct CopyKlassSubGraphInfoToArchive : StackObj {
635 CompactHashtableWriter* _writer;
636 CopyKlassSubGraphInfoToArchive(CompactHashtableWriter* writer) : _writer(writer) {}
637
638 bool do_entry(Klass* klass, KlassSubGraphInfo& info) {
639 if (info.subgraph_object_klasses() != NULL || info.subgraph_entry_fields() != NULL) {
640 ArchivedKlassSubGraphInfoRecord* record =
641 (ArchivedKlassSubGraphInfoRecord*)ArchiveBuilder::ro_region_alloc(sizeof(ArchivedKlassSubGraphInfoRecord));
642 record->init(&info);
643
644 unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary((address)klass);
645 u4 delta = ArchiveBuilder::current()->any_to_offset_u4(record);
646 _writer->add(hash, delta);
647 }
648 return true; // keep on iterating
649 }
650};
651
652// Build the records of archived subgraph infos, which include:
653// - Entry points to all subgraphs from the containing class mirror. The entry
654// points are static fields in the mirror. For each entry point, the field
655// offset, value and is_closed_archive flag are recorded in the sub-graph
656// info. The value is stored back to the corresponding field at runtime.
657// - A list of klasses that need to be loaded/initialized before archived
658// java object sub-graph can be accessed at runtime.
659void HeapShared::write_subgraph_info_table() {
660 // Allocate the contents of the hashtable(s) inside the RO region of the CDS archive.
661 DumpTimeKlassSubGraphInfoTable* d_table = _dump_time_subgraph_info_table;
662 CompactHashtableStats stats;
663
664 _run_time_subgraph_info_table.reset();
665
666 CompactHashtableWriter writer(d_table->_count, &stats);
667 CopyKlassSubGraphInfoToArchive copy(&writer);
668 d_table->iterate(&copy);
669
670 writer.dump(&_run_time_subgraph_info_table, "subgraphs");
671}
672
673void HeapShared::serialize_subgraph_info_table_header(SerializeClosure* soc) {
674 _run_time_subgraph_info_table.serialize_header(soc);
675}
676
677static void verify_the_heap(Klass* k, const char* which) {
678 if (VerifyArchivedFields > 0) {
679 ResourceMark rm;
680 log_info(cds, heap)("Verify heap %s initializing static field(s) in %s",
681 which, k->external_name());
682
683 VM_Verify verify_op;
684 VMThread::execute(&verify_op);
685
686 if (VerifyArchivedFields > 1 && is_init_completed()) {
687 // At this time, the oop->klass() of some archived objects in the heap may not
688 // have been loaded into the system dictionary yet. Nevertheless, oop->klass() should
689 // have enough information (object size, oop maps, etc) so that a GC can be safely
690 // performed.
691 //
692 // -XX:VerifyArchivedFields=2 force a GC to happen in such an early stage
693 // to check for GC safety.
694 log_info(cds, heap)("Trigger GC %s initializing static field(s) in %s",
695 which, k->external_name());
696 FlagSetting fs1(VerifyBeforeGC, true);
697 FlagSetting fs2(VerifyDuringGC, true);
698 FlagSetting fs3(VerifyAfterGC, true);
699 Universe::heap()->collect(GCCause::_java_lang_system_gc);
700 }
701 }
702}
703
704// Before GC can execute, we must ensure that all oops reachable from HeapShared::roots()
705// have a valid klass. I.e., oopDesc::klass() must have already been resolved.
706//
707// Note: if a ArchivedKlassSubGraphInfoRecord contains non-early classes, and JVMTI
708// ClassFileLoadHook is enabled, it's possible for this class to be dynamically replaced. In
709// this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
710void HeapShared::resolve_classes(JavaThread* THREAD) {
711 if (!is_fully_available()) {
712 return; // nothing to do
713 }
714 resolve_classes_for_subgraphs(closed_archive_subgraph_entry_fields,
715 num_closed_archive_subgraph_entry_fields,
716 THREAD);
717 resolve_classes_for_subgraphs(open_archive_subgraph_entry_fields,
718 num_open_archive_subgraph_entry_fields,
719 THREAD);
720 resolve_classes_for_subgraphs(fmg_open_archive_subgraph_entry_fields,
721 num_fmg_open_archive_subgraph_entry_fields,
722 THREAD);
723}
724
725void HeapShared::resolve_classes_for_subgraphs(ArchivableStaticFieldInfo fields[],
726 int num, JavaThread* THREAD) {
727 for (int i = 0; i < num; i++) {
728 ArchivableStaticFieldInfo* info = &fields[i];
729 TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
730 InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name);
731 assert(k != NULL && k->is_shared_boot_class(), "sanity");
732 resolve_classes_for_subgraph_of(k, THREAD);
733 }
734}
735
736void HeapShared::resolve_classes_for_subgraph_of(Klass* k, JavaThread* THREAD) {
737 ExceptionMark em(THREAD);
738 const ArchivedKlassSubGraphInfoRecord* record =
739 resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);
740 if (HAS_PENDING_EXCEPTION) {
741 CLEAR_PENDING_EXCEPTION;
742 }
743 if (record == NULL) {
744 clear_archived_roots_of(k);
745 }
746}
747
748void HeapShared::initialize_from_archived_subgraph(Klass* k, JavaThread* THREAD) {
749 if (!is_fully_available()) {
750 return; // nothing to do
751 }
752
753 ExceptionMark em(THREAD);
754 const ArchivedKlassSubGraphInfoRecord* record =
755 resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/true, THREAD);
756
757 if (HAS_PENDING_EXCEPTION) {
758 CLEAR_PENDING_EXCEPTION;
759 // None of the field value will be set if there was an exception when initializing the classes.
760 // The java code will not see any of the archived objects in the
761 // subgraphs referenced from k in this case.
762 return;
763 }
764
765 if (record != NULL) {
766 init_archived_fields_for(k, record);
767 }
768}
769
770const ArchivedKlassSubGraphInfoRecord*
771HeapShared::resolve_or_init_classes_for_subgraph_of(Klass* k, bool do_init, TRAPS) {
772 assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");
773
774 if (!k->is_shared()) {
775 return NULL;
776 }
777 unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(k);
778 const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);
779
780 // Initialize from archived data. Currently this is done only
781 // during VM initialization time. No lock is needed.
782 if (record != NULL) {
783 if (record->is_full_module_graph() && !MetaspaceShared::use_full_module_graph()) {
784 if (log_is_enabled(Info, cds, heap)) {
785 ResourceMark rm(THREAD);
786 log_info(cds, heap)("subgraph %s cannot be used because full module graph is disabled",
787 k->external_name());
788 }
789 return NULL;
790 }
791
792 if (record->has_non_early_klasses() && JvmtiExport::should_post_class_file_load_hook()) {
793 if (log_is_enabled(Info, cds, heap)) {
794 ResourceMark rm(THREAD);
795 log_info(cds, heap)("subgraph %s cannot be used because JVMTI ClassFileLoadHook is enabled",
796 k->external_name());
797 }
798 return NULL;
799 }
800
801 resolve_or_init(k, do_init, CHECK_NULL);
802
803 // Load/link/initialize the klasses of the objects in the subgraph.
804 // NULL class loader is used.
805 Array<Klass*>* klasses = record->subgraph_object_klasses();
806 if (klasses != NULL) {
807 for (int i = 0; i < klasses->length(); i++) {
808 Klass* klass = klasses->at(i);
809 if (!klass->is_shared()) {
810 return NULL;
811 }
812 resolve_or_init(klass, do_init, CHECK_NULL);
813 }
814 }
815 }
816
817 return record;
818}
819
820void HeapShared::resolve_or_init(Klass* k, bool do_init, TRAPS) {
821 if (!do_init) {
822 if (k->class_loader_data() == NULL) {
823 Klass* resolved_k = SystemDictionary::resolve_or_null(k->name(), CHECK);
824 assert(resolved_k == k, "classes used by archived heap must not be replaced by JVMTI ClassFileLoadHook");
825 }
826 } else {
827 assert(k->class_loader_data() != NULL, "must have been resolved by HeapShared::resolve_classes");
828 if (k->is_instance_klass()) {
829 InstanceKlass* ik = InstanceKlass::cast(k);
830 ik->initialize(CHECK);
831 } else if (k->is_objArray_klass()) {
832 ObjArrayKlass* oak = ObjArrayKlass::cast(k);
833 oak->initialize(CHECK);
834 }
835 }
836}
837
838void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphInfoRecord* record) {
839 verify_the_heap(k, "before");
840
841 // Load the subgraph entry fields from the record and store them back to
842 // the corresponding fields within the mirror.
843 oop m = k->java_mirror();
844 Array<int>* entry_field_records = record->entry_field_records();
845 if (entry_field_records != NULL__null) {
846 int efr_len = entry_field_records->length();
847 assert(efr_len % 2 == 0, "sanity")do { if (!(efr_len % 2 == 0)) { (*g_assert_poison) = 'X';; report_vm_error
("/home/daniel/Projects/java/jdk/src/hotspot/share/cds/heapShared.cpp"
, 847, "assert(" "efr_len % 2 == 0" ") failed", "sanity"); ::
breakpoint(); } } while (0)
;
848 for (int i = 0; i < efr_len; i += 2) {
849 int field_offset = entry_field_records->at(i);
850 int root_index = entry_field_records->at(i+1);
851 oop v = get_root(root_index, /*clear=*/true);
852 m->obj_field_put(field_offset, v);
853 log_debug(cds, heap)(!(LogImpl<(LogTag::_cds), (LogTag::_heap), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG)
>::is_level(LogLevel::Debug))) ? (void)0 : LogImpl<(LogTag
::_cds), (LogTag::_heap), (LogTag::__NO_TAG), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG)>::write<LogLevel
::Debug>
(" " PTR_FORMAT"0x%016" "l" "x" " init field @ %2d = " PTR_FORMAT"0x%016" "l" "x", p2i(k), field_offset, p2i(v));
854 }
855
856 // Done. Java code can see the archived sub-graphs referenced from k's
857 // mirror after this point.
858 if (log_is_enabled(Info, cds, heap)(LogImpl<(LogTag::_cds), (LogTag::_heap), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG)
>::is_level(LogLevel::Info))
) {
859 ResourceMark rm;
860 log_info(cds, heap)(!(LogImpl<(LogTag::_cds), (LogTag::_heap), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG)
>::is_level(LogLevel::Info))) ? (void)0 : LogImpl<(LogTag
::_cds), (LogTag::_heap), (LogTag::__NO_TAG), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG)>::write<LogLevel
::Info>
("initialize_from_archived_subgraph %s " PTR_FORMAT"0x%016" "l" "x" "%s",
861 k->external_name(), p2i(k), JvmtiExport::is_early_phase() ? " (early)" : "");
862 }
863 }
864
865 verify_the_heap(k, "after ");
866}
867
868void HeapShared::clear_archived_roots_of(Klass* k) {
869 unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(k);
870 const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);
871 if (record != NULL__null) {
872 Array<int>* entry_field_records = record->entry_field_records();
873 if (entry_field_records != NULL__null) {
874 int efr_len = entry_field_records->length();
875 assert(efr_len % 2 == 0, "sanity")do { if (!(efr_len % 2 == 0)) { (*g_assert_poison) = 'X';; report_vm_error
("/home/daniel/Projects/java/jdk/src/hotspot/share/cds/heapShared.cpp"
, 875, "assert(" "efr_len % 2 == 0" ") failed", "sanity"); ::
breakpoint(); } } while (0)
;
876 for (int i = 0; i < efr_len; i += 2) {
877 int root_index = entry_field_records->at(i+1);
878 clear_root(root_index);
879 }
880 }
881 }
882}
883
884class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
885 int _level;
886 bool _is_closed_archive;
887 bool _record_klasses_only;
888 KlassSubGraphInfo* _subgraph_info;
889 oop _orig_referencing_obj;
890 oop _archived_referencing_obj;
891 public:
892 WalkOopAndArchiveClosure(int level,
893 bool is_closed_archive,
894 bool record_klasses_only,
895 KlassSubGraphInfo* subgraph_info,
896 oop orig, oop archived) :
897 _level(level), _is_closed_archive(is_closed_archive),
898 _record_klasses_only(record_klasses_only),
899 _subgraph_info(subgraph_info),
900 _orig_referencing_obj(orig), _archived_referencing_obj(archived) {}
901 void do_oop(narrowOop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }
902 void do_oop( oop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }
903
904 protected:
905 template <class T> void do_oop_work(T *p) {
906 oop obj = RawAccess<>::oop_load(p);
907 if (!CompressedOops::is_null(obj)) {
908       assert(!HeapShared::is_archived_object_during_dumptime(obj),
909              "original objects must not point to archived objects");
910
911 size_t field_delta = pointer_delta(p, _orig_referencing_obj, sizeof(char));
912 T* new_p = (T*)(cast_from_oop<address>(_archived_referencing_obj) + field_delta);
913
914 if (!_record_klasses_only && log_is_enabled(Debug, cds, heap)(LogImpl<(LogTag::_cds), (LogTag::_heap), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG)
>::is_level(LogLevel::Debug))
) {
915 ResourceMark rm;
916 log_debug(cds, heap)(!(LogImpl<(LogTag::_cds), (LogTag::_heap), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG)
>::is_level(LogLevel::Debug))) ? (void)0 : LogImpl<(LogTag
::_cds), (LogTag::_heap), (LogTag::__NO_TAG), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG)>::write<LogLevel
::Debug>
("(%d) %s[" SIZE_FORMAT"%" "l" "u" "] ==> " PTR_FORMAT"0x%016" "l" "x" " size " SIZE_FORMAT"%" "l" "u" " %s", _level,
917 _orig_referencing_obj->klass()->external_name(), field_delta,
918 p2i(obj), obj->size() * HeapWordSize, obj->klass()->external_name());
919 LogTarget(Trace, cds, heap)LogTargetImpl<LogLevel::Trace, (LogTag::_cds), (LogTag::_heap
), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG)
, (LogTag::__NO_TAG)>
log;
920 LogStream out(log);
921 obj->print_on(&out);
922 }
923
924 oop archived = HeapShared::archive_reachable_objects_from(
925 _level + 1, _subgraph_info, obj, _is_closed_archive);
926 assert(archived != NULL, "VM should have exited with unarchivable objects for _level > 1")do { if (!(archived != __null)) { (*g_assert_poison) = 'X';; report_vm_error
("/home/daniel/Projects/java/jdk/src/hotspot/share/cds/heapShared.cpp"
, 926, "assert(" "archived != __null" ") failed", "VM should have exited with unarchivable objects for _level > 1"
); ::breakpoint(); } } while (0)
;
927 assert(HeapShared::is_archived_object_during_dumptime(archived), "must be")do { if (!(HeapShared::is_archived_object_during_dumptime(archived
))) { (*g_assert_poison) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/cds/heapShared.cpp"
, 927, "assert(" "HeapShared::is_archived_object_during_dumptime(archived)"
") failed", "must be"); ::breakpoint(); } } while (0)
;
928
929 if (!_record_klasses_only) {
930 // Update the reference in the archived copy of the referencing object.
931 log_debug(cds, heap)(!(LogImpl<(LogTag::_cds), (LogTag::_heap), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG)
>::is_level(LogLevel::Debug))) ? (void)0 : LogImpl<(LogTag
::_cds), (LogTag::_heap), (LogTag::__NO_TAG), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG)>::write<LogLevel
::Debug>
("(%d) updating oop @[" PTR_FORMAT"0x%016" "l" "x" "] " PTR_FORMAT"0x%016" "l" "x" " ==> " PTR_FORMAT"0x%016" "l" "x",
932 _level, p2i(new_p), p2i(obj), p2i(archived));
933 RawAccess<IS_NOT_NULL>::oop_store(new_p, archived);
934 }
935 }
936 }
937};
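The field_delta / new_p arithmetic in do_oop_work above simply mirrors a field's byte offset from the original object onto its archived copy. A minimal standalone sketch of the same idea, using plain pointers instead of HotSpot's oop and RawAccess types (the helper name is hypothetical, not part of heapShared.cpp):

  // Sketch only: compute where a field of the original object lives in its archived
  // copy, assuming the copy preserves the object layout byte-for-byte.
  #include <cstddef>

  static char* mirrored_field_addr(char* orig_obj, char* field_in_orig, char* archived_obj) {
    std::ptrdiff_t field_delta = field_in_orig - orig_obj;  // byte offset of the field
    return archived_obj + field_delta;                      // same offset in the copy
  }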
938
939void HeapShared::check_closed_region_object(InstanceKlass* k) {
940 // Check fields in the object
941 for (JavaFieldStream fs(k); !fs.done(); fs.next()) {
942 if (!fs.access_flags().is_static()) {
943 BasicType ft = fs.field_descriptor().field_type();
944 if (!fs.access_flags().is_final() && is_reference_type(ft)) {
945 ResourceMark rm;
946 log_warning(cds, heap)(!(LogImpl<(LogTag::_cds), (LogTag::_heap), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG)
>::is_level(LogLevel::Warning))) ? (void)0 : LogImpl<(LogTag
::_cds), (LogTag::_heap), (LogTag::__NO_TAG), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG)>::write<LogLevel
::Warning>
(
947 "Please check reference field in %s instance in closed archive heap region: %s %s",
948 k->external_name(), (fs.name())->as_C_string(),
949 (fs.signature())->as_C_string());
950 }
951 }
952 }
953}
954
955void HeapShared::check_module_oop(oop orig_module_obj) {
956 assert(DumpSharedSpaces, "must be")do { if (!(DumpSharedSpaces)) { (*g_assert_poison) = 'X';; report_vm_error
("/home/daniel/Projects/java/jdk/src/hotspot/share/cds/heapShared.cpp"
, 956, "assert(" "DumpSharedSpaces" ") failed", "must be"); ::
breakpoint(); } } while (0)
;
957 assert(java_lang_Module::is_instance(orig_module_obj), "must be")do { if (!(java_lang_Module::is_instance(orig_module_obj))) {
(*g_assert_poison) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/cds/heapShared.cpp"
, 957, "assert(" "java_lang_Module::is_instance(orig_module_obj)"
") failed", "must be"); ::breakpoint(); } } while (0)
;
958 ModuleEntry* orig_module_ent = java_lang_Module::module_entry_raw(orig_module_obj);
959 if (orig_module_ent == NULL__null) {
960 // These special Module objects are created in Java code. They are not
961 // defined via Modules::define_module(), so they don't have a ModuleEntry:
962 // java.lang.Module::ALL_UNNAMED_MODULE
963 // java.lang.Module::EVERYONE_MODULE
964 // jdk.internal.loader.ClassLoaders$BootClassLoader::unnamedModule
965 assert(java_lang_Module::name(orig_module_obj) == NULL, "must be unnamed")do { if (!(java_lang_Module::name(orig_module_obj) == __null)
) { (*g_assert_poison) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/cds/heapShared.cpp"
, 965, "assert(" "java_lang_Module::name(orig_module_obj) == __null"
") failed", "must be unnamed"); ::breakpoint(); } } while (0
)
;
966 log_info(cds, heap)(!(LogImpl<(LogTag::_cds), (LogTag::_heap), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG)
>::is_level(LogLevel::Info))) ? (void)0 : LogImpl<(LogTag
::_cds), (LogTag::_heap), (LogTag::__NO_TAG), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG)>::write<LogLevel
::Info>
("Module oop with No ModuleEntry* @[" PTR_FORMAT"0x%016" "l" "x" "]", p2i(orig_module_obj));
967 } else {
968 ClassLoaderData* loader_data = orig_module_ent->loader_data();
969 assert(loader_data->is_builtin_class_loader_data(), "must be")do { if (!(loader_data->is_builtin_class_loader_data())) {
(*g_assert_poison) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/cds/heapShared.cpp"
, 969, "assert(" "loader_data->is_builtin_class_loader_data()"
") failed", "must be"); ::breakpoint(); } } while (0)
;
970 }
971}
972
973
974// (1) If orig_obj has not been archived yet, archive it.
975// (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
976// trace all objects that are reachable from it, and make sure these objects are archived.
977// (3) Record the klasses of orig_obj and all reachable objects.
978oop HeapShared::archive_reachable_objects_from(int level,
979 KlassSubGraphInfo* subgraph_info,
980 oop orig_obj,
981 bool is_closed_archive) {
982 assert(orig_obj != NULL, "must be")do { if (!(orig_obj != __null)) { (*g_assert_poison) = 'X';; report_vm_error
("/home/daniel/Projects/java/jdk/src/hotspot/share/cds/heapShared.cpp"
, 982, "assert(" "orig_obj != __null" ") failed", "must be");
::breakpoint(); } } while (0)
;
983 assert(!is_archived_object_during_dumptime(orig_obj), "sanity")do { if (!(!is_archived_object_during_dumptime(orig_obj))) { (
*g_assert_poison) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/cds/heapShared.cpp"
, 983, "assert(" "!is_archived_object_during_dumptime(orig_obj)"
") failed", "sanity"); ::breakpoint(); } } while (0)
;
984
985 if (!JavaClasses::is_supported_for_archiving(orig_obj)) {
986 // This object has injected fields that cannot be supported easily, so we disallow them for now.
987 // If you get an error here, you probably made a change in the JDK library that has added
988 // these objects that are referenced (directly or indirectly) by static fields.
989 ResourceMark rm;
990 log_error(cds, heap)(!(LogImpl<(LogTag::_cds), (LogTag::_heap), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG)
>::is_level(LogLevel::Error))) ? (void)0 : LogImpl<(LogTag
::_cds), (LogTag::_heap), (LogTag::__NO_TAG), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG)>::write<LogLevel
::Error>
("Cannot archive object of class %s", orig_obj->klass()->external_name());
991 vm_direct_exit(1);
992 }
993
994 // java.lang.Class instances cannot be included in an archived object sub-graph. We only support
995 // them as Klass::_archived_mirror because they need to be specially restored at run time.
996 //
997 // If you get an error here, you probably made a change in the JDK library that has added a Class
998 // object that is referenced (directly or indirectly) by static fields.
999 if (java_lang_Class::is_instance(orig_obj)) {
1000 log_error(cds, heap)(!(LogImpl<(LogTag::_cds), (LogTag::_heap), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG)
>::is_level(LogLevel::Error))) ? (void)0 : LogImpl<(LogTag
::_cds), (LogTag::_heap), (LogTag::__NO_TAG), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG)>::write<LogLevel
::Error>
("(%d) Unknown java.lang.Class object is in the archived sub-graph", level);
1001 vm_direct_exit(1);
1002 }
1003
1004 oop archived_obj = find_archived_heap_object(orig_obj);
1005 if (java_lang_String::is_instance(orig_obj) && archived_obj != NULL__null) {
1006 // To save time, don't walk strings that are already archived. They just contain
1007 // pointers to a type array, whose klass doesn't need to be recorded.
1008 return archived_obj;
1009 }
1010
1011 if (has_been_seen_during_subgraph_recording(orig_obj)) {
1012 // orig_obj has already been archived and traced. Nothing more to do.
1013 return archived_obj;
1014 } else {
1015 set_has_been_seen_during_subgraph_recording(orig_obj);
1016 }
1017
1018 bool record_klasses_only = (archived_obj != NULL__null);
1019 if (archived_obj == NULL__null) {
1020 ++_num_new_archived_objs;
1021 archived_obj = archive_object(orig_obj);
1022 if (archived_obj == NULL__null) {
1023 // Skip archiving the sub-graph referenced from the current entry field.
1024 ResourceMark rm;
1025 log_error(cds, heap)(!(LogImpl<(LogTag::_cds), (LogTag::_heap), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG)
>::is_level(LogLevel::Error))) ? (void)0 : LogImpl<(LogTag
::_cds), (LogTag::_heap), (LogTag::__NO_TAG), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG)>::write<LogLevel
::Error>
(
1026 "Cannot archive the sub-graph referenced from %s object ("
1027 PTR_FORMAT"0x%016" "l" "x" ") size " SIZE_FORMAT"%" "l" "u" ", skipped.",
1028 orig_obj->klass()->external_name(), p2i(orig_obj), orig_obj->size() * HeapWordSize);
1029 if (level == 1) {
1030        // Don't archive a subgraph root that's too big. For archived static fields, that's OK
1031 // as the Java code will take care of initializing this field dynamically.
1032 return NULL__null;
1033 } else {
1034 // We don't know how to handle an object that has been archived, but some of its reachable
1035 // objects cannot be archived. Bail out for now. We might need to fix this in the future if
1036 // we have a real use case.
1037 vm_direct_exit(1);
1038 }
1039 }
1040
1041 if (java_lang_Module::is_instance(orig_obj)) {
1042 check_module_oop(orig_obj);
1043 java_lang_Module::set_module_entry(archived_obj, NULL__null);
1044 java_lang_Module::set_loader(archived_obj, NULL__null);
1045 } else if (java_lang_ClassLoader::is_instance(orig_obj)) {
1046 // class_data will be restored explicitly at run time.
1047      guarantee(orig_obj == SystemDictionary::java_platform_loader() ||
1048                orig_obj == SystemDictionary::java_system_loader() ||
1049                java_lang_ClassLoader::loader_data(orig_obj) == NULL, "must be");
1050 java_lang_ClassLoader::release_set_loader_data(archived_obj, NULL__null);
1051 }
1052 }
1053
1054 assert(archived_obj != NULL, "must be")do { if (!(archived_obj != __null)) { (*g_assert_poison) = 'X'
;; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/cds/heapShared.cpp"
, 1054, "assert(" "archived_obj != __null" ") failed", "must be"
); ::breakpoint(); } } while (0)
;
1055 Klass *orig_k = orig_obj->klass();
1056 subgraph_info->add_subgraph_object_klass(orig_k);
1057
1058 WalkOopAndArchiveClosure walker(level, is_closed_archive, record_klasses_only,
1059 subgraph_info, orig_obj, archived_obj);
1060 orig_obj->oop_iterate(&walker);
1061 if (is_closed_archive && orig_k->is_instance_klass()) {
1062 check_closed_region_object(InstanceKlass::cast(orig_k));
1063 }
1064 return archived_obj;
1065}
1066
1067//
1068// Start from the given static field in a java mirror and archive the
1069// complete sub-graph of java heap objects that are reached directly
1070// or indirectly from the starting object by following references.
1071// Sub-graph archiving restrictions (current):
1072//
1073// - All classes of objects in the archived sub-graph (including the
1074// entry class) must be boot class only.
1075// - No java.lang.Class instance (java mirror) can be included inside
1076// an archived sub-graph. Mirror can only be the sub-graph entry object.
1077//
1078// The Java heap object sub-graph archiving process (see
1079// WalkOopAndArchiveClosure):
1080//
1081// 1) Java object sub-graph archiving starts from a given static field
1082// within a Class instance (java mirror). If the static field is a
1083 //     reference field and points to a non-null java object, proceed to
1084// the next step.
1085//
1086// 2) Archives the referenced java object. If an archived copy of the
1087// current object already exists, updates the pointer in the archived
1088// copy of the referencing object to point to the current archived object.
1089// Otherwise, proceed to the next step.
1090//
1091// 3) Follows all references within the current java object and recursively
1092// archive the sub-graph of objects starting from each reference.
1093//
1094// 4) Updates the pointer in the archived copy of referencing object to
1095// point to the current archived object.
1096//
1097// 5) The Klass of the current java object is added to the list of Klasses
1098 //    for loading and initializing before any object in the archived graph can
1099// be accessed at runtime.
1100//
1101void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k,
1102 const char* klass_name,
1103 int field_offset,
1104 const char* field_name,
1105 bool is_closed_archive) {
1106 assert(DumpSharedSpaces, "dump time only")do { if (!(DumpSharedSpaces)) { (*g_assert_poison) = 'X';; report_vm_error
("/home/daniel/Projects/java/jdk/src/hotspot/share/cds/heapShared.cpp"
, 1106, "assert(" "DumpSharedSpaces" ") failed", "dump time only"
); ::breakpoint(); } } while (0)
;
1107 assert(k->is_shared_boot_class(), "must be boot class")do { if (!(k->is_shared_boot_class())) { (*g_assert_poison
) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/cds/heapShared.cpp"
, 1107, "assert(" "k->is_shared_boot_class()" ") failed", "must be boot class"
); ::breakpoint(); } } while (0)
;
1108
1109 oop m = k->java_mirror();
1110
1111 KlassSubGraphInfo* subgraph_info = get_subgraph_info(k);
1112 oop f = m->obj_field(field_offset);
1113
1114 log_debug(cds, heap)(!(LogImpl<(LogTag::_cds), (LogTag::_heap), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG)
>::is_level(LogLevel::Debug))) ? (void)0 : LogImpl<(LogTag
::_cds), (LogTag::_heap), (LogTag::__NO_TAG), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG)>::write<LogLevel
::Debug>
("Start archiving from: %s::%s (" PTR_FORMAT"0x%016" "l" "x" ")", klass_name, field_name, p2i(f));
1115
1116 if (!CompressedOops::is_null(f)) {
1117 if (log_is_enabled(Trace, cds, heap)(LogImpl<(LogTag::_cds), (LogTag::_heap), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG)
>::is_level(LogLevel::Trace))
) {
1118 LogTarget(Trace, cds, heap)LogTargetImpl<LogLevel::Trace, (LogTag::_cds), (LogTag::_heap
), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG)
, (LogTag::__NO_TAG)>
log;
1119 LogStream out(log);
1120 f->print_on(&out);
1121 }
1122
1123 oop af = archive_reachable_objects_from(1, subgraph_info, f, is_closed_archive);
1124
1125 if (af == NULL__null) {
1126 log_error(cds, heap)(!(LogImpl<(LogTag::_cds), (LogTag::_heap), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG)
>::is_level(LogLevel::Error))) ? (void)0 : LogImpl<(LogTag
::_cds), (LogTag::_heap), (LogTag::__NO_TAG), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG)>::write<LogLevel
::Error>
("Archiving failed %s::%s (some reachable objects cannot be archived)",
1127 klass_name, field_name);
1128 } else {
1129 // Note: the field value is not preserved in the archived mirror.
1130 // Record the field as a new subGraph entry point. The recorded
1131 // information is restored from the archive at runtime.
1132 subgraph_info->add_subgraph_entry_field(field_offset, af, is_closed_archive);
1133 log_info(cds, heap)(!(LogImpl<(LogTag::_cds), (LogTag::_heap), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG)
>::is_level(LogLevel::Info))) ? (void)0 : LogImpl<(LogTag
::_cds), (LogTag::_heap), (LogTag::__NO_TAG), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG)>::write<LogLevel
::Info>
("Archived field %s::%s => " PTR_FORMAT"0x%016" "l" "x", klass_name, field_name, p2i(af));
1134 }
1135 } else {
1136 // The field contains null, we still need to record the entry point,
1137 // so it can be restored at runtime.
1138 subgraph_info->add_subgraph_entry_field(field_offset, NULL__null, false);
1139 }
1140}
1141
1142#ifndef PRODUCT
1143class VerifySharedOopClosure: public BasicOopIterateClosure {
1144 private:
1145 bool _is_archived;
1146
1147 public:
1148 VerifySharedOopClosure(bool is_archived) : _is_archived(is_archived) {}
1149
1150 void do_oop(narrowOop *p) { VerifySharedOopClosure::do_oop_work(p); }
1151 void do_oop( oop *p) { VerifySharedOopClosure::do_oop_work(p); }
1152
1153 protected:
1154 template <class T> void do_oop_work(T *p) {
1155 oop obj = RawAccess<>::oop_load(p);
1156 if (!CompressedOops::is_null(obj)) {
1157 HeapShared::verify_reachable_objects_from(obj, _is_archived);
1158 }
1159 }
1160};
1161
1162void HeapShared::verify_subgraph_from_static_field(InstanceKlass* k, int field_offset) {
1163 assert(DumpSharedSpaces, "dump time only")do { if (!(DumpSharedSpaces)) { (*g_assert_poison) = 'X';; report_vm_error
("/home/daniel/Projects/java/jdk/src/hotspot/share/cds/heapShared.cpp"
, 1163, "assert(" "DumpSharedSpaces" ") failed", "dump time only"
); ::breakpoint(); } } while (0)
;
1164 assert(k->is_shared_boot_class(), "must be boot class")do { if (!(k->is_shared_boot_class())) { (*g_assert_poison
) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/cds/heapShared.cpp"
, 1164, "assert(" "k->is_shared_boot_class()" ") failed", "must be boot class"
); ::breakpoint(); } } while (0)
;
1165
1166 oop m = k->java_mirror();
1167 oop f = m->obj_field(field_offset);
1168 if (!CompressedOops::is_null(f)) {
1169 verify_subgraph_from(f);
1170 }
1171}
1172
1173void HeapShared::verify_subgraph_from(oop orig_obj) {
1174 oop archived_obj = find_archived_heap_object(orig_obj);
1175 if (archived_obj == NULL__null) {
1176 // It's OK for the root of a subgraph to be not archived. See comments in
1177 // archive_reachable_objects_from().
1178 return;
1179 }
1180
1181 // Verify that all objects reachable from orig_obj are archived.
1182 init_seen_objects_table();
1183 verify_reachable_objects_from(orig_obj, false);
1184 delete_seen_objects_table();
1185
1186 // Note: we could also verify that all objects reachable from the archived
1187 // copy of orig_obj can only point to archived objects, with:
1188 // init_seen_objects_table();
1189 // verify_reachable_objects_from(archived_obj, true);
1190 // init_seen_objects_table();
1191 // but that's already done in G1HeapVerifier::verify_archive_regions so we
1192 // won't do it here.
1193}
1194
1195void HeapShared::verify_reachable_objects_from(oop obj, bool is_archived) {
1196 _num_total_verifications ++;
1197 if (!has_been_seen_during_subgraph_recording(obj)) {
1198 set_has_been_seen_during_subgraph_recording(obj);
1199
1200 if (is_archived) {
1201 assert(is_archived_object_during_dumptime(obj), "must be")do { if (!(is_archived_object_during_dumptime(obj))) { (*g_assert_poison
) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/cds/heapShared.cpp"
, 1201, "assert(" "is_archived_object_during_dumptime(obj)" ") failed"
, "must be"); ::breakpoint(); } } while (0)
;
1202 assert(find_archived_heap_object(obj) == NULL, "must be")do { if (!(find_archived_heap_object(obj) == __null)) { (*g_assert_poison
) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/cds/heapShared.cpp"
, 1202, "assert(" "find_archived_heap_object(obj) == __null" ") failed"
, "must be"); ::breakpoint(); } } while (0)
;
1203 } else {
1204 assert(!is_archived_object_during_dumptime(obj), "must be")do { if (!(!is_archived_object_during_dumptime(obj))) { (*g_assert_poison
) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/cds/heapShared.cpp"
, 1204, "assert(" "!is_archived_object_during_dumptime(obj)" ") failed"
, "must be"); ::breakpoint(); } } while (0)
;
1205 assert(find_archived_heap_object(obj) != NULL, "must be")do { if (!(find_archived_heap_object(obj) != __null)) { (*g_assert_poison
) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/cds/heapShared.cpp"
, 1205, "assert(" "find_archived_heap_object(obj) != __null" ") failed"
, "must be"); ::breakpoint(); } } while (0)
;
1206 }
1207
1208 VerifySharedOopClosure walker(is_archived);
1209 obj->oop_iterate(&walker);
1210 }
1211}
1212#endif
1213
1214HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = NULL__null;
1215int HeapShared::_num_new_walked_objs;
1216int HeapShared::_num_new_archived_objs;
1217int HeapShared::_num_old_recorded_klasses;
1218
1219int HeapShared::_num_total_subgraph_recordings = 0;
1220int HeapShared::_num_total_walked_objs = 0;
1221int HeapShared::_num_total_archived_objs = 0;
1222int HeapShared::_num_total_recorded_klasses = 0;
1223int HeapShared::_num_total_verifications = 0;
1224
1225bool HeapShared::has_been_seen_during_subgraph_recording(oop obj) {
1226 return _seen_objects_table->get(obj) != NULL__null;
1227}
1228
1229void HeapShared::set_has_been_seen_during_subgraph_recording(oop obj) {
1230 assert(!has_been_seen_during_subgraph_recording(obj), "sanity")do { if (!(!has_been_seen_during_subgraph_recording(obj))) { (
*g_assert_poison) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/cds/heapShared.cpp"
, 1230, "assert(" "!has_been_seen_during_subgraph_recording(obj)"
") failed", "sanity"); ::breakpoint(); } } while (0)
;
1231 _seen_objects_table->put(obj, true);
1232 ++ _num_new_walked_objs;
1233}
1234
1235void HeapShared::start_recording_subgraph(InstanceKlass *k, const char* class_name, bool is_full_module_graph) {
1236 log_info(cds, heap)(!(LogImpl<(LogTag::_cds), (LogTag::_heap), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG)
>::is_level(LogLevel::Info))) ? (void)0 : LogImpl<(LogTag
::_cds), (LogTag::_heap), (LogTag::__NO_TAG), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG)>::write<LogLevel
::Info>
("Start recording subgraph(s) for archived fields in %s", class_name);
1237 init_subgraph_info(k, is_full_module_graph);
1238 init_seen_objects_table();
1239 _num_new_walked_objs = 0;
1240 _num_new_archived_objs = 0;
1241 _num_old_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses();
1242}
1243
1244void HeapShared::done_recording_subgraph(InstanceKlass *k, const char* class_name) {
1245 int num_new_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses() -
1246 _num_old_recorded_klasses;
1247 log_info(cds, heap)(!(LogImpl<(LogTag::_cds), (LogTag::_heap), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG)
>::is_level(LogLevel::Info))) ? (void)0 : LogImpl<(LogTag
::_cds), (LogTag::_heap), (LogTag::__NO_TAG), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG)>::write<LogLevel
::Info>
("Done recording subgraph(s) for archived fields in %s: "
1248 "walked %d objs, archived %d new objs, recorded %d classes",
1249 class_name, _num_new_walked_objs, _num_new_archived_objs,
1250 num_new_recorded_klasses);
1251
1252 delete_seen_objects_table();
1253
1254 _num_total_subgraph_recordings ++;
1255 _num_total_walked_objs += _num_new_walked_objs;
1256 _num_total_archived_objs += _num_new_archived_objs;
1257 _num_total_recorded_klasses += num_new_recorded_klasses;
1258}
1259
1260class ArchivableStaticFieldFinder: public FieldClosure {
1261 InstanceKlass* _ik;
1262 Symbol* _field_name;
1263 bool _found;
1264 int _offset;
1265public:
1266 ArchivableStaticFieldFinder(InstanceKlass* ik, Symbol* field_name) :
1267 _ik(ik), _field_name(field_name), _found(false), _offset(-1) {}
1268
1269 virtual void do_field(fieldDescriptor* fd) {
1270 if (fd->name() == _field_name) {
1271 assert(!_found, "fields cannot be overloaded")do { if (!(!_found)) { (*g_assert_poison) = 'X';; report_vm_error
("/home/daniel/Projects/java/jdk/src/hotspot/share/cds/heapShared.cpp"
, 1271, "assert(" "!_found" ") failed", "fields cannot be overloaded"
); ::breakpoint(); } } while (0)
;
1272 assert(is_reference_type(fd->field_type()), "can archive only fields that are references")do { if (!(is_reference_type(fd->field_type()))) { (*g_assert_poison
) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/cds/heapShared.cpp"
, 1272, "assert(" "is_reference_type(fd->field_type())" ") failed"
, "can archive only fields that are references"); ::breakpoint
(); } } while (0)
;
1273 _found = true;
1274 _offset = fd->offset();
1275 }
1276 }
1277 bool found() { return _found; }
1278 int offset() { return _offset; }
1279};
1280
1281void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[],
1282 int num, TRAPSJavaThread* __the_thread__) {
1283 for (int i = 0; i < num; i++) {
1284 ArchivableStaticFieldInfo* info = &fields[i];
1285 TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
1286 TempNewSymbol field_name = SymbolTable::new_symbol(info->field_name);
1287
1288 Klass* k = SystemDictionary::resolve_or_fail(klass_name, true, CHECK__the_thread__); if ((((ThreadShadow*)__the_thread__)->has_pending_exception
())) return ; (void)(0
);
1289 InstanceKlass* ik = InstanceKlass::cast(k);
1290 assert(InstanceKlass::cast(ik)->is_shared_boot_class(),do { if (!(InstanceKlass::cast(ik)->is_shared_boot_class()
)) { (*g_assert_poison) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/cds/heapShared.cpp"
, 1291, "assert(" "InstanceKlass::cast(ik)->is_shared_boot_class()"
") failed", "Only support boot classes"); ::breakpoint(); } }
while (0)
1291 "Only support boot classes")do { if (!(InstanceKlass::cast(ik)->is_shared_boot_class()
)) { (*g_assert_poison) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/cds/heapShared.cpp"
, 1291, "assert(" "InstanceKlass::cast(ik)->is_shared_boot_class()"
") failed", "Only support boot classes"); ::breakpoint(); } }
while (0)
;
1292 ik->initialize(CHECK__the_thread__); if ((((ThreadShadow*)__the_thread__)->has_pending_exception
())) return ; (void)(0
);
1293
1294 ArchivableStaticFieldFinder finder(ik, field_name);
1295 ik->do_local_static_fields(&finder);
1296 assert(finder.found(), "field must exist")do { if (!(finder.found())) { (*g_assert_poison) = 'X';; report_vm_error
("/home/daniel/Projects/java/jdk/src/hotspot/share/cds/heapShared.cpp"
, 1296, "assert(" "finder.found()" ") failed", "field must exist"
); ::breakpoint(); } } while (0)
;
1297
1298 info->klass = ik;
1299 info->offset = finder.offset();
1300 }
1301}
1302
1303void HeapShared::init_subgraph_entry_fields(TRAPSJavaThread* __the_thread__) {
1304 assert(HeapShared::can_write(), "must be")do { if (!(HeapShared::can_write())) { (*g_assert_poison) = 'X'
;; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/cds/heapShared.cpp"
, 1304, "assert(" "HeapShared::can_write()" ") failed", "must be"
); ::breakpoint(); } } while (0)
;
1305 _dump_time_subgraph_info_table = new (ResourceObj::C_HEAP, mtClass)DumpTimeKlassSubGraphInfoTable();
1306 init_subgraph_entry_fields(closed_archive_subgraph_entry_fields,
1307 num_closed_archive_subgraph_entry_fields,
1308 CHECK__the_thread__); if ((((ThreadShadow*)__the_thread__)->has_pending_exception
())) return ; (void)(0
);
1309 init_subgraph_entry_fields(open_archive_subgraph_entry_fields,
1310 num_open_archive_subgraph_entry_fields,
1311 CHECK__the_thread__); if ((((ThreadShadow*)__the_thread__)->has_pending_exception
())) return ; (void)(0
);
1312 if (MetaspaceShared::use_full_module_graph()) {
1313 init_subgraph_entry_fields(fmg_open_archive_subgraph_entry_fields,
1314 num_fmg_open_archive_subgraph_entry_fields,
1315 CHECK__the_thread__); if ((((ThreadShadow*)__the_thread__)->has_pending_exception
())) return ; (void)(0
);
1316 }
1317}
1318
1319void HeapShared::init_for_dumping(TRAPSJavaThread* __the_thread__) {
1320 if (HeapShared::can_write()) {
1321 _dumped_interned_strings = new (ResourceObj::C_HEAP, mtClass)DumpedInternedStrings();
1322 init_subgraph_entry_fields(CHECK__the_thread__); if ((((ThreadShadow*)__the_thread__)->has_pending_exception
())) return ; (void)(0
);
1323 }
1324}
1325
1326void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
1327 int num, bool is_closed_archive,
1328 bool is_full_module_graph) {
1329 _num_total_subgraph_recordings = 0;
1330 _num_total_walked_objs = 0;
1331 _num_total_archived_objs = 0;
1332 _num_total_recorded_klasses = 0;
1333 _num_total_verifications = 0;
1334
1335 // For each class X that has one or more archived fields:
1336 // [1] Dump the subgraph of each archived field
1337  //     [2] Create a list of all the classes of the objects that can be reached
1338 // by any of these static fields.
1339 // At runtime, these classes are initialized before X's archived fields
1340 // are restored by HeapShared::initialize_from_archived_subgraph().
1341 int i;
1342 for (i = 0; i < num; ) {
1343 ArchivableStaticFieldInfo* info = &fields[i];
1344 const char* klass_name = info->klass_name;
1345 start_recording_subgraph(info->klass, klass_name, is_full_module_graph);
1346
1347 // If you have specified consecutive fields of the same klass in
1348 // fields[], these will be archived in the same
1349 // {start_recording_subgraph ... done_recording_subgraph} pass to
1350 // save time.
1351 for (; i < num; i++) {
1352 ArchivableStaticFieldInfo* f = &fields[i];
1353 if (f->klass_name != klass_name) {
1354 break;
1355 }
1356
1357 archive_reachable_objects_from_static_field(f->klass, f->klass_name,
1358 f->offset, f->field_name,
1359 is_closed_archive);
1360 }
1361 done_recording_subgraph(info->klass, klass_name);
1362 }
1363
1364 log_info(cds, heap)(!(LogImpl<(LogTag::_cds), (LogTag::_heap), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG)
>::is_level(LogLevel::Info))) ? (void)0 : LogImpl<(LogTag
::_cds), (LogTag::_heap), (LogTag::__NO_TAG), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG)>::write<LogLevel
::Info>
("Archived subgraph records in %s archive heap region = %d",
1365 is_closed_archive ? "closed" : "open",
1366 _num_total_subgraph_recordings);
1367 log_info(cds, heap)(!(LogImpl<(LogTag::_cds), (LogTag::_heap), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG)
>::is_level(LogLevel::Info))) ? (void)0 : LogImpl<(LogTag
::_cds), (LogTag::_heap), (LogTag::__NO_TAG), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG)>::write<LogLevel
::Info>
(" Walked %d objects", _num_total_walked_objs);
1368 log_info(cds, heap)(!(LogImpl<(LogTag::_cds), (LogTag::_heap), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG)
>::is_level(LogLevel::Info))) ? (void)0 : LogImpl<(LogTag
::_cds), (LogTag::_heap), (LogTag::__NO_TAG), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG)>::write<LogLevel
::Info>
(" Archived %d objects", _num_total_archived_objs);
1369 log_info(cds, heap)(!(LogImpl<(LogTag::_cds), (LogTag::_heap), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG)
>::is_level(LogLevel::Info))) ? (void)0 : LogImpl<(LogTag
::_cds), (LogTag::_heap), (LogTag::__NO_TAG), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG)>::write<LogLevel
::Info>
(" Recorded %d klasses", _num_total_recorded_klasses);
1370
1371#ifndef PRODUCT
1372 for (int i = 0; i < num; i++) {
1373 ArchivableStaticFieldInfo* f = &fields[i];
1374 verify_subgraph_from_static_field(f->klass, f->offset);
1375 }
1376 log_info(cds, heap)(!(LogImpl<(LogTag::_cds), (LogTag::_heap), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG)
>::is_level(LogLevel::Info))) ? (void)0 : LogImpl<(LogTag
::_cds), (LogTag::_heap), (LogTag::__NO_TAG), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG)>::write<LogLevel
::Info>
(" Verified %d references", _num_total_verifications);
1377#endif
1378}
1379
1380// Not all the strings in the global StringTable are dumped into the archive, because
1381// some of those strings may be only referenced by classes that are excluded from
1382// the archive. We need to explicitly mark the strings that are:
1383// [1] used by classes that WILL be archived;
1384// [2] included in the SharedArchiveConfigFile.
1385void HeapShared::add_to_dumped_interned_strings(oop string) {
1386 assert_at_safepoint()do { if (!(SafepointSynchronize::is_at_safepoint())) { (*g_assert_poison
) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/cds/heapShared.cpp"
, 1386, "assert(" "SafepointSynchronize::is_at_safepoint()" ") failed"
, "should be at a safepoint"); ::breakpoint(); } } while (0)
; // DumpedInternedStrings uses raw oops
1387 bool created;
1388 _dumped_interned_strings->put_if_absent(string, true, &created);
1389}
1390
1391// At dump-time, find the location of all the non-null oop pointers in an archived heap
1392// region. This way we can quickly relocate all the pointers without using
1393// BasicOopIterateClosure at runtime.
1394class FindEmbeddedNonNullPointers: public BasicOopIterateClosure {
1395 narrowOop* _start;
1396 BitMap *_oopmap;
1397 int _num_total_oops;
1398 int _num_null_oops;
1399 public:
1400 FindEmbeddedNonNullPointers(narrowOop* start, BitMap* oopmap)
1401 : _start(start), _oopmap(oopmap), _num_total_oops(0), _num_null_oops(0) {}
1402
1403 virtual void do_oop(narrowOop* p) {
1404 _num_total_oops ++;
1405 narrowOop v = *p;
1406 if (!CompressedOops::is_null(v)) {
1407 size_t idx = p - _start;
1408 _oopmap->set_bit(idx);
1409 } else {
1410 _num_null_oops ++;
1411 }
1412 }
1413 virtual void do_oop(oop *p) {
1414 ShouldNotReachHere()do { (*g_assert_poison) = 'X';; report_should_not_reach_here(
"/home/daniel/Projects/java/jdk/src/hotspot/share/cds/heapShared.cpp"
, 1414); ::breakpoint(); } while (0)
;
1415 }
1416 int num_total_oops() const { return _num_total_oops; }
1417 int num_null_oops() const { return _num_null_oops; }
1418};
1419
1420 ResourceBitMap HeapShared::calculate_oopmap(MemRegion region) {
1421   assert(UseCompressedOops, "must be");
        2. Assuming 'UseCompressedOops' is true
        3. Taking false branch
        4. Loop condition is false.  Exiting loop
1422   size_t num_bits = region.byte_size() / sizeof(narrowOop);
1423   ResourceBitMap oopmap(num_bits);
1424
1425   HeapWord* p   = region.start();
1426   HeapWord* end = region.end();
1427   FindEmbeddedNonNullPointers finder((narrowOop*)p, &oopmap);
1428   ArchiveBuilder* builder = DumpSharedSpaces ? ArchiveBuilder::current() : NULL;
        5. Assuming 'DumpSharedSpaces' is false
        6. '?' condition is false
        7. 'builder' initialized to a null pointer value
1429
1430   int num_objs = 0;
1431   while (p < end) {
        8. Loop condition is true.  Entering loop body
1432     oop o = cast_to_oop(p);
1433     o->oop_iterate(&finder);
1434     p += o->size();
        9. Calling 'oopDesc::size'
        11. Returning from 'oopDesc::size'
1435     if (DumpSharedSpaces) {
        12. Assuming 'DumpSharedSpaces' is true
        13. Taking true branch
1436       builder->relocate_klass_ptr(o);
        14. Called C++ object pointer is null
1437     }
1438     ++ num_objs;
1439   }
1440
1441   log_info(cds, heap)("calculate_oopmap: objects = %6d, embedded oops = %7d, nulls = %7d",
1442            num_objs, finder.num_total_oops(), finder.num_null_oops());
1443 return oopmap;
1444}
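The path notes above describe the reported defect: 'builder' is set to NULL at line 1428 on the assumption that DumpSharedSpaces is false, yet the same flag is assumed true when it is re-read at line 1435, so the call at line 1436 dereferences a null pointer. In the running VM the flag cannot change between the two reads, but the analyzer has no way to prove that. One way to make the invariant explicit is to read the flag once and assert before the call; the following is a sketch only, not the upstream fix:

  // Sketch only: read DumpSharedSpaces into a local, so the initialization of
  // 'builder' and its use are guarded by the same value on every analyzer path.
  const bool relocate = DumpSharedSpaces;
  ArchiveBuilder* builder = relocate ? ArchiveBuilder::current() : NULL;
  while (p < end) {
    oop o = cast_to_oop(p);
    o->oop_iterate(&finder);
    p += o->size();
    if (relocate) {
      assert(builder != NULL, "ArchiveBuilder must be available while dumping");
      builder->relocate_klass_ptr(o);
    }
    ++num_objs;
  }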
1445
1446// Patch all the embedded oop pointers inside an archived heap region,
1447// to be consistent with the runtime oop encoding.
1448class PatchEmbeddedPointers: public BitMapClosure {
1449 narrowOop* _start;
1450
1451 public:
1452 PatchEmbeddedPointers(narrowOop* start) : _start(start) {}
1453
1454 bool do_bit(size_t offset) {
1455 narrowOop* p = _start + offset;
1456 narrowOop v = *p;
1457 assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time")do { if (!(!CompressedOops::is_null(v))) { (*g_assert_poison)
= 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/cds/heapShared.cpp"
, 1457, "assert(" "!CompressedOops::is_null(v)" ") failed", "null oops should have been filtered out at dump time"
); ::breakpoint(); } } while (0)
;
1458 oop o = HeapShared::decode_from_archive(v);
1459 RawAccess<IS_NOT_NULL>::oop_store(p, o);
1460 return true;
1461 }
1462};
1463
1464// Patch all the non-null pointers that are embedded in the archived heap objects
1465// in this region
1466void HeapShared::patch_embedded_pointers(MemRegion region, address oopmap,
1467 size_t oopmap_size_in_bits) {
1468 BitMapView bm((BitMap::bm_word_t*)oopmap, oopmap_size_in_bits);
1469
1470#ifndef PRODUCT
1471 ResourceMark rm;
1472 ResourceBitMap checkBm = calculate_oopmap(region);
        1. Calling 'HeapShared::calculate_oopmap'
1473   assert(bm.is_same(checkBm), "sanity");
1474#endif
1475
1476 PatchEmbeddedPointers patcher((narrowOop*)region.start());
1477 bm.iterate(&patcher);
1478}
1479
1480// The CDS archive remembers each heap object by its address at dump time, but
1481// the heap object may be loaded at a different address at run time. This structure is used
1482// to translate the dump time addresses for all objects in FileMapInfo::space_at(region_index)
1483// to their runtime addresses.
1484struct LoadedArchiveHeapRegion {
1485 int _region_index; // index for FileMapInfo::space_at(index)
1486 size_t _region_size; // number of bytes in this region
1487 uintptr_t _dumptime_base; // The dump-time (decoded) address of the first object in this region
1488   intx _runtime_offset;      // If an object's dump time address P is within this region, its
1489 // runtime address is P + _runtime_offset
1490
1491 static int comparator(const void* a, const void* b) {
1492 LoadedArchiveHeapRegion* reg_a = (LoadedArchiveHeapRegion*)a;
1493 LoadedArchiveHeapRegion* reg_b = (LoadedArchiveHeapRegion*)b;
1494 if (reg_a->_dumptime_base < reg_b->_dumptime_base) {
1495 return -1;
1496 } else if (reg_a->_dumptime_base == reg_b->_dumptime_base) {
1497 return 0;
1498 } else {
1499 return 1;
1500 }
1501 }
1502
1503 uintptr_t top() {
1504 return _dumptime_base + _region_size;
1505 }
1506};
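To make the _runtime_offset bookkeeping concrete, here is the translation with invented example addresses (the numbers are illustrative only and do not come from this report):

  #include <cstdint>

  int main() {
    std::uintptr_t dumptime_base = 0x0000000700000000ULL; // where the region sat at dump time
    std::uintptr_t runtime_base  = 0x00000006c0000000ULL; // where it was actually loaded
    std::intptr_t  runtime_offset =
        (std::intptr_t)(runtime_base - dumptime_base);    // negative in this example
    std::uintptr_t dumped_ptr  = dumptime_base + 0x1000;  // a pointer recorded in the archive
    std::uintptr_t runtime_ptr = dumped_ptr + runtime_offset;
    return runtime_ptr == runtime_base + 0x1000 ? 0 : 1;  // 0 means the relocation is consistent
  }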
1507
1508void HeapShared::init_loaded_heap_relocation(LoadedArchiveHeapRegion* loaded_regions,
1509 int num_loaded_regions) {
1510 _dumptime_base_0 = loaded_regions[0]._dumptime_base;
1511 _dumptime_base_1 = loaded_regions[1]._dumptime_base;
1512 _dumptime_base_2 = loaded_regions[2]._dumptime_base;
1513 _dumptime_base_3 = loaded_regions[3]._dumptime_base;
1514 _dumptime_top = loaded_regions[num_loaded_regions-1].top();
1515
1516 _runtime_offset_0 = loaded_regions[0]._runtime_offset;
1517 _runtime_offset_1 = loaded_regions[1]._runtime_offset;
1518 _runtime_offset_2 = loaded_regions[2]._runtime_offset;
1519 _runtime_offset_3 = loaded_regions[3]._runtime_offset;
1520
1521 assert(2 <= num_loaded_regions && num_loaded_regions <= 4, "must be")do { if (!(2 <= num_loaded_regions && num_loaded_regions
<= 4)) { (*g_assert_poison) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/cds/heapShared.cpp"
, 1521, "assert(" "2 <= num_loaded_regions && num_loaded_regions <= 4"
") failed", "must be"); ::breakpoint(); } } while (0)
;
1522 if (num_loaded_regions < 4) {
1523 _dumptime_base_3 = UINTPTR_MAX(18446744073709551615UL);
1524 }
1525 if (num_loaded_regions < 3) {
1526 _dumptime_base_2 = UINTPTR_MAX(18446744073709551615UL);
1527 }
1528}
1529
1530bool HeapShared::can_load() {
1531 return Universe::heap()->can_load_archived_objects();
1532}
1533
1534template <int NUM_LOADED_REGIONS>
1535class PatchLoadedRegionPointers: public BitMapClosure {
1536 narrowOop* _start;
1537 intx _offset_0;
1538 intx _offset_1;
1539 intx _offset_2;
1540 intx _offset_3;
1541 uintptr_t _base_0;
1542 uintptr_t _base_1;
1543 uintptr_t _base_2;
1544 uintptr_t _base_3;
1545 uintptr_t _top;
1546
1547 static_assert(MetaspaceShared::max_num_heap_regions == 4, "can't handle more than 4 regions");
1548 static_assert(NUM_LOADED_REGIONS >= 2, "we have at least 2 loaded regions");
1549 static_assert(NUM_LOADED_REGIONS <= 4, "we have at most 4 loaded regions");
1550
1551 public:
1552 PatchLoadedRegionPointers(narrowOop* start, LoadedArchiveHeapRegion* loaded_regions)
1553 : _start(start),
1554 _offset_0(loaded_regions[0]._runtime_offset),
1555 _offset_1(loaded_regions[1]._runtime_offset),
1556 _offset_2(loaded_regions[2]._runtime_offset),
1557 _offset_3(loaded_regions[3]._runtime_offset),
1558 _base_0(loaded_regions[0]._dumptime_base),
1559 _base_1(loaded_regions[1]._dumptime_base),
1560 _base_2(loaded_regions[2]._dumptime_base),
1561 _base_3(loaded_regions[3]._dumptime_base) {
1562 _top = loaded_regions[NUM_LOADED_REGIONS-1].top();
1563 }
1564
1565 bool do_bit(size_t offset) {
1566 narrowOop* p = _start + offset;
1567 narrowOop v = *p;
1568 assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time")do { if (!(!CompressedOops::is_null(v))) { (*g_assert_poison)
= 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/cds/heapShared.cpp"
, 1568, "assert(" "!CompressedOops::is_null(v)" ") failed", "null oops should have been filtered out at dump time"
); ::breakpoint(); } } while (0)
;
1569 uintptr_t o = cast_from_oop<uintptr_t>(HeapShared::decode_from_archive(v));
1570 assert(_base_0 <= o && o < _top, "must be")do { if (!(_base_0 <= o && o < _top)) { (*g_assert_poison
) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/cds/heapShared.cpp"
, 1570, "assert(" "_base_0 <= o && o < _top" ") failed"
, "must be"); ::breakpoint(); } } while (0)
;
1571
1572
1573 // We usually have only 2 regions for the default archive. Use template to avoid unnecessary comparisons.
1574 if (NUM_LOADED_REGIONS > 3 && o >= _base_3) {
1575 o += _offset_3;
1576 } else if (NUM_LOADED_REGIONS > 2 && o >= _base_2) {
1577 o += _offset_2;
1578 } else if (o >= _base_1) {
1579 o += _offset_1;
1580 } else {
1581 o += _offset_0;
1582 }
1583 HeapShared::assert_in_loaded_heap(o);
1584 RawAccess<IS_NOT_NULL>::oop_store(p, cast_to_oop(o));
1585 return true;
1586 }
1587};
1588
1589int HeapShared::init_loaded_regions(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_regions,
1590 MemRegion& archive_space) {
1591 size_t total_bytes = 0;
1592 int num_loaded_regions = 0;
1593 for (int i = MetaspaceShared::first_archive_heap_region;
1594 i <= MetaspaceShared::last_archive_heap_region; i++) {
1595 FileMapRegion* r = mapinfo->space_at(i);
1596 r->assert_is_heap_region();
1597 if (r->used() > 0) {
1598 assert(is_aligned(r->used(), HeapWordSize), "must be")do { if (!(is_aligned(r->used(), HeapWordSize))) { (*g_assert_poison
) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/cds/heapShared.cpp"
, 1598, "assert(" "is_aligned(r->used(), HeapWordSize)" ") failed"
, "must be"); ::breakpoint(); } } while (0)
;
1599 total_bytes += r->used();
1600 LoadedArchiveHeapRegion* ri = &loaded_regions[num_loaded_regions++];
1601 ri->_region_index = i;
1602 ri->_region_size = r->used();
1603 ri->_dumptime_base = (uintptr_t)mapinfo->start_address_as_decoded_from_archive(r);
1604 }
1605 }
1606
1607 assert(is_aligned(total_bytes, HeapWordSize), "must be")do { if (!(is_aligned(total_bytes, HeapWordSize))) { (*g_assert_poison
) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/cds/heapShared.cpp"
, 1607, "assert(" "is_aligned(total_bytes, HeapWordSize)" ") failed"
, "must be"); ::breakpoint(); } } while (0)
;
1608 size_t word_size = total_bytes / HeapWordSize;
1609 HeapWord* buffer = Universe::heap()->allocate_loaded_archive_space(word_size);
1610 if (buffer == nullptr) {
1611 return 0;
1612 }
1613
1614 archive_space = MemRegion(buffer, word_size);
1615 _loaded_heap_bottom = (uintptr_t)archive_space.start();
1616 _loaded_heap_top = _loaded_heap_bottom + total_bytes;
1617
1618 return num_loaded_regions;
1619}
1620
1621void HeapShared::sort_loaded_regions(LoadedArchiveHeapRegion* loaded_regions, int num_loaded_regions,
1622 uintptr_t buffer) {
1623 // Find the relocation offset of the pointers in each region
1624 qsort(loaded_regions, num_loaded_regions, sizeof(LoadedArchiveHeapRegion),
1625 LoadedArchiveHeapRegion::comparator);
1626
1627 uintptr_t p = buffer;
1628 for (int i = 0; i < num_loaded_regions; i++) {
1629 // This region will be loaded at p, so all objects inside this
1630 // region will be shifted by ri->offset
1631 LoadedArchiveHeapRegion* ri = &loaded_regions[i];
1632 ri->_runtime_offset = p - ri->_dumptime_base;
1633 p += ri->_region_size;
1634 }
1635 assert(p == _loaded_heap_top, "must be")do { if (!(p == _loaded_heap_top)) { (*g_assert_poison) = 'X'
;; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/cds/heapShared.cpp"
, 1635, "assert(" "p == _loaded_heap_top" ") failed", "must be"
); ::breakpoint(); } } while (0)
;
1636}
1637
1638bool HeapShared::load_regions(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_regions,
1639 int num_loaded_regions, uintptr_t buffer) {
1640 uintptr_t bitmap_base = (uintptr_t)mapinfo->map_bitmap_region();
1641 uintptr_t load_address = buffer;
1642 for (int i = 0; i < num_loaded_regions; i++) {
1643 LoadedArchiveHeapRegion* ri = &loaded_regions[i];
1644 FileMapRegion* r = mapinfo->space_at(ri->_region_index);
1645
1646 if (!mapinfo->read_region(ri->_region_index, (char*)load_address, r->used(), /* do_commit = */ false)) {
1647 // There's no easy way to free the buffer, so we will fill it with zero later
1648 // in fill_failed_loaded_region(), and it will eventually be GC'ed.
1649 log_warning(cds)(!(LogImpl<(LogTag::_cds), (LogTag::__NO_TAG), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG)
>::is_level(LogLevel::Warning))) ? (void)0 : LogImpl<(LogTag
::_cds), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG)>::write<LogLevel
::Warning>
("Loading of heap region %d has failed. Archived objects are disabled", i);
1650 _loading_failed = true;
1651 return false;
1652 }
1653 log_info(cds)(!(LogImpl<(LogTag::_cds), (LogTag::__NO_TAG), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG)
>::is_level(LogLevel::Info))) ? (void)0 : LogImpl<(LogTag
::_cds), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG
), (LogTag::__NO_TAG), (LogTag::__NO_TAG)>::write<LogLevel
::Info>
("Loaded heap region #%d at base " INTPTR_FORMAT"0x%016" "l" "x" " top " INTPTR_FORMAT"0x%016" "l" "x"
1654 " size " SIZE_FORMAT_W(6)"%" "6" "l" "u" " delta " INTX_FORMAT"%" "l" "d",
1655 ri->_region_index, load_address, load_address + ri->_region_size,
1656 ri->_region_size, ri->_runtime_offset);
1657
1658 uintptr_t oopmap = bitmap_base + r->oopmap_offset();
1659 BitMapView bm((BitMap::bm_word_t*)oopmap, r->oopmap_size_in_bits());
1660
1661 if (num_loaded_regions == 4) {
1662 PatchLoadedRegionPointers<4> patcher((narrowOop*)load_address, loaded_regions);
1663 bm.iterate(&patcher);
1664 } else if (num_loaded_regions == 3) {
1665 PatchLoadedRegionPointers<3> patcher((narrowOop*)load_address, loaded_regions);
1666 bm.iterate(&patcher);
1667 } else {
1668 assert(num_loaded_regions == 2, "must be")do { if (!(num_loaded_regions == 2)) { (*g_assert_poison) = 'X'
;; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/cds/heapShared.cpp"
, 1668, "assert(" "num_loaded_regions == 2" ") failed", "must be"
); ::breakpoint(); } } while (0)
;
1669 PatchLoadedRegionPointers<2> patcher((narrowOop*)load_address, loaded_regions);
1670 bm.iterate(&patcher);
1671 }
1672
1673 load_address += r->used();
1674 }
1675
1676 return true;
1677}
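
The dispatch on num_loaded_regions above exists so that PatchLoadedRegionPointers gets its region count as a template argument. The real closure is defined earlier in this file and is driven by the BitMapView oop map; the sketch below only shows the general shape of such a patcher under that assumption, with hypothetical names (Region, patch_slots) and plain addresses standing in for narrowOops.

#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical types, for illustration only.
struct Region {
  uintptr_t dumptime_base;
  uintptr_t dumptime_top;
  uintptr_t runtime_offset;
};

// NUM_REGIONS is a compile-time constant, so the inner region-lookup loop has a
// fixed trip count and can be unrolled; that is why the caller dispatches on
// 2, 3 or 4 regions instead of passing a runtime count.
template <int NUM_REGIONS>
void patch_slots(uintptr_t* slots, const std::vector<bool>& oopmap,
                 const Region (&regions)[NUM_REGIONS]) {
  for (std::size_t i = 0; i < oopmap.size(); i++) {
    if (!oopmap[i]) continue;            // only slots flagged in the oop map hold references
    uintptr_t dumptime_addr = slots[i];  // pretend each slot holds a dump-time address
    for (int r = 0; r < NUM_REGIONS; r++) {
      if (dumptime_addr >= regions[r].dumptime_base &&
          dumptime_addr <  regions[r].dumptime_top) {
        slots[i] = dumptime_addr + regions[r].runtime_offset;
        break;
      }
    }
  }
}

int main() {
  Region regions[2] = { {0x1000, 0x2000, 0x100}, {0x2000, 0x3000, 0x200} };
  uintptr_t slots[3] = { 0x1010, 0, 0x2020 };
  std::vector<bool> oopmap = { true, false, true };
  patch_slots<2>(slots, oopmap, regions);  // slots is now { 0x1110, 0, 0x2220 }
  return 0;
}
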
1678
1679bool HeapShared::load_heap_regions(FileMapInfo* mapinfo) {
1680 init_narrow_oop_decoding(mapinfo->narrow_oop_base(), mapinfo->narrow_oop_shift());
1681
1682 LoadedArchiveHeapRegion loaded_regions[MetaspaceShared::max_num_heap_regions];
1683 memset(loaded_regions, 0, sizeof(loaded_regions));
1684
1685 MemRegion archive_space;
1686 int num_loaded_regions = init_loaded_regions(mapinfo, loaded_regions, archive_space);
1687 if (num_loaded_regions <= 0) {
1688 return false;
1689 }
1690 sort_loaded_regions(loaded_regions, num_loaded_regions, (uintptr_t)archive_space.start());
1691 if (!load_regions(mapinfo, loaded_regions, num_loaded_regions, (uintptr_t)archive_space.start())) {
1692 assert(_loading_failed, "must be");
1693 return false;
1694 }
1695
1696 init_loaded_heap_relocation(loaded_regions, num_loaded_regions);
1697 _is_loaded = true;
1698 set_roots(mapinfo->heap_obj_roots());
1699
1700 return true;
1701}
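
load_heap_regions() first calls init_narrow_oop_decoding() with the base and shift recorded in the archive, because the narrowOop values embedded in archived objects were encoded against the dump-time heap. A minimal sketch of the decoding arithmetic, with made-up base and shift values (the real ones come from the archive header):

#include <cassert>
#include <cstdint>

int main() {
  uintptr_t narrow_oop_base  = 0x0000000080000000ULL;  // dump-time heap base (illustrative)
  int       narrow_oop_shift = 3;                      // 8-byte object alignment

  uint32_t  narrow  = 0x00000010;                      // a compressed reference
  uintptr_t decoded = narrow_oop_base + ((uintptr_t)narrow << narrow_oop_shift);
  assert(decoded == 0x0000000080000080ULL);
  return 0;
}
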
1702
1703class VerifyLoadedHeapEmbeddedPointers: public BasicOopIterateClosure {
1704 ResourceHashtable<uintptr_t, bool>* _table;
1705
1706 public:
1707 VerifyLoadedHeapEmbeddedPointers(ResourceHashtable<uintptr_t, bool>* table) : _table(table) {}
1708
1709 virtual void do_oop(narrowOop* p) {
1710 // This should be called before the loaded regions are modified, so all the embedded pointers
1711 // must be NULL, or must point to a valid object in the loaded regions.
1712 narrowOop v = *p;
1713 if (!CompressedOops::is_null(v)) {
1714 oop o = CompressedOops::decode_not_null(v);
1715 uintptr_t u = cast_from_oop<uintptr_t>(o);
1716 HeapShared::assert_in_loaded_heap(u);
1717 guarantee(_table->contains(u), "must point to beginning of object in loaded archived regions");
1718 }
1719 }
1720 virtual void do_oop(oop* p) {
1721 ShouldNotReachHere();
1722 }
1723};
1724
1725void HeapShared::finish_initialization() {
1726 if (is_loaded()) {
1727 HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
1728 HeapWord* top = (HeapWord*)_loaded_heap_top;
1729
1730 MemRegion archive_space = MemRegion(bottom, top);
1731 Universe::heap()->complete_loaded_archive_space(archive_space);
1732 }
1733
1734 if (VerifyArchivedFields <= 0 || !is_loaded()) {
1735 return;
1736 }
1737
1738 log_info(cds, heap)("Verify all oops and pointers in loaded heap");
1739
1740 ResourceMark rm;
1741 ResourceHashtable<uintptr_t, bool> table;
1742 VerifyLoadedHeapEmbeddedPointers verifier(&table);
1743 HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
1744 HeapWord* top = (HeapWord*)_loaded_heap_top;
1745
1746 for (HeapWord* p = bottom; p < top; ) {
1747 oop o = cast_to_oop(p);
1748 table.put(cast_from_oop<uintptr_t>(o), true);
1749 p += o->size();
1750 }
1751
1752 for (HeapWord* p = bottom; p < top; ) {
1753 oop o = cast_to_oop(p);
1754 o->oop_iterate(&verifier);
1755 p += o->size();
1756 }
1757}
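
The verification above is two passes over [bottom, top): the first records every object start address in the table, the second iterates each object's embedded narrowOops and checks that every non-null reference decodes to a recorded start. A minimal sketch of that structure, with a std::set standing in for ResourceHashtable and plain addresses standing in for oops (object layout and the heap walk are simulated):

#include <cassert>
#include <cstdint>
#include <set>
#include <utility>
#include <vector>

int main() {
  // Pass 1: record the start address of every "object" in the loaded range.
  std::vector<std::pair<uintptr_t, size_t>> objects = {
    {0x1000, 0x20}, {0x1020, 0x40}, {0x1060, 0x10}   // {start, size in bytes}
  };
  std::set<uintptr_t> starts;
  for (auto& o : objects) starts.insert(o.first);

  // Pass 2: every embedded pointer must be null or point at a recorded object start.
  std::vector<uintptr_t> embedded_pointers = { 0x1020, 0, 0x1060 };
  for (uintptr_t p : embedded_pointers) {
    assert(p == 0 || starts.count(p) == 1);
  }
  return 0;
}
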
1758
1759void HeapShared::fill_failed_loaded_region() {
1760 assert(_loading_failed, "must be");
1761 if (_loaded_heap_bottom != 0) {
1762 assert(_loaded_heap_top != 0, "must be");
1763 HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
1764 HeapWord* top = (HeapWord*)_loaded_heap_top;
1765 Universe::heap()->fill_with_objects(bottom, top - bottom);
1766 }
1767}
1768
1769#endif // INCLUDE_CDS_JAVA_HEAP

/home/daniel/Projects/java/jdk/src/hotspot/share/oops/oop.inline.hpp

1/*
2 * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#ifndef SHARE_OOPS_OOP_INLINE_HPP
26#define SHARE_OOPS_OOP_INLINE_HPP
27
28#include "oops/oop.hpp"
29
30#include "memory/universe.hpp"
31#include "oops/access.inline.hpp"
32#include "oops/arrayKlass.hpp"
33#include "oops/arrayOop.hpp"
34#include "oops/compressedOops.inline.hpp"
35#include "oops/markWord.hpp"
36#include "oops/oopsHierarchy.hpp"
37#include "runtime/atomic.hpp"
38#include "runtime/globals.hpp"
39#include "utilities/align.hpp"
40#include "utilities/debug.hpp"
41#include "utilities/macros.hpp"
42#include "utilities/globalDefinitions.hpp"
43
44// Implementation of all inlined member functions defined in oop.hpp
45// We need a separate file to avoid circular references
46
47markWord oopDesc::mark() const {
48 return Atomic::load(&_mark);
49}
50
51markWord oopDesc::mark_acquire() const {
52 return Atomic::load_acquire(&_mark);
53}
54
55markWord* oopDesc::mark_addr() const {
56 return (markWord*) &_mark;
57}
58
59void oopDesc::set_mark(markWord m) {
60 Atomic::store(&_mark, m);
61}
62
63void oopDesc::set_mark(HeapWord* mem, markWord m) {
64 *(markWord*)(((char*)mem) + mark_offset_in_bytes()) = m;
65}
66
67void oopDesc::release_set_mark(markWord m) {
68 Atomic::release_store(&_mark, m);
69}
70
71markWord oopDesc::cas_set_mark(markWord new_mark, markWord old_mark) {
72 return Atomic::cmpxchg(&_mark, old_mark, new_mark);
73}
74
75markWord oopDesc::cas_set_mark(markWord new_mark, markWord old_mark, atomic_memory_order order) {
76 return Atomic::cmpxchg(&_mark, old_mark, new_mark, order);
77}
78
79void oopDesc::init_mark() {
80 set_mark(markWord::prototype());
81}
82
83Klass* oopDesc::klass() const {
84 if (UseCompressedClassPointers) {
85 return CompressedKlassPointers::decode_not_null(_metadata._compressed_klass);
86 } else {
87 return _metadata._klass;
88 }
89}
90
91Klass* oopDesc::klass_or_null() const {
92 if (UseCompressedClassPointers) {
93 return CompressedKlassPointers::decode(_metadata._compressed_klass);
94 } else {
95 return _metadata._klass;
96 }
97}
98
99Klass* oopDesc::klass_or_null_acquire() const {
100 if (UseCompressedClassPointers) {
101 narrowKlass nklass = Atomic::load_acquire(&_metadata._compressed_klass);
102 return CompressedKlassPointers::decode(nklass);
103 } else {
104 return Atomic::load_acquire(&_metadata._klass);
105 }
106}
107
108void oopDesc::set_klass(Klass* k) {
109 assert(Universe::is_bootstrapping() || (k != NULL && k->is_klass()), "incorrect Klass");
110 if (UseCompressedClassPointers) {
111 _metadata._compressed_klass = CompressedKlassPointers::encode_not_null(k);
112 } else {
113 _metadata._klass = k;
114 }
115}
116
117void oopDesc::release_set_klass(HeapWord* mem, Klass* k) {
118 assert(Universe::is_bootstrapping() || (k != NULL && k->is_klass()), "incorrect Klass");
119 char* raw_mem = ((char*)mem + klass_offset_in_bytes());
120 if (UseCompressedClassPointers) {
121 Atomic::release_store((narrowKlass*)raw_mem,
122 CompressedKlassPointers::encode_not_null(k));
123 } else {
124 Atomic::release_store((Klass**)raw_mem, k);
125 }
126}
127
128int oopDesc::klass_gap() const {
129 return *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes());
130}
131
132void oopDesc::set_klass_gap(HeapWord* mem, int v) {
133 if (UseCompressedClassPointers) {
134 *(int*)(((char*)mem) + klass_gap_offset_in_bytes()) = v;
135 }
136}
137
138void oopDesc::set_klass_gap(int v) {
139 set_klass_gap((HeapWord*)this, v);
140}
141
142bool oopDesc::is_a(Klass* k) const {
143 return klass()->is_subtype_of(k);
144}
145
146size_t oopDesc::size() {
147 return size_given_klass(klass());
10. Value assigned to 'DumpSharedSpaces', which participates in a condition later
148}
149
150size_t oopDesc::size_given_klass(Klass* klass) {
151 int lh = klass->layout_helper();
152 size_t s;
153
154 // lh is now a value computed at class initialization that may hint
155 // at the size. For instances, this is positive and equal to the
156 // size. For arrays, this is negative and provides log2 of the
157 // array element size. For other oops, it is zero and thus requires
158 // a virtual call.
159 //
160 // We go to all this trouble because the size computation is at the
161 // heart of phase 2 of mark-compaction, and called for every object,
162 // alive or dead. So the speed here is equal in importance to the
163 // speed of allocation.
164
165 if (lh > Klass::_lh_neutral_value) {
166 if (!Klass::layout_helper_needs_slow_path(lh)) {
167 s = lh >> LogHeapWordSize; // deliver size scaled by wordSize
168 } else {
169 s = klass->oop_size(this);
170 }
171 } else if (lh <= Klass::_lh_neutral_value) {
172 // The most common case is instances; fall through if so.
173 if (lh < Klass::_lh_neutral_value) {
174 // Second most common case is arrays. We have to fetch the
175 // length of the array, shift (multiply) it appropriately,
176 // up to wordSize, add the header, and align to object size.
177 size_t size_in_bytes;
178 size_t array_length = (size_t) ((arrayOop)this)->length();
179 size_in_bytes = array_length << Klass::layout_helper_log2_element_size(lh);
180 size_in_bytes += Klass::layout_helper_header_size(lh);
181
182 // This code could be simplified, but by keeping array_header_in_bytes
183 // in units of bytes and doing it this way we can round up just once,
184 // skipping the intermediate round to HeapWordSize.
185 s = align_up(size_in_bytes, MinObjAlignmentInBytes) / HeapWordSize;
186
187 // UseParallelGC and UseG1GC can change the length field
188 // of an "old copy" of an object array in the young gen so it indicates
189 // the grey portion of an already copied array. This will cause the first
190 // disjunct below to fail if the two comparands are computed across such
191 // a concurrent change.
192 assert((s == klass->oop_size(this)) ||
193 (Universe::is_gc_active() && is_objArray() && is_forwarded() && (get_UseParallelGC() || get_UseG1GC())),
194 "wrong array object size");
195 } else {
196 // Must be zero, so bite the bullet and take the virtual call.
197 s = klass->oop_size(this);
198 }
199 }
200
201 assert(s > 0, "Oop size must be greater than zero, not " SIZE_FORMAT, s);
202 assert(is_object_aligned(s), "Oop size is not properly aligned: " SIZE_FORMAT, s);
203 return s;
204}
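
A worked instance of the array branch above, with made-up layout-helper values: log2 element size 2 (4-byte elements), a 16-byte header, 8-byte object alignment, and 8-byte HeapWords as on a 64-bit VM. A length-10 array then takes align_up(10*4 + 16, 8) = 56 bytes, i.e. 7 heap words.

#include <cassert>
#include <cstddef>

int main() {
  size_t array_length        = 10;
  int    log2_element_size   = 2;    // 4-byte elements, e.g. jint
  size_t header_size_bytes   = 16;   // illustrative header size
  size_t min_obj_align_bytes = 8;
  size_t heap_word_size      = 8;

  size_t size_in_bytes = (array_length << log2_element_size) + header_size_bytes;        // 56
  size_t aligned       = (size_in_bytes + min_obj_align_bytes - 1) & ~(min_obj_align_bytes - 1);
  size_t size_in_words = aligned / heap_word_size;
  assert(size_in_words == 7);
  return 0;
}
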
205
206bool oopDesc::is_instance() const { return klass()->is_instance_klass(); }
207bool oopDesc::is_array() const { return klass()->is_array_klass(); }
208bool oopDesc::is_objArray() const { return klass()->is_objArray_klass(); }
209bool oopDesc::is_typeArray() const { return klass()->is_typeArray_klass(); }
210
211template<typename T>
212T* oopDesc::field_addr(int offset) const { return reinterpret_cast<T*>(cast_from_oop<intptr_t>(as_oop()) + offset); }
213
214template <typename T>
215size_t oopDesc::field_offset(T* p) const { return pointer_delta((void*)p, (void*)this, 1); }
216
217template <DecoratorSet decorators>
218inline oop oopDesc::obj_field_access(int offset) const { return HeapAccess<decorators>::oop_load_at(as_oop(), offset); }
219inline oop oopDesc::obj_field(int offset) const { return HeapAccess<>::oop_load_at(as_oop(), offset); }
220
221inline void oopDesc::obj_field_put(int offset, oop value) { HeapAccess<>::oop_store_at(as_oop(), offset, value); }
222
223inline jbyte oopDesc::byte_field(int offset) const { return *field_addr<jbyte>(offset); }
224inline void oopDesc::byte_field_put(int offset, jbyte value) { *field_addr<jbyte>(offset) = value; }
225
226inline jchar oopDesc::char_field(int offset) const { return *field_addr<jchar>(offset); }
227inline void oopDesc::char_field_put(int offset, jchar value) { *field_addr<jchar>(offset) = value; }
228
229inline jboolean oopDesc::bool_field(int offset) const { return *field_addr<jboolean>(offset); }
230inline void oopDesc::bool_field_put(int offset, jboolean value) { *field_addr<jboolean>(offset) = jboolean(value & 1); }
231inline jboolean oopDesc::bool_field_volatile(int offset) const { return RawAccess<MO_SEQ_CST>::load(field_addr<jboolean>(offset)); }
232inline void oopDesc::bool_field_put_volatile(int offset, jboolean value) { RawAccess<MO_SEQ_CST>::store(field_addr<jboolean>(offset), jboolean(value & 1)); }
233inline jshort oopDesc::short_field(int offset) const { return *field_addr<jshort>(offset); }
234inline void oopDesc::short_field_put(int offset, jshort value) { *field_addr<jshort>(offset) = value; }
235
236inline jint oopDesc::int_field(int offset) const { return *field_addr<jint>(offset); }
237inline void oopDesc::int_field_put(int offset, jint value) { *field_addr<jint>(offset) = value; }
238
239inline jlong oopDesc::long_field(int offset) const { return *field_addr<jlong>(offset); }
240inline void oopDesc::long_field_put(int offset, jlong value) { *field_addr<jlong>(offset) = value; }
241
242inline jfloat oopDesc::float_field(int offset) const { return *field_addr<jfloat>(offset); }
243inline void oopDesc::float_field_put(int offset, jfloat value) { *field_addr<jfloat>(offset) = value; }
244
245inline jdouble oopDesc::double_field(int offset) const { return *field_addr<jdouble>(offset); }
246inline void oopDesc::double_field_put(int offset, jdouble value) { *field_addr<jdouble>(offset) = value; }
247
248bool oopDesc::is_locked() const {
249 return mark().is_locked();
250}
251
252bool oopDesc::is_unlocked() const {
253 return mark().is_unlocked();
254}
255
256// Used only for markSweep, scavenging
257bool oopDesc::is_gc_marked() const {
258 return mark().is_marked();
259}
260
261// Used by scavengers
262bool oopDesc::is_forwarded() const {
263 // The extra heap check is needed since the obj might be locked, in which case the
264 // mark would point to a stack location and have the sentinel bit cleared
265 return mark().is_marked();
266}
267
268// Used by scavengers
269void oopDesc::forward_to(oop p) {
270 verify_forwardee(p);
271 markWord m = markWord::encode_pointer_as_mark(p);
272 assert(m.decode_pointer() == p, "encoding must be reversable");
273 set_mark(m);
274}
275
276oop oopDesc::forward_to_atomic(oop p, markWord compare, atomic_memory_order order) {
277 verify_forwardee(p);
278 markWord m = markWord::encode_pointer_as_mark(p);
279 assert(m.decode_pointer() == p, "encoding must be reversable");
280 markWord old_mark = cas_set_mark(m, compare, order);
281 if (old_mark == compare) {
282 return NULL;
283 } else {
284 return cast_to_oop(old_mark.decode_pointer());
285 }
286}
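
forward_to_atomic() above has claim semantics: the CAS installs the forwarding pointer in the mark word, a NULL return means this thread won the race, and a non-NULL return is the copy installed by whichever thread won instead. A sketch of that protocol with std::atomic standing in for the mark word and raw addresses standing in for oops (all names below are hypothetical):

#include <atomic>
#include <cassert>
#include <cstdint>

// Returns 0 if this thread installed the forwarding, otherwise the address
// some other thread installed first.
uintptr_t forward_to_atomic_sketch(std::atomic<uintptr_t>& mark,
                                   uintptr_t expected_old, uintptr_t forwardee) {
  uintptr_t prev = expected_old;
  if (mark.compare_exchange_strong(prev, forwardee)) {
    return 0;      // we won the race; our copy becomes the forwardee
  }
  return prev;     // somebody else forwarded the object first
}

int main() {
  std::atomic<uintptr_t> mark{0x1};
  assert(forward_to_atomic_sketch(mark, 0x1, 0x2000) == 0);       // first claim wins
  assert(forward_to_atomic_sketch(mark, 0x1, 0x3000) == 0x2000);  // loser sees winner's copy
  return 0;
}
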
287
288// Note that the forwardee is not the same thing as the displaced_mark.
289// The forwardee is used when copying during scavenge and mark-sweep.
290// It does need to clear the low two locking- and GC-related bits.
291oop oopDesc::forwardee() const {
292 assert(is_forwarded(), "only decode when actually forwarded");
293 return cast_to_oop(mark().decode_pointer());
294}
295
296// The following method needs to be MT safe.
297uint oopDesc::age() const {
298 assert(!mark().is_marked(), "Attempt to read age from forwarded mark");
299 if (has_displaced_mark()) {
300 return displaced_mark().age();
301 } else {
302 return mark().age();
303 }
304}
305
306void oopDesc::incr_age() {
307 assert(!mark().is_marked(), "Attempt to increment age of forwarded mark");
308 if (has_displaced_mark()) {
309 set_displaced_mark(displaced_mark().incr_age());
310 } else {
311 set_mark(mark().incr_age());
312 }
313}
314
315template <typename OopClosureType>
316void oopDesc::oop_iterate(OopClosureType* cl) {
317 OopIteratorClosureDispatch::oop_oop_iterate(cl, this, klass());
318}
319
320template <typename OopClosureType>
321void oopDesc::oop_iterate(OopClosureType* cl, MemRegion mr) {
322 OopIteratorClosureDispatch::oop_oop_iterate(cl, this, klass(), mr);
323}
324
325template <typename OopClosureType>
326size_t oopDesc::oop_iterate_size(OopClosureType* cl) {
327 Klass* k = klass();
328 size_t size = size_given_klass(k);
329 OopIteratorClosureDispatch::oop_oop_iterate(cl, this, k);
330 return size;
331}
332
333template <typename OopClosureType>
334size_t oopDesc::oop_iterate_size(OopClosureType* cl, MemRegion mr) {
335 Klass* k = klass();
336 size_t size = size_given_klass(k);
337 OopIteratorClosureDispatch::oop_oop_iterate(cl, this, k, mr);
338 return size;
339}
340
341template <typename OopClosureType>
342void oopDesc::oop_iterate_backwards(OopClosureType* cl) {
343 oop_iterate_backwards(cl, klass());
344}
345
346template <typename OopClosureType>
347void oopDesc::oop_iterate_backwards(OopClosureType* cl, Klass* k) {
348 assert(k == klass(), "wrong klass");
349 OopIteratorClosureDispatch::oop_oop_iterate_backwards(cl, this, k);
350}
351
352bool oopDesc::is_instanceof_or_null(oop obj, Klass* klass) {
353 return obj == NULL || obj->klass()->is_subtype_of(klass);
354}
355
356intptr_t oopDesc::identity_hash() {
357 // Fast case; if the object is unlocked and the hash value is set, no locking is needed
358 // Note: The mark must be read into local variable to avoid concurrent updates.
359 markWord mrk = mark();
360 if (mrk.is_unlocked() && !mrk.has_no_hash()) {
361 return mrk.hash();
362 } else if (mrk.is_marked()) {
363 return mrk.hash();
364 } else {
365 return slow_identity_hash();
366 }
367}
368
369bool oopDesc::has_displaced_mark() const {
370 return mark().has_displaced_mark_helper();
371}
372
373markWord oopDesc::displaced_mark() const {
374 return mark().displaced_mark_helper();
375}
376
377void oopDesc::set_displaced_mark(markWord m) {
378 mark().set_displaced_mark_helper(m);
379}
380
381bool oopDesc::mark_must_be_preserved() const {
382 return mark_must_be_preserved(mark());
383}
384
385bool oopDesc::mark_must_be_preserved(markWord m) const {
386 return m.must_be_preserved(this);
387}
388
389#endif // SHARE_OOPS_OOP_INLINE_HPP