| File: | jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp |
| Warning: | line 174, column 14 Called C++ object pointer is null |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
| 1 | /* | |||
| 2 | * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved. | |||
| 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | |||
| 4 | * | |||
| 5 | * This code is free software; you can redistribute it and/or modify it | |||
| 6 | * under the terms of the GNU General Public License version 2 only, as | |||
| 7 | * published by the Free Software Foundation. | |||
| 8 | * | |||
| 9 | * This code is distributed in the hope that it will be useful, but WITHOUT | |||
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |||
| 12 | * version 2 for more details (a copy is included in the LICENSE file that | |||
| 13 | * accompanied this code). | |||
| 14 | * | |||
| 15 | * You should have received a copy of the GNU General Public License version | |||
| 16 | * 2 along with this work; if not, write to the Free Software Foundation, | |||
| 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |||
| 18 | * | |||
| 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA | |||
| 20 | * or visit www.oracle.com if you need additional information or have any | |||
| 21 | * questions. | |||
| 22 | * | |||
| 23 | */ | |||
| 24 | ||||
| 25 | #include "precompiled.hpp" | |||
| 26 | #include "classfile/javaClasses.inline.hpp" | |||
| 27 | #include "gc/shared/collectedHeap.hpp" | |||
| 28 | #include "gc/shared/collectedHeap.inline.hpp" | |||
| 29 | #include "gc/shared/gc_globals.hpp" | |||
| 30 | #include "gc/shared/gcTimer.hpp" | |||
| 31 | #include "gc/shared/gcTraceTime.inline.hpp" | |||
| 32 | #include "gc/shared/referencePolicy.hpp" | |||
| 33 | #include "gc/shared/referenceProcessor.inline.hpp" | |||
| 34 | #include "gc/shared/referenceProcessorPhaseTimes.hpp" | |||
| 35 | #include "logging/log.hpp" | |||
| 36 | #include "memory/allocation.inline.hpp" | |||
| 37 | #include "memory/resourceArea.hpp" | |||
| 38 | #include "memory/universe.hpp" | |||
| 39 | #include "oops/access.inline.hpp" | |||
| 40 | #include "oops/oop.inline.hpp" | |||
| 41 | #include "runtime/java.hpp" | |||
| 42 | #include "runtime/nonJavaThread.hpp" | |||
| 43 | #include "utilities/globalDefinitions.hpp" | |||
| 44 | ||||
| 45 | ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL__null; | |||
| 46 | ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy = NULL__null; | |||
| 47 | jlong ReferenceProcessor::_soft_ref_timestamp_clock = 0; | |||
| 48 | ||||
| 49 | void referenceProcessor_init() { | |||
| 50 | ReferenceProcessor::init_statics(); | |||
| 51 | } | |||
| 52 | ||||
| 53 | void ReferenceProcessor::init_statics() { | |||
| 54 | // We need a monotonically non-decreasing time in ms but | |||
| 55 | // os::javaTimeMillis() does not guarantee monotonicity. | |||
| 56 | jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; | |||
| 57 | ||||
| 58 | // Initialize the soft ref timestamp clock. | |||
| 59 | _soft_ref_timestamp_clock = now; | |||
| 60 | // Also update the soft ref clock in j.l.r.SoftReference | |||
| 61 | java_lang_ref_SoftReference::set_clock(_soft_ref_timestamp_clock); | |||
| 62 | ||||
| 63 | _always_clear_soft_ref_policy = new AlwaysClearPolicy(); | |||
| 64 | if (CompilerConfig::is_c2_or_jvmci_compiler_enabled()) { | |||
| 65 | _default_soft_ref_policy = new LRUMaxHeapPolicy(); | |||
| 66 | } else { | |||
| 67 | _default_soft_ref_policy = new LRUCurrentHeapPolicy(); | |||
| 68 | } | |||
| 69 | guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||do { if (!(RefDiscoveryPolicy == ReferenceBasedDiscovery || RefDiscoveryPolicy == ReferentBasedDiscovery)) { (*g_assert_poison) = 'X';; report_vm_error ("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 71, "guarantee(" "RefDiscoveryPolicy == ReferenceBasedDiscovery || RefDiscoveryPolicy == ReferentBasedDiscovery" ") failed", "Unrecognized RefDiscoveryPolicy"); ::breakpoint (); } } while (0) | |||
| 70 | RefDiscoveryPolicy == ReferentBasedDiscovery,do { if (!(RefDiscoveryPolicy == ReferenceBasedDiscovery || RefDiscoveryPolicy == ReferentBasedDiscovery)) { (*g_assert_poison) = 'X';; report_vm_error ("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 71, "guarantee(" "RefDiscoveryPolicy == ReferenceBasedDiscovery || RefDiscoveryPolicy == ReferentBasedDiscovery" ") failed", "Unrecognized RefDiscoveryPolicy"); ::breakpoint (); } } while (0) | |||
| 71 | "Unrecognized RefDiscoveryPolicy")do { if (!(RefDiscoveryPolicy == ReferenceBasedDiscovery || RefDiscoveryPolicy == ReferentBasedDiscovery)) { (*g_assert_poison) = 'X';; report_vm_error ("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 71, "guarantee(" "RefDiscoveryPolicy == ReferenceBasedDiscovery || RefDiscoveryPolicy == ReferentBasedDiscovery" ") failed", "Unrecognized RefDiscoveryPolicy"); ::breakpoint (); } } while (0); | |||
| 72 | } | |||
| 73 | ||||
| 74 | void ReferenceProcessor::enable_discovery(bool check_no_refs) { | |||
| 75 | #ifdef ASSERT1 | |||
| 76 | // Verify that we're not currently discovering refs | |||
| 77 | assert(!_discovering_refs, "nested call?")do { if (!(!_discovering_refs)) { (*g_assert_poison) = 'X';; report_vm_error ("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 77, "assert(" "!_discovering_refs" ") failed", "nested call?" ); ::breakpoint(); } } while (0); | |||
| 78 | ||||
| 79 | if (check_no_refs) { | |||
| 80 | // Verify that the discovered lists are empty | |||
| 81 | verify_no_references_recorded(); | |||
| 82 | } | |||
| 83 | #endif // ASSERT | |||
| 84 | ||||
| 85 | _discovering_refs = true; | |||
| 86 | } | |||
| 87 | ||||
| 88 | ReferenceProcessor::ReferenceProcessor(BoolObjectClosure* is_subject_to_discovery, | |||
| 89 | uint mt_processing_degree, | |||
| 90 | uint mt_discovery_degree, | |||
| 91 | bool concurrent_discovery, | |||
| 92 | BoolObjectClosure* is_alive_non_header) : | |||
| 93 | _is_subject_to_discovery(is_subject_to_discovery), | |||
| 94 | _discovering_refs(false), | |||
| 95 | _next_id(0), | |||
| 96 | _is_alive_non_header(is_alive_non_header) | |||
| 97 | { | |||
| 98 | assert(is_subject_to_discovery != NULL, "must be set")do { if (!(is_subject_to_discovery != __null)) { (*g_assert_poison ) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 98, "assert(" "is_subject_to_discovery != __null" ") failed" , "must be set"); ::breakpoint(); } } while (0); | |||
| 99 | ||||
| 100 | _discovery_is_concurrent = concurrent_discovery; | |||
| 101 | _discovery_is_mt = (mt_discovery_degree > 1); | |||
| 102 | _num_queues = MAX2(1U, mt_processing_degree); | |||
| 103 | _max_num_queues = MAX2(_num_queues, mt_discovery_degree); | |||
| 104 | _discovered_refs = NEW_C_HEAP_ARRAY(DiscoveredList,(DiscoveredList*) (AllocateHeap((_max_num_queues * number_of_subclasses_of_ref ()) * sizeof(DiscoveredList), mtGC)) | |||
| 105 | _max_num_queues * number_of_subclasses_of_ref(), mtGC)(DiscoveredList*) (AllocateHeap((_max_num_queues * number_of_subclasses_of_ref ()) * sizeof(DiscoveredList), mtGC)); | |||
| 106 | ||||
| 107 | _discoveredSoftRefs = &_discovered_refs[0]; | |||
| 108 | _discoveredWeakRefs = &_discoveredSoftRefs[_max_num_queues]; | |||
| 109 | _discoveredFinalRefs = &_discoveredWeakRefs[_max_num_queues]; | |||
| 110 | _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_queues]; | |||
| 111 | ||||
| 112 | // Initialize all entries to NULL | |||
| 113 | for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) { | |||
| 114 | _discovered_refs[i].clear(); | |||
| 115 | } | |||
| 116 | ||||
| 117 | setup_policy(false /* default soft ref policy */); | |||
| 118 | } | |||
| 119 | ||||
| 120 | #ifndef PRODUCT | |||
| 121 | void ReferenceProcessor::verify_no_references_recorded() { | |||
| 122 | guarantee(!_discovering_refs, "Discovering refs?")do { if (!(!_discovering_refs)) { (*g_assert_poison) = 'X';; report_vm_error ("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 122, "guarantee(" "!_discovering_refs" ") failed", "Discovering refs?" ); ::breakpoint(); } } while (0); | |||
| 123 | for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) { | |||
| 124 | guarantee(_discovered_refs[i].is_empty(),do { if (!(_discovered_refs[i].is_empty())) { (*g_assert_poison ) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 125, "guarantee(" "_discovered_refs[i].is_empty()" ") failed" , "Found non-empty discovered list at %u", i); ::breakpoint() ; } } while (0) | |||
| 125 | "Found non-empty discovered list at %u", i)do { if (!(_discovered_refs[i].is_empty())) { (*g_assert_poison ) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 125, "guarantee(" "_discovered_refs[i].is_empty()" ") failed" , "Found non-empty discovered list at %u", i); ::breakpoint() ; } } while (0); | |||
| 126 | } | |||
| 127 | } | |||
| 128 | #endif | |||
| 129 | ||||
| 130 | bool ReferenceProcessor::processing_is_mt() const { | |||
| 131 | return ParallelRefProcEnabled && _num_queues > 1; | |||
| 132 | } | |||
| 133 | ||||
| 134 | void ReferenceProcessor::weak_oops_do(OopClosure* f) { | |||
| 135 | for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) { | |||
| 136 | if (UseCompressedOops) { | |||
| 137 | f->do_oop((narrowOop*)_discovered_refs[i].adr_head()); | |||
| 138 | } else { | |||
| 139 | f->do_oop((oop*)_discovered_refs[i].adr_head()); | |||
| 140 | } | |||
| 141 | } | |||
| 142 | } | |||
| 143 | ||||
| 144 | void ReferenceProcessor::update_soft_ref_master_clock() { | |||
| 145 | // Update (advance) the soft ref master clock field. This must be done | |||
| 146 | // after processing the soft ref list. | |||
| 147 | ||||
| 148 | // We need a monotonically non-decreasing time in ms but | |||
| 149 | // os::javaTimeMillis() does not guarantee monotonicity. | |||
| 150 | jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; | |||
| 151 | ||||
| 152 | NOT_PRODUCT(if (now < _soft_ref_timestamp_clock) { (!(LogImpl<(LogTag ::_gc), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG ), (LogTag::__NO_TAG), (LogTag::__NO_TAG)>::is_level(LogLevel ::Warning))) ? (void)0 : LogImpl<(LogTag::_gc), (LogTag::__NO_TAG ), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG) , (LogTag::__NO_TAG)>::write<LogLevel::Warning>("time warp: " "%" "l" "d" " to " "%" "l" "d", _soft_ref_timestamp_clock, now ); } | |||
| 153 | if (now < _soft_ref_timestamp_clock) {if (now < _soft_ref_timestamp_clock) { (!(LogImpl<(LogTag ::_gc), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG ), (LogTag::__NO_TAG), (LogTag::__NO_TAG)>::is_level(LogLevel ::Warning))) ? (void)0 : LogImpl<(LogTag::_gc), (LogTag::__NO_TAG ), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG) , (LogTag::__NO_TAG)>::write<LogLevel::Warning>("time warp: " "%" "l" "d" " to " "%" "l" "d", _soft_ref_timestamp_clock, now ); } | |||
| 154 | log_warning(gc)("time warp: " JLONG_FORMAT " to " JLONG_FORMAT,if (now < _soft_ref_timestamp_clock) { (!(LogImpl<(LogTag ::_gc), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG ), (LogTag::__NO_TAG), (LogTag::__NO_TAG)>::is_level(LogLevel ::Warning))) ? (void)0 : LogImpl<(LogTag::_gc), (LogTag::__NO_TAG ), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG) , (LogTag::__NO_TAG)>::write<LogLevel::Warning>("time warp: " "%" "l" "d" " to " "%" "l" "d", _soft_ref_timestamp_clock, now ); } | |||
| 155 | _soft_ref_timestamp_clock, now);if (now < _soft_ref_timestamp_clock) { (!(LogImpl<(LogTag ::_gc), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG ), (LogTag::__NO_TAG), (LogTag::__NO_TAG)>::is_level(LogLevel ::Warning))) ? (void)0 : LogImpl<(LogTag::_gc), (LogTag::__NO_TAG ), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG) , (LogTag::__NO_TAG)>::write<LogLevel::Warning>("time warp: " "%" "l" "d" " to " "%" "l" "d", _soft_ref_timestamp_clock, now ); } | |||
| 156 | }if (now < _soft_ref_timestamp_clock) { (!(LogImpl<(LogTag ::_gc), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG ), (LogTag::__NO_TAG), (LogTag::__NO_TAG)>::is_level(LogLevel ::Warning))) ? (void)0 : LogImpl<(LogTag::_gc), (LogTag::__NO_TAG ), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG) , (LogTag::__NO_TAG)>::write<LogLevel::Warning>("time warp: " "%" "l" "d" " to " "%" "l" "d", _soft_ref_timestamp_clock, now ); } | |||
| 157 | )if (now < _soft_ref_timestamp_clock) { (!(LogImpl<(LogTag ::_gc), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG ), (LogTag::__NO_TAG), (LogTag::__NO_TAG)>::is_level(LogLevel ::Warning))) ? (void)0 : LogImpl<(LogTag::_gc), (LogTag::__NO_TAG ), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG) , (LogTag::__NO_TAG)>::write<LogLevel::Warning>("time warp: " "%" "l" "d" " to " "%" "l" "d", _soft_ref_timestamp_clock, now ); } | |||
| 158 | // The values of now and _soft_ref_timestamp_clock are set using | |||
| 159 | // javaTimeNanos(), which is guaranteed to be monotonically | |||
| 160 | // non-decreasing provided the underlying platform provides such | |||
| 161 | // a time source (and it is bug free). | |||
| 162 | // In product mode, however, protect ourselves from non-monotonicity. | |||
| 163 | if (now > _soft_ref_timestamp_clock) { | |||
| 164 | _soft_ref_timestamp_clock = now; | |||
| 165 | java_lang_ref_SoftReference::set_clock(now); | |||
| 166 | } | |||
| 167 | // Else leave clock stalled at its old value until time progresses | |||
| 168 | // past clock value. | |||
| 169 | } | |||
| 170 | ||||
| 171 | size_t ReferenceProcessor::total_count(DiscoveredList lists[]) const { | |||
| 172 | size_t total = 0; | |||
| 173 | for (uint i = 0; i < _max_num_queues; ++i) { | |||
| 174 | total += lists[i].length(); | |||
| ||||
| 175 | } | |||
| 176 | return total; | |||
| 177 | } | |||
| 178 | ||||
#ifdef ASSERT
// Debug-only check that a reference-kind sub-array has been fully drained.
void ReferenceProcessor::verify_total_count_zero(DiscoveredList lists[], const char* type) {
  size_t count = total_count(lists);
  assert(count == 0, "%ss must be empty but has " SIZE_FORMAT " elements", type, count);
}
#endif
| 185 | ||||
| 186 | ReferenceProcessorStats ReferenceProcessor::process_discovered_references(RefProcProxyTask& proxy_task, | |||
| 187 | ReferenceProcessorPhaseTimes& phase_times) { | |||
| 188 | ||||
| 189 | double start_time = os::elapsedTime(); | |||
| 190 | ||||
| 191 | // Stop treating discovered references specially. | |||
| 192 | disable_discovery(); | |||
| 193 | ||||
| 194 | phase_times.set_ref_discovered(REF_SOFT, total_count(_discoveredSoftRefs)); | |||
| 195 | phase_times.set_ref_discovered(REF_WEAK, total_count(_discoveredWeakRefs)); | |||
| 196 | phase_times.set_ref_discovered(REF_FINAL, total_count(_discoveredFinalRefs)); | |||
| 197 | phase_times.set_ref_discovered(REF_PHANTOM, total_count(_discoveredPhantomRefs)); | |||
| 198 | ||||
| 199 | update_soft_ref_master_clock(); | |||
| 200 | ||||
| 201 | phase_times.set_processing_is_mt(processing_is_mt()); | |||
| 202 | ||||
| 203 | { | |||
| 204 | RefProcTotalPhaseTimesTracker tt(SoftWeakFinalRefsPhase, &phase_times); | |||
| 205 | process_soft_weak_final_refs(proxy_task, phase_times); | |||
| 206 | } | |||
| 207 | ||||
| 208 | { | |||
| 209 | RefProcTotalPhaseTimesTracker tt(KeepAliveFinalRefsPhase, &phase_times); | |||
| 210 | process_final_keep_alive(proxy_task, phase_times); | |||
| 211 | } | |||
| 212 | ||||
| 213 | { | |||
| 214 | RefProcTotalPhaseTimesTracker tt(PhantomRefsPhase, &phase_times); | |||
| 215 | process_phantom_refs(proxy_task, phase_times); | |||
| 216 | } | |||
| 217 | ||||
| 218 | phase_times.set_total_time_ms((os::elapsedTime() - start_time) * 1000); | |||
| 219 | ||||
| 220 | // Elements on discovered lists were pushed to the pending list. | |||
| 221 | verify_no_references_recorded(); | |||
| 222 | ||||
| 223 | ReferenceProcessorStats stats(phase_times.ref_discovered(REF_SOFT), | |||
| 224 | phase_times.ref_discovered(REF_WEAK), | |||
| 225 | phase_times.ref_discovered(REF_FINAL), | |||
| 226 | phase_times.ref_discovered(REF_PHANTOM)); | |||
| 227 | return stats; | |||
| 228 | } | |||
| 229 | ||||
| 230 | void BarrierEnqueueDiscoveredFieldClosure::enqueue(HeapWord* discovered_field_addr, oop value) { | |||
| 231 | assert(Universe::heap()->is_in(discovered_field_addr), PTR_FORMAT " not in heap", p2i(discovered_field_addr))do { if (!(Universe::heap()->is_in(discovered_field_addr)) ) { (*g_assert_poison) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 231, "assert(" "Universe::heap()->is_in(discovered_field_addr)" ") failed", "0x%016" "l" "x" " not in heap", p2i(discovered_field_addr )); ::breakpoint(); } } while (0); | |||
| 232 | HeapAccess<AS_NO_KEEPALIVE>::oop_store(discovered_field_addr, | |||
| 233 | value); | |||
| 234 | } | |||
| 235 | ||||
| 236 | void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)bool allow_null_referent) { | |||
| 237 | _current_discovered_addr = java_lang_ref_Reference::discovered_addr_raw(_current_discovered); | |||
| 238 | oop discovered = java_lang_ref_Reference::discovered(_current_discovered); | |||
| 239 | assert(_current_discovered_addr && oopDesc::is_oop_or_null(discovered),do { if (!(_current_discovered_addr && oopDesc::is_oop_or_null (discovered))) { (*g_assert_poison) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 240, "assert(" "_current_discovered_addr && oopDesc::is_oop_or_null(discovered)" ") failed", "Expected an oop or NULL for discovered field at " "0x%016" "l" "x", p2i(discovered)); ::breakpoint(); } } while (0) | |||
| 240 | "Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered))do { if (!(_current_discovered_addr && oopDesc::is_oop_or_null (discovered))) { (*g_assert_poison) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 240, "assert(" "_current_discovered_addr && oopDesc::is_oop_or_null(discovered)" ") failed", "Expected an oop or NULL for discovered field at " "0x%016" "l" "x", p2i(discovered)); ::breakpoint(); } } while (0); | |||
| 241 | _next_discovered = discovered; | |||
| 242 | _referent = java_lang_ref_Reference::unknown_referent_no_keepalive(_current_discovered); | |||
| 243 | assert(Universe::heap()->is_in_or_null(_referent),do { if (!(Universe::heap()->is_in_or_null(_referent))) { ( *g_assert_poison) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 244, "assert(" "Universe::heap()->is_in_or_null(_referent)" ") failed", "Wrong oop found in java.lang.Reference object") ; ::breakpoint(); } } while (0) | |||
| 244 | "Wrong oop found in java.lang.Reference object")do { if (!(Universe::heap()->is_in_or_null(_referent))) { ( *g_assert_poison) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 244, "assert(" "Universe::heap()->is_in_or_null(_referent)" ") failed", "Wrong oop found in java.lang.Reference object") ; ::breakpoint(); } } while (0); | |||
| 245 | assert(allow_null_referent ?do { if (!(allow_null_referent ? oopDesc::is_oop_or_null(_referent ) : oopDesc::is_oop(_referent))) { (*g_assert_poison) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 250, "assert(" "allow_null_referent ? oopDesc::is_oop_or_null(_referent) : oopDesc::is_oop(_referent)" ") failed", "Expected an oop%s for referent field at " "0x%016" "l" "x", (allow_null_referent ? " or NULL" : ""), p2i(_referent )); ::breakpoint(); } } while (0) | |||
| 246 | oopDesc::is_oop_or_null(_referent)do { if (!(allow_null_referent ? oopDesc::is_oop_or_null(_referent ) : oopDesc::is_oop(_referent))) { (*g_assert_poison) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 250, "assert(" "allow_null_referent ? oopDesc::is_oop_or_null(_referent) : oopDesc::is_oop(_referent)" ") failed", "Expected an oop%s for referent field at " "0x%016" "l" "x", (allow_null_referent ? " or NULL" : ""), p2i(_referent )); ::breakpoint(); } } while (0) | |||
| 247 | : oopDesc::is_oop(_referent),do { if (!(allow_null_referent ? oopDesc::is_oop_or_null(_referent ) : oopDesc::is_oop(_referent))) { (*g_assert_poison) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 250, "assert(" "allow_null_referent ? oopDesc::is_oop_or_null(_referent) : oopDesc::is_oop(_referent)" ") failed", "Expected an oop%s for referent field at " "0x%016" "l" "x", (allow_null_referent ? " or NULL" : ""), p2i(_referent )); ::breakpoint(); } } while (0) | |||
| 248 | "Expected an oop%s for referent field at " PTR_FORMAT,do { if (!(allow_null_referent ? oopDesc::is_oop_or_null(_referent ) : oopDesc::is_oop(_referent))) { (*g_assert_poison) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 250, "assert(" "allow_null_referent ? oopDesc::is_oop_or_null(_referent) : oopDesc::is_oop(_referent)" ") failed", "Expected an oop%s for referent field at " "0x%016" "l" "x", (allow_null_referent ? " or NULL" : ""), p2i(_referent )); ::breakpoint(); } } while (0) | |||
| 249 | (allow_null_referent ? " or NULL" : ""),do { if (!(allow_null_referent ? oopDesc::is_oop_or_null(_referent ) : oopDesc::is_oop(_referent))) { (*g_assert_poison) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 250, "assert(" "allow_null_referent ? oopDesc::is_oop_or_null(_referent) : oopDesc::is_oop(_referent)" ") failed", "Expected an oop%s for referent field at " "0x%016" "l" "x", (allow_null_referent ? " or NULL" : ""), p2i(_referent )); ::breakpoint(); } } while (0) | |||
| 250 | p2i(_referent))do { if (!(allow_null_referent ? oopDesc::is_oop_or_null(_referent ) : oopDesc::is_oop(_referent))) { (*g_assert_poison) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 250, "assert(" "allow_null_referent ? oopDesc::is_oop_or_null(_referent) : oopDesc::is_oop(_referent)" ") failed", "Expected an oop%s for referent field at " "0x%016" "l" "x", (allow_null_referent ? " or NULL" : ""), p2i(_referent )); ::breakpoint(); } } while (0); | |||
| 251 | } | |||
| 252 | ||||
| 253 | void DiscoveredListIterator::remove() { | |||
| 254 | assert(oopDesc::is_oop(_current_discovered), "Dropping a bad reference")do { if (!(oopDesc::is_oop(_current_discovered))) { (*g_assert_poison ) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 254, "assert(" "oopDesc::is_oop(_current_discovered)" ") failed" , "Dropping a bad reference"); ::breakpoint(); } } while (0); | |||
| 255 | RawAccess<>::oop_store(_current_discovered_addr, oop(NULL__null)); | |||
| 256 | ||||
| 257 | // First _prev_next ref actually points into DiscoveredList (gross). | |||
| 258 | oop new_next; | |||
| 259 | if (_next_discovered == _current_discovered) { | |||
| 260 | // At the end of the list, we should make _prev point to itself. | |||
| 261 | // If _ref is the first ref, then _prev_next will be in the DiscoveredList, | |||
| 262 | // and _prev will be NULL. | |||
| 263 | new_next = _prev_discovered; | |||
| 264 | } else { | |||
| 265 | new_next = _next_discovered; | |||
| 266 | } | |||
| 267 | // Remove Reference object from discovered list. We do not need barriers here, | |||
| 268 | // as we only remove. We will do the barrier when we actually advance the cursor. | |||
| 269 | RawAccess<>::oop_store(_prev_discovered_addr, new_next); | |||
| 270 | _removed++; | |||
| 271 | _refs_list.dec_length(1); | |||
| 272 | } | |||
| 273 | ||||
| 274 | void DiscoveredListIterator::make_referent_alive() { | |||
| 275 | HeapWord* addr = java_lang_ref_Reference::referent_addr_raw(_current_discovered); | |||
| 276 | if (UseCompressedOops) { | |||
| 277 | _keep_alive->do_oop((narrowOop*)addr); | |||
| 278 | } else { | |||
| 279 | _keep_alive->do_oop((oop*)addr); | |||
| 280 | } | |||
| 281 | } | |||
| 282 | ||||
| 283 | void DiscoveredListIterator::clear_referent() { | |||
| 284 | java_lang_ref_Reference::clear_referent(_current_discovered); | |||
| 285 | } | |||
| 286 | ||||
| 287 | void DiscoveredListIterator::enqueue() { | |||
| 288 | if (_prev_discovered_addr != _refs_list.adr_head()) { | |||
| 289 | _enqueue->enqueue(_prev_discovered_addr, _current_discovered); | |||
| 290 | } else { | |||
| 291 | RawAccess<>::oop_store(_prev_discovered_addr, _current_discovered); | |||
| 292 | } | |||
| 293 | } | |||
| 294 | ||||
| 295 | void DiscoveredListIterator::complete_enqueue() { | |||
| 296 | if (_prev_discovered != nullptr) { | |||
| 297 | // This is the last object. | |||
| 298 | // Swap refs_list into pending list and set obj's | |||
| 299 | // discovered to what we read from the pending list. | |||
| 300 | oop old = Universe::swap_reference_pending_list(_refs_list.head()); | |||
| 301 | _enqueue->enqueue(java_lang_ref_Reference::discovered_addr_raw(_prev_discovered), old); | |||
| 302 | } | |||
| 303 | } | |||
| 304 | ||||
| 305 | inline void log_preclean_ref(const DiscoveredListIterator& iter, const char* reason) { | |||
| 306 | if (log_develop_is_enabled(Trace, gc, ref)(LogImpl<(LogTag::_gc), (LogTag::_ref), (LogTag::__NO_TAG) , (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG)> ::is_level(LogLevel::Trace))) { | |||
| 307 | ResourceMark rm; | |||
| 308 | log_develop_trace(gc, ref)(!(LogImpl<(LogTag::_gc), (LogTag::_ref), (LogTag::__NO_TAG ), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG) >::is_level(LogLevel::Trace))) ? (void)0 : LogImpl<(LogTag ::_gc), (LogTag::_ref), (LogTag::__NO_TAG), (LogTag::__NO_TAG ), (LogTag::__NO_TAG), (LogTag::__NO_TAG)>::write<LogLevel ::Trace>("Precleaning %s reference " PTR_FORMAT"0x%016" "l" "x" ": %s", | |||
| 309 | reason, p2i(iter.obj()), | |||
| 310 | iter.obj()->klass()->internal_name()); | |||
| 311 | } | |||
| 312 | } | |||
| 313 | ||||
| 314 | inline void log_dropped_ref(const DiscoveredListIterator& iter, const char* reason) { | |||
| 315 | if (log_develop_is_enabled(Trace, gc, ref)(LogImpl<(LogTag::_gc), (LogTag::_ref), (LogTag::__NO_TAG) , (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG)> ::is_level(LogLevel::Trace))) { | |||
| 316 | ResourceMark rm; | |||
| 317 | log_develop_trace(gc, ref)(!(LogImpl<(LogTag::_gc), (LogTag::_ref), (LogTag::__NO_TAG ), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG) >::is_level(LogLevel::Trace))) ? (void)0 : LogImpl<(LogTag ::_gc), (LogTag::_ref), (LogTag::__NO_TAG), (LogTag::__NO_TAG ), (LogTag::__NO_TAG), (LogTag::__NO_TAG)>::write<LogLevel ::Trace>("Dropping %s reference " PTR_FORMAT"0x%016" "l" "x" ": %s", | |||
| 318 | reason, p2i(iter.obj()), | |||
| 319 | iter.obj()->klass()->internal_name()); | |||
| 320 | } | |||
| 321 | } | |||
| 322 | ||||
| 323 | inline void log_enqueued_ref(const DiscoveredListIterator& iter, const char* reason) { | |||
| 324 | if (log_develop_is_enabled(Trace, gc, ref)(LogImpl<(LogTag::_gc), (LogTag::_ref), (LogTag::__NO_TAG) , (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG)> ::is_level(LogLevel::Trace))) { | |||
| 325 | ResourceMark rm; | |||
| 326 | log_develop_trace(gc, ref)(!(LogImpl<(LogTag::_gc), (LogTag::_ref), (LogTag::__NO_TAG ), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG) >::is_level(LogLevel::Trace))) ? (void)0 : LogImpl<(LogTag ::_gc), (LogTag::_ref), (LogTag::__NO_TAG), (LogTag::__NO_TAG ), (LogTag::__NO_TAG), (LogTag::__NO_TAG)>::write<LogLevel ::Trace>("Enqueue %s reference (" INTPTR_FORMAT"0x%016" "l" "x" ": %s)", | |||
| 327 | reason, p2i(iter.obj()), iter.obj()->klass()->internal_name()); | |||
| 328 | } | |||
| 329 | assert(oopDesc::is_oop(iter.obj()), "Adding a bad reference")do { if (!(oopDesc::is_oop(iter.obj()))) { (*g_assert_poison) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 329, "assert(" "oopDesc::is_oop(iter.obj())" ") failed", "Adding a bad reference" ); ::breakpoint(); } } while (0); | |||
| 330 | } | |||
| 331 | ||||
| 332 | size_t ReferenceProcessor::process_discovered_list_work(DiscoveredList& refs_list, | |||
| 333 | BoolObjectClosure* is_alive, | |||
| 334 | OopClosure* keep_alive, | |||
| 335 | EnqueueDiscoveredFieldClosure* enqueue, | |||
| 336 | bool do_enqueue_and_clear) { | |||
| 337 | DiscoveredListIterator iter(refs_list, keep_alive, is_alive, enqueue); | |||
| 338 | while (iter.has_next()) { | |||
| 339 | iter.load_ptrs(DEBUG_ONLY(discovery_is_concurrent() /* allow_null_referent */)discovery_is_concurrent()); | |||
| 340 | if (iter.referent() == NULL__null) { | |||
| 341 | // Reference has been cleared since discovery; only possible if | |||
| 342 | // discovery is concurrent (checked by load_ptrs). Remove | |||
| 343 | // reference from list. | |||
| 344 | log_dropped_ref(iter, "cleared"); | |||
| 345 | iter.remove(); | |||
| 346 | iter.move_to_next(); | |||
| 347 | } else if (iter.is_referent_alive()) { | |||
| 348 | // The referent is reachable after all. | |||
| 349 | // Remove reference from list. | |||
| 350 | log_dropped_ref(iter, "reachable"); | |||
| 351 | iter.remove(); | |||
| 352 | // Update the referent pointer as necessary. Note that this | |||
| 353 | // should not entail any recursive marking because the | |||
| 354 | // referent must already have been traversed. | |||
| 355 | iter.make_referent_alive(); | |||
| 356 | iter.move_to_next(); | |||
| 357 | } else { | |||
| 358 | if (do_enqueue_and_clear) { | |||
| 359 | iter.clear_referent(); | |||
| 360 | iter.enqueue(); | |||
| 361 | log_enqueued_ref(iter, "cleared"); | |||
| 362 | } | |||
| 363 | // Keep in discovered list | |||
| 364 | iter.next(); | |||
| 365 | } | |||
| 366 | } | |||
| 367 | if (do_enqueue_and_clear) { | |||
| 368 | iter.complete_enqueue(); | |||
| 369 | refs_list.clear(); | |||
| 370 | } | |||
| 371 | ||||
| 372 | log_develop_trace(gc, ref)(!(LogImpl<(LogTag::_gc), (LogTag::_ref), (LogTag::__NO_TAG ), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG) >::is_level(LogLevel::Trace))) ? (void)0 : LogImpl<(LogTag ::_gc), (LogTag::_ref), (LogTag::__NO_TAG), (LogTag::__NO_TAG ), (LogTag::__NO_TAG), (LogTag::__NO_TAG)>::write<LogLevel ::Trace>(" Dropped " SIZE_FORMAT"%" "l" "u" " active Refs out of " SIZE_FORMAT"%" "l" "u" | |||
| 373 | " Refs in discovered list " INTPTR_FORMAT"0x%016" "l" "x", | |||
| 374 | iter.removed(), iter.processed(), p2i(&refs_list)); | |||
| 375 | return iter.removed(); | |||
| 376 | } | |||
| 377 | ||||
| 378 | size_t ReferenceProcessor::process_final_keep_alive_work(DiscoveredList& refs_list, | |||
| 379 | OopClosure* keep_alive, | |||
| 380 | EnqueueDiscoveredFieldClosure* enqueue) { | |||
| 381 | DiscoveredListIterator iter(refs_list, keep_alive, NULL__null, enqueue); | |||
| 382 | while (iter.has_next()) { | |||
| 383 | iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */)false); | |||
| 384 | // keep the referent and followers around | |||
| 385 | iter.make_referent_alive(); | |||
| 386 | ||||
| 387 | // Self-loop next, to mark the FinalReference not active. | |||
| 388 | assert(java_lang_ref_Reference::next(iter.obj()) == NULL, "enqueued FinalReference")do { if (!(java_lang_ref_Reference::next(iter.obj()) == __null )) { (*g_assert_poison) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 388, "assert(" "java_lang_ref_Reference::next(iter.obj()) == __null" ") failed", "enqueued FinalReference"); ::breakpoint(); } } while (0); | |||
| 389 | java_lang_ref_Reference::set_next_raw(iter.obj(), iter.obj()); | |||
| 390 | ||||
| 391 | iter.enqueue(); | |||
| 392 | log_enqueued_ref(iter, "Final"); | |||
| 393 | iter.next(); | |||
| 394 | } | |||
| 395 | iter.complete_enqueue(); | |||
| 396 | refs_list.clear(); | |||
| 397 | ||||
| 398 | assert(iter.removed() == 0, "This phase does not remove anything.")do { if (!(iter.removed() == 0)) { (*g_assert_poison) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 398, "assert(" "iter.removed() == 0" ") failed", "This phase does not remove anything." ); ::breakpoint(); } } while (0); | |||
| 399 | return iter.removed(); | |||
| 400 | } | |||
| 401 | ||||
| 402 | void | |||
| 403 | ReferenceProcessor::clear_discovered_references(DiscoveredList& refs_list) { | |||
| 404 | oop obj = NULL__null; | |||
| 405 | oop next = refs_list.head(); | |||
| 406 | while (next != obj) { | |||
| 407 | obj = next; | |||
| 408 | next = java_lang_ref_Reference::discovered(obj); | |||
| 409 | java_lang_ref_Reference::set_discovered_raw(obj, NULL__null); | |||
| 410 | } | |||
| 411 | refs_list.clear(); | |||
| 412 | } | |||
| 413 | ||||
| 414 | void ReferenceProcessor::abandon_partial_discovery() { | |||
| 415 | // loop over the lists | |||
| 416 | for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) { | |||
| 417 | if ((i % _max_num_queues) == 0) { | |||
| 418 | log_develop_trace(gc, ref)(!(LogImpl<(LogTag::_gc), (LogTag::_ref), (LogTag::__NO_TAG ), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG) >::is_level(LogLevel::Trace))) ? (void)0 : LogImpl<(LogTag ::_gc), (LogTag::_ref), (LogTag::__NO_TAG), (LogTag::__NO_TAG ), (LogTag::__NO_TAG), (LogTag::__NO_TAG)>::write<LogLevel ::Trace>("Abandoning %s discovered list", list_name(i)); | |||
| 419 | } | |||
| 420 | clear_discovered_references(_discovered_refs[i]); | |||
| 421 | } | |||
| 422 | } | |||
| 423 | ||||
| 424 | size_t ReferenceProcessor::total_reference_count(ReferenceType type) const { | |||
| 425 | DiscoveredList* list = NULL__null; | |||
| ||||
| 426 | ||||
| 427 | switch (type) { | |||
| 428 | case REF_SOFT: | |||
| 429 | list = _discoveredSoftRefs; | |||
| 430 | break; | |||
| 431 | case REF_WEAK: | |||
| 432 | list = _discoveredWeakRefs; | |||
| 433 | break; | |||
| 434 | case REF_FINAL: | |||
| 435 | list = _discoveredFinalRefs; | |||
| 436 | break; | |||
| 437 | case REF_PHANTOM: | |||
| 438 | list = _discoveredPhantomRefs; | |||
| 439 | break; | |||
| 440 | case REF_OTHER: | |||
| 441 | case REF_NONE: | |||
| 442 | default: | |||
| 443 | ShouldNotReachHere()do { (*g_assert_poison) = 'X';; report_should_not_reach_here( "/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 443); ::breakpoint(); } while (0); | |||
| 444 | } | |||
| 445 | return total_count(list); | |||
| 446 | } | |||
| 447 | ||||
| 448 | void RefProcTask::process_discovered_list(uint worker_id, | |||
| 449 | ReferenceType ref_type, | |||
| 450 | BoolObjectClosure* is_alive, | |||
| 451 | OopClosure* keep_alive, | |||
| 452 | EnqueueDiscoveredFieldClosure* enqueue) { | |||
| 453 | ReferenceProcessor::RefProcSubPhases subphase; | |||
| 454 | DiscoveredList* dl; | |||
| 455 | switch (ref_type) { | |||
| 456 | case ReferenceType::REF_SOFT: | |||
| 457 | subphase = ReferenceProcessor::ProcessSoftRefSubPhase; | |||
| 458 | dl = _ref_processor._discoveredSoftRefs; | |||
| 459 | break; | |||
| 460 | case ReferenceType::REF_WEAK: | |||
| 461 | subphase = ReferenceProcessor::ProcessWeakRefSubPhase; | |||
| 462 | dl = _ref_processor._discoveredWeakRefs; | |||
| 463 | break; | |||
| 464 | case ReferenceType::REF_FINAL: | |||
| 465 | subphase = ReferenceProcessor::ProcessFinalRefSubPhase; | |||
| 466 | dl = _ref_processor._discoveredFinalRefs; | |||
| 467 | break; | |||
| 468 | case ReferenceType::REF_PHANTOM: | |||
| 469 | subphase = ReferenceProcessor::ProcessPhantomRefsSubPhase; | |||
| 470 | dl = _ref_processor._discoveredPhantomRefs; | |||
| 471 | break; | |||
| 472 | default: | |||
| 473 | ShouldNotReachHere()do { (*g_assert_poison) = 'X';; report_should_not_reach_here( "/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 473); ::breakpoint(); } while (0); | |||
| 474 | } | |||
| 475 | ||||
| 476 | // Only Final refs are not enqueued and cleared here. | |||
| 477 | bool do_enqueue_and_clear = (ref_type != REF_FINAL); | |||
| 478 | ||||
| 479 | { | |||
| 480 | RefProcSubPhasesWorkerTimeTracker tt(subphase, _phase_times, tracker_id(worker_id)); | |||
| 481 | size_t const removed = _ref_processor.process_discovered_list_work(dl[worker_id], | |||
| 482 | is_alive, | |||
| 483 | keep_alive, | |||
| 484 | enqueue, | |||
| 485 | do_enqueue_and_clear); | |||
| 486 | _phase_times->add_ref_cleared(ref_type, removed); | |||
| 487 | } | |||
| 488 | } | |||
| 489 | ||||
| 490 | class RefProcSoftWeakFinalPhaseTask: public RefProcTask { | |||
| 491 | public: | |||
| 492 | RefProcSoftWeakFinalPhaseTask(ReferenceProcessor& ref_processor, | |||
| 493 | ReferenceProcessorPhaseTimes* phase_times) | |||
| 494 | : RefProcTask(ref_processor, | |||
| 495 | phase_times) {} | |||
| 496 | ||||
| 497 | void rp_work(uint worker_id, | |||
| 498 | BoolObjectClosure* is_alive, | |||
| 499 | OopClosure* keep_alive, | |||
| 500 | EnqueueDiscoveredFieldClosure* enqueue, | |||
| 501 | VoidClosure* complete_gc) override { | |||
| 502 | RefProcWorkerTimeTracker t(_phase_times->soft_weak_final_refs_phase_worker_time_sec(), tracker_id(worker_id)); | |||
| 503 | ||||
| 504 | process_discovered_list(worker_id, REF_SOFT, is_alive, keep_alive, enqueue); | |||
| 505 | ||||
| 506 | process_discovered_list(worker_id, REF_WEAK, is_alive, keep_alive, enqueue); | |||
| 507 | ||||
| 508 | process_discovered_list(worker_id, REF_FINAL, is_alive, keep_alive, enqueue); | |||
| 509 | ||||
| 510 | // Close the reachable set; needed for collectors which keep_alive_closure do | |||
| 511 | // not immediately complete their work. | |||
| 512 | complete_gc->do_void(); | |||
| 513 | } | |||
| 514 | }; | |||
| 515 | ||||
| 516 | class RefProcKeepAliveFinalPhaseTask: public RefProcTask { | |||
| 517 | public: | |||
| 518 | RefProcKeepAliveFinalPhaseTask(ReferenceProcessor& ref_processor, | |||
| 519 | ReferenceProcessorPhaseTimes* phase_times) | |||
| 520 | : RefProcTask(ref_processor, | |||
| 521 | phase_times) {} | |||
| 522 | ||||
| 523 | void rp_work(uint worker_id, | |||
| 524 | BoolObjectClosure* is_alive, | |||
| 525 | OopClosure* keep_alive, | |||
| 526 | EnqueueDiscoveredFieldClosure* enqueue, | |||
| 527 | VoidClosure* complete_gc) override { | |||
| 528 | RefProcSubPhasesWorkerTimeTracker tt(ReferenceProcessor::KeepAliveFinalRefsSubPhase, _phase_times, tracker_id(worker_id)); | |||
| 529 | _ref_processor.process_final_keep_alive_work(_ref_processor._discoveredFinalRefs[worker_id], keep_alive, enqueue); | |||
| 530 | // Close the reachable set | |||
| 531 | complete_gc->do_void(); | |||
| 532 | } | |||
| 533 | }; | |||
| 534 | ||||
| 535 | class RefProcPhantomPhaseTask: public RefProcTask { | |||
| 536 | public: | |||
| 537 | RefProcPhantomPhaseTask(ReferenceProcessor& ref_processor, | |||
| 538 | ReferenceProcessorPhaseTimes* phase_times) | |||
| 539 | : RefProcTask(ref_processor, | |||
| 540 | phase_times) {} | |||
| 541 | ||||
| 542 | void rp_work(uint worker_id, | |||
| 543 | BoolObjectClosure* is_alive, | |||
| 544 | OopClosure* keep_alive, | |||
| 545 | EnqueueDiscoveredFieldClosure* enqueue, | |||
| 546 | VoidClosure* complete_gc) override { | |||
| 547 | process_discovered_list(worker_id, REF_PHANTOM, is_alive, keep_alive, enqueue); | |||
| 548 | ||||
| 549 | // Close the reachable set; needed for collectors which keep_alive_closure do | |||
| 550 | // not immediately complete their work. | |||
| 551 | complete_gc->do_void(); | |||
| 552 | } | |||
| 553 | }; | |||
| 554 | ||||
| 555 | void ReferenceProcessor::log_reflist(const char* prefix, DiscoveredList list[], uint num_active_queues) { | |||
| 556 | LogTarget(Trace, gc, ref)LogTargetImpl<LogLevel::Trace, (LogTag::_gc), (LogTag::_ref ), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG) , (LogTag::__NO_TAG)> lt; | |||
| 557 | ||||
| 558 | if (!lt.is_enabled()) { | |||
| 559 | return; | |||
| 560 | } | |||
| 561 | ||||
| 562 | size_t total = 0; | |||
| 563 | ||||
| 564 | LogStream ls(lt); | |||
| 565 | ls.print("%s", prefix); | |||
| 566 | for (uint i = 0; i < num_active_queues; i++) { | |||
| 567 | ls.print(SIZE_FORMAT"%" "l" "u" " ", list[i].length()); | |||
| 568 | total += list[i].length(); | |||
| 569 | } | |||
| 570 | ls.print_cr("(" SIZE_FORMAT"%" "l" "u" ")", total); | |||
| 571 | } | |||
| 572 | ||||
| 573 | #ifndef PRODUCT | |||
| 574 | void ReferenceProcessor::log_reflist_counts(DiscoveredList ref_lists[], uint num_active_queues) { | |||
| 575 | if (!log_is_enabled(Trace, gc, ref)(LogImpl<(LogTag::_gc), (LogTag::_ref), (LogTag::__NO_TAG) , (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG)> ::is_level(LogLevel::Trace))) { | |||
| 576 | return; | |||
| 577 | } | |||
| 578 | ||||
| 579 | log_reflist("", ref_lists, num_active_queues); | |||
| 580 | #ifdef ASSERT1 | |||
| 581 | for (uint i = num_active_queues; i < _max_num_queues; i++) { | |||
| 582 | assert(ref_lists[i].length() == 0, SIZE_FORMAT " unexpected References in %u",do { if (!(ref_lists[i].length() == 0)) { (*g_assert_poison) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 583, "assert(" "ref_lists[i].length() == 0" ") failed", "%" "l" "u" " unexpected References in %u", ref_lists[i].length( ), i); ::breakpoint(); } } while (0) | |||
| 583 | ref_lists[i].length(), i)do { if (!(ref_lists[i].length() == 0)) { (*g_assert_poison) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 583, "assert(" "ref_lists[i].length() == 0" ") failed", "%" "l" "u" " unexpected References in %u", ref_lists[i].length( ), i); ::breakpoint(); } } while (0); | |||
| 584 | } | |||
| 585 | #endif | |||
| 586 | } | |||
| 587 | #endif | |||
| 588 | ||||
| 589 | void ReferenceProcessor::set_active_mt_degree(uint v) { | |||
| 590 | assert(v <= max_num_queues(), "Mt degree %u too high, maximum %u", v, max_num_queues())do { if (!(v <= max_num_queues())) { (*g_assert_poison) = 'X' ;; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 590, "assert(" "v <= max_num_queues()" ") failed", "Mt degree %u too high, maximum %u" , v, max_num_queues()); ::breakpoint(); } } while (0); | |||
| 591 | _num_queues = v; | |||
| 592 | _next_id = 0; | |||
| 593 | } | |||
| 594 | ||||
| 595 | bool ReferenceProcessor::need_balance_queues(DiscoveredList refs_lists[]) { | |||
| 596 | assert(processing_is_mt(), "why balance non-mt processing?")do { if (!(processing_is_mt())) { (*g_assert_poison) = 'X';; report_vm_error ("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 596, "assert(" "processing_is_mt()" ") failed", "why balance non-mt processing?" ); ::breakpoint(); } } while (0); | |||
| 597 | // _num_queues is the processing degree. Only list entries up to | |||
| 598 | // _num_queues will be processed, so any non-empty lists beyond | |||
| 599 | // that must be redistributed to lists in that range. Even if not | |||
| 600 | // needed for that, balancing may be desirable to eliminate poor | |||
| 601 | // distribution of references among the lists. | |||
| 602 | if (ParallelRefProcBalancingEnabled) { | |||
| 603 | return true; // Configuration says do it. | |||
| 604 | } else { | |||
| 605 | // Configuration says don't balance, but if there are non-empty | |||
| 606 | // lists beyond the processing degree, then must ignore the | |||
| 607 | // configuration and balance anyway. | |||
| 608 | for (uint i = _num_queues; i < _max_num_queues; ++i) { | |||
| 609 | if (!refs_lists[i].is_empty()) { | |||
| 610 | return true; // Must balance despite configuration. | |||
| 611 | } | |||
| 612 | } | |||
| 613 | return false; // Safe to obey configuration and not balance. | |||
| 614 | } | |||
| 615 | } | |||
| 616 | ||||
| 617 | void ReferenceProcessor::maybe_balance_queues(DiscoveredList refs_lists[]) { | |||
| 618 | assert(processing_is_mt(), "Should not call this otherwise")do { if (!(processing_is_mt())) { (*g_assert_poison) = 'X';; report_vm_error ("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 618, "assert(" "processing_is_mt()" ") failed", "Should not call this otherwise" ); ::breakpoint(); } } while (0); | |||
| 619 | if (need_balance_queues(refs_lists)) { | |||
| 620 | balance_queues(refs_lists); | |||
| 621 | } | |||
| 622 | } | |||
| 623 | ||||
| 624 | // Balances reference queues. | |||
| 625 | // Move entries from all queues[0, 1, ..., _max_num_q-1] to | |||
| 626 | // queues[0, 1, ..., _num_q-1] because only the first _num_q | |||
| 627 | // corresponding to the active workers will be processed. | |||
| 628 | void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[]) | |||
| 629 | { | |||
| 630 | // calculate total length | |||
| 631 | size_t total_refs = 0; | |||
| 632 | log_develop_trace(gc, ref)(!(LogImpl<(LogTag::_gc), (LogTag::_ref), (LogTag::__NO_TAG ), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG) >::is_level(LogLevel::Trace))) ? (void)0 : LogImpl<(LogTag ::_gc), (LogTag::_ref), (LogTag::__NO_TAG), (LogTag::__NO_TAG ), (LogTag::__NO_TAG), (LogTag::__NO_TAG)>::write<LogLevel ::Trace>("Balance ref_lists "); | |||
| 633 | ||||
| 634 | log_reflist_counts(ref_lists, _max_num_queues); | |||
| 635 | ||||
| 636 | for (uint i = 0; i < _max_num_queues; ++i) { | |||
| 637 | total_refs += ref_lists[i].length(); | |||
| 638 | } | |||
| 639 | size_t avg_refs = total_refs / _num_queues + 1; | |||
| 640 | uint to_idx = 0; | |||
| 641 | for (uint from_idx = 0; from_idx < _max_num_queues; from_idx++) { | |||
| 642 | bool move_all = false; | |||
| 643 | if (from_idx >= _num_queues) { | |||
| 644 | move_all = ref_lists[from_idx].length() > 0; | |||
| 645 | } | |||
| 646 | while ((ref_lists[from_idx].length() > avg_refs) || | |||
| 647 | move_all) { | |||
| 648 | assert(to_idx < _num_queues, "Sanity Check!")do { if (!(to_idx < _num_queues)) { (*g_assert_poison) = 'X' ;; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 648, "assert(" "to_idx < _num_queues" ") failed", "Sanity Check!" ); ::breakpoint(); } } while (0); | |||
| 649 | if (ref_lists[to_idx].length() < avg_refs) { | |||
| 650 | // move superfluous refs | |||
| 651 | size_t refs_to_move; | |||
| 652 | // Move all the Ref's if the from queue will not be processed. | |||
| 653 | if (move_all) { | |||
| 654 | refs_to_move = MIN2(ref_lists[from_idx].length(), | |||
| 655 | avg_refs - ref_lists[to_idx].length()); | |||
| 656 | } else { | |||
| 657 | refs_to_move = MIN2(ref_lists[from_idx].length() - avg_refs, | |||
| 658 | avg_refs - ref_lists[to_idx].length()); | |||
| 659 | } | |||
| 660 | ||||
| 661 | assert(refs_to_move > 0, "otherwise the code below will fail")do { if (!(refs_to_move > 0)) { (*g_assert_poison) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 661, "assert(" "refs_to_move > 0" ") failed", "otherwise the code below will fail" ); ::breakpoint(); } } while (0); | |||
| 662 | ||||
| 663 | oop move_head = ref_lists[from_idx].head(); | |||
| 664 | oop move_tail = move_head; | |||
| 665 | oop new_head = move_head; | |||
| 666 | // find an element to split the list on | |||
| 667 | for (size_t j = 0; j < refs_to_move; ++j) { | |||
| 668 | move_tail = new_head; | |||
| 669 | new_head = java_lang_ref_Reference::discovered(new_head); | |||
| 670 | } | |||
| 671 | ||||
| 672 | // Add the chain to the to list. | |||
| 673 | if (ref_lists[to_idx].head() == NULL__null) { | |||
| 674 | // to list is empty. Make a loop at the end. | |||
| 675 | java_lang_ref_Reference::set_discovered_raw(move_tail, move_tail); | |||
| 676 | } else { | |||
| 677 | java_lang_ref_Reference::set_discovered_raw(move_tail, ref_lists[to_idx].head()); | |||
| 678 | } | |||
| 679 | ref_lists[to_idx].set_head(move_head); | |||
| 680 | ref_lists[to_idx].inc_length(refs_to_move); | |||
| 681 | ||||
| 682 | // Remove the chain from the from list. | |||
| 683 | if (move_tail == new_head) { | |||
| 684 | // We found the end of the from list. | |||
| 685 | ref_lists[from_idx].set_head(NULL__null); | |||
| 686 | } else { | |||
| 687 | ref_lists[from_idx].set_head(new_head); | |||
| 688 | } | |||
| 689 | ref_lists[from_idx].dec_length(refs_to_move); | |||
| 690 | if (ref_lists[from_idx].length() == 0) { | |||
| 691 | break; | |||
| 692 | } | |||
| 693 | } else { | |||
| 694 | to_idx = (to_idx + 1) % _num_queues; | |||
| 695 | } | |||
| 696 | } | |||
| 697 | } | |||
| 698 | #ifdef ASSERT1 | |||
| 699 | log_reflist_counts(ref_lists, _num_queues); | |||
| 700 | size_t balanced_total_refs = 0; | |||
| 701 | for (uint i = 0; i < _num_queues; ++i) { | |||
| 702 | balanced_total_refs += ref_lists[i].length(); | |||
| 703 | } | |||
| 704 | assert(total_refs == balanced_total_refs, "Balancing was incomplete")do { if (!(total_refs == balanced_total_refs)) { (*g_assert_poison ) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 704, "assert(" "total_refs == balanced_total_refs" ") failed" , "Balancing was incomplete"); ::breakpoint(); } } while (0); | |||
| 705 | #endif | |||
| 706 | } | |||
| 707 | ||||
| 708 | void ReferenceProcessor::run_task(RefProcTask& task, RefProcProxyTask& proxy_task, bool marks_oops_alive) { | |||
| 709 | log_debug(gc, ref)(!(LogImpl<(LogTag::_gc), (LogTag::_ref), (LogTag::__NO_TAG ), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG) >::is_level(LogLevel::Debug))) ? (void)0 : LogImpl<(LogTag ::_gc), (LogTag::_ref), (LogTag::__NO_TAG), (LogTag::__NO_TAG ), (LogTag::__NO_TAG), (LogTag::__NO_TAG)>::write<LogLevel ::Debug>("ReferenceProcessor::execute queues: %d, %s, marks_oops_alive: %s", | |||
| 710 | num_queues(), | |||
| 711 | processing_is_mt() ? "RefProcThreadModel::Multi" : "RefProcThreadModel::Single", | |||
| 712 | marks_oops_alive ? "true" : "false"); | |||
| 713 | ||||
| 714 | proxy_task.prepare_run_task(task, num_queues(), processing_is_mt() ? RefProcThreadModel::Multi : RefProcThreadModel::Single, marks_oops_alive); | |||
| 715 | if (processing_is_mt()) { | |||
| 716 | WorkerThreads* workers = Universe::heap()->safepoint_workers(); | |||
| 717 | assert(workers != NULL, "can not dispatch multi threaded without workers")do { if (!(workers != __null)) { (*g_assert_poison) = 'X';; report_vm_error ("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 717, "assert(" "workers != __null" ") failed", "can not dispatch multi threaded without workers" ); ::breakpoint(); } } while (0); | |||
| 718 | assert(workers->active_workers() >= num_queues(),do { if (!(workers->active_workers() >= num_queues())) { (*g_assert_poison) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 720, "assert(" "workers->active_workers() >= num_queues()" ") failed", "Ergonomically chosen workers(%u) should be less than or equal to active workers(%u)" , num_queues(), workers->active_workers()); ::breakpoint() ; } } while (0) | |||
| 719 | "Ergonomically chosen workers(%u) should be less than or equal to active workers(%u)",do { if (!(workers->active_workers() >= num_queues())) { (*g_assert_poison) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 720, "assert(" "workers->active_workers() >= num_queues()" ") failed", "Ergonomically chosen workers(%u) should be less than or equal to active workers(%u)" , num_queues(), workers->active_workers()); ::breakpoint() ; } } while (0) | |||
| 720 | num_queues(), workers->active_workers())do { if (!(workers->active_workers() >= num_queues())) { (*g_assert_poison) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 720, "assert(" "workers->active_workers() >= num_queues()" ") failed", "Ergonomically chosen workers(%u) should be less than or equal to active workers(%u)" , num_queues(), workers->active_workers()); ::breakpoint() ; } } while (0); | |||
| 721 | workers->run_task(&proxy_task, num_queues()); | |||
| 722 | } else { | |||
| 723 | for (unsigned i = 0; i < _max_num_queues; ++i) { | |||
| 724 | proxy_task.work(i); | |||
| 725 | } | |||
| 726 | } | |||
| 727 | } | |||
| 728 | ||||
| 729 | void ReferenceProcessor::process_soft_weak_final_refs(RefProcProxyTask& proxy_task, | |||
| 730 | ReferenceProcessorPhaseTimes& phase_times) { | |||
| 731 | ||||
| 732 | size_t const num_soft_refs = phase_times.ref_discovered(REF_SOFT); | |||
| 733 | size_t const num_weak_refs = phase_times.ref_discovered(REF_WEAK); | |||
| 734 | size_t const num_final_refs = phase_times.ref_discovered(REF_FINAL); | |||
| 735 | size_t const num_total_refs = num_soft_refs + num_weak_refs + num_final_refs; | |||
| 736 | ||||
| 737 | if (num_total_refs == 0) { | |||
| 738 | log_debug(gc, ref)(!(LogImpl<(LogTag::_gc), (LogTag::_ref), (LogTag::__NO_TAG ), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG) >::is_level(LogLevel::Debug))) ? (void)0 : LogImpl<(LogTag ::_gc), (LogTag::_ref), (LogTag::__NO_TAG), (LogTag::__NO_TAG ), (LogTag::__NO_TAG), (LogTag::__NO_TAG)>::write<LogLevel ::Debug>("Skipped SoftWeakFinalRefsPhase of Reference Processing: no references"); | |||
| 739 | return; | |||
| 740 | } | |||
| 741 | ||||
| 742 | RefProcMTDegreeAdjuster a(this, SoftWeakFinalRefsPhase, num_total_refs); | |||
| 743 | ||||
| 744 | if (processing_is_mt()) { | |||
| 745 | RefProcBalanceQueuesTimeTracker tt(SoftWeakFinalRefsPhase, &phase_times); | |||
| 746 | maybe_balance_queues(_discoveredSoftRefs); | |||
| 747 | maybe_balance_queues(_discoveredWeakRefs); | |||
| 748 | maybe_balance_queues(_discoveredFinalRefs); | |||
| 749 | } | |||
| 750 | ||||
| 751 | RefProcPhaseTimeTracker tt(SoftWeakFinalRefsPhase, &phase_times); | |||
| 752 | ||||
| 753 | log_reflist("SoftWeakFinalRefsPhase Soft before", _discoveredSoftRefs, _max_num_queues); | |||
| 754 | log_reflist("SoftWeakFinalRefsPhase Weak before", _discoveredWeakRefs, _max_num_queues); | |||
| 755 | log_reflist("SoftWeakFinalRefsPhase Final before", _discoveredFinalRefs, _max_num_queues); | |||
| 756 | ||||
| 757 | RefProcSoftWeakFinalPhaseTask phase_task(*this, &phase_times); | |||
| 758 | run_task(phase_task, proxy_task, false); | |||
| 759 | ||||
| 760 | verify_total_count_zero(_discoveredSoftRefs, "SoftReference"); | |||
| 761 | verify_total_count_zero(_discoveredWeakRefs, "WeakReference"); | |||
| 762 | log_reflist("SoftWeakFinalRefsPhase Final after", _discoveredFinalRefs, _max_num_queues); | |||
| 763 | } | |||
| 764 | ||||
| 765 | void ReferenceProcessor::process_final_keep_alive(RefProcProxyTask& proxy_task, | |||
| 766 | ReferenceProcessorPhaseTimes& phase_times) { | |||
| 767 | ||||
| 768 | size_t const num_final_refs = phase_times.ref_discovered(REF_FINAL); | |||
| 769 | ||||
| 770 | if (num_final_refs == 0) { | |||
| 771 | log_debug(gc, ref)(!(LogImpl<(LogTag::_gc), (LogTag::_ref), (LogTag::__NO_TAG ), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG) >::is_level(LogLevel::Debug))) ? (void)0 : LogImpl<(LogTag ::_gc), (LogTag::_ref), (LogTag::__NO_TAG), (LogTag::__NO_TAG ), (LogTag::__NO_TAG), (LogTag::__NO_TAG)>::write<LogLevel ::Debug>("Skipped KeepAliveFinalRefsPhase of Reference Processing: no references"); | |||
| 772 | return; | |||
| 773 | } | |||
| 774 | ||||
| 775 | RefProcMTDegreeAdjuster a(this, KeepAliveFinalRefsPhase, num_final_refs); | |||
| 776 | ||||
| 777 | if (processing_is_mt()) { | |||
| 778 | RefProcBalanceQueuesTimeTracker tt(KeepAliveFinalRefsPhase, &phase_times); | |||
| 779 | maybe_balance_queues(_discoveredFinalRefs); | |||
| 780 | } | |||
| 781 | ||||
| 782 | // Traverse referents of final references and keep them and followers alive. | |||
| 783 | RefProcPhaseTimeTracker tt(KeepAliveFinalRefsPhase, &phase_times); | |||
| 784 | RefProcKeepAliveFinalPhaseTask phase_task(*this, &phase_times); | |||
| 785 | run_task(phase_task, proxy_task, true); | |||
| 786 | ||||
| 787 | verify_total_count_zero(_discoveredFinalRefs, "FinalReference"); | |||
| 788 | } | |||
| 789 | ||||
| 790 | void ReferenceProcessor::process_phantom_refs(RefProcProxyTask& proxy_task, | |||
| 791 | ReferenceProcessorPhaseTimes& phase_times) { | |||
| 792 | ||||
| 793 | size_t const num_phantom_refs = phase_times.ref_discovered(REF_PHANTOM); | |||
| 794 | ||||
| 795 | if (num_phantom_refs == 0) { | |||
| 796 | log_debug(gc, ref)(!(LogImpl<(LogTag::_gc), (LogTag::_ref), (LogTag::__NO_TAG ), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG) >::is_level(LogLevel::Debug))) ? (void)0 : LogImpl<(LogTag ::_gc), (LogTag::_ref), (LogTag::__NO_TAG), (LogTag::__NO_TAG ), (LogTag::__NO_TAG), (LogTag::__NO_TAG)>::write<LogLevel ::Debug>("Skipped PhantomRefsPhase of Reference Processing: no references"); | |||
| 797 | return; | |||
| 798 | } | |||
| 799 | ||||
| 800 | RefProcMTDegreeAdjuster a(this, PhantomRefsPhase, num_phantom_refs); | |||
| 801 | ||||
| 802 | if (processing_is_mt()) { | |||
| 803 | RefProcBalanceQueuesTimeTracker tt(PhantomRefsPhase, &phase_times); | |||
| 804 | maybe_balance_queues(_discoveredPhantomRefs); | |||
| 805 | } | |||
| 806 | ||||
| 807 | // Walk phantom references appropriately. | |||
| 808 | RefProcPhaseTimeTracker tt(PhantomRefsPhase, &phase_times); | |||
| 809 | ||||
| 810 | log_reflist("PhantomRefsPhase Phantom before", _discoveredPhantomRefs, _max_num_queues); | |||
| 811 | ||||
| 812 | RefProcPhantomPhaseTask phase_task(*this, &phase_times); | |||
| 813 | run_task(phase_task, proxy_task, false); | |||
| 814 | ||||
| 815 | verify_total_count_zero(_discoveredPhantomRefs, "PhantomReference"); | |||
| 816 | } | |||
| 817 | ||||
| 818 | inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) { | |||
| 819 | uint id = 0; | |||
| 820 | // Determine the queue index to use for this object. | |||
| 821 | if (_discovery_is_mt) { | |||
| 822 | // During a multi-threaded discovery phase, | |||
| 823 | // each thread saves to its "own" list. | |||
| 824 | id = WorkerThread::worker_id(); | |||
| 825 | } else { | |||
| 826 | // single-threaded discovery, we save in round-robin | |||
| 827 | // fashion to each of the lists. | |||
| 828 | if (processing_is_mt()) { | |||
| 829 | id = next_id(); | |||
| 830 | } | |||
| 831 | } | |||
| 832 | assert(id < _max_num_queues, "Id is out of bounds id %u and max id %u)", id, _max_num_queues)do { if (!(id < _max_num_queues)) { (*g_assert_poison) = 'X' ;; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 832, "assert(" "id < _max_num_queues" ") failed", "Id is out of bounds id %u and max id %u)" , id, _max_num_queues); ::breakpoint(); } } while (0); | |||
| 833 | ||||
| 834 | // Get the discovered queue to which we will add | |||
| 835 | DiscoveredList* list = NULL__null; | |||
| 836 | switch (rt) { | |||
| 837 | case REF_OTHER: | |||
| 838 | // Unknown reference type, no special treatment | |||
| 839 | break; | |||
| 840 | case REF_SOFT: | |||
| 841 | list = &_discoveredSoftRefs[id]; | |||
| 842 | break; | |||
| 843 | case REF_WEAK: | |||
| 844 | list = &_discoveredWeakRefs[id]; | |||
| 845 | break; | |||
| 846 | case REF_FINAL: | |||
| 847 | list = &_discoveredFinalRefs[id]; | |||
| 848 | break; | |||
| 849 | case REF_PHANTOM: | |||
| 850 | list = &_discoveredPhantomRefs[id]; | |||
| 851 | break; | |||
| 852 | case REF_NONE: | |||
| 853 | // we should not reach here if we are an InstanceRefKlass | |||
| 854 | default: | |||
| 855 | ShouldNotReachHere()do { (*g_assert_poison) = 'X';; report_should_not_reach_here( "/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 855); ::breakpoint(); } while (0); | |||
| 856 | } | |||
| 857 | log_develop_trace(gc, ref)(!(LogImpl<(LogTag::_gc), (LogTag::_ref), (LogTag::__NO_TAG ), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG) >::is_level(LogLevel::Trace))) ? (void)0 : LogImpl<(LogTag ::_gc), (LogTag::_ref), (LogTag::__NO_TAG), (LogTag::__NO_TAG ), (LogTag::__NO_TAG), (LogTag::__NO_TAG)>::write<LogLevel ::Trace>("Thread %d gets list " INTPTR_FORMAT"0x%016" "l" "x", id, p2i(list)); | |||
| 858 | return list; | |||
| 859 | } | |||
| 860 | ||||
| 861 | inline bool ReferenceProcessor::set_discovered_link(HeapWord* discovered_addr, oop next_discovered) { | |||
| 862 | return discovery_is_mt() ? set_discovered_link_mt(discovered_addr, next_discovered) | |||
| 863 | : set_discovered_link_st(discovered_addr, next_discovered); | |||
| 864 | } | |||
| 865 | ||||
| 866 | inline void ReferenceProcessor::add_to_discovered_list(DiscoveredList& refs_list, | |||
| 867 | oop obj, | |||
| 868 | HeapWord* discovered_addr) { | |||
| 869 | oop current_head = refs_list.head(); | |||
| 870 | // Prepare value to put into the discovered field. The last ref must have its | |||
| 871 | // discovered field pointing to itself. | |||
| 872 | oop next_discovered = (current_head != NULL__null) ? current_head : obj; | |||
| 873 | ||||
| 874 | bool added = set_discovered_link(discovered_addr, next_discovered); | |||
| 875 | if (added) { | |||
| 876 | // We can always add the object without synchronization: every thread has its | |||
| 877 | // own list head. | |||
| 878 | refs_list.add_as_head(obj); | |||
| 879 | log_develop_trace(gc, ref)(!(LogImpl<(LogTag::_gc), (LogTag::_ref), (LogTag::__NO_TAG ), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG) >::is_level(LogLevel::Trace))) ? (void)0 : LogImpl<(LogTag ::_gc), (LogTag::_ref), (LogTag::__NO_TAG), (LogTag::__NO_TAG ), (LogTag::__NO_TAG), (LogTag::__NO_TAG)>::write<LogLevel ::Trace>("Discovered reference (%s) (" INTPTR_FORMAT"0x%016" "l" "x" ": %s)", | |||
| 880 | discovery_is_mt() ? "mt" : "st", p2i(obj), obj->klass()->internal_name()); | |||
| 881 | } else { | |||
| 882 | log_develop_trace(gc, ref)(!(LogImpl<(LogTag::_gc), (LogTag::_ref), (LogTag::__NO_TAG ), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG) >::is_level(LogLevel::Trace))) ? (void)0 : LogImpl<(LogTag ::_gc), (LogTag::_ref), (LogTag::__NO_TAG), (LogTag::__NO_TAG ), (LogTag::__NO_TAG), (LogTag::__NO_TAG)>::write<LogLevel ::Trace>("Already discovered reference (mt) (" INTPTR_FORMAT"0x%016" "l" "x" ": %s)", | |||
| 883 | p2i(obj), obj->klass()->internal_name()); | |||
| 884 | } | |||
| 885 | } | |||
| 886 | ||||
| 887 | inline bool ReferenceProcessor::set_discovered_link_st(HeapWord* discovered_addr, | |||
| 888 | oop next_discovered) { | |||
| 889 | assert(!discovery_is_mt(), "must be")do { if (!(!discovery_is_mt())) { (*g_assert_poison) = 'X';; report_vm_error ("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 889, "assert(" "!discovery_is_mt()" ") failed", "must be"); ::breakpoint(); } } while (0); | |||
| 890 | ||||
| 891 | if (discovery_is_stw()) { | |||
| 892 | // Do a raw store here: the field will be visited later when processing | |||
| 893 | // the discovered references. | |||
| 894 | RawAccess<>::oop_store(discovered_addr, next_discovered); | |||
| 895 | } else { | |||
| 896 | HeapAccess<AS_NO_KEEPALIVE>::oop_store(discovered_addr, next_discovered); | |||
| 897 | } | |||
| 898 | // Always successful. | |||
| 899 | return true; | |||
| 900 | } | |||
| 901 | ||||
| 902 | inline bool ReferenceProcessor::set_discovered_link_mt(HeapWord* discovered_addr, | |||
| 903 | oop next_discovered) { | |||
| 904 | assert(discovery_is_mt(), "must be")do { if (!(discovery_is_mt())) { (*g_assert_poison) = 'X';; report_vm_error ("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 904, "assert(" "discovery_is_mt()" ") failed", "must be"); :: breakpoint(); } } while (0); | |||
| 905 | ||||
| 906 | // We must make sure this object is only enqueued once. Try to CAS into the discovered_addr. | |||
| 907 | oop retest; | |||
| 908 | if (discovery_is_stw()) { | |||
| 909 | // Try a raw store here, still making sure that we enqueue only once: the field | |||
| 910 | // will be visited later when processing the discovered references. | |||
| 911 | retest = RawAccess<>::oop_atomic_cmpxchg(discovered_addr, oop(NULL__null), next_discovered); | |||
| 912 | } else { | |||
| 913 | retest = HeapAccess<AS_NO_KEEPALIVE>::oop_atomic_cmpxchg(discovered_addr, oop(NULL__null), next_discovered); | |||
| 914 | } | |||
| 915 | return retest == NULL__null; | |||
| 916 | } | |||
| 917 | ||||
#ifndef PRODUCT
// Concurrent discovery might allow us to observe j.l.References with NULL
// referents, being those cleared concurrently by mutators during (or after) discovery.
// Debug-only sanity check: the referent slot of obj must hold a valid oop.
// A NULL referent is tolerated only under concurrent discovery (see above).
void ReferenceProcessor::verify_referent(oop obj) {
  bool concurrent = discovery_is_concurrent();
  // Read the referent without keeping it alive; this is a verification-only read.
  oop referent = java_lang_ref_Reference::unknown_referent_no_keepalive(obj);
  assert(concurrent ? oopDesc::is_oop_or_null(referent) : oopDesc::is_oop(referent),do { if (!(concurrent ? oopDesc::is_oop_or_null(referent) : oopDesc ::is_oop(referent))) { (*g_assert_poison) = 'X';; report_vm_error ("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 927, "assert(" "concurrent ? oopDesc::is_oop_or_null(referent) : oopDesc::is_oop(referent)" ") failed", "Bad referent " "0x%016" "l" "x" " found in Reference " "0x%016" "l" "x" " during %sconcurrent discovery ", p2i(referent ), p2i(obj), concurrent ? "" : "non-"); ::breakpoint(); } } while (0)
         "Bad referent " INTPTR_FORMAT " found in Reference "do { if (!(concurrent ? oopDesc::is_oop_or_null(referent) : oopDesc ::is_oop(referent))) { (*g_assert_poison) = 'X';; report_vm_error ("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 927, "assert(" "concurrent ? oopDesc::is_oop_or_null(referent) : oopDesc::is_oop(referent)" ") failed", "Bad referent " "0x%016" "l" "x" " found in Reference " "0x%016" "l" "x" " during %sconcurrent discovery ", p2i(referent ), p2i(obj), concurrent ? "" : "non-"); ::breakpoint(); } } while (0)
         INTPTR_FORMAT " during %sconcurrent discovery ",do { if (!(concurrent ? oopDesc::is_oop_or_null(referent) : oopDesc ::is_oop(referent))) { (*g_assert_poison) = 'X';; report_vm_error ("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 927, "assert(" "concurrent ? oopDesc::is_oop_or_null(referent) : oopDesc::is_oop(referent)" ") failed", "Bad referent " "0x%016" "l" "x" " found in Reference " "0x%016" "l" "x" " during %sconcurrent discovery ", p2i(referent ), p2i(obj), concurrent ? "" : "non-"); ::breakpoint(); } } while (0)
         p2i(referent), p2i(obj), concurrent ? "" : "non-")do { if (!(concurrent ? oopDesc::is_oop_or_null(referent) : oopDesc ::is_oop(referent))) { (*g_assert_poison) = 'X';; report_vm_error ("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 927, "assert(" "concurrent ? oopDesc::is_oop_or_null(referent) : oopDesc::is_oop(referent)" ") failed", "Bad referent " "0x%016" "l" "x" " found in Reference " "0x%016" "l" "x" " during %sconcurrent discovery ", p2i(referent ), p2i(obj), concurrent ? "" : "non-"); ::breakpoint(); } } while (0);
}
#endif
| 930 | ||||
| 931 | bool ReferenceProcessor::is_subject_to_discovery(oop const obj) const { | |||
| 932 | return _is_subject_to_discovery->do_object_b(obj); | |||
| 933 | } | |||
| 934 | ||||
| 935 | // We mention two of several possible choices here: | |||
| 936 | // #0: if the reference object is not in the "originating generation" | |||
| 937 | // (or part of the heap being collected, indicated by our "span") | |||
| 938 | // we don't treat it specially (i.e. we scan it as we would | |||
| 939 | // a normal oop, treating its references as strong references). | |||
| 940 | // This means that references can't be discovered unless their | |||
| 941 | // referent is also in the same span. This is the simplest, | |||
| 942 | // most "local" and most conservative approach, albeit one | |||
| 943 | // that may cause weak references to be enqueued least promptly. | |||
| 944 | // We call this choice the "ReferenceBasedDiscovery" policy. | |||
| 945 | // #1: the reference object may be in any generation (span), but if | |||
| 946 | // the referent is in the generation (span) being currently collected | |||
| 947 | // then we can discover the reference object, provided | |||
| 948 | // the object has not already been discovered by | |||
| 949 | // a different concurrently running discoverer (as may be the | |||
| 950 | // case, for instance, if the reference object is in G1 old gen and | |||
| 951 | // the referent in G1 young gen), and provided the processing | |||
| 952 | // of this reference object by the current collector will | |||
| 953 | // appear atomically to every other discoverer in the system. | |||
| 954 | // (Thus, for instance, a concurrent discoverer may not | |||
| 955 | // discover references in other generations even if the | |||
| 956 | // referent is in its own generation). This policy may, | |||
| 957 | // in certain cases, enqueue references somewhat sooner than | |||
| 958 | // might Policy #0 above, but at marginally increased cost | |||
| 959 | // and complexity in processing these references. | |||
| 960 | // We call this choice the "ReferentBasedDiscovery" policy. | |||
// Decide whether to "discover" (enqueue on one of our discovered lists) the
// given Reference object of type rt. Returns true if the reference was
// discovered here (or was already discovered and needs no strong treatment),
// false if the caller should treat it like a regular object with strong refs.
bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
  // Make sure we are discovering refs (rather than processing discovered refs).
  if (!_discovering_refs || !RegisterReferences) {
    return false;
  }

  if ((rt == REF_FINAL) && (java_lang_ref_Reference::next(obj) != NULL__null)) {
    // Don't rediscover non-active FinalReferences.
    return false;
  }

  if (RefDiscoveryPolicy == ReferenceBasedDiscovery &&
      !is_subject_to_discovery(obj)) {
    // Reference is not in the originating generation;
    // don't treat it specially (i.e. we want to scan it as a normal
    // object with strong references).
    return false;
  }

  // We only discover references whose referents are not (yet)
  // known to be strongly reachable.
  if (is_alive_non_header() != NULL__null) {
    verify_referent(obj);
    oop referent = java_lang_ref_Reference::unknown_referent_no_keepalive(obj);
    if (is_alive_non_header()->do_object_b(referent)) {
      return false; // referent is reachable
    }
  }
  if (rt == REF_SOFT) {
    // For soft refs we can decide now if these are not
    // current candidates for clearing, in which case we
    // can mark through them now, rather than delaying that
    // to the reference-processing phase. Since all current
    // time-stamp policies advance the soft-ref clock only
    // at a full collection cycle, this is always currently
    // accurate.
    if (!_current_soft_ref_policy->should_clear_reference(obj, _soft_ref_timestamp_clock)) {
      return false;
    }
  }

  ResourceMark rm; // Needed for tracing.

  HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr_raw(obj);
  const oop discovered = java_lang_ref_Reference::discovered(obj);
  assert(oopDesc::is_oop_or_null(discovered), "Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered))do { if (!(oopDesc::is_oop_or_null(discovered))) { (*g_assert_poison ) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 1006, "assert(" "oopDesc::is_oop_or_null(discovered)" ") failed" , "Expected an oop or NULL for discovered field at " "0x%016" "l" "x", p2i(discovered)); ::breakpoint(); } } while (0);
  if (discovered != NULL__null) {
    // The reference has already been discovered...
    log_develop_trace(gc, ref)(!(LogImpl<(LogTag::_gc), (LogTag::_ref), (LogTag::__NO_TAG ), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG) >::is_level(LogLevel::Trace))) ? (void)0 : LogImpl<(LogTag ::_gc), (LogTag::_ref), (LogTag::__NO_TAG), (LogTag::__NO_TAG ), (LogTag::__NO_TAG), (LogTag::__NO_TAG)>::write<LogLevel ::Trace>("Already discovered reference (" INTPTR_FORMAT"0x%016" "l" "x" ": %s)",
                              p2i(obj), obj->klass()->internal_name());
    if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
      // assumes that an object is not processed twice;
      // if it's been already discovered it must be on another
      // generation's discovered list; so we won't discover it.
      return false;
    } else {
      assert(RefDiscoveryPolicy == ReferenceBasedDiscovery,do { if (!(RefDiscoveryPolicy == ReferenceBasedDiscovery)) { ( *g_assert_poison) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 1018, "assert(" "RefDiscoveryPolicy == ReferenceBasedDiscovery" ") failed", "Unrecognized policy"); ::breakpoint(); } } while (0)
             "Unrecognized policy")do { if (!(RefDiscoveryPolicy == ReferenceBasedDiscovery)) { ( *g_assert_poison) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 1018, "assert(" "RefDiscoveryPolicy == ReferenceBasedDiscovery" ") failed", "Unrecognized policy"); ::breakpoint(); } } while (0);
      // Check assumption that an object is not potentially
      // discovered twice except by concurrent collectors that potentially
      // trace the same Reference object twice.
      assert(UseG1GC, "Only possible with a concurrent marking collector")do { if (!(UseG1GC)) { (*g_assert_poison) = 'X';; report_vm_error ("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 1022, "assert(" "UseG1GC" ") failed", "Only possible with a concurrent marking collector" ); ::breakpoint(); } } while (0);
      return true;
    }
  }

  if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
    verify_referent(obj);
    // Discover if and only if EITHER:
    // .. reference is in our span, OR
    // .. we are a stw discoverer and referent is in our span
    if (is_subject_to_discovery(obj) ||
        (discovery_is_stw() &&
         is_subject_to_discovery(java_lang_ref_Reference::unknown_referent_no_keepalive(obj)))) {
      // Intentionally empty: the discovery criterion holds, fall through
      // to the discovery code below.
    } else {
      return false;
    }
  } else {
    assert(RefDiscoveryPolicy == ReferenceBasedDiscovery &&do { if (!(RefDiscoveryPolicy == ReferenceBasedDiscovery && is_subject_to_discovery(obj))) { (*g_assert_poison) = 'X';; report_vm_error ("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 1040, "assert(" "RefDiscoveryPolicy == ReferenceBasedDiscovery && is_subject_to_discovery(obj)" ") failed", "code inconsistency"); ::breakpoint(); } } while (0)
           is_subject_to_discovery(obj), "code inconsistency")do { if (!(RefDiscoveryPolicy == ReferenceBasedDiscovery && is_subject_to_discovery(obj))) { (*g_assert_poison) = 'X';; report_vm_error ("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 1040, "assert(" "RefDiscoveryPolicy == ReferenceBasedDiscovery && is_subject_to_discovery(obj)" ") failed", "code inconsistency"); ::breakpoint(); } } while (0);
  }

  // Get the right type of discovered queue head.
  DiscoveredList* list = get_discovered_list(rt);
  if (list == NULL__null) {
    return false; // nothing special needs to be done
  }

  add_to_discovered_list(*list, obj, discovered_addr);

  assert(oopDesc::is_oop(obj), "Discovered a bad reference")do { if (!(oopDesc::is_oop(obj))) { (*g_assert_poison) = 'X'; ; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 1051, "assert(" "oopDesc::is_oop(obj)" ") failed", "Discovered a bad reference" ); ::breakpoint(); } } while (0);
  verify_referent(obj);
  return true;
}
| 1055 | ||||
| 1056 | void ReferenceProcessor::preclean_discovered_references(BoolObjectClosure* is_alive, | |||
| 1057 | EnqueueDiscoveredFieldClosure* enqueue, | |||
| 1058 | YieldClosure* yield, | |||
| 1059 | GCTimer* gc_timer) { | |||
| 1060 | // These lists can be handled here in any order and, indeed, concurrently. | |||
| 1061 | ||||
| 1062 | // Soft references | |||
| 1063 | { | |||
| 1064 | GCTraceTime(Debug, gc, ref)GCTraceTimeWrapper<LogLevel::Debug, (LogTag::_gc), (LogTag ::_ref), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG ), (LogTag::__NO_TAG)> tm("Preclean SoftReferences", gc_timer); | |||
| 1065 | log_reflist("SoftRef before: ", _discoveredSoftRefs, _max_num_queues); | |||
| 1066 | for (uint i = 0; i < _max_num_queues; i++) { | |||
| 1067 | if (yield->should_return()) { | |||
| 1068 | return; | |||
| 1069 | } | |||
| 1070 | if (preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive, | |||
| 1071 | enqueue, yield)) { | |||
| 1072 | log_reflist("SoftRef abort: ", _discoveredSoftRefs, _max_num_queues); | |||
| 1073 | return; | |||
| 1074 | } | |||
| 1075 | } | |||
| 1076 | log_reflist("SoftRef after: ", _discoveredSoftRefs, _max_num_queues); | |||
| 1077 | } | |||
| 1078 | ||||
| 1079 | // Weak references | |||
| 1080 | { | |||
| 1081 | GCTraceTime(Debug, gc, ref)GCTraceTimeWrapper<LogLevel::Debug, (LogTag::_gc), (LogTag ::_ref), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG ), (LogTag::__NO_TAG)> tm("Preclean WeakReferences", gc_timer); | |||
| 1082 | log_reflist("WeakRef before: ", _discoveredWeakRefs, _max_num_queues); | |||
| 1083 | for (uint i = 0; i < _max_num_queues; i++) { | |||
| 1084 | if (yield->should_return()) { | |||
| 1085 | return; | |||
| 1086 | } | |||
| 1087 | if (preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive, | |||
| 1088 | enqueue, yield)) { | |||
| 1089 | log_reflist("WeakRef abort: ", _discoveredWeakRefs, _max_num_queues); | |||
| 1090 | return; | |||
| 1091 | } | |||
| 1092 | } | |||
| 1093 | log_reflist("WeakRef after: ", _discoveredWeakRefs, _max_num_queues); | |||
| 1094 | } | |||
| 1095 | ||||
| 1096 | // Final references | |||
| 1097 | { | |||
| 1098 | GCTraceTime(Debug, gc, ref)GCTraceTimeWrapper<LogLevel::Debug, (LogTag::_gc), (LogTag ::_ref), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG ), (LogTag::__NO_TAG)> tm("Preclean FinalReferences", gc_timer); | |||
| 1099 | log_reflist("FinalRef before: ", _discoveredFinalRefs, _max_num_queues); | |||
| 1100 | for (uint i = 0; i < _max_num_queues; i++) { | |||
| 1101 | if (yield->should_return()) { | |||
| 1102 | return; | |||
| 1103 | } | |||
| 1104 | if (preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive, | |||
| 1105 | enqueue, yield)) { | |||
| 1106 | log_reflist("FinalRef abort: ", _discoveredFinalRefs, _max_num_queues); | |||
| 1107 | return; | |||
| 1108 | } | |||
| 1109 | } | |||
| 1110 | log_reflist("FinalRef after: ", _discoveredFinalRefs, _max_num_queues); | |||
| 1111 | } | |||
| 1112 | ||||
| 1113 | // Phantom references | |||
| 1114 | { | |||
| 1115 | GCTraceTime(Debug, gc, ref)GCTraceTimeWrapper<LogLevel::Debug, (LogTag::_gc), (LogTag ::_ref), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG ), (LogTag::__NO_TAG)> tm("Preclean PhantomReferences", gc_timer); | |||
| 1116 | log_reflist("PhantomRef before: ", _discoveredPhantomRefs, _max_num_queues); | |||
| 1117 | for (uint i = 0; i < _max_num_queues; i++) { | |||
| 1118 | if (yield->should_return()) { | |||
| 1119 | return; | |||
| 1120 | } | |||
| 1121 | if (preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive, | |||
| 1122 | enqueue, yield)) { | |||
| 1123 | log_reflist("PhantomRef abort: ", _discoveredPhantomRefs, _max_num_queues); | |||
| 1124 | return; | |||
| 1125 | } | |||
| 1126 | } | |||
| 1127 | log_reflist("PhantomRef after: ", _discoveredPhantomRefs, _max_num_queues); | |||
| 1128 | } | |||
| 1129 | } | |||
| 1130 | ||||
// Walk one discovered list and unlink entries that need no further reference
// processing: those whose referent slot is already NULL and those whose
// referent 'is_alive' reports as strongly reachable. No keep-alive closure is
// used, so referents are not revived here. Returns true if the walk was
// aborted early by a fine-grain yield request, false when the list was fully
// processed.
bool ReferenceProcessor::preclean_discovered_reflist(DiscoveredList& refs_list,
                                                     BoolObjectClosure* is_alive,
                                                     EnqueueDiscoveredFieldClosure* enqueue,
                                                     YieldClosure* yield) {
  DiscoveredListIterator iter(refs_list, nullptr /* keep_alive */, is_alive, enqueue);
  while (iter.has_next()) {
    // Check for a fine-grain yield between individual references.
    if (yield->should_return_fine_grain()) {
      return true;
    }
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */)true);
    if (iter.referent() == nullptr) {
      // Referent has already been cleared (e.g. by a mutator): drop the entry.
      log_preclean_ref(iter, "cleared");
      iter.remove();
      iter.move_to_next();
    } else if (iter.is_referent_alive()) {
      // Referent is strongly reachable: no reference processing needed, drop it.
      log_preclean_ref(iter, "reachable");
      iter.remove();
      iter.move_to_next();
    } else {
      // Keep the entry for the reference-processing phase.
      iter.next();
    }
  }

  if (iter.processed() > 0) {
    log_develop_trace(gc, ref)(!(LogImpl<(LogTag::_gc), (LogTag::_ref), (LogTag::__NO_TAG ), (LogTag::__NO_TAG), (LogTag::__NO_TAG), (LogTag::__NO_TAG) >::is_level(LogLevel::Trace))) ? (void)0 : LogImpl<(LogTag ::_gc), (LogTag::_ref), (LogTag::__NO_TAG), (LogTag::__NO_TAG ), (LogTag::__NO_TAG), (LogTag::__NO_TAG)>::write<LogLevel ::Trace>(" Dropped " SIZE_FORMAT"%" "l" "u" " Refs out of " SIZE_FORMAT"%" "l" "u" " Refs in discovered list " INTPTR_FORMAT"0x%016" "l" "x",
                              iter.removed(), iter.processed(), p2i(&refs_list));
  }
  return false;
}
| 1160 | ||||
| 1161 | const char* ReferenceProcessor::list_name(uint i) { | |||
| 1162 | assert(i <= _max_num_queues * number_of_subclasses_of_ref(),do { if (!(i <= _max_num_queues * number_of_subclasses_of_ref ())) { (*g_assert_poison) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 1163, "assert(" "i <= _max_num_queues * number_of_subclasses_of_ref()" ") failed", "Out of bounds index"); ::breakpoint(); } } while (0) | |||
| 1163 | "Out of bounds index")do { if (!(i <= _max_num_queues * number_of_subclasses_of_ref ())) { (*g_assert_poison) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 1163, "assert(" "i <= _max_num_queues * number_of_subclasses_of_ref()" ") failed", "Out of bounds index"); ::breakpoint(); } } while (0); | |||
| 1164 | ||||
| 1165 | int j = i / _max_num_queues; | |||
| 1166 | switch (j) { | |||
| 1167 | case 0: return "SoftRef"; | |||
| 1168 | case 1: return "WeakRef"; | |||
| 1169 | case 2: return "FinalRef"; | |||
| 1170 | case 3: return "PhantomRef"; | |||
| 1171 | } | |||
| 1172 | ShouldNotReachHere()do { (*g_assert_poison) = 'X';; report_should_not_reach_here( "/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 1172); ::breakpoint(); } while (0); | |||
| 1173 | return NULL__null; | |||
| 1174 | } | |||
| 1175 | ||||
| 1176 | uint RefProcMTDegreeAdjuster::ergo_proc_thread_count(size_t ref_count, | |||
| 1177 | uint max_threads, | |||
| 1178 | RefProcPhases phase) const { | |||
| 1179 | assert(0 < max_threads, "must allow at least one thread")do { if (!(0 < max_threads)) { (*g_assert_poison) = 'X';; report_vm_error ("/home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/referenceProcessor.cpp" , 1179, "assert(" "0 < max_threads" ") failed", "must allow at least one thread" ); ::breakpoint(); } } while (0); | |||
| 1180 | ||||
| 1181 | if (use_max_threads(phase) || (ReferencesPerThread == 0)) { | |||
| 1182 | return max_threads; | |||
| 1183 | } | |||
| 1184 | ||||
| 1185 | size_t thread_count = 1 + (ref_count / ReferencesPerThread); | |||
| 1186 | return (uint)MIN3(thread_count, | |||
| 1187 | static_cast<size_t>(max_threads), | |||
| 1188 | (size_t)os::active_processor_count()); | |||
| 1189 | } | |||
| 1190 | ||||
| 1191 | bool RefProcMTDegreeAdjuster::use_max_threads(RefProcPhases phase) const { | |||
| 1192 | // Even a small number of references in this phase could produce large amounts of work. | |||
| 1193 | return phase == ReferenceProcessor::KeepAliveFinalRefsPhase; | |||
| 1194 | } | |||
| 1195 | ||||
// Scoped (RAII) adjustment of the reference processor's active MT degree for
// one processing phase, sized ergonomically from the number of discovered
// references; the destructor restores the saved degree.
RefProcMTDegreeAdjuster::RefProcMTDegreeAdjuster(ReferenceProcessor* rp,
                                                 RefProcPhases phase,
                                                 size_t ref_count):
  _rp(rp),
  _saved_num_queues(_rp->num_queues()) {  // remember the degree to restore in the destructor
  uint workers = ergo_proc_thread_count(ref_count, _rp->num_queues(), phase);
  _rp->set_active_mt_degree(workers);
}
| 1204 | ||||
// RAII cleanup: restore the MT degree that was saved in the constructor.
RefProcMTDegreeAdjuster::~RefProcMTDegreeAdjuster() {
  // Revert to previous status.
  _rp->set_active_mt_degree(_saved_num_queues);
}