File: | jdk/src/hotspot/share/gc/serial/tenuredGeneration.cpp |
Warning: | line 115, column 16: Value stored to 'capacity_after_gc' during its initialization is never read |
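The diagnostic flags a dead store: line 115 initializes the local 'capacity_after_gc' from capacity(), but the value is never read, because the assert at lines 119-121 calls capacity() again rather than using the local. Since CardGeneration::compute_new_size() can resize the generation, the assert has to look at the post-resize capacity anyway, so the most natural resolution is simply to drop the unused local. A minimal sketch of that change (an illustration only, not necessarily the upstream fix) is:

void TenuredGeneration::compute_new_size() {
  assert_locked_or_safepoint(Heap_lock);

  // Record how much is live after the collection; capacity() is
  // re-read below, so no separate 'capacity_after_gc' local is needed.
  const size_t used_after_gc = used();

  CardGeneration::compute_new_size();

  assert(used() == used_after_gc && used_after_gc <= capacity(),
         "used: " SIZE_FORMAT " used_after_gc: " SIZE_FORMAT
         " capacity: " SIZE_FORMAT, used(), used_after_gc, capacity());
}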
1 | /* |
2 | * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved. |
3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 | * |
5 | * This code is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 only, as |
7 | * published by the Free Software Foundation. |
8 | * |
9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
12 | * version 2 for more details (a copy is included in the LICENSE file that |
13 | * accompanied this code). |
14 | * |
15 | * You should have received a copy of the GNU General Public License version |
16 | * 2 along with this work; if not, write to the Free Software Foundation, |
17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
18 | * |
19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
20 | * or visit www.oracle.com if you need additional information or have any |
21 | * questions. |
22 | * |
23 | */ |
24 | |
25 | #include "precompiled.hpp" |
26 | #include "gc/serial/genMarkSweep.hpp" |
27 | #include "gc/serial/tenuredGeneration.inline.hpp" |
28 | #include "gc/shared/blockOffsetTable.inline.hpp" |
29 | #include "gc/shared/cardGeneration.inline.hpp" |
30 | #include "gc/shared/collectorCounters.hpp" |
31 | #include "gc/shared/gcTimer.hpp" |
32 | #include "gc/shared/gcTrace.hpp" |
33 | #include "gc/shared/genCollectedHeap.hpp" |
34 | #include "gc/shared/genOopClosures.inline.hpp" |
35 | #include "gc/shared/generationSpec.hpp" |
36 | #include "gc/shared/space.hpp" |
37 | #include "logging/log.hpp" |
38 | #include "memory/allocation.inline.hpp" |
39 | #include "oops/oop.inline.hpp" |
40 | #include "runtime/java.hpp" |
41 | #include "utilities/macros.hpp" |
42 | |
43 | TenuredGeneration::TenuredGeneration(ReservedSpace rs, |
44 | size_t initial_byte_size, |
45 | size_t min_byte_size, |
46 | size_t max_byte_size, |
47 | CardTableRS* remset) : |
48 | CardGeneration(rs, initial_byte_size, remset) |
49 | { |
50 | HeapWord* bottom = (HeapWord*) _virtual_space.low(); |
51 | HeapWord* end = (HeapWord*) _virtual_space.high(); |
52 | _the_space = new TenuredSpace(_bts, MemRegion(bottom, end)); |
53 | _the_space->reset_saved_mark(); |
54 | // If we don't shrink the heap in steps, '_shrink_factor' is always 100%. |
55 | _shrink_factor = ShrinkHeapInSteps ? 0 : 100; |
56 | _capacity_at_prologue = 0; |
57 | |
58 | _gc_stats = new GCStats(); |
59 | |
60 | // initialize performance counters |
61 | |
62 | const char* gen_name = "old"; |
63 | // Generation Counters -- generation 1, 1 subspace |
64 | _gen_counters = new GenerationCounters(gen_name, 1, 1, |
65 | min_byte_size, max_byte_size, &_virtual_space); |
66 | |
67 | _gc_counters = new CollectorCounters("Serial full collection pauses", 1); |
68 | |
69 | _space_counters = new CSpaceCounters(gen_name, 0, |
70 | _virtual_space.reserved_size(), |
71 | _the_space, _gen_counters); |
72 | } |
73 | |
74 | void TenuredGeneration::gc_prologue(bool full) { |
75 | _capacity_at_prologue = capacity(); |
76 | _used_at_prologue = used(); |
77 | } |
78 | |
79 | bool TenuredGeneration::should_collect(bool full, |
80 | size_t size, |
81 | bool is_tlab) { |
82 | // This should be one big conditional or (||), but I want to be able to tell |
83 | // why it returns what it returns (without re-evaluating the conditionals |
84 | // in case they aren't idempotent), so I'm doing it this way. |
85 | // DeMorgan says it's okay. |
86 | if (full) { |
87 | log_trace(gc)("TenuredGeneration::should_collect: because full"); |
88 | return true; |
89 | } |
90 | if (should_allocate(size, is_tlab)) { |
91 | log_trace(gc)("TenuredGeneration::should_collect: because should_allocate(" SIZE_FORMAT ")", size); |
92 | return true; |
93 | } |
94 | // If we don't have very much free space. |
95 | // XXX: 10000 should be a percentage of the capacity!!! |
96 | if (free() < 10000) { |
97 | log_trace(gc)("TenuredGeneration::should_collect: because free(): " SIZE_FORMAT, free()); |
98 | return true; |
99 | } |
100 | // If we had to expand to accommodate promotions from the young generation |
101 | if (_capacity_at_prologue < capacity()) { |
102 | log_trace(gc)("TenuredGeneration::should_collect: because_capacity_at_prologue: " SIZE_FORMAT " < capacity(): " SIZE_FORMAT, |
103 | _capacity_at_prologue, capacity()); |
104 | return true; |
105 | } |
106 | |
107 | return false; |
108 | } |
109 | |
110 | void TenuredGeneration::compute_new_size() { |
111 | assert_locked_or_safepoint(Heap_lock); |
112 | |
113 | // Compute some numbers about the state of the heap. |
114 | const size_t used_after_gc = used(); |
115 | const size_t capacity_after_gc = capacity(); |
Value stored to 'capacity_after_gc' during its initialization is never read | |
116 | |
117 | CardGeneration::compute_new_size(); |
118 | |
119 | assert(used() == used_after_gc && used_after_gc <= capacity(), |
120 | "used: " SIZE_FORMAT " used_after_gc: " SIZE_FORMAT |
121 | " capacity: " SIZE_FORMAT, used(), used_after_gc, capacity()); |
122 | } |
123 | |
124 | void TenuredGeneration::update_gc_stats(Generation* current_generation, |
125 | bool full) { |
126 | // If the young generation has been collected, gather any statistics |
127 | // that are of interest at this point. |
128 | bool current_is_young = GenCollectedHeap::heap()->is_young_gen(current_generation); |
129 | if (!full && current_is_young) { |
130 | // Calculate size of data promoted from the young generation |
131 | // before doing the collection. |
132 | size_t used_before_gc = used(); |
133 | |
134 | // If the young gen collection was skipped, then the |
135 | // number of promoted bytes will be 0 and adding it to the |
136 | // average will incorrectly lessen the average. It is, however, |
137 | // also possible that no promotion was needed. |
138 | if (used_before_gc >= _used_at_prologue) { |
139 | size_t promoted_in_bytes = used_before_gc - _used_at_prologue; |
140 | gc_stats()->avg_promoted()->sample(promoted_in_bytes); |
141 | } |
142 | } |
143 | } |
144 | |
145 | void TenuredGeneration::update_counters() { |
146 | if (UsePerfData) { |
147 | _space_counters->update_all(); |
148 | _gen_counters->update_all(); |
149 | } |
150 | } |
151 | |
152 | bool TenuredGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const { |
153 | size_t available = max_contiguous_available(); |
154 | size_t av_promo = (size_t)gc_stats()->avg_promoted()->padded_average(); |
155 | bool res = (available >= av_promo) || (available >= max_promotion_in_bytes); |
156 | |
157 | log_trace(gc)("Tenured: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "), max_promo(" SIZE_FORMAT ")", |
158 | res? "":" not", available, res? ">=":"<", av_promo, max_promotion_in_bytes); |
159 | |
160 | return res; |
161 | } |
162 | |
163 | void TenuredGeneration::collect(bool full, |
164 | bool clear_all_soft_refs, |
165 | size_t size, |
166 | bool is_tlab) { |
167 | GenCollectedHeap* gch = GenCollectedHeap::heap(); |
168 | |
169 | // Temporarily expand the span of our ref processor, so |
170 | // refs discovery is over the entire heap, not just this generation |
171 | ReferenceProcessorSpanMutator |
172 | x(ref_processor(), gch->reserved_region()); |
173 | |
174 | STWGCTimer* gc_timer = GenMarkSweep::gc_timer(); |
175 | gc_timer->register_gc_start(); |
176 | |
177 | SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer(); |
178 | gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start()); |
179 | |
180 | gch->pre_full_gc_dump(gc_timer); |
181 | |
182 | GenMarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs); |
183 | |
184 | gch->post_full_gc_dump(gc_timer); |
185 | |
186 | gc_timer->register_gc_end(); |
187 | |
188 | gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions()); |
189 | } |
190 | |
191 | HeapWord* |
192 | TenuredGeneration::expand_and_allocate(size_t word_size, bool is_tlab) { |
193 | assert(!is_tlab, "TenuredGeneration does not support TLAB allocation"); |
194 | expand(word_size*HeapWordSize, _min_heap_delta_bytes); |
195 | return _the_space->allocate(word_size); |
196 | } |
197 | |
198 | bool TenuredGeneration::expand(size_t bytes, size_t expand_bytes) { |
199 | GCMutexLocker x(ExpandHeap_lock); |
200 | return CardGeneration::expand(bytes, expand_bytes); |
201 | } |
202 | |
203 | size_t TenuredGeneration::unsafe_max_alloc_nogc() const { |
204 | return _the_space->free(); |
205 | } |
206 | |
207 | size_t TenuredGeneration::contiguous_available() const { |
208 | return _the_space->free() + _virtual_space.uncommitted_size(); |
209 | } |
210 | |
211 | void TenuredGeneration::assert_correct_size_change_locking() { |
212 | assert_locked_or_safepoint(ExpandHeap_lock); |
213 | } |
214 | |
215 | // Currently nothing to do. |
216 | void TenuredGeneration::prepare_for_verify() {} |
217 | |
218 | void TenuredGeneration::object_iterate(ObjectClosure* blk) { |
219 | _the_space->object_iterate(blk); |
220 | } |
221 | |
222 | void TenuredGeneration::complete_loaded_archive_space(MemRegion archive_space) { |
223 | // Create the BOT for the archive space. |
224 | TenuredSpace* space = (TenuredSpace*)_the_space; |
225 | space->initialize_threshold(); |
226 | HeapWord* start = archive_space.start(); |
227 | while (start < archive_space.end()) { |
228 | size_t word_size = _the_space->block_size(start); |
229 | space->alloc_block(start, start + word_size); |
230 | start += word_size; |
231 | } |
232 | } |
233 | |
234 | void TenuredGeneration::save_marks() { |
235 | _the_space->set_saved_mark(); |
236 | } |
237 | |
238 | void TenuredGeneration::reset_saved_marks() { |
239 | _the_space->reset_saved_mark(); |
240 | } |
241 | |
242 | bool TenuredGeneration::no_allocs_since_save_marks() { |
243 | return _the_space->saved_mark_at_top(); |
244 | } |
245 | |
246 | void TenuredGeneration::gc_epilogue(bool full) { |
247 | // update the generation and space performance counters |
248 | update_counters(); |
249 | if (ZapUnusedHeapArea) { |
250 | _the_space->check_mangled_unused_area_complete(); |
251 | } |
252 | } |
253 | |
254 | void TenuredGeneration::record_spaces_top() { |
255 | assert(ZapUnusedHeapArea, "Not mangling unused space"); |
256 | _the_space->set_top_for_allocations(); |
257 | } |
258 | |
259 | void TenuredGeneration::verify() { |
260 | _the_space->verify(); |
261 | } |
262 | |
263 | void TenuredGeneration::print_on(outputStream* st) const { |
264 | Generation::print_on(st); |
265 | st->print(" the"); |
266 | _the_space->print_on(st); |
267 | } |