File: jdk/src/hotspot/share/services/memReporter.cpp
Warning: line 528, column 11: Value stored to 'class_count_diff' during its initialization is never read
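The dead store flagged above is in MemSummaryDiffReporter::diff_summary_of_type() (source line 528 below): class_count_diff is initialized from the difference of the two baselines, but the if statement that follows recomputes the same difference inline and never reads the variable. A minimal sketch of one possible cleanup is shown here; it simply reuses the stored value, mirroring the thread_count_diff pattern used a few lines later in the same function. This is an illustration only, not the actual upstream patch (deleting the unused variable would be an equally valid fix).

    // Sketch of one possible fix (illustrative, not the upstream change):
    // read the stored difference instead of recomputing it.
    out->print("%27s (classes #" SIZE_FORMAT "", " ", _current_baseline.class_count());
    int class_count_diff = (int)(_current_baseline.class_count() -
                                 _early_baseline.class_count());
    if (class_count_diff != 0) {
      out->print(" %+d", class_count_diff);
    }
    out->print_cr(")");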
1 | /* |
2 | * Copyright (c) 2012, 2021, Oracle and/or its affiliates. All rights reserved. |
3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 | * |
5 | * This code is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 only, as |
7 | * published by the Free Software Foundation. |
8 | * |
9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
12 | * version 2 for more details (a copy is included in the LICENSE file that |
13 | * accompanied this code). |
14 | * |
15 | * You should have received a copy of the GNU General Public License version |
16 | * 2 along with this work; if not, write to the Free Software Foundation, |
17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
18 | * |
19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
20 | * or visit www.oracle.com if you need additional information or have any |
21 | * questions. |
22 | * |
23 | */ |
24 | #include "precompiled.hpp" |
25 | #include "memory/allocation.hpp" |
26 | #include "memory/metaspace.hpp" |
27 | #include "memory/metaspaceUtils.hpp" |
28 | #include "services/mallocTracker.hpp" |
29 | #include "services/memReporter.hpp" |
30 | #include "services/threadStackTracker.hpp" |
31 | #include "services/virtualMemoryTracker.hpp" |
32 | #include "utilities/globalDefinitions.hpp" |
33 | |
34 | size_t MemReporterBase::reserved_total(const MallocMemory* malloc, const VirtualMemory* vm) const { |
35 | return malloc->malloc_size() + malloc->arena_size() + vm->reserved(); |
36 | } |
37 | |
38 | size_t MemReporterBase::committed_total(const MallocMemory* malloc, const VirtualMemory* vm) const { |
39 | return malloc->malloc_size() + malloc->arena_size() + vm->committed(); |
40 | } |
41 | |
42 | void MemReporterBase::print_total(size_t reserved, size_t committed) const { |
43 | const char* scale = current_scale(); |
44 | output()->print("reserved=" SIZE_FORMAT"%" "l" "u" "%s, committed=" SIZE_FORMAT"%" "l" "u" "%s", |
45 | amount_in_current_scale(reserved), scale, amount_in_current_scale(committed), scale); |
46 | } |
47 | |
48 | void MemReporterBase::print_malloc(size_t amount, size_t count, MEMFLAGS flag) const { |
49 | const char* scale = current_scale(); |
50 | outputStream* out = output(); |
51 | const char* alloc_type = (flag == mtThreadStack) ? "" : "malloc="; |
52 | |
53 | if (flag != mtNone) { |
54 | out->print("(%s" SIZE_FORMAT"%" "l" "u" "%s type=%s", alloc_type, |
55 | amount_in_current_scale(amount), scale, NMTUtil::flag_to_name(flag)); |
56 | } else { |
57 | out->print("(%s" SIZE_FORMAT"%" "l" "u" "%s", alloc_type, |
58 | amount_in_current_scale(amount), scale); |
59 | } |
60 | |
61 | if (count > 0) { |
62 | out->print(" #" SIZE_FORMAT"%" "l" "u" "", count); |
63 | } |
64 | |
65 | out->print(")"); |
66 | } |
67 | |
68 | void MemReporterBase::print_virtual_memory(size_t reserved, size_t committed) const { |
69 | const char* scale = current_scale(); |
70 | output()->print("(mmap: reserved=" SIZE_FORMAT"%" "l" "u" "%s, committed=" SIZE_FORMAT"%" "l" "u" "%s)", |
71 | amount_in_current_scale(reserved), scale, amount_in_current_scale(committed), scale); |
72 | } |
73 | |
74 | void MemReporterBase::print_malloc_line(size_t amount, size_t count) const { |
75 | output()->print("%28s", " "); |
76 | print_malloc(amount, count); |
77 | output()->print_cr(" "); |
78 | } |
79 | |
80 | void MemReporterBase::print_virtual_memory_line(size_t reserved, size_t committed) const { |
81 | output()->print("%28s", " "); |
82 | print_virtual_memory(reserved, committed); |
83 | output()->print_cr(" "); |
84 | } |
85 | |
86 | void MemReporterBase::print_arena_line(size_t amount, size_t count) const { |
87 | const char* scale = current_scale(); |
88 | output()->print_cr("%27s (arena=" SIZE_FORMAT"%" "l" "u" "%s #" SIZE_FORMAT"%" "l" "u" ")", " ", |
89 | amount_in_current_scale(amount), scale, count); |
90 | } |
91 | |
92 | void MemReporterBase::print_virtual_memory_region(const char* type, address base, size_t size) const { |
93 | const char* scale = current_scale(); |
94 | output()->print("[" PTR_FORMAT"0x%016" "l" "x" " - " PTR_FORMAT"0x%016" "l" "x" "] %s " SIZE_FORMAT"%" "l" "u" "%s", |
95 | p2i(base), p2i(base + size), type, amount_in_current_scale(size), scale); |
96 | } |
97 | |
98 | |
99 | void MemSummaryReporter::report() { |
100 | outputStream* out = output(); |
101 | const size_t total_malloced_bytes = _malloc_snapshot->total(); |
102 | const size_t total_mmap_reserved_bytes = _vm_snapshot->total_reserved(); |
103 | const size_t total_mmap_committed_bytes = _vm_snapshot->total_committed(); |
104 | |
105 | size_t total_reserved_amount = total_malloced_bytes + total_mmap_reserved_bytes; |
106 | size_t total_committed_amount = total_malloced_bytes + total_mmap_committed_bytes; |
107 | |
108 | // Overall total |
109 | out->print_cr("\nNative Memory Tracking:\n"); |
110 | |
111 | if (scale() > 1) { |
112 | out->print_cr("(Omitting categories weighting less than 1%s)", current_scale()); |
113 | out->cr(); |
114 | } |
115 | |
116 | out->print("Total: "); |
117 | print_total(total_reserved_amount, total_committed_amount); |
118 | out->cr(); |
119 | out->print_cr(" malloc: " SIZE_FORMAT"%" "l" "u" "%s #" SIZE_FORMAT"%" "l" "u", |
120 | amount_in_current_scale(total_malloced_bytes), current_scale(), |
121 | _malloc_snapshot->total_count()); |
122 | out->print(" mmap: "); |
123 | print_total(total_mmap_reserved_bytes, total_mmap_committed_bytes); |
124 | out->cr(); |
125 | out->cr(); |
126 | |
127 | // Summary by memory type |
128 | for (int index = 0; index < mt_number_of_types; index ++) { |
129 | MEMFLAGS flag = NMTUtil::index_to_flag(index); |
130 | // thread stack is reported as part of thread category |
131 | if (flag == mtThreadStack) continue; |
132 | MallocMemory* malloc_memory = _malloc_snapshot->by_type(flag); |
133 | VirtualMemory* virtual_memory = _vm_snapshot->by_type(flag); |
134 | |
135 | report_summary_of_type(flag, malloc_memory, virtual_memory); |
136 | } |
137 | } |
138 | |
139 | void MemSummaryReporter::report_summary_of_type(MEMFLAGS flag, |
140 | MallocMemory* malloc_memory, VirtualMemory* virtual_memory) { |
141 | |
142 | size_t reserved_amount = reserved_total (malloc_memory, virtual_memory); |
143 | size_t committed_amount = committed_total(malloc_memory, virtual_memory); |
144 | |
145 | // Count thread's native stack in "Thread" category |
146 | if (flag == mtThread) { |
147 | if (ThreadStackTracker::track_as_vm()) { |
148 | const VirtualMemory* thread_stack_usage = |
149 | (const VirtualMemory*)_vm_snapshot->by_type(mtThreadStack); |
150 | reserved_amount += thread_stack_usage->reserved(); |
151 | committed_amount += thread_stack_usage->committed(); |
152 | } else { |
153 | const MallocMemory* thread_stack_usage = |
154 | (const MallocMemory*)_malloc_snapshot->by_type(mtThreadStack); |
155 | reserved_amount += thread_stack_usage->malloc_size(); |
156 | committed_amount += thread_stack_usage->malloc_size(); |
157 | } |
158 | } else if (flag == mtNMT) { |
159 | // Count malloc headers in "NMT" category |
160 | reserved_amount += _malloc_snapshot->malloc_overhead()->size(); |
161 | committed_amount += _malloc_snapshot->malloc_overhead()->size(); |
162 | } |
163 | |
164 | if (amount_in_current_scale(reserved_amount) > 0) { |
165 | outputStream* out = output(); |
166 | const char* scale = current_scale(); |
167 | out->print("-%26s (", NMTUtil::flag_to_name(flag)); |
168 | print_total(reserved_amount, committed_amount); |
169 | out->print_cr(")"); |
170 | |
171 | if (flag == mtClass) { |
172 | // report class count |
173 | out->print_cr("%27s (classes #" SIZE_FORMAT"%" "l" "u" ")", |
174 | " ", (_instance_class_count + _array_class_count)); |
175 | out->print_cr("%27s ( instance classes #" SIZE_FORMAT"%" "l" "u" ", array classes #" SIZE_FORMAT"%" "l" "u" ")", |
176 | " ", _instance_class_count, _array_class_count); |
177 | } else if (flag == mtThread) { |
178 | if (ThreadStackTracker::track_as_vm()) { |
179 | const VirtualMemory* thread_stack_usage = |
180 | _vm_snapshot->by_type(mtThreadStack); |
181 | // report thread count |
182 | out->print_cr("%27s (thread #" SIZE_FORMAT"%" "l" "u" ")", " ", ThreadStackTracker::thread_count()); |
183 | out->print("%27s (stack: ", " "); |
184 | print_total(thread_stack_usage->reserved(), thread_stack_usage->committed()); |
185 | } else { |
186 | MallocMemory* thread_stack_memory = _malloc_snapshot->by_type(mtThreadStack); |
187 | const char* scale = current_scale(); |
188 | // report thread count |
189 | assert(ThreadStackTracker::thread_count() == 0, "Not used"); |
190 | out->print_cr("%27s (thread #" SIZE_FORMAT"%" "l" "u" ")", " ", thread_stack_memory->malloc_count()); |
191 | out->print("%27s (Stack: " SIZE_FORMAT"%" "l" "u" "%s", " ", |
192 | amount_in_current_scale(thread_stack_memory->malloc_size()), scale); |
193 | } |
194 | out->print_cr(")"); |
195 | } |
196 | |
197 | // report malloc'd memory |
198 | if (amount_in_current_scale(malloc_memory->malloc_size()) > 0) { |
199 | // We don't know how many arena chunks are in use, so don't report the count |
200 | size_t count = (flag == mtChunk) ? 0 : malloc_memory->malloc_count(); |
201 | print_malloc_line(malloc_memory->malloc_size(), count); |
202 | } |
203 | |
204 | if (amount_in_current_scale(virtual_memory->reserved()) > 0) { |
205 | print_virtual_memory_line(virtual_memory->reserved(), virtual_memory->committed()); |
206 | } |
207 | |
208 | if (amount_in_current_scale(malloc_memory->arena_size()) > 0) { |
209 | print_arena_line(malloc_memory->arena_size(), malloc_memory->arena_count()); |
210 | } |
211 | |
212 | if (flag == mtNMT && |
213 | amount_in_current_scale(_malloc_snapshot->malloc_overhead()->size()) > 0) { |
214 | out->print_cr("%27s (tracking overhead=" SIZE_FORMAT"%" "l" "u" "%s)", " ", |
215 | amount_in_current_scale(_malloc_snapshot->malloc_overhead()->size()), scale); |
216 | } else if (flag == mtClass) { |
217 | // Metadata information |
218 | report_metadata(Metaspace::NonClassType); |
219 | if (Metaspace::using_class_space()) { |
220 | report_metadata(Metaspace::ClassType); |
221 | } |
222 | } |
223 | out->print_cr(" "); |
224 | } |
225 | } |
226 | |
227 | void MemSummaryReporter::report_metadata(Metaspace::MetadataType type) const { |
228 | assert(type == Metaspace::NonClassType || type == Metaspace::ClassType, |
229 | "Invalid metadata type"); |
230 | const char* name = (type == Metaspace::NonClassType) ? |
231 | "Metadata: " : "Class space:"; |
232 | |
233 | outputStream* out = output(); |
234 | const char* scale = current_scale(); |
235 | const MetaspaceStats stats = MetaspaceUtils::get_statistics(type); |
236 | |
237 | size_t waste = stats.committed() - stats.used(); |
238 | float waste_percentage = stats.committed() > 0 ? (((float)waste * 100)/stats.committed()) : 0.0f; |
239 | |
240 | out->print_cr("%27s ( %s)", " ", name); |
241 | out->print("%27s ( ", " "); |
242 | print_total(stats.reserved(), stats.committed()); |
243 | out->print_cr(")"); |
244 | out->print_cr("%27s ( used=" SIZE_FORMAT"%" "l" "u" "%s)", " ", amount_in_current_scale(stats.used()), scale); |
245 | out->print_cr("%27s ( waste=" SIZE_FORMAT"%" "l" "u" "%s =%2.2f%%)", " ", amount_in_current_scale(waste), |
246 | scale, waste_percentage); |
247 | } |
248 | |
249 | void MemDetailReporter::report_detail() { |
250 | // Start detail report |
251 | outputStream* out = output(); |
252 | out->print_cr("Details:\n"); |
253 | |
254 | int num_omitted = |
255 | report_malloc_sites() + |
256 | report_virtual_memory_allocation_sites(); |
257 | if (num_omitted > 0) { |
258 | assert(scale() > 1, "sanity"); |
259 | out->print_cr("(%d call sites weighting less than 1%s each omitted.)", |
260 | num_omitted, current_scale()); |
261 | out->cr(); |
262 | } |
263 | } |
264 | |
265 | int MemDetailReporter::report_malloc_sites() { |
266 | MallocSiteIterator malloc_itr = _baseline.malloc_sites(MemBaseline::by_size); |
267 | if (malloc_itr.is_empty()) return 0; |
268 | |
269 | outputStream* out = output(); |
270 | |
271 | const MallocSite* malloc_site; |
272 | int num_omitted = 0; |
273 | while ((malloc_site = malloc_itr.next()) != NULL) { |
274 | // Don't report free sites; does not count toward omitted count. |
275 | if (malloc_site->size() == 0) { |
276 | continue; |
277 | } |
278 | // Don't report if site has allocated less than one unit of whatever our scale is |
279 | if (scale() > 1 && amount_in_current_scale(malloc_site->size()) == 0) { |
280 | num_omitted ++; |
281 | continue; |
282 | } |
283 | const NativeCallStack* stack = malloc_site->call_stack(); |
284 | stack->print_on(out); |
285 | out->print("%29s", " "); |
286 | MEMFLAGS flag = malloc_site->flag(); |
287 | assert(NMTUtil::flag_is_valid(flag) && flag != mtNone, |
288 | "Must have a valid memory type"); |
289 | print_malloc(malloc_site->size(), malloc_site->count(),flag); |
290 | out->print_cr("\n"); |
291 | } |
292 | return num_omitted; |
293 | } |
294 | |
295 | int MemDetailReporter::report_virtual_memory_allocation_sites() { |
296 | VirtualMemorySiteIterator virtual_memory_itr = |
297 | _baseline.virtual_memory_sites(MemBaseline::by_size); |
298 | |
299 | if (virtual_memory_itr.is_empty()) return 0; |
300 | |
301 | outputStream* out = output(); |
302 | const VirtualMemoryAllocationSite* virtual_memory_site; |
303 | int num_omitted = 0; |
304 | while ((virtual_memory_site = virtual_memory_itr.next()) != NULL) { |
305 | // Don't report free sites; does not count toward omitted count. |
306 | if (virtual_memory_site->reserved() == 0) { |
307 | continue; |
308 | } |
309 | // Don't report if site has reserved less than one unit of whatever our scale is |
310 | if (scale() > 1 && amount_in_current_scale(virtual_memory_site->reserved()) == 0) { |
311 | num_omitted++; |
312 | continue; |
313 | } |
314 | const NativeCallStack* stack = virtual_memory_site->call_stack(); |
315 | stack->print_on(out); |
316 | out->print("%28s (", " "); |
317 | print_total(virtual_memory_site->reserved(), virtual_memory_site->committed()); |
318 | MEMFLAGS flag = virtual_memory_site->flag(); |
319 | if (flag != mtNone) { |
320 | out->print(" Type=%s", NMTUtil::flag_to_name(flag)); |
321 | } |
322 | out->print_cr(")\n"); |
323 | } |
324 | return num_omitted; |
325 | } |
326 | |
327 | |
328 | void MemDetailReporter::report_virtual_memory_map() { |
329 | // Virtual memory map always in base address order |
330 | VirtualMemoryAllocationIterator itr = _baseline.virtual_memory_allocations(); |
331 | const ReservedMemoryRegion* rgn; |
332 | |
333 | output()->print_cr("Virtual memory map:"); |
334 | while ((rgn = itr.next()) != NULL) { |
335 | report_virtual_memory_region(rgn); |
336 | } |
337 | } |
338 | |
339 | void MemDetailReporter::report_virtual_memory_region(const ReservedMemoryRegion* reserved_rgn) { |
340 | assert(reserved_rgn != NULL, "NULL pointer"); |
341 | |
342 | // Don't report if size is too small |
343 | if (amount_in_current_scale(reserved_rgn->size()) == 0) return; |
344 | |
345 | outputStream* out = output(); |
346 | const char* scale = current_scale(); |
347 | const NativeCallStack* stack = reserved_rgn->call_stack(); |
348 | bool all_committed = reserved_rgn->size() == reserved_rgn->committed_size(); |
349 | const char* region_type = (all_committed ? "reserved and committed" : "reserved"); |
350 | out->print_cr(" "); |
351 | print_virtual_memory_region(region_type, reserved_rgn->base(), reserved_rgn->size()); |
352 | out->print(" for %s", NMTUtil::flag_to_name(reserved_rgn->flag())); |
353 | if (stack->is_empty()) { |
354 | out->print_cr(" "); |
355 | } else { |
356 | out->print_cr(" from"); |
357 | stack->print_on(out, 4); |
358 | } |
359 | |
360 | if (all_committed) { |
361 | CommittedRegionIterator itr = reserved_rgn->iterate_committed_regions(); |
362 | const CommittedMemoryRegion* committed_rgn = itr.next(); |
363 | if (committed_rgn->size() == reserved_rgn->size() && committed_rgn->call_stack()->equals(*stack)) { |
364 | // One region spanning the entire reserved region, with the same stack trace. |
365 | // Don't print this region because the "reserved and committed" line above |
366 | // already indicates that the region is committed. |
367 | assert(itr.next() == NULL, "Unexpectedly more than one regions"); |
368 | return; |
369 | } |
370 | } |
371 | |
372 | CommittedRegionIterator itr = reserved_rgn->iterate_committed_regions(); |
373 | const CommittedMemoryRegion* committed_rgn; |
374 | while ((committed_rgn = itr.next()) != NULL) { |
375 | // Don't report if size is too small |
376 | if (amount_in_current_scale(committed_rgn->size()) == 0) continue; |
377 | stack = committed_rgn->call_stack(); |
378 | out->print("\n\t"); |
379 | print_virtual_memory_region("committed", committed_rgn->base(), committed_rgn->size()); |
380 | if (stack->is_empty()) { |
381 | out->print_cr(" "); |
382 | } else { |
383 | out->print_cr(" from"); |
384 | stack->print_on(out, 12); |
385 | } |
386 | } |
387 | } |
388 | |
389 | void MemSummaryDiffReporter::report_diff() { |
390 | outputStream* out = output(); |
391 | out->print_cr("\nNative Memory Tracking:\n"); |
392 | |
393 | if (scale() > 1) { |
394 | out->print_cr("(Omitting categories weighting less than 1%s)", current_scale()); |
395 | out->cr(); |
396 | } |
397 | |
398 | // Overall diff |
399 | out->print("Total: "); |
400 | print_virtual_memory_diff(_current_baseline.total_reserved_memory(), |
401 | _current_baseline.total_committed_memory(), _early_baseline.total_reserved_memory(), |
402 | _early_baseline.total_committed_memory()); |
403 | |
404 | out->print_cr("\n"); |
405 | |
406 | // Summary diff by memory type |
407 | for (int index = 0; index < mt_number_of_types; index ++) { |
408 | MEMFLAGS flag = NMTUtil::index_to_flag(index); |
409 | // thread stack is reported as part of thread category |
410 | if (flag == mtThreadStack) continue; |
411 | diff_summary_of_type(flag, |
412 | _early_baseline.malloc_memory(flag), |
413 | _early_baseline.virtual_memory(flag), |
414 | _early_baseline.metaspace_stats(), |
415 | _current_baseline.malloc_memory(flag), |
416 | _current_baseline.virtual_memory(flag), |
417 | _current_baseline.metaspace_stats()); |
418 | } |
419 | } |
420 | |
421 | void MemSummaryDiffReporter::print_malloc_diff(size_t current_amount, size_t current_count, |
422 | size_t early_amount, size_t early_count, MEMFLAGS flags) const { |
423 | const char* scale = current_scale(); |
424 | outputStream* out = output(); |
425 | const char* alloc_type = (flags == mtThread) ? "" : "malloc="; |
426 | |
427 | out->print("%s" SIZE_FORMAT"%" "l" "u" "%s", alloc_type, amount_in_current_scale(current_amount), scale); |
428 | // Report type only if it is valid and not under "thread" category |
429 | if (flags != mtNone && flags != mtThread) { |
430 | out->print(" type=%s", NMTUtil::flag_to_name(flags)); |
431 | } |
432 | |
433 | long amount_diff = diff_in_current_scale(current_amount, early_amount); |
434 | if (amount_diff != 0) { |
435 | out->print(" %+ld%s", amount_diff, scale); |
436 | } |
437 | if (current_count > 0) { |
438 | out->print(" #" SIZE_FORMAT"%" "l" "u" "", current_count); |
439 | if (current_count != early_count) { |
440 | out->print(" %+d", (int)(current_count - early_count)); |
441 | } |
442 | } |
443 | } |
444 | |
445 | void MemSummaryDiffReporter::print_arena_diff(size_t current_amount, size_t current_count, |
446 | size_t early_amount, size_t early_count) const { |
447 | const char* scale = current_scale(); |
448 | outputStream* out = output(); |
449 | out->print("arena=" SIZE_FORMAT"%" "l" "u" "%s", amount_in_current_scale(current_amount), scale); |
450 | if (diff_in_current_scale(current_amount, early_amount) != 0) { |
451 | out->print(" %+ld", diff_in_current_scale(current_amount, early_amount)); |
452 | } |
453 | |
454 | out->print(" #" SIZE_FORMAT"%" "l" "u" "", current_count); |
455 | if (current_count != early_count) { |
456 | out->print(" %+d", (int)(current_count - early_count)); |
457 | } |
458 | } |
459 | |
460 | void MemSummaryDiffReporter::print_virtual_memory_diff(size_t current_reserved, size_t current_committed, |
461 | size_t early_reserved, size_t early_committed) const { |
462 | const char* scale = current_scale(); |
463 | outputStream* out = output(); |
464 | out->print("reserved=" SIZE_FORMAT"%" "l" "u" "%s", amount_in_current_scale(current_reserved), scale); |
465 | long reserved_diff = diff_in_current_scale(current_reserved, early_reserved); |
466 | if (reserved_diff != 0) { |
467 | out->print(" %+ld%s", reserved_diff, scale); |
468 | } |
469 | |
470 | out->print(", committed=" SIZE_FORMAT"%" "l" "u" "%s", amount_in_current_scale(current_committed), scale); |
471 | long committed_diff = diff_in_current_scale(current_committed, early_committed); |
472 | if (committed_diff != 0) { |
473 | out->print(" %+ld%s", committed_diff, scale); |
474 | } |
475 | } |
476 | |
477 | |
478 | void MemSummaryDiffReporter::diff_summary_of_type(MEMFLAGS flag, |
479 | const MallocMemory* early_malloc, const VirtualMemory* early_vm, |
480 | const MetaspaceCombinedStats& early_ms, |
481 | const MallocMemory* current_malloc, const VirtualMemory* current_vm, |
482 | const MetaspaceCombinedStats& current_ms) const { |
483 | |
484 | outputStream* out = output(); |
485 | const char* scale = current_scale(); |
486 | |
487 | // Total reserved and committed memory in current baseline |
488 | size_t current_reserved_amount = reserved_total (current_malloc, current_vm); |
489 | size_t current_committed_amount = committed_total(current_malloc, current_vm); |
490 | |
491 | // Total reserved and committed memory in early baseline |
492 | size_t early_reserved_amount = reserved_total(early_malloc, early_vm); |
493 | size_t early_committed_amount = committed_total(early_malloc, early_vm); |
494 | |
495 | // Adjust virtual memory total |
496 | if (flag == mtThread) { |
497 | const VirtualMemory* early_thread_stack_usage = |
498 | _early_baseline.virtual_memory(mtThreadStack); |
499 | const VirtualMemory* current_thread_stack_usage = |
500 | _current_baseline.virtual_memory(mtThreadStack); |
501 | |
502 | early_reserved_amount += early_thread_stack_usage->reserved(); |
503 | early_committed_amount += early_thread_stack_usage->committed(); |
504 | |
505 | current_reserved_amount += current_thread_stack_usage->reserved(); |
506 | current_committed_amount += current_thread_stack_usage->committed(); |
507 | } else if (flag == mtNMT) { |
508 | early_reserved_amount += _early_baseline.malloc_tracking_overhead(); |
509 | early_committed_amount += _early_baseline.malloc_tracking_overhead(); |
510 | |
511 | current_reserved_amount += _current_baseline.malloc_tracking_overhead(); |
512 | current_committed_amount += _current_baseline.malloc_tracking_overhead(); |
513 | } |
514 | |
515 | if (amount_in_current_scale(current_reserved_amount) > 0 || |
516 | diff_in_current_scale(current_reserved_amount, early_reserved_amount) != 0) { |
517 | |
518 | // print summary line |
519 | out->print("-%26s (", NMTUtil::flag_to_name(flag)); |
520 | print_virtual_memory_diff(current_reserved_amount, current_committed_amount, |
521 | early_reserved_amount, early_committed_amount); |
522 | out->print_cr(")"); |
523 | |
524 | // detail lines |
525 | if (flag == mtClass) { |
526 | // report class count |
527 | out->print("%27s (classes #" SIZE_FORMAT"%" "l" "u" "", " ", _current_baseline.class_count()); |
528 | int class_count_diff = (int)(_current_baseline.class_count() - |
Value stored to 'class_count_diff' during its initialization is never read | |
529 | _early_baseline.class_count()); |
530 | if (_current_baseline.class_count() != _early_baseline.class_count()) { |
531 | out->print(" %+d", (int)(_current_baseline.class_count() - _early_baseline.class_count())); |
532 | } |
533 | out->print_cr(")"); |
534 | |
535 | out->print("%27s ( instance classes #" SIZE_FORMAT"%" "l" "u", " ", _current_baseline.instance_class_count()); |
536 | if (_current_baseline.instance_class_count() != _early_baseline.instance_class_count()) { |
537 | out->print(" %+d", (int)(_current_baseline.instance_class_count() - _early_baseline.instance_class_count())); |
538 | } |
539 | out->print(", array classes #" SIZE_FORMAT"%" "l" "u", _current_baseline.array_class_count()); |
540 | if (_current_baseline.array_class_count() != _early_baseline.array_class_count()) { |
541 | out->print(" %+d", (int)(_current_baseline.array_class_count() - _early_baseline.array_class_count())); |
542 | } |
543 | out->print_cr(")"); |
544 | |
545 | } else if (flag == mtThread) { |
546 | // report thread count |
547 | out->print("%27s (thread #" SIZE_FORMAT"%" "l" "u" "", " ", _current_baseline.thread_count()); |
548 | int thread_count_diff = (int)(_current_baseline.thread_count() - |
549 | _early_baseline.thread_count()); |
550 | if (thread_count_diff != 0) { |
551 | out->print(" %+d", thread_count_diff); |
552 | } |
553 | out->print_cr(")"); |
554 | |
555 | out->print("%27s (stack: ", " "); |
556 | if (ThreadStackTracker::track_as_vm()) { |
557 | // report thread stack |
558 | const VirtualMemory* current_thread_stack = |
559 | _current_baseline.virtual_memory(mtThreadStack); |
560 | const VirtualMemory* early_thread_stack = |
561 | _early_baseline.virtual_memory(mtThreadStack); |
562 | |
563 | print_virtual_memory_diff(current_thread_stack->reserved(), current_thread_stack->committed(), |
564 | early_thread_stack->reserved(), early_thread_stack->committed()); |
565 | } else { |
566 | const MallocMemory* current_thread_stack = |
567 | _current_baseline.malloc_memory(mtThreadStack); |
568 | const MallocMemory* early_thread_stack = |
569 | _early_baseline.malloc_memory(mtThreadStack); |
570 | |
571 | print_malloc_diff(current_thread_stack->malloc_size(), current_thread_stack->malloc_count(), |
572 | early_thread_stack->malloc_size(), early_thread_stack->malloc_count(), flag); |
573 | } |
574 | out->print_cr(")"); |
575 | } |
576 | |
577 | // Report malloc'd memory |
578 | size_t current_malloc_amount = current_malloc->malloc_size(); |
579 | size_t early_malloc_amount = early_malloc->malloc_size(); |
580 | if (amount_in_current_scale(current_malloc_amount) > 0 || |
581 | diff_in_current_scale(current_malloc_amount, early_malloc_amount) != 0) { |
582 | out->print("%28s(", " "); |
583 | print_malloc_diff(current_malloc_amount, (flag == mtChunk) ? 0 : current_malloc->malloc_count(), |
584 | early_malloc_amount, early_malloc->malloc_count(), mtNone); |
585 | out->print_cr(")"); |
586 | } |
587 | |
588 | // Report virtual memory |
589 | if (amount_in_current_scale(current_vm->reserved()) > 0 || |
590 | diff_in_current_scale(current_vm->reserved(), early_vm->reserved()) != 0) { |
591 | out->print("%27s (mmap: ", " "); |
592 | print_virtual_memory_diff(current_vm->reserved(), current_vm->committed(), |
593 | early_vm->reserved(), early_vm->committed()); |
594 | out->print_cr(")"); |
595 | } |
596 | |
597 | // Report arena memory |
598 | if (amount_in_current_scale(current_malloc->arena_size()) > 0 || |
599 | diff_in_current_scale(current_malloc->arena_size(), early_malloc->arena_size()) != 0) { |
600 | out->print("%28s(", " "); |
601 | print_arena_diff(current_malloc->arena_size(), current_malloc->arena_count(), |
602 | early_malloc->arena_size(), early_malloc->arena_count()); |
603 | out->print_cr(")"); |
604 | } |
605 | |
606 | // Report native memory tracking overhead |
607 | if (flag == mtNMT) { |
608 | size_t current_tracking_overhead = amount_in_current_scale(_current_baseline.malloc_tracking_overhead()); |
609 | size_t early_tracking_overhead = amount_in_current_scale(_early_baseline.malloc_tracking_overhead()); |
610 | |
611 | out->print("%27s (tracking overhead=" SIZE_FORMAT"%" "l" "u" "%s", " ", |
612 | amount_in_current_scale(_current_baseline.malloc_tracking_overhead()), scale); |
613 | |
614 | long overhead_diff = diff_in_current_scale(_current_baseline.malloc_tracking_overhead(), |
615 | _early_baseline.malloc_tracking_overhead()); |
616 | if (overhead_diff != 0) { |
617 | out->print(" %+ld%s", overhead_diff, scale); |
618 | } |
619 | out->print_cr(")"); |
620 | } else if (flag == mtClass) { |
621 | print_metaspace_diff(current_ms, early_ms); |
622 | } |
623 | out->print_cr(" "); |
624 | } |
625 | } |
626 | |
627 | void MemSummaryDiffReporter::print_metaspace_diff(const MetaspaceCombinedStats& current_ms, |
628 | const MetaspaceCombinedStats& early_ms) const { |
629 | print_metaspace_diff("Metadata", current_ms.non_class_space_stats(), early_ms.non_class_space_stats()); |
630 | if (Metaspace::using_class_space()) { |
631 | print_metaspace_diff("Class space", current_ms.class_space_stats(), early_ms.class_space_stats()); |
632 | } |
633 | } |
634 | |
635 | void MemSummaryDiffReporter::print_metaspace_diff(const char* header, |
636 | const MetaspaceStats& current_stats, |
637 | const MetaspaceStats& early_stats) const { |
638 | outputStream* out = output(); |
639 | const char* scale = current_scale(); |
640 | |
641 | out->print_cr("%27s: ( %s)", " ", header); |
642 | out->print("%27s ( ", " "); |
643 | print_virtual_memory_diff(current_stats.reserved(), |
644 | current_stats.committed(), |
645 | early_stats.reserved(), |
646 | early_stats.committed()); |
647 | out->print_cr(")"); |
648 | |
649 | long diff_used = diff_in_current_scale(current_stats.used(), |
650 | early_stats.used()); |
651 | |
652 | size_t current_waste = current_stats.committed() - current_stats.used(); |
653 | size_t early_waste = early_stats.committed() - early_stats.used(); |
654 | long diff_waste = diff_in_current_scale(current_waste, early_waste); |
655 | |
656 | // Diff used |
657 | out->print("%27s ( used=" SIZE_FORMAT"%" "l" "u" "%s", " ", |
658 | amount_in_current_scale(current_stats.used()), scale); |
659 | if (diff_used != 0) { |
660 | out->print(" %+ld%s", diff_used, scale); |
661 | } |
662 | out->print_cr(")"); |
663 | |
664 | // Diff waste |
665 | const float waste_percentage = current_stats.committed() == 0 ? 0.0f : |
666 | (current_waste * 100.0f) / current_stats.committed(); |
667 | out->print("%27s ( waste=" SIZE_FORMAT"%" "l" "u" "%s =%2.2f%%", " ", |
668 | amount_in_current_scale(current_waste), scale, waste_percentage); |
669 | if (diff_waste != 0) { |
670 | out->print(" %+ld%s", diff_waste, scale); |
671 | } |
672 | out->print_cr(")"); |
673 | } |
674 | |
675 | void MemDetailDiffReporter::report_diff() { |
676 | MemSummaryDiffReporter::report_diff(); |
677 | diff_malloc_sites(); |
678 | diff_virtual_memory_sites(); |
679 | } |
680 | |
681 | void MemDetailDiffReporter::diff_malloc_sites() const { |
682 | MallocSiteIterator early_itr = _early_baseline.malloc_sites(MemBaseline::by_site_and_type); |
683 | MallocSiteIterator current_itr = _current_baseline.malloc_sites(MemBaseline::by_site_and_type); |
684 | |
685 | const MallocSite* early_site = early_itr.next(); |
686 | const MallocSite* current_site = current_itr.next(); |
687 | |
688 | while (early_site != NULL || current_site != NULL) { |
689 | if (early_site == NULL) { |
690 | new_malloc_site(current_site); |
691 | current_site = current_itr.next(); |
692 | } else if (current_site == NULL) { |
693 | old_malloc_site(early_site); |
694 | early_site = early_itr.next(); |
695 | } else { |
696 | int compVal = current_site->call_stack()->compare(*early_site->call_stack()); |
697 | if (compVal < 0) { |
698 | new_malloc_site(current_site); |
699 | current_site = current_itr.next(); |
700 | } else if (compVal > 0) { |
701 | old_malloc_site(early_site); |
702 | early_site = early_itr.next(); |
703 | } else { |
704 | diff_malloc_site(early_site, current_site); |
705 | early_site = early_itr.next(); |
706 | current_site = current_itr.next(); |
707 | } |
708 | } |
709 | } |
710 | } |
711 | |
712 | void MemDetailDiffReporter::diff_virtual_memory_sites() const { |
713 | VirtualMemorySiteIterator early_itr = _early_baseline.virtual_memory_sites(MemBaseline::by_site); |
714 | VirtualMemorySiteIterator current_itr = _current_baseline.virtual_memory_sites(MemBaseline::by_site); |
715 | |
716 | const VirtualMemoryAllocationSite* early_site = early_itr.next(); |
717 | const VirtualMemoryAllocationSite* current_site = current_itr.next(); |
718 | |
719 | while (early_site != NULL || current_site != NULL) { |
720 | if (early_site == NULL) { |
721 | new_virtual_memory_site(current_site); |
722 | current_site = current_itr.next(); |
723 | } else if (current_site == NULL) { |
724 | old_virtual_memory_site(early_site); |
725 | early_site = early_itr.next(); |
726 | } else { |
727 | int compVal = current_site->call_stack()->compare(*early_site->call_stack()); |
728 | if (compVal < 0) { |
729 | new_virtual_memory_site(current_site); |
730 | current_site = current_itr.next(); |
731 | } else if (compVal > 0) { |
732 | old_virtual_memory_site(early_site); |
733 | early_site = early_itr.next(); |
734 | } else { |
735 | diff_virtual_memory_site(early_site, current_site); |
736 | early_site = early_itr.next(); |
737 | current_site = current_itr.next(); |
738 | } |
739 | } |
740 | } |
741 | } |
742 | |
743 | |
744 | void MemDetailDiffReporter::new_malloc_site(const MallocSite* malloc_site) const { |
745 | diff_malloc_site(malloc_site->call_stack(), malloc_site->size(), malloc_site->count(), |
746 | 0, 0, malloc_site->flag()); |
747 | } |
748 | |
749 | void MemDetailDiffReporter::old_malloc_site(const MallocSite* malloc_site) const { |
750 | diff_malloc_site(malloc_site->call_stack(), 0, 0, malloc_site->size(), |
751 | malloc_site->count(), malloc_site->flag()); |
752 | } |
753 | |
754 | void MemDetailDiffReporter::diff_malloc_site(const MallocSite* early, |
755 | const MallocSite* current) const { |
756 | if (early->flag() != current->flag()) { |
757 | // If malloc site type changed, treat it as deallocation of old type and |
758 | // allocation of new type. |
759 | old_malloc_site(early); |
760 | new_malloc_site(current); |
761 | } else { |
762 | diff_malloc_site(current->call_stack(), current->size(), current->count(), |
763 | early->size(), early->count(), early->flag()); |
764 | } |
765 | } |
766 | |
767 | void MemDetailDiffReporter::diff_malloc_site(const NativeCallStack* stack, size_t current_size, |
768 | size_t current_count, size_t early_size, size_t early_count, MEMFLAGS flags) const { |
769 | outputStream* out = output(); |
770 | |
771 | assert(stack != NULL, "NULL stack"); |
772 | |
773 | if (diff_in_current_scale(current_size, early_size) == 0) { |
774 | return; |
775 | } |
776 | |
777 | stack->print_on(out); |
778 | out->print("%28s (", " "); |
779 | print_malloc_diff(current_size, current_count, |
780 | early_size, early_count, flags); |
781 | |
782 | out->print_cr(")\n"); |
783 | } |
784 | |
785 | |
786 | void MemDetailDiffReporter::new_virtual_memory_site(const VirtualMemoryAllocationSite* site) const { |
787 | diff_virtual_memory_site(site->call_stack(), site->reserved(), site->committed(), 0, 0, site->flag()); |
788 | } |
789 | |
790 | void MemDetailDiffReporter::old_virtual_memory_site(const VirtualMemoryAllocationSite* site) const { |
791 | diff_virtual_memory_site(site->call_stack(), 0, 0, site->reserved(), site->committed(), site->flag()); |
792 | } |
793 | |
794 | void MemDetailDiffReporter::diff_virtual_memory_site(const VirtualMemoryAllocationSite* early, |
795 | const VirtualMemoryAllocationSite* current) const { |
796 | assert(early->flag() == current->flag(), "Should be the same"); |
797 | diff_virtual_memory_site(current->call_stack(), current->reserved(), current->committed(), |
798 | early->reserved(), early->committed(), current->flag()); |
799 | } |
800 | |
801 | void MemDetailDiffReporter::diff_virtual_memory_site(const NativeCallStack* stack, size_t current_reserved, |
802 | size_t current_committed, size_t early_reserved, size_t early_committed, MEMFLAGS flag) const { |
803 | outputStream* out = output(); |
804 | |
805 | // no change |
806 | if (diff_in_current_scale(current_reserved, early_reserved) == 0 && |
807 | diff_in_current_scale(current_committed, early_committed) == 0) { |
808 | return; |
809 | } |
810 | |
811 | stack->print_on(out); |
812 | out->print("%28s (mmap: ", " "); |
813 | print_virtual_memory_diff(current_reserved, current_committed, |
814 | early_reserved, early_committed); |
815 | |
816 | if (flag != mtNone) { |
817 | out->print(" Type=%s", NMTUtil::flag_to_name(flag)); |
818 | } |
819 | |
820 | out->print_cr(")\n"); |
821 | } |