File: | jdk/src/hotspot/share/memory/virtualspace.cpp |
Warning: | line 775, column 10: Value stored to 'upper' during its initialization is never read |
1 | /* |
2 | * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. |
3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 | * |
5 | * This code is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 only, as |
7 | * published by the Free Software Foundation. |
8 | * |
9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
12 | * version 2 for more details (a copy is included in the LICENSE file that |
13 | * accompanied this code). |
14 | * |
15 | * You should have received a copy of the GNU General Public License version |
16 | * 2 along with this work; if not, write to the Free Software Foundation, |
17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
18 | * |
19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
20 | * or visit www.oracle.com if you need additional information or have any |
21 | * questions. |
22 | * |
23 | */ |
24 | |
25 | #include "precompiled.hpp" |
26 | #include "logging/log.hpp" |
27 | #include "memory/resourceArea.hpp" |
28 | #include "memory/virtualspace.hpp" |
29 | #include "oops/compressedOops.hpp" |
30 | #include "oops/markWord.hpp" |
31 | #include "oops/oop.inline.hpp" |
32 | #include "runtime/globals_extension.hpp" |
33 | #include "runtime/java.hpp" |
34 | #include "runtime/os.hpp" |
35 | #include "services/memTracker.hpp" |
36 | #include "utilities/align.hpp" |
37 | #include "utilities/formatBuffer.hpp" |
38 | #include "utilities/powerOfTwo.hpp" |
39 | |
40 | // ReservedSpace |
41 | |
42 | // Dummy constructor |
43 | ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0), |
44 | _alignment(0), _special(false), _fd_for_heap(-1), _executable(false) { |
45 | } |
46 | |
47 | ReservedSpace::ReservedSpace(size_t size) : _fd_for_heap(-1) { |
48 | // Want to use large pages where possible. If the size is |
49 | // not large page aligned the mapping will be a mix of |
50 | // large and normal pages. |
51 | size_t page_size = os::page_size_for_region_unaligned(size, 1); |
52 | size_t alignment = os::vm_allocation_granularity(); |
53 | initialize(size, alignment, page_size, NULL, false); |
54 | } |
55 | |
56 | ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1) { |
57 | // When a page size is given we don't want to mix large |
58 | // and normal pages. If the size is not a multiple of the |
59 | // page size it will be aligned up to achieve this. |
60 | size_t alignment = os::vm_allocation_granularity(); |
61 | if (preferred_page_size != (size_t)os::vm_page_size()) { |
62 | alignment = MAX2(preferred_page_size, alignment); |
63 | size = align_up(size, alignment); |
64 | } |
65 | initialize(size, alignment, preferred_page_size, NULL, false); |
66 | } |
67 | |
68 | ReservedSpace::ReservedSpace(size_t size, |
69 | size_t alignment, |
70 | size_t page_size, |
71 | char* requested_address) : _fd_for_heap(-1) { |
72 | initialize(size, alignment, page_size, requested_address, false); |
73 | } |
74 | |
75 | ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment, size_t page_size, |
76 | bool special, bool executable) : _fd_for_heap(-1) { |
77 | assert((size % os::vm_allocation_granularity()) == 0, |
78 | "size not allocation aligned"); |
79 | initialize_members(base, size, alignment, page_size, special, executable); |
80 | } |
81 | |
82 | // Helper method |
83 | static char* attempt_map_or_reserve_memory_at(char* base, size_t size, int fd, bool executable) { |
84 | if (fd != -1) { |
85 | return os::attempt_map_memory_to_file_at(base, size, fd); |
86 | } |
87 | return os::attempt_reserve_memory_at(base, size, executable); |
88 | } |
89 | |
90 | // Helper method |
91 | static char* map_or_reserve_memory(size_t size, int fd, bool executable) { |
92 | if (fd != -1) { |
93 | return os::map_memory_to_file(size, fd); |
94 | } |
95 | return os::reserve_memory(size, executable); |
96 | } |
97 | |
98 | // Helper method |
99 | static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int fd, bool executable) { |
100 | if (fd != -1) { |
101 | return os::map_memory_to_file_aligned(size, alignment, fd); |
102 | } |
103 | return os::reserve_memory_aligned(size, alignment, executable); |
104 | } |
105 | |
106 | // Helper method |
107 | static void unmap_or_release_memory(char* base, size_t size, bool is_file_mapped) { |
108 | if (is_file_mapped) { |
109 | if (!os::unmap_memory(base, size)) { |
110 | fatal("os::unmap_memory failed"); |
111 | } |
112 | } else if (!os::release_memory(base, size)) { |
113 | fatal("os::release_memory failed"); |
114 | } |
115 | } |
116 | |
117 | // Helper method |
118 | static bool failed_to_reserve_as_requested(char* base, char* requested_address) { |
119 | if (base == requested_address || requested_address == NULL) { |
120 | return false; // did not fail |
121 | } |
122 | |
123 | if (base != NULL) { |
124 | // Different reserve address may be acceptable in other cases |
125 | // but for compressed oops heap should be at requested address. |
126 | assert(UseCompressedOops, "currently requested address used only for compressed oops"); |
127 | log_debug(gc, heap, coops)("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, p2i(base), p2i(requested_address)); |
128 | } |
129 | return true; |
130 | } |
131 | |
132 | static bool use_explicit_large_pages(size_t page_size) { |
133 | return !os::can_commit_large_page_memory() && |
134 | page_size != (size_t) os::vm_page_size(); |
135 | } |
136 | |
137 | static bool large_pages_requested() { |
138 | return UseLargePages && |
139 | (!FLAG_IS_DEFAULT(UseLargePages) || !FLAG_IS_DEFAULT(LargePageSizeInBytes)); |
140 | } |
141 | |
142 | static char* reserve_memory(char* requested_address, const size_t size, |
143 | const size_t alignment, int fd, bool exec) { |
144 | char* base; |
145 | // If the memory was requested at a particular address, use |
146 | // os::attempt_reserve_memory_at() to avoid mapping over something |
147 | // important. If the reservation fails, return NULL. |
148 | if (requested_address != 0) { |
149 | assert(is_aligned(requested_address, alignment), |
150 | "Requested address " PTR_FORMAT " must be aligned to " SIZE_FORMAT, |
151 | p2i(requested_address), alignment); |
152 | base = attempt_map_or_reserve_memory_at(requested_address, size, fd, exec); |
153 | } else { |
154 | // Optimistically assume that the OS returns an aligned base pointer. |
155 | // When reserving a large address range, most OSes seem to align to at |
156 | // least 64K. |
157 | base = map_or_reserve_memory(size, fd, exec); |
158 | // Check alignment constraints. This is only needed when there is |
159 | // no requested address. |
160 | if (!is_aligned(base, alignment)) { |
161 | // Base not aligned, retry. |
162 | unmap_or_release_memory(base, size, fd != -1 /*is_file_mapped*/); |
163 | // Map using the requested alignment. |
164 | base = map_or_reserve_memory_aligned(size, alignment, fd, exec); |
165 | } |
166 | } |
167 | |
168 | return base; |
169 | } |
170 | |
171 | static char* reserve_memory_special(char* requested_address, const size_t size, |
172 | const size_t alignment, const size_t page_size, bool exec) { |
173 | |
174 | log_trace(pagesize)("Attempt special mapping: size: " SIZE_FORMAT "%s, " |
175 | "alignment: " SIZE_FORMAT "%s", |
176 | byte_size_in_exact_unit(size), exact_unit_for_byte_size(size), |
177 | byte_size_in_exact_unit(alignment), exact_unit_for_byte_size(alignment)); |
178 | |
179 | char* base = os::reserve_memory_special(size, alignment, page_size, requested_address, exec); |
180 | if (base != NULL) { |
181 | // Check alignment constraints. |
182 | assert(is_aligned(base, alignment), |
183 | "reserve_memory_special() returned an unaligned address, base: " PTR_FORMAT |
184 | " alignment: " SIZE_FORMAT_HEX, |
185 | p2i(base), alignment); |
186 | } else { |
187 | if (large_pages_requested()) { |
188 | log_debug(gc, heap, coops)("Reserve regular memory without large pages"); |
189 | } |
190 | } |
191 | return base; |
192 | } |
193 | |
194 | void ReservedSpace::clear_members() { |
195 | initialize_members(NULL, 0, 0, 0, false, false); |
196 | } |
197 | |
198 | void ReservedSpace::initialize_members(char* base, size_t size, size_t alignment, |
199 | size_t page_size, bool special, bool executable) { |
200 | _base = base; |
201 | _size = size; |
202 | _alignment = alignment; |
203 | _page_size = page_size; |
204 | _special = special; |
205 | _executable = executable; |
206 | _noaccess_prefix = 0; |
207 | } |
208 | |
209 | void ReservedSpace::reserve(size_t size, |
210 | size_t alignment, |
211 | size_t page_size, |
212 | char* requested_address, |
213 | bool executable) { |
214 | assert(is_aligned(size, alignment), "Size must be aligned to the requested alignment"); |
215 | |
216 | // There are basically three different cases that we need to handle below: |
217 | // - Mapping backed by a file |
218 | // - Mapping backed by explicit large pages |
219 | // - Mapping backed by normal pages or transparent huge pages |
220 | // The first two have restrictions that require the whole mapping to be |
221 | // committed up front. To record this the ReservedSpace is marked 'special'. |
222 | |
223 | if (_fd_for_heap != -1) { |
224 | // When there is a backing file directory for this space then whether |
225 | // large pages are allocated is up to the filesystem of the backing file. |
226 | // So UseLargePages is not taken into account for this reservation. |
227 | char* base = reserve_memory(requested_address, size, alignment, _fd_for_heap, executable); |
228 | if (base != NULL) { |
229 | initialize_members(base, size, alignment, os::vm_page_size(), true, executable); |
230 | } |
231 | // Always return, not possible to fall back to reservation not using a file. |
232 | return; |
233 | } else if (use_explicit_large_pages(page_size)) { |
234 | // System can't commit large pages i.e. use transparent huge pages and |
235 | // the caller requested large pages. To satisfy this request we use |
236 | // explicit large pages and these have to be committed up front to ensure |
237 | // no reservations are lost. |
238 | |
239 | char* base = reserve_memory_special(requested_address, size, alignment, page_size, executable); |
240 | if (base != NULL) { |
241 | // Successful reservation using large pages. |
242 | initialize_members(base, size, alignment, page_size, true, executable); |
243 | return; |
244 | } |
245 | // Failed to reserve explicit large pages, fall back to normal reservation. |
246 | page_size = os::vm_page_size(); |
247 | } |
248 | |
249 | // Not a 'special' reservation. |
250 | char* base = reserve_memory(requested_address, size, alignment, -1, executable); |
251 | if (base != NULL) { |
252 | // Successful mapping. |
253 | initialize_members(base, size, alignment, page_size, false, executable); |
254 | } |
255 | } |
256 | |
257 | void ReservedSpace::initialize(size_t size, |
258 | size_t alignment, |
259 | size_t page_size, |
260 | char* requested_address, |
261 | bool executable) { |
262 | const size_t granularity = os::vm_allocation_granularity(); |
263 | assert((size & (granularity - 1)) == 0, |
264 | "size not aligned to os::vm_allocation_granularity()"); |
265 | assert((alignment & (granularity - 1)) == 0, |
266 | "alignment not aligned to os::vm_allocation_granularity()"); |
267 | assert(alignment == 0 || is_power_of_2((intptr_t)alignment), |
268 | "not a power of 2"); |
269 | assert(page_size >= (size_t) os::vm_page_size(), "Invalid page size"); |
270 | assert(is_power_of_2(page_size), "Invalid page size"); |
271 | |
272 | clear_members(); |
273 | |
274 | if (size == 0) { |
275 | return; |
276 | } |
277 | |
278 | // Adjust alignment to not be 0. |
279 | alignment = MAX2(alignment, (size_t)os::vm_page_size()); |
280 | |
281 | // Reserve the memory. |
282 | reserve(size, alignment, page_size, requested_address, executable); |
283 | |
284 | // Check that the requested address is used if given. |
285 | if (failed_to_reserve_as_requested(_base, requested_address)) { |
286 | // OS ignored the requested address, release the reservation. |
287 | release(); |
288 | return; |
289 | } |
290 | } |
291 | |
292 | ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment) { |
293 | assert(partition_size <= size(), "partition failed"); |
294 | ReservedSpace result(base(), partition_size, alignment, page_size(), special(), executable()); |
295 | return result; |
296 | } |
297 | |
298 | |
299 | ReservedSpace |
300 | ReservedSpace::last_part(size_t partition_size, size_t alignment) { |
301 | assert(partition_size <= size(), "partition failed"); |
302 | ReservedSpace result(base() + partition_size, size() - partition_size, |
303 | alignment, page_size(), special(), executable()); |
304 | return result; |
305 | } |
306 | |
307 | |
308 | size_t ReservedSpace::page_align_size_up(size_t size) { |
309 | return align_up(size, os::vm_page_size()); |
310 | } |
311 | |
312 | |
313 | size_t ReservedSpace::page_align_size_down(size_t size) { |
314 | return align_down(size, os::vm_page_size()); |
315 | } |
316 | |
317 | |
318 | size_t ReservedSpace::allocation_align_size_up(size_t size) { |
319 | return align_up(size, os::vm_allocation_granularity()); |
320 | } |
321 | |
322 | void ReservedSpace::release() { |
323 | if (is_reserved()) { |
324 | char *real_base = _base - _noaccess_prefix; |
325 | const size_t real_size = _size + _noaccess_prefix; |
326 | if (special()) { |
327 | if (_fd_for_heap != -1) { |
328 | os::unmap_memory(real_base, real_size); |
329 | } else { |
330 | os::release_memory_special(real_base, real_size); |
331 | } |
332 | } else { |
333 | os::release_memory(real_base, real_size); |
334 | } |
335 | clear_members(); |
336 | } |
337 | } |
338 | |
339 | static size_t noaccess_prefix_size(size_t alignment) { |
340 | return lcm(os::vm_page_size(), alignment); |
341 | } |
342 | |
343 | void ReservedHeapSpace::establish_noaccess_prefix() { |
344 | assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big"); |
345 | _noaccess_prefix = noaccess_prefix_size(_alignment); |
346 | |
347 | if (base() && base() + _size > (char *)OopEncodingHeapMax) { |
348 | if (true |
349 | WIN64_ONLY(&& !UseLargePages) |
350 | AIX_ONLY(&& os::vm_page_size() != 64*K)) { |
351 | // Protect memory at the base of the allocated region. |
352 | // If special, the page was committed (only matters on windows) |
353 | if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) { |
354 | fatal("cannot protect protection page"); |
355 | } |
356 | log_debug(gc, heap, coops)("Protected page at the reserved heap base: " |
357 | PTR_FORMAT " / " INTX_FORMAT " bytes", |
358 | p2i(_base), |
359 | _noaccess_prefix); |
360 | assert(CompressedOops::use_implicit_null_checks() == true, "not initialized?"); |
361 | } else { |
362 | CompressedOops::set_use_implicit_null_checks(false); |
363 | } |
364 | } |
365 | |
366 | _base += _noaccess_prefix; |
367 | _size -= _noaccess_prefix; |
368 | assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment"); |
369 | } |
370 | |
371 | // Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'. |
372 | // Does not check whether the reserved memory actually is at requested_address, as the memory returned |
373 | // might still fulfill the wishes of the caller. |
374 | // Assures the memory is aligned to 'alignment'. |
375 | // NOTE: If ReservedHeapSpace already points to some reserved memory, this is freed first. |
376 | void ReservedHeapSpace::try_reserve_heap(size_t size, |
377 | size_t alignment, |
378 | size_t page_size, |
379 | char* requested_address) { |
380 | if (_base != NULL) { |
381 | // We tried before, but we didn't like the address delivered. |
382 | release(); |
383 | } |
384 | |
385 | // Try to reserve the memory for the heap. |
386 | log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT |
387 | " heap of size " SIZE_FORMAT_HEX, |
388 | p2i(requested_address), |
389 | size); |
390 | |
391 | reserve(size, alignment, page_size, requested_address, false); |
392 | |
393 | // Check alignment constraints. |
394 | if (is_reserved() && !is_aligned(_base, _alignment)) { |
395 | // Base not aligned, retry. |
396 | release(); |
397 | } |
398 | } |
399 | |
400 | void ReservedHeapSpace::try_reserve_range(char *highest_start, |
401 | char *lowest_start, |
402 | size_t attach_point_alignment, |
403 | char *aligned_heap_base_min_address, |
404 | char *upper_bound, |
405 | size_t size, |
406 | size_t alignment, |
407 | size_t page_size) { |
408 | const size_t attach_range = highest_start - lowest_start; |
409 | // Cap num_attempts at possible number. |
410 | // At least one is possible even for 0 sized attach range. |
411 | const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1; |
412 | const uint64_t num_attempts_to_try = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible); |
413 | |
414 | const size_t stepsize = (attach_range == 0) ? // Only one try. |
415 | (size_t) highest_start : align_up(attach_range / num_attempts_to_try, attach_point_alignment); |
416 | |
417 | // Try attach points from top to bottom. |
418 | char* attach_point = highest_start; |
419 | while (attach_point >= lowest_start && |
420 | attach_point <= highest_start && // Avoid wrap around. |
421 | ((_base == NULL) || |
422 | (_base < aligned_heap_base_min_address || _base + size > upper_bound))) { |
423 | try_reserve_heap(size, alignment, page_size, attach_point); |
424 | attach_point -= stepsize; |
425 | } |
426 | } |
427 | |
428 | #define SIZE_64K  ((uint64_t) UCONST64(0x10000)) |
429 | #define SIZE_256M ((uint64_t) UCONST64(0x10000000)) |
430 | #define SIZE_32G  ((uint64_t) UCONST64(0x800000000)) |
431 | |
432 | // Helper for heap allocation. Returns an array with addresses |
433 | // (OS-specific) which are suited for disjoint base mode. Array is |
434 | // NULL terminated. |
435 | static char** get_attach_addresses_for_disjoint_mode() { |
436 | static uint64_t addresses[] = { |
437 | 2 * SIZE_32G, |
438 | 3 * SIZE_32G, |
439 | 4 * SIZE_32G, |
440 | 8 * SIZE_32G, |
441 | 10 * SIZE_32G, |
442 | 1 * SIZE_64K * SIZE_32G, |
443 | 2 * SIZE_64K * SIZE_32G, |
444 | 3 * SIZE_64K * SIZE_32G, |
445 | 4 * SIZE_64K * SIZE_32G, |
446 | 16 * SIZE_64K * SIZE_32G, |
447 | 32 * SIZE_64K * SIZE_32G, |
448 | 34 * SIZE_64K * SIZE_32G, |
449 | 0 |
450 | }; |
451 | |
452 | // Sort out addresses smaller than HeapBaseMinAddress. This assumes |
453 | // the array is sorted. |
454 | uint i = 0; |
455 | while (addresses[i] != 0 && |
456 | (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) { |
457 | i++; |
458 | } |
459 | uint start = i; |
460 | |
461 | // Avoid more steps than requested. |
462 | i = 0; |
463 | while (addresses[start+i] != 0) { |
464 | if (i == HeapSearchSteps) { |
465 | addresses[start+i] = 0; |
466 | break; |
467 | } |
468 | i++; |
469 | } |
470 | |
471 | return (char**) &addresses[start]; |
472 | } |
473 | |
474 | void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, size_t page_size) { |
475 | guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax, |
476 | "can not allocate compressed oop heap for this size"); |
477 | guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small"); |
478 | |
479 | const size_t granularity = os::vm_allocation_granularity(); |
480 | assert((size & (granularity - 1)) == 0, |
481 | "size not aligned to os::vm_allocation_granularity()"); |
482 | assert((alignment & (granularity - 1)) == 0, |
483 | "alignment not aligned to os::vm_allocation_granularity()"); |
484 | assert(alignment == 0 || is_power_of_2((intptr_t)alignment), |
485 | "not a power of 2"); |
486 | |
487 | // The necessary attach point alignment for generated wish addresses. |
488 | // This is needed to increase the chance of attaching for mmap and shmat. |
489 | const size_t os_attach_point_alignment = |
490 | AIX_ONLY(SIZE_256M) // Known shm boundary alignment. |
491 | NOT_AIX(os::vm_allocation_granularity()); |
492 | const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment); |
493 | |
494 | char *aligned_heap_base_min_address = (char *)align_up((void *)HeapBaseMinAddress, alignment); |
495 | size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ? |
496 | noaccess_prefix_size(alignment) : 0; |
497 | |
498 | // Attempt to alloc at user-given address. |
499 | if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) { |
500 | try_reserve_heap(size + noaccess_prefix, alignment, page_size, aligned_heap_base_min_address); |
501 | if (_base != aligned_heap_base_min_address) { // Enforce this exact address. |
502 | release(); |
503 | } |
504 | } |
505 | |
506 | // Keep heap at HeapBaseMinAddress. |
507 | if (_base == NULL) { |
508 | |
509 | // Try to allocate the heap at addresses that allow efficient oop compression. |
510 | // Different schemes are tried, in order of decreasing optimization potential. |
511 | // |
512 | // For this, try_reserve_heap() is called with the desired heap base addresses. |
513 | // A call into the os layer to allocate at a given address can return memory |
514 | // at a different address than requested. Still, this might be memory at a useful |
515 | // address. try_reserve_heap() always returns this allocated memory, as only here |
516 | // the criteria for a good heap are checked. |
517 | |
518 | // Attempt to allocate so that we can run without base and scale (32-Bit unscaled compressed oops). |
519 | // Give it several tries from top of range to bottom. |
520 | if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) { |
521 | |
522 | // Calc address range within we try to attach (range of possible start addresses). |
523 | char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment); |
524 | char* const lowest_start = align_up(aligned_heap_base_min_address, attach_point_alignment); |
525 | try_reserve_range(highest_start, lowest_start, attach_point_alignment, |
526 | aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, page_size); |
527 | } |
528 | |
529 | // zerobased: Attempt to allocate in the lower 32G. |
530 | // But leave room for the compressed class pointers, which is allocated above |
531 | // the heap. |
532 | char *zerobased_max = (char *)OopEncodingHeapMax; |
533 | const size_t class_space = align_up(CompressedClassSpaceSize, alignment); |
534 | // For small heaps, save some space for compressed class pointer |
535 | // space so it can be decoded with no base. |
536 | if (UseCompressedClassPointers && !UseSharedSpaces && |
537 | OopEncodingHeapMax <= KlassEncodingMetaspaceMax && |
538 | (uint64_t)(aligned_heap_base_min_address + size + class_space) <= KlassEncodingMetaspaceMax) { |
539 | zerobased_max = (char *)OopEncodingHeapMax - class_space; |
540 | } |
541 | |
542 | // Give it several tries from top of range to bottom. |
543 | if (aligned_heap_base_min_address + size <= zerobased_max && // Zerobased theoretical possible. |
544 | ((_base == NULL) || // No previous try succeeded. |
545 | (_base + size > zerobased_max))) { // Unscaled delivered an arbitrary address. |
546 | |
547 | // Calc address range within we try to attach (range of possible start addresses). |
548 | char *const highest_start = align_down(zerobased_max - size, attach_point_alignment); |
549 | // Need to be careful about size being guaranteed to be less |
550 | // than UnscaledOopHeapMax due to type constraints. |
551 | char *lowest_start = aligned_heap_base_min_address; |
552 | uint64_t unscaled_end = UnscaledOopHeapMax - size; |
553 | if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large |
554 | lowest_start = MAX2(lowest_start, (char*)unscaled_end); |
555 | } |
556 | lowest_start = align_up(lowest_start, attach_point_alignment); |
557 | try_reserve_range(highest_start, lowest_start, attach_point_alignment, |
558 | aligned_heap_base_min_address, zerobased_max, size, alignment, page_size); |
559 | } |
560 | |
561 | // Now we go for heaps with base != 0. We need a noaccess prefix to efficiently |
562 | // implement null checks. |
563 | noaccess_prefix = noaccess_prefix_size(alignment); |
564 | |
565 | // Try to attach at addresses that are aligned to OopEncodingHeapMax. Disjointbase mode. |
566 | char** addresses = get_attach_addresses_for_disjoint_mode(); |
567 | int i = 0; |
568 | while (addresses[i] && // End of array not yet reached. |
569 | ((_base == NULL) || // No previous try succeeded. |
570 | (_base + size > (char *)OopEncodingHeapMax && // Not zerobased or unscaled address. |
571 | !CompressedOops::is_disjoint_heap_base_address((address)_base)))) { // Not disjoint address. |
572 | char* const attach_point = addresses[i]; |
573 | assert(attach_point >= aligned_heap_base_min_address, "Flag support broken"); |
574 | try_reserve_heap(size + noaccess_prefix, alignment, page_size, attach_point); |
575 | i++; |
576 | } |
577 | |
578 | // Last, desperate try without any placement. |
579 | if (_base == NULL) { |
580 | log_trace(gc, heap, coops)("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX, size + noaccess_prefix); |
581 | initialize(size + noaccess_prefix, alignment, page_size, NULL, false); |
582 | } |
583 | } |
584 | } |
585 | |
586 | ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, size_t page_size, const char* heap_allocation_directory) : ReservedSpace() { |
587 | |
588 | if (size == 0) { |
589 | return; |
590 | } |
591 | |
592 | if (heap_allocation_directory != NULL) { |
593 | _fd_for_heap = os::create_file_for_heap(heap_allocation_directory); |
594 | if (_fd_for_heap == -1) { |
595 | vm_exit_during_initialization( |
596 | err_msg("Could not create file for Heap at location %s", heap_allocation_directory)); |
597 | } |
598 | // When there is a backing file directory for this space then whether |
599 | // large pages are allocated is up to the filesystem of the backing file. |
600 | // If requested, let the user know that explicit large pages can't be used. |
601 | if (use_explicit_large_pages(page_size) && large_pages_requested()) { |
602 | log_debug(gc, heap)("Cannot allocate explicit large pages for Java Heap when AllocateHeapAt option is set."); |
603 | } |
604 | } |
605 | |
606 | // Heap size should be aligned to alignment, too. |
607 | guarantee(is_aligned(size, alignment), "set by caller"); |
608 | |
609 | if (UseCompressedOops) { |
610 | initialize_compressed_heap(size, alignment, page_size); |
611 | if (_size > size) { |
612 | // We allocated heap with noaccess prefix. |
613 | // It can happen we get a zerobased/unscaled heap with noaccess prefix, |
614 | // if we had to try at arbitrary address. |
615 | establish_noaccess_prefix(); |
616 | } |
617 | } else { |
618 | initialize(size, alignment, page_size, NULL, false); |
619 | } |
620 | |
621 | assert(markWord::encode_pointer_as_mark(_base).decode_pointer() == _base, |
622 | "area must be distinguishable from marks for mark-sweep"); |
623 | assert(markWord::encode_pointer_as_mark(&_base[size]).decode_pointer() == &_base[size], |
624 | "area must be distinguishable from marks for mark-sweep"); |
625 | |
626 | if (base() != NULL) { |
627 | MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap); |
628 | } |
629 | |
630 | if (_fd_for_heap != -1) { |
631 | os::close(_fd_for_heap); |
632 | } |
633 | } |
634 | |
635 | MemRegion ReservedHeapSpace::region() const { |
636 | return MemRegion((HeapWord*)base(), (HeapWord*)end()); |
637 | } |
638 | |
639 | // Reserve space for code segment. Same as Java heap only we mark this as |
640 | // executable. |
641 | ReservedCodeSpace::ReservedCodeSpace(size_t r_size, |
642 | size_t rs_align, |
643 | size_t rs_page_size) : ReservedSpace() { |
644 | initialize(r_size, rs_align, rs_page_size, /*requested address*/ NULL, /*executable*/ true); |
645 | MemTracker::record_virtual_memory_type((address)base(), mtCode); |
646 | } |
647 | |
648 | // VirtualSpace |
649 | |
650 | VirtualSpace::VirtualSpace() { |
651 | _low_boundary = NULL; |
652 | _high_boundary = NULL; |
653 | _low = NULL; |
654 | _high = NULL; |
655 | _lower_high = NULL; |
656 | _middle_high = NULL; |
657 | _upper_high = NULL; |
658 | _lower_high_boundary = NULL; |
659 | _middle_high_boundary = NULL; |
660 | _upper_high_boundary = NULL; |
661 | _lower_alignment = 0; |
662 | _middle_alignment = 0; |
663 | _upper_alignment = 0; |
664 | _special = false; |
665 | _executable = false; |
666 | } |
667 | |
668 | |
669 | bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) { |
670 | const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1); |
671 | return initialize_with_granularity(rs, committed_size, max_commit_granularity); |
672 | } |
673 | |
674 | bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) { |
675 | if (!rs.is_reserved()) return false; // allocation failed. |
676 | assert(_low_boundary == NULL, "VirtualSpace already initialized"); |
677 | assert(max_commit_granularity > 0, "Granularity must be non-zero."); |
678 | |
679 | _low_boundary = rs.base(); |
680 | _high_boundary = low_boundary() + rs.size(); |
681 | |
682 | _low = low_boundary(); |
683 | _high = low(); |
684 | |
685 | _special = rs.special(); |
686 | _executable = rs.executable(); |
687 | |
688 | // When a VirtualSpace begins life at a large size, make all future expansion |
689 | // and shrinking occur aligned to a granularity of large pages. This avoids |
690 | // fragmentation of physical addresses that inhibits the use of large pages |
691 | // by the OS virtual memory system. Empirically, we see that with a 4MB |
692 | // page size, the only spaces that get handled this way are codecache and |
693 | // the heap itself, both of which provide a substantial performance |
694 | // boost in many benchmarks when covered by large pages. |
695 | // |
696 | // No attempt is made to force large page alignment at the very top and |
697 | // bottom of the space if they are not aligned so already. |
698 | _lower_alignment = os::vm_page_size(); |
699 | _middle_alignment = max_commit_granularity; |
700 | _upper_alignment = os::vm_page_size(); |
701 | |
702 | // End of each region |
703 | _lower_high_boundary = align_up(low_boundary(), middle_alignment()); |
704 | _middle_high_boundary = align_down(high_boundary(), middle_alignment()); |
705 | _upper_high_boundary = high_boundary(); |
706 | |
707 | // High address of each region |
708 | _lower_high = low_boundary(); |
709 | _middle_high = lower_high_boundary(); |
710 | _upper_high = middle_high_boundary(); |
711 | |
712 | // commit to initial size |
713 | if (committed_size > 0) { |
714 | if (!expand_by(committed_size)) { |
715 | return false; |
716 | } |
717 | } |
718 | return true; |
719 | } |
720 | |
721 | |
722 | VirtualSpace::~VirtualSpace() { |
723 | release(); |
724 | } |
725 | |
726 | |
727 | void VirtualSpace::release() { |
728 | // This does not release memory it reserved. |
729 | // Caller must release via rs.release(); |
730 | _low_boundary = NULL; |
731 | _high_boundary = NULL; |
732 | _low = NULL; |
733 | _high = NULL; |
734 | _lower_high = NULL; |
735 | _middle_high = NULL; |
736 | _upper_high = NULL; |
737 | _lower_high_boundary = NULL; |
738 | _middle_high_boundary = NULL; |
739 | _upper_high_boundary = NULL; |
740 | _lower_alignment = 0; |
741 | _middle_alignment = 0; |
742 | _upper_alignment = 0; |
743 | _special = false; |
744 | _executable = false; |
745 | } |
746 | |
747 | |
748 | size_t VirtualSpace::committed_size() const { |
749 | return pointer_delta(high(), low(), sizeof(char)); |
750 | } |
751 | |
752 | |
753 | size_t VirtualSpace::reserved_size() const { |
754 | return pointer_delta(high_boundary(), low_boundary(), sizeof(char)); |
755 | } |
756 | |
757 | |
758 | size_t VirtualSpace::uncommitted_size() const { |
759 | return reserved_size() - committed_size(); |
760 | } |
761 | |
762 | size_t VirtualSpace::actual_committed_size() const { |
763 | // Special VirtualSpaces commit all reserved space up front. |
764 | if (special()) { |
765 | return reserved_size(); |
766 | } |
767 | |
768 | size_t committed_low = pointer_delta(_lower_high, _low_boundary, sizeof(char)); |
769 | size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary, sizeof(char)); |
770 | size_t committed_high = pointer_delta(_upper_high, _middle_high_boundary, sizeof(char)); |
771 | |
772 | #ifdef ASSERT |
773 | size_t lower = pointer_delta(_lower_high_boundary, _low_boundary, sizeof(char)); |
774 | size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary, sizeof(char)); |
775 | size_t upper = pointer_delta(_upper_high_boundary, _middle_high_boundary, sizeof(char)); |
Value stored to 'upper' during its initialization is never read | |
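Analyzer note: the flagged dead store is the 'upper' local computed on line 775 above. Inside this #ifdef ASSERT block, 'lower' and 'middle' are read by the assertions that follow, but 'upper' never is. A minimal sketch of one possible remedy, offered only as an illustration and not as the change actually made upstream, is to either delete the unused local or have the debug-only block check it, for example:

    // Hypothetical extra check that uses the otherwise-unused value: the upper
    // region can never have more memory committed than its total extent.
    assert(committed_high <= upper, "Must be");

Either variant removes the dead store; the assertion form has the advantage of documenting the invariant the value was presumably meant to capture.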
776 | |
777 | if (committed_high > 0) { |
778 | assert(committed_low == lower, "Must be"); |
779 | assert(committed_middle == middle, "Must be"); |
780 | } |
781 | |
782 | if (committed_middle > 0) { |
783 | assert(committed_low == lower, "Must be"); |
784 | } |
785 | if (committed_middle < middle) { |
786 | assert(committed_high == 0, "Must be"); |
787 | } |
788 | |
789 | if (committed_low < lower) { |
790 | assert(committed_high == 0, "Must be"); |
791 | assert(committed_middle == 0, "Must be"); |
792 | } |
793 | #endif |
794 | |
795 | return committed_low + committed_middle + committed_high; |
796 | } |
797 | |
798 | |
799 | bool VirtualSpace::contains(const void* p) const { |
800 | return low() <= (const char*) p && (const char*) p < high(); |
801 | } |
802 | |
803 | static void pretouch_expanded_memory(void* start, void* end) { |
804 | assert(is_aligned(start, os::vm_page_size()), "Unexpected alignment"); |
805 | assert(is_aligned(end, os::vm_page_size()), "Unexpected alignment"); |
806 | |
807 | os::pretouch_memory(start, end); |
808 | } |
809 | |
810 | static bool commit_expanded(char* start, size_t size, size_t alignment, bool pre_touch, bool executable) { |
811 | if (os::commit_memory(start, size, alignment, executable)) { |
812 | if (pre_touch || AlwaysPreTouch) { |
813 | pretouch_expanded_memory(start, start + size); |
814 | } |
815 | return true; |
816 | } |
817 | |
818 | debug_only(warning( |
819 | "INFO: os::commit_memory(" PTR_FORMAT ", " PTR_FORMAT |
820 | " size=" SIZE_FORMAT ", executable=%d) failed", |
821 | p2i(start), p2i(start + size), size, executable);) |
822 | |
823 | return false; |
824 | } |
825 | |
826 | /* |
827 | First we need to determine if a particular virtual space is using large |
828 | pages. This is done at the initialize function and only virtual spaces |
829 | that are larger than LargePageSizeInBytes use large pages. Once we |
830 | have determined this, all expand_by and shrink_by calls must grow and |
831 | shrink by large page size chunks. If a particular request |
832 | is within the current large page, the call to commit and uncommit memory |
833 | can be ignored. In the case that the low and high boundaries of this |
834 | space are not large page aligned, the pages leading to the first large |
835 | page address and the pages after the last large page address must be |
836 | allocated with default pages. |
837 | */ |
838 | bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) { |
839 | if (uncommitted_size() < bytes) { |
840 | return false; |
841 | } |
842 | |
843 | if (special()) { |
844 | // don't commit memory if the entire space is pinned in memory |
845 | _high += bytes; |
846 | return true; |
847 | } |
848 | |
849 | char* previous_high = high(); |
850 | char* unaligned_new_high = high() + bytes; |
851 | assert(unaligned_new_high <= high_boundary(), "cannot expand by more than upper boundary"); |
852 | |
853 | // Calculate where the new high for each of the regions should be. If |
854 | // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned |
855 | // then the unaligned lower and upper new highs would be the |
856 | // lower_high() and upper_high() respectively. |
857 | char* unaligned_lower_new_high = MIN2(unaligned_new_high, lower_high_boundary()); |
858 | char* unaligned_middle_new_high = MIN2(unaligned_new_high, middle_high_boundary()); |
859 | char* unaligned_upper_new_high = MIN2(unaligned_new_high, upper_high_boundary()); |
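// For example, if unaligned_new_high still lies inside the lower region, all
// three values above are simply unaligned_new_high; after the alignment step
// below, only the lower region ends up with a non-zero amount to commit.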
860 | |
861 | // Align the new highs based on each region's alignment. The lower and upper
862 | // alignments will always be the default page size. The middle alignment will be
863 | // LargePageSizeInBytes if the actual size of the virtual space is in |
864 | // fact larger than LargePageSizeInBytes. |
865 | char* aligned_lower_new_high = align_up(unaligned_lower_new_high, lower_alignment()); |
866 | char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment()); |
867 | char* aligned_upper_new_high = align_up(unaligned_upper_new_high, upper_alignment()); |
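// Concrete arithmetic (hypothetical values): with a 2M (0x200000) middle
// alignment, align_up(0x201000, 0x200000) == 0x400000, while the 4K-aligned
// lower and upper regions leave 0x201000 unchanged, since it is already
// page aligned.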
868 | |
869 | // Determine which regions need to grow in this expand_by call. |
870 | // If you are growing in the lower region, high() must be in that |
871 | // region so calculate the size based on high(). For the middle and |
872 | // upper regions, determine the starting point of growth based on the |
873 | // location of high(). By getting the MAX of the region's low address |
874 | // (or the previous region's high address) and high(), we can tell if it |
875 | // is an intra or inter region growth. |
876 | size_t lower_needs = 0; |
877 | if (aligned_lower_new_high > lower_high()) { |
878 | lower_needs = pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char)); |
879 | } |
880 | size_t middle_needs = 0; |
881 | if (aligned_middle_new_high > middle_high()) { |
882 | middle_needs = pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char)); |
883 | } |
884 | size_t upper_needs = 0; |
885 | if (aligned_upper_new_high > upper_high()) { |
886 | upper_needs = pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char)); |
887 | } |
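// Example of inter-region growth: if high() is currently in the lower region
// and unaligned_new_high lands in the middle region, lower_needs covers the
// rest of the lower region up to lower_high_boundary() and middle_needs
// covers the remainder at the (large page) middle alignment; upper_needs
// stays zero.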
888 | |
889 | // Check contiguity. |
890 | assert(low_boundary() <= lower_high() && lower_high() <= lower_high_boundary(),
891 | "high address must be contained within the region");
892 | assert(lower_high_boundary() <= middle_high() && middle_high() <= middle_high_boundary(),
893 | "high address must be contained within the region");
894 | assert(middle_high_boundary() <= upper_high() && upper_high() <= upper_high_boundary(),
895 | "high address must be contained within the region");
896 | |
897 | // Commit regions |
898 | if (lower_needs > 0) { |
899 | assert(lower_high() + lower_needs <= lower_high_boundary(), "must not expand beyond region");
900 | if (!commit_expanded(lower_high(), lower_needs, _lower_alignment, pre_touch, _executable)) { |
901 | return false; |
902 | } |
903 | _lower_high += lower_needs; |
904 | } |
905 | |
906 | if (middle_needs > 0) { |
907 | assert(middle_high() + middle_needs <= middle_high_boundary(), "must not expand beyond region");
908 | if (!commit_expanded(middle_high(), middle_needs, _middle_alignment, pre_touch, _executable)) { |
909 | return false; |
910 | } |
911 | _middle_high += middle_needs; |
912 | } |
913 | |
914 | if (upper_needs > 0) { |
915 | assert(upper_high() + upper_needs <= upper_high_boundary(), "must not expand beyond region");
916 | if (!commit_expanded(upper_high(), upper_needs, _upper_alignment, pre_touch, _executable)) { |
917 | return false; |
918 | } |
919 | _upper_high += upper_needs; |
920 | } |
921 | |
922 | _high += bytes; |
923 | return true; |
924 | } |
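// Sketch of a typical call sequence (hypothetical caller, illustration only;
// 'rs' stands for some previously created ReservedSpace):
//
//   VirtualSpace vs;
//   if (!vs.initialize(rs, initial_committed_bytes)) {
//     vm_exit_during_initialization("could not commit initial pages");
//   }
//   if (!vs.expand_by(64 * K, /* pre_touch */ false)) {
//     // fall back or report the failed expansion
//   }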
925 | |
926 | // A page is uncommitted if the contents of the entire page are deemed unusable.
927 | // Continue to decrement the high() pointer until it reaches a page boundary,
928 | // at which point that particular page can be uncommitted.
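// For example, with 4K pages and a page-aligned high(), shrinking by 6K moves
// the high watermark down by 6K, but only the one fully vacated 4K page is
// uncommitted; the partially vacated page stays committed until a later
// shrink crosses its boundary.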
929 | void VirtualSpace::shrink_by(size_t size) { |
930 | if (committed_size() < size) |
931 | fatal("Cannot shrink virtual space to negative size")do { (*g_assert_poison) = 'X';; report_fatal(INTERNAL_ERROR, "/home/daniel/Projects/java/jdk/src/hotspot/share/memory/virtualspace.cpp" , 931, "Cannot shrink virtual space to negative size"); ::breakpoint (); } while (0); |
932 | |
933 | if (special()) { |
934 | // don't uncommit if the entire space is pinned in memory |
935 | _high -= size; |
936 | return; |
937 | } |
938 | |
939 | char* unaligned_new_high = high() - size; |
940 | assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");
941 | |
942 | // Calculate new unaligned address |
943 | char* unaligned_upper_new_high = |
944 | MAX2(unaligned_new_high, middle_high_boundary()); |
945 | char* unaligned_middle_new_high = |
946 | MAX2(unaligned_new_high, lower_high_boundary()); |
947 | char* unaligned_lower_new_high = |
948 | MAX2(unaligned_new_high, low_boundary()); |
949 | |
950 | // Align address to region's alignment |
951 | char* aligned_upper_new_high = align_up(unaligned_upper_new_high, upper_alignment()); |
952 | char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment()); |
953 | char* aligned_lower_new_high = align_up(unaligned_lower_new_high, lower_alignment()); |
954 | |
955 | // Determine which regions need to shrink |
956 | size_t upper_needs = 0; |
957 | if (aligned_upper_new_high < upper_high()) { |
958 | upper_needs = |
959 | pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char)); |
960 | } |
961 | size_t middle_needs = 0; |
962 | if (aligned_middle_new_high < middle_high()) { |
963 | middle_needs = |
964 | pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char)); |
965 | } |
966 | size_t lower_needs = 0; |
967 | if (aligned_lower_new_high < lower_high()) { |
968 | lower_needs = |
969 | pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char)); |
970 | } |
971 | |
972 | // Check contiguity. |
973 | assert(middle_high_boundary() <= upper_high() &&
974 | upper_high() <= upper_high_boundary(),
975 | "high address must be contained within the region");
976 | assert(lower_high_boundary() <= middle_high() &&
977 | middle_high() <= middle_high_boundary(),
978 | "high address must be contained within the region");
979 | assert(low_boundary() <= lower_high() &&
980 | lower_high() <= lower_high_boundary(),
981 | "high address must be contained within the region");
982 | |
983 | // Uncommit |
984 | if (upper_needs > 0) { |
985 | assert(middle_high_boundary() <= aligned_upper_new_high &&
986 | aligned_upper_new_high + upper_needs <= upper_high_boundary(),
987 | "must not shrink beyond region");
988 | if (!os::uncommit_memory(aligned_upper_new_high, upper_needs, _executable)) { |
989 | debug_only(warning("os::uncommit_memory failed"));
990 | return; |
991 | } else { |
992 | _upper_high -= upper_needs; |
993 | } |
994 | } |
995 | if (middle_needs > 0) { |
996 | assert(lower_high_boundary() <= aligned_middle_new_high &&
997 | aligned_middle_new_high + middle_needs <= middle_high_boundary(),
998 | "must not shrink beyond region");
999 | if (!os::uncommit_memory(aligned_middle_new_high, middle_needs, _executable)) { |
1000 | debug_only(warning("os::uncommit_memory failed"));
1001 | return; |
1002 | } else { |
1003 | _middle_high -= middle_needs; |
1004 | } |
1005 | } |
1006 | if (lower_needs > 0) { |
1007 | assert(low_boundary() <= aligned_lower_new_high &&
1008 | aligned_lower_new_high + lower_needs <= lower_high_boundary(),
1009 | "must not shrink beyond region");
1010 | if (!os::uncommit_memory(aligned_lower_new_high, lower_needs, _executable)) { |
1011 | debug_only(warning("os::uncommit_memory failed"));
1012 | return; |
1013 | } else { |
1014 | _lower_high -= lower_needs; |
1015 | } |
1016 | } |
1017 | |
1018 | _high -= size; |
1019 | } |
1020 | |
1021 | #ifndef PRODUCT |
1022 | void VirtualSpace::check_for_contiguity() { |
1023 | // Check contiguity. |
1024 | assert(low_boundary() <= lower_high() &&
1025 | lower_high() <= lower_high_boundary(),
1026 | "high address must be contained within the region");
1027 | assert(lower_high_boundary() <= middle_high() &&
1028 | middle_high() <= middle_high_boundary(),
1029 | "high address must be contained within the region");
1030 | assert(middle_high_boundary() <= upper_high() &&
1031 | upper_high() <= upper_high_boundary(),
1032 | "high address must be contained within the region");
1033 | assert(low() >= low_boundary(), "low");
1034 | assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
1035 | assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
1036 | assert(high() <= upper_high(), "upper high");
1037 | } |
1038 | |
1039 | void VirtualSpace::print_on(outputStream* out) const { |
1040 | out->print ("Virtual space:"); |
1041 | if (special()) out->print(" (pinned in memory)"); |
1042 | out->cr(); |
1043 | out->print_cr(" - committed: " SIZE_FORMAT"%" "l" "u", committed_size()); |
1044 | out->print_cr(" - reserved: " SIZE_FORMAT"%" "l" "u", reserved_size()); |
1045 | out->print_cr(" - [low, high]: [" INTPTR_FORMAT"0x%016" "l" "x" ", " INTPTR_FORMAT"0x%016" "l" "x" "]", p2i(low()), p2i(high())); |
1046 | out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT"0x%016" "l" "x" ", " INTPTR_FORMAT"0x%016" "l" "x" "]", p2i(low_boundary()), p2i(high_boundary())); |
1047 | } |
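// Example output (illustrative values only):
//   Virtual space:
//    - committed: 1048576
//    - reserved: 2097152
//    - [low, high]: [0x00007f4000000000, 0x00007f4000100000]
//    - [low_b, high_b]: [0x00007f4000000000, 0x00007f4000200000]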
1048 | |
1049 | void VirtualSpace::print() const { |
1050 | print_on(tty); |
1051 | } |
1052 | |
1053 | #endif |