Bug Summary

File: jdk/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp
Warning: line 147, column 8
Value stored to 'in_native' during its initialization is never read
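
The deadcode.DeadStores checker flags a local whose initializer is evaluated but whose value no later statement reads. A minimal sketch of the pattern (hypothetical bit values, not the JDK code):

    // 'in_native' is computed at initialization but nothing reads it afterwards.
    bool has_heap_decorator(unsigned decorators) {
      const unsigned IN_HEAP_BIT   = 1u << 0;   // hypothetical decorator bits
      const unsigned IN_NATIVE_BIT = 1u << 1;
      bool in_native = (decorators & IN_NATIVE_BIT) != 0;  // dead store
      return (decorators & IN_HEAP_BIT) != 0;              // 'in_native' unused
    }

In the flagged function, load_at_resolved, no assert or branch consumes the flag, so the initialization on line 147 can be deleted outright.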

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name barrierSetC2.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 2 -mthread-model posix -fno-delete-null-pointer-checks -mframe-pointer=all -relaxed-aliasing -fmath-errno -fno-rounding-math -masm-verbose -mconstructor-aliases -munwind-tables -target-cpu x86-64 -dwarf-column-info -fno-split-dwarf-inlining -debugger-tuning=gdb -resource-dir /usr/lib/llvm-10/lib/clang/10.0.0 -I /home/daniel/Projects/java/jdk/build/linux-x86_64-server-fastdebug/hotspot/variant-server/libjvm/objs/precompiled -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -D __STDC_CONSTANT_MACROS -D _GNU_SOURCE -D _REENTRANT -D LIBC=gnu -D LINUX -D VM_LITTLE_ENDIAN -D _LP64=1 -D ASSERT -D CHECK_UNHANDLED_OOPS -D TARGET_ARCH_x86 -D INCLUDE_SUFFIX_OS=_linux -D INCLUDE_SUFFIX_CPU=_x86 -D INCLUDE_SUFFIX_COMPILER=_gcc -D TARGET_COMPILER_gcc -D AMD64 -D HOTSPOT_LIB_ARCH="amd64" -D COMPILER1 -D COMPILER2 -I /home/daniel/Projects/java/jdk/build/linux-x86_64-server-fastdebug/hotspot/variant-server/gensrc/adfiles -I /home/daniel/Projects/java/jdk/src/hotspot/share -I /home/daniel/Projects/java/jdk/src/hotspot/os/linux -I /home/daniel/Projects/java/jdk/src/hotspot/os/posix -I /home/daniel/Projects/java/jdk/src/hotspot/cpu/x86 -I /home/daniel/Projects/java/jdk/src/hotspot/os_cpu/linux_x86 -I /home/daniel/Projects/java/jdk/build/linux-x86_64-server-fastdebug/hotspot/variant-server/gensrc -I /home/daniel/Projects/java/jdk/src/hotspot/share/precompiled -I /home/daniel/Projects/java/jdk/src/hotspot/share/include -I /home/daniel/Projects/java/jdk/src/hotspot/os/posix/include -I /home/daniel/Projects/java/jdk/build/linux-x86_64-server-fastdebug/support/modules_include/java.base -I /home/daniel/Projects/java/jdk/build/linux-x86_64-server-fastdebug/support/modules_include/java.base/linux -I /home/daniel/Projects/java/jdk/src/java.base/share/native/libjimage -I /home/daniel/Projects/java/jdk/build/linux-x86_64-server-fastdebug/hotspot/variant-server/gensrc/adfiles -I /home/daniel/Projects/java/jdk/src/hotspot/share -I /home/daniel/Projects/java/jdk/src/hotspot/os/linux -I /home/daniel/Projects/java/jdk/src/hotspot/os/posix -I /home/daniel/Projects/java/jdk/src/hotspot/cpu/x86 -I /home/daniel/Projects/java/jdk/src/hotspot/os_cpu/linux_x86 -I /home/daniel/Projects/java/jdk/build/linux-x86_64-server-fastdebug/hotspot/variant-server/gensrc -D _FORTIFY_SOURCE=2 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.5.0/../../../../include/c++/7.5.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.5.0/../../../../include/x86_64-linux-gnu/c++/7.5.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.5.0/../../../../include/x86_64-linux-gnu/c++/7.5.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.5.0/../../../../include/c++/7.5.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-10/lib/clang/10.0.0/include 
-internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -Wno-format-zero-length -Wno-unused-parameter -Wno-unused -Wno-parentheses -Wno-comment -Wno-unknown-pragmas -Wno-address -Wno-delete-non-virtual-dtor -Wno-char-subscripts -Wno-array-bounds -Wno-int-in-bool-context -Wno-ignored-qualifiers -Wno-missing-field-initializers -Wno-implicit-fallthrough -Wno-empty-body -Wno-strict-overflow -Wno-sequence-point -Wno-maybe-uninitialized -Wno-misleading-indentation -Wno-cast-function-type -Wno-shift-negative-value -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /home/daniel/Projects/java/jdk/make/hotspot -ferror-limit 19 -fmessage-length 0 -fvisibility hidden -stack-protector 1 -fno-rtti -fgnuc-version=4.2.1 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -faddrsig -o /home/daniel/Projects/java/scan/2021-12-21-193737-8510-1 -x c++ /home/daniel/Projects/java/jdk/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp
1/*
2 * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#include "precompiled.hpp"
26#include "gc/shared/tlab_globals.hpp"
27#include "gc/shared/c2/barrierSetC2.hpp"
28#include "opto/arraycopynode.hpp"
29#include "opto/convertnode.hpp"
30#include "opto/graphKit.hpp"
31#include "opto/idealKit.hpp"
32#include "opto/macro.hpp"
33#include "opto/narrowptrnode.hpp"
34#include "opto/runtime.hpp"
35#include "utilities/macros.hpp"
36
37// By default this is a no-op.
38void BarrierSetC2::resolve_address(C2Access& access) const { }
39
40void* C2ParseAccess::barrier_set_state() const {
41 return _kit->barrier_set_state();
42}
43
44PhaseGVN& C2ParseAccess::gvn() const { return _kit->gvn(); }
45
46bool C2Access::needs_cpu_membar() const {
47 bool mismatched = (_decorators & C2_MISMATCHED) != 0;
48 bool is_unordered = (_decorators & MO_UNORDERED) != 0;
49
50 bool anonymous = (_decorators & C2_UNSAFE_ACCESS) != 0;
51 bool in_heap = (_decorators & IN_HEAP) != 0;
52 bool in_native = (_decorators & IN_NATIVE) != 0;
53 bool is_mixed = !in_heap && !in_native;
54
55 bool is_write = (_decorators & C2_WRITE_ACCESS) != 0;
56 bool is_read = (_decorators & C2_READ_ACCESS) != 0;
57 bool is_atomic = is_read && is_write;
58
59 if (is_atomic) {
60 // Atomics always need to be wrapped in CPU membars
61 return true;
62 }
63
64 if (anonymous) {
65 // We will need memory barriers unless we can determine a unique
66 // alias category for this reference. (Note: If for some reason
67 // the barriers get omitted and the unsafe reference begins to "pollute"
68 // the alias analysis of the rest of the graph, either Compile::can_alias
69 // or Compile::must_alias will throw a diagnostic assert.)
70 if (is_mixed || !is_unordered || (mismatched && !_addr.type()->isa_aryptr())) {
71 return true;
72 }
73 } else {
 74 assert(!is_mixed, "not unsafe");
75 }
76
77 return false;
78}
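An illustrative truth table for needs_cpu_membar(), read off the branches above (decorator names as used in this file):
    // C2_READ_ACCESS | C2_WRITE_ACCESS                          -> true  (atomic)
    // C2_UNSAFE_ACCESS, neither IN_HEAP nor IN_NATIVE           -> true  (mixed)
    // C2_UNSAFE_ACCESS | IN_HEAP | MO_UNORDERED, not mismatched -> false
    // any non-anonymous access that is not atomic               -> false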
79
80Node* BarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const {
81 DecoratorSet decorators = access.decorators();
82
83 bool mismatched = (decorators & C2_MISMATCHED) != 0;
84 bool unaligned = (decorators & C2_UNALIGNED) != 0;
85 bool unsafe = (decorators & C2_UNSAFE_ACCESS) != 0;
86 bool requires_atomic_access = (decorators & MO_UNORDERED) == 0;
87
88 bool in_native = (decorators & IN_NATIVE) != 0;
 89 assert(!in_native || (unsafe && !access.is_oop()), "not supported yet");
90
91 MemNode::MemOrd mo = access.mem_node_mo();
92
93 Node* store;
94 if (access.is_parse_access()) {
95 C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
96
97 GraphKit* kit = parse_access.kit();
98 if (access.type() == T_DOUBLE) {
99 Node* new_val = kit->dstore_rounding(val.node());
100 val.set_node(new_val);
101 }
102
103 store = kit->store_to_memory(kit->control(), access.addr().node(), val.node(), access.type(),
104 access.addr().type(), mo, requires_atomic_access, unaligned, mismatched, unsafe);
105 } else {
 106 assert(!requires_atomic_access, "not yet supported");
 107 assert(access.is_opt_access(), "either parse or opt access");
108 C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
109 Node* ctl = opt_access.ctl();
110 MergeMemNode* mm = opt_access.mem();
111 PhaseGVN& gvn = opt_access.gvn();
112 const TypePtr* adr_type = access.addr().type();
113 int alias = gvn.C->get_alias_index(adr_type);
114 Node* mem = mm->memory_at(alias);
115
116 StoreNode* st = StoreNode::make(gvn, ctl, mem, access.addr().node(), adr_type, val.node(), access.type(), mo);
117 if (unaligned) {
118 st->set_unaligned_access();
119 }
120 if (mismatched) {
121 st->set_mismatched_access();
122 }
123 store = gvn.transform(st);
124 if (store == st) {
125 mm->set_memory_at(alias, st);
126 }
127 }
128 access.set_raw_access(store);
129
130 return store;
131}
132
133Node* BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
134 DecoratorSet decorators = access.decorators();
135
136 Node* adr = access.addr().node();
137 const TypePtr* adr_type = access.addr().type();
138
139 bool mismatched = (decorators & C2_MISMATCHED) != 0;
140 bool requires_atomic_access = (decorators & MO_UNORDERED) == 0;
141 bool unaligned = (decorators & C2_UNALIGNED) != 0;
142 bool control_dependent = (decorators & C2_CONTROL_DEPENDENT_LOAD) != 0;
143 bool unknown_control = (decorators & C2_UNKNOWN_CONTROL_LOAD) != 0;
144 bool unsafe = (decorators & C2_UNSAFE_ACCESS) != 0;
145 bool immutable = (decorators & C2_IMMUTABLE_MEMORY) != 0;
146
147 bool in_native = (decorators & IN_NATIVE) != 0;
Value stored to 'in_native' during its initialization is never read
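Note: store_at_resolved above computes the same flag (line 88) and consumes it in the assert on line 89; here nothing after the initialization reads 'in_native', so the store is dead and the local can simply be removed.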
148
149 MemNode::MemOrd mo = access.mem_node_mo();
150 LoadNode::ControlDependency dep = unknown_control ? LoadNode::UnknownControl : LoadNode::DependsOnlyOnTest;
151
152 Node* load;
153 if (access.is_parse_access()) {
154 C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
155 GraphKit* kit = parse_access.kit();
 156 Node* control = control_dependent ? kit->control() : NULL;
157
158 if (immutable) {
 159 assert(!requires_atomic_access, "can't ensure atomicity");
160 Compile* C = Compile::current();
161 Node* mem = kit->immutable_memory();
162 load = LoadNode::make(kit->gvn(), control, mem, adr,
163 adr_type, val_type, access.type(), mo, dep, unaligned,
164 mismatched, unsafe, access.barrier_data());
165 load = kit->gvn().transform(load);
166 } else {
167 load = kit->make_load(control, adr, val_type, access.type(), adr_type, mo,
168 dep, requires_atomic_access, unaligned, mismatched, unsafe,
169 access.barrier_data());
170 }
171 } else {
 172 assert(!requires_atomic_access, "not yet supported");
 173 assert(access.is_opt_access(), "either parse or opt access");
174 C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
 175 Node* control = control_dependent ? opt_access.ctl() : NULL;
176 MergeMemNode* mm = opt_access.mem();
177 PhaseGVN& gvn = opt_access.gvn();
178 Node* mem = mm->memory_at(gvn.C->get_alias_index(adr_type));
179 load = LoadNode::make(gvn, control, mem, adr, adr_type, val_type, access.type(), mo,
180 dep, unaligned, mismatched, unsafe, access.barrier_data());
181 load = gvn.transform(load);
182 }
183 access.set_raw_access(load);
184
185 return load;
186}
187
188class C2AccessFence: public StackObj {
189 C2Access& _access;
190 Node* _leading_membar;
191
192public:
193 C2AccessFence(C2Access& access) :
 194 _access(access), _leading_membar(NULL) {
 195 GraphKit* kit = NULL;
196 if (access.is_parse_access()) {
197 C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
198 kit = parse_access.kit();
199 }
200 DecoratorSet decorators = access.decorators();
201
202 bool is_write = (decorators & C2_WRITE_ACCESS) != 0;
203 bool is_read = (decorators & C2_READ_ACCESS) != 0;
204 bool is_atomic = is_read && is_write;
205
206 bool is_volatile = (decorators & MO_SEQ_CST) != 0;
207 bool is_release = (decorators & MO_RELEASE) != 0;
208
209 if (is_atomic) {
 210 assert(kit != NULL, "unsupported at optimization time");
211 // Memory-model-wise, a LoadStore acts like a little synchronized
212 // block, so needs barriers on each side. These don't translate
 213 // into actual barriers on most machines, but we still need the rest
 214 // of the compiler to respect ordering.
215 if (is_release) {
216 _leading_membar = kit->insert_mem_bar(Op_MemBarRelease);
217 } else if (is_volatile) {
218 if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
219 _leading_membar = kit->insert_mem_bar(Op_MemBarVolatile);
220 } else {
221 _leading_membar = kit->insert_mem_bar(Op_MemBarRelease);
222 }
223 }
224 } else if (is_write) {
225 // If reference is volatile, prevent following memory ops from
226 // floating down past the volatile write. Also prevents commoning
227 // another volatile read.
228 if (is_volatile || is_release) {
 229 assert(kit != NULL, "unsupported at optimization time");
230 _leading_membar = kit->insert_mem_bar(Op_MemBarRelease);
231 }
232 } else {
233 // Memory barrier to prevent normal and 'unsafe' accesses from
234 // bypassing each other. Happens after null checks, so the
235 // exception paths do not take memory state from the memory barrier,
 236 // so there's no problem making a strong assert about mixing users
237 // of safe & unsafe memory.
238 if (is_volatile && support_IRIW_for_not_multiple_copy_atomic_cpu) {
 239 assert(kit != NULL, "unsupported at optimization time");
240 _leading_membar = kit->insert_mem_bar(Op_MemBarVolatile);
241 }
242 }
243
244 if (access.needs_cpu_membar()) {
 245 assert(kit != NULL, "unsupported at optimization time");
246 kit->insert_mem_bar(Op_MemBarCPUOrder);
247 }
248
249 if (is_atomic) {
250 // 4984716: MemBars must be inserted before this
251 // memory node in order to avoid a false
252 // dependency which will confuse the scheduler.
253 access.set_memory();
254 }
255 }
256
257 ~C2AccessFence() {
 258 GraphKit* kit = NULL;
259 if (_access.is_parse_access()) {
260 C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(_access);
261 kit = parse_access.kit();
262 }
263 DecoratorSet decorators = _access.decorators();
264
265 bool is_write = (decorators & C2_WRITE_ACCESS) != 0;
266 bool is_read = (decorators & C2_READ_ACCESS) != 0;
267 bool is_atomic = is_read && is_write;
268
269 bool is_volatile = (decorators & MO_SEQ_CST) != 0;
270 bool is_acquire = (decorators & MO_ACQUIRE) != 0;
271
 272 // If reference is volatile, prevent following volatile ops from
273 // floating up before the volatile access.
274 if (_access.needs_cpu_membar()) {
275 kit->insert_mem_bar(Op_MemBarCPUOrder);
276 }
277
278 if (is_atomic) {
 279 assert(kit != NULL, "unsupported at optimization time");
280 if (is_acquire || is_volatile) {
281 Node* n = _access.raw_access();
282 Node* mb = kit->insert_mem_bar(Op_MemBarAcquire, n);
 283 if (_leading_membar != NULL) {
284 MemBarNode::set_load_store_pair(_leading_membar->as_MemBar(), mb->as_MemBar());
285 }
286 }
287 } else if (is_write) {
288 // If not multiple copy atomic, we do the MemBarVolatile before the load.
289 if (is_volatile && !support_IRIW_for_not_multiple_copy_atomic_cpu) {
 290 assert(kit != NULL, "unsupported at optimization time");
291 Node* n = _access.raw_access();
292 Node* mb = kit->insert_mem_bar(Op_MemBarVolatile, n); // Use fat membar
 293 if (_leading_membar != NULL) {
294 MemBarNode::set_store_pair(_leading_membar->as_MemBar(), mb->as_MemBar());
295 }
296 }
297 } else {
298 if (is_volatile || is_acquire) {
 299 assert(kit != NULL, "unsupported at optimization time");
300 Node* n = _access.raw_access();
 301 assert(_leading_membar == NULL || support_IRIW_for_not_multiple_copy_atomic_cpu, "no leading membar expected");
302 Node* mb = kit->insert_mem_bar(Op_MemBarAcquire, n);
303 mb->as_MemBar()->set_trailing_load();
304 }
305 }
306 }
307};
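C2AccessFence is a RAII (StackObj) helper: the constructor emits any leading membar before the access is generated and the destructor emits the trailing one afterwards, so a stack-allocated fence brackets the access on every path through the wrapper. The store_at/load_at entry points below rely on exactly this pattern.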
308
309Node* BarrierSetC2::store_at(C2Access& access, C2AccessValue& val) const {
310 C2AccessFence fence(access);
311 resolve_address(access);
312 return store_at_resolved(access, val);
313}
314
315Node* BarrierSetC2::load_at(C2Access& access, const Type* val_type) const {
316 C2AccessFence fence(access);
317 resolve_address(access);
318 return load_at_resolved(access, val_type);
319}
320
321MemNode::MemOrd C2Access::mem_node_mo() const {
322 bool is_write = (_decorators & C2_WRITE_ACCESS) != 0;
323 bool is_read = (_decorators & C2_READ_ACCESS) != 0;
324 if ((_decorators & MO_SEQ_CST) != 0) {
325 if (is_write && is_read) {
326 // For atomic operations
327 return MemNode::seqcst;
328 } else if (is_write) {
329 return MemNode::release;
330 } else {
 331 assert(is_read, "what else?");
332 return MemNode::acquire;
333 }
334 } else if ((_decorators & MO_RELEASE) != 0) {
335 return MemNode::release;
336 } else if ((_decorators & MO_ACQUIRE) != 0) {
337 return MemNode::acquire;
338 } else if (is_write) {
339 // Volatile fields need releasing stores.
340 // Non-volatile fields also need releasing stores if they hold an
341 // object reference, because the object reference might point to
342 // a freshly created object.
343 // Conservatively release stores of object references.
344 return StoreNode::release_if_reference(_type);
345 } else {
346 return MemNode::unordered;
347 }
348}
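The mapping implemented above, summarized (derived directly from the branches of this function):
    // MO_SEQ_CST, read+write -> MemNode::seqcst    (atomic operations)
    // MO_SEQ_CST, write      -> MemNode::release
    // MO_SEQ_CST, read       -> MemNode::acquire
    // MO_RELEASE             -> MemNode::release
    // MO_ACQUIRE             -> MemNode::acquire
    // plain write            -> StoreNode::release_if_reference(_type)
    // otherwise              -> MemNode::unordered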
349
350void C2Access::fixup_decorators() {
351 bool default_mo = (_decorators & MO_DECORATOR_MASK) == 0;
352 bool is_unordered = (_decorators & MO_UNORDERED) != 0 || default_mo;
353 bool anonymous = (_decorators & C2_UNSAFE_ACCESS) != 0;
354
355 bool is_read = (_decorators & C2_READ_ACCESS) != 0;
356 bool is_write = (_decorators & C2_WRITE_ACCESS) != 0;
357
358 if (AlwaysAtomicAccesses && is_unordered) {
359 _decorators &= ~MO_DECORATOR_MASK; // clear the MO bits
 360 _decorators |= MO_RELAXED; // Force the MO_RELAXED decorator with AlwaysAtomicAccesses
361 }
362
363 _decorators = AccessInternal::decorator_fixup(_decorators);
364
365 if (is_read && !is_write && anonymous) {
366 // To be valid, unsafe loads may depend on other conditions than
367 // the one that guards them: pin the Load node
368 _decorators |= C2_CONTROL_DEPENDENT_LOAD;
369 _decorators |= C2_UNKNOWN_CONTROL_LOAD;
370 const TypePtr* adr_type = _addr.type();
371 Node* adr = _addr.node();
372 if (!needs_cpu_membar() && adr_type->isa_instptr()) {
 373 assert(adr_type->meet(TypePtr::NULL_PTR) != adr_type->remove_speculative(), "should be not null");
374 intptr_t offset = Type::OffsetBot;
375 AddPNode::Ideal_base_and_offset(adr, &gvn(), offset);
376 if (offset >= 0) {
377 int s = Klass::layout_helper_size_in_bytes(adr_type->isa_instptr()->klass()->layout_helper());
378 if (offset < s) {
379 // Guaranteed to be a valid access, no need to pin it
380 _decorators ^= C2_CONTROL_DEPENDENT_LOAD;
381 _decorators ^= C2_UNKNOWN_CONTROL_LOAD;
382 }
383 }
384 }
385 }
386}
387
388//--------------------------- atomic operations---------------------------------
389
390void BarrierSetC2::pin_atomic_op(C2AtomicParseAccess& access) const {
391 if (!access.needs_pinning()) {
392 return;
393 }
394 // SCMemProjNodes represent the memory state of a LoadStore. Their
395 // main role is to prevent LoadStore nodes from being optimized away
396 // when their results aren't used.
 397 assert(access.is_parse_access(), "entry not supported at optimization time");
398 C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
399 GraphKit* kit = parse_access.kit();
400 Node* load_store = access.raw_access();
 401 assert(load_store != NULL, "must pin atomic op");
402 Node* proj = kit->gvn().transform(new SCMemProjNode(load_store));
403 kit->set_memory(proj, access.alias_idx());
404}
405
406void C2AtomicParseAccess::set_memory() {
407 Node *mem = _kit->memory(_alias_idx);
408 _memory = mem;
409}
410
411Node* BarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
412 Node* new_val, const Type* value_type) const {
413 GraphKit* kit = access.kit();
414 MemNode::MemOrd mo = access.mem_node_mo();
415 Node* mem = access.memory();
416
417 Node* adr = access.addr().node();
418 const TypePtr* adr_type = access.addr().type();
419
 420 Node* load_store = NULL;
421
422 if (access.is_oop()) {
 423#ifdef _LP64
424 if (adr->bottom_type()->is_ptr_to_narrowoop()) {
425 Node *newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
426 Node *oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
427 load_store = new CompareAndExchangeNNode(kit->control(), mem, adr, newval_enc, oldval_enc, adr_type, value_type->make_narrowoop(), mo);
428 } else
429#endif
430 {
431 load_store = new CompareAndExchangePNode(kit->control(), mem, adr, new_val, expected_val, adr_type, value_type->is_oopptr(), mo);
432 }
433 } else {
434 switch (access.type()) {
435 case T_BYTE: {
436 load_store = new CompareAndExchangeBNode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo);
437 break;
438 }
439 case T_SHORT: {
440 load_store = new CompareAndExchangeSNode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo);
441 break;
442 }
443 case T_INT: {
444 load_store = new CompareAndExchangeINode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo);
445 break;
446 }
447 case T_LONG: {
448 load_store = new CompareAndExchangeLNode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo);
449 break;
450 }
451 default:
 452 ShouldNotReachHere();
453 }
454 }
455
456 load_store->as_LoadStore()->set_barrier_data(access.barrier_data());
457 load_store = kit->gvn().transform(load_store);
458
459 access.set_raw_access(load_store);
460 pin_atomic_op(access);
461
 462#ifdef _LP64
463 if (access.is_oop() && adr->bottom_type()->is_ptr_to_narrowoop()) {
464 return kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
465 }
466#endif
467
468 return load_store;
469}
470
471Node* BarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
472 Node* new_val, const Type* value_type) const {
473 GraphKit* kit = access.kit();
474 DecoratorSet decorators = access.decorators();
475 MemNode::MemOrd mo = access.mem_node_mo();
476 Node* mem = access.memory();
477 bool is_weak_cas = (decorators & C2_WEAK_CMPXCHG) != 0;
 478 Node* load_store = NULL;
479 Node* adr = access.addr().node();
480
481 if (access.is_oop()) {
 482#ifdef _LP64
483 if (adr->bottom_type()->is_ptr_to_narrowoop()) {
484 Node *newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
485 Node *oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
486 if (is_weak_cas) {
487 load_store = new WeakCompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo);
488 } else {
489 load_store = new CompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo);
490 }
491 } else
492#endif
493 {
494 if (is_weak_cas) {
495 load_store = new WeakCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo);
496 } else {
497 load_store = new CompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo);
498 }
499 }
500 } else {
501 switch(access.type()) {
502 case T_BYTE: {
503 if (is_weak_cas) {
504 load_store = new WeakCompareAndSwapBNode(kit->control(), mem, adr, new_val, expected_val, mo);
505 } else {
506 load_store = new CompareAndSwapBNode(kit->control(), mem, adr, new_val, expected_val, mo);
507 }
508 break;
509 }
510 case T_SHORT: {
511 if (is_weak_cas) {
512 load_store = new WeakCompareAndSwapSNode(kit->control(), mem, adr, new_val, expected_val, mo);
513 } else {
514 load_store = new CompareAndSwapSNode(kit->control(), mem, adr, new_val, expected_val, mo);
515 }
516 break;
517 }
518 case T_INT: {
519 if (is_weak_cas) {
520 load_store = new WeakCompareAndSwapINode(kit->control(), mem, adr, new_val, expected_val, mo);
521 } else {
522 load_store = new CompareAndSwapINode(kit->control(), mem, adr, new_val, expected_val, mo);
523 }
524 break;
525 }
526 case T_LONG: {
527 if (is_weak_cas) {
528 load_store = new WeakCompareAndSwapLNode(kit->control(), mem, adr, new_val, expected_val, mo);
529 } else {
530 load_store = new CompareAndSwapLNode(kit->control(), mem, adr, new_val, expected_val, mo);
531 }
532 break;
533 }
534 default:
 535 ShouldNotReachHere();
536 }
537 }
538
539 load_store->as_LoadStore()->set_barrier_data(access.barrier_data());
540 load_store = kit->gvn().transform(load_store);
541
542 access.set_raw_access(load_store);
543 pin_atomic_op(access);
544
545 return load_store;
546}
547
548Node* BarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const {
549 GraphKit* kit = access.kit();
550 Node* mem = access.memory();
551 Node* adr = access.addr().node();
552 const TypePtr* adr_type = access.addr().type();
 553 Node* load_store = NULL;
554
555 if (access.is_oop()) {
 556#ifdef _LP64
557 if (adr->bottom_type()->is_ptr_to_narrowoop()) {
558 Node *newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
559 load_store = kit->gvn().transform(new GetAndSetNNode(kit->control(), mem, adr, newval_enc, adr_type, value_type->make_narrowoop()));
560 } else
561#endif
562 {
563 load_store = new GetAndSetPNode(kit->control(), mem, adr, new_val, adr_type, value_type->is_oopptr());
564 }
565 } else {
566 switch (access.type()) {
567 case T_BYTE:
568 load_store = new GetAndSetBNode(kit->control(), mem, adr, new_val, adr_type);
569 break;
570 case T_SHORT:
571 load_store = new GetAndSetSNode(kit->control(), mem, adr, new_val, adr_type);
572 break;
573 case T_INT:
574 load_store = new GetAndSetINode(kit->control(), mem, adr, new_val, adr_type);
575 break;
576 case T_LONG:
577 load_store = new GetAndSetLNode(kit->control(), mem, adr, new_val, adr_type);
578 break;
579 default:
 580 ShouldNotReachHere();
581 }
582 }
583
584 load_store->as_LoadStore()->set_barrier_data(access.barrier_data());
585 load_store = kit->gvn().transform(load_store);
586
587 access.set_raw_access(load_store);
588 pin_atomic_op(access);
589
 590#ifdef _LP64
591 if (access.is_oop() && adr->bottom_type()->is_ptr_to_narrowoop()) {
592 return kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
593 }
594#endif
595
596 return load_store;
597}
598
599Node* BarrierSetC2::atomic_add_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const {
 600 Node* load_store = NULL;
601 GraphKit* kit = access.kit();
602 Node* adr = access.addr().node();
603 const TypePtr* adr_type = access.addr().type();
604 Node* mem = access.memory();
605
606 switch(access.type()) {
607 case T_BYTE:
608 load_store = new GetAndAddBNode(kit->control(), mem, adr, new_val, adr_type);
609 break;
610 case T_SHORT:
611 load_store = new GetAndAddSNode(kit->control(), mem, adr, new_val, adr_type);
612 break;
613 case T_INT:
614 load_store = new GetAndAddINode(kit->control(), mem, adr, new_val, adr_type);
615 break;
616 case T_LONG:
617 load_store = new GetAndAddLNode(kit->control(), mem, adr, new_val, adr_type);
618 break;
619 default:
 620 ShouldNotReachHere();
621 }
622
623 load_store->as_LoadStore()->set_barrier_data(access.barrier_data());
624 load_store = kit->gvn().transform(load_store);
625
626 access.set_raw_access(load_store);
627 pin_atomic_op(access);
628
629 return load_store;
630}
631
632Node* BarrierSetC2::atomic_cmpxchg_val_at(C2AtomicParseAccess& access, Node* expected_val,
633 Node* new_val, const Type* value_type) const {
634 C2AccessFence fence(access);
635 resolve_address(access);
636 return atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
637}
638
639Node* BarrierSetC2::atomic_cmpxchg_bool_at(C2AtomicParseAccess& access, Node* expected_val,
640 Node* new_val, const Type* value_type) const {
641 C2AccessFence fence(access);
642 resolve_address(access);
643 return atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
644}
645
646Node* BarrierSetC2::atomic_xchg_at(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const {
647 C2AccessFence fence(access);
648 resolve_address(access);
649 return atomic_xchg_at_resolved(access, new_val, value_type);
650}
651
652Node* BarrierSetC2::atomic_add_at(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const {
653 C2AccessFence fence(access);
654 resolve_address(access);
655 return atomic_add_at_resolved(access, new_val, value_type);
656}
657
658int BarrierSetC2::arraycopy_payload_base_offset(bool is_array) {
 659 // Exclude the header but include the array length, to copy in 8-byte words.
660 // Can't use base_offset_in_bytes(bt) since basic type is unknown.
661 int base_off = is_array ? arrayOopDesc::length_offset_in_bytes() :
662 instanceOopDesc::base_offset_in_bytes();
663 // base_off:
664 // 8 - 32-bit VM
665 // 12 - 64-bit VM, compressed klass
666 // 16 - 64-bit VM, normal klass
667 if (base_off % BytesPerLong != 0) {
 668 assert(UseCompressedClassPointers, "");
669 if (is_array) {
 670 // Exclude the length, to copy in 8-byte words.
671 base_off += sizeof(int);
672 } else {
 673 // Include the klass, to copy in 8-byte words.
674 base_off = instanceOopDesc::klass_offset_in_bytes();
675 }
 676 assert(base_off % BytesPerLong == 0, "expect 8 bytes alignment");
677 }
678 return base_off;
679}
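A worked example of the adjustment above, using the offsets listed in the comment (64-bit VM with compressed class pointers):
    // is_array:  base_off = length_offset_in_bytes() = 12; 12 % 8 != 0, so
    //            base_off += sizeof(int) -> 16: the 4-byte length field is
    //            excluded and the offset is 8-byte aligned.
    // !is_array: base_off = base_offset_in_bytes() = 12; 12 % 8 != 0, so
    //            base_off = klass_offset_in_bytes() = 8 (just past the mark
    //            word): the narrow klass is included to reach alignment.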
680
681void BarrierSetC2::clone(GraphKit* kit, Node* src_base, Node* dst_base, Node* size, bool is_array) const {
682 int base_off = arraycopy_payload_base_offset(is_array);
683 Node* payload_size = size;
 684 Node* offset = kit->MakeConX(base_off);
 685 payload_size = kit->gvn().transform(new SubXNode(payload_size, offset));
 686 payload_size = kit->gvn().transform(new URShiftXNode(payload_size, kit->intcon(LogBytesPerLong)));
687 ArrayCopyNode* ac = ArrayCopyNode::make(kit, false, src_base, offset, dst_base, offset, payload_size, true, false);
688 if (is_array) {
689 ac->set_clone_array();
690 } else {
691 ac->set_clone_inst();
692 }
693 Node* n = kit->gvn().transform(ac);
694 if (n == ac) {
695 const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
696 ac->set_adr_type(TypeRawPtr::BOTTOM);
697 kit->set_predefined_output_for_runtime_call(ac, ac->in(TypeFunc::Memory), raw_adr_type);
698 } else {
699 kit->set_all_memory(n);
700 }
701}
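Continuing that example for clone() (a hypothetical 24-byte instance, base_off = 8): payload_size = (24 - 8) >> LogBytesPerLong = 2, so the ArrayCopyNode copies two 8-byte words starting at src_base + 8 and dst_base + 8.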
702
703Node* BarrierSetC2::obj_allocate(PhaseMacroExpand* macro, Node* mem, Node* toobig_false, Node* size_in_bytes,
704 Node*& i_o, Node*& needgc_ctrl,
705 Node*& fast_oop_ctrl, Node*& fast_oop_rawmem,
706 intx prefetch_lines) const {
707
708 Node* eden_top_adr;
709 Node* eden_end_adr;
710
711 macro->set_eden_pointers(eden_top_adr, eden_end_adr);
712
713 // Load Eden::end. Loop invariant and hoisted.
714 //
715 // Note: We set the control input on "eden_end" and "old_eden_top" when using
716 // a TLAB to work around a bug where these values were being moved across
 717 // a safepoint. These are not oops, so they cannot be included in the oop
718 // map, but they can be changed by a GC. The proper way to fix this would
719 // be to set the raw memory state when generating a SafepointNode. However
720 // this will require extensive changes to the loop optimization in order to
721 // prevent a degradation of the optimization.
722 // See comment in memnode.hpp, around line 227 in class LoadPNode.
723 Node *eden_end = macro->make_load(toobig_false, mem, eden_end_adr, 0, TypeRawPtr::BOTTOM, T_ADDRESS);
724
725 // We need a Region for the loop-back contended case.
726 enum { fall_in_path = 1, contended_loopback_path = 2 };
727 Node *contended_region;
728 Node *contended_phi_rawmem;
729 if (UseTLAB) {
730 contended_region = toobig_false;
731 contended_phi_rawmem = mem;
732 } else {
733 contended_region = new RegionNode(3);
734 contended_phi_rawmem = new PhiNode(contended_region, Type::MEMORY, TypeRawPtr::BOTTOM);
735 // Now handle the passing-too-big test. We fall into the contended
736 // loop-back merge point.
737 contended_region ->init_req(fall_in_path, toobig_false);
738 contended_phi_rawmem->init_req(fall_in_path, mem);
739 macro->transform_later(contended_region);
740 macro->transform_later(contended_phi_rawmem);
741 }
742
743 // Load(-locked) the heap top.
744 // See note above concerning the control input when using a TLAB
745 Node *old_eden_top = UseTLAB
746 ? new LoadPNode (toobig_false, contended_phi_rawmem, eden_top_adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered)
747 : new LoadPLockedNode(contended_region, contended_phi_rawmem, eden_top_adr, MemNode::acquire);
748
749 macro->transform_later(old_eden_top);
750 // Add to heap top to get a new heap top
751 Node *new_eden_top = new AddPNode(macro->top(), old_eden_top, size_in_bytes);
752 macro->transform_later(new_eden_top);
753 // Check for needing a GC; compare against heap end
754 Node *needgc_cmp = new CmpPNode(new_eden_top, eden_end);
755 macro->transform_later(needgc_cmp);
756 Node *needgc_bol = new BoolNode(needgc_cmp, BoolTest::ge);
757 macro->transform_later(needgc_bol);
 758 IfNode *needgc_iff = new IfNode(contended_region, needgc_bol, PROB_UNLIKELY_MAG(4), COUNT_UNKNOWN);
759 macro->transform_later(needgc_iff);
760
761 // Plug the failing-heap-space-need-gc test into the slow-path region
762 Node *needgc_true = new IfTrueNode(needgc_iff);
763 macro->transform_later(needgc_true);
764 needgc_ctrl = needgc_true;
765
766 // No need for a GC. Setup for the Store-Conditional
767 Node *needgc_false = new IfFalseNode(needgc_iff);
768 macro->transform_later(needgc_false);
769
770 i_o = macro->prefetch_allocation(i_o, needgc_false, contended_phi_rawmem,
771 old_eden_top, new_eden_top, prefetch_lines);
772
773 Node* fast_oop = old_eden_top;
774
775 // Store (-conditional) the modified eden top back down.
776 // StorePConditional produces flags for a test PLUS a modified raw
777 // memory state.
778 if (UseTLAB) {
779 Node* store_eden_top =
780 new StorePNode(needgc_false, contended_phi_rawmem, eden_top_adr,
781 TypeRawPtr::BOTTOM, new_eden_top, MemNode::unordered);
782 macro->transform_later(store_eden_top);
783 fast_oop_ctrl = needgc_false; // No contention, so this is the fast path
784 fast_oop_rawmem = store_eden_top;
785 } else {
786 Node* store_eden_top =
787 new StorePConditionalNode(needgc_false, contended_phi_rawmem, eden_top_adr,
788 new_eden_top, fast_oop/*old_eden_top*/);
789 macro->transform_later(store_eden_top);
790 Node *contention_check = new BoolNode(store_eden_top, BoolTest::ne);
791 macro->transform_later(contention_check);
792 store_eden_top = new SCMemProjNode(store_eden_top);
793 macro->transform_later(store_eden_top);
794
795 // If not using TLABs, check to see if there was contention.
 796 IfNode *contention_iff = new IfNode (needgc_false, contention_check, PROB_MIN, COUNT_UNKNOWN);
797 macro->transform_later(contention_iff);
798 Node *contention_true = new IfTrueNode(contention_iff);
799 macro->transform_later(contention_true);
800 // If contention, loopback and try again.
801 contended_region->init_req(contended_loopback_path, contention_true);
802 contended_phi_rawmem->init_req(contended_loopback_path, store_eden_top);
803
804 // Fast-path succeeded with no contention!
805 Node *contention_false = new IfFalseNode(contention_iff);
806 macro->transform_later(contention_false);
807 fast_oop_ctrl = contention_false;
808
809 // Bump total allocated bytes for this thread
810 Node* thread = new ThreadLocalNode();
811 macro->transform_later(thread);
812 Node* alloc_bytes_adr = macro->basic_plus_adr(macro->top()/*not oop*/, thread,
813 in_bytes(JavaThread::allocated_bytes_offset()));
814 Node* alloc_bytes = macro->make_load(fast_oop_ctrl, store_eden_top, alloc_bytes_adr,
815 0, TypeLong::LONG, T_LONG);
 816#ifdef _LP64
817 Node* alloc_size = size_in_bytes;
818#else
819 Node* alloc_size = new ConvI2LNode(size_in_bytes);
820 macro->transform_later(alloc_size);
821#endif
822 Node* new_alloc_bytes = new AddLNode(alloc_bytes, alloc_size);
823 macro->transform_later(new_alloc_bytes);
824 fast_oop_rawmem = macro->make_store(fast_oop_ctrl, store_eden_top, alloc_bytes_adr,
825 0, new_alloc_bytes, T_LONG);
826 }
827 return fast_oop;
828}
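The node graph assembled above is the classic bump-the-pointer fast path. As a control-flow sketch only (C-like pseudocode, not the Ideal nodes actually emitted):
    // retry:
    //   top     = *eden_top_adr;                   // load(-locked) heap top
    //   new_top = top + size_in_bytes;             // bump the pointer
    //   if (new_top >= *eden_end_adr)
    //     goto needgc_ctrl;                        // slow path: needs GC
    //   if (UseTLAB)
    //     *eden_top_adr = new_top;                 // plain store, no contention
    //   else if (!store_conditional(eden_top_adr, new_top))
    //     goto retry;                              // contended: loop back
    //   // the non-TLAB path also bumps thread->allocated_bytes by size_in_bytes
    //   return top;                                // fast_oop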
829
 830#define XTOP LP64_ONLY(COMMA phase->top())
831
832void BarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const {
833 Node* ctrl = ac->in(TypeFunc::Control);
834 Node* mem = ac->in(TypeFunc::Memory);
835 Node* src = ac->in(ArrayCopyNode::Src);
836 Node* src_offset = ac->in(ArrayCopyNode::SrcPos);
837 Node* dest = ac->in(ArrayCopyNode::Dest);
838 Node* dest_offset = ac->in(ArrayCopyNode::DestPos);
839 Node* length = ac->in(ArrayCopyNode::Length);
840
841 Node* payload_src = phase->basic_plus_adr(src, src_offset);
842 Node* payload_dst = phase->basic_plus_adr(dest, dest_offset);
843
844 const char* copyfunc_name = "arraycopy";
 845 address copyfunc_addr = phase->basictype2arraycopy(T_LONG, NULL, NULL, true, copyfunc_name, true);
846
847 const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
848 const TypeFunc* call_type = OptoRuntime::fast_arraycopy_Type();
849
850 Node* call = phase->make_leaf_call(ctrl, mem, call_type, copyfunc_addr, copyfunc_name, raw_adr_type, payload_src, payload_dst, length XTOP);
851 phase->transform_later(call);
852
853 phase->igvn().replace_node(ac, call);
854}
855
856#undef XTOP