Bug Summary

File: jdk/src/hotspot/share/opto/matcher.cpp
Warning: line 1186, column 9
Called C++ object pointer is null

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name matcher.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 2 -mthread-model posix -fno-delete-null-pointer-checks -mframe-pointer=all -relaxed-aliasing -fmath-errno -fno-rounding-math -masm-verbose -mconstructor-aliases -munwind-tables -target-cpu x86-64 -dwarf-column-info -fno-split-dwarf-inlining -debugger-tuning=gdb -resource-dir /usr/lib/llvm-10/lib/clang/10.0.0 -I /home/daniel/Projects/java/jdk/build/linux-x86_64-server-fastdebug/hotspot/variant-server/libjvm/objs/precompiled -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -D __STDC_CONSTANT_MACROS -D _GNU_SOURCE -D _REENTRANT -D LIBC=gnu -D LINUX -D VM_LITTLE_ENDIAN -D _LP64=1 -D ASSERT -D CHECK_UNHANDLED_OOPS -D TARGET_ARCH_x86 -D INCLUDE_SUFFIX_OS=_linux -D INCLUDE_SUFFIX_CPU=_x86 -D INCLUDE_SUFFIX_COMPILER=_gcc -D TARGET_COMPILER_gcc -D AMD64 -D HOTSPOT_LIB_ARCH="amd64" -D COMPILER1 -D COMPILER2 -I /home/daniel/Projects/java/jdk/build/linux-x86_64-server-fastdebug/hotspot/variant-server/gensrc/adfiles -I /home/daniel/Projects/java/jdk/src/hotspot/share -I /home/daniel/Projects/java/jdk/src/hotspot/os/linux -I /home/daniel/Projects/java/jdk/src/hotspot/os/posix -I /home/daniel/Projects/java/jdk/src/hotspot/cpu/x86 -I /home/daniel/Projects/java/jdk/src/hotspot/os_cpu/linux_x86 -I /home/daniel/Projects/java/jdk/build/linux-x86_64-server-fastdebug/hotspot/variant-server/gensrc -I /home/daniel/Projects/java/jdk/src/hotspot/share/precompiled -I /home/daniel/Projects/java/jdk/src/hotspot/share/include -I /home/daniel/Projects/java/jdk/src/hotspot/os/posix/include -I /home/daniel/Projects/java/jdk/build/linux-x86_64-server-fastdebug/support/modules_include/java.base -I /home/daniel/Projects/java/jdk/build/linux-x86_64-server-fastdebug/support/modules_include/java.base/linux -I /home/daniel/Projects/java/jdk/src/java.base/share/native/libjimage -I /home/daniel/Projects/java/jdk/build/linux-x86_64-server-fastdebug/hotspot/variant-server/gensrc/adfiles -I /home/daniel/Projects/java/jdk/src/hotspot/share -I /home/daniel/Projects/java/jdk/src/hotspot/os/linux -I /home/daniel/Projects/java/jdk/src/hotspot/os/posix -I /home/daniel/Projects/java/jdk/src/hotspot/cpu/x86 -I /home/daniel/Projects/java/jdk/src/hotspot/os_cpu/linux_x86 -I /home/daniel/Projects/java/jdk/build/linux-x86_64-server-fastdebug/hotspot/variant-server/gensrc -D _FORTIFY_SOURCE=2 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.5.0/../../../../include/c++/7.5.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.5.0/../../../../include/x86_64-linux-gnu/c++/7.5.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.5.0/../../../../include/x86_64-linux-gnu/c++/7.5.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.5.0/../../../../include/c++/7.5.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-10/lib/clang/10.0.0/include 
-internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -Wno-format-zero-length -Wno-unused-parameter -Wno-unused -Wno-parentheses -Wno-comment -Wno-unknown-pragmas -Wno-address -Wno-delete-non-virtual-dtor -Wno-char-subscripts -Wno-array-bounds -Wno-int-in-bool-context -Wno-ignored-qualifiers -Wno-missing-field-initializers -Wno-implicit-fallthrough -Wno-empty-body -Wno-strict-overflow -Wno-sequence-point -Wno-maybe-uninitialized -Wno-misleading-indentation -Wno-cast-function-type -Wno-shift-negative-value -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /home/daniel/Projects/java/jdk/make/hotspot -ferror-limit 19 -fmessage-length 0 -fvisibility hidden -stack-protector 1 -fno-rtti -fgnuc-version=4.2.1 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -faddrsig -o /home/daniel/Projects/java/scan/2021-12-21-193737-8510-1 -x c++ /home/daniel/Projects/java/jdk/src/hotspot/share/opto/matcher.cpp

/home/daniel/Projects/java/jdk/src/hotspot/share/opto/matcher.cpp

1/*
2 * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#include "precompiled.hpp"
26#include "gc/shared/barrierSet.hpp"
27#include "gc/shared/c2/barrierSetC2.hpp"
28#include "memory/allocation.inline.hpp"
29#include "memory/resourceArea.hpp"
30#include "oops/compressedOops.hpp"
31#include "opto/ad.hpp"
32#include "opto/addnode.hpp"
33#include "opto/callnode.hpp"
34#include "opto/idealGraphPrinter.hpp"
35#include "opto/matcher.hpp"
36#include "opto/memnode.hpp"
37#include "opto/movenode.hpp"
38#include "opto/opcodes.hpp"
39#include "opto/regmask.hpp"
40#include "opto/rootnode.hpp"
41#include "opto/runtime.hpp"
42#include "opto/type.hpp"
43#include "opto/vectornode.hpp"
44#include "runtime/os.hpp"
45#include "runtime/sharedRuntime.hpp"
46#include "utilities/align.hpp"
47
48OptoReg::Name OptoReg::c_frame_pointer;
49
50const RegMask *Matcher::idealreg2regmask[_last_machine_leaf];
51RegMask Matcher::mreg2regmask[_last_Mach_Reg];
52RegMask Matcher::caller_save_regmask;
53RegMask Matcher::caller_save_regmask_exclude_soe;
54RegMask Matcher::mh_caller_save_regmask;
55RegMask Matcher::mh_caller_save_regmask_exclude_soe;
56RegMask Matcher::STACK_ONLY_mask;
57RegMask Matcher::c_frame_ptr_mask;
58const uint Matcher::_begin_rematerialize = _BEGIN_REMATERIALIZE;
59const uint Matcher::_end_rematerialize = _END_REMATERIALIZE;
60
61//---------------------------Matcher-------------------------------------------
62Matcher::Matcher()
63: PhaseTransform( Phase::Ins_Select ),
64 _states_arena(Chunk::medium_size, mtCompiler),
65 _visited(&_states_arena),
66 _shared(&_states_arena),
67 _dontcare(&_states_arena),
68 _reduceOp(reduceOp), _leftOp(leftOp), _rightOp(rightOp),
69 _swallowed(swallowed),
70 _begin_inst_chain_rule(_BEGIN_INST_CHAIN_RULE),
71 _end_inst_chain_rule(_END_INST_CHAIN_RULE),
72 _must_clone(must_clone),
73 _shared_nodes(C->comp_arena()),
74#ifndef PRODUCT
75 _old2new_map(C->comp_arena()),
76 _new2old_map(C->comp_arena()),
77 _reused(C->comp_arena()),
78#endif // !PRODUCT
79 _allocation_started(false),
80 _ruleName(ruleName),
81 _register_save_policy(register_save_policy),
82 _c_reg_save_policy(c_reg_save_policy),
83 _register_save_type(register_save_type) {
84 C->set_matcher(this);
85
86  idealreg2spillmask  [Op_RegI] = NULL;
87  idealreg2spillmask  [Op_RegN] = NULL;
88  idealreg2spillmask  [Op_RegL] = NULL;
89  idealreg2spillmask  [Op_RegF] = NULL;
90  idealreg2spillmask  [Op_RegD] = NULL;
91  idealreg2spillmask  [Op_RegP] = NULL;
92  idealreg2spillmask  [Op_VecA] = NULL;
93  idealreg2spillmask  [Op_VecS] = NULL;
94  idealreg2spillmask  [Op_VecD] = NULL;
95  idealreg2spillmask  [Op_VecX] = NULL;
96  idealreg2spillmask  [Op_VecY] = NULL;
97  idealreg2spillmask  [Op_VecZ] = NULL;
98  idealreg2spillmask  [Op_RegFlags] = NULL;
99  idealreg2spillmask  [Op_RegVectMask] = NULL;
100
101  idealreg2debugmask  [Op_RegI] = NULL;
102  idealreg2debugmask  [Op_RegN] = NULL;
103  idealreg2debugmask  [Op_RegL] = NULL;
104  idealreg2debugmask  [Op_RegF] = NULL;
105  idealreg2debugmask  [Op_RegD] = NULL;
106  idealreg2debugmask  [Op_RegP] = NULL;
107  idealreg2debugmask  [Op_VecA] = NULL;
108  idealreg2debugmask  [Op_VecS] = NULL;
109  idealreg2debugmask  [Op_VecD] = NULL;
110  idealreg2debugmask  [Op_VecX] = NULL;
111  idealreg2debugmask  [Op_VecY] = NULL;
112  idealreg2debugmask  [Op_VecZ] = NULL;
113  idealreg2debugmask  [Op_RegFlags] = NULL;
114  idealreg2debugmask  [Op_RegVectMask] = NULL;
115
116  idealreg2mhdebugmask[Op_RegI] = NULL;
117  idealreg2mhdebugmask[Op_RegN] = NULL;
118  idealreg2mhdebugmask[Op_RegL] = NULL;
119  idealreg2mhdebugmask[Op_RegF] = NULL;
120  idealreg2mhdebugmask[Op_RegD] = NULL;
121  idealreg2mhdebugmask[Op_RegP] = NULL;
122  idealreg2mhdebugmask[Op_VecA] = NULL;
123  idealreg2mhdebugmask[Op_VecS] = NULL;
124  idealreg2mhdebugmask[Op_VecD] = NULL;
125  idealreg2mhdebugmask[Op_VecX] = NULL;
126  idealreg2mhdebugmask[Op_VecY] = NULL;
127  idealreg2mhdebugmask[Op_VecZ] = NULL;
128  idealreg2mhdebugmask[Op_RegFlags] = NULL;
129  idealreg2mhdebugmask[Op_RegVectMask] = NULL;
130
131  debug_only(_mem_node = NULL;)   // Ideal memory node consumed by mach node
132}
133
134//------------------------------warp_incoming_stk_arg------------------------
135// This warps a VMReg into an OptoReg::Name
136OptoReg::Name Matcher::warp_incoming_stk_arg( VMReg reg ) {
137 OptoReg::Name warped;
138 if( reg->is_stack() ) { // Stack slot argument?
139 warped = OptoReg::add(_old_SP, reg->reg2stack() );
140 warped = OptoReg::add(warped, C->out_preserve_stack_slots());
141 if( warped >= _in_arg_limit )
142 _in_arg_limit = OptoReg::add(warped, 1); // Bump max stack slot seen
143 if (!RegMask::can_represent_arg(warped)) {
144 // the compiler cannot represent this method's calling sequence
145 C->record_method_not_compilable("unsupported incoming calling sequence");
146 return OptoReg::Bad;
147 }
148 return warped;
149 }
150 return OptoReg::as_OptoReg(reg);
151}
152
153//---------------------------compute_old_SP------------------------------------
154OptoReg::Name Compile::compute_old_SP() {
155 int fixed = fixed_slots();
156 int preserve = in_preserve_stack_slots();
157 return OptoReg::stack2reg(align_up(fixed + preserve, (int)Matcher::stack_alignment_in_slots()));
158}
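
As a rough illustration of the rounding done by compute_old_SP() above, here is a minimal standalone sketch; the slot counts and the 2-slot alignment are made-up example values, not taken from this build:

#include <cstdio>

// Round 'slots' up to the next multiple of 'alignment' (a power of two),
// mirroring what align_up() does for stack slot counts.
static int align_up_slots(int slots, int alignment) {
  return (slots + alignment - 1) & ~(alignment - 1);
}

int main() {
  int fixed = 3;      // hypothetical fixed_slots()
  int preserve = 4;   // hypothetical in_preserve_stack_slots()
  int alignment = 2;  // hypothetical Matcher::stack_alignment_in_slots()
  // 3 + 4 = 7 slots rounds up to 8, so the old SP lands on an even slot boundary.
  printf("old SP slot = %d\n", align_up_slots(fixed + preserve, alignment));
  return 0;
}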
159
160
161
162#ifdef ASSERT
163void Matcher::verify_new_nodes_only(Node* xroot) {
164 // Make sure that the new graph only references new nodes
165 ResourceMark rm;
166 Unique_Node_List worklist;
167 VectorSet visited;
168 worklist.push(xroot);
169 while (worklist.size() > 0) {
170 Node* n = worklist.pop();
171 visited.set(n->_idx);
172    assert(C->node_arena()->contains(n), "dead node");
173 for (uint j = 0; j < n->req(); j++) {
174 Node* in = n->in(j);
175      if (in != NULL) {
176        assert(C->node_arena()->contains(in), "dead node");
177 if (!visited.test(in->_idx)) {
178 worklist.push(in);
179 }
180 }
181 }
182 }
183}
184#endif
185
186
187//---------------------------match---------------------------------------------
188void Matcher::match( ) {
189 if( MaxLabelRootDepth < 100 ) { // Too small?
  1. Assuming 'MaxLabelRootDepth' is >= 100
  2. Taking false branch
190    assert(false, "invalid MaxLabelRootDepth, increase it to 100 minimum");
191 MaxLabelRootDepth = 100;
192 }
193 // One-time initialization of some register masks.
194 init_spill_mask( C->root()->in(1) );
195 _return_addr_mask = return_addr();
196#ifdef _LP64
197 // Pointers take 2 slots in 64-bit land
198 _return_addr_mask.Insert(OptoReg::add(return_addr(),1));
199#endif
200
201 // Map a Java-signature return type into return register-value
202 // machine registers for 0, 1 and 2 returned values.
203 const TypeTuple *range = C->tf()->range();
204 if( range->cnt() > TypeFunc::Parms ) { // If not a void function
  3. Assuming the condition is false
  4. Taking false branch
205 // Get ideal-register return type
206 uint ireg = range->field_at(TypeFunc::Parms)->ideal_reg();
207 // Get machine return register
208 uint sop = C->start()->Opcode();
209 OptoRegPair regs = return_value(ireg);
210
211 // And mask for same
212 _return_value_mask = RegMask(regs.first());
213 if( OptoReg::is_valid(regs.second()) )
214 _return_value_mask.Insert(regs.second());
215 }
216
217 // ---------------
218 // Frame Layout
219
220 // Need the method signature to determine the incoming argument types,
221 // because the types determine which registers the incoming arguments are
222 // in, and this affects the matched code.
223 const TypeTuple *domain = C->tf()->domain();
224 uint argcnt = domain->cnt() - TypeFunc::Parms;
225  BasicType *sig_bt = NEW_RESOURCE_ARRAY( BasicType, argcnt );
226  VMRegPair *vm_parm_regs = NEW_RESOURCE_ARRAY( VMRegPair, argcnt );
227  _parm_regs = NEW_RESOURCE_ARRAY( OptoRegPair, argcnt );
228  _calling_convention_mask = NEW_RESOURCE_ARRAY( RegMask, argcnt );
229 uint i;
230 for( i = 0; i<argcnt; i++ ) {
  5. Assuming 'i' is >= 'argcnt'
  6. Loop condition is false. Execution continues on line 236
231 sig_bt[i] = domain->field_at(i+TypeFunc::Parms)->basic_type();
232 }
233
234 // Pass array of ideal registers and length to USER code (from the AD file)
235 // that will convert this to an array of register numbers.
236 const StartNode *start = C->start();
237 start->calling_convention( sig_bt, vm_parm_regs, argcnt );
238#ifdef ASSERT
239 // Sanity check users' calling convention. Real handy while trying to
240 // get the initial port correct.
241 { for (uint i = 0; i<argcnt; i++) {
  7. Loop condition is false. Execution continues on line 268
242 if( !vm_parm_regs[i].first()->is_valid() && !vm_parm_regs[i].second()->is_valid() ) {
243      assert(domain->field_at(i+TypeFunc::Parms)==Type::HALF, "only allowed on halve" );
244 _parm_regs[i].set_bad();
245 continue;
246 }
247 VMReg parm_reg = vm_parm_regs[i].first();
248      assert(parm_reg->is_valid(), "invalid arg?");
249 if (parm_reg->is_reg()) {
250 OptoReg::Name opto_parm_reg = OptoReg::as_OptoReg(parm_reg);
251      assert(can_be_java_arg(opto_parm_reg) ||
252             C->stub_function() == CAST_FROM_FN_PTR(address, OptoRuntime::rethrow_C) ||
253             opto_parm_reg == inline_cache_reg(),
254             "parameters in register must be preserved by runtime stubs");
255 }
256 for (uint j = 0; j < i; j++) {
257        assert(parm_reg != vm_parm_regs[j].first(),
258               "calling conv. must produce distinct regs");
259 }
260 }
261 }
262#endif
263
264 // Do some initial frame layout.
265
266 // Compute the old incoming SP (may be called FP) as
267 // OptoReg::stack0() + locks + in_preserve_stack_slots + pad2.
268 _old_SP = C->compute_old_SP();
269  assert( is_even(_old_SP), "must be even" );
  8. Taking false branch
  9. Loop condition is false. Exiting loop
270
271 // Compute highest incoming stack argument as
272 // _old_SP + out_preserve_stack_slots + incoming argument size.
273 _in_arg_limit = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
274  assert( is_even(_in_arg_limit), "out_preserve must be even" );
  10. Taking false branch
  11. Loop condition is false. Exiting loop
275 for( i = 0; i < argcnt; i++ ) {
  12. Loop condition is false. Execution continues on line 304
276 // Permit args to have no register
277 _calling_convention_mask[i].Clear();
278 if( !vm_parm_regs[i].first()->is_valid() && !vm_parm_regs[i].second()->is_valid() ) {
279 continue;
280 }
281 // calling_convention returns stack arguments as a count of
282 // slots beyond OptoReg::stack0()/VMRegImpl::stack0. We need to convert this to
283 // the allocators point of view, taking into account all the
284 // preserve area, locks & pad2.
285
286 OptoReg::Name reg1 = warp_incoming_stk_arg(vm_parm_regs[i].first());
287 if( OptoReg::is_valid(reg1))
288 _calling_convention_mask[i].Insert(reg1);
289
290 OptoReg::Name reg2 = warp_incoming_stk_arg(vm_parm_regs[i].second());
291 if( OptoReg::is_valid(reg2))
292 _calling_convention_mask[i].Insert(reg2);
293
294 // Saved biased stack-slot register number
295 _parm_regs[i].set_pair(reg2, reg1);
296 }
297
298 // Finally, make sure the incoming arguments take up an even number of
299 // words, in case the arguments or locals need to contain doubleword stack
300 // slots. The rest of the system assumes that stack slot pairs (in
301 // particular, in the spill area) which look aligned will in fact be
302 // aligned relative to the stack pointer in the target machine. Double
303 // stack slots will always be allocated aligned.
304 _new_SP = OptoReg::Name(align_up(_in_arg_limit, (int)RegMask::SlotsPerLong));
305
306 // Compute highest outgoing stack argument as
307 // _new_SP + out_preserve_stack_slots + max(outgoing argument size).
308 _out_arg_limit = OptoReg::add(_new_SP, C->out_preserve_stack_slots());
309  assert( is_even(_out_arg_limit), "out_preserve must be even" );
  13. Taking false branch
  14. Loop condition is false. Exiting loop
310
311 if (!RegMask::can_represent_arg(OptoReg::add(_out_arg_limit,-1))) {
  15. Taking true branch
312 // the compiler cannot represent this method's calling sequence
313 C->record_method_not_compilable("must be able to represent all call arguments in reg mask");
314 }
315
316 if (C->failing()) return; // bailed out on incoming arg failure
  16. Taking false branch
317
318 // ---------------
319 // Collect roots of matcher trees. Every node for which
320 // _shared[_idx] is cleared is guaranteed to not be shared, and thus
321 // can be a valid interior of some tree.
322 find_shared( C->root() );
323 find_shared( C->top() );
324
325 C->print_method(PHASE_BEFORE_MATCHING);
326
327 // Create new ideal node ConP #NULL even if it does exist in old space
328 // to avoid false sharing if the corresponding mach node is not used.
329 // The corresponding mach node is only used in rare cases for derived
330 // pointers.
331 Node* new_ideal_null = ConNode::make(TypePtr::NULL_PTR);
332
333 // Swap out to old-space; emptying new-space
334 Arena *old = C->node_arena()->move_contents(C->old_arena());
335
336 // Save debug and profile information for nodes in old space:
337 _old_node_note_array = C->node_note_array();
338  if (_old_node_note_array != NULL) {
  17. Assuming field '_old_node_note_array' is equal to NULL
  18. Taking false branch
339 C->set_node_note_array(new(C->comp_arena()) GrowableArray<Node_Notes*>
340 (C->comp_arena(), _old_node_note_array->length(),
341                            0, NULL));
342 }
343
344 // Pre-size the new_node table to avoid the need for range checks.
345 grow_new_node_array(C->unique());
346
347 // Reset node counter so MachNodes start with _idx at 0
348 int live_nodes = C->live_nodes();
349 C->set_unique(0);
350 C->reset_dead_node_list();
351
352 // Recursively match trees from old space into new space.
353 // Correct leaves of new-space Nodes; they point to old-space.
354 _visited.clear();
355 C->set_cached_top_node(xform( C->top(), live_nodes ));
  19. Calling 'Matcher::xform'
356 if (!C->failing()) {
357 Node* xroot = xform( C->root(), 1 );
358    if (xroot == NULL) {
359 Matcher::soft_match_failure(); // recursive matching process failed
360 C->record_method_not_compilable("instruction match failed");
361 } else {
362 // During matching shared constants were attached to C->root()
363 // because xroot wasn't available yet, so transfer the uses to
364 // the xroot.
365 for( DUIterator_Fast jmax, j = C->root()->fast_outs(jmax); j < jmax; j++ ) {
366 Node* n = C->root()->fast_out(j);
367 if (C->node_arena()->contains(n)) {
368          assert(n->in(0) == C->root(), "should be control user");
369 n->set_req(0, xroot);
370 --j;
371 --jmax;
372 }
373 }
374
375 // Generate new mach node for ConP #NULL
376      assert(new_ideal_null != NULL, "sanity");
377 _mach_null = match_tree(new_ideal_null);
378 // Don't set control, it will confuse GCM since there are no uses.
379 // The control will be set when this node is used first time
380 // in find_base_for_derived().
381      assert(_mach_null != NULL, "");
382
383      C->set_root(xroot->is_Root() ? xroot->as_Root() : NULL);
384
385#ifdef ASSERT
386 verify_new_nodes_only(xroot);
387#endif
388 }
389 }
390  if (C->top() == NULL || C->root() == NULL) {
391 C->record_method_not_compilable("graph lost"); // %%% cannot happen?
392 }
393 if (C->failing()) {
394 // delete old;
395 old->destruct_contents();
396 return;
397 }
398  assert( C->top(), "" );
399  assert( C->root(), "" );
400 validate_null_checks();
401
402 // Now smoke old-space
403 NOT_DEBUG( old->destruct_contents() );
404
405 // ------------------------
406 // Set up save-on-entry registers.
407 Fixup_Save_On_Entry( );
408
409 { // Cleanup mach IR after selection phase is over.
410 Compile::TracePhase tp("postselect_cleanup", &timers[_t_postselect_cleanup]);
411 do_postselect_cleanup();
412 if (C->failing()) return;
413    assert(verify_after_postselect_cleanup(), "");
414 }
415}
416
417//------------------------------Fixup_Save_On_Entry----------------------------
418// The stated purpose of this routine is to take care of save-on-entry
419// registers. However, the overall goal of the Match phase is to convert into
420// machine-specific instructions which have RegMasks to guide allocation.
421// So what this procedure really does is put a valid RegMask on each input
422// to the machine-specific variations of all Return, TailCall and Halt
423// instructions. It also adds edges to define the save-on-entry values (and of
424// course gives them a mask).
425
426static RegMask *init_input_masks( uint size, RegMask &ret_adr, RegMask &fp ) {
427  RegMask *rms = NEW_RESOURCE_ARRAY( RegMask, size );
428 // Do all the pre-defined register masks
429 rms[TypeFunc::Control ] = RegMask::Empty;
430 rms[TypeFunc::I_O ] = RegMask::Empty;
431 rms[TypeFunc::Memory ] = RegMask::Empty;
432 rms[TypeFunc::ReturnAdr] = ret_adr;
433 rms[TypeFunc::FramePtr ] = fp;
434 return rms;
435}
436
437const int Matcher::scalable_predicate_reg_slots() {
438  assert(Matcher::has_predicated_vectors() && Matcher::supports_scalable_vector(),
439         "scalable predicate vector should be supported");
440 int vector_reg_bit_size = Matcher::scalable_vector_reg_size(T_BYTE) << LogBitsPerByte;
441 // We assume each predicate register is one-eighth of the size of
442 // scalable vector register, one mask bit per vector byte.
443 int predicate_reg_bit_size = vector_reg_bit_size >> 3;
444 // Compute number of slots which is required when scalable predicate
445 // register is spilled. E.g. if scalable vector register is 640 bits,
446 // predicate register is 80 bits, which is 2.5 * slots.
447 // We will round up the slot number to power of 2, which is required
448 // by find_first_set().
449 int slots = predicate_reg_bit_size & (BitsPerInt - 1)
450 ? (predicate_reg_bit_size >> LogBitsPerInt) + 1
451 : predicate_reg_bit_size >> LogBitsPerInt;
452 return round_up_power_of_2(slots);
453}
454
455#define NOF_STACK_MASKS (3*13)
456
457// Create the initial stack mask used by values spilling to the stack.
458// Disallow any debug info in outgoing argument areas by setting the
459// initial mask accordingly.
460void Matcher::init_first_stack_mask() {
461
462 // Allocate storage for spill masks as masks for the appropriate load type.
463  RegMask *rms = (RegMask*)C->comp_arena()->AmallocWords(sizeof(RegMask) * NOF_STACK_MASKS);
464
465 // Initialize empty placeholder masks into the newly allocated arena
466  for (int i = 0; i < NOF_STACK_MASKS; i++) {
467 new (rms + i) RegMask();
468 }
469
470 idealreg2spillmask [Op_RegN] = &rms[0];
471 idealreg2spillmask [Op_RegI] = &rms[1];
472 idealreg2spillmask [Op_RegL] = &rms[2];
473 idealreg2spillmask [Op_RegF] = &rms[3];
474 idealreg2spillmask [Op_RegD] = &rms[4];
475 idealreg2spillmask [Op_RegP] = &rms[5];
476
477 idealreg2debugmask [Op_RegN] = &rms[6];
478 idealreg2debugmask [Op_RegI] = &rms[7];
479 idealreg2debugmask [Op_RegL] = &rms[8];
480 idealreg2debugmask [Op_RegF] = &rms[9];
481 idealreg2debugmask [Op_RegD] = &rms[10];
482 idealreg2debugmask [Op_RegP] = &rms[11];
483
484 idealreg2mhdebugmask[Op_RegN] = &rms[12];
485 idealreg2mhdebugmask[Op_RegI] = &rms[13];
486 idealreg2mhdebugmask[Op_RegL] = &rms[14];
487 idealreg2mhdebugmask[Op_RegF] = &rms[15];
488 idealreg2mhdebugmask[Op_RegD] = &rms[16];
489 idealreg2mhdebugmask[Op_RegP] = &rms[17];
490
491 idealreg2spillmask [Op_VecA] = &rms[18];
492 idealreg2spillmask [Op_VecS] = &rms[19];
493 idealreg2spillmask [Op_VecD] = &rms[20];
494 idealreg2spillmask [Op_VecX] = &rms[21];
495 idealreg2spillmask [Op_VecY] = &rms[22];
496 idealreg2spillmask [Op_VecZ] = &rms[23];
497
498 idealreg2debugmask [Op_VecA] = &rms[24];
499 idealreg2debugmask [Op_VecS] = &rms[25];
500 idealreg2debugmask [Op_VecD] = &rms[26];
501 idealreg2debugmask [Op_VecX] = &rms[27];
502 idealreg2debugmask [Op_VecY] = &rms[28];
503 idealreg2debugmask [Op_VecZ] = &rms[29];
504
505 idealreg2mhdebugmask[Op_VecA] = &rms[30];
506 idealreg2mhdebugmask[Op_VecS] = &rms[31];
507 idealreg2mhdebugmask[Op_VecD] = &rms[32];
508 idealreg2mhdebugmask[Op_VecX] = &rms[33];
509 idealreg2mhdebugmask[Op_VecY] = &rms[34];
510 idealreg2mhdebugmask[Op_VecZ] = &rms[35];
511
512 idealreg2spillmask [Op_RegVectMask] = &rms[36];
513 idealreg2debugmask [Op_RegVectMask] = &rms[37];
514 idealreg2mhdebugmask[Op_RegVectMask] = &rms[38];
515
516 OptoReg::Name i;
517
518 // At first, start with the empty mask
519 C->FIRST_STACK_mask().Clear();
520
521 // Add in the incoming argument area
522 OptoReg::Name init_in = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
523 for (i = init_in; i < _in_arg_limit; i = OptoReg::add(i,1)) {
524 C->FIRST_STACK_mask().Insert(i);
525 }
526 // Add in all bits past the outgoing argument area
527  guarantee(RegMask::can_represent_arg(OptoReg::add(_out_arg_limit,-1)),
528            "must be able to represent all call arguments in reg mask");
529 OptoReg::Name init = _out_arg_limit;
530 for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1)) {
531 C->FIRST_STACK_mask().Insert(i);
532 }
533 // Finally, set the "infinite stack" bit.
534 C->FIRST_STACK_mask().set_AllStack();
535
536 // Make spill masks. Registers for their class, plus FIRST_STACK_mask.
537 RegMask aligned_stack_mask = C->FIRST_STACK_mask();
538 // Keep spill masks aligned.
539 aligned_stack_mask.clear_to_pairs();
540  assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
541 RegMask scalable_stack_mask = aligned_stack_mask;
542
543 *idealreg2spillmask[Op_RegP] = *idealreg2regmask[Op_RegP];
544#ifdef _LP64
545 *idealreg2spillmask[Op_RegN] = *idealreg2regmask[Op_RegN];
546 idealreg2spillmask[Op_RegN]->OR(C->FIRST_STACK_mask());
547 idealreg2spillmask[Op_RegP]->OR(aligned_stack_mask);
548#else
549 idealreg2spillmask[Op_RegP]->OR(C->FIRST_STACK_mask());
550#endif
551 *idealreg2spillmask[Op_RegI] = *idealreg2regmask[Op_RegI];
552 idealreg2spillmask[Op_RegI]->OR(C->FIRST_STACK_mask());
553 *idealreg2spillmask[Op_RegL] = *idealreg2regmask[Op_RegL];
554 idealreg2spillmask[Op_RegL]->OR(aligned_stack_mask);
555 *idealreg2spillmask[Op_RegF] = *idealreg2regmask[Op_RegF];
556 idealreg2spillmask[Op_RegF]->OR(C->FIRST_STACK_mask());
557 *idealreg2spillmask[Op_RegD] = *idealreg2regmask[Op_RegD];
558 idealreg2spillmask[Op_RegD]->OR(aligned_stack_mask);
559
560 if (Matcher::has_predicated_vectors()) {
561 *idealreg2spillmask[Op_RegVectMask] = *idealreg2regmask[Op_RegVectMask];
562 idealreg2spillmask[Op_RegVectMask]->OR(aligned_stack_mask);
563 } else {
564 *idealreg2spillmask[Op_RegVectMask] = RegMask::Empty;
565 }
566
567 if (Matcher::vector_size_supported(T_BYTE,4)) {
568 *idealreg2spillmask[Op_VecS] = *idealreg2regmask[Op_VecS];
569 idealreg2spillmask[Op_VecS]->OR(C->FIRST_STACK_mask());
570 } else {
571 *idealreg2spillmask[Op_VecS] = RegMask::Empty;
572 }
573
574 if (Matcher::vector_size_supported(T_FLOAT,2)) {
575 // For VecD we need dual alignment and 8 bytes (2 slots) for spills.
576 // RA guarantees such alignment since it is needed for Double and Long values.
577 *idealreg2spillmask[Op_VecD] = *idealreg2regmask[Op_VecD];
578 idealreg2spillmask[Op_VecD]->OR(aligned_stack_mask);
579 } else {
580 *idealreg2spillmask[Op_VecD] = RegMask::Empty;
581 }
582
583 if (Matcher::vector_size_supported(T_FLOAT,4)) {
584 // For VecX we need quadro alignment and 16 bytes (4 slots) for spills.
585 //
586 // RA can use input arguments stack slots for spills but until RA
587 // we don't know frame size and offset of input arg stack slots.
588 //
589 // Exclude last input arg stack slots to avoid spilling vectors there
590 // otherwise vector spills could stomp over stack slots in caller frame.
591 OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
592 for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecX); k++) {
593 aligned_stack_mask.Remove(in);
594 in = OptoReg::add(in, -1);
595 }
596 aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecX);
597    assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
598 *idealreg2spillmask[Op_VecX] = *idealreg2regmask[Op_VecX];
599 idealreg2spillmask[Op_VecX]->OR(aligned_stack_mask);
600 } else {
601 *idealreg2spillmask[Op_VecX] = RegMask::Empty;
602 }
603
604 if (Matcher::vector_size_supported(T_FLOAT,8)) {
605 // For VecY we need octo alignment and 32 bytes (8 slots) for spills.
606 OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
607 for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecY); k++) {
608 aligned_stack_mask.Remove(in);
609 in = OptoReg::add(in, -1);
610 }
611 aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecY);
612    assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
613 *idealreg2spillmask[Op_VecY] = *idealreg2regmask[Op_VecY];
614 idealreg2spillmask[Op_VecY]->OR(aligned_stack_mask);
615 } else {
616 *idealreg2spillmask[Op_VecY] = RegMask::Empty;
617 }
618
619 if (Matcher::vector_size_supported(T_FLOAT,16)) {
620 // For VecZ we need enough alignment and 64 bytes (16 slots) for spills.
621 OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
622 for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecZ); k++) {
623 aligned_stack_mask.Remove(in);
624 in = OptoReg::add(in, -1);
625 }
626 aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecZ);
627    assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
628 *idealreg2spillmask[Op_VecZ] = *idealreg2regmask[Op_VecZ];
629 idealreg2spillmask[Op_VecZ]->OR(aligned_stack_mask);
630 } else {
631 *idealreg2spillmask[Op_VecZ] = RegMask::Empty;
632 }
633
634 if (Matcher::supports_scalable_vector()) {
635 int k = 1;
636 OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
637 if (Matcher::has_predicated_vectors()) {
638 // Exclude last input arg stack slots to avoid spilling vector register there,
639 // otherwise RegVectMask spills could stomp over stack slots in caller frame.
640 for (; (in >= init_in) && (k < scalable_predicate_reg_slots()); k++) {
641 scalable_stack_mask.Remove(in);
642 in = OptoReg::add(in, -1);
643 }
644
645 // For RegVectMask
646 scalable_stack_mask.clear_to_sets(scalable_predicate_reg_slots());
647      assert(scalable_stack_mask.is_AllStack(), "should be infinite stack");
648 *idealreg2spillmask[Op_RegVectMask] = *idealreg2regmask[Op_RegVectMask];
649 idealreg2spillmask[Op_RegVectMask]->OR(scalable_stack_mask);
650 }
651
652 // Exclude last input arg stack slots to avoid spilling vector register there,
653 // otherwise vector spills could stomp over stack slots in caller frame.
654 for (; (in >= init_in) && (k < scalable_vector_reg_size(T_FLOAT)); k++) {
655 scalable_stack_mask.Remove(in);
656 in = OptoReg::add(in, -1);
657 }
658
659 // For VecA
660 scalable_stack_mask.clear_to_sets(RegMask::SlotsPerVecA);
661    assert(scalable_stack_mask.is_AllStack(), "should be infinite stack");
662 *idealreg2spillmask[Op_VecA] = *idealreg2regmask[Op_VecA];
663 idealreg2spillmask[Op_VecA]->OR(scalable_stack_mask);
664 } else {
665 *idealreg2spillmask[Op_VecA] = RegMask::Empty;
666 }
667
668 if (UseFPUForSpilling) {
669 // This mask logic assumes that the spill operations are
670 // symmetric and that the registers involved are the same size.
671  // On sparc for instance we may have to use 64 bit moves that will
672  // kill 2 registers when used with F0-F31.
673 idealreg2spillmask[Op_RegI]->OR(*idealreg2regmask[Op_RegF]);
674 idealreg2spillmask[Op_RegF]->OR(*idealreg2regmask[Op_RegI]);
675#ifdef _LP64
676 idealreg2spillmask[Op_RegN]->OR(*idealreg2regmask[Op_RegF]);
677 idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
678 idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
679 idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegD]);
680#else
681 idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegF]);
682#ifdef ARM
683 // ARM has support for moving 64bit values between a pair of
684 // integer registers and a double register
685 idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
686 idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
687#endif
688#endif
689 }
690
691 // Make up debug masks. Any spill slot plus callee-save (SOE) registers.
692 // Caller-save (SOC, AS) registers are assumed to be trashable by the various
693 // inline-cache fixup routines.
694 *idealreg2debugmask [Op_RegN] = *idealreg2spillmask[Op_RegN];
695 *idealreg2debugmask [Op_RegI] = *idealreg2spillmask[Op_RegI];
696 *idealreg2debugmask [Op_RegL] = *idealreg2spillmask[Op_RegL];
697 *idealreg2debugmask [Op_RegF] = *idealreg2spillmask[Op_RegF];
698 *idealreg2debugmask [Op_RegD] = *idealreg2spillmask[Op_RegD];
699 *idealreg2debugmask [Op_RegP] = *idealreg2spillmask[Op_RegP];
700 *idealreg2debugmask [Op_RegVectMask] = *idealreg2spillmask[Op_RegVectMask];
701
702 *idealreg2debugmask [Op_VecA] = *idealreg2spillmask[Op_VecA];
703 *idealreg2debugmask [Op_VecS] = *idealreg2spillmask[Op_VecS];
704 *idealreg2debugmask [Op_VecD] = *idealreg2spillmask[Op_VecD];
705 *idealreg2debugmask [Op_VecX] = *idealreg2spillmask[Op_VecX];
706 *idealreg2debugmask [Op_VecY] = *idealreg2spillmask[Op_VecY];
707 *idealreg2debugmask [Op_VecZ] = *idealreg2spillmask[Op_VecZ];
708
709 *idealreg2mhdebugmask[Op_RegN] = *idealreg2spillmask[Op_RegN];
710 *idealreg2mhdebugmask[Op_RegI] = *idealreg2spillmask[Op_RegI];
711 *idealreg2mhdebugmask[Op_RegL] = *idealreg2spillmask[Op_RegL];
712 *idealreg2mhdebugmask[Op_RegF] = *idealreg2spillmask[Op_RegF];
713 *idealreg2mhdebugmask[Op_RegD] = *idealreg2spillmask[Op_RegD];
714 *idealreg2mhdebugmask[Op_RegP] = *idealreg2spillmask[Op_RegP];
715 *idealreg2mhdebugmask[Op_RegVectMask] = *idealreg2spillmask[Op_RegVectMask];
716
717 *idealreg2mhdebugmask[Op_VecA] = *idealreg2spillmask[Op_VecA];
718 *idealreg2mhdebugmask[Op_VecS] = *idealreg2spillmask[Op_VecS];
719 *idealreg2mhdebugmask[Op_VecD] = *idealreg2spillmask[Op_VecD];
720 *idealreg2mhdebugmask[Op_VecX] = *idealreg2spillmask[Op_VecX];
721 *idealreg2mhdebugmask[Op_VecY] = *idealreg2spillmask[Op_VecY];
722 *idealreg2mhdebugmask[Op_VecZ] = *idealreg2spillmask[Op_VecZ];
723
724 // Prevent stub compilations from attempting to reference
725 // callee-saved (SOE) registers from debug info
726 bool exclude_soe = !Compile::current()->is_method_compilation();
727 RegMask* caller_save_mask = exclude_soe ? &caller_save_regmask_exclude_soe : &caller_save_regmask;
728 RegMask* mh_caller_save_mask = exclude_soe ? &mh_caller_save_regmask_exclude_soe : &mh_caller_save_regmask;
729
730 idealreg2debugmask[Op_RegN]->SUBTRACT(*caller_save_mask);
731 idealreg2debugmask[Op_RegI]->SUBTRACT(*caller_save_mask);
732 idealreg2debugmask[Op_RegL]->SUBTRACT(*caller_save_mask);
733 idealreg2debugmask[Op_RegF]->SUBTRACT(*caller_save_mask);
734 idealreg2debugmask[Op_RegD]->SUBTRACT(*caller_save_mask);
735 idealreg2debugmask[Op_RegP]->SUBTRACT(*caller_save_mask);
736 idealreg2debugmask[Op_RegVectMask]->SUBTRACT(*caller_save_mask);
737
738 idealreg2debugmask[Op_VecA]->SUBTRACT(*caller_save_mask);
739 idealreg2debugmask[Op_VecS]->SUBTRACT(*caller_save_mask);
740 idealreg2debugmask[Op_VecD]->SUBTRACT(*caller_save_mask);
741 idealreg2debugmask[Op_VecX]->SUBTRACT(*caller_save_mask);
742 idealreg2debugmask[Op_VecY]->SUBTRACT(*caller_save_mask);
743 idealreg2debugmask[Op_VecZ]->SUBTRACT(*caller_save_mask);
744
745 idealreg2mhdebugmask[Op_RegN]->SUBTRACT(*mh_caller_save_mask);
746 idealreg2mhdebugmask[Op_RegI]->SUBTRACT(*mh_caller_save_mask);
747 idealreg2mhdebugmask[Op_RegL]->SUBTRACT(*mh_caller_save_mask);
748 idealreg2mhdebugmask[Op_RegF]->SUBTRACT(*mh_caller_save_mask);
749 idealreg2mhdebugmask[Op_RegD]->SUBTRACT(*mh_caller_save_mask);
750 idealreg2mhdebugmask[Op_RegP]->SUBTRACT(*mh_caller_save_mask);
751 idealreg2mhdebugmask[Op_RegVectMask]->SUBTRACT(*mh_caller_save_mask);
752
753 idealreg2mhdebugmask[Op_VecA]->SUBTRACT(*mh_caller_save_mask);
754 idealreg2mhdebugmask[Op_VecS]->SUBTRACT(*mh_caller_save_mask);
755 idealreg2mhdebugmask[Op_VecD]->SUBTRACT(*mh_caller_save_mask);
756 idealreg2mhdebugmask[Op_VecX]->SUBTRACT(*mh_caller_save_mask);
757 idealreg2mhdebugmask[Op_VecY]->SUBTRACT(*mh_caller_save_mask);
758 idealreg2mhdebugmask[Op_VecZ]->SUBTRACT(*mh_caller_save_mask);
759}
760
761//---------------------------is_save_on_entry----------------------------------
762bool Matcher::is_save_on_entry(int reg) {
763 return
764 _register_save_policy[reg] == 'E' ||
765 _register_save_policy[reg] == 'A'; // Save-on-entry register?
766}
767
768//---------------------------Fixup_Save_On_Entry-------------------------------
769void Matcher::Fixup_Save_On_Entry( ) {
770 init_first_stack_mask();
771
772 Node *root = C->root(); // Short name for root
773 // Count number of save-on-entry registers.
774 uint soe_cnt = number_of_saved_registers();
775 uint i;
776
777 // Find the procedure Start Node
778 StartNode *start = C->start();
779  assert( start, "Expect a start node" );
780
781 // Input RegMask array shared by all Returns.
782 // The type for doubles and longs has a count of 2, but
783 // there is only 1 returned value
784 uint ret_edge_cnt = TypeFunc::Parms + ((C->tf()->range()->cnt() == TypeFunc::Parms) ? 0 : 1);
785 RegMask *ret_rms = init_input_masks( ret_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
786 // Returns have 0 or 1 returned values depending on call signature.
787 // Return register is specified by return_value in the AD file.
788 if (ret_edge_cnt > TypeFunc::Parms)
789 ret_rms[TypeFunc::Parms+0] = _return_value_mask;
790
791 // Input RegMask array shared by all Rethrows.
792 uint reth_edge_cnt = TypeFunc::Parms+1;
793 RegMask *reth_rms = init_input_masks( reth_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
794 // Rethrow takes exception oop only, but in the argument 0 slot.
795 OptoReg::Name reg = find_receiver();
796 if (reg >= 0) {
797 reth_rms[TypeFunc::Parms] = mreg2regmask[reg];
798#ifdef _LP641
799 // Need two slots for ptrs in 64-bit land
800 reth_rms[TypeFunc::Parms].Insert(OptoReg::add(OptoReg::Name(reg), 1));
801#endif
802 }
803
804 // Input RegMask array shared by all TailCalls
805 uint tail_call_edge_cnt = TypeFunc::Parms+2;
806 RegMask *tail_call_rms = init_input_masks( tail_call_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
807
808 // Input RegMask array shared by all TailJumps
809 uint tail_jump_edge_cnt = TypeFunc::Parms+2;
810 RegMask *tail_jump_rms = init_input_masks( tail_jump_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
811
812 // TailCalls have 2 returned values (target & moop), whose masks come
813 // from the usual MachNode/MachOper mechanism. Find a sample
814 // TailCall to extract these masks and put the correct masks into
815 // the tail_call_rms array.
816 for( i=1; i < root->req(); i++ ) {
817 MachReturnNode *m = root->in(i)->as_MachReturn();
818 if( m->ideal_Opcode() == Op_TailCall ) {
819 tail_call_rms[TypeFunc::Parms+0] = m->MachNode::in_RegMask(TypeFunc::Parms+0);
820 tail_call_rms[TypeFunc::Parms+1] = m->MachNode::in_RegMask(TypeFunc::Parms+1);
821 break;
822 }
823 }
824
825 // TailJumps have 2 returned values (target & ex_oop), whose masks come
826 // from the usual MachNode/MachOper mechanism. Find a sample
827 // TailJump to extract these masks and put the correct masks into
828 // the tail_jump_rms array.
829 for( i=1; i < root->req(); i++ ) {
830 MachReturnNode *m = root->in(i)->as_MachReturn();
831 if( m->ideal_Opcode() == Op_TailJump ) {
832 tail_jump_rms[TypeFunc::Parms+0] = m->MachNode::in_RegMask(TypeFunc::Parms+0);
833 tail_jump_rms[TypeFunc::Parms+1] = m->MachNode::in_RegMask(TypeFunc::Parms+1);
834 break;
835 }
836 }
837
838 // Input RegMask array shared by all Halts
839 uint halt_edge_cnt = TypeFunc::Parms;
840 RegMask *halt_rms = init_input_masks( halt_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
841
842 // Capture the return input masks into each exit flavor
843 for( i=1; i < root->req(); i++ ) {
844 MachReturnNode *exit = root->in(i)->as_MachReturn();
845 switch( exit->ideal_Opcode() ) {
846 case Op_Return : exit->_in_rms = ret_rms; break;
847 case Op_Rethrow : exit->_in_rms = reth_rms; break;
848 case Op_TailCall : exit->_in_rms = tail_call_rms; break;
849 case Op_TailJump : exit->_in_rms = tail_jump_rms; break;
850 case Op_Halt : exit->_in_rms = halt_rms; break;
851      default          : ShouldNotReachHere();
852 }
853 }
854
855 // Next unused projection number from Start.
856 int proj_cnt = C->tf()->domain()->cnt();
857
858 // Do all the save-on-entry registers. Make projections from Start for
859 // them, and give them a use at the exit points. To the allocator, they
860 // look like incoming register arguments.
861 for( i = 0; i < _last_Mach_Reg; i++ ) {
862 if( is_save_on_entry(i) ) {
863
864 // Add the save-on-entry to the mask array
865 ret_rms [ ret_edge_cnt] = mreg2regmask[i];
866 reth_rms [ reth_edge_cnt] = mreg2regmask[i];
867 tail_call_rms[tail_call_edge_cnt] = mreg2regmask[i];
868 tail_jump_rms[tail_jump_edge_cnt] = mreg2regmask[i];
869 // Halts need the SOE registers, but only in the stack as debug info.
870 // A just-prior uncommon-trap or deoptimization will use the SOE regs.
871 halt_rms [ halt_edge_cnt] = *idealreg2spillmask[_register_save_type[i]];
872
873 Node *mproj;
874
875 // Is this a RegF low half of a RegD? Double up 2 adjacent RegF's
876 // into a single RegD.
877 if( (i&1) == 0 &&
878 _register_save_type[i ] == Op_RegF &&
879 _register_save_type[i+1] == Op_RegF &&
880 is_save_on_entry(i+1) ) {
881 // Add other bit for double
882 ret_rms [ ret_edge_cnt].Insert(OptoReg::Name(i+1));
883 reth_rms [ reth_edge_cnt].Insert(OptoReg::Name(i+1));
884 tail_call_rms[tail_call_edge_cnt].Insert(OptoReg::Name(i+1));
885 tail_jump_rms[tail_jump_edge_cnt].Insert(OptoReg::Name(i+1));
886 halt_rms [ halt_edge_cnt].Insert(OptoReg::Name(i+1));
887 mproj = new MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Op_RegD );
888 proj_cnt += 2; // Skip 2 for doubles
889 }
890 else if( (i&1) == 1 && // Else check for high half of double
891 _register_save_type[i-1] == Op_RegF &&
892 _register_save_type[i ] == Op_RegF &&
893 is_save_on_entry(i-1) ) {
894 ret_rms [ ret_edge_cnt] = RegMask::Empty;
895 reth_rms [ reth_edge_cnt] = RegMask::Empty;
896 tail_call_rms[tail_call_edge_cnt] = RegMask::Empty;
897 tail_jump_rms[tail_jump_edge_cnt] = RegMask::Empty;
898 halt_rms [ halt_edge_cnt] = RegMask::Empty;
899 mproj = C->top();
900 }
901 // Is this a RegI low half of a RegL? Double up 2 adjacent RegI's
902 // into a single RegL.
903 else if( (i&1) == 0 &&
904 _register_save_type[i ] == Op_RegI &&
905 _register_save_type[i+1] == Op_RegI &&
906 is_save_on_entry(i+1) ) {
907 // Add other bit for long
908 ret_rms [ ret_edge_cnt].Insert(OptoReg::Name(i+1));
909 reth_rms [ reth_edge_cnt].Insert(OptoReg::Name(i+1));
910 tail_call_rms[tail_call_edge_cnt].Insert(OptoReg::Name(i+1));
911 tail_jump_rms[tail_jump_edge_cnt].Insert(OptoReg::Name(i+1));
912 halt_rms [ halt_edge_cnt].Insert(OptoReg::Name(i+1));
913 mproj = new MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Op_RegL );
914 proj_cnt += 2; // Skip 2 for longs
915 }
916 else if( (i&1) == 1 && // Else check for high half of long
917 _register_save_type[i-1] == Op_RegI &&
918 _register_save_type[i ] == Op_RegI &&
919 is_save_on_entry(i-1) ) {
920 ret_rms [ ret_edge_cnt] = RegMask::Empty;
921 reth_rms [ reth_edge_cnt] = RegMask::Empty;
922 tail_call_rms[tail_call_edge_cnt] = RegMask::Empty;
923 tail_jump_rms[tail_jump_edge_cnt] = RegMask::Empty;
924 halt_rms [ halt_edge_cnt] = RegMask::Empty;
925 mproj = C->top();
926 } else {
927 // Make a projection for it off the Start
928 mproj = new MachProjNode( start, proj_cnt++, ret_rms[ret_edge_cnt], _register_save_type[i] );
929 }
930
931 ret_edge_cnt ++;
932 reth_edge_cnt ++;
933 tail_call_edge_cnt ++;
934 tail_jump_edge_cnt ++;
935 halt_edge_cnt ++;
936
937 // Add a use of the SOE register to all exit paths
938 for( uint j=1; j < root->req(); j++ )
939 root->in(j)->add_req(mproj);
940 } // End of if a save-on-entry register
941 } // End of for all machine registers
942}
943
944//------------------------------init_spill_mask--------------------------------
945void Matcher::init_spill_mask( Node *ret ) {
946 if( idealreg2regmask[Op_RegI] ) return; // One time only init
947
948 OptoReg::c_frame_pointer = c_frame_pointer();
949 c_frame_ptr_mask = c_frame_pointer();
950#ifdef _LP64
951 // pointers are twice as big
952 c_frame_ptr_mask.Insert(OptoReg::add(c_frame_pointer(),1));
953#endif
954
955 // Start at OptoReg::stack0()
956 STACK_ONLY_mask.Clear();
957 OptoReg::Name init = OptoReg::stack2reg(0);
958 // STACK_ONLY_mask is all stack bits
959 OptoReg::Name i;
960 for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1))
961 STACK_ONLY_mask.Insert(i);
962 // Also set the "infinite stack" bit.
963 STACK_ONLY_mask.set_AllStack();
964
965 for (i = OptoReg::Name(0); i < OptoReg::Name(_last_Mach_Reg); i = OptoReg::add(i, 1)) {
966 // Copy the register names over into the shared world.
967 // SharedInfo::regName[i] = regName[i];
968 // Handy RegMasks per machine register
969 mreg2regmask[i].Insert(i);
970
971 // Set up regmasks used to exclude save-on-call (and always-save) registers from debug masks.
972 if (_register_save_policy[i] == 'C' ||
973 _register_save_policy[i] == 'A') {
974 caller_save_regmask.Insert(i);
975 mh_caller_save_regmask.Insert(i);
976 }
977 // Exclude save-on-entry registers from debug masks for stub compilations.
978 if (_register_save_policy[i] == 'C' ||
979 _register_save_policy[i] == 'A' ||
980 _register_save_policy[i] == 'E') {
981 caller_save_regmask_exclude_soe.Insert(i);
982 mh_caller_save_regmask_exclude_soe.Insert(i);
983 }
984 }
985
986 // Also exclude the register we use to save the SP for MethodHandle
 987 // invokes from the corresponding MH debug masks
988 const RegMask sp_save_mask = method_handle_invoke_SP_save_mask();
989 mh_caller_save_regmask.OR(sp_save_mask);
990 mh_caller_save_regmask_exclude_soe.OR(sp_save_mask);
991
992 // Grab the Frame Pointer
993 Node *fp = ret->in(TypeFunc::FramePtr);
994 // Share frame pointer while making spill ops
995 set_shared(fp);
996
997// Get the ADLC notion of the right regmask, for each basic type.
 998#ifdef _LP64
999 idealreg2regmask[Op_RegN] = regmask_for_ideal_register(Op_RegN, ret);
1000#endif
1001 idealreg2regmask[Op_RegI] = regmask_for_ideal_register(Op_RegI, ret);
1002 idealreg2regmask[Op_RegP] = regmask_for_ideal_register(Op_RegP, ret);
1003 idealreg2regmask[Op_RegF] = regmask_for_ideal_register(Op_RegF, ret);
1004 idealreg2regmask[Op_RegD] = regmask_for_ideal_register(Op_RegD, ret);
1005 idealreg2regmask[Op_RegL] = regmask_for_ideal_register(Op_RegL, ret);
1006 idealreg2regmask[Op_VecA] = regmask_for_ideal_register(Op_VecA, ret);
1007 idealreg2regmask[Op_VecS] = regmask_for_ideal_register(Op_VecS, ret);
1008 idealreg2regmask[Op_VecD] = regmask_for_ideal_register(Op_VecD, ret);
1009 idealreg2regmask[Op_VecX] = regmask_for_ideal_register(Op_VecX, ret);
1010 idealreg2regmask[Op_VecY] = regmask_for_ideal_register(Op_VecY, ret);
1011 idealreg2regmask[Op_VecZ] = regmask_for_ideal_register(Op_VecZ, ret);
1012 idealreg2regmask[Op_RegVectMask] = regmask_for_ideal_register(Op_RegVectMask, ret);
1013}
1014
1015#ifdef ASSERT
1016static void match_alias_type(Compile* C, Node* n, Node* m) {
1017 if (!VerifyAliases) return; // do not go looking for trouble by default
1018 const TypePtr* nat = n->adr_type();
1019 const TypePtr* mat = m->adr_type();
1020 int nidx = C->get_alias_index(nat);
1021 int midx = C->get_alias_index(mat);
1022 // Detune the assert for cases like (AndI 0xFF (LoadB p)).
1023 if (nidx == Compile::AliasIdxTop && midx >= Compile::AliasIdxRaw) {
1024 for (uint i = 1; i < n->req(); i++) {
1025 Node* n1 = n->in(i);
1026 const TypePtr* n1at = n1->adr_type();
1027 if (n1at != NULL__null) {
1028 nat = n1at;
1029 nidx = C->get_alias_index(n1at);
1030 }
1031 }
1032 }
1033 // %%% Kludgery. Instead, fix ideal adr_type methods for all these cases:
1034 if (nidx == Compile::AliasIdxTop && midx == Compile::AliasIdxRaw) {
1035 switch (n->Opcode()) {
1036 case Op_PrefetchAllocation:
1037 nidx = Compile::AliasIdxRaw;
1038 nat = TypeRawPtr::BOTTOM;
1039 break;
1040 }
1041 }
1042 if (nidx == Compile::AliasIdxRaw && midx == Compile::AliasIdxTop) {
1043 switch (n->Opcode()) {
1044 case Op_ClearArray:
1045 midx = Compile::AliasIdxRaw;
1046 mat = TypeRawPtr::BOTTOM;
1047 break;
1048 }
1049 }
1050 if (nidx == Compile::AliasIdxTop && midx == Compile::AliasIdxBot) {
1051 switch (n->Opcode()) {
1052 case Op_Return:
1053 case Op_Rethrow:
1054 case Op_Halt:
1055 case Op_TailCall:
1056 case Op_TailJump:
1057 nidx = Compile::AliasIdxBot;
1058 nat = TypePtr::BOTTOM;
1059 break;
1060 }
1061 }
1062 if (nidx == Compile::AliasIdxBot && midx == Compile::AliasIdxTop) {
1063 switch (n->Opcode()) {
1064 case Op_StrComp:
1065 case Op_StrEquals:
1066 case Op_StrIndexOf:
1067 case Op_StrIndexOfChar:
1068 case Op_AryEq:
1069 case Op_HasNegatives:
1070 case Op_MemBarVolatile:
1071 case Op_MemBarCPUOrder: // %%% these ideals should have narrower adr_type?
1072 case Op_StrInflatedCopy:
1073 case Op_StrCompressedCopy:
1074 case Op_OnSpinWait:
1075 case Op_EncodeISOArray:
1076 nidx = Compile::AliasIdxTop;
1077 nat = NULL__null;
1078 break;
1079 }
1080 }
1081 if (nidx != midx) {
1082 if (PrintOpto || (PrintMiscellaneous && (WizardMode || Verbose))) {
1083 tty->print_cr("==== Matcher alias shift %d => %d", nidx, midx);
1084 n->dump();
1085 m->dump();
1086 }
1087 assert(C->subsume_loads() && C->must_alias(nat, midx),
1088 "must not lose alias info when matching");
1089 }
1090}
1091#endif
1092
1093//------------------------------xform------------------------------------------
1094// Given a Node in old-space, Match him (Label/Reduce) to produce a machine
1095// Node in new-space. Given a new-space Node, recursively walk his children.
1096Node *Matcher::transform( Node *n ) { ShouldNotCallThis(); return n; }
1097Node *Matcher::xform( Node *n, int max_stack ) {
1098 // Use one stack to keep both: child's node/state and parent's node/index
1099 MStack mstack(max_stack * 2 * 2); // usually: C->live_nodes() * 2 * 2
1100 mstack.push(n, Visit, NULL__null, -1); // set NULL as parent to indicate root
1101 while (mstack.is_nonempty()) {
20
Calling 'Node_Stack::is_nonempty'
22
Returning from 'Node_Stack::is_nonempty'
23
Loop condition is true. Entering loop body
1102 C->check_node_count(NodeLimitFudgeFactor, "too many nodes matching instructions");
1103 if (C->failing()) return NULL__null;
24
Calling 'Compile::failing'
27
Returning from 'Compile::failing'
28
Taking false branch
1104 n = mstack.node(); // Leave node on stack
1105 Node_State nstate = mstack.state();
1106 if (nstate
28.1
'nstate' is equal to Visit
== Visit) {
29
Taking true branch
1107 mstack.set_state(Post_Visit);
1108 Node *oldn = n;
1109 // Old-space or new-space check
1110 if (!C->node_arena()->contains(n)) {
30
Assuming the condition is false
31
Taking false branch
1111 // Old space!
1112 Node* m;
1113 if (has_new_node(n)) { // Not yet Label/Reduced
1114 m = new_node(n);
1115 } else {
1116 if (!is_dontcare(n)) { // Matcher can match this guy
1117 // Calls match special. They match alone with no children.
1118 // Their children, the incoming arguments, match normally.
1119 m = n->is_SafePoint() ? match_sfpt(n->as_SafePoint()):match_tree(n);
1120 if (C->failing()) return NULL__null;
1121 if (m == NULL__null) { Matcher::soft_match_failure(); return NULL__null; }
1122 if (n->is_MemBar()) {
1123 m->as_MachMemBar()->set_adr_type(n->adr_type());
1124 }
1125 } else { // Nothing the matcher cares about
1126 if (n->is_Proj() && n->in(0) != NULL__null && n->in(0)->is_Multi()) { // Projections?
1127 // Convert to machine-dependent projection
1128 m = n->in(0)->as_Multi()->match( n->as_Proj(), this );
1129 NOT_PRODUCT(record_new2old(m, n);)record_new2old(m, n);
1130 if (m->in(0) != NULL__null) // m might be top
1131 collect_null_checks(m, n);
1132 } else { // Else just a regular 'ol guy
1133 m = n->clone(); // So just clone into new-space
1134 NOT_PRODUCT(record_new2old(m, n);)record_new2old(m, n);
1135 // Def-Use edges will be added incrementally as Uses
1136 // of this node are matched.
1137 assert(m->outcnt() == 0, "no Uses of this clone yet");
1138 }
1139 }
1140
1141 set_new_node(n, m); // Map old to new
1142 if (_old_node_note_array != NULL__null) {
1143 Node_Notes* nn = C->locate_node_notes(_old_node_note_array,
1144 n->_idx);
1145 C->set_node_notes_at(m->_idx, nn);
1146 }
1147 debug_only(match_alias_type(C, n, m))match_alias_type(C, n, m);
1148 }
1149 n = m; // n is now a new-space node
1150 mstack.set_node(n);
1151 }
1152
1153 // New space!
1154 if (_visited.test_set(n->_idx)) continue; // while(mstack.is_nonempty())
32
Calling 'VectorSet::test_set'
36
Returning from 'VectorSet::test_set'
37
Taking false branch
1155
1156 int i;
1157 // Put precedence edges on stack first (match them last).
1158 for (i = oldn->req(); (uint)i < oldn->len(); i++) {
38
Assuming the condition is false
39
Loop condition is false. Execution continues on line 1166
1159 Node *m = oldn->in(i);
1160 if (m == NULL__null) break;
1161 // set -1 to call add_prec() instead of set_req() during Step1
1162 mstack.push(m, Visit, n, -1);
1163 }
1164
1165 // Handle precedence edges for interior nodes
1166 for (i = n->len()-1; (uint)i >= n->req(); i--) {
40
Assuming the condition is false
41
Loop condition is false. Execution continues on line 1175
1167 Node *m = n->in(i);
1168 if (m == NULL__null || C->node_arena()->contains(m)) continue;
1169 n->rm_prec(i);
1170 // set -1 to call add_prec() instead of set_req() during Step1
1171 mstack.push(m, Visit, n, -1);
1172 }
1173
1174 // For constant debug info, I'd rather have unmatched constants.
1175 int cnt = n->req();
1176 JVMState* jvms = n->jvms();
42
'jvms' initialized here
1177 int debug_cnt = jvms ? jvms->debug_start() : cnt;
43
Assuming 'jvms' is null
44
'?' condition is false
1178
1179 // Now do only debug info. Clone constants rather than matching.
1180 // Constants are represented directly in the debug info without
1181 // the need for executable machine instructions.
1182 // Monitor boxes are also represented directly.
1183 for (i = cnt - 1; i >= debug_cnt; --i) { // For all debug inputs do
45
Assuming 'i' is >= 'debug_cnt'
46
Loop condition is true. Entering loop body
1184 Node *m = n->in(i); // Get input
1185 int op = m->Opcode();
1186 assert((op == Op_BoxLock) == jvms->is_monitor_use(i), "boxes only at monitor sites");
47
Assuming 'op' is not equal to Op_BoxLock
48
Called C++ object pointer is null
1187 if( op == Op_ConI || op == Op_ConP || op == Op_ConN || op == Op_ConNKlass ||
1188 op == Op_ConF || op == Op_ConD || op == Op_ConL
1189 // || op == Op_BoxLock // %%%% enable this and remove (+++) in chaitin.cpp
1190 ) {
1191 m = m->clone();
1192 NOT_PRODUCT(record_new2old(m, n))record_new2old(m, n);
1193 mstack.push(m, Post_Visit, n, i); // Don't need to visit
1194 mstack.push(m->in(0), Visit, m, 0);
1195 } else {
1196 mstack.push(m, Visit, n, i);
1197 }
1198 }
1199
1200 // And now walk his children, and convert his inputs to new-space.
1201 for( ; i >= 0; --i ) { // For all normal inputs do
1202 Node *m = n->in(i); // Get input
1203 if(m != NULL__null)
1204 mstack.push(m, Visit, n, i);
1205 }
1206
1207 }
1208 else if (nstate == Post_Visit) {
1209 // Set xformed input
1210 Node *p = mstack.parent();
1211 if (p != NULL__null) { // root doesn't have parent
1212 int i = (int)mstack.index();
1213 if (i >= 0)
1214 p->set_req(i, n); // required input
1215 else if (i == -1)
1216 p->add_prec(n); // precedence input
1217 else
1218 ShouldNotReachHere();
1219 }
1220 mstack.pop(); // remove processed node from stack
1221 }
1222 else {
1223 ShouldNotReachHere();
1224 }
1225 } // while (mstack.is_nonempty())
1226 return n; // Return new-space Node
1227}
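
The loop above replaces native recursion with an explicit Visit/Post_Visit stack so that very deep ideal graphs cannot overflow the C++ stack. A minimal, self-contained sketch of that traversal pattern follows; SimpleNode and the id limit are hypothetical stand-ins, not HotSpot's Node and MStack types:

#include <cstdio>
#include <vector>

struct SimpleNode {
  int id;
  std::vector<SimpleNode*> in;   // inputs (children)
};

enum VisitState { Visit, Post_Visit };

struct Frame {
  SimpleNode* node;
  VisitState  state;
};

// Visit every reachable node exactly once, children before parents, without
// native recursion (mirrors the Visit/Post_Visit protocol used by xform()).
static void post_order(SimpleNode* root) {
  std::vector<Frame> stack;
  std::vector<bool>  seen(1024, false);          // assumes ids < 1024 in this sketch
  stack.push_back({root, Visit});
  while (!stack.empty()) {
    SimpleNode* n = stack.back().node;
    if (stack.back().state == Visit) {
      stack.back().state = Post_Visit;           // leave node on stack for the 2nd pass
      if (seen[n->id]) { stack.pop_back(); continue; }
      seen[n->id] = true;
      for (SimpleNode* m : n->in) {
        if (m != nullptr) stack.push_back({m, Visit});
      }
    } else {                                     // Post_Visit: all children handled
      std::printf("post-visit node %d\n", n->id);
      stack.pop_back();
    }
  }
}

int main() {
  SimpleNode a{0, {}}, b{1, {}}, c{2, {}};
  a.in = {&b, &c};
  b.in = {&c};
  post_order(&a);                                // prints 2, 1, 0
  return 0;
}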
1228
1229//------------------------------warp_outgoing_stk_arg------------------------
1230OptoReg::Name Matcher::warp_outgoing_stk_arg( VMReg reg, OptoReg::Name begin_out_arg_area, OptoReg::Name &out_arg_limit_per_call ) {
1231 // Convert outgoing argument location to a pre-biased stack offset
1232 if (reg->is_stack()) {
1233 OptoReg::Name warped = reg->reg2stack();
1234 // Adjust the stack slot offset to be the register number used
1235 // by the allocator.
1236 warped = OptoReg::add(begin_out_arg_area, warped);
1237 // Keep track of the largest numbered stack slot used for an arg.
1238 // Largest used slot per call-site indicates the amount of stack
1239 // that is killed by the call.
1240 if( warped >= out_arg_limit_per_call )
1241 out_arg_limit_per_call = OptoReg::add(warped,1);
1242 if (!RegMask::can_represent_arg(warped)) {
1243 C->record_method_not_compilable("unsupported calling sequence");
1244 return OptoReg::Bad;
1245 }
1246 return warped;
1247 }
1248 return OptoReg::as_OptoReg(reg);
1249}
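
warp_outgoing_stk_arg() rebases an outgoing stack argument onto the allocator's slot numbering and bumps the per-call-site high-water mark of killed slots. A minimal sketch of that bookkeeping with plain ints; the begin/limit values are invented for illustration and not HotSpot's:

#include <cstdio>
#include <algorithm>

// 'slot' is the argument's slot within the outgoing-argument area;
// 'begin_out_arg_area' is where that area starts in the allocator's numbering.
static int warp_stack_arg(int slot, int begin_out_arg_area, int& out_arg_limit) {
  int warped = begin_out_arg_area + slot;              // pre-biased stack offset
  out_arg_limit = std::max(out_arg_limit, warped + 1); // slots killed by this call
  return warped;
}

int main() {
  int begin = 8;          // hypothetical start of the out-arg area
  int limit = begin;      // per-call high-water mark
  std::printf("arg0 -> slot %d\n", warp_stack_arg(0, begin, limit));
  std::printf("arg3 -> slot %d\n", warp_stack_arg(3, begin, limit));
  std::printf("slots killed by the call: [%d, %d)\n", begin, limit);
  return 0;
}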
1250
1251
1252//------------------------------match_sfpt-------------------------------------
1253// Helper function to match call instructions. Calls match special.
1254// They match alone with no children. Their children, the incoming
1255// arguments, match normally.
1256MachNode *Matcher::match_sfpt( SafePointNode *sfpt ) {
1257 MachSafePointNode *msfpt = NULL__null;
1258 MachCallNode *mcall = NULL__null;
1259 uint cnt;
1260 // Split out case for SafePoint vs Call
1261 CallNode *call;
1262 const TypeTuple *domain;
1263 ciMethod* method = NULL__null;
1264 bool is_method_handle_invoke = false; // for special kill effects
1265 if( sfpt->is_Call() ) {
1266 call = sfpt->as_Call();
1267 domain = call->tf()->domain();
1268 cnt = domain->cnt();
1269
1270 // Match just the call, nothing else
1271 MachNode *m = match_tree(call);
1272 if (C->failing()) return NULL__null;
1273 if( m == NULL__null ) { Matcher::soft_match_failure(); return NULL__null; }
1274
1275 // Copy data from the Ideal SafePoint to the machine version
1276 mcall = m->as_MachCall();
1277
1278 mcall->set_tf( call->tf());
1279 mcall->set_entry_point( call->entry_point());
1280 mcall->set_cnt( call->cnt());
1281 mcall->set_guaranteed_safepoint(call->guaranteed_safepoint());
1282
1283 if( mcall->is_MachCallJava() ) {
1284 MachCallJavaNode *mcall_java = mcall->as_MachCallJava();
1285 const CallJavaNode *call_java = call->as_CallJava();
1286 assert(call_java->validate_symbolic_info(), "inconsistent info");
1287 method = call_java->method();
1288 mcall_java->_method = method;
1289 mcall_java->_optimized_virtual = call_java->is_optimized_virtual();
1290 is_method_handle_invoke = call_java->is_method_handle_invoke();
1291 mcall_java->_method_handle_invoke = is_method_handle_invoke;
1292 mcall_java->_override_symbolic_info = call_java->override_symbolic_info();
1293 mcall_java->_arg_escape = call_java->arg_escape();
1294 if (is_method_handle_invoke) {
1295 C->set_has_method_handle_invokes(true);
1296 }
1297 if( mcall_java->is_MachCallStaticJava() )
1298 mcall_java->as_MachCallStaticJava()->_name =
1299 call_java->as_CallStaticJava()->_name;
1300 if( mcall_java->is_MachCallDynamicJava() )
1301 mcall_java->as_MachCallDynamicJava()->_vtable_index =
1302 call_java->as_CallDynamicJava()->_vtable_index;
1303 }
1304 else if( mcall->is_MachCallRuntime() ) {
1305 MachCallRuntimeNode* mach_call_rt = mcall->as_MachCallRuntime();
1306 mach_call_rt->_name = call->as_CallRuntime()->_name;
1307 mach_call_rt->_leaf_no_fp = call->is_CallLeafNoFP();
1308 }
1309 else if( mcall->is_MachCallNative() ) {
1310 MachCallNativeNode* mach_call_native = mcall->as_MachCallNative();
1311 CallNativeNode* call_native = call->as_CallNative();
1312 mach_call_native->_name = call_native->_name;
1313 mach_call_native->_arg_regs = call_native->_arg_regs;
1314 mach_call_native->_ret_regs = call_native->_ret_regs;
1315 }
1316 msfpt = mcall;
1317 }
1318 // This is a non-call safepoint
1319 else {
1320 call = NULL__null;
1321 domain = NULL__null;
1322 MachNode *mn = match_tree(sfpt);
1323 if (C->failing()) return NULL__null;
1324 msfpt = mn->as_MachSafePoint();
1325 cnt = TypeFunc::Parms;
1326 }
1327 msfpt->_has_ea_local_in_scope = sfpt->has_ea_local_in_scope();
1328
1329 // Advertise the correct memory effects (for anti-dependence computation).
1330 msfpt->set_adr_type(sfpt->adr_type());
1331
1332 // Allocate a private array of RegMasks. These RegMasks are not shared.
1333 msfpt->_in_rms = NEW_RESOURCE_ARRAY( RegMask, cnt );
1334 // Empty them all.
1335 for (uint i = 0; i < cnt; i++) ::new (&(msfpt->_in_rms[i])) RegMask();
1336
1337 // Do all the pre-defined non-Empty register masks
1338 msfpt->_in_rms[TypeFunc::ReturnAdr] = _return_addr_mask;
1339 msfpt->_in_rms[TypeFunc::FramePtr ] = c_frame_ptr_mask;
1340
1341 // Place first outgoing argument can possibly be put.
1342 OptoReg::Name begin_out_arg_area = OptoReg::add(_new_SP, C->out_preserve_stack_slots());
1343 assert( is_even(begin_out_arg_area), "" );
1344 // Compute max outgoing register number per call site.
1345 OptoReg::Name out_arg_limit_per_call = begin_out_arg_area;
1346 // Calls to C may hammer extra stack slots above and beyond any arguments.
1347 // These are usually backing store for register arguments for varargs.
1348 if( call != NULL__null && call->is_CallRuntime() )
1349 out_arg_limit_per_call = OptoReg::add(out_arg_limit_per_call,C->varargs_C_out_slots_killed());
1350 if( call != NULL__null && call->is_CallNative() )
1351 out_arg_limit_per_call = OptoReg::add(out_arg_limit_per_call, call->as_CallNative()->_shadow_space_bytes);
1352
1353
1354 // Do the normal argument list (parameters) register masks
1355 int argcnt = cnt - TypeFunc::Parms;
1356 if( argcnt > 0 ) { // Skip it all if we have no args
1357 BasicType *sig_bt = NEW_RESOURCE_ARRAY( BasicType, argcnt );
1358 VMRegPair *parm_regs = NEW_RESOURCE_ARRAY( VMRegPair, argcnt );
1359 int i;
1360 for( i = 0; i < argcnt; i++ ) {
1361 sig_bt[i] = domain->field_at(i+TypeFunc::Parms)->basic_type();
1362 }
1363 // V-call to pick proper calling convention
1364 call->calling_convention( sig_bt, parm_regs, argcnt );
1365
1366#ifdef ASSERT
1367 // Sanity check users' calling convention. Really handy during
1368 // the initial porting effort. Fairly expensive otherwise.
1369 { for (int i = 0; i<argcnt; i++) {
1370 if( !parm_regs[i].first()->is_valid() &&
1371 !parm_regs[i].second()->is_valid() ) continue;
1372 VMReg reg1 = parm_regs[i].first();
1373 VMReg reg2 = parm_regs[i].second();
1374 for (int j = 0; j < i; j++) {
1375 if( !parm_regs[j].first()->is_valid() &&
1376 !parm_regs[j].second()->is_valid() ) continue;
1377 VMReg reg3 = parm_regs[j].first();
1378 VMReg reg4 = parm_regs[j].second();
1379 if( !reg1->is_valid() ) {
1380 assert( !reg2->is_valid(), "valid halvsies" );
1381 } else if( !reg3->is_valid() ) {
1382 assert( !reg4->is_valid(), "valid halvsies" );
1383 } else {
1384 assert( reg1 != reg2, "calling conv. must produce distinct regs");
1385 assert( reg1 != reg3, "calling conv. must produce distinct regs");
1386 assert( reg1 != reg4, "calling conv. must produce distinct regs");
1387 assert( reg2 != reg3, "calling conv. must produce distinct regs");
1388 assert( reg2 != reg4 || !reg2->is_valid(), "calling conv. must produce distinct regs");
1389 assert( reg3 != reg4, "calling conv. must produce distinct regs");
1390 }
1391 }
1392 }
1393 }
1394#endif
1395
1396 // Visit each argument. Compute its outgoing register mask.
1397 // Return results now can have 2 bits returned.
1398 // Compute max over all outgoing arguments both per call-site
1399 // and over the entire method.
1400 for( i = 0; i < argcnt; i++ ) {
1401 // Address of incoming argument mask to fill in
1402 RegMask *rm = &mcall->_in_rms[i+TypeFunc::Parms];
1403 VMReg first = parm_regs[i].first();
1404 VMReg second = parm_regs[i].second();
1405 if(!first->is_valid() &&
1406 !second->is_valid()) {
1407 continue; // Avoid Halves
1408 }
1409 // Handle case where arguments are in vector registers.
1410 if(call->in(TypeFunc::Parms + i)->bottom_type()->isa_vect()) {
1411 OptoReg::Name reg_fst = OptoReg::as_OptoReg(first);
1412 OptoReg::Name reg_snd = OptoReg::as_OptoReg(second);
1413 assert (reg_fst <= reg_snd, "fst=%d snd=%d", reg_fst, reg_snd);
1414 for (OptoReg::Name r = reg_fst; r <= reg_snd; r++) {
1415 rm->Insert(r);
1416 }
1417 }
1418 // Grab first register, adjust stack slots and insert in mask.
1419 OptoReg::Name reg1 = warp_outgoing_stk_arg(first, begin_out_arg_area, out_arg_limit_per_call );
1420 if (OptoReg::is_valid(reg1))
1421 rm->Insert( reg1 );
1422 // Grab second register (if any), adjust stack slots and insert in mask.
1423 OptoReg::Name reg2 = warp_outgoing_stk_arg(second, begin_out_arg_area, out_arg_limit_per_call );
1424 if (OptoReg::is_valid(reg2))
1425 rm->Insert( reg2 );
1426 } // End of for all arguments
1427 }
1428
1429 // Compute the max stack slot killed by any call. These will not be
1430 // available for debug info, and will be used to adjust FIRST_STACK_mask
1431 // after all call sites have been visited.
1432 if( _out_arg_limit < out_arg_limit_per_call)
1433 _out_arg_limit = out_arg_limit_per_call;
1434
1435 if (mcall) {
1436 // Kill the outgoing argument area, including any non-argument holes and
1437 // any legacy C-killed slots. Use Fat-Projections to do the killing.
1438 // Since the max-per-method covers the max-per-call-site and debug info
1439 // is excluded on the max-per-method basis, debug info cannot land in
1440 // this killed area.
1441 uint r_cnt = mcall->tf()->range()->cnt();
1442 MachProjNode *proj = new MachProjNode( mcall, r_cnt+10000, RegMask::Empty, MachProjNode::fat_proj );
1443 if (!RegMask::can_represent_arg(OptoReg::Name(out_arg_limit_per_call-1))) {
1444 C->record_method_not_compilable("unsupported outgoing calling sequence");
1445 } else {
1446 for (int i = begin_out_arg_area; i < out_arg_limit_per_call; i++)
1447 proj->_rout.Insert(OptoReg::Name(i));
1448 }
1449 if (proj->_rout.is_NotEmpty()) {
1450 push_projection(proj);
1451 }
1452 }
1453 // Transfer the safepoint information from the call to the mcall
1454 // Move the JVMState list
1455 msfpt->set_jvms(sfpt->jvms());
1456 for (JVMState* jvms = msfpt->jvms(); jvms; jvms = jvms->caller()) {
1457 jvms->set_map(sfpt);
1458 }
1459
1460 // Debug inputs begin just after the last incoming parameter
1461 assert((mcall == NULL) || (mcall->jvms() == NULL) ||
1462 (mcall->jvms()->debug_start() + mcall->_jvmadj == mcall->tf()->domain()->cnt()), "");
1463
1464 // Add additional edges.
1465 if (msfpt->mach_constant_base_node_input() != (uint)-1 && !msfpt->is_MachCallLeaf()) {
1466 // For these calls we can not add MachConstantBase in expand(), as the
1467 // ins are not complete then.
1468 msfpt->ins_req(msfpt->mach_constant_base_node_input(), C->mach_constant_base_node());
1469 if (msfpt->jvms() &&
1470 msfpt->mach_constant_base_node_input() <= msfpt->jvms()->debug_start() + msfpt->_jvmadj) {
1471 // We added an edge before jvms, so we must adapt the position of the ins.
1472 msfpt->jvms()->adapt_position(+1);
1473 }
1474 }
1475
1476 // Registers killed by the call are set in the local scheduling pass
1477 // of Global Code Motion.
1478 return msfpt;
1479}
1480
1481//---------------------------match_tree----------------------------------------
1482// Match an Ideal Node DAG - turn it into a tree; Label & Reduce. Used as part
1483// of the whole-sale conversion from Ideal to Mach Nodes. Also used for
1484// making GotoNodes while building the CFG and in init_spill_mask() to identify
1485// a Load's result RegMask for memoization in idealreg2regmask[]
1486MachNode *Matcher::match_tree( const Node *n ) {
1487 assert( n->Opcode() != Op_Phi, "cannot match" );
1488 assert( !n->is_block_start(), "cannot match" );
1489 // Set the mark for all locally allocated State objects.
1490 // When this call returns, the _states_arena arena will be reset
1491 // freeing all State objects.
1492 ResourceMark rm( &_states_arena );
1493
1494 LabelRootDepth = 0;
1495
1496 // StoreNodes require their Memory input to match any LoadNodes
1497 Node *mem = n->is_Store() ? n->in(MemNode::Memory) : (Node*)1 ;
1498#ifdef ASSERT
1499 Node* save_mem_node = _mem_node;
1500 _mem_node = n->is_Store() ? (Node*)n : NULL__null;
1501#endif
1502 // State object for root node of match tree
1503 // Allocate it on _states_arena - stack allocation can cause stack overflow.
1504 State *s = new (&_states_arena) State;
1505 s->_kids[0] = NULL__null;
1506 s->_kids[1] = NULL__null;
1507 s->_leaf = (Node*)n;
1508 // Label the input tree, allocating labels from top-level arena
1509 Node* root_mem = mem;
1510 Label_Root(n, s, n->in(0), root_mem);
1511 if (C->failing()) return NULL__null;
1512
1513 // The minimum cost match for the whole tree is found at the root State
1514 uint mincost = max_juint;
1515 uint cost = max_juint;
1516 uint i;
1517 for (i = 0; i < NUM_OPERANDS; i++) {
1518 if (s->valid(i) && // valid entry and
1519 s->cost(i) < cost && // low cost and
1520 s->rule(i) >= NUM_OPERANDS) { // not an operand
1521 mincost = i;
1522 cost = s->cost(i);
1523 }
1524 }
1525 if (mincost == max_juint) {
1526#ifndef PRODUCT
1527 tty->print("No matching rule for:");
1528 s->dump();
1529#endif
1530 Matcher::soft_match_failure();
1531 return NULL__null;
1532 }
1533 // Reduce input tree based upon the state labels to machine Nodes
1534 MachNode *m = ReduceInst(s, s->rule(mincost), mem);
1535 // New-to-old mapping is done in ReduceInst, to cover complex instructions.
1536 NOT_PRODUCT(_old2new_map.map(n->_idx, m);)_old2new_map.map(n->_idx, m);
1537
1538 // Add any Matcher-ignored edges
1539 uint cnt = n->req();
1540 uint start = 1;
1541 if( mem != (Node*)1 ) start = MemNode::Memory+1;
1542 if( n->is_AddP() ) {
1543 assert( mem == (Node*)1, "" );
1544 start = AddPNode::Base+1;
1545 }
1546 for( i = start; i < cnt; i++ ) {
1547 if( !n->match_edge(i) ) {
1548 if( i < m->req() )
1549 m->ins_req( i, n->in(i) );
1550 else
1551 m->add_req( n->in(i) );
1552 }
1553 }
1554
1555 debug_only( _mem_node = save_mem_node; )_mem_node = save_mem_node;
1556 return m;
1557}
1558
1559
1560//------------------------------match_into_reg---------------------------------
1561// Choose to either match this Node in a register or part of the current
1562// match tree. Return true for requiring a register and false for matching
1563// as part of the current match tree.
1564static bool match_into_reg( const Node *n, Node *m, Node *control, int i, bool shared ) {
1565
1566 const Type *t = m->bottom_type();
1567
1568 if (t->singleton()) {
1569 // Never force constants into registers. Allow them to match as
1570 // constants or registers. Copies of the same value will share
1571 // the same register. See find_shared_node.
1572 return false;
1573 } else { // Not a constant
1574 // Stop recursion if they have different Controls.
1575 Node* m_control = m->in(0);
1576 // Control of load's memory can post-dominate load's control.
1577 // So use it since load can't float above its memory.
1578 Node* mem_control = (m->is_Load()) ? m->in(MemNode::Memory)->in(0) : NULL__null;
1579 if (control && m_control && control != m_control && control != mem_control) {
1580
1581 // Actually, we can live with the most conservative control we
1582 // find, if it post-dominates the others. This allows us to
1583 // pick up load/op/store trees where the load can float a little
1584 // above the store.
1585 Node *x = control;
1586 const uint max_scan = 6; // Arbitrary scan cutoff
1587 uint j;
1588 for (j=0; j<max_scan; j++) {
1589 if (x->is_Region()) // Bail out at merge points
1590 return true;
1591 x = x->in(0);
1592 if (x == m_control) // Does 'control' post-dominate
1593 break; // m->in(0)? If so, we can use it
1594 if (x == mem_control) // Does 'control' post-dominate
1595 break; // mem_control? If so, we can use it
1596 }
1597 if (j == max_scan) // No post-domination before scan end?
1598 return true; // Then break the match tree up
1599 }
1600 if ((m->is_DecodeN() && Matcher::narrow_oop_use_complex_address()) ||
1601 (m->is_DecodeNKlass() && Matcher::narrow_klass_use_complex_address())) {
1602 // These are commonly used in address expressions and can
1603 // efficiently fold into them on X64 in some cases.
1604 return false;
1605 }
1606 }
1607
1608 // Not forceable cloning. If shared, put it into a register.
1609 return shared;
1610}
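
The post-dominance scan above walks at most max_scan control edges upward, bailing out at Region (merge) nodes; if it never reaches the other control, the match tree is broken up. A minimal sketch of that bounded walk, assuming a hypothetical CtrlNode type rather than HotSpot's Node:

struct CtrlNode {
  CtrlNode* ctrl_in;     // in(0): immediate control input
  bool      is_region;   // merge point
};

// Returns true if 'target' is reached by walking at most 'max_scan' control
// edges up from 'start' without hitting a merge point.
static bool post_dominates(CtrlNode* start, CtrlNode* target, unsigned max_scan = 6) {
  CtrlNode* x = start;
  for (unsigned j = 0; j < max_scan; j++) {
    if (x == nullptr || x->is_region) return false;  // bail at merges / graph top
    x = x->ctrl_in;
    if (x == target) return true;
  }
  return false;                                      // no post-domination found in range
}

int main() {
  CtrlNode c{nullptr, false}, b{&c, false}, a{&b, false};
  return post_dominates(&a, &c) ? 0 : 1;             // a -> b -> c within the scan bound
}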
1611
1612
1613//------------------------------Instruction Selection--------------------------
1614// Label method walks a "tree" of nodes, using the ADLC generated DFA to match
1615// ideal nodes to machine instructions. Trees are delimited by shared Nodes,
1616// things the Matcher does not match (e.g., Memory), and things with different
1617// Controls (hence forced into different blocks). We pass in the Control
1618// selected for this entire State tree.
1619
1620// The Matcher works on Trees, but an Intel add-to-memory requires a DAG: the
1621// Store and the Load must have identical Memories (as well as identical
1622// pointers). Since the Matcher does not have anything for Memory (and
1623// does not handle DAGs), I have to match the Memory input myself. If the
1624// Tree root is a Store or if there are multiple Loads in the tree, I require
1625// all Loads to have the identical memory.
1626Node* Matcher::Label_Root(const Node* n, State* svec, Node* control, Node*& mem) {
1627 // Since Label_Root is a recursive function, it's possible that we might run
1628 // out of stack space. See bugs 6272980 & 6227033 for more info.
1629 LabelRootDepth++;
1630 if (LabelRootDepth > MaxLabelRootDepth) {
1631 C->record_method_not_compilable("Out of stack space, increase MaxLabelRootDepth");
1632 return NULL__null;
1633 }
1634 uint care = 0; // Edges matcher cares about
1635 uint cnt = n->req();
1636 uint i = 0;
1637
1638 // Examine children for memory state
1639 // Can only subsume a child into your match-tree if that child's memory state
1640 // is not modified along the path to another input.
1641 // It is unsafe even if the other inputs are separate roots.
1642 Node *input_mem = NULL__null;
1643 for( i = 1; i < cnt; i++ ) {
1644 if( !n->match_edge(i) ) continue;
1645 Node *m = n->in(i); // Get ith input
1646 assert( m, "expect non-null children" );
1647 if( m->is_Load() ) {
1648 if( input_mem == NULL__null ) {
1649 input_mem = m->in(MemNode::Memory);
1650 if (mem == (Node*)1) {
1651 // Save this memory to bail out if there's another memory access
1652 // to a different memory location in the same tree.
1653 mem = input_mem;
1654 }
1655 } else if( input_mem != m->in(MemNode::Memory) ) {
1656 input_mem = NodeSentinel;
1657 }
1658 }
1659 }
1660
1661 for( i = 1; i < cnt; i++ ){// For my children
1662 if( !n->match_edge(i) ) continue;
1663 Node *m = n->in(i); // Get ith input
1664 // Allocate states out of a private arena
1665 State *s = new (&_states_arena) State;
1666 svec->_kids[care++] = s;
1667 assert( care <= 2, "binary only for now" );
1668
1669 // Recursively label the State tree.
1670 s->_kids[0] = NULL__null;
1671 s->_kids[1] = NULL__null;
1672 s->_leaf = m;
1673
1674 // Check for leaves of the State Tree; things that cannot be a part of
1675 // the current tree. If it finds any, that value is matched as a
1676 // register operand. If not, then the normal matching is used.
1677 if( match_into_reg(n, m, control, i, is_shared(m)) ||
1678 // Stop recursion if this is a LoadNode and there is another memory access
1679 // to a different memory location in the same tree (for example, a StoreNode
1680 // at the root of this tree or another LoadNode in one of the children).
1681 ((mem!=(Node*)1) && m->is_Load() && m->in(MemNode::Memory) != mem) ||
1682 // Can NOT include the match of a subtree when its memory state
1683 // is used by any of the other subtrees
1684 (input_mem == NodeSentinel) ) {
1685 // Print when we exclude matching due to different memory states at input-loads
1686 if (PrintOpto && (Verbose && WizardMode) && (input_mem == NodeSentinel)
1687 && !((mem!=(Node*)1) && m->is_Load() && m->in(MemNode::Memory) != mem)) {
1688 tty->print_cr("invalid input_mem");
1689 }
1690 // Switch to a register-only opcode; this value must be in a register
1691 // and cannot be subsumed as part of a larger instruction.
1692 s->DFA( m->ideal_reg(), m );
1693
1694 } else {
1695 // If match tree has no control and we do, adopt it for entire tree
1696 if( control == NULL__null && m->in(0) != NULL__null && m->req() > 1 )
1697 control = m->in(0); // Pick up control
1698 // Else match as a normal part of the match tree.
1699 control = Label_Root(m, s, control, mem);
1700 if (C->failing()) return NULL__null;
1701 }
1702 }
1703
1704 // Call DFA to match this node, and return
1705 svec->DFA( n->Opcode(), n );
1706
1707#ifdef ASSERT
1708 uint x;
1709 for( x = 0; x < _LAST_MACH_OPER; x++ )
1710 if( svec->valid(x) )
1711 break;
1712
1713 if (x >= _LAST_MACH_OPER) {
1714 n->dump();
1715 svec->dump();
1716 assert( false, "bad AD file" );
1717 }
1718#endif
1719 return control;
1720}
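
Label_Root assigns each State the cheapest rule the ADLC-generated DFA accepts for its subtree, bottom-up, cutting the tree at shared values, differing controls, and mismatched memory states. The toy labeling below over a binary expression tree illustrates only the cheapest-rule idea; the rule names and costs are invented, not taken from any AD file:

#include <cstdio>

enum Op { Con, Add, Mul };

struct ExprNode {
  Op        op;
  ExprNode* kid[2];   // null for leaves
};

struct Label {
  const char* rule;   // chosen instruction rule
  int         cost;   // accumulated cost
};

static Label label(ExprNode* n) {
  if (n->op == Con) return {"loadConI", 1};
  Label l = label(n->kid[0]);
  Label r = label(n->kid[1]);
  int kids = l.cost + r.cost;
  // Pretend the AD file offers one rule per operator; pick it and add its cost.
  if (n->op == Add) return {"addI_rReg", kids + 1};
  return {"mulI_rReg", kids + 3};
}

int main() {
  ExprNode c1{Con, {nullptr, nullptr}}, c2{Con, {nullptr, nullptr}};
  ExprNode add{Add, {&c1, &c2}};
  Label best = label(&add);
  std::printf("matched %s, cost %d\n", best.rule, best.cost);  // addI_rReg, cost 3
  return 0;
}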
1721
1722
1723// Con nodes reduced using the same rule can share their MachNode
1724// which reduces the number of copies of a constant in the final
1725// program. The register allocator is free to split uses later to
1726// split live ranges.
1727MachNode* Matcher::find_shared_node(Node* leaf, uint rule) {
1728 if (!leaf->is_Con() && !leaf->is_DecodeNarrowPtr()) return NULL__null;
1729
1730 // See if this Con has already been reduced using this rule.
1731 if (_shared_nodes.Size() <= leaf->_idx) return NULL__null;
1732 MachNode* last = (MachNode*)_shared_nodes.at(leaf->_idx);
1733 if (last != NULL__null && rule == last->rule()) {
1734 // Don't expect control change for DecodeN
1735 if (leaf->is_DecodeNarrowPtr())
1736 return last;
1737 // Get the new space root.
1738 Node* xroot = new_node(C->root());
1739 if (xroot == NULL__null) {
1740 // This shouldn't happen given the order of matching.
1741 return NULL__null;
1742 }
1743
1744 // Shared constants need to have their control be root so they
1745 // can be scheduled properly.
1746 Node* control = last->in(0);
1747 if (control != xroot) {
1748 if (control == NULL__null || control == C->root()) {
1749 last->set_req(0, xroot);
1750 } else {
1751 assert(false, "unexpected control");
1752 return NULL__null;
1753 }
1754 }
1755 return last;
1756 }
1757 return NULL__null;
1758}
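
find_shared_node() lets all constants reduced by the same rule reuse one MachNode, keyed by the leaf's node index. A minimal memoization sketch of that idea; std::map, MachStub, and the (index, rule) key are stand-ins for illustration, not HotSpot code:

#include <map>
#include <utility>

struct MachStub { int rule; };

static std::map<std::pair<unsigned, int>, MachStub*> shared_nodes;

static MachStub* find_or_make_shared(unsigned node_idx, int rule) {
  auto key = std::make_pair(node_idx, rule);
  auto it = shared_nodes.find(key);
  if (it != shared_nodes.end()) return it->second;   // reuse the earlier reduction
  MachStub* m = new MachStub{rule};                  // sketch: never freed
  shared_nodes[key] = m;
  return m;
}

int main() {
  MachStub* a = find_or_make_shared(42, 7);
  MachStub* b = find_or_make_shared(42, 7);
  return (a == b) ? 0 : 1;                           // same constant + rule -> same node
}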
1759
1760
1761//------------------------------ReduceInst-------------------------------------
1762// Reduce a State tree (with given Control) into a tree of MachNodes.
1763// This routine (and its cohort ReduceOper) converts Ideal Nodes into
1764// complicated machine Nodes. Each MachNode covers some tree of Ideal Nodes.
1765// Each MachNode has a number of complicated MachOper operands; each
1766// MachOper also covers a further tree of Ideal Nodes.
1767
1768// The root of the Ideal match tree is always an instruction, so we enter
1769// the recursion here. After building the MachNode, we need to recurse
1770// the tree checking for these cases:
1771// (1) Child is an instruction -
1772// Build the instruction (recursively), add it as an edge.
1773// Build a simple operand (register) to hold the result of the instruction.
1774// (2) Child is an interior part of an instruction -
1775// Skip over it (do nothing)
1776// (3) Child is the start of an operand -
1777// Build the operand, place it inside the instruction
1778// Call ReduceOper.
1779MachNode *Matcher::ReduceInst( State *s, int rule, Node *&mem ) {
1780 assert( rule >= NUM_OPERANDS, "called with operand rule" );
1781
1782 MachNode* shared_node = find_shared_node(s->_leaf, rule);
1783 if (shared_node != NULL__null) {
1784 return shared_node;
1785 }
1786
1787 // Build the object to represent this state & prepare for recursive calls
1788 MachNode *mach = s->MachNodeGenerator(rule);
1789 guarantee(mach != NULL, "Missing MachNode");
1790 mach->_opnds[0] = s->MachOperGenerator(_reduceOp[rule]);
1791 assert( mach->_opnds[0] != NULL, "Missing result operand" );
1792 Node *leaf = s->_leaf;
1793 NOT_PRODUCT(record_new2old(mach, leaf);)record_new2old(mach, leaf);
1794 // Check for instruction or instruction chain rule
1795 if( rule >= _END_INST_CHAIN_RULE || rule < _BEGIN_INST_CHAIN_RULE ) {
1796 assert(C->node_arena()->contains(s->_leaf) || !has_new_node(s->_leaf),
1797 "duplicating node that's already been matched");
1798 // Instruction
1799 mach->add_req( leaf->in(0) ); // Set initial control
1800 // Reduce interior of complex instruction
1801 ReduceInst_Interior( s, rule, mem, mach, 1 );
1802 } else {
1803 // Instruction chain rules are data-dependent on their inputs
1804 mach->add_req(0); // Set initial control to none
1805 ReduceInst_Chain_Rule( s, rule, mem, mach );
1806 }
1807
1808 // If a Memory was used, insert a Memory edge
1809 if( mem != (Node*)1 ) {
1810 mach->ins_req(MemNode::Memory,mem);
1811#ifdef ASSERT
1812 // Verify adr type after matching memory operation
1813 const MachOper* oper = mach->memory_operand();
1814 if (oper != NULL__null && oper != (MachOper*)-1) {
1815 // It has a unique memory operand. Find corresponding ideal mem node.
1816 Node* m = NULL__null;
1817 if (leaf->is_Mem()) {
1818 m = leaf;
1819 } else {
1820 m = _mem_node;
1821 assert(m != NULL && m->is_Mem(), "expecting memory node");
1822 }
1823 const Type* mach_at = mach->adr_type();
1824 // DecodeN node consumed by an address may have different type
1825 // than its input. Don't compare types for such case.
1826 if (m->adr_type() != mach_at &&
1827 (m->in(MemNode::Address)->is_DecodeNarrowPtr() ||
1828 (m->in(MemNode::Address)->is_AddP() &&
1829 m->in(MemNode::Address)->in(AddPNode::Address)->is_DecodeNarrowPtr()) ||
1830 (m->in(MemNode::Address)->is_AddP() &&
1831 m->in(MemNode::Address)->in(AddPNode::Address)->is_AddP() &&
1832 m->in(MemNode::Address)->in(AddPNode::Address)->in(AddPNode::Address)->is_DecodeNarrowPtr()))) {
1833 mach_at = m->adr_type();
1834 }
1835 if (m->adr_type() != mach_at) {
1836 m->dump();
1837 tty->print_cr("mach:");
1838 mach->dump(1);
1839 }
1840 assert(m->adr_type() == mach_at, "matcher should not change adr type");
1841 }
1842#endif
1843 }
1844
1845 // If the _leaf is an AddP, insert the base edge
1846 if (leaf->is_AddP()) {
1847 mach->ins_req(AddPNode::Base,leaf->in(AddPNode::Base));
1848 }
1849
1850 uint number_of_projections_prior = number_of_projections();
1851
1852 // Perform any 1-to-many expansions required
1853 MachNode *ex = mach->Expand(s, _projection_list, mem);
1854 if (ex != mach) {
1855 assert(ex->ideal_reg() == mach->ideal_reg(), "ideal types should match");
1856 if( ex->in(1)->is_Con() )
1857 ex->in(1)->set_req(0, C->root());
1858 // Remove old node from the graph
1859 for( uint i=0; i<mach->req(); i++ ) {
1860 mach->set_req(i,NULL__null);
1861 }
1862 NOT_PRODUCT(record_new2old(ex, s->_leaf);)record_new2old(ex, s->_leaf);
1863 }
1864
1865 // PhaseChaitin::fixup_spills will sometimes generate spill code
1866 // via the matcher. By the time, nodes have been wired into the CFG,
1867 // and any further nodes generated by expand rules will be left hanging
1868 // in space, and will not get emitted as output code. Catch this.
1869 // Also, catch any new register allocation constraints ("projections")
1870 // generated belatedly during spill code generation.
1871 if (_allocation_started) {
1872 guarantee(ex == mach, "no expand rules during spill generation");
1873 guarantee(number_of_projections_prior == number_of_projections(), "no allocation during spill generation");
1874 }
1875
1876 if (leaf->is_Con() || leaf->is_DecodeNarrowPtr()) {
1877 // Record the con for sharing
1878 _shared_nodes.map(leaf->_idx, ex);
1879 }
1880
1881 // Have mach nodes inherit GC barrier data
1882 if (leaf->is_LoadStore()) {
1883 mach->set_barrier_data(leaf->as_LoadStore()->barrier_data());
1884 } else if (leaf->is_Mem()) {
1885 mach->set_barrier_data(leaf->as_Mem()->barrier_data());
1886 }
1887
1888 return ex;
1889}
1890
1891void Matcher::handle_precedence_edges(Node* n, MachNode *mach) {
1892 for (uint i = n->req(); i < n->len(); i++) {
1893 if (n->in(i) != NULL__null) {
1894 mach->add_prec(n->in(i));
1895 }
1896 }
1897}
1898
1899void Matcher::ReduceInst_Chain_Rule(State* s, int rule, Node* &mem, MachNode* mach) {
1900 // 'op' is what I am expecting to receive
1901 int op = _leftOp[rule];
1902 // Operand type to catch childs result
1903 // This is what my child will give me.
1904 unsigned int opnd_class_instance = s->rule(op);
1905 // Choose between operand class or not.
1906 // This is what I will receive.
1907 int catch_op = (FIRST_OPERAND_CLASS <= op && op < NUM_OPERANDS) ? opnd_class_instance : op;
1908 // New rule for child. Chase operand classes to get the actual rule.
1909 unsigned int newrule = s->rule(catch_op);
1910
1911 if (newrule < NUM_OPERANDS) {
1912 // Chain from operand or operand class, may be output of shared node
1913 assert(opnd_class_instance < NUM_OPERANDS, "Bad AD file: Instruction chain rule must chain from operand");
1914 // Insert operand into array of operands for this instruction
1915 mach->_opnds[1] = s->MachOperGenerator(opnd_class_instance);
1916
1917 ReduceOper(s, newrule, mem, mach);
1918 } else {
1919 // Chain from the result of an instruction
1920 assert(newrule >= _LAST_MACH_OPER, "Do NOT chain from internal operand");
1921 mach->_opnds[1] = s->MachOperGenerator(_reduceOp[catch_op]);
1922 Node *mem1 = (Node*)1;
1923 debug_only(Node *save_mem_node = _mem_node;)Node *save_mem_node = _mem_node;
1924 mach->add_req( ReduceInst(s, newrule, mem1) );
1925 debug_only(_mem_node = save_mem_node;)_mem_node = save_mem_node;
1926 }
1927 return;
1928}
1929
1930
1931uint Matcher::ReduceInst_Interior( State *s, int rule, Node *&mem, MachNode *mach, uint num_opnds ) {
1932 handle_precedence_edges(s->_leaf, mach);
1933
1934 if( s->_leaf->is_Load() ) {
1935 Node *mem2 = s->_leaf->in(MemNode::Memory);
1936 assert( mem == (Node*)1 || mem == mem2, "multiple Memories being matched at once?" );
1937 debug_only( if( mem == (Node*)1 ) _mem_node = s->_leaf;)if( mem == (Node*)1 ) _mem_node = s->_leaf;
1938 mem = mem2;
1939 }
1940 if( s->_leaf->in(0) != NULL__null && s->_leaf->req() > 1) {
1941 if( mach->in(0) == NULL__null )
1942 mach->set_req(0, s->_leaf->in(0));
1943 }
1944
1945 // Now recursively walk the state tree & add operand list.
1946 for( uint i=0; i<2; i++ ) { // binary tree
1947 State *newstate = s->_kids[i];
1948 if( newstate == NULL__null ) break; // Might only have 1 child
1949 // 'op' is what I am expecting to receive
1950 int op;
1951 if( i == 0 ) {
1952 op = _leftOp[rule];
1953 } else {
1954 op = _rightOp[rule];
1955 }
1956 // Operand type to catch childs result
1957 // This is what my child will give me.
1958 int opnd_class_instance = newstate->rule(op);
1959 // Choose between operand class or not.
1960 // This is what I will receive.
1961 int catch_op = (op >= FIRST_OPERAND_CLASS && op < NUM_OPERANDS) ? opnd_class_instance : op;
1962 // New rule for child. Chase operand classes to get the actual rule.
1963 int newrule = newstate->rule(catch_op);
1964
1965 if (newrule < NUM_OPERANDS) { // Operand/operandClass or internalOp/instruction?
1966 // Operand/operandClass
1967 // Insert operand into array of operands for this instruction
1968 mach->_opnds[num_opnds++] = newstate->MachOperGenerator(opnd_class_instance);
1969 ReduceOper(newstate, newrule, mem, mach);
1970
1971 } else { // Child is internal operand or new instruction
1972 if (newrule < _LAST_MACH_OPER) { // internal operand or instruction?
1973 // internal operand --> call ReduceInst_Interior
1974 // Interior of complex instruction. Do nothing but recurse.
1975 num_opnds = ReduceInst_Interior(newstate, newrule, mem, mach, num_opnds);
1976 } else {
1977 // instruction --> call build operand( ) to catch result
1978 // --> ReduceInst( newrule )
1979 mach->_opnds[num_opnds++] = s->MachOperGenerator(_reduceOp[catch_op]);
1980 Node *mem1 = (Node*)1;
1981 debug_only(Node *save_mem_node = _mem_node;)Node *save_mem_node = _mem_node;
1982 mach->add_req( ReduceInst( newstate, newrule, mem1 ) );
1983 debug_only(_mem_node = save_mem_node;)_mem_node = save_mem_node;
1984 }
1985 }
1986 assert( mach->_opnds[num_opnds-1], "" );
1987 }
1988 return num_opnds;
1989}
1990
1991// This routine walks the interior of possible complex operands.
1992// At each point we check our children in the match tree:
1993// (1) No children -
1994// We are a leaf; add _leaf field as an input to the MachNode
1995// (2) Child is an internal operand -
1996// Skip over it ( do nothing )
1997// (3) Child is an instruction -
1998// Call ReduceInst recursively and
1999// add the instruction as an input to the MachNode
2000void Matcher::ReduceOper( State *s, int rule, Node *&mem, MachNode *mach ) {
2001 assert( rule < _LAST_MACH_OPER, "called with operand rule" );
2002 State *kid = s->_kids[0];
2003 assert( kid == NULL || s->_leaf->in(0) == NULL, "internal operands have no control" );
2004
2005 // Leaf? And not subsumed?
2006 if( kid == NULL__null && !_swallowed[rule] ) {
2007 mach->add_req( s->_leaf ); // Add leaf pointer
2008 return; // Bail out
2009 }
2010
2011 if( s->_leaf->is_Load() ) {
2012 assert( mem == (Node*)1, "multiple Memories being matched at once?" );
2013 mem = s->_leaf->in(MemNode::Memory);
2014 debug_only(_mem_node = s->_leaf;)_mem_node = s->_leaf;
2015 }
2016
2017 handle_precedence_edges(s->_leaf, mach);
2018
2019 if( s->_leaf->in(0) && s->_leaf->req() > 1) {
2020 if( !mach->in(0) )
2021 mach->set_req(0,s->_leaf->in(0));
2022 else {
2023 assert( s->_leaf->in(0) == mach->in(0), "same instruction, differing controls?" );
2024 }
2025 }
2026
2027 for (uint i = 0; kid != NULL && i < 2; kid = s->_kids[1], i++) { // binary tree
2028 int newrule;
2029 if( i == 0) {
2030 newrule = kid->rule(_leftOp[rule]);
2031 } else {
2032 newrule = kid->rule(_rightOp[rule]);
2033 }
2034
2035 if (newrule < _LAST_MACH_OPER) { // Operand or instruction?
2036 // Internal operand; recurse but do nothing else
2037 ReduceOper(kid, newrule, mem, mach);
2038
2039 } else { // Child is a new instruction
2040 // Reduce the instruction, and add a direct pointer from this
2041 // machine instruction to the newly reduced one.
2042 Node *mem1 = (Node*)1;
2043 debug_only(Node *save_mem_node = _mem_node;)
2044 mach->add_req( ReduceInst( kid, newrule, mem1 ) );
2045 debug_only(_mem_node = save_mem_node;)
2046 }
2047 }
2048}
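// Illustration (not from the source): when an instruction rule folds a load into a
// complex memory operand, ReduceOper() above does the folding work. For a matched
// subtree roughly of the shape AddI(reg, LoadI(ctrl, mem, AddP(base, base, #off))),
// the walk skips the internal operand states, records the LoadI's memory edge into
// 'mem', and adds the remaining leaves (the register input and the address leaves)
// as inputs of the single MachNode, so no separate load instruction is emitted.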
2049
2050
2051// -------------------------------------------------------------------------
2052// Java-Java calling convention
2053// (what you use when Java calls Java)
2054
2055//------------------------------find_receiver----------------------------------
2056// For a given signature, return the OptoReg for parameter 0.
2057OptoReg::Name Matcher::find_receiver() {
2058 VMRegPair regs;
2059 BasicType sig_bt = T_OBJECT;
2060 SharedRuntime::java_calling_convention(&sig_bt, &regs, 1);
2061 // Return argument 0 register. In the LP64 build pointers
2062 // take 2 registers, but the VM wants only the 'main' name.
2063 return OptoReg::as_OptoReg(regs.first());
2064}
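// Sketch (illustrative, mirroring the java_calling_convention() call above): the
// same query for a hypothetical (Object, int) signature would look like
//   BasicType sig_bt[2] = { T_OBJECT, T_INT };
//   VMRegPair regs[2];
//   SharedRuntime::java_calling_convention(sig_bt, regs, 2);
//   OptoReg::Name recv = OptoReg::as_OptoReg(regs[0].first()); // parameter 0
//   OptoReg::Name arg1 = OptoReg::as_OptoReg(regs[1].first()); // parameter 1
// As in find_receiver(), only the 'main' half of an LP64 pointer pair is returned.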
2065
2066bool Matcher::is_vshift_con_pattern(Node* n, Node* m) {
2067 if (n != NULL && m != NULL) {
2068 return VectorNode::is_vector_shift(n) &&
2069 VectorNode::is_vector_shift_count(m) && m->in(1)->is_Con();
2070 }
2071 return false;
2072}
2073
2074bool Matcher::clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
2075 // Must clone all producers of flags, or we will not match correctly.
2076 // Suppose a compare setting int-flags is shared (e.g., a switch-tree)
2077 // then it will match into an ideal Op_RegFlags. Alas, the fp-flags
2078 // are also there, so we may match a float-branch to int-flags and
2079 // expect the allocator to haul the flags from the int-side to the
2080 // fp-side. No can do.
2081 if (_must_clone[m->Opcode()]) {
2082 mstack.push(m, Visit);
2083 return true;
2084 }
2085 return pd_clone_node(n, m, mstack);
2086}
2087
2088bool Matcher::clone_base_plus_offset_address(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
2089 Node *off = m->in(AddPNode::Offset);
2090 if (off->is_Con()) {
2091 address_visited.test_set(m->_idx); // Flag as address_visited
2092 mstack.push(m->in(AddPNode::Address), Pre_Visit);
2093 // Clone X+offset as it also folds into most addressing expressions
2094 mstack.push(off, Visit);
2095 mstack.push(m->in(AddPNode::Base), Pre_Visit);
2096 return true;
2097 }
2098 return false;
2099}
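// Illustration (not from the source): the shape handled above is
//   AddP(Base, Address, ConI/ConL #offset)
// e.g. the address of a field access lowers to AddP(obj, obj, #field_offset).
// Cloning the AddP per memory use lets each access fold it into a [reg + disp]
// addressing mode instead of forcing the computed sum into its own register.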
2100
2101// A method-klass-holder may be passed in the inline_cache_reg
2102// and then expanded into the inline_cache_reg and a method_ptr register
2103// defined in ad_<arch>.cpp
2104
2105//------------------------------find_shared------------------------------------
2106// Set bits if Node is shared or otherwise a root
2107void Matcher::find_shared(Node* n) {
2108 // Allocate stack of size C->live_nodes() * 2 to avoid frequent realloc
2109 MStack mstack(C->live_nodes() * 2);
2110 // Mark nodes as address_visited if they are inputs to an address expression
2111 VectorSet address_visited;
2112 mstack.push(n, Visit); // Don't need to pre-visit root node
2113 while (mstack.is_nonempty()) {
2114 n = mstack.node(); // Leave node on stack
2115 Node_State nstate = mstack.state();
2116 uint nop = n->Opcode();
2117 if (nstate == Pre_Visit) {
2118 if (address_visited.test(n->_idx)) { // Visited in address already?
2119 // Flag as visited and shared now.
2120 set_visited(n);
2121 }
2122 if (is_visited(n)) { // Visited already?
2123 // Node is shared and has no reason to clone. Flag it as shared.
2124 // This causes it to match into a register for the sharing.
2125 set_shared(n); // Flag as shared and
2126 if (n->is_DecodeNarrowPtr()) {
2127 // Oop field/array element loads must be shared but since
2128 // they are shared through a DecodeN they may appear to have
2129 // a single use so force sharing here.
2130 set_shared(n->in(1));
2131 }
2132 mstack.pop(); // remove node from stack
2133 continue;
2134 }
2135 nstate = Visit; // Not already visited; so visit now
2136 }
2137 if (nstate == Visit) {
2138 mstack.set_state(Post_Visit);
2139 set_visited(n); // Flag as visited now
2140 bool mem_op = false;
2141 int mem_addr_idx = MemNode::Address;
2142 if (find_shared_visit(mstack, n, nop, mem_op, mem_addr_idx)) {
2143 continue;
2144 }
2145 for (int i = n->req() - 1; i >= 0; --i) { // For my children
2146 Node* m = n->in(i); // Get ith input
2147 if (m == NULL) {
2148 continue; // Ignore NULLs
2149 }
2150 if (clone_node(n, m, mstack)) {
2151 continue;
2152 }
2153
2154 // Clone addressing expressions as they are "free" in memory access instructions
2155 if (mem_op && i == mem_addr_idx && m->is_AddP() &&
2156 // When there are other uses besides address expressions
2157 // put it on stack and mark as shared.
2158 !is_visited(m)) {
2159 // Some inputs for address expression are not put on stack
2160 // to avoid marking them as shared and forcing them into register
2161 // if they are used only in address expressions.
2162 // But they should be marked as shared if there are other uses
2163 // besides address expressions.
2164
2165 if (pd_clone_address_expressions(m->as_AddP(), mstack, address_visited)) {
2166 continue;
2167 }
2168 } // if( mem_op &&
2169 mstack.push(m, Pre_Visit);
2170 } // for(int i = ...)
2171 }
2172 else if (nstate == Alt_Post_Visit) {
2173 mstack.pop(); // Remove node from stack
2174 // We cannot remove the Cmp input from the Bool here, as the Bool may be
2175 // shared and all users of the Bool need to move the Cmp in parallel.
2176 // This leaves both the Bool and the If pointing at the Cmp. To
2177 // prevent the Matcher from trying to Match the Cmp along both paths
2178 // BoolNode::match_edge always returns a zero.
2179
2180 // We reorder the Op_If in a pre-order manner, so we can visit without
2181 // accidentally sharing the Cmp (the Bool and the If make 2 users).
2182 n->add_req( n->in(1)->in(1) ); // Add the Cmp next to the Bool
2183 }
2184 else if (nstate == Post_Visit) {
2185 mstack.pop(); // Remove node from stack
2186
2187 // Now hack a few special opcodes
2188 uint opcode = n->Opcode();
2189 bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->matcher_find_shared_post_visit(this, n, opcode);
2190 if (!gc_handled) {
2191 find_shared_post_visit(n, opcode);
2192 }
2193 }
2194 else {
2195 ShouldNotReachHere();
2196 }
2197 } // end of while (mstack.is_nonempty())
2198}
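// A self-contained sketch (illustrative names, not HotSpot code) of the
// explicit-stack visit-state traversal that find_shared() uses above: a node is
// kept on the stack after its first visit so it can be post-processed once all
// of its inputs have been walked, and a node reached a second time is simply
// popped (the real code additionally marks it as shared at that point).
//
//   #include <vector>
//   enum VisitState { Visit, Post_Visit };
//   struct Item { int node; VisitState state; };
//
//   void walk(int root, const std::vector<std::vector<int>>& inputs) {
//     std::vector<bool> visited(inputs.size(), false);
//     std::vector<Item> stack;
//     stack.push_back({root, Visit});
//     while (!stack.empty()) {
//       Item top = stack.back();
//       if (top.state == Visit) {
//         if (visited[top.node]) { stack.pop_back(); continue; } // shared node
//         stack.back().state = Post_Visit;  // leave on stack for its post-visit
//         visited[top.node] = true;
//         for (int in : inputs[top.node]) {
//           if (!visited[in]) stack.push_back({in, Visit});
//         }
//       } else {
//         stack.pop_back();                 // post-visit: inputs already handled
//       }
//     }
//   }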
2199
2200bool Matcher::find_shared_visit(MStack& mstack, Node* n, uint opcode, bool& mem_op, int& mem_addr_idx) {
2201 switch(opcode) { // Handle some opcodes special
2202 case Op_Phi: // Treat Phis as shared roots
2203 case Op_Parm:
2204 case Op_Proj: // All handled specially during matching
2205 case Op_SafePointScalarObject:
2206 set_shared(n);
2207 set_dontcare(n);
2208 break;
2209 case Op_If:
2210 case Op_CountedLoopEnd:
2211 mstack.set_state(Alt_Post_Visit); // Alternative way
2212 // Convert (If (Bool (CmpX A B))) into (If (Bool) (CmpX A B)). Helps
2213 // with matching cmp/branch in 1 instruction. The Matcher needs the
2214 // Bool and CmpX side-by-side, because it can only get at constants
2215 // that are at the leaves of Match trees, and the Bool's condition acts
2216 // as a constant here.
2217 mstack.push(n->in(1), Visit); // Clone the Bool
2218 mstack.push(n->in(0), Pre_Visit); // Visit control input
2219 return true; // while (mstack.is_nonempty())
2220 case Op_ConvI2D: // These forms efficiently match with a prior
2221 case Op_ConvI2F: // Load but not a following Store
2222 if( n->in(1)->is_Load() && // Prior load
2223 n->outcnt() == 1 && // Not already shared
2224 n->unique_out()->is_Store() ) // Following store
2225 set_shared(n); // Force it to be a root
2226 break;
2227 case Op_ReverseBytesI:
2228 case Op_ReverseBytesL:
2229 if( n->in(1)->is_Load() && // Prior load
2230 n->outcnt() == 1 ) // Not already shared
2231 set_shared(n); // Force it to be a root
2232 break;
2233 case Op_BoxLock: // Cant match until we get stack-regs in ADLC
2234 case Op_IfFalse:
2235 case Op_IfTrue:
2236 case Op_MachProj:
2237 case Op_MergeMem:
2238 case Op_Catch:
2239 case Op_CatchProj:
2240 case Op_CProj:
2241 case Op_JumpProj:
2242 case Op_JProj:
2243 case Op_NeverBranch:
2244 set_dontcare(n);
2245 break;
2246 case Op_Jump:
2247 mstack.push(n->in(1), Pre_Visit); // Switch Value (could be shared)
2248 mstack.push(n->in(0), Pre_Visit); // Visit Control input
2249 return true; // while (mstack.is_nonempty())
2250 case Op_StrComp:
2251 case Op_StrEquals:
2252 case Op_StrIndexOf:
2253 case Op_StrIndexOfChar:
2254 case Op_AryEq:
2255 case Op_HasNegatives:
2256 case Op_StrInflatedCopy:
2257 case Op_StrCompressedCopy:
2258 case Op_EncodeISOArray:
2259 case Op_FmaD:
2260 case Op_FmaF:
2261 case Op_FmaVD:
2262 case Op_FmaVF:
2263 case Op_MacroLogicV:
2264 case Op_LoadVectorMasked:
2265 case Op_VectorCmpMasked:
2266 case Op_VectorLoadMask:
2267 set_shared(n); // Force result into register (it will be anyways)
2268 break;
2269 case Op_ConP: { // Convert pointers above the centerline to NUL
2270 TypeNode *tn = n->as_Type(); // Constants derive from type nodes
2271 const TypePtr* tp = tn->type()->is_ptr();
2272 if (tp->_ptr == TypePtr::AnyNull) {
2273 tn->set_type(TypePtr::NULL_PTR);
2274 }
2275 break;
2276 }
2277 case Op_ConN: { // Convert narrow pointers above the centerline to NUL
2278 TypeNode *tn = n->as_Type(); // Constants derive from type nodes
2279 const TypePtr* tp = tn->type()->make_ptr();
2280 if (tp && tp->_ptr == TypePtr::AnyNull) {
2281 tn->set_type(TypeNarrowOop::NULL_PTR);
2282 }
2283 break;
2284 }
2285 case Op_Binary: // These are introduced in the Post_Visit state.
2286 ShouldNotReachHere();
2287 break;
2288 case Op_ClearArray:
2289 case Op_SafePoint:
2290 mem_op = true;
2291 break;
2292 default:
2293 if( n->is_Store() ) {
2294 // Do match stores, despite no ideal reg
2295 mem_op = true;
2296 break;
2297 }
2298 if( n->is_Mem() ) { // Loads and LoadStores
2299 mem_op = true;
2300 // Loads must be root of match tree due to prior load conflict
2301 if( C->subsume_loads() == false )
2302 set_shared(n);
2303 }
2304 // Fall into default case
2305 if( !n->ideal_reg() )
2306 set_dontcare(n); // Unmatchable Nodes
2307 } // end_switch
2308 return false;
2309}
2310
2311void Matcher::find_shared_post_visit(Node* n, uint opcode) {
2312 if (n->is_predicated_vector()) {
2313 // Restructure into binary trees for Matching.
2314 if (n->req() == 4) {
2315 n->set_req(1, new BinaryNode(n->in(1), n->in(2)));
2316 n->set_req(2, n->in(3));
2317 n->del_req(3);
2318 } else if (n->req() == 5) {
2319 n->set_req(1, new BinaryNode(n->in(1), n->in(2)));
2320 n->set_req(2, new BinaryNode(n->in(3), n->in(4)));
2321 n->del_req(4);
2322 n->del_req(3);
2323 }
2324 return;
2325 }
2326
2327 switch(opcode) { // Handle some opcodes special
2328 case Op_StorePConditional:
2329 case Op_StoreIConditional:
2330 case Op_StoreLConditional:
2331 case Op_CompareAndExchangeB:
2332 case Op_CompareAndExchangeS:
2333 case Op_CompareAndExchangeI:
2334 case Op_CompareAndExchangeL:
2335 case Op_CompareAndExchangeP:
2336 case Op_CompareAndExchangeN:
2337 case Op_WeakCompareAndSwapB:
2338 case Op_WeakCompareAndSwapS:
2339 case Op_WeakCompareAndSwapI:
2340 case Op_WeakCompareAndSwapL:
2341 case Op_WeakCompareAndSwapP:
2342 case Op_WeakCompareAndSwapN:
2343 case Op_CompareAndSwapB:
2344 case Op_CompareAndSwapS:
2345 case Op_CompareAndSwapI:
2346 case Op_CompareAndSwapL:
2347 case Op_CompareAndSwapP:
2348 case Op_CompareAndSwapN: { // Convert trinary to binary-tree
2349 Node* newval = n->in(MemNode::ValueIn);
2350 Node* oldval = n->in(LoadStoreConditionalNode::ExpectedIn);
2351 Node* pair = new BinaryNode(oldval, newval);
2352 n->set_req(MemNode::ValueIn, pair);
2353 n->del_req(LoadStoreConditionalNode::ExpectedIn);
2354 break;
2355 }
2356 case Op_CMoveD: // Convert trinary to binary-tree
2357 case Op_CMoveF:
2358 case Op_CMoveI:
2359 case Op_CMoveL:
2360 case Op_CMoveN:
2361 case Op_CMoveP:
2362 case Op_CMoveVF:
2363 case Op_CMoveVD: {
2364 // Restructure into a binary tree for Matching. It's possible that
2365 // we could move this code up next to the graph reshaping for IfNodes
2366 // or vice-versa, but I do not want to debug this for Ladybird.
2367 // 10/2/2000 CNC.
2368 Node* pair1 = new BinaryNode(n->in(1), n->in(1)->in(1));
2369 n->set_req(1, pair1);
2370 Node* pair2 = new BinaryNode(n->in(2), n->in(3));
2371 n->set_req(2, pair2);
2372 n->del_req(3);
2373 break;
2374 }
2375 case Op_VectorCmpMasked: {
2376 Node* pair1 = new BinaryNode(n->in(2), n->in(3));
2377 n->set_req(2, pair1);
2378 n->del_req(3);
2379 break;
2380 }
2381 case Op_MacroLogicV: {
2382 Node* pair1 = new BinaryNode(n->in(1), n->in(2));
2383 Node* pair2 = new BinaryNode(n->in(3), n->in(4));
2384 n->set_req(1, pair1);
2385 n->set_req(2, pair2);
2386 n->del_req(4);
2387 n->del_req(3);
2388 break;
2389 }
2390 case Op_StoreVectorMasked: {
2391 Node* pair = new BinaryNode(n->in(3), n->in(4));
2392 n->set_req(3, pair);
2393 n->del_req(4);
2394 break;
2395 }
2396 case Op_LoopLimit: {
2397 Node* pair1 = new BinaryNode(n->in(1), n->in(2));
2398 n->set_req(1, pair1);
2399 n->set_req(2, n->in(3));
2400 n->del_req(3);
2401 break;
2402 }
2403 case Op_StrEquals:
2404 case Op_StrIndexOfChar: {
2405 Node* pair1 = new BinaryNode(n->in(2), n->in(3));
2406 n->set_req(2, pair1);
2407 n->set_req(3, n->in(4));
2408 n->del_req(4);
2409 break;
2410 }
2411 case Op_StrComp:
2412 case Op_StrIndexOf: {
2413 Node* pair1 = new BinaryNode(n->in(2), n->in(3));
2414 n->set_req(2, pair1);
2415 Node* pair2 = new BinaryNode(n->in(4),n->in(5));
2416 n->set_req(3, pair2);
2417 n->del_req(5);
2418 n->del_req(4);
2419 break;
2420 }
2421 case Op_StrCompressedCopy:
2422 case Op_StrInflatedCopy:
2423 case Op_EncodeISOArray: {
2424 // Restructure into a binary tree for Matching.
2425 Node* pair = new BinaryNode(n->in(3), n->in(4));
2426 n->set_req(3, pair);
2427 n->del_req(4);
2428 break;
2429 }
2430 case Op_FmaD:
2431 case Op_FmaF:
2432 case Op_FmaVD:
2433 case Op_FmaVF: {
2434 // Restructure into a binary tree for Matching.
2435 Node* pair = new BinaryNode(n->in(1), n->in(2));
2436 n->set_req(2, pair);
2437 n->set_req(1, n->in(3));
2438 n->del_req(3);
2439 break;
2440 }
2441 case Op_MulAddS2I: {
2442 Node* pair1 = new BinaryNode(n->in(1), n->in(2));
2443 Node* pair2 = new BinaryNode(n->in(3), n->in(4));
2444 n->set_req(1, pair1);
2445 n->set_req(2, pair2);
2446 n->del_req(4);
2447 n->del_req(3);
2448 break;
2449 }
2450 case Op_CopySignD:
2451 case Op_SignumF:
2452 case Op_SignumD: {
2453 Node* pair = new BinaryNode(n->in(2), n->in(3));
2454 n->set_req(2, pair);
2455 n->del_req(3);
2456 break;
2457 }
2458 case Op_VectorBlend:
2459 case Op_VectorInsert: {
2460 Node* pair = new BinaryNode(n->in(1), n->in(2));
2461 n->set_req(1, pair);
2462 n->set_req(2, n->in(3));
2463 n->del_req(3);
2464 break;
2465 }
2466 case Op_LoadVectorGatherMasked:
2467 case Op_StoreVectorScatter: {
2468 Node* pair = new BinaryNode(n->in(MemNode::ValueIn), n->in(MemNode::ValueIn+1));
2469 n->set_req(MemNode::ValueIn, pair);
2470 n->del_req(MemNode::ValueIn+1);
2471 break;
2472 }
2473 case Op_StoreVectorScatterMasked: {
2474 Node* pair = new BinaryNode(n->in(MemNode::ValueIn+1), n->in(MemNode::ValueIn+2));
2475 n->set_req(MemNode::ValueIn+1, pair);
2476 n->del_req(MemNode::ValueIn+2);
2477 pair = new BinaryNode(n->in(MemNode::ValueIn), n->in(MemNode::ValueIn+1));
2478 n->set_req(MemNode::ValueIn, pair);
2479 n->del_req(MemNode::ValueIn+1);
2480 break;
2481 }
2482 case Op_VectorMaskCmp: {
2483 n->set_req(1, new BinaryNode(n->in(1), n->in(2)));
2484 n->set_req(2, n->in(3));
2485 n->del_req(3);
2486 break;
2487 }
2488 default:
2489 break;
2490 }
2491}
2492
2493#ifndef PRODUCT
2494void Matcher::record_new2old(Node* newn, Node* old) {
2495 _new2old_map.map(newn->_idx, old);
2496 if (!_reused.test_set(old->_igv_idx)) {
2497 // Reuse the Ideal-level IGV identifier so that the node can be tracked
2498 // across matching. If there are multiple machine nodes expanded from the
2499 // same Ideal node, only one will reuse its IGV identifier.
2500 newn->_igv_idx = old->_igv_idx;
2501 }
2502}
2503
2504// machine-independent root to machine-dependent root
2505void Matcher::dump_old2new_map() {
2506 _old2new_map.dump();
2507}
2508#endif // !PRODUCT
2509
2510//---------------------------collect_null_checks-------------------------------
2511// Find null checks in the ideal graph; write a machine-specific node for
2512// it. Used by later implicit-null-check handling. Actually collects
2513// either an IfTrue or IfFalse for the common NOT-null path, AND the ideal
2514// value being tested.
2515void Matcher::collect_null_checks( Node *proj, Node *orig_proj ) {
2516 Node *iff = proj->in(0);
2517 if( iff->Opcode() == Op_If ) {
2518 // During matching If's have Bool & Cmp side-by-side
2519 BoolNode *b = iff->in(1)->as_Bool();
2520 Node *cmp = iff->in(2);
2521 int opc = cmp->Opcode();
2522 if (opc != Op_CmpP && opc != Op_CmpN) return;
2523
2524 const Type* ct = cmp->in(2)->bottom_type();
2525 if (ct == TypePtr::NULL_PTR ||
2526 (opc == Op_CmpN && ct == TypeNarrowOop::NULL_PTR)) {
2527
2528 bool push_it = false;
2529 if( proj->Opcode() == Op_IfTrue ) {
2530#ifndef PRODUCT
2531 extern int all_null_checks_found;
2532 all_null_checks_found++;
2533#endif
2534 if( b->_test._test == BoolTest::ne ) {
2535 push_it = true;
2536 }
2537 } else {
2538 assert( proj->Opcode() == Op_IfFalse, "" );
2539 if( b->_test._test == BoolTest::eq ) {
2540 push_it = true;
2541 }
2542 }
2543 if( push_it ) {
2544 _null_check_tests.push(proj);
2545 Node* val = cmp->in(1);
2546#ifdef _LP64
2547 if (val->bottom_type()->isa_narrowoop() &&
2548 !Matcher::narrow_oop_use_complex_address()) {
2549 //
2550 // Look for DecodeN node which should be pinned to orig_proj.
2551 // On platforms (Sparc) which can not handle 2 adds
2552 // in addressing mode we have to keep a DecodeN node and
2553 // use it to do implicit NULL check in address.
2554 //
2555 // DecodeN node was pinned to non-null path (orig_proj) during
2556 // CastPP transformation in final_graph_reshaping_impl().
2557 //
2558 uint cnt = orig_proj->outcnt();
2559 for (uint i = 0; i < orig_proj->outcnt(); i++) {
2560 Node* d = orig_proj->raw_out(i);
2561 if (d->is_DecodeN() && d->in(1) == val) {
2562 val = d;
2563 val->set_req(0, NULL); // Unpin now.
2564 // Mark this as special case to distinguish from
2565 // a regular case: CmpP(DecodeN, NULL).
2566 val = (Node*)(((intptr_t)val) | 1);
2567 break;
2568 }
2569 }
2570 }
2571#endif
2572 _null_check_tests.push(val);
2573 }
2574 }
2575 }
2576}
2577
2578//---------------------------validate_null_checks------------------------------
2579// It's possible that the value being NULL checked is not the root of a match
2580// tree. If so, I cannot use the value in an implicit null check.
2581void Matcher::validate_null_checks( ) {
2582 uint cnt = _null_check_tests.size();
2583 for( uint i=0; i < cnt; i+=2 ) {
2584 Node *test = _null_check_tests[i];
2585 Node *val = _null_check_tests[i+1];
2586 bool is_decoden = ((intptr_t)val) & 1;
2587 val = (Node*)(((intptr_t)val) & ~1);
2588 if (has_new_node(val)) {
2589 Node* new_val = new_node(val);
2590 if (is_decoden) {
2591 assert(val->is_DecodeNarrowPtr() && val->in(0) == NULL, "sanity");
2592 // Note: new_val may have a control edge if
2593 // the original ideal node DecodeN was matched before
2594 // it was unpinned in Matcher::collect_null_checks().
2595 // Unpin the mach node and mark it.
2596 new_val->set_req(0, NULL);
2597 new_val = (Node*)(((intptr_t)new_val) | 1);
2598 }
2599 // Is a match-tree root, so replace with the matched value
2600 _null_check_tests.map(i+1, new_val);
2601 } else {
2602 // Yank from candidate list
2603 _null_check_tests.map(i+1,_null_check_tests[--cnt]);
2604 _null_check_tests.map(i,_null_check_tests[--cnt]);
2605 _null_check_tests.pop();
2606 _null_check_tests.pop();
2607 i-=2;
2608 }
2609 }
2610}
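// The tagging seen above works because Node pointers are word-aligned, so the low
// bit is free: collect_null_checks() marks the DecodeN special case with
// "(Node*)(((intptr_t)val) | 1)", and validate_null_checks() recovers the flag with
// "((intptr_t)val) & 1" and the real pointer with "((intptr_t)val) & ~1".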
2611
2612bool Matcher::gen_narrow_oop_implicit_null_checks() {
2613 // Advise the matcher to perform null checks on the narrow oop side.
2614 // Implicit checks are not possible on the uncompressed oop side anyway
2615 // (at least not for read accesses).
2616 // Performs significantly better (especially on Power 6).
2617 if (!os::zero_page_read_protected()) {
2618 return true;
2619 }
2620 return CompressedOops::use_implicit_null_checks() &&
2621 (narrow_oop_use_complex_address() ||
2622 CompressedOops::base() != NULL);
2623}
2624
2625// Compute RegMask for an ideal register.
2626const RegMask* Matcher::regmask_for_ideal_register(uint ideal_reg, Node* ret) {
2627 const Type* t = Type::mreg2type[ideal_reg];
2628 if (t == NULL) {
2629 assert(ideal_reg >= Op_VecA && ideal_reg <= Op_VecZ, "not a vector: %d", ideal_reg);
2630 return NULL; // not supported
2631 }
2632 Node* fp = ret->in(TypeFunc::FramePtr);
2633 Node* mem = ret->in(TypeFunc::Memory);
2634 const TypePtr* atp = TypePtr::BOTTOM;
2635 MemNode::MemOrd mo = MemNode::unordered;
2636
2637 Node* spill;
2638 switch (ideal_reg) {
2639 case Op_RegN: spill = new LoadNNode(NULL, mem, fp, atp, t->is_narrowoop(), mo); break;
2640 case Op_RegI: spill = new LoadINode(NULL, mem, fp, atp, t->is_int(), mo); break;
2641 case Op_RegP: spill = new LoadPNode(NULL, mem, fp, atp, t->is_ptr(), mo); break;
2642 case Op_RegF: spill = new LoadFNode(NULL, mem, fp, atp, t, mo); break;
2643 case Op_RegD: spill = new LoadDNode(NULL, mem, fp, atp, t, mo); break;
2644 case Op_RegL: spill = new LoadLNode(NULL, mem, fp, atp, t->is_long(), mo); break;
2645
2646 case Op_VecA: // fall-through
2647 case Op_VecS: // fall-through
2648 case Op_VecD: // fall-through
2649 case Op_VecX: // fall-through
2650 case Op_VecY: // fall-through
2651 case Op_VecZ: spill = new LoadVectorNode(NULL, mem, fp, atp, t->is_vect()); break;
2652 case Op_RegVectMask: return Matcher::predicate_reg_mask();
2653
2654 default: ShouldNotReachHere();
2655 }
2656 MachNode* mspill = match_tree(spill);
2657 assert(mspill != NULL, "matching failed: %d", ideal_reg);
2658 // Handle generic vector operand case
2659 if (Matcher::supports_generic_vector_operands && t->isa_vect()) {
2660 specialize_mach_node(mspill);
2661 }
2662 return &mspill->out_RegMask();
2663}
2664
2665// Process Mach IR right after selection phase is over.
2666void Matcher::do_postselect_cleanup() {
2667 if (supports_generic_vector_operands) {
2668 specialize_generic_vector_operands();
2669 if (C->failing()) return;
2670 }
2671}
2672
2673//----------------------------------------------------------------------
2674// Generic machine operands elision.
2675//----------------------------------------------------------------------
2676
2677// Compute concrete vector operand for a generic TEMP vector mach node based on its user info.
2678void Matcher::specialize_temp_node(MachTempNode* tmp, MachNode* use, uint idx) {
2679 assert(use->in(idx) == tmp, "not a user");
2680 assert(!Matcher::is_generic_vector(use->_opnds[0]), "use not processed yet");
2681
2682 if ((uint)idx == use->two_adr()) { // DEF_TEMP case
2683 tmp->_opnds[0] = use->_opnds[0]->clone();
2684 } else {
2685 uint ideal_vreg = vector_ideal_reg(C->max_vector_size());
2686 tmp->_opnds[0] = Matcher::pd_specialize_generic_vector_operand(tmp->_opnds[0], ideal_vreg, true /*is_temp*/);
2687 }
2688}
2689
2690// Compute concrete vector operand for a generic DEF/USE vector operand (of mach node m at index idx).
2691MachOper* Matcher::specialize_vector_operand(MachNode* m, uint opnd_idx) {
2692 assert(Matcher::is_generic_vector(m->_opnds[opnd_idx]), "repeated updates");
2693 Node* def = NULL;
2694 if (opnd_idx == 0) { // DEF
2695 def = m; // use mach node itself to compute vector operand type
2696 } else {
2697 int base_idx = m->operand_index(opnd_idx);
2698 def = m->in(base_idx);
2699 if (def->is_Mach()) {
2700 if (def->is_MachTemp() && Matcher::is_generic_vector(def->as_Mach()->_opnds[0])) {
2701 specialize_temp_node(def->as_MachTemp(), m, base_idx); // MachTemp node use site
2702 } else if (is_reg2reg_move(def->as_Mach())) {
2703 def = def->in(1); // skip over generic reg-to-reg moves
2704 }
2705 }
2706 }
2707 assert(def->bottom_type()->isa_vect(), "not a vector");
2708 uint ideal_vreg = def->bottom_type()->ideal_reg();
2709 return Matcher::pd_specialize_generic_vector_operand(m->_opnds[opnd_idx], ideal_vreg, false /*is_temp*/);
2710}
2711
2712void Matcher::specialize_mach_node(MachNode* m) {
2713 assert(!m->is_MachTemp(), "processed along with its user");
2714 // For generic use operands pull specific register class operands from
2715 // its def instruction's output operand (def operand).
2716 for (uint i = 0; i < m->num_opnds(); i++) {
2717 if (Matcher::is_generic_vector(m->_opnds[i])) {
2718 m->_opnds[i] = specialize_vector_operand(m, i);
2719 }
2720 }
2721}
2722
2723// Replace generic vector operands with concrete vector operands and eliminate generic reg-to-reg moves from the graph.
2724void Matcher::specialize_generic_vector_operands() {
2725 assert(supports_generic_vector_operands, "sanity");
2726 ResourceMark rm;
2727
2728 // Replace generic vector operands (vec/legVec) with concrete ones (vec[SDXYZ]/legVec[SDXYZ])
2729 // and remove reg-to-reg vector moves (MoveVec2Leg and MoveLeg2Vec).
2730 Unique_Node_List live_nodes;
2731 C->identify_useful_nodes(live_nodes);
2732
2733 while (live_nodes.size() > 0) {
2734 MachNode* m = live_nodes.pop()->isa_Mach();
2735 if (m != NULL) {
2736 if (Matcher::is_reg2reg_move(m)) {
2737 // Register allocator properly handles vec <=> leg moves using register masks.
2738 int opnd_idx = m->operand_index(1);
2739 Node* def = m->in(opnd_idx);
2740 m->subsume_by(def, C);
2741 } else if (m->is_MachTemp()) {
2742 // process MachTemp nodes at use site (see Matcher::specialize_vector_operand)
2743 } else {
2744 specialize_mach_node(m);
2745 }
2746 }
2747 }
2748}
2749
2750uint Matcher::vector_length(const Node* n) {
2751 const TypeVect* vt = n->bottom_type()->is_vect();
2752 return vt->length();
2753}
2754
2755uint Matcher::vector_length(const MachNode* use, const MachOper* opnd) {
2756 int def_idx = use->operand_index(opnd);
2757 Node* def = use->in(def_idx);
2758 return def->bottom_type()->is_vect()->length();
2759}
2760
2761uint Matcher::vector_length_in_bytes(const Node* n) {
2762 const TypeVect* vt = n->bottom_type()->is_vect();
2763 return vt->length_in_bytes();
2764}
2765
2766uint Matcher::vector_length_in_bytes(const MachNode* use, const MachOper* opnd) {
2767 uint def_idx = use->operand_index(opnd);
2768 Node* def = use->in(def_idx);
2769 return def->bottom_type()->is_vect()->length_in_bytes();
2770}
2771
2772BasicType Matcher::vector_element_basic_type(const Node* n) {
2773 const TypeVect* vt = n->bottom_type()->is_vect();
2774 return vt->element_basic_type();
2775}
2776
2777BasicType Matcher::vector_element_basic_type(const MachNode* use, const MachOper* opnd) {
2778 int def_idx = use->operand_index(opnd);
2779 Node* def = use->in(def_idx);
2780 return def->bottom_type()->is_vect()->element_basic_type();
2781}
2782
2783#ifdef ASSERT
2784bool Matcher::verify_after_postselect_cleanup() {
2785 assert(!C->failing(), "sanity");
2786 if (supports_generic_vector_operands) {
2787 Unique_Node_List useful;
2788 C->identify_useful_nodes(useful);
2789 for (uint i = 0; i < useful.size(); i++) {
2790 MachNode* m = useful.at(i)->isa_Mach();
2791 if (m != NULL) {
2792 assert(!Matcher::is_reg2reg_move(m), "no MoveVec nodes allowed");
2793 for (uint j = 0; j < m->num_opnds(); j++) {
2794 assert(!Matcher::is_generic_vector(m->_opnds[j]), "no generic vector operands allowed");
2795 }
2796 }
2797 }
2798 }
2799 return true;
2800}
2801#endif // ASSERT
2802
2803// Used by the DFA in dfa_xxx.cpp. Check for a following barrier or
2804// atomic instruction acting as a store_load barrier without any
2805// intervening volatile load, and thus we don't need a barrier here.
2806// We retain the Node to act as a compiler ordering barrier.
2807bool Matcher::post_store_load_barrier(const Node* vmb) {
2808 Compile* C = Compile::current();
2809 assert(vmb->is_MemBar(), "");
2810 assert(vmb->Opcode() != Op_MemBarAcquire && vmb->Opcode() != Op_LoadFence, "");
2811 const MemBarNode* membar = vmb->as_MemBar();
2812
2813 // Get the Ideal Proj node, ctrl, that can be used to iterate forward
2814 Node* ctrl = NULL;
2815 for (DUIterator_Fast imax, i = membar->fast_outs(imax); i < imax; i++) {
2816 Node* p = membar->fast_out(i);
2817 assert(p->is_Proj(), "only projections here");
2818 if ((p->as_Proj()->_con == TypeFunc::Control) &&
2819 !C->node_arena()->contains(p)) { // Unmatched old-space only
2820 ctrl = p;
2821 break;
2822 }
2823 }
2824 assert((ctrl != NULL), "missing control projection");
2825
2826 for (DUIterator_Fast jmax, j = ctrl->fast_outs(jmax); j < jmax; j++) {
2827 Node *x = ctrl->fast_out(j);
2828 int xop = x->Opcode();
2829
2830 // We don't need current barrier if we see another or a lock
2831 // before seeing volatile load.
2832 //
2833 // Op_Fastunlock previously appeared in the Op_* list below.
2834 // With the advent of 1-0 lock operations we're no longer guaranteed
2835 // that a monitor exit operation contains a serializing instruction.
2836
2837 if (xop == Op_MemBarVolatile ||
2838 xop == Op_CompareAndExchangeB ||
2839 xop == Op_CompareAndExchangeS ||
2840 xop == Op_CompareAndExchangeI ||
2841 xop == Op_CompareAndExchangeL ||
2842 xop == Op_CompareAndExchangeP ||
2843 xop == Op_CompareAndExchangeN ||
2844 xop == Op_WeakCompareAndSwapB ||
2845 xop == Op_WeakCompareAndSwapS ||
2846 xop == Op_WeakCompareAndSwapL ||
2847 xop == Op_WeakCompareAndSwapP ||
2848 xop == Op_WeakCompareAndSwapN ||
2849 xop == Op_WeakCompareAndSwapI ||
2850 xop == Op_CompareAndSwapB ||
2851 xop == Op_CompareAndSwapS ||
2852 xop == Op_CompareAndSwapL ||
2853 xop == Op_CompareAndSwapP ||
2854 xop == Op_CompareAndSwapN ||
2855 xop == Op_CompareAndSwapI ||
2856 BarrierSet::barrier_set()->barrier_set_c2()->matcher_is_store_load_barrier(x, xop)) {
2857 return true;
2858 }
2859
2860 // Op_FastLock previously appeared in the Op_* list above.
2861 if (xop == Op_FastLock) {
2862 return true;
2863 }
2864
2865 if (x->is_MemBar()) {
2866 // We must retain this membar if there is an upcoming volatile
2867 // load, which will be followed by acquire membar.
2868 if (xop == Op_MemBarAcquire || xop == Op_LoadFence) {
2869 return false;
2870 } else {
2871 // For other kinds of barriers, check by pretending we
2872 // are them, and seeing if we can be removed.
2873 return post_store_load_barrier(x->as_MemBar());
2874 }
2875 }
2876
2877 // probably not necessary to check for these
2878 if (x->is_Call() || x->is_SafePoint() || x->is_block_proj()) {
2879 return false;
2880 }
2881 }
2882 return false;
2883}
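// Illustration (not from the source): after a matched CompareAndSwap, a trailing
// MemBarVolatile emitted for an earlier store can be elided by the scan above,
// because the atomic already acts as the required StoreLoad barrier; if instead a
// MemBarAcquire/LoadFence is found first, a volatile load is still coming and the
// barrier must be kept.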
2884
2885// Check whether node n is a branch to an uncommon trap that we could
2886// optimize as test with very high branch costs in case of going to
2887// the uncommon trap. The code must be able to be recompiled to use
2888// a cheaper test.
2889bool Matcher::branches_to_uncommon_trap(const Node *n) {
2890 // Don't do it for natives, adapters, or runtime stubs
2891 Compile *C = Compile::current();
2892 if (!C->is_method_compilation()) return false;
2893
2894 assert(n->is_If(), "You should only call this on if nodes.");
2895 IfNode *ifn = n->as_If();
2896
2897 Node *ifFalse = NULL;
2898 for (DUIterator_Fast imax, i = ifn->fast_outs(imax); i < imax; i++) {
2899 if (ifn->fast_out(i)->is_IfFalse()) {
2900 ifFalse = ifn->fast_out(i);
2901 break;
2902 }
2903 }
2904 assert(ifFalse, "An If should have an ifFalse. Graph is broken.");
2905
2906 Node *reg = ifFalse;
2907 int cnt = 4; // We must protect against cycles. Limit to 4 iterations.
2908 // Alternatively use visited set? Seems too expensive.
2909 while (reg != NULL && cnt > 0) {
2910 CallNode *call = NULL;
2911 RegionNode *nxt_reg = NULL;
2912 for (DUIterator_Fast imax, i = reg->fast_outs(imax); i < imax; i++) {
2913 Node *o = reg->fast_out(i);
2914 if (o->is_Call()) {
2915 call = o->as_Call();
2916 }
2917 if (o->is_Region()) {
2918 nxt_reg = o->as_Region();
2919 }
2920 }
2921
2922 if (call &&
2923 call->entry_point() == SharedRuntime::uncommon_trap_blob()->entry_point()) {
2924 const Type* trtype = call->in(TypeFunc::Parms)->bottom_type();
2925 if (trtype->isa_int() && trtype->is_int()->is_con()) {
2926 jint tr_con = trtype->is_int()->get_con();
2927 Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(tr_con);
2928 Deoptimization::DeoptAction action = Deoptimization::trap_request_action(tr_con);
2929 assert((int)reason < (int)BitsPerInt, "recode bit map");
2930
2931 if (is_set_nth_bit(C->allowed_deopt_reasons(), (int)reason)
2932 && action != Deoptimization::Action_none) {
2933 // This uncommon trap is sure to recompile, eventually.
2934 // When that happens, C->too_many_traps will prevent
2935 // this transformation from happening again.
2936 return true;
2937 }
2938 }
2939 }
2940
2941 reg = nxt_reg;
2942 cnt--;
2943 }
2944
2945 return false;
2946}
2947
2948//=============================================================================
2949//---------------------------State---------------------------------------------
2950State::State(void) : _rule() {
2951#ifdef ASSERT
2952 _id = 0;
2953 _kids[0] = _kids[1] = (State*)(intptr_t) CONST64(0xcafebabecafebabe);
2954 _leaf = (Node*)(intptr_t) CONST64(0xbaadf00dbaadf00d);
2955#endif
2956}
2957
2958#ifdef ASSERT
2959State::~State() {
2960 _id = 99;
2961 _kids[0] = _kids[1] = (State*)(intptr_t) CONST64(0xcafebabecafebabe);
2962 _leaf = (Node*)(intptr_t) CONST64(0xbaadf00dbaadf00d);
2963 memset(_cost, -3, sizeof(_cost));
2964 memset(_rule, -3, sizeof(_rule));
2965}
2966#endif
2967
2968#ifndef PRODUCT
2969//---------------------------dump----------------------------------------------
2970void State::dump() {
2971 tty->print("\n");
2972 dump(0);
2973}
2974
2975void State::dump(int depth) {
2976 for (int j = 0; j < depth; j++) {
2977 tty->print(" ");
2978 }
2979 tty->print("--N: ");
2980 _leaf->dump();
2981 uint i;
2982 for (i = 0; i < _LAST_MACH_OPER; i++) {
2983 // Check for valid entry
2984 if (valid(i)) {
2985 for (int j = 0; j < depth; j++) {
2986 tty->print(" ");
2987 }
2988 assert(cost(i) != max_juint, "cost must be a valid value");
2989 assert(rule(i) < _last_Mach_Node, "rule[i] must be valid rule");
2990 tty->print_cr("%s %d %s",
2991 ruleName[i], cost(i), ruleName[rule(i)] );
2992 }
2993 }
2994 tty->cr();
2995
2996 for (i = 0; i < 2; i++) {
2997 if (_kids[i]) {
2998 _kids[i]->dump(depth + 1);
2999 }
3000 }
3001}
3002#endif

/home/daniel/Projects/java/jdk/src/hotspot/share/opto/node.hpp

1/*
2 * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#ifndef SHARE_OPTO_NODE_HPP
26#define SHARE_OPTO_NODE_HPP
27
28#include "libadt/vectset.hpp"
29#include "opto/compile.hpp"
30#include "opto/type.hpp"
31#include "utilities/copy.hpp"
32
33// Portions of code courtesy of Clifford Click
34
35// Optimization - Graph Style
36
37
38class AbstractLockNode;
39class AddNode;
40class AddPNode;
41class AliasInfo;
42class AllocateArrayNode;
43class AllocateNode;
44class ArrayCopyNode;
45class BaseCountedLoopNode;
46class BaseCountedLoopEndNode;
47class BlackholeNode;
48class Block;
49class BoolNode;
50class BoxLockNode;
51class CMoveNode;
52class CallDynamicJavaNode;
53class CallJavaNode;
54class CallLeafNode;
55class CallLeafNoFPNode;
56class CallNode;
57class CallRuntimeNode;
58class CallNativeNode;
59class CallStaticJavaNode;
60class CastFFNode;
61class CastDDNode;
62class CastVVNode;
63class CastIINode;
64class CastLLNode;
65class CatchNode;
66class CatchProjNode;
67class CheckCastPPNode;
68class ClearArrayNode;
69class CmpNode;
70class CodeBuffer;
71class ConstraintCastNode;
72class ConNode;
73class CompareAndSwapNode;
74class CompareAndExchangeNode;
75class CountedLoopNode;
76class CountedLoopEndNode;
77class DecodeNarrowPtrNode;
78class DecodeNNode;
79class DecodeNKlassNode;
80class EncodeNarrowPtrNode;
81class EncodePNode;
82class EncodePKlassNode;
83class FastLockNode;
84class FastUnlockNode;
85class HaltNode;
86class IfNode;
87class IfProjNode;
88class IfFalseNode;
89class IfTrueNode;
90class InitializeNode;
91class JVMState;
92class JumpNode;
93class JumpProjNode;
94class LoadNode;
95class LoadStoreNode;
96class LoadStoreConditionalNode;
97class LockNode;
98class LongCountedLoopNode;
99class LongCountedLoopEndNode;
100class LoopNode;
101class LShiftNode;
102class MachBranchNode;
103class MachCallDynamicJavaNode;
104class MachCallJavaNode;
105class MachCallLeafNode;
106class MachCallNode;
107class MachCallNativeNode;
108class MachCallRuntimeNode;
109class MachCallStaticJavaNode;
110class MachConstantBaseNode;
111class MachConstantNode;
112class MachGotoNode;
113class MachIfNode;
114class MachJumpNode;
115class MachNode;
116class MachNullCheckNode;
117class MachProjNode;
118class MachReturnNode;
119class MachSafePointNode;
120class MachSpillCopyNode;
121class MachTempNode;
122class MachMergeNode;
123class MachMemBarNode;
124class Matcher;
125class MemBarNode;
126class MemBarStoreStoreNode;
127class MemNode;
128class MergeMemNode;
129class MoveNode;
130class MulNode;
131class MultiNode;
132class MultiBranchNode;
133class NeverBranchNode;
134class Opaque1Node;
135class OuterStripMinedLoopNode;
136class OuterStripMinedLoopEndNode;
137class Node;
138class Node_Array;
139class Node_List;
140class Node_Stack;
141class OopMap;
142class ParmNode;
143class PCTableNode;
144class PhaseCCP;
145class PhaseGVN;
146class PhaseIterGVN;
147class PhaseRegAlloc;
148class PhaseTransform;
149class PhaseValues;
150class PhiNode;
151class Pipeline;
152class ProjNode;
153class RangeCheckNode;
154class RegMask;
155class RegionNode;
156class RootNode;
157class SafePointNode;
158class SafePointScalarObjectNode;
159class StartNode;
160class State;
161class StoreNode;
162class SubNode;
163class SubTypeCheckNode;
164class Type;
165class TypeNode;
166class UnlockNode;
167class VectorNode;
168class LoadVectorNode;
169class LoadVectorMaskedNode;
170class StoreVectorMaskedNode;
171class LoadVectorGatherNode;
172class StoreVectorNode;
173class StoreVectorScatterNode;
174class VectorMaskCmpNode;
175class VectorUnboxNode;
176class VectorSet;
177class VectorReinterpretNode;
178class ShiftVNode;
179
180// The type of all node counts and indexes.
181// It must hold at least 16 bits, but must also be fast to load and store.
182// This type, if less than 32 bits, could limit the number of possible nodes.
183// (To make this type platform-specific, move to globalDefinitions_xxx.hpp.)
184typedef unsigned int node_idx_t;
185
186
187#ifndef OPTO_DU_ITERATOR_ASSERT
188#ifdef ASSERT
189#define OPTO_DU_ITERATOR_ASSERT 1
190#else
191#define OPTO_DU_ITERATOR_ASSERT 0
192#endif
193#endif //OPTO_DU_ITERATOR_ASSERT
194
195#if OPTO_DU_ITERATOR_ASSERT
196class DUIterator;
197class DUIterator_Fast;
198class DUIterator_Last;
199#else
200typedef uint DUIterator;
201typedef Node** DUIterator_Fast;
202typedef Node** DUIterator_Last;
203#endif
204
205// Node Sentinel
206#define NodeSentinel (Node*)-1
207
208// Unknown count frequency
209#define COUNT_UNKNOWN (-1.0f)
210
211//------------------------------Node-------------------------------------------
212// Nodes define actions in the program. They create values, which have types.
213// They are both vertices in a directed graph and program primitives. Nodes
214// are labeled; the label is the "opcode", the primitive function in the lambda
215// calculus sense that gives meaning to the Node. Node inputs are ordered (so
216// that "a-b" is different from "b-a"). The inputs to a Node are the inputs to
217// the Node's function. These inputs also define a Type equation for the Node.
218// Solving these Type equations amounts to doing dataflow analysis.
219// Control and data are uniformly represented in the graph. Finally, Nodes
220// have a unique dense integer index which is used to index into side arrays
221// whenever I have phase-specific information.
222
223class Node {
224 friend class VMStructs;
225
226 // Lots of restrictions on cloning Nodes
227 NONCOPYABLE(Node);
228
229public:
230 friend class Compile;
231 #if OPTO_DU_ITERATOR_ASSERT
232 friend class DUIterator_Common;
233 friend class DUIterator;
234 friend class DUIterator_Fast;
235 friend class DUIterator_Last;
236 #endif
237
238 // Because Nodes come and go, I define an Arena of Node structures to pull
239 // from. This should allow fast access to node creation & deletion. This
240 // field is a local cache of a value defined in some "program fragment" for
241 // which these Nodes are just a part of.
242
243 inline void* operator new(size_t x) throw() {
244 Compile* C = Compile::current();
245 Node* n = (Node*)C->node_arena()->AmallocWords(x);
246 return (void*)n;
247 }
248
249 // Delete is a NOP
250 void operator delete( void *ptr ) {}
251 // Fancy destructor; eagerly attempt to reclaim Node numberings and storage
252 void destruct(PhaseValues* phase);
253
254 // Create a new Node. Required is the number is of inputs required for
255 // semantic correctness.
256 Node( uint required );
257
258 // Create a new Node with given input edges.
259 // This version requires use of the "edge-count" new.
260 // E.g. new (C,3) FooNode( C, NULL, left, right );
261 Node( Node *n0 );
262 Node( Node *n0, Node *n1 );
263 Node( Node *n0, Node *n1, Node *n2 );
264 Node( Node *n0, Node *n1, Node *n2, Node *n3 );
265 Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4 );
266 Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4, Node *n5 );
267 Node( Node *n0, Node *n1, Node *n2, Node *n3,
268 Node *n4, Node *n5, Node *n6 );
269
270 // Clone an inherited Node given only the base Node type.
271 Node* clone() const;
272
273 // Clone a Node, immediately supplying one or two new edges.
274 // The first and second arguments, if non-null, replace in(1) and in(2),
275 // respectively.
276 Node* clone_with_data_edge(Node* in1, Node* in2 = NULL) const {
277 Node* nn = clone();
278 if (in1 != NULL) nn->set_req(1, in1);
279 if (in2 != NULL) nn->set_req(2, in2);
280 return nn;
281 }
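// Usage sketch (illustrative; 'add', 'x' and 'y' are hypothetical nodes): copy an
// existing two-input node but substitute its data inputs:
//   Node* add2 = add->clone_with_data_edge(x, y);   // in(1) = x, in(2) = y
//   Node* add3 = add->clone_with_data_edge(x);      // in(2) keeps the original edge
// Passing NULL for either argument keeps the corresponding original input.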
282
283private:
284 // Shared setup for the above constructors.
285 // Handles all interactions with Compile::current.
286 // Puts initial values in all Node fields except _idx.
287 // Returns the initial value for _idx, which cannot
288 // be initialized by assignment.
289 inline int Init(int req);
290
291//----------------- input edge handling
292protected:
293 friend class PhaseCFG; // Access to address of _in array elements
294 Node **_in; // Array of use-def references to Nodes
295 Node **_out; // Array of def-use references to Nodes
296
297 // Input edges are split into two categories. Required edges are required
298 // for semantic correctness; order is important and NULLs are allowed.
299 // Precedence edges are used to help determine execution order and are
300 // added, e.g., for scheduling purposes. They are unordered and not
301 // duplicated; they have no embedded NULLs. Edges from 0 to _cnt-1
302 // are required, from _cnt to _max-1 are precedence edges.
303 node_idx_t _cnt; // Total number of required Node inputs.
304
305 node_idx_t _max; // Actual length of input array.
306
307 // Output edges are an unordered list of def-use edges which exactly
308 // correspond to required input edges which point from other nodes
309 // to this one. Thus the count of the output edges is the number of
310 // users of this node.
311 node_idx_t _outcnt; // Total number of Node outputs.
312
313 node_idx_t _outmax; // Actual length of output array.
314
315 // Grow the actual input array to the next larger power-of-2 bigger than len.
316 void grow( uint len );
317 // Grow the output array to the next larger power-of-2 bigger than len.
318 void out_grow( uint len );
319
320 public:
321 // Each Node is assigned a unique small/dense number. This number is used
322 // to index into auxiliary arrays of data and bit vectors.
323 // The field _idx is declared constant to defend against inadvertent assignments,
324 // since it is used by clients as a naked field. However, the field's value can be
325 // changed using the set_idx() method.
326 //
327 // The PhaseRenumberLive phase renumbers nodes based on liveness information.
328 // Therefore, it updates the value of the _idx field. The parse-time _idx is
329 // preserved in _parse_idx.
330 const node_idx_t _idx;
331 DEBUG_ONLY(const node_idx_t _parse_idx;)
332 // IGV node identifier. Two nodes, possibly in different compilation phases,
333 // have the same IGV identifier if (and only if) they are the very same node
334 // (same memory address) or one is "derived" from the other (by e.g.
335 // renumbering or matching). This identifier makes it possible to follow the
336 // entire lifetime of a node in IGV even if its C2 identifier (_idx) changes.
337 NOT_PRODUCT(node_idx_t _igv_idx;)
338
339 // Get the (read-only) number of input edges
340 uint req() const { return _cnt; }
341 uint len() const { return _max; }
342 // Get the (read-only) number of output edges
343 uint outcnt() const { return _outcnt; }
344
345#if OPTO_DU_ITERATOR_ASSERT
346 // Iterate over the out-edges of this node. Deletions are illegal.
347 inline DUIterator outs() const;
348 // Use this when the out array might have changed to suppress asserts.
349 inline DUIterator& refresh_out_pos(DUIterator& i) const;
350 // Does the node have an out at this position? (Used for iteration.)
351 inline bool has_out(DUIterator& i) const;
352 inline Node* out(DUIterator& i) const;
353 // Iterate over the out-edges of this node. All changes are illegal.
354 inline DUIterator_Fast fast_outs(DUIterator_Fast& max) const;
355 inline Node* fast_out(DUIterator_Fast& i) const;
356 // Iterate over the out-edges of this node, deleting one at a time.
357 inline DUIterator_Last last_outs(DUIterator_Last& min) const;
358 inline Node* last_out(DUIterator_Last& i) const;
359 // The inline bodies of all these methods are after the iterator definitions.
360#else
361 // Iterate over the out-edges of this node. Deletions are illegal.
362 // This iteration uses integral indexes, to decouple from array reallocations.
363 DUIterator outs() const { return 0; }
364 // Use this when the out array might have changed to suppress asserts.
365 DUIterator refresh_out_pos(DUIterator i) const { return i; }
366
367 // Reference to the i'th output Node. Error if out of bounds.
368 Node* out(DUIterator i) const { assert(i < _outcnt, "oob"); return _out[i]; }
369 // Does the node have an out at this position? (Used for iteration.)
370 bool has_out(DUIterator i) const { return i < _outcnt; }
371
372 // Iterate over the out-edges of this node. All changes are illegal.
373 // This iteration uses a pointer internal to the out array.
374 DUIterator_Fast fast_outs(DUIterator_Fast& max) const {
375 Node** out = _out;
376 // Assign a limit pointer to the reference argument:
377 max = out + (ptrdiff_t)_outcnt;
378 // Return the base pointer:
379 return out;
380 }
381 Node* fast_out(DUIterator_Fast i) const { return *i; }
382 // Iterate over the out-edges of this node, deleting one at a time.
383 // This iteration uses a pointer internal to the out array.
384 DUIterator_Last last_outs(DUIterator_Last& min) const {
385 Node** out = _out;
386 // Assign a limit pointer to the reference argument:
387 min = out;
388 // Return the pointer to the start of the iteration:
389 return out + (ptrdiff_t)_outcnt - 1;
390 }
391 Node* last_out(DUIterator_Last i) const { return *i; }
392#endif
393
394 // Reference to the i'th input Node. Error if out of bounds.
395 Node* in(uint i) const { assert(i < _max, "oob: i=%d, _max=%d", i, _max); return _in[i]; }
396 // Reference to the i'th input Node. NULL if out of bounds.
397 Node* lookup(uint i) const { return ((i < _max) ? _in[i] : NULL); }
398 // Reference to the i'th output Node. Error if out of bounds.
399 // Use this accessor sparingly. We are going trying to use iterators instead.
400 Node* raw_out(uint i) const { assert(i < _outcnt,"oob")do { if (!(i < _outcnt)) { (*g_assert_poison) = 'X';; report_vm_error
("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/node.hpp"
, 400, "assert(" "i < _outcnt" ") failed", "oob"); ::breakpoint
(); } } while (0)
; return _out[i]; }
401 // Return the unique out edge.
 402 Node* unique_out() const { assert(_outcnt == 1, "not unique"); return _out[0]; }
 403 // Delete out edge at position 'i' by moving last out edge to position 'i'
 404 void raw_del_out(uint i) {
 405 assert(i < _outcnt, "oob");
 406 assert(_outcnt > 0, "oob");
 407 #if OPTO_DU_ITERATOR_ASSERT
 408 // Record that a change happened here.
 409 debug_only(_last_del = _out[i]; ++_del_tick);
 410 #endif
 411 _out[i] = _out[--_outcnt];
 412 // Smash the old edge so it can't be used accidentally.
 413 debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef);
 414 }
415
 416#ifdef ASSERT
 417 bool is_dead() const;
 418#define is_not_dead(n) ((n) == NULL || !VerifyIterativeGVN || !((n)->is_dead()))
 419 bool is_reachable_from_root() const;
 420#endif
421 // Check whether node has become unreachable
422 bool is_unreachable(PhaseIterGVN &igvn) const;
423
424 // Set a required input edge, also updates corresponding output edge
425 void add_req( Node *n ); // Append a NEW required input
426 void add_req( Node *n0, Node *n1 ) {
427 add_req(n0); add_req(n1); }
428 void add_req( Node *n0, Node *n1, Node *n2 ) {
429 add_req(n0); add_req(n1); add_req(n2); }
430 void add_req_batch( Node* n, uint m ); // Append m NEW required inputs (all n).
431 void del_req( uint idx ); // Delete required edge & compact
432 void del_req_ordered( uint idx ); // Delete required edge & compact with preserved order
433 void ins_req( uint i, Node *n ); // Insert a NEW required input
434 void set_req( uint i, Node *n ) {
 435 assert( is_not_dead(n), "can not use dead node");
 436 assert( i < _cnt, "oob: i=%d, _cnt=%d", i, _cnt);
 437 assert( !VerifyHashTableKeys || _hash_lock == 0,
 438 "remove node from hash table before modifying it");
 439 Node** p = &_in[i]; // cache this._in, across the del_out call
 440 if (*p != NULL) (*p)->del_out((Node *)this);
 441 (*p) = n;
 442 if (n != NULL) n->add_out((Node *)this);
443 Compile::current()->record_modified_node(this);
444 }
445 // Light version of set_req() to init inputs after node creation.
446 void init_req( uint i, Node *n ) {
 447 assert( i == 0 && this == n ||
 448 is_not_dead(n), "can not use dead node");
 449 assert( i < _cnt, "oob");
 450 assert( !VerifyHashTableKeys || _hash_lock == 0,
 451 "remove node from hash table before modifying it");
 452 assert( _in[i] == NULL, "sanity");
 453 _in[i] = n;
 454 if (n != NULL) n->add_out((Node *)this);
455 Compile::current()->record_modified_node(this);
456 }
457 // Find first occurrence of n among my edges:
458 int find_edge(Node* n);
459 int find_prec_edge(Node* n) {
460 for (uint i = req(); i < len(); i++) {
461 if (_in[i] == n) return i;
 462 if (_in[i] == NULL) {
 463 DEBUG_ONLY( while ((++i) < len()) assert(_in[i] == NULL, "Gap in prec edges!"); )
464 break;
465 }
466 }
467 return -1;
468 }
 469 int replace_edge(Node* old, Node* neww, PhaseGVN* gvn = NULL);
470 int replace_edges_in_range(Node* old, Node* neww, int start, int end, PhaseGVN* gvn);
471 // NULL out all inputs to eliminate incoming Def-Use edges.
472 void disconnect_inputs(Compile* C);
473
474 // Quickly, return true if and only if I am Compile::current()->top().
475 bool is_top() const {
 476 assert((this == (Node*) Compile::current()->top()) == (_out == NULL), "");
 477 return (_out == NULL);
478 }
479 // Reaffirm invariants for is_top. (Only from Compile::set_cached_top_node.)
480 void setup_is_top();
481
482 // Strip away casting. (It is depth-limited.)
483 Node* uncast(bool keep_deps = false) const;
484 // Return whether two Nodes are equivalent, after stripping casting.
485 bool eqv_uncast(const Node* n, bool keep_deps = false) const {
486 return (this->uncast(keep_deps) == n->uncast(keep_deps));
487 }
488
 490 // Find an out (use) of the current node that matches the given opcode.
490 Node* find_out_with(int opcode);
491 // Return true if the current node has an out that matches opcode.
492 bool has_out_with(int opcode);
493 // Return true if the current node has an out that matches any of the opcodes.
494 bool has_out_with(int opcode1, int opcode2, int opcode3, int opcode4);
495
496private:
497 static Node* uncast_helper(const Node* n, bool keep_deps);
498
499 // Add an output edge to the end of the list
500 void add_out( Node *n ) {
501 if (is_top()) return;
502 if( _outcnt == _outmax ) out_grow(_outcnt);
503 _out[_outcnt++] = n;
504 }
505 // Delete an output edge
506 void del_out( Node *n ) {
507 if (is_top()) return;
508 Node** outp = &_out[_outcnt];
509 // Find and remove n
510 do {
 511 assert(outp > _out, "Missing Def-Use edge");
 512 } while (*--outp != n);
 513 *outp = _out[--_outcnt];
 514 // Smash the old edge so it can't be used accidentally.
 515 debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef);
 516 // Record that a change happened here.
 517 #if OPTO_DU_ITERATOR_ASSERT
 518 debug_only(_last_del = n; ++_del_tick);
519 #endif
520 }
521 // Close gap after removing edge.
522 void close_prec_gap_at(uint gap) {
 523 assert(_cnt <= gap && gap < _max, "no valid prec edge");
 524 uint i = gap;
 525 Node *last = NULL;
 526 for (; i < _max-1; ++i) {
 527 Node *next = _in[i+1];
 528 if (next == NULL) break;
 529 last = next;
 530 }
 531 _in[gap] = last; // Move last slot to empty one.
 532 _in[i] = NULL; // NULL out last slot.
533 }
534
535public:
536 // Globally replace this node by a given new node, updating all uses.
537 void replace_by(Node* new_node);
538 // Globally replace this node by a given new node, updating all uses
539 // and cutting input edges of old node.
540 void subsume_by(Node* new_node, Compile* c) {
541 replace_by(new_node);
542 disconnect_inputs(c);
543 }
544 void set_req_X(uint i, Node *n, PhaseIterGVN *igvn);
545 void set_req_X(uint i, Node *n, PhaseGVN *gvn);
546 // Find the one non-null required input. RegionNode only
547 Node *nonnull_req() const;
548 // Add or remove precedence edges
549 void add_prec( Node *n );
550 void rm_prec( uint i );
551
552 // Note: prec(i) will not necessarily point to n if edge already exists.
553 void set_prec( uint i, Node *n ) {
 554 assert(i < _max, "oob: i=%d, _max=%d", i, _max);
 555 assert(is_not_dead(n), "can not use dead node");
 556 assert(i >= _cnt, "not a precedence edge");
 557 // Avoid spec violation: duplicated prec edge.
 558 if (_in[i] == n) return;
 559 if (n == NULL || find_prec_edge(n) != -1) {
 560 rm_prec(i);
 561 return;
 562 }
 563 if (_in[i] != NULL) _in[i]->del_out((Node *)this);
 564 _in[i] = n;
565 n->add_out((Node *)this);
566 }
567
568 // Set this node's index, used by cisc_version to replace current node
569 void set_idx(uint new_idx) {
570 const node_idx_t* ref = &_idx;
571 *(node_idx_t*)ref = new_idx;
572 }
573 // Swap input edge order. (Edge indexes i1 and i2 are usually 1 and 2.)
574 void swap_edges(uint i1, uint i2) {
 575 debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
 576 // Def-Use info is unchanged
 577 Node* n1 = in(i1);
 578 Node* n2 = in(i2);
 579 _in[i1] = n2;
 580 _in[i2] = n1;
 581 // If this node is in the hash table, make sure it doesn't need a rehash.
 582 assert(check_hash == NO_HASH || check_hash == hash(), "edge swap must preserve hash code");
584
585 // Iterators over input Nodes for a Node X are written as:
586 // for( i = 0; i < X.req(); i++ ) ... X[i] ...
587 // NOTE: Required edges can contain embedded NULL pointers.
588
589//----------------- Other Node Properties
590
591 // Generate class IDs for (some) ideal nodes so that it is possible to determine
592 // the type of a node using a non-virtual method call (the method is_<Node>() below).
593 //
594 // A class ID of an ideal node is a set of bits. In a class ID, a single bit determines
595 // the type of the node the ID represents; another subset of an ID's bits are reserved
596 // for the superclasses of the node represented by the ID.
597 //
 598 // By design, if A is a supertype of B, B.is_A() returns true and A.is_B()
 599 // returns false. A.is_A() returns true.
600 //
601 // If two classes, A and B, have the same superclass, a different bit of A's class id
602 // is reserved for A's type than for B's type. That bit is specified by the third
603 // parameter in the macro DEFINE_CLASS_ID.
604 //
605 // By convention, classes with deeper hierarchy are declared first. Moreover,
606 // classes with the same hierarchy depth are sorted by usage frequency.
607 //
608 // The query method masks the bits to cut off bits of subclasses and then compares
609 // the result with the class id (see the macro DEFINE_CLASS_QUERY below).
610 //
611 // Class_MachCall=30, ClassMask_MachCall=31
612 // 12 8 4 0
613 // 0 0 0 0 0 0 0 0 1 1 1 1 0
614 // | | | |
615 // | | | Bit_Mach=2
616 // | | Bit_MachReturn=4
617 // | Bit_MachSafePoint=8
618 // Bit_MachCall=16
619 //
620 // Class_CountedLoop=56, ClassMask_CountedLoop=63
621 // 12 8 4 0
622 // 0 0 0 0 0 0 0 1 1 1 0 0 0
623 // | | |
624 // | | Bit_Region=8
625 // | Bit_Loop=16
626 // Bit_CountedLoop=32
627
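The bit arithmetic described above can be checked with a small standalone sketch (not part of node.hpp). It reuses the worked numbers from the comment (Bit_Region=8, Bit_Loop=16, Bit_CountedLoop=32), which are treated here purely as illustrative values rather than the enum's actual current layout:

    #include <cstdint>

    // Re-creation of the documented Region -> Loop -> CountedLoop example.
    enum : uint32_t {
      Bit_Region            = 8,
      Class_Region          = Bit_Region,                     // 8
      Bit_Loop              = Bit_Region << 1,                // 16
      Class_Loop            = Class_Region + Bit_Loop,        // 24
      Bit_CountedLoop       = Bit_Loop << 1,                  // 32
      Class_CountedLoop     = Class_Loop + Bit_CountedLoop,   // 56, as in the comment
      ClassMask_Loop        = (Bit_Loop << 1) - 1,            // 31
      ClassMask_CountedLoop = (Bit_CountedLoop << 1) - 1      // 63, as in the comment
    };

    // A CountedLoop id masked down to Loop's bits equals Class_Loop (subtype "is a" supertype)...
    static_assert((Class_CountedLoop & ClassMask_Loop) == Class_Loop, "CountedLoop is a Loop");
    // ...while a plain Loop id lacks Bit_CountedLoop, so the CountedLoop query fails on it.
    static_assert((Class_Loop & ClassMask_CountedLoop) != Class_CountedLoop, "Loop is not a CountedLoop");

    int main() { return 0; }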
628 #define DEFINE_CLASS_ID(cl, supcl, subn) \
629 Bit_##cl = (Class_##supcl == 0) ? 1 << subn : (Bit_##supcl) << (1 + subn) , \
630 Class_##cl = Class_##supcl + Bit_##cl , \
631 ClassMask_##cl = ((Bit_##cl << 1) - 1) ,
632
633 // This enum is used only for C2 ideal and mach nodes with is_<node>() methods
634 // so that its values fit into 32 bits.
635 enum NodeClasses {
636 Bit_Node = 0x00000000,
637 Class_Node = 0x00000000,
638 ClassMask_Node = 0xFFFFFFFF,
639
640 DEFINE_CLASS_ID(Multi, Node, 0)
641 DEFINE_CLASS_ID(SafePoint, Multi, 0)
642 DEFINE_CLASS_ID(Call, SafePoint, 0)
643 DEFINE_CLASS_ID(CallJava, Call, 0)
644 DEFINE_CLASS_ID(CallStaticJava, CallJava, 0)
645 DEFINE_CLASS_ID(CallDynamicJava, CallJava, 1)
646 DEFINE_CLASS_ID(CallRuntime, Call, 1)
647 DEFINE_CLASS_ID(CallLeaf, CallRuntime, 0)
648 DEFINE_CLASS_ID(CallLeafNoFP, CallLeaf, 0)
649 DEFINE_CLASS_ID(Allocate, Call, 2)
650 DEFINE_CLASS_ID(AllocateArray, Allocate, 0)
651 DEFINE_CLASS_ID(AbstractLock, Call, 3)
652 DEFINE_CLASS_ID(Lock, AbstractLock, 0)
653 DEFINE_CLASS_ID(Unlock, AbstractLock, 1)
654 DEFINE_CLASS_ID(ArrayCopy, Call, 4)
655 DEFINE_CLASS_ID(CallNative, Call, 5)
656 DEFINE_CLASS_ID(MultiBranch, Multi, 1)
657 DEFINE_CLASS_ID(PCTable, MultiBranch, 0)
658 DEFINE_CLASS_ID(Catch, PCTable, 0)
659 DEFINE_CLASS_ID(Jump, PCTable, 1)
660 DEFINE_CLASS_ID(If, MultiBranch, 1)
661 DEFINE_CLASS_ID(BaseCountedLoopEnd, If, 0)
662 DEFINE_CLASS_ID(CountedLoopEnd, BaseCountedLoopEnd, 0)
663 DEFINE_CLASS_ID(LongCountedLoopEnd, BaseCountedLoopEnd, 1)
664 DEFINE_CLASS_ID(RangeCheck, If, 1)
665 DEFINE_CLASS_ID(OuterStripMinedLoopEnd, If, 2)
666 DEFINE_CLASS_ID(NeverBranch, MultiBranch, 2)
667 DEFINE_CLASS_ID(Start, Multi, 2)
668 DEFINE_CLASS_ID(MemBar, Multi, 3)
669 DEFINE_CLASS_ID(Initialize, MemBar, 0)
670 DEFINE_CLASS_ID(MemBarStoreStore, MemBar, 1)
671
672 DEFINE_CLASS_ID(Mach, Node, 1)
673 DEFINE_CLASS_ID(MachReturn, Mach, 0)
674 DEFINE_CLASS_ID(MachSafePoint, MachReturn, 0)
675 DEFINE_CLASS_ID(MachCall, MachSafePoint, 0)
676 DEFINE_CLASS_ID(MachCallJava, MachCall, 0)
677 DEFINE_CLASS_ID(MachCallStaticJava, MachCallJava, 0)
678 DEFINE_CLASS_ID(MachCallDynamicJava, MachCallJava, 1)
679 DEFINE_CLASS_ID(MachCallRuntime, MachCall, 1)
680 DEFINE_CLASS_ID(MachCallLeaf, MachCallRuntime, 0)
681 DEFINE_CLASS_ID(MachCallNative, MachCall, 2)
682 DEFINE_CLASS_ID(MachBranch, Mach, 1)
683 DEFINE_CLASS_ID(MachIf, MachBranch, 0)
684 DEFINE_CLASS_ID(MachGoto, MachBranch, 1)
685 DEFINE_CLASS_ID(MachNullCheck, MachBranch, 2)
686 DEFINE_CLASS_ID(MachSpillCopy, Mach, 2)
687 DEFINE_CLASS_ID(MachTemp, Mach, 3)
688 DEFINE_CLASS_ID(MachConstantBase, Mach, 4)
689 DEFINE_CLASS_ID(MachConstant, Mach, 5)
690 DEFINE_CLASS_ID(MachJump, MachConstant, 0)
691 DEFINE_CLASS_ID(MachMerge, Mach, 6)
692 DEFINE_CLASS_ID(MachMemBar, Mach, 7)
693
694 DEFINE_CLASS_ID(Type, Node, 2)
695 DEFINE_CLASS_ID(Phi, Type, 0)
696 DEFINE_CLASS_ID(ConstraintCast, Type, 1)
697 DEFINE_CLASS_ID(CastII, ConstraintCast, 0)
698 DEFINE_CLASS_ID(CheckCastPP, ConstraintCast, 1)
699 DEFINE_CLASS_ID(CastLL, ConstraintCast, 2)
700 DEFINE_CLASS_ID(CastFF, ConstraintCast, 3)
701 DEFINE_CLASS_ID(CastDD, ConstraintCast, 4)
702 DEFINE_CLASS_ID(CastVV, ConstraintCast, 5)
703 DEFINE_CLASS_ID(CMove, Type, 3)
704 DEFINE_CLASS_ID(SafePointScalarObject, Type, 4)
705 DEFINE_CLASS_ID(DecodeNarrowPtr, Type, 5)
706 DEFINE_CLASS_ID(DecodeN, DecodeNarrowPtr, 0)
707 DEFINE_CLASS_ID(DecodeNKlass, DecodeNarrowPtr, 1)
708 DEFINE_CLASS_ID(EncodeNarrowPtr, Type, 6)
709 DEFINE_CLASS_ID(EncodeP, EncodeNarrowPtr, 0)
710 DEFINE_CLASS_ID(EncodePKlass, EncodeNarrowPtr, 1)
711 DEFINE_CLASS_ID(Vector, Type, 7)
712 DEFINE_CLASS_ID(VectorMaskCmp, Vector, 0)
713 DEFINE_CLASS_ID(VectorUnbox, Vector, 1)
714 DEFINE_CLASS_ID(VectorReinterpret, Vector, 2)
715 DEFINE_CLASS_ID(ShiftV, Vector, 3)
716
717 DEFINE_CLASS_ID(Proj, Node, 3)
718 DEFINE_CLASS_ID(CatchProj, Proj, 0)
719 DEFINE_CLASS_ID(JumpProj, Proj, 1)
720 DEFINE_CLASS_ID(IfProj, Proj, 2)
721 DEFINE_CLASS_ID(IfTrue, IfProj, 0)
722 DEFINE_CLASS_ID(IfFalse, IfProj, 1)
723 DEFINE_CLASS_ID(Parm, Proj, 4)
724 DEFINE_CLASS_ID(MachProj, Proj, 5)
725
726 DEFINE_CLASS_ID(Mem, Node, 4)
727 DEFINE_CLASS_ID(Load, Mem, 0)
728 DEFINE_CLASS_ID(LoadVector, Load, 0)
729 DEFINE_CLASS_ID(LoadVectorGather, LoadVector, 0)
730 DEFINE_CLASS_ID(LoadVectorMasked, LoadVector, 1)
731 DEFINE_CLASS_ID(Store, Mem, 1)
732 DEFINE_CLASS_ID(StoreVector, Store, 0)
733 DEFINE_CLASS_ID(StoreVectorScatter, StoreVector, 0)
734 DEFINE_CLASS_ID(StoreVectorMasked, StoreVector, 1)
735 DEFINE_CLASS_ID(LoadStore, Mem, 2)
736 DEFINE_CLASS_ID(LoadStoreConditional, LoadStore, 0)
737 DEFINE_CLASS_ID(CompareAndSwap, LoadStoreConditional, 0)
738 DEFINE_CLASS_ID(CompareAndExchangeNode, LoadStore, 1)
739
740 DEFINE_CLASS_ID(Region, Node, 5)
741 DEFINE_CLASS_ID(Loop, Region, 0)
742 DEFINE_CLASS_ID(Root, Loop, 0)
743 DEFINE_CLASS_ID(BaseCountedLoop, Loop, 1)
744 DEFINE_CLASS_ID(CountedLoop, BaseCountedLoop, 0)
745 DEFINE_CLASS_ID(LongCountedLoop, BaseCountedLoop, 1)
746 DEFINE_CLASS_ID(OuterStripMinedLoop, Loop, 2)
747
748 DEFINE_CLASS_ID(Sub, Node, 6)
749 DEFINE_CLASS_ID(Cmp, Sub, 0)
750 DEFINE_CLASS_ID(FastLock, Cmp, 0)
751 DEFINE_CLASS_ID(FastUnlock, Cmp, 1)
752 DEFINE_CLASS_ID(SubTypeCheck,Cmp, 2)
753
754 DEFINE_CLASS_ID(MergeMem, Node, 7)
755 DEFINE_CLASS_ID(Bool, Node, 8)
756 DEFINE_CLASS_ID(AddP, Node, 9)
757 DEFINE_CLASS_ID(BoxLock, Node, 10)
758 DEFINE_CLASS_ID(Add, Node, 11)
759 DEFINE_CLASS_ID(Mul, Node, 12)
760 DEFINE_CLASS_ID(ClearArray, Node, 14)
761 DEFINE_CLASS_ID(Halt, Node, 15)
762 DEFINE_CLASS_ID(Opaque1, Node, 16)
763 DEFINE_CLASS_ID(Move, Node, 17)
764 DEFINE_CLASS_ID(LShift, Node, 18)
765
766 _max_classes = ClassMask_Move
767 };
768 #undef DEFINE_CLASS_ID
769
770 // Flags are sorted by usage frequency.
771 enum NodeFlags {
772 Flag_is_Copy = 1 << 0, // should be first bit to avoid shift
773 Flag_rematerialize = 1 << 1,
774 Flag_needs_anti_dependence_check = 1 << 2,
775 Flag_is_macro = 1 << 3,
776 Flag_is_Con = 1 << 4,
777 Flag_is_cisc_alternate = 1 << 5,
778 Flag_is_dead_loop_safe = 1 << 6,
779 Flag_may_be_short_branch = 1 << 7,
780 Flag_avoid_back_to_back_before = 1 << 8,
781 Flag_avoid_back_to_back_after = 1 << 9,
782 Flag_has_call = 1 << 10,
783 Flag_is_reduction = 1 << 11,
784 Flag_is_scheduled = 1 << 12,
785 Flag_has_vector_mask_set = 1 << 13,
786 Flag_is_expensive = 1 << 14,
787 Flag_is_predicated_vector = 1 << 15,
788 Flag_for_post_loop_opts_igvn = 1 << 16,
789 _last_flag = Flag_for_post_loop_opts_igvn
790 };
791
792 class PD;
793
794private:
795 juint _class_id;
796 juint _flags;
797
798 static juint max_flags();
799
800protected:
801 // These methods should be called from constructors only.
802 void init_class_id(juint c) {
803 _class_id = c; // cast out const
804 }
805 void init_flags(uint fl) {
 806 assert(fl <= max_flags(), "invalid node flag");
 807 _flags |= fl;
 808 }
 809 void clear_flag(uint fl) {
 810 assert(fl <= max_flags(), "invalid node flag");
811 _flags &= ~fl;
812 }
813
814public:
815 const juint class_id() const { return _class_id; }
816
817 const juint flags() const { return _flags; }
818
819 void add_flag(juint fl) { init_flags(fl); }
820
821 void remove_flag(juint fl) { clear_flag(fl); }
822
823 // Return a dense integer opcode number
824 virtual int Opcode() const;
825
826 // Virtual inherited Node size
827 virtual uint size_of() const;
828
829 // Other interesting Node properties
830 #define DEFINE_CLASS_QUERY(type) \
831 bool is_##type() const { \
832 return ((_class_id & ClassMask_##type) == Class_##type); \
833 } \
834 type##Node *as_##type() const { \
 835 assert(is_##type(), "invalid node class: %s", Name()); \
 836 return (type##Node*)this; \
 837 } \
 838 type##Node* isa_##type() const { \
 839 return (is_##type()) ? as_##type() : NULL; \
840 }
841
842 DEFINE_CLASS_QUERY(AbstractLock)
843 DEFINE_CLASS_QUERY(Add)
844 DEFINE_CLASS_QUERY(AddP)
845 DEFINE_CLASS_QUERY(Allocate)
846 DEFINE_CLASS_QUERY(AllocateArray)
847 DEFINE_CLASS_QUERY(ArrayCopy)
848 DEFINE_CLASS_QUERY(BaseCountedLoop)
849 DEFINE_CLASS_QUERY(BaseCountedLoopEnd)
850 DEFINE_CLASS_QUERY(Bool)
851 DEFINE_CLASS_QUERY(BoxLock)
852 DEFINE_CLASS_QUERY(Call)
853 DEFINE_CLASS_QUERY(CallNative)
854 DEFINE_CLASS_QUERY(CallDynamicJava)
855 DEFINE_CLASS_QUERY(CallJava)
856 DEFINE_CLASS_QUERY(CallLeaf)
857 DEFINE_CLASS_QUERY(CallLeafNoFP)
858 DEFINE_CLASS_QUERY(CallRuntime)
859 DEFINE_CLASS_QUERY(CallStaticJava)
860 DEFINE_CLASS_QUERY(Catch)
861 DEFINE_CLASS_QUERY(CatchProj)
862 DEFINE_CLASS_QUERY(CheckCastPP)
863 DEFINE_CLASS_QUERY(CastII)
864 DEFINE_CLASS_QUERY(CastLL)
865 DEFINE_CLASS_QUERY(ConstraintCast)
866 DEFINE_CLASS_QUERY(ClearArray)
867 DEFINE_CLASS_QUERY(CMove)
868 DEFINE_CLASS_QUERY(Cmp)
869 DEFINE_CLASS_QUERY(CountedLoop)
870 DEFINE_CLASS_QUERY(CountedLoopEnd)
871 DEFINE_CLASS_QUERY(DecodeNarrowPtr)
872 DEFINE_CLASS_QUERY(DecodeN)
873 DEFINE_CLASS_QUERY(DecodeNKlass)
874 DEFINE_CLASS_QUERY(EncodeNarrowPtr)
875 DEFINE_CLASS_QUERY(EncodeP)
876 DEFINE_CLASS_QUERY(EncodePKlass)
877 DEFINE_CLASS_QUERY(FastLock)
878 DEFINE_CLASS_QUERY(FastUnlock)
879 DEFINE_CLASS_QUERY(Halt)
880 DEFINE_CLASS_QUERY(If)
881 DEFINE_CLASS_QUERY(RangeCheck)
882 DEFINE_CLASS_QUERY(IfProj)
883 DEFINE_CLASS_QUERY(IfFalse)
884 DEFINE_CLASS_QUERY(IfTrue)
885 DEFINE_CLASS_QUERY(Initialize)
886 DEFINE_CLASS_QUERY(Jump)
887 DEFINE_CLASS_QUERY(JumpProj)
888 DEFINE_CLASS_QUERY(LongCountedLoop)
889 DEFINE_CLASS_QUERY(LongCountedLoopEnd)
890 DEFINE_CLASS_QUERY(Load)
891 DEFINE_CLASS_QUERY(LoadStore)
892 DEFINE_CLASS_QUERY(LoadStoreConditional)
893 DEFINE_CLASS_QUERY(Lock)
894 DEFINE_CLASS_QUERY(Loop)
895 DEFINE_CLASS_QUERY(LShift)
896 DEFINE_CLASS_QUERY(Mach)
897 DEFINE_CLASS_QUERY(MachBranch)
898 DEFINE_CLASS_QUERY(MachCall)
899 DEFINE_CLASS_QUERY(MachCallNative)
900 DEFINE_CLASS_QUERY(MachCallDynamicJava)
901 DEFINE_CLASS_QUERY(MachCallJava)
902 DEFINE_CLASS_QUERY(MachCallLeaf)
903 DEFINE_CLASS_QUERY(MachCallRuntime)
904 DEFINE_CLASS_QUERY(MachCallStaticJava)
905 DEFINE_CLASS_QUERY(MachConstantBase)
906 DEFINE_CLASS_QUERY(MachConstant)
907 DEFINE_CLASS_QUERY(MachGoto)
908 DEFINE_CLASS_QUERY(MachIf)
909 DEFINE_CLASS_QUERY(MachJump)
910 DEFINE_CLASS_QUERY(MachNullCheck)
911 DEFINE_CLASS_QUERY(MachProj)
912 DEFINE_CLASS_QUERY(MachReturn)
913 DEFINE_CLASS_QUERY(MachSafePoint)
914 DEFINE_CLASS_QUERY(MachSpillCopy)
915 DEFINE_CLASS_QUERY(MachTemp)
916 DEFINE_CLASS_QUERY(MachMemBar)
917 DEFINE_CLASS_QUERY(MachMerge)
918 DEFINE_CLASS_QUERY(Mem)
919 DEFINE_CLASS_QUERY(MemBar)
920 DEFINE_CLASS_QUERY(MemBarStoreStore)
921 DEFINE_CLASS_QUERY(MergeMem)
922 DEFINE_CLASS_QUERY(Move)
923 DEFINE_CLASS_QUERY(Mul)
924 DEFINE_CLASS_QUERY(Multi)
925 DEFINE_CLASS_QUERY(MultiBranch)
926 DEFINE_CLASS_QUERY(Opaque1)
927 DEFINE_CLASS_QUERY(OuterStripMinedLoop)
928 DEFINE_CLASS_QUERY(OuterStripMinedLoopEnd)
929 DEFINE_CLASS_QUERY(Parm)
930 DEFINE_CLASS_QUERY(PCTable)
931 DEFINE_CLASS_QUERY(Phi)
932 DEFINE_CLASS_QUERY(Proj)
933 DEFINE_CLASS_QUERY(Region)
934 DEFINE_CLASS_QUERY(Root)
935 DEFINE_CLASS_QUERY(SafePoint)
936 DEFINE_CLASS_QUERY(SafePointScalarObject)
937 DEFINE_CLASS_QUERY(Start)
938 DEFINE_CLASS_QUERY(Store)
939 DEFINE_CLASS_QUERY(Sub)
940 DEFINE_CLASS_QUERY(SubTypeCheck)
941 DEFINE_CLASS_QUERY(Type)
942 DEFINE_CLASS_QUERY(Vector)
943 DEFINE_CLASS_QUERY(VectorMaskCmp)
944 DEFINE_CLASS_QUERY(VectorUnbox)
945 DEFINE_CLASS_QUERY(VectorReinterpret);
946 DEFINE_CLASS_QUERY(LoadVector)
947 DEFINE_CLASS_QUERY(LoadVectorGather)
948 DEFINE_CLASS_QUERY(StoreVector)
949 DEFINE_CLASS_QUERY(StoreVectorScatter)
950 DEFINE_CLASS_QUERY(ShiftV)
951 DEFINE_CLASS_QUERY(Unlock)
952
953 #undef DEFINE_CLASS_QUERY
954
955 // duplicate of is_MachSpillCopy()
956 bool is_SpillCopy () const {
957 return ((_class_id & ClassMask_MachSpillCopy) == Class_MachSpillCopy);
958 }
959
960 bool is_Con () const { return (_flags & Flag_is_Con) != 0; }
961 // The data node which is safe to leave in dead loop during IGVN optimization.
962 bool is_dead_loop_safe() const;
963
964 // is_Copy() returns copied edge index (0 or 1)
965 uint is_Copy() const { return (_flags & Flag_is_Copy); }
966
967 virtual bool is_CFG() const { return false; }
968
969 // If this node is control-dependent on a test, can it be
970 // rerouted to a dominating equivalent test? This is usually
971 // true of non-CFG nodes, but can be false for operations which
972 // depend for their correct sequencing on more than one test.
973 // (In that case, hoisting to a dominating test may silently
974 // skip some other important test.)
 975 virtual bool depends_only_on_test() const { assert(!is_CFG(), ""); return true; };
976
977 // When building basic blocks, I need to have a notion of block beginning
978 // Nodes, next block selector Nodes (block enders), and next block
979 // projections. These calls need to work on their machine equivalents. The
980 // Ideal beginning Nodes are RootNode, RegionNode and StartNode.
981 bool is_block_start() const {
982 if ( is_Region() )
983 return this == (const Node*)in(0);
984 else
985 return is_Start();
986 }
987
988 // The Ideal control projection Nodes are IfTrue/IfFalse, JumpProjNode, Root,
989 // Goto and Return. This call also returns the block ending Node.
990 virtual const Node *is_block_proj() const;
991
992 // The node is a "macro" node which needs to be expanded before matching
993 bool is_macro() const { return (_flags & Flag_is_macro) != 0; }
994 // The node is expensive: the best control is set during loop opts
 995 bool is_expensive() const { return (_flags & Flag_is_expensive) != 0 && in(0) != NULL; }
996
 997 // An arithmetic node which accumulates data in a loop.
998 // It must have the loop's phi as input and provide a def to the phi.
999 bool is_reduction() const { return (_flags & Flag_is_reduction) != 0; }
1000
1001 bool is_predicated_vector() const { return (_flags & Flag_is_predicated_vector) != 0; }
1002
1003 // The node is a CountedLoopEnd with a mask annotation so as to emit a restore context
1004 bool has_vector_mask_set() const { return (_flags & Flag_has_vector_mask_set) != 0; }
1005
1006 // Used in lcm to mark nodes that have scheduled
1007 bool is_scheduled() const { return (_flags & Flag_is_scheduled) != 0; }
1008
1009 bool for_post_loop_opts_igvn() const { return (_flags & Flag_for_post_loop_opts_igvn) != 0; }
1010
1011//----------------- Optimization
1012
1013 // Get the worst-case Type output for this Node.
1014 virtual const class Type *bottom_type() const;
1015
1016 // If we find a better type for a node, try to record it permanently.
1017 // Return true if this node actually changed.
1018 // Be sure to do the hash_delete game in the "rehash" variant.
1019 void raise_bottom_type(const Type* new_type);
1020
1021 // Get the address type with which this node uses and/or defs memory,
1022 // or NULL if none. The address type is conservatively wide.
1023 // Returns non-null for calls, membars, loads, stores, etc.
1024 // Returns TypePtr::BOTTOM if the node touches memory "broadly".
1025 virtual const class TypePtr *adr_type() const { return NULL; }
1026
1027 // Return an existing node which computes the same function as this node.
1028 // The optimistic combined algorithm requires this to return a Node which
1029 // is a small number of steps away (e.g., one of my inputs).
1030 virtual Node* Identity(PhaseGVN* phase);
1031
1032 // Return the set of values this Node can take on at runtime.
1033 virtual const Type* Value(PhaseGVN* phase) const;
1034
1035 // Return a node which is more "ideal" than the current node.
1036 // The invariants on this call are subtle. If in doubt, read the
1037 // treatise in node.cpp above the default implementation AND TEST WITH
1038 // +VerifyIterativeGVN!
1039 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1040
1041 // Some nodes have specific Ideal subgraph transformations only if they are
1042 // unique users of specific nodes. Such nodes should be put on IGVN worklist
1043 // for the transformations to happen.
1044 bool has_special_unique_user() const;
1045
1046 // Skip Proj and CatchProj nodes chains. Check for Null and Top.
1047 Node* find_exact_control(Node* ctrl);
1048
1049 // Check if 'this' node dominates or equal to 'sub'.
1050 bool dominates(Node* sub, Node_List &nlist);
1051
1052protected:
1053 bool remove_dead_region(PhaseGVN *phase, bool can_reshape);
1054public:
1055
1056 // See if there is valid pipeline info
1057 static const Pipeline *pipeline_class();
1058 virtual const Pipeline *pipeline() const;
1059
1060 // Compute the latency from the def to this instruction of the ith input node
1061 uint latency(uint i);
1062
1063 // Hash & compare functions, for pessimistic value numbering
1064
1065 // If the hash function returns the special sentinel value NO_HASH,
1066 // the node is guaranteed never to compare equal to any other node.
1067 // If we accidentally generate a hash with value NO_HASH the node
1068 // won't go into the table and we'll lose a little optimization.
1069 static const uint NO_HASH = 0;
1070 virtual uint hash() const;
1071 virtual bool cmp( const Node &n ) const;
1072
1073 // Operation appears to be iteratively computed (such as an induction variable)
1074 // It is possible for this operation to return false for a loop-varying
1075 // value, if it appears (by local graph inspection) to be computed by a simple conditional.
1076 bool is_iteratively_computed();
1077
1078 // Determine if a node is a counted loop induction variable.
1079 // NOTE: The method is defined in "loopnode.cpp".
1080 bool is_cloop_ind_var() const;
1081
1082 // Return a node with opcode "opc" and same inputs as "this" if one can
1083 // be found; otherwise return NULL.
1084 Node* find_similar(int opc);
1085
1086 // Return the unique control out if only one. Null if none or more than one.
1087 Node* unique_ctrl_out() const;
1088
1089 // Set control or add control as precedence edge
1090 void ensure_control_or_add_prec(Node* c);
1091
1092//----------------- Code Generation
1093
1094 // Ideal register class for Matching. Zero means unmatched instruction
1095 // (these are cloned instead of converted to machine nodes).
1096 virtual uint ideal_reg() const;
1097
1098 static const uint NotAMachineReg; // must be > max. machine register
1099
1100 // Do we Match on this edge index or not? Generally false for Control
1101 // and true for everything else. Weird for calls & returns.
1102 virtual uint match_edge(uint idx) const;
1103
1104 // Register class output is returned in
1105 virtual const RegMask &out_RegMask() const;
1106 // Register class input is expected in
1107 virtual const RegMask &in_RegMask(uint) const;
1108 // Should we clone rather than spill this instruction?
1109 bool rematerialize() const;
1110
1111 // Return JVM State Object if this Node carries debug info, or NULL otherwise
1112 virtual JVMState* jvms() const;
1113
1114 // Print as assembly
1115 virtual void format( PhaseRegAlloc *, outputStream* st = tty ) const;
1116 // Emit bytes starting at parameter 'ptr'
1117 // Bump 'ptr' by the number of output bytes
1118 virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;
1119 // Size of instruction in bytes
1120 virtual uint size(PhaseRegAlloc *ra_) const;
1121
1122 // Convenience function to extract an integer constant from a node.
1123 // If it is not an integer constant (either Con, CastII, or Mach),
1124 // return value_if_unknown.
1125 jint find_int_con(jint value_if_unknown) const {
1126 const TypeInt* t = find_int_type();
1127 return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown;
1128 }
1129 // Return the constant, knowing it is an integer constant already
1130 jint get_int() const {
1131 const TypeInt* t = find_int_type();
1132 guarantee(t != NULL, "must be con");
1133 return t->get_con();
1134 }
1135 // Here's where the work is done. Can produce non-constant int types too.
1136 const TypeInt* find_int_type() const;
1137 const TypeInteger* find_integer_type(BasicType bt) const;
1138
1139 // Same thing for long (and intptr_t, via type.hpp):
1140 jlong get_long() const {
1141 const TypeLong* t = find_long_type();
1142 guarantee(t != NULL, "must be con");
1143 return t->get_con();
1144 }
1145 jlong find_long_con(jint value_if_unknown) const {
1146 const TypeLong* t = find_long_type();
1147 return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown;
1148 }
1149 const TypeLong* find_long_type() const;
1150
1151 jlong get_integer_as_long(BasicType bt) const {
1152 const TypeInteger* t = find_integer_type(bt);
1153 guarantee(t != NULL, "must be con");
1154 return t->get_con_as_long(bt);
1155 }
1156 const TypePtr* get_ptr_type() const;
1157
1158 // These guys are called by code generated by ADLC:
1159 intptr_t get_ptr() const;
1160 intptr_t get_narrowcon() const;
1161 jdouble getd() const;
1162 jfloat getf() const;
1163
1164 // Nodes which are pinned into basic blocks
1165 virtual bool pinned() const { return false; }
1166
1167 // Nodes which use memory without consuming it, hence need antidependences
1168 // More specifically, needs_anti_dependence_check returns true iff the node
1169 // (a) does a load, and (b) does not perform a store (except perhaps to a
1170 // stack slot or some other unaliased location).
1171 bool needs_anti_dependence_check() const;
1172
1173 // Return which operand this instruction may cisc-spill. In other words,
1174 // return operand position that can convert from reg to memory access
1175 virtual int cisc_operand() const { return AdlcVMDeps::Not_cisc_spillable; }
1176 bool is_cisc_alternate() const { return (_flags & Flag_is_cisc_alternate) != 0; }
1177
1178 // Whether this is a memory-writing machine node.
1179 bool is_memory_writer() const { return is_Mach() && bottom_type()->has_memory(); }
1180
1181//----------------- Printing, etc
1182#ifndef PRODUCT
1183 private:
1184 int _indent;
1185
1186 public:
1187 void set_indent(int indent) { _indent = indent; }
1188
1189 private:
1190 static bool add_to_worklist(Node* n, Node_List* worklist, Arena* old_arena, VectorSet* old_space, VectorSet* new_space);
1191public:
1192 Node* find(int idx, bool only_ctrl = false); // Search the graph for the given idx.
1193 Node* find_ctrl(int idx); // Search control ancestors for the given idx.
1194 void dump() const { dump("\n"); } // Print this node.
1195 void dump(const char* suffix, bool mark = false, outputStream *st = tty) const; // Print this node.
1196 void dump(int depth) const; // Print this node, recursively to depth d
1197 void dump_ctrl(int depth) const; // Print control nodes, to depth d
1198 void dump_comp() const; // Print this node in compact representation.
1199 // Print this node in compact representation.
1200 void dump_comp(const char* suffix, outputStream *st = tty) const;
1201 virtual void dump_req(outputStream *st = tty) const; // Print required-edge info
1202 virtual void dump_prec(outputStream *st = tty) const; // Print precedence-edge info
1203 virtual void dump_out(outputStream *st = tty) const; // Print the output edge info
1204 virtual void dump_spec(outputStream *st) const {}; // Print per-node info
1205 // Print compact per-node info
1206 virtual void dump_compact_spec(outputStream *st) const { dump_spec(st); }
1207 void dump_related() const; // Print related nodes (depends on node at hand).
1208 // Print related nodes up to given depths for input and output nodes.
1209 void dump_related(uint d_in, uint d_out) const;
1210 void dump_related_compact() const; // Print related nodes in compact representation.
1211 // Collect related nodes.
1212 virtual void related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const;
1213 // Collect nodes starting from this node, explicitly including/excluding control and data links.
1214 void collect_nodes(GrowableArray<Node*> *ns, int d, bool ctrl, bool data) const;
1215
1216 // Node collectors, to be used in implementations of Node::rel().
1217 // Collect the entire data input graph. Include control inputs if requested.
1218 void collect_nodes_in_all_data(GrowableArray<Node*> *ns, bool ctrl) const;
1219 // Collect the entire control input graph. Include data inputs if requested.
1220 void collect_nodes_in_all_ctrl(GrowableArray<Node*> *ns, bool data) const;
1221 // Collect the entire output graph until hitting and including control nodes.
1222 void collect_nodes_out_all_ctrl_boundary(GrowableArray<Node*> *ns) const;
1223
1224 void verify_edges(Unique_Node_List &visited); // Verify bi-directional edges
1225 static void verify(int verify_depth, VectorSet& visited, Node_List& worklist);
1226
1227 // This call defines a class-unique string used to identify class instances
1228 virtual const char *Name() const;
1229
1230 void dump_format(PhaseRegAlloc *ra) const; // debug access to MachNode::format(...)
1231 // RegMask Print Functions
1232 void dump_in_regmask(int idx) { in_RegMask(idx).dump(); }
1233 void dump_out_regmask() { out_RegMask().dump(); }
1234 static bool in_dump() { return Compile::current()->_in_dump_cnt > 0; }
1235 void fast_dump() const {
1236 tty->print("%4d: %-17s", _idx, Name());
1237 for (uint i = 0; i < len(); i++)
1238 if (in(i))
1239 tty->print(" %4d", in(i)->_idx);
1240 else
1241 tty->print(" NULL");
1242 tty->print("\n");
1243 }
1244#endif
1245#ifdef ASSERT
1246 void verify_construction();
1247 bool verify_jvms(const JVMState* jvms) const;
1248 int _debug_idx; // Unique value assigned to every node.
1249 int debug_idx() const { return _debug_idx; }
1250 void set_debug_idx( int debug_idx ) { _debug_idx = debug_idx; }
1251
1252 Node* _debug_orig; // Original version of this, if any.
1253 Node* debug_orig() const { return _debug_orig; }
1254 void set_debug_orig(Node* orig); // _debug_orig = orig
1255 void dump_orig(outputStream *st, bool print_key = true) const;
1256
1257 int _hash_lock; // Barrier to modifications of nodes in the hash table
1258 void enter_hash_lock() { ++_hash_lock; assert(_hash_lock < 99, "in too many hash tables?"); }
1259 void exit_hash_lock() { --_hash_lock; assert(_hash_lock >= 0, "mispaired hash locks"); }
1260
1261 static void init_NodeProperty();
1262
1263 #if OPTO_DU_ITERATOR_ASSERT
1264 const Node* _last_del; // The last deleted node.
1265 uint _del_tick; // Bumped when a deletion happens..
1266 #endif
1267#endif
1268};
1269
1270inline bool not_a_node(const Node* n) {
1271 if (n == NULL) return true;
1272 if (((intptr_t)n & 1) != 0) return true; // uninitialized, etc.
1273 if (*(address*)n == badAddress) return true; // kill by Node::destruct
1274 return false;
1275}
1276
1277//-----------------------------------------------------------------------------
1278// Iterators over DU info, and associated Node functions.
1279
1280#if OPTO_DU_ITERATOR_ASSERT
1281
1282// Common code for assertion checking on DU iterators.
1283class DUIterator_Common {
1284#ifdef ASSERT
1285 protected:
1286 bool _vdui; // cached value of VerifyDUIterators
1287 const Node* _node; // the node containing the _out array
1288 uint _outcnt; // cached node->_outcnt
1289 uint _del_tick; // cached node->_del_tick
1290 Node* _last; // last value produced by the iterator
1291
1292 void sample(const Node* node); // used by c'tor to set up for verifies
1293 void verify(const Node* node, bool at_end_ok = false);
1294 void verify_resync();
1295 void reset(const DUIterator_Common& that);
1296
1297// The VDUI_ONLY macro protects code conditionalized on VerifyDUIterators
1298 #define I_VDUI_ONLY(i,x) { if ((i)._vdui) { x; } }
1299#else
1300 #define I_VDUI_ONLY(i,x) { }
1301#endif //ASSERT
1302};
1303
1304#define VDUI_ONLY(x) I_VDUI_ONLY(*this, x)
1305
1306// Default DU iterator. Allows appends onto the out array.
1307// Allows deletion from the out array only at the current point.
1308// Usage:
1309// for (DUIterator i = x->outs(); x->has_out(i); i++) {
1310// Node* y = x->out(i);
1311// ...
1312// }
1313 // Compiles in product mode to an unsigned integer index, which indexes
1314// onto a repeatedly reloaded base pointer of x->_out. The loop predicate
1315// also reloads x->_outcnt. If you delete, you must perform "--i" just
1316// before continuing the loop. You must delete only the last-produced
1317// edge. You must delete only a single copy of the last-produced edge,
1318// or else you must delete all copies at once (the first time the edge
1319// is produced by the iterator).
1320class DUIterator : public DUIterator_Common {
1321 friend class Node;
1322
1323 // This is the index which provides the product-mode behavior.
1324 // Whatever the product-mode version of the system does to the
1325 // DUI index is done to this index. All other fields in
1326 // this class are used only for assertion checking.
1327 uint _idx;
1328
1329 #ifdef ASSERT
1330 uint _refresh_tick; // Records the refresh activity.
1331
1332 void sample(const Node* node); // Initialize _refresh_tick etc.
1333 void verify(const Node* node, bool at_end_ok = false);
1334 void verify_increment(); // Verify an increment operation.
1335 void verify_resync(); // Verify that we can back up over a deletion.
1336 void verify_finish(); // Verify that the loop terminated properly.
1337 void refresh(); // Resample verification info.
1338 void reset(const DUIterator& that); // Resample after assignment.
1339 #endif
1340
1341 DUIterator(const Node* node, int dummy_to_avoid_conversion)
1342 { _idx = 0; debug_only(sample(node)); }
1343
1344 public:
1345 // initialize to garbage; clear _vdui to disable asserts
1346 DUIterator()
1347 { /*initialize to garbage*/ debug_only(_vdui = false); }
1348
1349 DUIterator(const DUIterator& that)
1350 { _idx = that._idx; debug_only(_vdui = false; reset(that)); }
1351
1352 void operator++(int dummy_to_specify_postfix_op)
1353 { _idx++; VDUI_ONLY(verify_increment()); }
1354
1355 void operator--()
1356 { VDUI_ONLY(verify_resync()); --_idx; }
1357
1358 ~DUIterator()
1359 { VDUI_ONLY(verify_finish()); }
1360
1361 void operator=(const DUIterator& that)
1362 { _idx = that._idx; debug_only(reset(that)); }
1363};
1364
1365DUIterator Node::outs() const
1366 { return DUIterator(this, 0); }
1367DUIterator& Node::refresh_out_pos(DUIterator& i) const
1368 { I_VDUI_ONLY(i, i.refresh()); return i; }
1369bool Node::has_out(DUIterator& i) const
1370 { I_VDUI_ONLY(i, i.verify(this,true));return i._idx < _outcnt; }
1371Node* Node::out(DUIterator& i) const
1372 { I_VDUI_ONLY(i, i.verify(this)); return debug_only(i._last=) _out[i._idx]; }
1373
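A minimal usage sketch (not from node.hpp) of the deletion rule documented above: when the edge just produced by a DUIterator is removed, the index is backed up with --i before the loop continues. The starting node x and the predicate should_drop() are assumptions for illustration only.

    for (DUIterator i = x->outs(); x->has_out(i); i++) {
      Node* y = x->out(i);
      if (should_drop(y)) {            // hypothetical predicate
        int j = y->find_edge(x);       // locate x among y's inputs
        if (j >= 0) {
          y->del_req(j);               // removes the DU edge that was just produced
          --i;                         // back up over the deletion, per the protocol above
        }
      }
    }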
1374
1375// Faster DU iterator. Disallows insertions into the out array.
1376// Allows deletion from the out array only at the current point.
1377// Usage:
1378// for (DUIterator_Fast imax, i = x->fast_outs(imax); i < imax; i++) {
1379// Node* y = x->fast_out(i);
1380// ...
1381// }
1382// Compiles in product mode to raw Node** pointer arithmetic, with
1383// no reloading of pointers from the original node x. If you delete,
1384// you must perform "--i; --imax" just before continuing the loop.
1385// If you delete multiple copies of the same edge, you must decrement
1386// imax, but not i, multiple times: "--i, imax -= num_edges".
1387class DUIterator_Fast : public DUIterator_Common {
1388 friend class Node;
1389 friend class DUIterator_Last;
1390
1391 // This is the pointer which provides the product-mode behavior.
1392 // Whatever the product-mode version of the system does to the
1393 // DUI pointer is done to this pointer. All other fields in
1394 // this class are used only for assertion checking.
1395 Node** _outp;
1396
1397 #ifdef ASSERT1
1398 void verify(const Node* node, bool at_end_ok = false);
1399 void verify_limit();
1400 void verify_resync();
1401 void verify_relimit(uint n);
1402 void reset(const DUIterator_Fast& that);
1403 #endif
1404
1405 // Note: offset must be signed, since -1 is sometimes passed
1406 DUIterator_Fast(const Node* node, ptrdiff_t offset)
1407 { _outp = node->_out + offset; debug_only(sample(node)); }
1408
1409 public:
1410 // initialize to garbage; clear _vdui to disable asserts
1411 DUIterator_Fast()
1412 { /*initialize to garbage*/ debug_only(_vdui = false); }
1413
1414 DUIterator_Fast(const DUIterator_Fast& that)
1415 { _outp = that._outp; debug_only(_vdui = false; reset(that)); }
1416
1417 void operator++(int dummy_to_specify_postfix_op)
1418 { _outp++; VDUI_ONLY(verify(_node, true)); }
1419
1420 void operator--()
1421 { VDUI_ONLY(verify_resync()); --_outp; }
1422
1423 void operator-=(uint n) // applied to the limit only
1424 { _outp -= n; VDUI_ONLY(verify_relimit(n)); }
1425
1426 bool operator<(DUIterator_Fast& limit) {
1427 I_VDUI_ONLY(*this, this->verify(_node, true));
1428 I_VDUI_ONLY(limit, limit.verify_limit());
1429 return _outp < limit._outp;
1430 }
1431
1432 void operator=(const DUIterator_Fast& that)
1433 { _outp = that._outp; debug_only(reset(that)); }
1434};
1435
1436DUIterator_Fast Node::fast_outs(DUIterator_Fast& imax) const {
1437 // Assign a limit pointer to the reference argument:
1438 imax = DUIterator_Fast(this, (ptrdiff_t)_outcnt);
1439 // Return the base pointer:
1440 return DUIterator_Fast(this, 0);
1441}
1442Node* Node::fast_out(DUIterator_Fast& i) const {
1443 I_VDUI_ONLY(i, i.verify(this));
1444 return debug_only(i._last=) *i._outp;
1445}
1446
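A minimal sketch (not part of node.hpp) of the fast-iterator deletion rule above: after removing the edge just produced, both the cursor and the cached limit are decremented. Again, x and should_drop() are assumptions.

    for (DUIterator_Fast imax, i = x->fast_outs(imax); i < imax; i++) {
      Node* y = x->fast_out(i);
      if (should_drop(y)) {            // hypothetical predicate
        int j = y->find_edge(x);
        if (j >= 0) {
          y->del_req(j);               // removes one copy of the x->y DU edge
          --i; --imax;                 // adjust cursor and limit, per the protocol above
        }
      }
    }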
1447
1448// Faster DU iterator. Requires each successive edge to be removed.
1449// Does not allow insertion of any edges.
1450// Usage:
1451// for (DUIterator_Last imin, i = x->last_outs(imin); i >= imin; i -= num_edges) {
1452// Node* y = x->last_out(i);
1453// ...
1454// }
1455// Compiles in product mode to raw Node** pointer arithmetic, with
1456// no reloading of pointers from the original node x.
1457class DUIterator_Last : private DUIterator_Fast {
1458 friend class Node;
1459
1460 #ifdef ASSERT
1461 void verify(const Node* node, bool at_end_ok = false);
1462 void verify_limit();
1463 void verify_step(uint num_edges);
1464 #endif
1465
1466 // Note: offset must be signed, since -1 is sometimes passed
1467 DUIterator_Last(const Node* node, ptrdiff_t offset)
1468 : DUIterator_Fast(node, offset) { }
1469
1470 void operator++(int dummy_to_specify_postfix_op) {} // do not use
1471 void operator<(int) {} // do not use
1472
1473 public:
1474 DUIterator_Last() { }
1475 // initialize to garbage
1476
1477 DUIterator_Last(const DUIterator_Last& that) = default;
1478
1479 void operator--()
1480 { _outp--; VDUI_ONLY(verify_step(1)); }
1481
1482 void operator-=(uint n)
1483 { _outp -= n; VDUI_ONLY(verify_step(n)); }
1484
1485 bool operator>=(DUIterator_Last& limit) {
1486 I_VDUI_ONLY(*this, this->verify(_node, true));
1487 I_VDUI_ONLY(limit, limit.verify_limit());
1488 return _outp >= limit._outp;
1489 }
1490
1491 DUIterator_Last& operator=(const DUIterator_Last& that) = default;
1492};
1493
1494DUIterator_Last Node::last_outs(DUIterator_Last& imin) const {
1495 // Assign a limit pointer to the reference argument:
1496 imin = DUIterator_Last(this, 0);
1497 // Return the initial pointer:
1498 return DUIterator_Last(this, (ptrdiff_t)_outcnt - 1);
1499}
1500Node* Node::last_out(DUIterator_Last& i) const {
1501 I_VDUI_ONLY(i, i.verify(this));
1502 return debug_only(i._last=) *i._outp;
1503}
1504
1505#endif //OPTO_DU_ITERATOR_ASSERT
1506
1507#undef I_VDUI_ONLY
1508#undef VDUI_ONLY
1509
1510// An Iterator that truly follows the iterator pattern. Doesn't
1511// support deletion but could be made to.
1512//
1513// for (SimpleDUIterator i(n); i.has_next(); i.next()) {
1514// Node* m = i.get();
1515//
1516class SimpleDUIterator : public StackObj {
1517 private:
1518 Node* node;
1519 DUIterator_Fast i;
1520 DUIterator_Fast imax;
1521 public:
1522 SimpleDUIterator(Node* n): node(n), i(n->fast_outs(imax)) {}
1523 bool has_next() { return i < imax; }
1524 void next() { i++; }
1525 Node* get() { return node->fast_out(i); }
1526};
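A small usage sketch of SimpleDUIterator, where n is an assumed Node*; unlike the iterators above, this one does not support deletion:

    for (SimpleDUIterator it(n); it.has_next(); it.next()) {
      Node* use = it.get();
      // inspect 'use' here; do not add or remove edges during the walk
    }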
1527
1528
1529//-----------------------------------------------------------------------------
1530// Map dense integer indices to Nodes. Uses classic doubling-array trick.
1531// Abstractly provides an infinite array of Node*'s, initialized to NULL.
1532// Note that the constructor just zeros things, and since I use Arena
1533// allocation I do not need a destructor to reclaim storage.
1534class Node_Array : public ResourceObj {
1535 friend class VMStructs;
1536protected:
1537 Arena* _a; // Arena to allocate in
1538 uint _max;
1539 Node** _nodes;
1540 void grow( uint i ); // Grow array node to fit
1541public:
1542 Node_Array(Arena* a, uint max = OptoNodeListSize) : _a(a), _max(max) {
1543 _nodes = NEW_ARENA_ARRAY(a, Node*, max);
1544 clear();
1545 }
1546
1547 Node_Array(Node_Array* na) : _a(na->_a), _max(na->_max), _nodes(na->_nodes) {}
1548 Node *operator[] ( uint i ) const // Lookup, or NULL for not mapped
1549 { return (i<_max) ? _nodes[i] : (Node*)NULL; }
1550 Node* at(uint i) const { assert(i<_max,"oob"); return _nodes[i]; }
1551 Node** adr() { return _nodes; }
1552 // Extend the mapping: index i maps to Node *n.
1553 void map( uint i, Node *n ) { if( i>=_max ) grow(i); _nodes[i] = n; }
1554 void insert( uint i, Node *n );
1555 void remove( uint i ); // Remove, preserving order
1556 // Clear all entries in _nodes to NULL but keep storage
1557 void clear() {
1558 Copy::zero_to_bytes(_nodes, _max * sizeof(Node*));
1559 }
1560
1561 uint Size() const { return _max; }
1562 void dump() const;
1563};
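A minimal sketch of the Node_Array mapping (some_node is a hypothetical Node*, and a current thread with a resource area is assumed): map() grows the backing store on demand, while operator[] reads unmapped or out-of-range indices as NULL.

    Arena* arena = Thread::current()->resource_area();
    Node_Array names(arena);
    names.map(5, some_node);            // grows the array if index 5 is out of range
    Node* hit  = names[5];              // == some_node
    Node* miss = names[1000];           // NULL: never mapped, so it reads as NULL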
1564
1565class Node_List : public Node_Array {
1566 friend class VMStructs;
1567 uint _cnt;
1568public:
1569 Node_List(uint max = OptoNodeListSize) : Node_Array(Thread::current()->resource_area(), max), _cnt(0) {}
1570 Node_List(Arena *a, uint max = OptoNodeListSize) : Node_Array(a, max), _cnt(0) {}
1571 bool contains(const Node* n) const {
1572 for (uint e = 0; e < size(); e++) {
1573 if (at(e) == n) return true;
1574 }
1575 return false;
1576 }
1577 void insert( uint i, Node *n ) { Node_Array::insert(i,n); _cnt++; }
1578 void remove( uint i ) { Node_Array::remove(i); _cnt--; }
1579 void push( Node *b ) { map(_cnt++,b); }
1580 void yank( Node *n ); // Find and remove
1581 Node *pop() { return _nodes[--_cnt]; }
1582 void clear() { _cnt = 0; Node_Array::clear(); } // retain storage
1583 void copy(const Node_List& from) {
1584 if (from._max > _max) {
1585 grow(from._max);
1586 }
1587 _cnt = from._cnt;
1588 Copy::conjoint_words_to_higher((HeapWord*)&from._nodes[0], (HeapWord*)&_nodes[0], from._max * sizeof(Node*));
1589 }
1590
1591 uint size() const { return _cnt; }
1592 void dump() const;
1593 void dump_simple() const;
1594};
1595
1596//------------------------------Unique_Node_List-------------------------------
1597class Unique_Node_List : public Node_List {
1598 friend class VMStructs;
1599 VectorSet _in_worklist;
1600 uint _clock_index; // Index in list where to pop from next
1601public:
1602 Unique_Node_List() : Node_List(), _clock_index(0) {}
1603 Unique_Node_List(Arena *a) : Node_List(a), _in_worklist(a), _clock_index(0) {}
1604
1605 void remove( Node *n );
1606 bool member( Node *n ) { return _in_worklist.test(n->_idx) != 0; }
1607 VectorSet& member_set(){ return _in_worklist; }
1608
1609 void push(Node* b) {
1610 if( !_in_worklist.test_set(b->_idx) )
1611 Node_List::push(b);
1612 }
1613 Node *pop() {
1614 if( _clock_index >= size() ) _clock_index = 0;
1615 Node *b = at(_clock_index);
1616 map( _clock_index, Node_List::pop());
1617 if (size() != 0) _clock_index++; // Always start from 0
1618 _in_worklist.remove(b->_idx);
1619 return b;
1620 }
1621 Node *remove(uint i) {
1622 Node *b = Node_List::at(i);
1623 _in_worklist.remove(b->_idx);
1624 map(i,Node_List::pop());
1625 return b;
1626 }
1627 void yank(Node *n) {
1628 _in_worklist.remove(n->_idx);
1629 Node_List::yank(n);
1630 }
1631 void clear() {
1632 _in_worklist.clear(); // Discards storage but grows automatically
1633 Node_List::clear();
1634 _clock_index = 0;
1635 }
1636
1637 // Used after parsing to remove useless nodes before Iterative GVN
1638 void remove_useless_nodes(VectorSet& useful);
1639
1640 bool contains(const Node* n) const {
1641 fatal("use faster member() instead")do { (*g_assert_poison) = 'X';; report_fatal(INTERNAL_ERROR, "/home/daniel/Projects/java/jdk/src/hotspot/share/opto/node.hpp"
, 1641, "use faster member() instead"); ::breakpoint(); } while
(0)
;
1642 return false;
1643 }
1644
1645#ifndef PRODUCT
1646 void print_set() const { _in_worklist.print(); }
1647#endif
1648};
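The VectorSet keyed by node index is what makes pushes idempotent, so a node can sit on the worklist at most once. An illustrative sketch (n is a placeholder Node*):

  Unique_Node_List wl;
  wl.push(n);
  wl.push(n);                          // no-op: _in_worklist already holds n->_idx
  assert(wl.size() == 1, "");
  while (wl.size() > 0) {
    Node* cur = wl.pop();              // round-robin removal driven by _clock_index
    // ... process cur, pushing newly discovered nodes; duplicates are filtered ...
  }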
1649
1650// Inline definition of Compile::record_for_igvn must be deferred to this point.
1651inline void Compile::record_for_igvn(Node* n) {
1652 _for_igvn->push(n);
1653}
1654
1655//------------------------------Node_Stack-------------------------------------
1656class Node_Stack {
1657 friend class VMStructs;
1658protected:
1659 struct INode {
1660 Node *node; // Processed node
1661 uint indx; // Index of next node's child
1662 };
1663 INode *_inode_top; // tos, stack grows up
1664 INode *_inode_max; // End of _inodes == _inodes + _max
1665 INode *_inodes; // Array storage for the stack
1666 Arena *_a; // Arena to allocate in
1667 void grow();
1668public:
1669 Node_Stack(int size) {
1670 size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize;
1671 _a = Thread::current()->resource_area();
1672 _inodes = NEW_ARENA_ARRAY( _a, INode, max )(INode*) (_a)->Amalloc((max) * sizeof(INode));
1673 _inode_max = _inodes + max;
1674 _inode_top = _inodes - 1; // stack is empty
1675 }
1676
1677 Node_Stack(Arena *a, int size) : _a(a) {
1678 size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize;
1679 _inodes = NEW_ARENA_ARRAY( _a, INode, max )(INode*) (_a)->Amalloc((max) * sizeof(INode));
1680 _inode_max = _inodes + max;
1681 _inode_top = _inodes - 1; // stack is empty
1682 }
1683
1684 void pop() {
1685 assert(_inode_top >= _inodes, "node stack underflow")do { if (!(_inode_top >= _inodes)) { (*g_assert_poison) = 'X'
;; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/node.hpp"
, 1685, "assert(" "_inode_top >= _inodes" ") failed", "node stack underflow"
); ::breakpoint(); } } while (0)
;
1686 --_inode_top;
1687 }
1688 void push(Node *n, uint i) {
1689 ++_inode_top;
1690 if (_inode_top >= _inode_max) grow();
1691 INode *top = _inode_top; // optimization
1692 top->node = n;
1693 top->indx = i;
1694 }
1695 Node *node() const {
1696 return _inode_top->node;
1697 }
1698 Node* node_at(uint i) const {
1699 assert(_inodes + i <= _inode_top, "in range")do { if (!(_inodes + i <= _inode_top)) { (*g_assert_poison
) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/node.hpp"
, 1699, "assert(" "_inodes + i <= _inode_top" ") failed", "in range"
); ::breakpoint(); } } while (0)
;
1700 return _inodes[i].node;
1701 }
1702 uint index() const {
1703 return _inode_top->indx;
1704 }
1705 uint index_at(uint i) const {
1706 assert(_inodes + i <= _inode_top, "in range")do { if (!(_inodes + i <= _inode_top)) { (*g_assert_poison
) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/node.hpp"
, 1706, "assert(" "_inodes + i <= _inode_top" ") failed", "in range"
); ::breakpoint(); } } while (0)
;
1707 return _inodes[i].indx;
1708 }
1709 void set_node(Node *n) {
1710 _inode_top->node = n;
1711 }
1712 void set_index(uint i) {
1713 _inode_top->indx = i;
1714 }
1715 uint size_max() const { return (uint)pointer_delta(_inode_max, _inodes, sizeof(INode)); } // Max size
1716 uint size() const { return (uint)pointer_delta((_inode_top+1), _inodes, sizeof(INode)); } // Current size
1717 bool is_nonempty() const { return (_inode_top >= _inodes); }
21
Returning the value 1, which participates in a condition later
1718 bool is_empty() const { return (_inode_top < _inodes); }
1719 void clear() { _inode_top = _inodes - 1; } // retain storage
1720
1721 // Node_Stack is used to map nodes.
1722 Node* find(uint idx) const;
1723};
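Each INode pairs a node with the index of the next child to visit, which is exactly what an explicit, iterative depth-first walk needs. A hedged sketch (root is a placeholder; req()/in() are the usual Node input accessors):

  Node_Stack stack(16);
  stack.push(root, 0);                 // (node, index of next input to visit)
  while (stack.is_nonempty()) {
    Node* n = stack.node();
    uint  i = stack.index();
    if (i < n->req()) {
      stack.set_index(i + 1);          // remember where to resume on n
      Node* in = n->in(i);
      if (in != NULL) stack.push(in, 0);
    } else {
      stack.pop();                     // all inputs of n have been visited
    }
  }

A real traversal would additionally track visited nodes (e.g. with a VectorSet) so shared inputs are not re-expanded.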
1724
1725
1726//-----------------------------Node_Notes--------------------------------------
1727// Debugging or profiling annotations loosely and sparsely associated
1728// with some nodes. See Compile::node_notes_at for the accessor.
1729class Node_Notes {
1730 friend class VMStructs;
1731 JVMState* _jvms;
1732
1733public:
1734 Node_Notes(JVMState* jvms = NULL__null) {
1735 _jvms = jvms;
1736 }
1737
1738 JVMState* jvms() { return _jvms; }
1739 void set_jvms(JVMState* x) { _jvms = x; }
1740
1741 // True if there is nothing here.
1742 bool is_clear() {
1743 return (_jvms == NULL__null);
1744 }
1745
1746 // Make there be nothing here.
1747 void clear() {
1748 _jvms = NULL__null;
1749 }
1750
1751 // Make a new, clean node notes.
1752 static Node_Notes* make(Compile* C) {
1753 Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1)(Node_Notes*) (C->comp_arena())->Amalloc((1) * sizeof(Node_Notes
))
;
1754 nn->clear();
1755 return nn;
1756 }
1757
1758 Node_Notes* clone(Compile* C) {
1759 Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1)(Node_Notes*) (C->comp_arena())->Amalloc((1) * sizeof(Node_Notes
))
;
1760 (*nn) = (*this);
1761 return nn;
1762 }
1763
1764 // Absorb any information from source.
1765 bool update_from(Node_Notes* source) {
1766 bool changed = false;
1767 if (source != NULL__null) {
1768 if (source->jvms() != NULL__null) {
1769 set_jvms(source->jvms());
1770 changed = true;
1771 }
1772 }
1773 return changed;
1774 }
1775};
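A small sketch of the update_from() contract (C and jvms are placeholders for the current Compile* and a JVMState*):

  Node_Notes* src = Node_Notes::make(C);   // freshly cleared, is_clear() == true
  Node_Notes  dst;
  bool changed = dst.update_from(src);     // false: src has no JVMState to contribute
  src->set_jvms(jvms);
  changed = dst.update_from(src);          // true: dst now carries src's JVMState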
1776
1777// Inlined accessors for Compile::node_notes that require the preceding class:
1778inline Node_Notes*
1779Compile::locate_node_notes(GrowableArray<Node_Notes*>* arr,
1780 int idx, bool can_grow) {
1781 assert(idx >= 0, "oob")do { if (!(idx >= 0)) { (*g_assert_poison) = 'X';; report_vm_error
("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/node.hpp"
, 1781, "assert(" "idx >= 0" ") failed", "oob"); ::breakpoint
(); } } while (0)
;
1782 int block_idx = (idx >> _log2_node_notes_block_size);
1783 int grow_by = (block_idx - (arr == NULL__null? 0: arr->length()));
1784 if (grow_by >= 0) {
1785 if (!can_grow) return NULL__null;
1786 grow_node_notes(arr, grow_by + 1);
1787 }
1788 if (arr == NULL__null) return NULL__null;
1789 // (Every element of arr is a sub-array of length _node_notes_block_size.)
1790 return arr->at(block_idx) + (idx & (_node_notes_block_size-1));
1791}
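For illustration, with _log2_node_notes_block_size == 8 the block size is 256, so idx == 1000 resolves to

  block_idx = 1000 >> 8          == 3
  offset    = 1000 & (256 - 1)   == 232

i.e. the notes live at arr->at(3) + 232, and locate_node_notes() only grows the outer array when block_idx is not yet covered by arr and can_grow is true.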
1792
1793inline bool
1794Compile::set_node_notes_at(int idx, Node_Notes* value) {
1795 if (value == NULL__null || value->is_clear())
1796 return false; // nothing to write => write nothing
1797 Node_Notes* loc = locate_node_notes(_node_note_array, idx, true);
1798 assert(loc != NULL, "")do { if (!(loc != __null)) { (*g_assert_poison) = 'X';; report_vm_error
("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/node.hpp"
, 1798, "assert(" "loc != __null" ") failed", ""); ::breakpoint
(); } } while (0)
;
1799 return loc->update_from(value);
1800}
1801
1802
1803//------------------------------TypeNode---------------------------------------
1804// Node with a Type constant.
1805class TypeNode : public Node {
1806protected:
1807 virtual uint hash() const; // Check the type
1808 virtual bool cmp( const Node &n ) const;
1809 virtual uint size_of() const; // Size is bigger
1810 const Type* const _type;
1811public:
1812 void set_type(const Type* t) {
1813 assert(t != NULL, "sanity")do { if (!(t != __null)) { (*g_assert_poison) = 'X';; report_vm_error
("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/node.hpp"
, 1813, "assert(" "t != __null" ") failed", "sanity"); ::breakpoint
(); } } while (0)
;
1814 debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH)uint check_hash = (VerifyHashTableKeys && _hash_lock)
? hash() : NO_HASH
;
1815 *(const Type**)&_type = t; // cast away const-ness
1816 // If this node is in the hash table, make sure it doesn't need a rehash.
1817 assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code")do { if (!(check_hash == NO_HASH || check_hash == hash())) { (
*g_assert_poison) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/node.hpp"
, 1817, "assert(" "check_hash == NO_HASH || check_hash == hash()"
") failed", "type change must preserve hash code"); ::breakpoint
(); } } while (0)
;
1818 }
1819 const Type* type() const { assert(_type != NULL, "sanity")do { if (!(_type != __null)) { (*g_assert_poison) = 'X';; report_vm_error
("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/node.hpp"
, 1819, "assert(" "_type != __null" ") failed", "sanity"); ::
breakpoint(); } } while (0)
; return _type; };
1820 TypeNode( const Type *t, uint required ) : Node(required), _type(t) {
1821 init_class_id(Class_Type);
1822 }
1823 virtual const Type* Value(PhaseGVN* phase) const;
1824 virtual const Type *bottom_type() const;
1825 virtual uint ideal_reg() const;
1826#ifndef PRODUCT
1827 virtual void dump_spec(outputStream *st) const;
1828 virtual void dump_compact_spec(outputStream *st) const;
1829#endif
1830};
1831
1832#include "opto/opcodes.hpp"
1833
1834#define Op_IL(op)inline int Op_op(BasicType bt) { do { if (!(bt == T_INT || bt
== T_LONG)) { (*g_assert_poison) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/node.hpp"
, 1834, "assert(" "bt == T_INT || bt == T_LONG" ") failed", "only for int or longs"
); ::breakpoint(); } } while (0); if (bt == T_INT) { return Op_opI
; } return Op_opL; }
\
1835 inline int Op_ ## op(BasicType bt) { \
1836 assert(bt == T_INT || bt == T_LONG, "only for int or longs")do { if (!(bt == T_INT || bt == T_LONG)) { (*g_assert_poison)
= 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/node.hpp"
, 1836, "assert(" "bt == T_INT || bt == T_LONG" ") failed", "only for int or longs"
); ::breakpoint(); } } while (0)
; \
1837 if (bt == T_INT) { \
1838 return Op_## op ## I; \
1839 } \
1840 return Op_## op ## L; \
1841}
1842
1843Op_IL(Add)inline int Op_Add(BasicType bt) { do { if (!(bt == T_INT || bt
== T_LONG)) { (*g_assert_poison) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/node.hpp"
, 1843, "assert(" "bt == T_INT || bt == T_LONG" ") failed", "only for int or longs"
); ::breakpoint(); } } while (0); if (bt == T_INT) { return Op_AddI
; } return Op_AddL; }
1844Op_IL(Sub)inline int Op_Sub(BasicType bt) { do { if (!(bt == T_INT || bt
== T_LONG)) { (*g_assert_poison) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/node.hpp"
, 1844, "assert(" "bt == T_INT || bt == T_LONG" ") failed", "only for int or longs"
); ::breakpoint(); } } while (0); if (bt == T_INT) { return Op_SubI
; } return Op_SubL; }
1845Op_IL(Mul)inline int Op_Mul(BasicType bt) { do { if (!(bt == T_INT || bt
== T_LONG)) { (*g_assert_poison) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/node.hpp"
, 1845, "assert(" "bt == T_INT || bt == T_LONG" ") failed", "only for int or longs"
); ::breakpoint(); } } while (0); if (bt == T_INT) { return Op_MulI
; } return Op_MulL; }
1846Op_IL(URShift)inline int Op_URShift(BasicType bt) { do { if (!(bt == T_INT ||
bt == T_LONG)) { (*g_assert_poison) = 'X';; report_vm_error(
"/home/daniel/Projects/java/jdk/src/hotspot/share/opto/node.hpp"
, 1846, "assert(" "bt == T_INT || bt == T_LONG" ") failed", "only for int or longs"
); ::breakpoint(); } } while (0); if (bt == T_INT) { return Op_URShiftI
; } return Op_URShiftL; }
1847Op_IL(LShift)inline int Op_LShift(BasicType bt) { do { if (!(bt == T_INT ||
bt == T_LONG)) { (*g_assert_poison) = 'X';; report_vm_error(
"/home/daniel/Projects/java/jdk/src/hotspot/share/opto/node.hpp"
, 1847, "assert(" "bt == T_INT || bt == T_LONG" ") failed", "only for int or longs"
); ::breakpoint(); } } while (0); if (bt == T_INT) { return Op_LShiftI
; } return Op_LShiftL; }
1848Op_IL(Xor)inline int Op_Xor(BasicType bt) { do { if (!(bt == T_INT || bt
== T_LONG)) { (*g_assert_poison) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/node.hpp"
, 1848, "assert(" "bt == T_INT || bt == T_LONG" ") failed", "only for int or longs"
); ::breakpoint(); } } while (0); if (bt == T_INT) { return Op_XorI
; } return Op_XorL; }
1849Op_IL(Cmp)inline int Op_Cmp(BasicType bt) { do { if (!(bt == T_INT || bt
== T_LONG)) { (*g_assert_poison) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/node.hpp"
, 1849, "assert(" "bt == T_INT || bt == T_LONG" ") failed", "only for int or longs"
); ::breakpoint(); } } while (0); if (bt == T_INT) { return Op_CmpI
; } return Op_CmpL; }
1850
1851inline int Op_Cmp_unsigned(BasicType bt) {
1852 assert(bt == T_INT || bt == T_LONG, "only for int or longs")do { if (!(bt == T_INT || bt == T_LONG)) { (*g_assert_poison)
= 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/node.hpp"
, 1852, "assert(" "bt == T_INT || bt == T_LONG" ") failed", "only for int or longs"
); ::breakpoint(); } } while (0)
;
1853 if (bt == T_INT) {
1854 return Op_CmpU;
1855 }
1856 return Op_CmpUL;
1857}
1858
1859inline int Op_Cast(BasicType bt) {
1860 assert(bt == T_INT || bt == T_LONG, "only for int or longs")do { if (!(bt == T_INT || bt == T_LONG)) { (*g_assert_poison)
= 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/node.hpp"
, 1860, "assert(" "bt == T_INT || bt == T_LONG" ") failed", "only for int or longs"
); ::breakpoint(); } } while (0)
;
1861 if (bt == T_INT) {
1862 return Op_CastII;
1863 }
1864 return Op_CastLL;
1865}
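These helpers let code that is generic over int/long pick the right opcode from a BasicType. A sketch (bt is a placeholder known to be T_INT or T_LONG, as the asserts require):

  int add_op  = Op_Add(bt);            // Op_AddI for T_INT, Op_AddL for T_LONG
  int ucmp_op = Op_Cmp_unsigned(bt);   // Op_CmpU or Op_CmpUL
  int cast_op = Op_Cast(bt);           // Op_CastII or Op_CastLL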
1866
1867#endif // SHARE_OPTO_NODE_HPP

/home/daniel/Projects/java/jdk/src/hotspot/share/opto/compile.hpp

1/*
2 * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#ifndef SHARE_OPTO_COMPILE_HPP
26#define SHARE_OPTO_COMPILE_HPP
27
28#include "asm/codeBuffer.hpp"
29#include "ci/compilerInterface.hpp"
30#include "code/debugInfoRec.hpp"
31#include "compiler/compiler_globals.hpp"
32#include "compiler/compilerOracle.hpp"
33#include "compiler/compileBroker.hpp"
34#include "compiler/compilerEvent.hpp"
35#include "libadt/dict.hpp"
36#include "libadt/vectset.hpp"
37#include "memory/resourceArea.hpp"
38#include "oops/methodData.hpp"
39#include "opto/idealGraphPrinter.hpp"
40#include "opto/phasetype.hpp"
41#include "opto/phase.hpp"
42#include "opto/regmask.hpp"
43#include "runtime/deoptimization.hpp"
44#include "runtime/sharedRuntime.hpp"
45#include "runtime/timerTrace.hpp"
46#include "runtime/vmThread.hpp"
47#include "utilities/ticks.hpp"
48
49class AbstractLockNode;
50class AddPNode;
51class Block;
52class Bundle;
53class CallGenerator;
54class CloneMap;
55class ConnectionGraph;
56class IdealGraphPrinter;
57class InlineTree;
58class Int_Array;
59class Matcher;
60class MachConstantNode;
61class MachConstantBaseNode;
62class MachNode;
63class MachOper;
64class MachSafePointNode;
65class Node;
66class Node_Array;
67class Node_List;
68class Node_Notes;
69class NodeCloneInfo;
70class OptoReg;
71class PhaseCFG;
72class PhaseGVN;
73class PhaseIterGVN;
74class PhaseRegAlloc;
75class PhaseCCP;
76class PhaseOutput;
77class RootNode;
78class relocInfo;
79class Scope;
80class StartNode;
81class SafePointNode;
82class JVMState;
83class Type;
84class TypeData;
85class TypeInt;
86class TypeInteger;
87class TypePtr;
88class TypeOopPtr;
89class TypeFunc;
90class TypeVect;
91class Unique_Node_List;
92class nmethod;
93class Node_Stack;
94struct Final_Reshape_Counts;
95
96enum LoopOptsMode {
97 LoopOptsDefault,
98 LoopOptsNone,
99 LoopOptsMaxUnroll,
100 LoopOptsShenandoahExpand,
101 LoopOptsShenandoahPostExpand,
102 LoopOptsSkipSplitIf,
103 LoopOptsVerify
104};
105
106typedef unsigned int node_idx_t;
107class NodeCloneInfo {
108 private:
109 uint64_t _idx_clone_orig;
110 public:
111
112 void set_idx(node_idx_t idx) {
113 _idx_clone_orig = (_idx_clone_orig & CONST64(0xFFFFFFFF00000000)(0xFFFFFFFF00000000LL)) | idx;
114 }
115 node_idx_t idx() const { return (node_idx_t)(_idx_clone_orig & 0xFFFFFFFF); }
116
117 void set_gen(int generation) {
118 uint64_t g = (uint64_t)generation << 32;
119 _idx_clone_orig = (_idx_clone_orig & 0xFFFFFFFF) | g;
120 }
121 int gen() const { return (int)(_idx_clone_orig >> 32); }
122
123 void set(uint64_t x) { _idx_clone_orig = x; }
124 void set(node_idx_t x, int g) { set_idx(x); set_gen(g); }
125 uint64_t get() const { return _idx_clone_orig; }
126
127 NodeCloneInfo(uint64_t idx_clone_orig) : _idx_clone_orig(idx_clone_orig) {}
128 NodeCloneInfo(node_idx_t x, int g) : _idx_clone_orig(0) { set(x, g); }
129
130 void dump() const;
131};
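The packing puts the node index in the low 32 bits and the clone generation in the high 32 bits, so for example:

  NodeCloneInfo ci(13, 2);                             // clone of node 13, generation 2
  assert(ci.idx() == 13 && ci.gen() == 2, "");
  assert(ci.get() == (((uint64_t)2 << 32) | 13), "");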
132
133class CloneMap {
134 friend class Compile;
135 private:
136 bool _debug;
137 Dict* _dict;
138 int _clone_idx; // current cloning iteration/generation in loop unroll
139 public:
140 void* _2p(node_idx_t key) const { return (void*)(intptr_t)key; } // 2 conversion functions to make gcc happy
141 node_idx_t _2_node_idx_t(const void* k) const { return (node_idx_t)(intptr_t)k; }
142 Dict* dict() const { return _dict; }
143 void insert(node_idx_t key, uint64_t val) { assert(_dict->operator[](_2p(key)) == NULL, "key existed")do { if (!(_dict->operator[](_2p(key)) == __null)) { (*g_assert_poison
) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/compile.hpp"
, 143, "assert(" "_dict->operator[](_2p(key)) == __null" ") failed"
, "key existed"); ::breakpoint(); } } while (0)
; _dict->Insert(_2p(key), (void*)val); }
144 void insert(node_idx_t key, NodeCloneInfo& ci) { insert(key, ci.get()); }
145 void remove(node_idx_t key) { _dict->Delete(_2p(key)); }
146 uint64_t value(node_idx_t key) const { return (uint64_t)_dict->operator[](_2p(key)); }
147 node_idx_t idx(node_idx_t key) const { return NodeCloneInfo(value(key)).idx(); }
148 int gen(node_idx_t key) const { return NodeCloneInfo(value(key)).gen(); }
149 int gen(const void* k) const { return gen(_2_node_idx_t(k)); }
150 int max_gen() const;
151 void clone(Node* old, Node* nnn, int gen);
152 void verify_insert_and_clone(Node* old, Node* nnn, int gen);
153 void dump(node_idx_t key) const;
154
155 int clone_idx() const { return _clone_idx; }
156 void set_clone_idx(int x) { _clone_idx = x; }
157 bool is_debug() const { return _debug; }
158 void set_debug(bool debug) { _debug = debug; }
159 static const char* debug_option_name;
160
161 bool same_idx(node_idx_t k1, node_idx_t k2) const { return idx(k1) == idx(k2); }
162 bool same_gen(node_idx_t k1, node_idx_t k2) const { return gen(k1) == gen(k2); }
163};
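Hedged usage sketch (cm stands for the CloneMap owned by the current Compile; the indices are made up): record that the node with _idx 57 is a generation-2 clone of node 13, then query it back.

  NodeCloneInfo info(13, 2);
  cm.insert(57, info);                 // keyed by the clone's own _idx
  assert(cm.idx(57) == 13 && cm.gen(57) == 2, "");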
164
165class Options {
166 friend class Compile;
167 friend class VMStructs;
168 private:
169 const bool _subsume_loads; // Load can be matched as part of a larger op.
170 const bool _do_escape_analysis; // Do escape analysis.
171 const bool _do_iterative_escape_analysis; // Do iterative escape analysis.
172 const bool _eliminate_boxing; // Do boxing elimination.
173 const bool _do_locks_coarsening; // Do locks coarsening
174 const bool _install_code; // Install the code that was compiled
175 public:
176 Options(bool subsume_loads, bool do_escape_analysis,
177 bool do_iterative_escape_analysis,
178 bool eliminate_boxing, bool do_locks_coarsening,
179 bool install_code) :
180 _subsume_loads(subsume_loads),
181 _do_escape_analysis(do_escape_analysis),
182 _do_iterative_escape_analysis(do_iterative_escape_analysis),
183 _eliminate_boxing(eliminate_boxing),
184 _do_locks_coarsening(do_locks_coarsening),
185 _install_code(install_code) {
186 }
187
188 static Options for_runtime_stub() {
189 return Options(
190 /* subsume_loads = */ true,
191 /* do_escape_analysis = */ false,
192 /* do_iterative_escape_analysis = */ false,
193 /* eliminate_boxing = */ false,
194 /* do_lock_coarsening = */ false,
195 /* install_code = */ true
196 );
197 }
198};
199
200//------------------------------Compile----------------------------------------
201// This class defines a top-level Compiler invocation.
202
203class Compile : public Phase {
204 friend class VMStructs;
205
206 public:
207 // Fixed alias indexes. (See also MergeMemNode.)
208 enum {
209 AliasIdxTop = 1, // pseudo-index, aliases to nothing (used as sentinel value)
210 AliasIdxBot = 2, // pseudo-index, aliases to everything
211 AliasIdxRaw = 3 // hard-wired index for TypeRawPtr::BOTTOM
212 };
213
214 // Variant of TraceTime(NULL, &_t_accumulator, CITime);
215 // Integrated with logging. If logging is turned on, and CITimeVerbose is true,
216 // then brackets are put into the log, with time stamps and node counts.
217 // (The time collection itself is always conditionalized on CITime.)
218 class TracePhase : public TraceTime {
219 private:
220 Compile* C;
221 CompileLog* _log;
222 const char* _phase_name;
223 bool _dolog;
224 public:
225 TracePhase(const char* name, elapsedTimer* accumulator);
226 ~TracePhase();
227 };
228
229 // Information per category of alias (memory slice)
230 class AliasType {
231 private:
232 friend class Compile;
233
234 int _index; // unique index, used with MergeMemNode
235 const TypePtr* _adr_type; // normalized address type
236 ciField* _field; // relevant instance field, or null if none
237 const Type* _element; // relevant array element type, or null if none
238 bool _is_rewritable; // false if the memory is write-once only
239 int _general_index; // if this type is an instance, the general
240 // type that this is an instance of
241
242 void Init(int i, const TypePtr* at);
243
244 public:
245 int index() const { return _index; }
246 const TypePtr* adr_type() const { return _adr_type; }
247 ciField* field() const { return _field; }
248 const Type* element() const { return _element; }
249 bool is_rewritable() const { return _is_rewritable; }
250 bool is_volatile() const { return (_field ? _field->is_volatile() : false); }
251 int general_index() const { return (_general_index != 0) ? _general_index : _index; }
252
253 void set_rewritable(bool z) { _is_rewritable = z; }
254 void set_field(ciField* f) {
255 assert(!_field,"")do { if (!(!_field)) { (*g_assert_poison) = 'X';; report_vm_error
("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/compile.hpp"
, 255, "assert(" "!_field" ") failed", ""); ::breakpoint(); }
} while (0)
;
256 _field = f;
257 if (f->is_final() || f->is_stable()) {
258 // In the case of @Stable, multiple writes are possible but may be assumed to be no-ops.
259 _is_rewritable = false;
260 }
261 }
262 void set_element(const Type* e) {
263 assert(_element == NULL, "")do { if (!(_element == __null)) { (*g_assert_poison) = 'X';; report_vm_error
("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/compile.hpp"
, 263, "assert(" "_element == __null" ") failed", ""); ::breakpoint
(); } } while (0)
;
264 _element = e;
265 }
266
267 BasicType basic_type() const;
268
269 void print_on(outputStream* st) PRODUCT_RETURN;
270 };
271
272 enum {
273 logAliasCacheSize = 6,
274 AliasCacheSize = (1<<logAliasCacheSize)
275 };
276 struct AliasCacheEntry { const TypePtr* _adr_type; int _index; }; // simple duple type
277 enum {
278 trapHistLength = MethodData::_trap_hist_limit
279 };
280
281 private:
282 // Fixed parameters to this compilation.
283 const int _compile_id;
284 const Options _options; // Compilation options
285 ciMethod* _method; // The method being compiled.
286 int _entry_bci; // entry bci for osr methods.
287 const TypeFunc* _tf; // My kind of signature
288 InlineTree* _ilt; // Ditto (temporary).
289 address _stub_function; // VM entry for stub being compiled, or NULL
290 const char* _stub_name; // Name of stub or adapter being compiled, or NULL
291 address _stub_entry_point; // Compile code entry for generated stub, or NULL
292
293 // Control of this compilation.
294 int _max_inline_size; // Max inline size for this compilation
295 int _freq_inline_size; // Max hot method inline size for this compilation
296 int _fixed_slots; // count of frame slots not allocated by the register
297 // allocator i.e. locks, original deopt pc, etc.
298 uintx _max_node_limit; // Max unique node count during a single compilation.
299
300 bool _post_loop_opts_phase; // Loop opts are finished.
301
302 int _major_progress; // Count of something big happening
303 bool _inlining_progress; // progress doing incremental inlining?
304 bool _inlining_incrementally;// Are we doing incremental inlining (post parse)
305 bool _do_cleanup; // Cleanup is needed before proceeding with incremental inlining
306 bool _has_loops; // True if the method _may_ have some loops
307 bool _has_split_ifs; // True if the method _may_ have some split-if
308 bool _has_unsafe_access; // True if the method _may_ produce faults in unsafe loads or stores.
309 bool _has_stringbuilder; // True if StringBuffers or StringBuilders are allocated
310 bool _has_boxed_value; // True if a boxed object is allocated
311 bool _has_reserved_stack_access; // True if the method or an inlined method is annotated with ReservedStackAccess
312 uint _max_vector_size; // Maximum size of generated vectors
313 bool _clear_upper_avx; // Clear upper bits of ymm registers using vzeroupper
314 uint _trap_hist[trapHistLength]; // Cumulative traps
315 bool _trap_can_recompile; // Have we emitted a recompiling trap?
316 uint _decompile_count; // Cumulative decompilation counts.
317 bool _do_inlining; // True if we intend to do inlining
318 bool _do_scheduling; // True if we intend to do scheduling
319 bool _do_freq_based_layout; // True if we intend to do frequency based block layout
320 bool _do_vector_loop; // True if allowed to execute loop in parallel iterations
321 bool _use_cmove; // True if CMove should be used without profitability analysis
322 bool _age_code; // True if we need to profile code age (decrement the aging counter)
323 int _AliasLevel; // Locally-adjusted version of AliasLevel flag.
324 bool _print_assembly; // True if we should dump assembly code for this compilation
325 bool _print_inlining; // True if we should print inlining for this compilation
326 bool _print_intrinsics; // True if we should print intrinsics for this compilation
327#ifndef PRODUCT
328 uint _igv_idx; // Counter for IGV node identifiers
329 bool _trace_opto_output;
330 bool _print_ideal;
331 bool _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing
332#endif
333 bool _has_irreducible_loop; // Found irreducible loops
334 // JSR 292
335 bool _has_method_handle_invokes; // True if this method has MethodHandle invokes.
336 RTMState _rtm_state; // State of Restricted Transactional Memory usage
337 int _loop_opts_cnt; // loop opts round
338 bool _clinit_barrier_on_entry; // True if clinit barrier is needed on nmethod entry
339 uint _stress_seed; // Seed for stress testing
340
341 // Compilation environment.
342 Arena _comp_arena; // Arena with lifetime equivalent to Compile
343 void* _barrier_set_state; // Potential GC barrier state for Compile
344 ciEnv* _env; // CI interface
345 DirectiveSet* _directive; // Compiler directive
346 CompileLog* _log; // from CompilerThread
347 const char* _failure_reason; // for record_failure/failing pattern
348 GrowableArray<CallGenerator*> _intrinsics; // List of intrinsics.
349 GrowableArray<Node*> _macro_nodes; // List of nodes which need to be expanded before matching.
350 GrowableArray<Node*> _predicate_opaqs; // List of Opaque1 nodes for the loop predicates.
351 GrowableArray<Node*> _skeleton_predicate_opaqs; // List of Opaque4 nodes for the loop skeleton predicates.
352 GrowableArray<Node*> _expensive_nodes; // List of nodes that are expensive to compute and that we'd better not let the GVN freely common
353 GrowableArray<Node*> _for_post_loop_igvn; // List of nodes for IGVN after loop opts are over
354 GrowableArray<Node_List*> _coarsened_locks; // List of coarsened Lock and Unlock nodes
355 ConnectionGraph* _congraph;
356#ifndef PRODUCT
357 IdealGraphPrinter* _printer;
358 static IdealGraphPrinter* _debug_file_printer;
359 static IdealGraphPrinter* _debug_network_printer;
360#endif
361
362
363 // Node management
364 uint _unique; // Counter for unique Node indices
365 VectorSet _dead_node_list; // Set of dead nodes
366 uint _dead_node_count; // Number of dead nodes; VectorSet::Size() is O(N).
367 // So use this to keep count and make the call O(1).
368 DEBUG_ONLY(Unique_Node_List* _modified_nodes;)Unique_Node_List* _modified_nodes; // List of nodes which inputs were modified
369 DEBUG_ONLY(bool _phase_optimize_finished;)bool _phase_optimize_finished; // Used for live node verification while creating new nodes
370
371 debug_only(static int _debug_idx;)static int _debug_idx; // Monotonic counter (not reset), use -XX:BreakAtNode=<idx>
372 Arena _node_arena; // Arena for new-space Nodes
373 Arena _old_arena; // Arena for old-space Nodes, lifetime during xform
374 RootNode* _root; // Unique root of compilation, or NULL after bail-out.
375 Node* _top; // Unique top node. (Reset by various phases.)
376
377 Node* _immutable_memory; // Initial memory state
378
379 Node* _recent_alloc_obj;
380 Node* _recent_alloc_ctl;
381
382 // Constant table
383 MachConstantBaseNode* _mach_constant_base_node; // Constant table base node singleton.
384
385
386 // Blocked array of debugging and profiling information,
387 // tracked per node.
388 enum { _log2_node_notes_block_size = 8,
389 _node_notes_block_size = (1<<_log2_node_notes_block_size)
390 };
391 GrowableArray<Node_Notes*>* _node_note_array;
392 Node_Notes* _default_node_notes; // default notes for new nodes
393
394 // After parsing and every bulk phase we hang onto the Root instruction.
395 // The RootNode instruction is where the whole program begins. It produces
396 // the initial Control and BOTTOM for everybody else.
397
398 // Type management
399 Arena _Compile_types; // Arena for all types
400 Arena* _type_arena; // Alias for _Compile_types except in Initialize_shared()
401 Dict* _type_dict; // Intern table
402 CloneMap _clone_map; // used for recording history of cloned nodes
403 size_t _type_last_size; // Last allocation size (see Type::operator new/delete)
404 ciMethod* _last_tf_m; // Cache for
405 const TypeFunc* _last_tf; // TypeFunc::make
406 AliasType** _alias_types; // List of alias types seen so far.
407 int _num_alias_types; // Logical length of _alias_types
408 int _max_alias_types; // Physical length of _alias_types
409 AliasCacheEntry _alias_cache[AliasCacheSize]; // Gets aliases w/o data structure walking
410
411 // Parsing, optimization
412 PhaseGVN* _initial_gvn; // Results of parse-time PhaseGVN
413 Unique_Node_List* _for_igvn; // Initial work-list for next round of Iterative GVN
414
415 GrowableArray<CallGenerator*> _late_inlines; // List of CallGenerators to be revisited after main parsing has finished.
416 GrowableArray<CallGenerator*> _string_late_inlines; // same but for string operations
417 GrowableArray<CallGenerator*> _boxing_late_inlines; // same but for boxing operations
418
419 GrowableArray<CallGenerator*> _vector_reboxing_late_inlines; // same but for vector reboxing operations
420
421 int _late_inlines_pos; // Where in the queue should the next late inlining candidate go (emulate depth first inlining)
422 uint _number_of_mh_late_inlines; // number of method handle late inlining still pending
423
424 GrowableArray<RuntimeStub*> _native_invokers;
425
426 // Inlining may not happen in parse order which would make
427 // PrintInlining output confusing. Keep track of PrintInlining
428 // pieces in order.
429 class PrintInliningBuffer : public CHeapObj<mtCompiler> {
430 private:
431 CallGenerator* _cg;
432 stringStream _ss;
433 static const size_t default_stream_buffer_size = 128;
434
435 public:
436 PrintInliningBuffer()
437 : _cg(NULL__null), _ss(default_stream_buffer_size) {}
438
439 stringStream* ss() { return &_ss; }
440 CallGenerator* cg() { return _cg; }
441 void set_cg(CallGenerator* cg) { _cg = cg; }
442 };
443
444 stringStream* _print_inlining_stream;
445 GrowableArray<PrintInliningBuffer*>* _print_inlining_list;
446 int _print_inlining_idx;
447 char* _print_inlining_output;
448
449 // Only keep nodes in the expensive node list that need to be optimized
450 void cleanup_expensive_nodes(PhaseIterGVN &igvn);
451 // Use for sorting expensive nodes to bring similar nodes together
452 static int cmp_expensive_nodes(Node** n1, Node** n2);
453 // Expensive nodes list already sorted?
454 bool expensive_nodes_sorted() const;
455 // Remove the speculative part of types and clean up the graph
456 void remove_speculative_types(PhaseIterGVN &igvn);
457
458 void* _replay_inline_data; // Pointer to data loaded from file
459
460 void print_inlining_stream_free();
461 void print_inlining_init();
462 void print_inlining_reinit();
463 void print_inlining_commit();
464 void print_inlining_push();
465 PrintInliningBuffer* print_inlining_current();
466
467 void log_late_inline_failure(CallGenerator* cg, const char* msg);
468 DEBUG_ONLY(bool _exception_backedge;)bool _exception_backedge;
469
470 public:
471
472 void* barrier_set_state() const { return _barrier_set_state; }
473
474 outputStream* print_inlining_stream() const {
475 assert(print_inlining() || print_intrinsics(), "PrintInlining off?")do { if (!(print_inlining() || print_intrinsics())) { (*g_assert_poison
) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/compile.hpp"
, 475, "assert(" "print_inlining() || print_intrinsics()" ") failed"
, "PrintInlining off?"); ::breakpoint(); } } while (0)
;
476 return _print_inlining_stream;
477 }
478
479 void print_inlining_update(CallGenerator* cg);
480 void print_inlining_update_delayed(CallGenerator* cg);
481 void print_inlining_move_to(CallGenerator* cg);
482 void print_inlining_assert_ready();
483 void print_inlining_reset();
484
485 void print_inlining(ciMethod* method, int inline_level, int bci, const char* msg = NULL__null) {
486 stringStream ss;
487 CompileTask::print_inlining_inner(&ss, method, inline_level, bci, msg);
488 print_inlining_stream()->print("%s", ss.as_string());
489 }
490
491#ifndef PRODUCT
492 IdealGraphPrinter* printer() { return _printer; }
493#endif
494
495 void log_late_inline(CallGenerator* cg);
496 void log_inline_id(CallGenerator* cg);
497 void log_inline_failure(const char* msg);
498
499 void* replay_inline_data() const { return _replay_inline_data; }
500
501 // Dump inlining replay data to the stream.
502 void dump_inline_data(outputStream* out);
503
504 private:
505 // Matching, CFG layout, allocation, code generation
506 PhaseCFG* _cfg; // Results of CFG finding
507 int _java_calls; // Number of java calls in the method
508 int _inner_loops; // Number of inner loops in the method
509 Matcher* _matcher; // Engine to map ideal to machine instructions
510 PhaseRegAlloc* _regalloc; // Results of register allocation.
511 RegMask _FIRST_STACK_mask; // All stack slots usable for spills (depends on frame layout)
512 Arena* _indexSet_arena; // control IndexSet allocation within PhaseChaitin
513 void* _indexSet_free_block_list; // free list of IndexSet bit blocks
514 int _interpreter_frame_size;
515
516 PhaseOutput* _output;
517
518 public:
519 // Accessors
520
521 // The Compile instance currently active in this (compiler) thread.
522 static Compile* current() {
523 return (Compile*) ciEnv::current()->compiler_data();
524 }
525
526 int interpreter_frame_size() const { return _interpreter_frame_size; }
527
528 PhaseOutput* output() const { return _output; }
529 void set_output(PhaseOutput* o) { _output = o; }
530
531 // ID for this compilation. Useful for setting breakpoints in the debugger.
532 int compile_id() const { return _compile_id; }
533 DirectiveSet* directive() const { return _directive; }
534
535 // Does this compilation allow instructions to subsume loads? User
536 // instructions that subsume a load may result in an unschedulable
537 // instruction sequence.
538 bool subsume_loads() const { return _options._subsume_loads; }
539 /** Do escape analysis. */
540 bool do_escape_analysis() const { return _options._do_escape_analysis; }
541 bool do_iterative_escape_analysis() const { return _options._do_iterative_escape_analysis; }
542 /** Do boxing elimination. */
543 bool eliminate_boxing() const { return _options._eliminate_boxing; }
544 /** Do aggressive boxing elimination. */
545 bool aggressive_unboxing() const { return _options._eliminate_boxing && AggressiveUnboxing; }
546 bool should_install_code() const { return _options._install_code; }
547 /** Do locks coarsening. */
548 bool do_locks_coarsening() const { return _options._do_locks_coarsening; }
549
550 // Other fixed compilation parameters.
551 ciMethod* method() const { return _method; }
552 int entry_bci() const { return _entry_bci; }
553 bool is_osr_compilation() const { return _entry_bci != InvocationEntryBci; }
554 bool is_method_compilation() const { return (_method != NULL__null && !_method->flags().is_native()); }
555 const TypeFunc* tf() const { assert(_tf!=NULL, "")do { if (!(_tf!=__null)) { (*g_assert_poison) = 'X';; report_vm_error
("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/compile.hpp"
, 555, "assert(" "_tf!=__null" ") failed", ""); ::breakpoint(
); } } while (0)
; return _tf; }
556 void init_tf(const TypeFunc* tf) { assert(_tf==NULL, "")do { if (!(_tf==__null)) { (*g_assert_poison) = 'X';; report_vm_error
("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/compile.hpp"
, 556, "assert(" "_tf==__null" ") failed", ""); ::breakpoint(
); } } while (0)
; _tf = tf; }
557 InlineTree* ilt() const { return _ilt; }
558 address stub_function() const { return _stub_function; }
559 const char* stub_name() const { return _stub_name; }
560 address stub_entry_point() const { return _stub_entry_point; }
561 void set_stub_entry_point(address z) { _stub_entry_point = z; }
562
563 // Control of this compilation.
564 int fixed_slots() const { assert(_fixed_slots >= 0, "")do { if (!(_fixed_slots >= 0)) { (*g_assert_poison) = 'X';
; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/compile.hpp"
, 564, "assert(" "_fixed_slots >= 0" ") failed", ""); ::breakpoint
(); } } while (0)
; return _fixed_slots; }
565 void set_fixed_slots(int n) { _fixed_slots = n; }
566 int major_progress() const { return _major_progress; }
567 void set_inlining_progress(bool z) { _inlining_progress = z; }
568 int inlining_progress() const { return _inlining_progress; }
569 void set_inlining_incrementally(bool z) { _inlining_incrementally = z; }
570 int inlining_incrementally() const { return _inlining_incrementally; }
571 void set_do_cleanup(bool z) { _do_cleanup = z; }
572 int do_cleanup() const { return _do_cleanup; }
573 void set_major_progress() { _major_progress++; }
574 void restore_major_progress(int progress) { _major_progress += progress; }
575 void clear_major_progress() { _major_progress = 0; }
576 int max_inline_size() const { return _max_inline_size; }
577 void set_freq_inline_size(int n) { _freq_inline_size = n; }
578 int freq_inline_size() const { return _freq_inline_size; }
579 void set_max_inline_size(int n) { _max_inline_size = n; }
580 bool has_loops() const { return _has_loops; }
581 void set_has_loops(bool z) { _has_loops = z; }
582 bool has_split_ifs() const { return _has_split_ifs; }
583 void set_has_split_ifs(bool z) { _has_split_ifs = z; }
584 bool has_unsafe_access() const { return _has_unsafe_access; }
585 void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
586 bool has_stringbuilder() const { return _has_stringbuilder; }
587 void set_has_stringbuilder(bool z) { _has_stringbuilder = z; }
588 bool has_boxed_value() const { return _has_boxed_value; }
589 void set_has_boxed_value(bool z) { _has_boxed_value = z; }
590 bool has_reserved_stack_access() const { return _has_reserved_stack_access; }
591 void set_has_reserved_stack_access(bool z) { _has_reserved_stack_access = z; }
592 uint max_vector_size() const { return _max_vector_size; }
593 void set_max_vector_size(uint s) { _max_vector_size = s; }
594 bool clear_upper_avx() const { return _clear_upper_avx; }
595 void set_clear_upper_avx(bool s) { _clear_upper_avx = s; }
596 void set_trap_count(uint r, uint c) { assert(r < trapHistLength, "oob")do { if (!(r < trapHistLength)) { (*g_assert_poison) = 'X'
;; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/compile.hpp"
, 596, "assert(" "r < trapHistLength" ") failed", "oob"); ::
breakpoint(); } } while (0)
; _trap_hist[r] = c; }
597 uint trap_count(uint r) const { assert(r < trapHistLength, "oob")do { if (!(r < trapHistLength)) { (*g_assert_poison) = 'X'
;; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/compile.hpp"
, 597, "assert(" "r < trapHistLength" ") failed", "oob"); ::
breakpoint(); } } while (0)
; return _trap_hist[r]; }
598 bool trap_can_recompile() const { return _trap_can_recompile; }
599 void set_trap_can_recompile(bool z) { _trap_can_recompile = z; }
600 uint decompile_count() const { return _decompile_count; }
601 void set_decompile_count(uint c) { _decompile_count = c; }
602 bool allow_range_check_smearing() const;
603 bool do_inlining() const { return _do_inlining; }
604 void set_do_inlining(bool z) { _do_inlining = z; }
605 bool do_scheduling() const { return _do_scheduling; }
606 void set_do_scheduling(bool z) { _do_scheduling = z; }
607 bool do_freq_based_layout() const{ return _do_freq_based_layout; }
608 void set_do_freq_based_layout(bool z){ _do_freq_based_layout = z; }
609 bool do_vector_loop() const { return _do_vector_loop; }
610 void set_do_vector_loop(bool z) { _do_vector_loop = z; }
611 bool use_cmove() const { return _use_cmove; }
612 void set_use_cmove(bool z) { _use_cmove = z; }
613 bool age_code() const { return _age_code; }
614 void set_age_code(bool z) { _age_code = z; }
615 int AliasLevel() const { return _AliasLevel; }
616 bool print_assembly() const { return _print_assembly; }
617 void set_print_assembly(bool z) { _print_assembly = z; }
618 bool print_inlining() const { return _print_inlining; }
619 void set_print_inlining(bool z) { _print_inlining = z; }
620 bool print_intrinsics() const { return _print_intrinsics; }
621 void set_print_intrinsics(bool z) { _print_intrinsics = z; }
622 RTMState rtm_state() const { return _rtm_state; }
623 void set_rtm_state(RTMState s) { _rtm_state = s; }
624 bool use_rtm() const { return (_rtm_state & NoRTM) == 0; }
625 bool profile_rtm() const { return _rtm_state == ProfileRTM; }
626 uint max_node_limit() const { return (uint)_max_node_limit; }
627 void set_max_node_limit(uint n) { _max_node_limit = n; }
628 bool clinit_barrier_on_entry() { return _clinit_barrier_on_entry; }
629 void set_clinit_barrier_on_entry(bool z) { _clinit_barrier_on_entry = z; }
630
631 // check the CompilerOracle for special behaviours for this compile
632 bool method_has_option(enum CompileCommand option) {
633 return method() != NULL__null && method()->has_option(option);
634 }
635
636#ifndef PRODUCT
637 uint next_igv_idx() { return _igv_idx++; }
638 bool trace_opto_output() const { return _trace_opto_output; }
639 bool print_ideal() const { return _print_ideal; }
640 bool parsed_irreducible_loop() const { return _parsed_irreducible_loop; }
641 void set_parsed_irreducible_loop(bool z) { _parsed_irreducible_loop = z; }
642 int _in_dump_cnt; // Required for dumping ir nodes.
643#endif
644 bool has_irreducible_loop() const { return _has_irreducible_loop; }
645 void set_has_irreducible_loop(bool z) { _has_irreducible_loop = z; }
646
647 // JSR 292
648 bool has_method_handle_invokes() const { return _has_method_handle_invokes; }
649 void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }
650
651 Ticks _latest_stage_start_counter;
652
653 void begin_method(int level = 1) {
654#ifndef PRODUCT
655 if (_method != NULL__null && should_print(level)) {
656 _printer->begin_method();
657 }
658#endif
659 C->_latest_stage_start_counter.stamp();
660 }
661
662 bool should_print(int level = 1) {
663#ifndef PRODUCT
664 if (PrintIdealGraphLevel < 0) { // disabled by the user
665 return false;
666 }
667
668 bool need = directive()->IGVPrintLevelOption >= level;
669 if (need && !_printer) {
670 _printer = IdealGraphPrinter::printer();
671 assert(_printer != NULL, "_printer is NULL when we need it!")do { if (!(_printer != __null)) { (*g_assert_poison) = 'X';; report_vm_error
("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/compile.hpp"
, 671, "assert(" "_printer != __null" ") failed", "_printer is NULL when we need it!"
); ::breakpoint(); } } while (0)
;
672 _printer->set_compile(this);
673 }
674 return need;
675#else
676 return false;
677#endif
678 }
679
680 void print_method(CompilerPhaseType cpt, const char *name, int level = 1);
681 void print_method(CompilerPhaseType cpt, int level = 1, int idx = 0);
682 void print_method(CompilerPhaseType cpt, Node* n, int level = 3);
683
684#ifndef PRODUCT
685 void igv_print_method_to_file(const char* phase_name = "Debug", bool append = false);
686 void igv_print_method_to_network(const char* phase_name = "Debug");
687 static IdealGraphPrinter* debug_file_printer() { return _debug_file_printer; }
688 static IdealGraphPrinter* debug_network_printer() { return _debug_network_printer; }
689#endif
690
691 void end_method(int level = 1);
692
693 int macro_count() const { return _macro_nodes.length(); }
694 int predicate_count() const { return _predicate_opaqs.length(); }
695 int skeleton_predicate_count() const { return _skeleton_predicate_opaqs.length(); }
696 int expensive_count() const { return _expensive_nodes.length(); }
697 int coarsened_count() const { return _coarsened_locks.length(); }
698
699 Node* macro_node(int idx) const { return _macro_nodes.at(idx); }
700 Node* predicate_opaque1_node(int idx) const { return _predicate_opaqs.at(idx); }
701 Node* skeleton_predicate_opaque4_node(int idx) const { return _skeleton_predicate_opaqs.at(idx); }
702 Node* expensive_node(int idx) const { return _expensive_nodes.at(idx); }
703
704 ConnectionGraph* congraph() { return _congraph;}
705 void set_congraph(ConnectionGraph* congraph) { _congraph = congraph;}
706 void add_macro_node(Node * n) {
707 //assert(n->is_macro(), "must be a macro node");
708 assert(!_macro_nodes.contains(n), "duplicate entry in expand list")do { if (!(!_macro_nodes.contains(n))) { (*g_assert_poison) =
'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/compile.hpp"
, 708, "assert(" "!_macro_nodes.contains(n)" ") failed", "duplicate entry in expand list"
); ::breakpoint(); } } while (0)
;
709 _macro_nodes.append(n);
710 }
711 void remove_macro_node(Node* n) {
712 // this function may be called twice for a node so we can only remove it
713 // if it still exists.
714 _macro_nodes.remove_if_existing(n);
715 // remove from _predicate_opaqs list also if it is there
716 if (predicate_count() > 0) {
717 _predicate_opaqs.remove_if_existing(n);
718 }
719 // Remove from coarsened locks list if present
720 if (coarsened_count() > 0) {
721 remove_coarsened_lock(n);
722 }
723 }
724 void add_expensive_node(Node* n);
725 void remove_expensive_node(Node* n) {
726 _expensive_nodes.remove_if_existing(n);
727 }
728 void add_predicate_opaq(Node* n) {
729 assert(!_predicate_opaqs.contains(n), "duplicate entry in predicate opaque1")do { if (!(!_predicate_opaqs.contains(n))) { (*g_assert_poison
) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/compile.hpp"
, 729, "assert(" "!_predicate_opaqs.contains(n)" ") failed", "duplicate entry in predicate opaque1"
); ::breakpoint(); } } while (0)
;
730 assert(_macro_nodes.contains(n), "should have already been in macro list")do { if (!(_macro_nodes.contains(n))) { (*g_assert_poison) = 'X'
;; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/compile.hpp"
, 730, "assert(" "_macro_nodes.contains(n)" ") failed", "should have already been in macro list"
); ::breakpoint(); } } while (0)
;
731 _predicate_opaqs.append(n);
732 }
733 void add_skeleton_predicate_opaq(Node* n) {
734 assert(!_skeleton_predicate_opaqs.contains(n), "duplicate entry in skeleton predicate opaque4 list")do { if (!(!_skeleton_predicate_opaqs.contains(n))) { (*g_assert_poison
) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/compile.hpp"
, 734, "assert(" "!_skeleton_predicate_opaqs.contains(n)" ") failed"
, "duplicate entry in skeleton predicate opaque4 list"); ::breakpoint
(); } } while (0)
;
735 _skeleton_predicate_opaqs.append(n);
736 }
737 void remove_skeleton_predicate_opaq(Node* n) {
738 if (skeleton_predicate_count() > 0) {
739 _skeleton_predicate_opaqs.remove_if_existing(n);
740 }
741 }
742 void add_coarsened_locks(GrowableArray<AbstractLockNode*>& locks);
743 void remove_coarsened_lock(Node* n);
744 bool coarsened_locks_consistent();
745
746 bool post_loop_opts_phase() { return _post_loop_opts_phase; }
747 void set_post_loop_opts_phase() { _post_loop_opts_phase = true; }
748 void reset_post_loop_opts_phase() { _post_loop_opts_phase = false; }
749
750 void record_for_post_loop_opts_igvn(Node* n);
751 void remove_from_post_loop_opts_igvn(Node* n);
752 void process_for_post_loop_opts_igvn(PhaseIterGVN& igvn);
753
754 void sort_macro_nodes();
755
756 // remove the opaque nodes that protect the predicates so that the unused checks and
757 // uncommon traps will be eliminated from the graph.
758 void cleanup_loop_predicates(PhaseIterGVN &igvn);
759 bool is_predicate_opaq(Node* n) {
760 return _predicate_opaqs.contains(n);
761 }
762
763 // Are there candidate expensive nodes for optimization?
764 bool should_optimize_expensive_nodes(PhaseIterGVN &igvn);
765 // Check whether n1 and n2 are similar
766 static int cmp_expensive_nodes(Node* n1, Node* n2);
767 // Sort expensive nodes to locate similar expensive nodes
768 void sort_expensive_nodes();
769
770 // Compilation environment.
771 Arena* comp_arena() { return &_comp_arena; }
772 ciEnv* env() const { return _env; }
773 CompileLog* log() const { return _log; }
774 bool failing() const { return _env->failing() || _failure_reason != NULL__null; }
25
Assuming field '_failure_reason' is equal to NULL
26
Returning zero, which participates in a condition later
775 const char* failure_reason() const { return (_env->failing()) ? _env->failure_reason() : _failure_reason; }
776
777 bool failure_reason_is(const char* r) const {
778 return (r == _failure_reason) || (r != NULL__null && _failure_reason != NULL__null && strcmp(r, _failure_reason) == 0);
779 }
780
781 void record_failure(const char* reason);
782 void record_method_not_compilable(const char* reason) {
783 env()->record_method_not_compilable(reason);
784 // Record failure reason.
785 record_failure(reason);
786 }
787 bool check_node_count(uint margin, const char* reason) {
788 if (live_nodes() + margin > max_node_limit()) {
789 record_method_not_compilable(reason);
790 return true;
791 } else {
792 return false;
793 }
794 }
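Hedged usage sketch (not part of compile.hpp; C is the current Compile* and the margin/message are made up): a phase that is about to create roughly 200 nodes can bail out before blowing the node budget.

  if (C->check_node_count(200, "out of nodes during cloning")) {
    return;   // record_method_not_compilable() has already flagged the bailout
  }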
795
796 // Node management
797 uint unique() const { return _unique; }
798 uint next_unique() { return _unique++; }
799 void set_unique(uint i) { _unique = i; }
800 static int debug_idx() { return debug_only(_debug_idx)_debug_idx+0; }
801 static void set_debug_idx(int i) { debug_only(_debug_idx = i)_debug_idx = i; }
802 Arena* node_arena() { return &_node_arena; }
803 Arena* old_arena() { return &_old_arena; }
804 RootNode* root() const { return _root; }
805 void set_root(RootNode* r) { _root = r; }
806 StartNode* start() const; // (Derived from root.)
807 void init_start(StartNode* s);
808 Node* immutable_memory();
809
810 Node* recent_alloc_ctl() const { return _recent_alloc_ctl; }
811 Node* recent_alloc_obj() const { return _recent_alloc_obj; }
812 void set_recent_alloc(Node* ctl, Node* obj) {
813 _recent_alloc_ctl = ctl;
814 _recent_alloc_obj = obj;
815 }
816 void record_dead_node(uint idx) { if (_dead_node_list.test_set(idx)) return;
817 _dead_node_count++;
818 }
819 void reset_dead_node_list() { _dead_node_list.reset();
820 _dead_node_count = 0;
821 }
822 uint live_nodes() const {
823 int val = _unique - _dead_node_count;
824 assert (val >= 0, "number of tracked dead nodes %d more than created nodes %d", _unique, _dead_node_count)do { if (!(val >= 0)) { (*g_assert_poison) = 'X';; report_vm_error
("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/compile.hpp"
, 824, "assert(" "val >= 0" ") failed", "number of tracked dead nodes %d more than created nodes %d"
, _unique, _dead_node_count); ::breakpoint(); } } while (0)
;
825 return (uint) val;
826 }
827#ifdef ASSERT1
828 void set_phase_optimize_finished() { _phase_optimize_finished = true; }
829 bool phase_optimize_finished() const { return _phase_optimize_finished; }
830 uint count_live_nodes_by_graph_walk();
831 void print_missing_nodes();
832#endif
833
834 // Record modified nodes to check that they are put on IGVN worklist
835 void record_modified_node(Node* n) NOT_DEBUG_RETURN;
836 void remove_modified_node(Node* n) NOT_DEBUG_RETURN;
837 DEBUG_ONLY( Unique_Node_List* modified_nodes() const { return _modified_nodes; } )Unique_Node_List* modified_nodes() const { return _modified_nodes
; }
838
839 MachConstantBaseNode* mach_constant_base_node();
840 bool has_mach_constant_base_node() const { return _mach_constant_base_node != NULL__null; }
841 // Generated by adlc, true if CallNode requires MachConstantBase.
842 bool needs_deep_clone_jvms();
843
844 // Handy undefined Node
845 Node* top() const { return _top; }
846
847 // these are used by guys who need to know about creation and transformation of top:
848 Node* cached_top_node() { return _top; }
849 void set_cached_top_node(Node* tn);
850
851 GrowableArray<Node_Notes*>* node_note_array() const { return _node_note_array; }
852 void set_node_note_array(GrowableArray<Node_Notes*>* arr) { _node_note_array = arr; }
853 Node_Notes* default_node_notes() const { return _default_node_notes; }
854 void set_default_node_notes(Node_Notes* n) { _default_node_notes = n; }
855
856 Node_Notes* node_notes_at(int idx) {
857 return locate_node_notes(_node_note_array, idx, false);
858 }
859 inline bool set_node_notes_at(int idx, Node_Notes* value);
860
861 // Copy notes from source to dest, if they exist.
862 // Overwrite dest only if source provides something.
863 // Return true if information was moved.
864 bool copy_node_notes_to(Node* dest, Node* source);
865
866 // Workhorse function to sort out the blocked Node_Notes array:
867 inline Node_Notes* locate_node_notes(GrowableArray<Node_Notes*>* arr,
868 int idx, bool can_grow = false);
869
870 void grow_node_notes(GrowableArray<Node_Notes*>* arr, int grow_by);
871
872 // Type management
873 Arena* type_arena() { return _type_arena; }
874 Dict* type_dict() { return _type_dict; }
875 size_t type_last_size() { return _type_last_size; }
876 int num_alias_types() { return _num_alias_types; }
877
878 void init_type_arena() { _type_arena = &_Compile_types; }
879 void set_type_arena(Arena* a) { _type_arena = a; }
880 void set_type_dict(Dict* d) { _type_dict = d; }
881 void set_type_last_size(size_t sz) { _type_last_size = sz; }
882
883 const TypeFunc* last_tf(ciMethod* m) {
884 return (m == _last_tf_m) ? _last_tf : NULL__null;
885 }
886 void set_last_tf(ciMethod* m, const TypeFunc* tf) {
887 assert(m != NULL || tf == NULL, "");
888 _last_tf_m = m;
889 _last_tf = tf;
890 }
891
892 AliasType* alias_type(int idx) { assert(idx < num_alias_types(), "oob"); return _alias_types[idx]; }
893 AliasType* alias_type(const TypePtr* adr_type, ciField* field = NULL) { return find_alias_type(adr_type, false, field); }
894 bool have_alias_type(const TypePtr* adr_type);
895 AliasType* alias_type(ciField* field);
896
897 int get_alias_index(const TypePtr* at) { return alias_type(at)->index(); }
898 const TypePtr* get_adr_type(uint aidx) { return alias_type(aidx)->adr_type(); }
899 int get_general_index(uint aidx) { return alias_type(aidx)->general_index(); }
900
901 // Building nodes
902 void rethrow_exceptions(JVMState* jvms);
903 void return_values(JVMState* jvms);
904 JVMState* build_start_state(StartNode* start, const TypeFunc* tf);
905
906 // Decide how to build a call.
907 // The profile factor is a discount to apply to this site's interp. profile.
908 CallGenerator* call_generator(ciMethod* call_method, int vtable_index, bool call_does_dispatch,
909 JVMState* jvms, bool allow_inline, float profile_factor, ciKlass* speculative_receiver_type = NULL,
910 bool allow_intrinsics = true);
911 bool should_delay_inlining(ciMethod* call_method, JVMState* jvms) {
912 return should_delay_string_inlining(call_method, jvms) ||
913 should_delay_boxing_inlining(call_method, jvms) ||
914 should_delay_vector_inlining(call_method, jvms);
915 }
916 bool should_delay_string_inlining(ciMethod* call_method, JVMState* jvms);
917 bool should_delay_boxing_inlining(ciMethod* call_method, JVMState* jvms);
918 bool should_delay_vector_inlining(ciMethod* call_method, JVMState* jvms);
919 bool should_delay_vector_reboxing_inlining(ciMethod* call_method, JVMState* jvms);
920
921 // Helper functions to identify inlining potential at call-site
922 ciMethod* optimize_virtual_call(ciMethod* caller, ciInstanceKlass* klass,
923 ciKlass* holder, ciMethod* callee,
924 const TypeOopPtr* receiver_type, bool is_virtual,
925 bool &call_does_dispatch, int &vtable_index,
926 bool check_access = true);
927 ciMethod* optimize_inlining(ciMethod* caller, ciInstanceKlass* klass, ciKlass* holder,
928 ciMethod* callee, const TypeOopPtr* receiver_type,
929 bool check_access = true);
930
931 // Report if there were too many traps at a current method and bci.
932 // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
933 // If there is no MDO at all, report no trap unless told to assume it.
934 bool too_many_traps(ciMethod* method, int bci, Deoptimization::DeoptReason reason);
935 // This version, unspecific to a particular bci, asks if
936 // PerMethodTrapLimit was exceeded for all inlined methods seen so far.
937 bool too_many_traps(Deoptimization::DeoptReason reason,
938 // Privately used parameter for logging:
939 ciMethodData* logmd = NULL);
940 // Report if there were too many recompiles at a method and bci.
941 bool too_many_recompiles(ciMethod* method, int bci, Deoptimization::DeoptReason reason);
942 // Report if there were too many traps or recompiles at a method and bci.
943 bool too_many_traps_or_recompiles(ciMethod* method, int bci, Deoptimization::DeoptReason reason) {
944 return too_many_traps(method, bci, reason) ||
945 too_many_recompiles(method, bci, reason);
946 }
947 // Return a bitset with the reasons where deoptimization is allowed,
948 // i.e., where there were not too many uncommon traps.
949 int _allowed_reasons;
950 int allowed_deopt_reasons() { return _allowed_reasons; }
951 void set_allowed_deopt_reasons();
952
953 // Parsing, optimization
954 PhaseGVN* initial_gvn() { return _initial_gvn; }
955 Unique_Node_List* for_igvn() { return _for_igvn; }
956 inline void record_for_igvn(Node* n); // Body is after class Unique_Node_List.
957 void set_initial_gvn(PhaseGVN *gvn) { _initial_gvn = gvn; }
958 void set_for_igvn(Unique_Node_List *for_igvn) { _for_igvn = for_igvn; }
959
960 // Replace n by nn using initial_gvn, calling hash_delete and
961 // record_for_igvn as needed.
962 void gvn_replace_by(Node* n, Node* nn);
963
964
965 void identify_useful_nodes(Unique_Node_List &useful);
966 void update_dead_node_list(Unique_Node_List &useful);
967 void remove_useless_nodes (Unique_Node_List &useful);
968
969 void remove_useless_node(Node* dead);
970
971 // Record this CallGenerator for inlining at the end of parsing.
972 void add_late_inline(CallGenerator* cg) {
973 _late_inlines.insert_before(_late_inlines_pos, cg);
974 _late_inlines_pos++;
975 }
976
977 void prepend_late_inline(CallGenerator* cg) {
978 _late_inlines.insert_before(0, cg);
979 }
980
981 void add_string_late_inline(CallGenerator* cg) {
982 _string_late_inlines.push(cg);
983 }
984
985 void add_boxing_late_inline(CallGenerator* cg) {
986 _boxing_late_inlines.push(cg);
987 }
988
989 void add_vector_reboxing_late_inline(CallGenerator* cg) {
990 _vector_reboxing_late_inlines.push(cg);
991 }
992
993 void add_native_invoker(RuntimeStub* stub);
994
995 const GrowableArray<RuntimeStub*> native_invokers() const { return _native_invokers; }
996
997 void remove_useless_nodes (GrowableArray<Node*>& node_list, Unique_Node_List &useful);
998
999 void remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Unique_Node_List &useful);
1000 void remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Node* dead);
1001
1002 void remove_useless_coarsened_locks(Unique_Node_List& useful);
1003
1004 void process_print_inlining();
1005 void dump_print_inlining();
1006
1007 bool over_inlining_cutoff() const {
1008 if (!inlining_incrementally()) {
1009 return unique() > (uint)NodeCountInliningCutoff;
1010 } else {
1011 // Give some room for incremental inlining algorithm to "breathe"
1012 // and avoid thrashing when live node count is close to the limit.
1013 // Keep in mind that live_nodes() isn't accurate during inlining until
1014 // dead node elimination step happens (see Compile::inline_incrementally).
1015 return live_nodes() > (uint)LiveNodeCountInliningCutoff * 11 / 10;
1016 }
1017 }
1018
1019 void inc_number_of_mh_late_inlines() { _number_of_mh_late_inlines++; }
1020 void dec_number_of_mh_late_inlines() { assert(_number_of_mh_late_inlines > 0, "_number_of_mh_late_inlines < 0 !"); _number_of_mh_late_inlines--; }
1021 bool has_mh_late_inlines() const { return _number_of_mh_late_inlines > 0; }
1022
1023 bool inline_incrementally_one();
1024 void inline_incrementally_cleanup(PhaseIterGVN& igvn);
1025 void inline_incrementally(PhaseIterGVN& igvn);
1026 void inline_string_calls(bool parse_time);
1027 void inline_boxing_calls(PhaseIterGVN& igvn);
1028 bool optimize_loops(PhaseIterGVN& igvn, LoopOptsMode mode);
1029 void remove_root_to_sfpts_edges(PhaseIterGVN& igvn);
1030
1031 void inline_vector_reboxing_calls();
1032 bool has_vbox_nodes();
1033
1034 void process_late_inline_calls_no_inline(PhaseIterGVN& igvn);
1035
1036 // Matching, CFG layout, allocation, code generation
1037 PhaseCFG* cfg() { return _cfg; }
1038 bool has_java_calls() const { return _java_calls > 0; }
1039 int java_calls() const { return _java_calls; }
1040 int inner_loops() const { return _inner_loops; }
1041 Matcher* matcher() { return _matcher; }
1042 PhaseRegAlloc* regalloc() { return _regalloc; }
1043 RegMask& FIRST_STACK_mask() { return _FIRST_STACK_mask; }
1044 Arena* indexSet_arena() { return _indexSet_arena; }
1045 void* indexSet_free_block_list() { return _indexSet_free_block_list; }
1046 DebugInformationRecorder* debug_info() { return env()->debug_info(); }
1047
1048 void update_interpreter_frame_size(int size) {
1049 if (_interpreter_frame_size < size) {
1050 _interpreter_frame_size = size;
1051 }
1052 }
1053
1054 void set_matcher(Matcher* m) { _matcher = m; }
1055//void set_regalloc(PhaseRegAlloc* ra) { _regalloc = ra; }
1056 void set_indexSet_arena(Arena* a) { _indexSet_arena = a; }
1057 void set_indexSet_free_block_list(void* p) { _indexSet_free_block_list = p; }
1058
1059 void set_java_calls(int z) { _java_calls = z; }
1060 void set_inner_loops(int z) { _inner_loops = z; }
1061
1062 Dependencies* dependencies() { return env()->dependencies(); }
1063
1064 // Major entry point. Given a Scope, compile the associated method.
1065 // For normal compilations, entry_bci is InvocationEntryBci. For on stack
1066 // replacement, entry_bci indicates the bytecode for which to compile a
1067 // continuation.
1068 Compile(ciEnv* ci_env, ciMethod* target,
1069 int entry_bci, Options options, DirectiveSet* directive);
1070
1071 // Second major entry point. From the TypeFunc signature, generate code
1072 // to pass arguments from the Java calling convention to the C calling
1073 // convention.
1074 Compile(ciEnv* ci_env, const TypeFunc *(*gen)(),
1075 address stub_function, const char *stub_name,
1076 int is_fancy_jump, bool pass_tls,
1077 bool return_pc, DirectiveSet* directive);
1078
1079 // Are we compiling a method?
1080 bool has_method() { return method() != NULL; }
1081
1082 // Maybe print some information about this compile.
1083 void print_compile_messages();
1084
1085 // Final graph reshaping, a post-pass after the regular optimizer is done.
1086 bool final_graph_reshaping();
1087
1088 // returns true if adr is completely contained in the given alias category
1089 bool must_alias(const TypePtr* adr, int alias_idx);
1090
1091 // returns true if adr overlaps with the given alias category
1092 bool can_alias(const TypePtr* adr, int alias_idx);
1093
1094 // Stack slots that may be unused by the calling convention but must
1095 // otherwise be preserved. On Intel this includes the return address.
1096 // On PowerPC it includes the 4 words holding the old TOC & LR glue.
1097 uint in_preserve_stack_slots() {
1098 return SharedRuntime::in_preserve_stack_slots();
1099 }
1100
1101 // "Top of Stack" slots that may be unused by the calling convention but must
1102 // otherwise be preserved.
1103 // On Intel these are not necessary and the value can be zero.
1104 static uint out_preserve_stack_slots() {
1105 return SharedRuntime::out_preserve_stack_slots();
1106 }
1107
1108 // Number of outgoing stack slots killed above the out_preserve_stack_slots
1109 // for calls to C. Supports the var-args backing area for register parms.
1110 uint varargs_C_out_slots_killed() const;
1111
1112 // Number of Stack Slots consumed by a synchronization entry
1113 int sync_stack_slots() const;
1114
1115 // Compute the name of old_SP. See <arch>.ad for frame layout.
1116 OptoReg::Name compute_old_SP();
1117
1118 private:
1119 // Phase control:
1120 void Init(int aliaslevel); // Prepare for a single compilation
1121 int Inline_Warm(); // Find more inlining work.
1122 void Finish_Warm(); // Give up on further inlines.
1123 void Optimize(); // Given a graph, optimize it
1124 void Code_Gen(); // Generate code from a graph
1125
1126 // Management of the AliasType table.
1127 void grow_alias_types();
1128 AliasCacheEntry* probe_alias_cache(const TypePtr* adr_type);
1129 const TypePtr *flatten_alias_type(const TypePtr* adr_type) const;
1130 AliasType* find_alias_type(const TypePtr* adr_type, bool no_create, ciField* field);
1131
1132 void verify_top(Node*) const PRODUCT_RETURN;
1133
1134 // Intrinsic setup.
1135 CallGenerator* make_vm_intrinsic(ciMethod* m, bool is_virtual); // constructor
1136 int intrinsic_insertion_index(ciMethod* m, bool is_virtual, bool& found); // helper
1137 CallGenerator* find_intrinsic(ciMethod* m, bool is_virtual); // query fn
1138 void register_intrinsic(CallGenerator* cg); // update fn
1139
1140#ifndef PRODUCT
1141 static juint _intrinsic_hist_count[];
1142 static jubyte _intrinsic_hist_flags[];
1143#endif
1144 // Function calls made by the public function final_graph_reshaping.
1145 // No need to be made public as they are not called elsewhere.
1146 void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc);
1147 void final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& frc, uint nop);
1148 void final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Reshape_Counts &frc );
1149 void eliminate_redundant_card_marks(Node* n);
1150
1151 // Logic cone optimization.
1152 void optimize_logic_cones(PhaseIterGVN &igvn);
1153 void collect_logic_cone_roots(Unique_Node_List& list);
1154 void process_logic_cone_root(PhaseIterGVN &igvn, Node* n, VectorSet& visited);
1155 bool compute_logic_cone(Node* n, Unique_Node_List& partition, Unique_Node_List& inputs);
1156 uint compute_truth_table(Unique_Node_List& partition, Unique_Node_List& inputs);
1157 uint eval_macro_logic_op(uint func, uint op1, uint op2, uint op3);
1158 Node* xform_to_MacroLogicV(PhaseIterGVN &igvn, const TypeVect* vt, Unique_Node_List& partitions, Unique_Node_List& inputs);
1159 void check_no_dead_use() const NOT_DEBUG_RETURN;
1160
1161 public:
1162
1163 // Note: Histogram array size is about 1 Kb.
1164 enum { // flag bits:
1165 _intrinsic_worked = 1, // succeeded at least once
1166 _intrinsic_failed = 2, // tried it but it failed
1167 _intrinsic_disabled = 4, // was requested but disabled (e.g., -XX:-InlineUnsafeOps)
1168 _intrinsic_virtual = 8, // was seen in the virtual form (rare)
1169 _intrinsic_both = 16 // was seen in the non-virtual form (usual)
1170 };
1171 // Update histogram. Return boolean if this is a first-time occurrence.
1172 static bool gather_intrinsic_statistics(vmIntrinsics::ID id,
1173 bool is_virtual, int flags) PRODUCT_RETURN0;
1174 static void print_intrinsic_statistics() PRODUCT_RETURN;
1175
1176 // Graph verification code
1177 // Walk the node list, verifying that there is a one-to-one
1178 // correspondence between Use-Def edges and Def-Use edges
1179 // The option no_dead_code enables stronger checks that the
1180 // graph is strongly connected from root in both directions.
1181 void verify_graph_edges(bool no_dead_code = false) PRODUCT_RETURN;
1182
1183 // End-of-run dumps.
1184 static void print_statistics() PRODUCT_RETURN;
1185
1186 // Verify ADLC assumptions during startup
1187 static void adlc_verification() PRODUCT_RETURN;
1188
1189 // Definitions of pd methods
1190 static void pd_compiler2_init();
1191
1192 // Static parse-time type checking logic for gen_subtype_check:
1193 enum { SSC_always_false, SSC_always_true, SSC_easy_test, SSC_full_test };
1194 int static_subtype_check(ciKlass* superk, ciKlass* subk);
1195
1196 static Node* conv_I2X_index(PhaseGVN* phase, Node* offset, const TypeInt* sizetype,
1197 // Optional control dependency (for example, on range check)
1198 Node* ctrl = NULL);
1199
1200 // Convert integer value to a narrowed long type dependent on ctrl (for example, a range check)
1201 static Node* constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* itype, Node* ctrl, bool carry_dependency = false);
1202
1203 // Auxiliary methods for randomized fuzzing/stressing
1204 int random();
1205 bool randomized_select(int count);
1206
1207 // supporting clone_map
1208 CloneMap& clone_map();
1209 void set_clone_map(Dict* d);
1210
1211 bool needs_clinit_barrier(ciField* ik, ciMethod* accessing_method);
1212 bool needs_clinit_barrier(ciMethod* ik, ciMethod* accessing_method);
1213 bool needs_clinit_barrier(ciInstanceKlass* ik, ciMethod* accessing_method);
1214
1215#ifdef IA32
1216 private:
1217 bool _select_24_bit_instr; // We selected an instruction with a 24-bit result
1218 bool _in_24_bit_fp_mode; // We are emitting instructions with 24-bit results
1219
1220 // Remember if this compilation changes hardware mode to 24-bit precision.
1221 void set_24_bit_selection_and_mode(bool selection, bool mode) {
1222 _select_24_bit_instr = selection;
1223 _in_24_bit_fp_mode = mode;
1224 }
1225
1226 public:
1227 bool select_24_bit_instr() const { return _select_24_bit_instr; }
1228 bool in_24_bit_fp_mode() const { return _in_24_bit_fp_mode; }
1229#endif // IA32
1230#ifdef ASSERT
1231 bool _type_verify_symmetry;
1232 void set_exception_backedge() { _exception_backedge = true; }
1233 bool has_exception_backedge() const { return _exception_backedge; }
1234#endif
1235
1236 static bool push_thru_add(PhaseGVN* phase, Node* z, const TypeInteger* tz, const TypeInteger*& rx, const TypeInteger*& ry,
1237 BasicType bt);
1238
1239 static Node* narrow_value(BasicType bt, Node* value, const Type* type, PhaseGVN* phase, bool transform_res);
1240};
1241
1242#endif // SHARE_OPTO_COMPILE_HPP
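
For orientation, the following standalone sketch (plain C++, not HotSpot code; the SimpleBitSet and CompileSketch names and the main driver are illustrative assumptions) mirrors the dead-node bookkeeping declared at lines 816-826 above: record_dead_node uses a test-and-set to avoid double-counting a node reported dead twice, and live_nodes subtracts the dead count from the number of nodes ever created.

  #include <cassert>
  #include <cstdint>
  #include <iostream>
  #include <vector>

  // Illustrative stand-in for VectorSet (see vectset.hpp below); not HotSpot code.
  class SimpleBitSet {
    std::vector<uint32_t> _data;
  public:
    // Returns the previous value of the bit, then sets it -- the same contract
    // as VectorSet::test_set(), which record_dead_node() relies on.
    bool test_set(uint32_t elem) {
      uint32_t word = elem >> 5;
      if (word >= _data.size()) _data.resize(word + 1, 0);
      uint32_t mask = 1u << (elem & 31);
      bool was_set = (_data[word] & mask) != 0;
      _data[word] |= mask;
      return was_set;
    }
  };

  // Sketch of the Compile bookkeeping from compile.hpp lines 816-826.
  class CompileSketch {
    SimpleBitSet _dead_node_list;
    uint32_t _dead_node_count = 0;
    uint32_t _unique = 0;              // number of nodes ever created
  public:
    uint32_t make_node() { return _unique++; }
    void record_dead_node(uint32_t idx) {
      if (_dead_node_list.test_set(idx)) return;   // already recorded
      _dead_node_count++;
    }
    uint32_t live_nodes() const {
      int val = _unique - _dead_node_count;
      assert(val >= 0 && "more tracked dead nodes than created nodes");
      return static_cast<uint32_t>(val);
    }
  };

  int main() {
    CompileSketch c;
    uint32_t a = c.make_node();
    c.make_node();
    c.record_dead_node(a);
    c.record_dead_node(a);                        // second call is a no-op
    std::cout << "live nodes: " << c.live_nodes() << '\n';   // prints 1
    return 0;
  }

Relatedly, the 10% headroom in over_inlining_cutoff (line 1015) means that, for a hypothetical LiveNodeCountInliningCutoff of 40,000, incremental inlining may continue until roughly 44,000 live nodes.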

/home/daniel/Projects/java/jdk/src/hotspot/share/libadt/vectset.hpp

1/*
2 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#ifndef SHARE_LIBADT_VECTSET_HPP
26#define SHARE_LIBADT_VECTSET_HPP
27
28#include "memory/allocation.hpp"
29#include "utilities/copy.hpp"
30
31// Vector Sets
32
33// These sets can grow or shrink, based on the initial size and the largest
34// element currently in them.
35
36//------------------------------VectorSet--------------------------------------
37class VectorSet : public ResourceObj {
38private:
39
40 static const uint word_bits = 5;
41 static const uint bit_mask = 31;
42
43 // Used 32-bit words
44 uint _size;
45 uint32_t* _data;
46 // Allocated words
47 uint _data_size;
48 Arena* _set_arena;
49
50 void init(Arena* arena);
51 // Grow vector to required word capacity
52 void grow(uint new_word_capacity);
53public:
54 VectorSet();
55 VectorSet(Arena* arena);
56 ~VectorSet() {}
57
58 void insert(uint elem);
59 bool is_empty() const;
60 void reset() {
61 _size = 0;
62 }
63 void clear() {
64 reset();
65 }
66
67 // Fast inlined "test and set". Replaces the idiom:
68 // if (visited.test(idx)) return;
69 // visited.set(idx);
70 // With:
71 // if (visited.test_set(idx)) return;
72 //
73 bool test_set(uint elem) {
74 uint32_t word = elem >> word_bits;
 75 if (word >= _size) {
32.1
'word' is >= field '_size'
33
Taking true branch
76 // Then grow
77 grow(word);
78 }
79 uint32_t mask = 1U << (elem & bit_mask);
80 uint32_t data = _data[word];
81 _data[word] = data | mask;
82 return (data & mask) != 0;
34
Assuming the condition is false
35
Returning zero, which participates in a condition later
83 }
84
85 // Fast inlined test
86 bool test(uint elem) const {
87 uint32_t word = elem >> word_bits;
88 if (word >= _size) {
89 return false;
90 }
91 uint32_t mask = 1U << (elem & bit_mask);
92 return (_data[word] & mask) != 0;
93 }
94
95 void remove(uint elem) {
96 uint32_t word = elem >> word_bits;
97 if (word >= _size) {
98 return;
99 }
100 uint32_t mask = 1U << (elem & bit_mask);
101 _data[word] &= ~mask; // Clear bit
102 }
103
104 // Fast inlined set
105 void set(uint elem) {
106 uint32_t word = elem >> word_bits;
107 if (word >= _size) {
108 grow(word);
109 }
110 uint32_t mask = 1U << (elem & bit_mask);
111 _data[word] |= mask;
112 }
113};
114
115#endif // SHARE_LIBADT_VECTSET_HPP
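
As a quick aid to reading test and test_set above: the set stores one membership bit per element in 32-bit words, so elem >> word_bits (that is, elem >> 5) selects the word and 1U << (elem & bit_mask) selects the bit within it. A minimal standalone check of that arithmetic (plain C++, not HotSpot code; the element value 70 is a hypothetical example, and the fresh word is assumed zero-initialized here for illustration):

  #include <cassert>
  #include <cstdint>

  int main() {
    const uint32_t word_bits = 5;    // matches VectorSet::word_bits
    const uint32_t bit_mask  = 31;   // matches VectorSet::bit_mask

    uint32_t elem = 70;              // hypothetical element
    uint32_t word = elem >> word_bits;          // 70 / 32 == 2
    uint32_t mask = 1u << (elem & bit_mask);    // bit 6 within word 2
    assert(word == 2);
    assert(mask == (1u << 6));

    // On the first insertion of an element the previous bit is clear, so the
    // test_set pattern reports "not previously set".
    uint32_t data = 0;               // assumed freshly grown, zeroed word
    bool was_set = (data & mask) != 0;
    data |= mask;
    assert(!was_set && (data & mask) != 0);
    return 0;
  }

This is consistent with the analyzer notes in test_set above, where the condition (data & mask) != 0 is assumed false and the call returns zero (steps 34-35).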