File: jdk/src/hotspot/share/opto/cfgnode.cpp
Warning: line 1442, column 7: Called C++ object pointer is null
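
Note: the flagged line (1442, column 7) lies beyond the excerpt reproduced below. As a minimal sketch of this warning class only (assuming the unchecked phase->is_IterGVN() pattern that appears elsewhere in this file; the nodes n and m are hypothetical), the analyzer reports a call made through a pointer that can be null on some path:

    PhaseIterGVN* igvn = phase->is_IterGVN();  // returns NULL when the phase is not an IterGVN
    igvn->replace_node(n, m);                  // flagged: 'igvn' may be null here
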
/*
 * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/connode.hpp"
#include "opto/convertnode.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"
#include "opto/movenode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/regmask.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "opto/vectornode.hpp"
#include "utilities/vmError.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

//=============================================================================
//------------------------------Value------------------------------------------
// Compute the type of the RegionNode.
const Type* RegionNode::Value(PhaseGVN* phase) const {
  for( uint i=1; i<req(); ++i ) {       // For all paths in
    Node *n = in(i);            // Get Control source
    if( !n ) continue;          // Missing inputs are TOP
    if( phase->type(n) == Type::CONTROL )
      return Type::CONTROL;
  }
  return Type::TOP;             // All paths dead?  Then so are we
}

//------------------------------Identity---------------------------------------
// Check for Region being Identity.
Node* RegionNode::Identity(PhaseGVN* phase) {
  // Cannot have Region be an identity, even if it has only 1 input.
  // Phi users cannot have their Region input folded away for them,
  // since they need to select the proper data input
  return this;
}

//------------------------------merge_region-----------------------------------
// If a Region flows into a Region, merge into one big happy merge.  This is
// hard to do if there is stuff that has to happen
static Node *merge_region(RegionNode *region, PhaseGVN *phase) {
  if( region->Opcode() != Op_Region ) // Do not do to LoopNodes
    return NULL;
  Node *progress = NULL;        // Progress flag
  PhaseIterGVN *igvn = phase->is_IterGVN();

  uint rreq = region->req();
  for( uint i = 1; i < rreq; i++ ) {
    Node *r = region->in(i);
    if( r && r->Opcode() == Op_Region && // Found a region?
        r->in(0) == r &&        // Not already collapsed?
        r != region &&          // Avoid stupid situations
        r->outcnt() == 2 ) {    // Self user and 'region' user only?
      assert(!r->as_Region()->has_phi(), "no phi users");
      if( !progress ) {         // No progress
        if (region->has_phi()) {
          return NULL;          // Only flatten if no Phi users
          // igvn->hash_delete( phi );
        }
        igvn->hash_delete( region );
        progress = region;      // Making progress
      }
      igvn->hash_delete( r );

      // Append inputs to 'r' onto 'region'
      for( uint j = 1; j < r->req(); j++ ) {
        // Move an input from 'r' to 'region'
        region->add_req(r->in(j));
        r->set_req(j, phase->C->top());
        // Update phis of 'region'
        //for( uint k = 0; k < max; k++ ) {
        //  Node *phi = region->out(k);
        //  if( phi->is_Phi() ) {
        //    phi->add_req(phi->in(i));
        //  }
        //}

        rreq++;                 // One more input to Region
      } // Found a region to merge into Region
      igvn->_worklist.push(r);
      // Clobber pointer to the now dead 'r'
      region->set_req(i, phase->C->top());
    }
  }

  return progress;
}



//--------------------------------has_phi--------------------------------------
// Helper function: Return any PhiNode that uses this region or NULL
PhiNode* RegionNode::has_phi() const {
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    Node* phi = fast_out(i);
    if (phi->is_Phi()) {   // Check for Phi users
      assert(phi->in(0) == (Node*)this, "phi uses region only via in(0)");
      return phi->as_Phi();  // this one is good enough
    }
  }

  return NULL;
}


//-----------------------------has_unique_phi----------------------------------
// Helper function: Return the only PhiNode that uses this region or NULL
PhiNode* RegionNode::has_unique_phi() const {
  // Check that only one use is a Phi
  PhiNode* only_phi = NULL;
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    Node* phi = fast_out(i);
    if (phi->is_Phi()) {   // Check for Phi users
      assert(phi->in(0) == (Node*)this, "phi uses region only via in(0)");
      if (only_phi == NULL) {
        only_phi = phi->as_Phi();
      } else {
        return NULL;  // multiple phis
      }
    }
  }

  return only_phi;
}


//------------------------------check_phi_clipping-----------------------------
// Helper function for RegionNode's identification of FP clipping
// Check inputs to the Phi
static bool check_phi_clipping( PhiNode *phi, ConNode * &min, uint &min_idx, ConNode * &max, uint &max_idx, Node * &val, uint &val_idx ) {
  min     = NULL;
  max     = NULL;
  val     = NULL;
  min_idx = 0;
  max_idx = 0;
  val_idx = 0;
  uint  phi_max = phi->req();
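  // The clipping Phi merges exactly three data inputs (req() == 4 counting the control edge):
  // two ConI limits and the converted value.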
  if( phi_max == 4 ) {
    for( uint j = 1; j < phi_max; ++j ) {
      Node *n = phi->in(j);
      int opcode = n->Opcode();
      switch( opcode ) {
      case Op_ConI:
        {
          if( min == NULL ) {
            min     = n->Opcode() == Op_ConI ? (ConNode*)n : NULL;
            min_idx = j;
          } else {
            max     = n->Opcode() == Op_ConI ? (ConNode*)n : NULL;
            max_idx = j;
            if( min->get_int() > max->get_int() ) {
              // Swap min and max
              ConNode *temp;
              uint     temp_idx;
              temp     = min;     min     = max;     max     = temp;
              temp_idx = min_idx; min_idx = max_idx; max_idx = temp_idx;
            }
          }
        }
        break;
      default:
        {
          val = n;
          val_idx = j;
        }
        break;
      }
    }
  }
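  // Accept the pattern only if two ConI limits and a value input were found
  // and the clipping range straddles zero (min <= 0 <= max).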
  return ( min && max && val && (min->get_int() <= 0) && (max->get_int() >=0) );
}


//------------------------------check_if_clipping------------------------------
// Helper function for RegionNode's identification of FP clipping
// Check that inputs to Region come from two IfNodes,
//
//            If
//      False    True
//       If        |
//  False  True    |
//    |      |     |
//  RegionNode_inputs
//
static bool check_if_clipping( const RegionNode *region, IfNode * &bot_if, IfNode * &top_if ) {
  top_if = NULL;
  bot_if = NULL;

  // Check control structure above RegionNode for (if ( if ) )
  Node *in1 = region->in(1);
  Node *in2 = region->in(2);
  Node *in3 = region->in(3);
  // Check that all inputs are projections
  if( in1->is_Proj() && in2->is_Proj() && in3->is_Proj() ) {
    Node *in10 = in1->in(0);
    Node *in20 = in2->in(0);
    Node *in30 = in3->in(0);
    // Check that #1 and #2 are ifTrue and ifFalse from same If
    if( in10 != NULL && in10->is_If() &&
        in20 != NULL && in20->is_If() &&
        in30 != NULL && in30->is_If() && in10 == in20 &&
        (in1->Opcode() != in2->Opcode()) ) {
      Node  *in100 = in10->in(0);
      Node *in1000 = (in100 != NULL && in100->is_Proj()) ? in100->in(0) : NULL;
      // Check that control for in10 comes from other branch of IF from in3
      if( in1000 != NULL && in1000->is_If() &&
          in30 == in1000 && (in3->Opcode() != in100->Opcode()) ) {
        // Control pattern checks
        top_if = (IfNode*)in1000;
        bot_if = (IfNode*)in10;
      }
    }
  }

  return (top_if != NULL);
}


//------------------------------check_convf2i_clipping-------------------------
// Helper function for RegionNode's identification of FP clipping
// Verify that the value input to the phi comes from "ConvF2I; LShift; RShift"
static bool check_convf2i_clipping( PhiNode *phi, uint idx, ConvF2INode * &convf2i, Node *min, Node *max) {
  convf2i = NULL;

  // Check for the RShiftNode
  Node *rshift = phi->in(idx);
  assert( rshift, "Previous checks ensure phi input is present");
  if( rshift->Opcode() != Op_RShiftI )  { return false; }

  // Check for the LShiftNode
  Node *lshift = rshift->in(1);
  assert( lshift, "Previous checks ensure phi input is present");
  if( lshift->Opcode() != Op_LShiftI )  { return false; }

  // Check for the ConvF2INode
  Node *conv = lshift->in(1);
  if( conv->Opcode() != Op_ConvF2I ) { return false; }

  // Check that shift amounts are only to get sign bits set after F2I
  jint max_cutoff     = max->get_int();
  jint min_cutoff     = min->get_int();
  jint left_shift     = lshift->in(2)->get_int();
  jint right_shift    = rshift->in(2)->get_int();
  jint max_post_shift = nth_bit(BitsPerJavaInteger - left_shift - 1);
  if( left_shift != right_shift ||
      0 > left_shift || left_shift >= BitsPerJavaInteger ||
      max_post_shift < max_cutoff ||
      max_post_shift < -min_cutoff ) {
    // Shifts are necessary but current transformation eliminates them
    return false;
  }

  // OK to return the result of ConvF2I without shifting
  convf2i = (ConvF2INode*)conv;
  return true;
}


//------------------------------check_compare_clipping-------------------------
// Helper function for RegionNode's identification of FP clipping
static bool check_compare_clipping( bool less_than, IfNode *iff, ConNode *limit, Node * & input ) {
  Node *i1 = iff->in(1);
  if ( !i1->is_Bool() ) { return false; }
  BoolNode *bool1 = i1->as_Bool();
  if(  less_than && bool1->_test._test != BoolTest::le ) { return false; }
  else if( !less_than && bool1->_test._test != BoolTest::lt ) { return false; }
  const Node *cmpF = bool1->in(1);
  if( cmpF->Opcode() != Op_CmpF ) { return false; }
  // Test that the float value being compared against
  // is equivalent to the int value used as a limit
  Node *nodef = cmpF->in(2);
  if( nodef->Opcode() != Op_ConF ) { return false; }
  jfloat conf = nodef->getf();
  jint   coni = limit->get_int();
  if( ((int)conf) != coni )  { return false; }
  input = cmpF->in(1);
  return true;
}

//------------------------------is_unreachable_region--------------------------
// Find if the Region node is reachable from the root.
bool RegionNode::is_unreachable_region(const PhaseGVN* phase) {
  Node* top = phase->C->top();
  assert(req() == 2 || (req() == 3 && in(1) != NULL && in(2) == top), "sanity check arguments");
  if (_is_unreachable_region) {
    // Return cached result from previous evaluation which should still be valid
    assert(is_unreachable_from_root(phase), "walk the graph again and check if its indeed unreachable");
    return true;
  }

  // First, cut the simple case of fallthrough region when NONE of
  // region's phis references itself directly or through a data node.
  if (is_possible_unsafe_loop(phase)) {
    // If we have a possible unsafe loop, check if the region node is actually unreachable from root.
    if (is_unreachable_from_root(phase)) {
      _is_unreachable_region = true;
      return true;
    }
  }
  return false;
}

bool RegionNode::is_possible_unsafe_loop(const PhaseGVN* phase) const {
  uint max = outcnt();
  uint i;
  for (i = 0; i < max; i++) {
    Node* n = raw_out(i);
    if (n != NULL && n->is_Phi()) {
      PhiNode* phi = n->as_Phi();
      assert(phi->in(0) == this, "sanity check phi");
      if (phi->outcnt() == 0) {
        continue; // Safe case - no loops
      }
      if (phi->outcnt() == 1) {
        Node* u = phi->raw_out(0);
        // Skip if the only use is another Phi or Call or Uncommon trap.
        // It is safe to consider this case as fallthrough.
        if (u != NULL && (u->is_Phi() || u->is_CFG())) {
          continue;
        }
      }
      // Check when phi references itself directly or through another node.
      if (phi->as_Phi()->simple_data_loop_check(phi->in(1)) >= PhiNode::Unsafe) {
        break; // Found possible unsafe data loop.
      }
    }
  }
  if (i >= max) {
    return false; // An unsafe case was NOT found - don't need graph walk.
  }
  return true;
}

bool RegionNode::is_unreachable_from_root(const PhaseGVN* phase) const {
  ResourceMark rm;
  Node_List nstack;
  VectorSet visited;

  // Mark all control nodes reachable from root outputs
  Node *n = (Node*)phase->C->root();
  nstack.push(n);
  visited.set(n->_idx);
  while (nstack.size() != 0) {
    n = nstack.pop();
    uint max = n->outcnt();
    for (uint i = 0; i < max; i++) {
      Node* m = n->raw_out(i);
      if (m != NULL && m->is_CFG()) {
        if (m == this) {
          return false; // We reached the Region node - it is not dead.
        }
        if (!visited.test_set(m->_idx))
          nstack.push(m);
      }
    }
  }
  return true; // The Region node is unreachable - it is dead.
}

bool RegionNode::try_clean_mem_phi(PhaseGVN *phase) {
  // Incremental inlining + PhaseStringOpts sometimes produce:
  //
  //        cmpP with 1 top input
  //           |
  //          If
  //         /  \
  //   IfFalse  IfTrue  /- Some Node
  //         \  /      /    /
  //        Region    / /-MergeMem
  //             \---Phi
  //
  //
  // It's expected by PhaseStringOpts that the Region goes away and is
  // replaced by If's control input but because there's still a Phi,
  // the Region stays in the graph. The top input from the cmpP is
  // propagated forward and a subgraph that is useful goes away. The
  // code below replaces the Phi with the MergeMem so that the Region
  // is simplified.

  PhiNode* phi = has_unique_phi();
  if (phi && phi->type() == Type::MEMORY && req() == 3 && phi->is_diamond_phi(true)) {
    MergeMemNode* m = NULL;
    assert(phi->req() == 3, "same as region");
    for (uint i = 1; i < 3; ++i) {
      Node *mem = phi->in(i);
      if (mem && mem->is_MergeMem() && in(i)->outcnt() == 1) {
        // Nothing is control-dependent on path #i except the region itself.
        m = mem->as_MergeMem();
        uint j = 3 - i;
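        // j indexes the other diamond path: with req() == 3 the data paths are 1 and 2, so j == 3 - i.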
        Node* other = phi->in(j);
        if (other && other == m->base_memory()) {
          // m is a successor memory to other, and is not pinned inside the diamond, so push it out.
          // This will allow the diamond to collapse completely.
          phase->is_IterGVN()->replace_node(phi, m);
          return true;
        }
      }
    }
  }
  return false;
}

//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.  Must preserve
// the CFG, but we can still strip out dead paths.
Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if( !can_reshape && !in(0) ) return NULL;     // Already degraded to a Copy
  assert(!in(0) || !in(0)->is_Root(), "not a specially hidden merge");

  // Check for RegionNode with no Phi users and both inputs come from either
  // arm of the same IF.  If found, then the control-flow split is useless.
  bool has_phis = false;
  if (can_reshape) {            // Need DU info to check for Phi users
    has_phis = (has_phi() != NULL);       // Cache result
    if (has_phis && try_clean_mem_phi(phase)) {
      has_phis = false;
    }

    if (!has_phis) {            // No Phi users?  Nothing merging?
      for (uint i = 1; i < req()-1; i++) {
        Node *if1 = in(i);
        if( !if1 ) continue;
        Node *iff = if1->in(0);
        if( !iff || !iff->is_If() ) continue;
        for( uint j=i+1; j<req(); j++ ) {
          if( in(j) && in(j)->in(0) == iff &&
              if1->Opcode() != in(j)->Opcode() ) {
            // Add the IF Projections to the worklist. They (and the IF itself)
            // will be eliminated if dead.
            phase->is_IterGVN()->add_users_to_worklist(iff);
            set_req(i, iff->in(0));// Skip around the useless IF diamond
            set_req(j, NULL);
            return this;      // Record progress
          }
        }
      }
    }
  }

  // Remove TOP or NULL input paths. If only 1 input path remains, this Region
  // degrades to a copy.
  bool add_to_worklist = false;
  bool modified = false;
  int cnt = 0;                  // Count of values merging
  DEBUG_ONLY( int cnt_orig = req(); ) // Save original inputs count
  int del_it = 0;               // The last input path we delete
  // For all inputs...
  for( uint i=1; i<req(); ++i ){// For all paths in
    Node *n = in(i);            // Get the input
    if( n != NULL ) {
      // Remove useless control copy inputs
      if( n->is_Region() && n->as_Region()->is_copy() ) {
        set_req(i, n->nonnull_req());
        modified = true;
        i--;
        continue;
      }
      if( n->is_Proj() ) {      // Remove useless rethrows
        Node *call = n->in(0);
        if (call->is_Call() && call->as_Call()->entry_point() == OptoRuntime::rethrow_stub()) {
          set_req(i, call->in(0));
          modified = true;
          i--;
          continue;
        }
      }
      if( phase->type(n) == Type::TOP ) {
        set_req(i, NULL);       // Ignore TOP inputs
        modified = true;
        i--;
        continue;
      }
      cnt++;                    // One more value merging

    } else if (can_reshape) {   // Else found dead path with DU info
      PhaseIterGVN *igvn = phase->is_IterGVN();
      del_req(i);               // Yank path from self
      del_it = i;
      uint max = outcnt();
      DUIterator j;
      bool progress = true;
      while(progress) {         // Need to establish property over all users
        progress = false;
        for (j = outs(); has_out(j); j++) {
          Node *n = out(j);
          if( n->req() != req() && n->is_Phi() ) {
            assert( n->in(0) == this, "" );
            igvn->hash_delete(n); // Yank from hash before hacking edges
            n->set_req_X(i,NULL,igvn);// Correct DU info
            n->del_req(i);        // Yank path from Phis
            if( max != outcnt() ) {
              progress = true;
              j = refresh_out_pos(j);
              max = outcnt();
            }
          }
        }
      }
      add_to_worklist = true;
      i--;
    }
  }

  if (can_reshape && cnt == 1) {
    // Is it a dead loop?
    // If it is a LoopNode, it had 2 (+1 itself) inputs and
    // one of them was cut. The loop is dead if it was the EntryControl.
    // A Loop node may have only one input because the entry path
    // is removed in PhaseIdealLoop::Dominators().
    assert(!this->is_Loop() || cnt_orig <= 3, "Loop node should have 3 or less inputs");
    if ((this->is_Loop() && (del_it == LoopNode::EntryControl ||
                             (del_it == 0 && is_unreachable_region(phase)))) ||
        (!this->is_Loop() && has_phis && is_unreachable_region(phase))) {
      // Yes, the region will be removed during the next step below.
      // Cut the backedge input and remove phis since no data paths left.
      // We don't cut outputs to other nodes here since we need to put them
      // on the worklist.
      PhaseIterGVN *igvn = phase->is_IterGVN();
      if (in(1)->outcnt() == 1) {
        igvn->_worklist.push(in(1));
      }
      del_req(1);
      cnt = 0;
      assert( req() == 1, "no more inputs expected" );
      uint max = outcnt();
      bool progress = true;
      Node *top = phase->C->top();
      DUIterator j;
      while(progress) {
        progress = false;
        for (j = outs(); has_out(j); j++) {
          Node *n = out(j);
          if( n->is_Phi() ) {
            assert(n->in(0) == this, "");
            assert( n->req() == 2 &&  n->in(1) != NULL, "Only one data input expected" );
            // Break dead loop data path.
            // Eagerly replace phis with top to avoid regionless phis.
            igvn->replace_node(n, top);
            if( max != outcnt() ) {
              progress = true;
              j = refresh_out_pos(j);
              max = outcnt();
            }
          }
        }
      }
      add_to_worklist = true;
    }
  }
  if (add_to_worklist) {
    phase->is_IterGVN()->add_users_to_worklist(this); // Revisit collapsed Phis
  }

  if( cnt <= 1 ) {              // Only 1 path in?
    set_req(0, NULL);           // Null control input for region copy
    if( cnt == 0 && !can_reshape) { // Parse phase - leave the node as it is.
      // No inputs or all inputs are NULL.
      return NULL;
    } else if (can_reshape) {   // Optimization phase - remove the node
      PhaseIterGVN *igvn = phase->is_IterGVN();
      // Strip mined (inner) loop is going away, remove outer loop.
      if (is_CountedLoop() &&
          as_Loop()->is_strip_mined()) {
        Node* outer_sfpt = as_CountedLoop()->outer_safepoint();
        Node* outer_out = as_CountedLoop()->outer_loop_exit();
        if (outer_sfpt != NULL && outer_out != NULL) {
          Node* in = outer_sfpt->in(0);
          igvn->replace_node(outer_out, in);
          LoopNode* outer = as_CountedLoop()->outer_loop();
          igvn->replace_input_of(outer, LoopNode::LoopBackControl, igvn->C->top());
        }
      }
      if (is_CountedLoop()) {
        Node* opaq = as_CountedLoop()->is_canonical_loop_entry();
        if (opaq != NULL) {
          // This is not a loop anymore. No need to keep the Opaque1 node on the test that guards the loop as it won't be
          // subject to further loop opts.
          assert(opaq->Opcode() == Op_Opaque1, "");
          igvn->replace_node(opaq, opaq->in(1));
        }
      }
      Node *parent_ctrl;
      if( cnt == 0 ) {
        assert( req() == 1, "no inputs expected" );
        // During IGVN phase such region will be subsumed by TOP node
        // so region's phis will have TOP as control node.
        // Kill phis here to avoid it.
        // Also set other user's input to top.
        parent_ctrl = phase->C->top();
      } else {
        // The fallthrough case since we already checked dead loops above.
        parent_ctrl = in(1);
        assert(parent_ctrl != NULL, "Region is a copy of some non-null control");
        assert(parent_ctrl != this, "Close dead loop");
      }
      if (!add_to_worklist)
        igvn->add_users_to_worklist(this); // Check for further allowed opts
      for (DUIterator_Last imin, i = last_outs(imin); i >= imin; --i) {
        Node* n = last_out(i);
        igvn->hash_delete(n); // Remove from worklist before modifying edges
        if (n->outcnt() == 0) {
          int uses_found = n->replace_edge(this, phase->C->top(), igvn);
          if (uses_found > 1) { // (--i) done at the end of the loop.
            i -= (uses_found - 1);
          }
          continue;
        }
        if( n->is_Phi() ) {   // Collapse all Phis
          // Eagerly replace phis to avoid regionless phis.
          Node* in;
          if( cnt == 0 ) {
            assert( n->req() == 1, "No data inputs expected" );
            in = parent_ctrl; // replaced by top
          } else {
            assert( n->req() == 2 &&  n->in(1) != NULL, "Only one data input expected" );
            in = n->in(1);               // replaced by unique input
            if( n->as_Phi()->is_unsafe_data_reference(in) )
              in = phase->C->top();      // replaced by top
          }
          igvn->replace_node(n, in);
        }
        else if( n->is_Region() ) { // Update all incoming edges
          assert(n != this, "Must be removed from DefUse edges");
          int uses_found = n->replace_edge(this, parent_ctrl, igvn);
          if (uses_found > 1) { // (--i) done at the end of the loop.
            i -= (uses_found - 1);
          }
        }
        else {
          assert(n->in(0) == this, "Expect RegionNode to be control parent");
          n->set_req(0, parent_ctrl);
        }
#ifdef ASSERT
        for( uint k=0; k < n->req(); k++ ) {
          assert(n->in(k) != this, "All uses of RegionNode should be gone");
        }
#endif
      }
      // Remove the RegionNode itself from DefUse info
      igvn->remove_dead_node(this);
      return NULL;
    }
    return this;                // Record progress
  }


  // If a Region flows into a Region, merge into one big happy merge.
  if (can_reshape) {
    Node *m = merge_region(this, phase);
    if (m != NULL)  return m;
  }

  // Check if this region is the root of a clipping idiom on floats
  if( ConvertFloat2IntClipping && can_reshape && req() == 4 ) {
    // Check that only one use is a Phi and that it simplifies to two constants +
    PhiNode* phi = has_unique_phi();
    if (phi != NULL) {          // One Phi user
      // Check inputs to the Phi
      ConNode *min;
      ConNode *max;
      Node    *val;
      uint     min_idx;
      uint     max_idx;
      uint     val_idx;
      if( check_phi_clipping( phi, min, min_idx, max, max_idx, val, val_idx ) ) {
        IfNode *top_if;
        IfNode *bot_if;
        if( check_if_clipping( this, bot_if, top_if ) ) {
          // Control pattern checks, now verify compares
          Node *top_in = NULL;   // value being compared against
          Node *bot_in = NULL;
          if( check_compare_clipping( true,  bot_if, min, bot_in ) &&
              check_compare_clipping( false, top_if, max, top_in ) ) {
            if( bot_in == top_in ) {
              PhaseIterGVN *gvn = phase->is_IterGVN();
              assert( gvn != NULL, "Only had DefUse info in IterGVN");
              // Only remaining check is that bot_in == top_in == (Phi's val + mods)

              // Check for the ConvF2INode
              ConvF2INode *convf2i;
              if( check_convf2i_clipping( phi, val_idx, convf2i, min, max ) &&
                  convf2i->in(1) == bot_in ) {
                // Matched pattern, including LShiftI; RShiftI, replace with integer compares
                // max test
                Node *cmp   = gvn->register_new_node_with_optimizer(new CmpINode( convf2i, min ));
                Node *boo   = gvn->register_new_node_with_optimizer(new BoolNode( cmp, BoolTest::lt ));
                IfNode *iff = (IfNode*)gvn->register_new_node_with_optimizer(new IfNode( top_if->in(0), boo, PROB_UNLIKELY_MAG(5), top_if->_fcnt ));
                Node *if_min= gvn->register_new_node_with_optimizer(new IfTrueNode (iff));
                Node *ifF   = gvn->register_new_node_with_optimizer(new IfFalseNode(iff));
                // min test
                cmp         = gvn->register_new_node_with_optimizer(new CmpINode( convf2i, max ));
                boo         = gvn->register_new_node_with_optimizer(new BoolNode( cmp, BoolTest::gt ));
                iff         = (IfNode*)gvn->register_new_node_with_optimizer(new IfNode( ifF, boo, PROB_UNLIKELY_MAG(5), bot_if->_fcnt ));
                Node *if_max= gvn->register_new_node_with_optimizer(new IfTrueNode (iff));
                ifF         = gvn->register_new_node_with_optimizer(new IfFalseNode(iff));
                // update input edges to region node
                set_req_X( min_idx, if_min, gvn );
                set_req_X( max_idx, if_max, gvn );
                set_req_X( val_idx, ifF,    gvn );
                // remove unnecessary 'LShiftI; RShiftI' idiom
                gvn->hash_delete(phi);
                phi->set_req_X( val_idx, convf2i, gvn );
                gvn->hash_find_insert(phi);
                // Return transformed region node
                return this;
              }
            }
          }
        }
      }
    }
  }

  if (can_reshape) {
    modified |= optimize_trichotomy(phase->is_IterGVN());
  }

  return modified ? this : NULL;
}

//------------------------------optimize_trichotomy--------------------------
// Optimize nested comparisons of the following kind:
//
// int compare(int a, int b) {
//   return (a < b) ? -1 : (a == b) ? 0 : 1;
// }
//
// Shape 1:
// if (compare(a, b) == 1) { ... } -> if (a > b) { ... }
//
// Shape 2:
// if (compare(a, b) == 0) { ... } -> if (a == b) { ... }
//
// Above code leads to the following IR shapes where both Ifs compare the
// same value and two out of three region inputs idx1 and idx2 map to
// the same value and control flow.
//
// (1)    If                   (2)    If
//       /  \                        /  \
//    Proj    Proj                Proj    Proj
//      |       \                   |       \
//      |        If                 |        If        If
//      |       /  \                |       /  \      /  \
//      |    Proj  Proj             |    Proj  Proj ==> Proj  Proj
//      |    /      / \             |    /               |    /
//   Region        /   \            |   /                |   /
//        \       /     \           |  /                 |  /
//        Region         Region                         Region
//
// The method returns true if 'this' is modified and false otherwise.
bool RegionNode::optimize_trichotomy(PhaseIterGVN* igvn) {
  int idx1 = 1, idx2 = 2;
  Node* region = NULL;
  if (req() == 3 && in(1) != NULL && in(2) != NULL) {
    // Shape 1: Check if one of the inputs is a region that merges two control
    // inputs and has no other users (especially no Phi users).
    region = in(1)->isa_Region() ? in(1) : in(2)->isa_Region();
    if (region == NULL || region->outcnt() != 2 || region->req() != 3) {
      return false; // No suitable region input found
    }
  } else if (req() == 4) {
    // Shape 2: Check if two control inputs map to the same value of the unique phi
    // user and treat these as if they would come from another region (shape (1)).
    PhiNode* phi = has_unique_phi();
    if (phi == NULL) {
      return false; // No unique phi user
    }
    if (phi->in(idx1) != phi->in(idx2)) {
      idx2 = 3;
      if (phi->in(idx1) != phi->in(idx2)) {
        idx1 = 2;
        if (phi->in(idx1) != phi->in(idx2)) {
          return false; // No equal phi inputs found
        }
      }
    }
    assert(phi->in(idx1) == phi->in(idx2), "must be"); // Region is merging same value
    region = this;
  }
  if (region == NULL || region->in(idx1) == NULL || region->in(idx2) == NULL) {
    return false; // Region does not merge two control inputs
  }
  // At this point we know that region->in(idx1) and region->in(idx2) map to the same
  // value and control flow. Now search for ifs that feed into these region inputs.
  ProjNode* proj1 = region->in(idx1)->isa_Proj();
  ProjNode* proj2 = region->in(idx2)->isa_Proj();
  if (proj1 == NULL || proj1->outcnt() != 1 ||
      proj2 == NULL || proj2->outcnt() != 1) {
    return false; // No projection inputs with region as unique user found
  }
  assert(proj1 != proj2, "should be different projections");
  IfNode* iff1 = proj1->in(0)->isa_If();
  IfNode* iff2 = proj2->in(0)->isa_If();
  if (iff1 == NULL || iff1->outcnt() != 2 ||
      iff2 == NULL || iff2->outcnt() != 2) {
    return false; // No ifs found
  }
  if (iff1 == iff2) {
    igvn->add_users_to_worklist(iff1); // Make sure dead if is eliminated
    igvn->replace_input_of(region, idx1, iff1->in(0));
    igvn->replace_input_of(region, idx2, igvn->C->top());
    return (region == this); // Remove useless if (both projections map to the same control/value)
  }
  BoolNode* bol1 = iff1->in(1)->isa_Bool();
  BoolNode* bol2 = iff2->in(1)->isa_Bool();
  if (bol1 == NULL || bol2 == NULL) {
    return false; // No bool inputs found
  }
  Node* cmp1 = bol1->in(1);
  Node* cmp2 = bol2->in(1);
  bool commute = false;
  if (!cmp1->is_Cmp() || !cmp2->is_Cmp()) {
    return false; // No comparison
  } else if (cmp1->Opcode() == Op_CmpF || cmp1->Opcode() == Op_CmpD ||
             cmp2->Opcode() == Op_CmpF || cmp2->Opcode() == Op_CmpD ||
             cmp1->Opcode() == Op_CmpP || cmp1->Opcode() == Op_CmpN ||
             cmp2->Opcode() == Op_CmpP || cmp2->Opcode() == Op_CmpN ||
             cmp1->is_SubTypeCheck() || cmp2->is_SubTypeCheck()) {
    // Floats and pointers don't exactly obey trichotomy. To be on the safe side, don't transform their tests.
    // SubTypeCheck is not commutative
    return false;
  } else if (cmp1 != cmp2) {
    if (cmp1->in(1) == cmp2->in(2) &&
        cmp1->in(2) == cmp2->in(1)) {
      commute = true; // Same but swapped inputs, commute the test
    } else {
      return false; // Ifs are not comparing the same values
    }
  }
  proj1 = proj1->other_if_proj();
  proj2 = proj2->other_if_proj();
  if (!((proj1->unique_ctrl_out() == iff2 &&
         proj2->unique_ctrl_out() == this) ||
        (proj2->unique_ctrl_out() == iff1 &&
         proj1->unique_ctrl_out() == this))) {
    return false; // Ifs are not connected through other projs
  }
  // Found 'iff -> proj -> iff -> proj -> this' shape where all other projs are merged
  // through 'region' and map to the same value. Merge the boolean tests and replace
  // the ifs by a single comparison.
  BoolTest test1 = (proj1->_con == 1) ? bol1->_test : bol1->_test.negate();
  BoolTest test2 = (proj2->_con == 1) ? bol2->_test : bol2->_test.negate();
  test1 = commute ? test1.commute() : test1;
  // After possibly commuting test1, if we can merge test1 & test2, then proj2/iff2/bol2 are the nodes to refine.
  BoolTest::mask res = test1.merge(test2);
  if (res == BoolTest::illegal) {
    return false; // Unable to merge tests
  }
  // Adjust iff1 to always pass (only iff2 will remain)
  igvn->replace_input_of(iff1, 1, igvn->intcon(proj1->_con));
  if (res == BoolTest::never) {
    // Merged test is always false, adjust iff2 to always fail
    igvn->replace_input_of(iff2, 1, igvn->intcon(1 - proj2->_con));
  } else {
    // Replace bool input of iff2 with merged test
    BoolNode* new_bol = new BoolNode(bol2->in(1), res);
    igvn->replace_input_of(iff2, 1, igvn->transform((proj2->_con == 1) ? new_bol : new_bol->negate(igvn)));
    if (new_bol->outcnt() == 0) {
      igvn->remove_dead_node(new_bol);
    }
  }
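  // Only the Ifs were rewritten above; the inputs of 'this' region are untouched here,
  // so do not report 'this' as modified.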
898 | return false; | ||||||
899 | } | ||||||
900 | |||||||
901 | const RegMask &RegionNode::out_RegMask() const { | ||||||
902 | return RegMask::Empty; | ||||||
903 | } | ||||||
904 | |||||||
905 | // Find the one non-null required input. RegionNode only | ||||||
906 | Node *Node::nonnull_req() const { | ||||||
907 | assert( is_Region(), "" )do { if (!(is_Region())) { (*g_assert_poison) = 'X';; report_vm_error ("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/cfgnode.cpp" , 907, "assert(" "is_Region()" ") failed", ""); ::breakpoint( ); } } while (0); | ||||||
908 | for( uint i = 1; i < _cnt; i++ ) | ||||||
909 | if( in(i) ) | ||||||
910 | return in(i); | ||||||
911 | ShouldNotReachHere()do { (*g_assert_poison) = 'X';; report_should_not_reach_here( "/home/daniel/Projects/java/jdk/src/hotspot/share/opto/cfgnode.cpp" , 911); ::breakpoint(); } while (0); | ||||||
912 | return NULL__null; | ||||||
913 | } | ||||||
914 | |||||||
915 | |||||||
916 | //============================================================================= | ||||||
917 | // note that these functions assume that the _adr_type field is flattened | ||||||
918 | uint PhiNode::hash() const { | ||||||
919 | const Type* at = _adr_type; | ||||||
920 | return TypeNode::hash() + (at ? at->hash() : 0); | ||||||
921 | } | ||||||
922 | bool PhiNode::cmp( const Node &n ) const { | ||||||
923 | return TypeNode::cmp(n) && _adr_type == ((PhiNode&)n)._adr_type; | ||||||
924 | } | ||||||
925 | static inline | ||||||
926 | const TypePtr* flatten_phi_adr_type(const TypePtr* at) { | ||||||
927 | if (at == NULL__null || at == TypePtr::BOTTOM) return at; | ||||||
928 | return Compile::current()->alias_type(at)->adr_type(); | ||||||
929 | } | ||||||
930 | |||||||
931 | //----------------------------make--------------------------------------------- | ||||||
932 | // create a new phi with edges matching r and set (initially) to x | ||||||
933 | PhiNode* PhiNode::make(Node* r, Node* x, const Type *t, const TypePtr* at) { | ||||||
934 | uint preds = r->req(); // Number of predecessor paths | ||||||
935 | assert(t != Type::MEMORY || at == flatten_phi_adr_type(at), "flatten at")do { if (!(t != Type::MEMORY || at == flatten_phi_adr_type(at ))) { (*g_assert_poison) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/cfgnode.cpp" , 935, "assert(" "t != Type::MEMORY || at == flatten_phi_adr_type(at)" ") failed", "flatten at"); ::breakpoint(); } } while (0); | ||||||
936 | PhiNode* p = new PhiNode(r, t, at); | ||||||
937 | for (uint j = 1; j < preds; j++) { | ||||||
938 | // Fill in all inputs, except those which the region does not yet have | ||||||
939 | if (r->in(j) != NULL__null) | ||||||
940 | p->init_req(j, x); | ||||||
941 | } | ||||||
942 | return p; | ||||||
943 | } | ||||||
944 | PhiNode* PhiNode::make(Node* r, Node* x) { | ||||||
945 | const Type* t = x->bottom_type(); | ||||||
946 | const TypePtr* at = NULL__null; | ||||||
947 | if (t == Type::MEMORY) at = flatten_phi_adr_type(x->adr_type()); | ||||||
948 | return make(r, x, t, at); | ||||||
949 | } | ||||||
950 | PhiNode* PhiNode::make_blank(Node* r, Node* x) { | ||||||
951 | const Type* t = x->bottom_type(); | ||||||
952 | const TypePtr* at = NULL__null; | ||||||
953 | if (t == Type::MEMORY) at = flatten_phi_adr_type(x->adr_type()); | ||||||
954 | return new PhiNode(r, t, at); | ||||||
955 | } | ||||||
956 | |||||||
957 | |||||||
958 | //------------------------slice_memory----------------------------------------- | ||||||
959 | // create a new phi with narrowed memory type | ||||||
960 | PhiNode* PhiNode::slice_memory(const TypePtr* adr_type) const { | ||||||
961 | PhiNode* mem = (PhiNode*) clone(); | ||||||
962 | *(const TypePtr**)&mem->_adr_type = adr_type; | ||||||
963 | // convert self-loops, or else we get a bad graph | ||||||
964 | for (uint i = 1; i < req(); i++) { | ||||||
965 | if ((const Node*)in(i) == this) mem->set_req(i, mem); | ||||||
966 | } | ||||||
967 | mem->verify_adr_type(); | ||||||
968 | return mem; | ||||||
969 | } | ||||||
970 | |||||||
//------------------------split_out_instance-----------------------------------
// Split out an instance type from a bottom phi.
PhiNode* PhiNode::split_out_instance(const TypePtr* at, PhaseIterGVN *igvn) const {
  const TypeOopPtr *t_oop = at->isa_oopptr();
  assert(t_oop != NULL && t_oop->is_known_instance(), "expecting instance oopptr");
  const TypePtr *t = adr_type();
  assert(type() == Type::MEMORY &&
         (t == TypePtr::BOTTOM || t == TypeRawPtr::BOTTOM ||
          t->isa_oopptr() && !t->is_oopptr()->is_known_instance() &&
          t->is_oopptr()->cast_to_exactness(true)
           ->is_oopptr()->cast_to_ptr_type(t_oop->ptr())
           ->is_oopptr()->cast_to_instance_id(t_oop->instance_id()) == t_oop),
         "bottom or raw memory required");

  // Check if an appropriate node already exists.
  Node *region = in(0);
  for (DUIterator_Fast kmax, k = region->fast_outs(kmax); k < kmax; k++) {
    Node* use = region->fast_out(k);
    if (use->is_Phi()) {
      PhiNode *phi2 = use->as_Phi();
      if (phi2->type() == Type::MEMORY && phi2->adr_type() == at) {
        return phi2;
      }
    }
  }
  Compile *C = igvn->C;
  Arena *a = Thread::current()->resource_area();
  Node_Array node_map = new Node_Array(a);
  Node_Stack stack(a, C->live_nodes() >> 4);
  PhiNode *nphi = slice_memory(at);
  igvn->register_new_node_with_optimizer( nphi );
  node_map.map(_idx, nphi);
  stack.push((Node *)this, 1);
  while(!stack.is_empty()) {
    PhiNode *ophi = stack.node()->as_Phi();
    uint i = stack.index();
    assert(i >= 1, "not control edge");
    stack.pop();
    nphi = node_map[ophi->_idx]->as_Phi();
    for (; i < ophi->req(); i++) {
      Node *in = ophi->in(i);
      if (in == NULL || igvn->type(in) == Type::TOP)
        continue;
      Node *opt = MemNode::optimize_simple_memory_chain(in, t_oop, NULL, igvn);
      PhiNode *optphi = opt->is_Phi() ? opt->as_Phi() : NULL;
      if (optphi != NULL && optphi->adr_type() == TypePtr::BOTTOM) {
        opt = node_map[optphi->_idx];
        if (opt == NULL) {
          stack.push(ophi, i);
          nphi = optphi->slice_memory(at);
          igvn->register_new_node_with_optimizer( nphi );
          node_map.map(optphi->_idx, nphi);
          ophi = optphi;
          i = 0; // will get incremented at top of loop
          continue;
        }
      }
      nphi->set_req(i, opt);
    }
  }
  return nphi;
}

//------------------------verify_adr_type--------------------------------------
#ifdef ASSERT
void PhiNode::verify_adr_type(VectorSet& visited, const TypePtr* at) const {
  if (visited.test_set(_idx))  return;  //already visited

  // recheck constructor invariants:
  verify_adr_type(false);

  // recheck local phi/phi consistency:
  assert(_adr_type == at || _adr_type == TypePtr::BOTTOM,
         "adr_type must be consistent across phi nest");

  // walk around
  for (uint i = 1; i < req(); i++) {
    Node* n = in(i);
    if (n == NULL)  continue;
    const Node* np = in(i);
    if (np->is_Phi()) {
      np->as_Phi()->verify_adr_type(visited, at);
    } else if (n->bottom_type() == Type::TOP
               || (n->is_Mem() && n->in(MemNode::Address)->bottom_type() == Type::TOP)) {
      // ignore top inputs
    } else {
      const TypePtr* nat = flatten_phi_adr_type(n->adr_type());
      // recheck phi/non-phi consistency at leaves:
      assert((nat != NULL) == (at != NULL), "");
      assert(nat == at || nat == TypePtr::BOTTOM,
             "adr_type must be consistent at leaves of phi nest");
    }
  }
}

// Verify a whole nest of phis rooted at this one.
void PhiNode::verify_adr_type(bool recursive) const {
  if (VMError::is_error_reported())  return;  // muzzle asserts when debugging an error
  if (Node::in_dump())               return;  // muzzle asserts when printing

  assert((_type == Type::MEMORY) == (_adr_type != NULL), "adr_type for memory phis only");

  if (!VerifyAliases)  return;  // verify thoroughly only if requested

  assert(_adr_type == flatten_phi_adr_type(_adr_type),
         "Phi::adr_type must be pre-normalized");

  if (recursive) {
    VectorSet visited;
    verify_adr_type(visited, _adr_type);
  }
}
#endif


//------------------------------Value------------------------------------------
// Compute the type of the PhiNode
const Type* PhiNode::Value(PhaseGVN* phase) const {
  Node *r = in(0);              // RegionNode
  if( !r )                      // Copy or dead
    return in(1) ? phase->type(in(1)) : Type::TOP;

  // Note: During parsing, phis are often transformed before their regions.
  // This means we have to use type_or_null to defend against untyped regions.
  if( phase->type_or_null(r) == Type::TOP )  // Dead code?
    return Type::TOP;

  // Check for trip-counted loop.  If so, be smarter.
  BaseCountedLoopNode* l = r->is_BaseCountedLoop() ? r->as_BaseCountedLoop() : NULL;
  if (l && ((const Node*)l->phi() == this)) { // Trip counted loop!
    // protect against init_trip() or limit() returning NULL
    if (l->can_be_counted_loop(phase)) {
      const Node* init   = l->init_trip();
      const Node* limit  = l->limit();
      const Node* stride = l->stride();
      if (init != NULL && limit != NULL && stride != NULL) {
        const TypeInteger* lo = phase->type(init)->isa_integer(l->bt());
        const TypeInteger* hi = phase->type(limit)->isa_integer(l->bt());
        const TypeInteger* stride_t = phase->type(stride)->isa_integer(l->bt());
        if (lo != NULL && hi != NULL && stride_t != NULL) { // Dying loops might have TOP here
          assert(stride_t->hi_as_long() >= stride_t->lo_as_long(), "bad stride type");
          BoolTest::mask bt = l->loopexit()->test_trip();
          // If the loop exit condition is "not equal", the condition
          // would not trigger if init > limit (if stride > 0) or if
          // init < limit (if stride < 0), so we can't deduce bounds
          // for the iv from the exit condition.
          if (bt != BoolTest::ne) {
            if (stride_t->hi_as_long() < 0) {          // Down-counter loop
              swap(lo, hi);
              return TypeInteger::make(MIN2(lo->lo_as_long(), hi->lo_as_long()), hi->hi_as_long(), 3, l->bt())->filter_speculative(_type);
            } else if (stride_t->lo_as_long() >= 0) {
              return TypeInteger::make(lo->lo_as_long(), MAX2(lo->hi_as_long(), hi->hi_as_long()), 3, l->bt())->filter_speculative(_type);
            }
          }
        }
      }
    } else if (l->in(LoopNode::LoopBackControl) != NULL &&
               in(LoopNode::EntryControl) != NULL &&
               phase->type(l->in(LoopNode::LoopBackControl)) == Type::TOP) {
      // During CCP, if we saturate the type of a counted loop's Phi
      // before the special code for counted loop above has a chance
      // to run (that is as long as the type of the backedge's control
      // is top), we might end up with non-monotonic types
      return phase->type(in(LoopNode::EntryControl))->filter_speculative(_type);
    }
  }

  // Until we have harmony between classes and interfaces in the type
  // lattice, we must tread carefully around phis which implicitly
  // convert the one to the other.
  const TypePtr* ttp = _type->make_ptr();
  const TypeInstPtr* ttip = (ttp != NULL) ? ttp->isa_instptr() : NULL;
  const TypeKlassPtr* ttkp = (ttp != NULL) ? ttp->isa_instklassptr() : NULL;
  bool is_intf = false;
  if (ttip != NULL) {
    ciKlass* k = ttip->klass();
    if (k->is_loaded() && k->is_interface())
      is_intf = true;
  }
  if (ttkp != NULL) {
    ciKlass* k = ttkp->klass();
    if (k->is_loaded() && k->is_interface())
      is_intf = true;
  }

  // Default case: merge all inputs
  const Type *t = Type::TOP;        // Merged type starting value
  for (uint i = 1; i < req(); ++i) {// For all paths in
    // Reachable control path?
    if (r->in(i) && phase->type(r->in(i)) == Type::CONTROL) {
      const Type* ti = phase->type(in(i));
      // We assume that each input of an interface-valued Phi is a true
      // subtype of that interface.  This might not be true of the meet
      // of all the input types.  The lattice is not distributive in
      // such cases.  Ward off asserts in type.cpp by refusing to do
      // meets between interfaces and proper classes.
      const TypePtr* tip = ti->make_ptr();
      const TypeInstPtr* tiip = (tip != NULL) ? tip->isa_instptr() : NULL;
      if (tiip) {
        bool ti_is_intf = false;
        ciKlass* k = tiip->klass();
        if (k->is_loaded() && k->is_interface())
          ti_is_intf = true;
        if (is_intf != ti_is_intf)
          { t = _type; break; }
      }
      t = t->meet_speculative(ti);
    }
  }

  // The worst-case type (from ciTypeFlow) should be consistent with "t".
  // That is, we expect that "t->higher_equal(_type)" holds true.
  // There are various exceptions:
  // - Inputs which are phis might in fact be widened unnecessarily.
  //   For example, an input might be a widened int while the phi is a short.
  // - Inputs might be BotPtrs but this phi is dependent on a null check,
  //   and postCCP has removed the cast which encodes the result of the check.
  // - The type of this phi is an interface, and the inputs are classes.
  // - Value calls on inputs might produce fuzzy results.
  //   (Occurrences of this case suggest improvements to Value methods.)
  //
  // It is not possible to see Type::BOTTOM values as phi inputs,
  // because the ciTypeFlow pre-pass produces verifier-quality types.
  const Type* ft = t->filter_speculative(_type);  // Worst case type

#ifdef ASSERT
  // The following logic has been moved into TypeOopPtr::filter.
  const Type* jt = t->join_speculative(_type);
  if (jt->empty()) {           // Emptied out???

    // Check for evil case of 't' being a class and '_type' expecting an
    // interface.  This can happen because the bytecodes do not contain
    // enough type info to distinguish a Java-level interface variable
    // from a Java-level object variable.  If we meet 2 classes which
    // both implement interface I, but their meet is at 'j/l/O' which
    // doesn't implement I, we have no way to tell if the result should
    // be 'I' or 'j/l/O'.  Thus we'll pick 'j/l/O'.  If this then flows
    // into a Phi which "knows" it's an Interface type we'll have to
    // uplift the type.
    if (!t->empty() && ttip && ttip->is_loaded() && ttip->klass()->is_interface()) {
      assert(ft == _type, ""); // Uplift to interface
    } else if (!t->empty() && ttkp && ttkp->is_loaded() && ttkp->klass()->is_interface()) {
      assert(ft == _type, ""); // Uplift to interface
    } else {
      // We also have to handle 'evil cases' of interface- vs. class-arrays
      Type::get_arrays_base_elements(jt, _type, NULL, &ttip);
      if (!t->empty() && ttip != NULL && ttip->is_loaded() && ttip->klass()->is_interface()) {
        assert(ft == _type, "");   // Uplift to array of interface
      } else {
        // Otherwise it's something stupid like non-overlapping int ranges
        // found on dying counted loops.
        assert(ft == Type::TOP, ""); // Canonical empty value
      }
    }
  }

  else {

    // If we have an interface-typed Phi and we narrow to a class type, the join
    // should report back the class.  However, if we have a J/L/Object
    // class-typed Phi and an interface flows in, it's possible that the meet &
    // join report an interface back out.  This isn't possible but happens
    // because the type system doesn't interact well with interfaces.
    const TypePtr *jtp = jt->make_ptr();
    const TypeInstPtr *jtip = (jtp != NULL) ? jtp->isa_instptr() : NULL;
    const TypeKlassPtr *jtkp = (jtp != NULL) ? jtp->isa_instklassptr() : NULL;
    if( jtip && ttip ) {
      if( jtip->is_loaded() &&  jtip->klass()->is_interface() &&
          ttip->is_loaded() && !ttip->klass()->is_interface() ) {
        assert(ft == ttip->cast_to_ptr_type(jtip->ptr()) ||
               ft->isa_narrowoop() && ft->make_ptr() == ttip->cast_to_ptr_type(jtip->ptr()), "");
        jt = ft;
      }
    }
    if( jtkp && ttkp ) {
      if( jtkp->is_loaded() &&  jtkp->klass()->is_interface() &&
          !jtkp->klass_is_exact() && // Keep exact interface klass (6894807)
          ttkp->is_loaded() && !ttkp->klass()->is_interface() ) {
        assert(ft == ttkp->cast_to_ptr_type(jtkp->ptr()) ||
               ft->isa_narrowklass() && ft->make_ptr() == ttkp->cast_to_ptr_type(jtkp->ptr()), "");
        jt = ft;
      }
    }
    if (jt != ft && jt->base() == ft->base()) {
      if (jt->isa_int() &&
          jt->is_int()->_lo == ft->is_int()->_lo &&
          jt->is_int()->_hi == ft->is_int()->_hi)
        jt = ft;
      if (jt->isa_long() &&
          jt->is_long()->_lo == ft->is_long()->_lo &&
          jt->is_long()->_hi == ft->is_long()->_hi)
        jt = ft;
    }
    if (jt != ft) {
      tty->print("merge type:  "); t->dump(); tty->cr();
      tty->print("kill type:   "); _type->dump(); tty->cr();
      tty->print("join type:   "); jt->dump(); tty->cr();
      tty->print("filter type: "); ft->dump(); tty->cr();
    }
    assert(jt == ft, "");
  }
#endif //ASSERT

  // Deal with conversion problems found in data loops.
  ft = phase->saturate(ft, phase->type_or_null(this), _type);

  return ft;
}


//------------------------------is_diamond_phi---------------------------------
// Does this Phi represent a simple well-shaped diamond merge?  Return the
// index of the true path or 0 otherwise.
// If check_control_only is true, do not inspect the If node at the
// top, and return -1 (not an edge number) on success.
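// Informally, the shape being matched is the two-way diamond sketched below
// (node names are generic and shown for illustration only):
//
//              If
//             /  \
//       IfTrue    IfFalse
//             \  /
//            Region
//               |
//              Phi  (this node)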
int PhiNode::is_diamond_phi(bool check_control_only) const {
  // Check for a 2-path merge
  Node *region = in(0);
  if( !region ) return 0;
  if( region->req() != 3 ) return 0;
  if( req() != 3 ) return 0;
  // Check that both paths come from the same If
  Node *ifp1 = region->in(1);
  Node *ifp2 = region->in(2);
  if( !ifp1 || !ifp2 ) return 0;
  Node *iff = ifp1->in(0);
  if( !iff || !iff->is_If() ) return 0;
  if( iff != ifp2->in(0) ) return 0;
  if (check_control_only)  return -1;
  // Check for a proper bool/cmp
  const Node *b = iff->in(1);
  if( !b->is_Bool() ) return 0;
  const Node *cmp = b->in(1);
  if( !cmp->is_Cmp() ) return 0;

  // Check for branching opposite expected
  if( ifp2->Opcode() == Op_IfTrue ) {
    assert( ifp1->Opcode() == Op_IfFalse, "" );
    return 2;
  } else {
    assert( ifp1->Opcode() == Op_IfTrue, "" );
    return 1;
  }
}

//----------------------------check_cmove_id-----------------------------------
// Check for CMove'ing a constant after comparing against the constant.
// Happens all the time now, since if we compare equality vs a constant in
// the parser, we "know" the variable is constant on one path and we force
// it.  Thus code like "if( x==0 ) {/*EMPTY*/}" ends up inserting a
// conditional move: "x = (x==0)?0:x;".  Yucko.  This fix is slightly more
// general in that we don't need constants.  Since CMove's are only inserted
// in very special circumstances, we do it here on generic Phi's.
Node* PhiNode::is_cmove_id(PhaseTransform* phase, int true_path) {
  assert(true_path != 0, "only diamond shape graph expected");

  // is_diamond_phi() has guaranteed the correctness of the nodes sequence:
  // phi->region->if_proj->ifnode->bool->cmp
  Node* region = in(0);
  Node* iff    = region->in(1)->in(0);
  BoolNode* b  = iff->in(1)->as_Bool();
  Node* cmp    = b->in(1);
  Node* tval   = in(true_path);
  Node* fval   = in(3-true_path);
  Node* id     = CMoveNode::is_cmove_id(phase, cmp, tval, fval, b);
  if (id == NULL)
    return NULL;

  // Either value might be a cast that depends on a branch of 'iff'.
  // Since the 'id' value will float free of the diamond, either
  // decast or return failure.
  Node* ctl = id->in(0);
  if (ctl != NULL && ctl->in(0) == iff) {
    if (id->is_ConstraintCast()) {
      return id->in(1);
    } else {
      // Don't know how to disentangle this value.
      return NULL;
    }
  }

  return id;
}

//------------------------------Identity---------------------------------------
// Check for Region being Identity.
Node* PhiNode::Identity(PhaseGVN* phase) {
  // Check for no merging going on
  // (There used to be special-case code here when this->region->is_Loop.
  // It would check for a tributary phi on the backedge that the main phi
  // trivially, perhaps with a single cast.  The unique_input method
  // does all this and more, by reducing such tributaries to 'this'.)
  Node* uin = unique_input(phase, false);
  if (uin != NULL) {
    return uin;
  }

  int true_path = is_diamond_phi();
  // Delay CMove'ing identity if Ideal has not had the chance to handle unsafe cases, yet.
  if (true_path != 0 && !(phase->is_IterGVN() && wait_for_region_igvn(phase))) {
    Node* id = is_cmove_id(phase, true_path);
    if (id != NULL) {
      return id;
    }
  }

  // Looking for phis with identical inputs.  If we find one that has
  // type TypePtr::BOTTOM, replace the current phi with the bottom phi.
  if (phase->is_IterGVN() && type() == Type::MEMORY && adr_type() !=
      TypePtr::BOTTOM && !adr_type()->is_known_instance()) {
    uint phi_len = req();
    Node* phi_reg = region();
    for (DUIterator_Fast imax, i = phi_reg->fast_outs(imax); i < imax; i++) {
      Node* u = phi_reg->fast_out(i);
      if (u->is_Phi() && u->as_Phi()->type() == Type::MEMORY &&
          u->adr_type() == TypePtr::BOTTOM && u->in(0) == phi_reg &&
          u->req() == phi_len) {
        for (uint j = 1; j < phi_len; j++) {
          if (in(j) != u->in(j)) {
            u = NULL;
            break;
          }
        }
        if (u != NULL) {
          return u;
        }
      }
    }
  }

  return this;                     // No identity
}

//-----------------------------unique_input------------------------------------
// Find the unique value, discounting top, self-loops, and casts.
// Return top if there are no inputs, and self if there are multiple.
Node* PhiNode::unique_input(PhaseTransform* phase, bool uncast) {
  //  1) One unique direct input,
  // or if uncast is true:
  //  2) some of the inputs have an intervening ConstraintCast
  //  3) an input is a self loop
  //
  //  1) input   or   2) input     or   3) input __
  //     /   \           /   \              \  /  \
  //     \   /          |    cast            phi   cast
  //      phi            \   /              /   \  /
  //                      phi              /     --

  Node* r = in(0);                      // RegionNode
  Node* input = NULL; // The unique direct input (maybe uncasted = ConstraintCasts removed)

  for (uint i = 1, cnt = req(); i < cnt; ++i) {
    Node* rc = r->in(i);
    if (rc == NULL || phase->type(rc) == Type::TOP)
      continue;                 // ignore unreachable control path
    Node* n = in(i);
    if (n == NULL)
      continue;
    Node* un = n;
    if (uncast) {
#ifdef ASSERT
      Node* m = un->uncast();
#endif
      while (un != NULL && un->req() == 2 && un->is_ConstraintCast()) {
        Node* next = un->in(1);
        if (phase->type(next)->isa_rawptr() && phase->type(un)->isa_oopptr()) {
          // risk exposing raw ptr at safepoint
          break;
        }
        un = next;
      }
      assert(m == un || un->in(1) == m, "Only expected at CheckCastPP from allocation");
    }
    if (un == NULL || un == this || phase->type(un) == Type::TOP) {
      continue; // ignore if top, or in(i) and "this" are in a data cycle
    }
    // Check for a unique input (maybe uncasted)
    if (input == NULL) {
      input = un;
    } else if (input != un) {
      input = NodeSentinel; // no unique input
    }
  }
  if (input == NULL) {
    return phase->C->top(); // no inputs
  }

  if (input != NodeSentinel) {
    return input;           // one unique direct input
  }

  // Nothing.
  return NULL;
}

//------------------------------is_x2logic-------------------------------------
// Check for simple convert-to-boolean pattern
//   If:(C Bool)  Region:(IfF IfT)  Phi:(Region 0 1)
// Convert the Phi to a Conv2B.
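// As an informal illustration (the Java-level snippets are not from this file,
// they only sketch the shape being matched):
//   int r = (x != 0) ? 1 : 0;   // collapses to Conv2B(x)
//   int r = (x == 0) ? 1 : 0;   // collapses to Conv2B(x) ^ 1, via the XorI below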
static Node *is_x2logic( PhaseGVN *phase, PhiNode *phi, int true_path ) {
  assert(true_path != 0, "only diamond shape graph expected");
  // Convert the true/false index into an expected 0/1 return.
  // Map 2->0 and 1->1.
  int flipped = 2-true_path;

  // is_diamond_phi() has guaranteed the correctness of the nodes sequence:
  // phi->region->if_proj->ifnode->bool->cmp
  Node *region = phi->in(0);
  Node *iff = region->in(1)->in(0);
  BoolNode *b = (BoolNode*)iff->in(1);
  const CmpNode *cmp = (CmpNode*)b->in(1);

  Node *zero = phi->in(1);
  Node *one  = phi->in(2);
  const Type *tzero = phase->type( zero );
  const Type *tone  = phase->type( one  );

  // Check for compare vs 0
  const Type *tcmp = phase->type(cmp->in(2));
  if( tcmp != TypeInt::ZERO && tcmp != TypePtr::NULL_PTR ) {
    // Allow cmp-vs-1 if the other input is bounded by 0-1
    if( !(tcmp == TypeInt::ONE && phase->type(cmp->in(1)) == TypeInt::BOOL) )
      return NULL;
    flipped = 1-flipped;        // Test is vs 1 instead of 0!
  }

  // Check for setting zero/one opposite expected
  if( tzero == TypeInt::ZERO ) {
    if( tone == TypeInt::ONE ) {
    } else return NULL;
  } else if( tzero == TypeInt::ONE ) {
    if( tone == TypeInt::ZERO ) {
      flipped = 1-flipped;
    } else return NULL;
  } else return NULL;

  // Check for boolean test backwards
  if( b->_test._test == BoolTest::ne ) {
  } else if( b->_test._test == BoolTest::eq ) {
    flipped = 1-flipped;
  } else return NULL;

  // Build int->bool conversion
  Node *n = new Conv2BNode(cmp->in(1));
  if( flipped )
    n = new XorINode( phase->transform(n), phase->intcon(1) );

  return n;
}

//------------------------------is_cond_add------------------------------------
// Check for simple conditional add pattern:  "(P < Q) ? X+Y : X;"
// To be profitable the control flow has to disappear; there can be no other
// values merging here.  We replace the test-and-branch with:
// "((sgn(P-Q))&Y) + X".  Basically, convert "(P < Q)" into 0 or -1 by
// moving the carry bit from (P-Q) into a register with 'sbb EAX,EAX'.
// Then convert Y to 0-or-Y and finally add.
// This is a key transform for SpecJava _201_compress.
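// Informal example with made-up values (illustration only, not from the source):
//   P = 3, Q = 5, X = 10, Y = 7:  CmpLTMask(3,5) = -1, so (-1 & 7) + 10 = 17 = X+Y
//   P = 9, Q = 5, X = 10, Y = 7:  CmpLTMask(9,5) =  0, so ( 0 & 7) + 10 = 10 = X
// which is the same result as "(P < Q) ? X+Y : X" with the branch removed.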
static Node* is_cond_add(PhaseGVN *phase, PhiNode *phi, int true_path) {
  assert(true_path != 0, "only diamond shape graph expected");

  // is_diamond_phi() has guaranteed the correctness of the nodes sequence:
  // phi->region->if_proj->ifnode->bool->cmp
  RegionNode *region = (RegionNode*)phi->in(0);
  Node *iff = region->in(1)->in(0);
  BoolNode* b = iff->in(1)->as_Bool();
  const CmpNode *cmp = (CmpNode*)b->in(1);

  // Make sure only merging this one phi here
  if (region->has_unique_phi() != phi)  return NULL;

  // Make sure each arm of the diamond has exactly one output, which we assume
  // is the region.  Otherwise, the control flow won't disappear.
  if (region->in(1)->outcnt() != 1) return NULL;
  if (region->in(2)->outcnt() != 1) return NULL;

  // Check for "(P < Q)" of type signed int
  if (b->_test._test != BoolTest::lt)  return NULL;
  if (cmp->Opcode() != Op_CmpI)        return NULL;

  Node *p = cmp->in(1);
  Node *q = cmp->in(2);
  Node *n1 = phi->in(  true_path);
  Node *n2 = phi->in(3-true_path);

  int op = n1->Opcode();
  if( op != Op_AddI           // Need zero as additive identity
      /*&&op != Op_SubI &&
      op != Op_AddP &&
      op != Op_XorI &&
      op != Op_OrI*/ )
    return NULL;

  Node *x = n2;
  Node *y = NULL;
  if( x == n1->in(1) ) {
    y = n1->in(2);
  } else if( x == n1->in(2) ) {
    y = n1->in(1);
  } else return NULL;

  // Not so profitable if compare and add are constants
  if( q->is_Con() && phase->type(q) != TypeInt::ZERO && y->is_Con() )
    return NULL;

  Node *cmplt = phase->transform( new CmpLTMaskNode(p,q) );
  Node *j_and = phase->transform( new AndINode(cmplt,y) );
  return new AddINode(j_and,x);
}

//------------------------------is_absolute------------------------------------
// Check for absolute value.
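// Informally, the shape being matched is "(x > 0) ? x : 0 - x" or a mirrored or
// inverted variant, e.g. in Java terms (illustration only):
//   double d = (x > 0.0) ? x : 0.0 - x;   // becomes AbsD(x)
// When the test is inverted, the result is re-negated below as Sub(0, Abs(x)).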
static Node* is_absolute( PhaseGVN *phase, PhiNode *phi_root, int true_path) {
  assert(true_path != 0, "only diamond shape graph expected");

  int cmp_zero_idx = 0;         // Index of compare input where to look for zero
  int phi_x_idx = 0;            // Index of phi input where to find naked x

  // ABS ends with the merge of 2 control flow paths.
  // Find the false path from the true path. With only 2 inputs, 3 - x works nicely.
  int false_path = 3 - true_path;

  // is_diamond_phi() has guaranteed the correctness of the nodes sequence:
  // phi->region->if_proj->ifnode->bool->cmp
  BoolNode *bol = phi_root->in(0)->in(1)->in(0)->in(1)->as_Bool();
  Node *cmp = bol->in(1);

  // Check bool sense
  if (cmp->Opcode() == Op_CmpF || cmp->Opcode() == Op_CmpD) {
    switch (bol->_test._test) {
    case BoolTest::lt: cmp_zero_idx = 1; phi_x_idx = true_path;  break;
    case BoolTest::le: cmp_zero_idx = 2; phi_x_idx = false_path; break;
    case BoolTest::gt: cmp_zero_idx = 2; phi_x_idx = true_path;  break;
    case BoolTest::ge: cmp_zero_idx = 1; phi_x_idx = false_path; break;
    default:           return NULL;                              break;
    }
  } else if (cmp->Opcode() == Op_CmpI || cmp->Opcode() == Op_CmpL) {
    switch (bol->_test._test) {
    case BoolTest::lt:
    case BoolTest::le: cmp_zero_idx = 2; phi_x_idx = false_path; break;
    case BoolTest::gt:
    case BoolTest::ge: cmp_zero_idx = 2; phi_x_idx = true_path;  break;
    default:           return NULL;                              break;
    }
  }

  // Test is next
  const Type *tzero = NULL;
  switch (cmp->Opcode()) {
  case Op_CmpI: tzero = TypeInt::ZERO;  break; // Integer ABS
  case Op_CmpL: tzero = TypeLong::ZERO; break; // Long ABS
  case Op_CmpF: tzero = TypeF::ZERO;    break; // Float ABS
  case Op_CmpD: tzero = TypeD::ZERO;    break; // Double ABS
  default: return NULL;
  }

  // Find zero input of compare; the other input is being abs'd
  Node *x = NULL;
  bool flip = false;
  if( phase->type(cmp->in(cmp_zero_idx)) == tzero ) {
    x = cmp->in(3 - cmp_zero_idx);
  } else if( phase->type(cmp->in(3 - cmp_zero_idx)) == tzero ) {
    // The test is inverted, we should invert the result...
    x = cmp->in(cmp_zero_idx);
    flip = true;
  } else {
    return NULL;
  }

  // Next get the 2 pieces being selected, one is the original value
  // and the other is the negated value.
  if( phi_root->in(phi_x_idx) != x ) return NULL;

  // Check other phi input for subtract node
  Node *sub = phi_root->in(3 - phi_x_idx);

  bool is_sub = sub->Opcode() == Op_SubF || sub->Opcode() == Op_SubD ||
                sub->Opcode() == Op_SubI || sub->Opcode() == Op_SubL;

  // Allow only Sub(0,X) and fail out for all others; Neg is not OK
  if (!is_sub || phase->type(sub->in(1)) != tzero || sub->in(2) != x) return NULL;

  if (tzero == TypeF::ZERO) {
    x = new AbsFNode(x);
    if (flip) {
      x = new SubFNode(sub->in(1), phase->transform(x));
    }
  } else if (tzero == TypeD::ZERO) {
    x = new AbsDNode(x);
    if (flip) {
      x = new SubDNode(sub->in(1), phase->transform(x));
    }
  } else if (tzero == TypeInt::ZERO && Matcher::match_rule_supported(Op_AbsI)) {
    x = new AbsINode(x);
    if (flip) {
      x = new SubINode(sub->in(1), phase->transform(x));
    }
  } else if (tzero == TypeLong::ZERO && Matcher::match_rule_supported(Op_AbsL)) {
    x = new AbsLNode(x);
    if (flip) {
      x = new SubLNode(sub->in(1), phase->transform(x));
    }
  } else return NULL;

  return x;
}

//------------------------------split_once-------------------------------------
// Helper for split_flow_path
static void split_once(PhaseIterGVN *igvn, Node *phi, Node *val, Node *n, Node *newn) {
  igvn->hash_delete(n);         // Remove from hash before hacking edges

  uint j = 1;
  for (uint i = phi->req()-1; i > 0; i--) {
    if (phi->in(i) == val) {    // Found a path with val?
      // Add to NEW Region/Phi, no DU info
      newn->set_req( j++, n->in(i) );
      // Remove from OLD Region/Phi
      n->del_req(i);
    }
  }

  // Register the new node but do not transform it.  Cannot transform until the
  // entire Region/Phi conglomerate has been hacked as a single huge transform.
  igvn->register_new_node_with_optimizer( newn );

  // Now I can point to the new node.
  n->add_req(newn);
  igvn->_worklist.push(n);
}

//------------------------------split_flow_path--------------------------------
// Check for merging identical values and split flow paths
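// Informally: if a Phi merges the same constant on two or more (but not all)
// paths, e.g. (A, con, B, con), those paths are first merged in a fresh Region
// of their own, so the Phi becomes (A, B, con) and the constant is commoned
// without disturbing the remaining paths.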
static Node* split_flow_path(PhaseGVN *phase, PhiNode *phi) {
  BasicType bt = phi->type()->basic_type();
  if( bt == T_ILLEGAL || type2size[bt] <= 0 )
    return NULL;                // Bail out on funny non-value stuff
  if( phi->req() <= 3 )         // Need at least 2 matched inputs and a
    return NULL;                // third unequal input to be worth doing

  // Scan for a constant
  uint i;
  for( i = 1; i < phi->req()-1; i++ ) {
    Node *n = phi->in(i);
    if( !n ) return NULL;
    if( phase->type(n) == Type::TOP ) return NULL;
    if( n->Opcode() == Op_ConP || n->Opcode() == Op_ConN || n->Opcode() == Op_ConNKlass )
      break;
  }
  if( i >= phi->req() )         // Only split for constants
    return NULL;

  Node *val = phi->in(i);       // Constant to split for
  uint hit = 0;                 // Number of times it occurs
  Node *r = phi->region();

  for( ; i < phi->req(); i++ ){ // Count occurrences of constant
    Node *n = phi->in(i);
    if( !n ) return NULL;
    if( phase->type(n) == Type::TOP ) return NULL;
    if( phi->in(i) == val ) {
      hit++;
      if (PhaseIdealLoop::find_predicate(r->in(i)) != NULL) {
        return NULL;            // don't split loop entry path
      }
    }
  }

  if( hit <= 1 ||               // Make sure we find 2 or more
      hit == phi->req()-1 )     // and not ALL the same value
    return NULL;

  // Now start splitting out the flow paths that merge the same value.
  // Split first the RegionNode.
  PhaseIterGVN *igvn = phase->is_IterGVN();
  RegionNode *newr = new RegionNode(hit+1);
  split_once(igvn, phi, val, r, newr);

  // Now split all other Phis than this one
  for (DUIterator_Fast kmax, k = r->fast_outs(kmax); k < kmax; k++) {
    Node* phi2 = r->fast_out(k);
    if( phi2->is_Phi() && phi2->as_Phi() != phi ) {
      PhiNode *newphi = PhiNode::make_blank(newr, phi2);
      split_once(igvn, phi, val, phi2, newphi);
    }
  }

  // Clean up this guy
  igvn->hash_delete(phi);
  for( i = phi->req()-1; i > 0; i-- ) {
    if( phi->in(i) == val ) {
      phi->del_req(i);
    }
  }
  phi->add_req(val);

  return phi;
}

//=============================================================================
//------------------------------simple_data_loop_check-------------------------
// Try to determine if the phi node is in a simple safe/unsafe data loop.
// Returns:
// enum LoopSafety { Safe = 0, Unsafe, UnsafeLoop };
// Safe       - safe case when the phi and its inputs reference only safe data
//              nodes;
// Unsafe     - the phi and its inputs reference unsafe data nodes but there
//              is no reference back to the phi - need a graph walk
//              to determine if it is in a loop;
// UnsafeLoop - unsafe case when the phi references itself directly or through
//              an unsafe data node.
// Note: a safe data node is a node which could never reference itself during
// GVN transformations. For now it is Con, Proj, Phi, CastPP, CheckCastPP.
// I mark Phi nodes as safe nodes not only because they can reference themselves
// but also to prevent mistaking the fallthrough case inside an outer loop
// for a dead loop when the phi references itself through another phi.
PhiNode::LoopSafety PhiNode::simple_data_loop_check(Node *in) const {
  // It is unsafe loop if the phi node references itself directly.
  if (in == (Node*)this)
    return UnsafeLoop; // Unsafe loop
  // Unsafe loop if the phi node references itself through an unsafe data node.
  // Exclude cases with null inputs or data nodes which could reference
  // itself (safe for dead loops).
  if (in != NULL && !in->is_dead_loop_safe()) {
    // Check inputs of phi's inputs also.
    // It is much less expensive than a full graph walk.
    uint cnt = in->req();
    uint i = (in->is_Proj() && !in->is_CFG()) ? 0 : 1;
    for (; i < cnt; ++i) {
      Node* m = in->in(i);
      if (m == (Node*)this)
        return UnsafeLoop; // Unsafe loop
      if (m != NULL && !m->is_dead_loop_safe()) {
        // Check the most common case (about 30% of all cases):
        // phi->Load/Store->AddP->(ConP ConP Con)/(Parm Parm Con).
        Node *m1 = (m->is_AddP() && m->req() > 3) ? m->in(1) : NULL;
        if (m1 == (Node*)this)
          return UnsafeLoop; // Unsafe loop
        if (m1 != NULL && m1 == m->in(2) &&
            m1->is_dead_loop_safe() && m->in(3)->is_Con()) {
          continue; // Safe case
        }
        // The phi references an unsafe node - need full analysis.
        return Unsafe;
      }
    }
  }
  return Safe; // Safe case - we can optimize the phi node.
}

//------------------------------is_unsafe_data_reference-----------------------
// If phi can be reached through the data input - it is data loop.
bool PhiNode::is_unsafe_data_reference(Node *in) const {
  assert(req() > 1, "");
  // First, check simple cases when phi references itself directly or
  // through another node.
  LoopSafety safety = simple_data_loop_check(in);
  if (safety == UnsafeLoop)
    return true;  // phi references itself - unsafe loop
  else if (safety == Safe)
    return false; // Safe case - phi could be replaced with the unique input.

  // Unsafe case when we should go through data graph to determine
  // if the phi references itself.

  ResourceMark rm;

  Node_List nstack;
  VectorSet visited;

  nstack.push(in); // Start with unique input.
  visited.set(in->_idx);
  while (nstack.size() != 0) {
    Node* n = nstack.pop();
    uint cnt = n->req();
    uint i = (n->is_Proj() && !n->is_CFG()) ? 0 : 1;
    for (; i < cnt; i++) {
      Node* m = n->in(i);
      if (m == (Node*)this) {
        return true;    // Data loop
      }
      if (m != NULL && !m->is_dead_loop_safe()) { // Only look for unsafe cases.
        if (!visited.test_set(m->_idx))
          nstack.push(m);
      }
    }
  }
  return false; // The phi is not reachable from its inputs
}

// Is this Phi's region or some inputs to the region enqueued for IGVN
// and so could cause the region to be optimized out?
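// Informally: for each control input that is a projection, the loop below also
// peeks up the chain Proj -> If -> Bool -> Cmp; if any node on that chain is
// still on the IGVN worklist, the answer is "delay" and this Phi is re-enqueued.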
1863 | bool PhiNode::wait_for_region_igvn(PhaseGVN* phase) { | ||||||
1864 | PhaseIterGVN* igvn = phase->is_IterGVN(); | ||||||
1865 | Unique_Node_List& worklist = igvn->_worklist; | ||||||
1866 | bool delay = false; | ||||||
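| // For each region input, look down its Proj -> If -> Bool -> Cmp chain; if any of those nodes is still on the IGVN worklist, delay optimizing this Phi. | ||||||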
1867 | Node* r = in(0); | ||||||
1868 | for (uint j = 1; j < req(); j++) { | ||||||
1869 | Node* rc = r->in(j); | ||||||
1870 | Node* n = in(j); | ||||||
1871 | if (rc != NULL && | ||||||
1872 | rc->is_Proj()) { | ||||||
1873 | if (worklist.member(rc)) { | ||||||
1874 | delay = true; | ||||||
1875 | } else if (rc->in(0) != NULL && | ||||||
1876 | rc->in(0)->is_If()) { | ||||||
1877 | if (worklist.member(rc->in(0))) { | ||||||
1878 | delay = true; | ||||||
1879 | } else if (rc->in(0)->in(1) != NULL && | ||||||
1880 | rc->in(0)->in(1)->is_Bool()) { | ||||||
1881 | if (worklist.member(rc->in(0)->in(1))) { | ||||||
1882 | delay = true; | ||||||
1883 | } else if (rc->in(0)->in(1)->in(1) != NULL && | ||||||
1884 | rc->in(0)->in(1)->in(1)->is_Cmp()) { | ||||||
1885 | if (worklist.member(rc->in(0)->in(1)->in(1))) { | ||||||
1886 | delay = true; | ||||||
1887 | } | ||||||
1888 | } | ||||||
1889 | } | ||||||
1890 | } | ||||||
1891 | } | ||||||
1892 | } | ||||||
1893 | if (delay) { | ||||||
1894 | worklist.push(this); | ||||||
1895 | } | ||||||
1896 | return delay; | ||||||
1897 | } | ||||||
1898 | |||||||
1899 | //------------------------------Ideal------------------------------------------ | ||||||
1900 | // Return a node which is more "ideal" than the current node. Must preserve | ||||||
1901 | // the CFG, but we can still strip out dead paths. | ||||||
1902 | Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) { | ||||||
1903 | Node *r = in(0); // RegionNode | ||||||
1904 | assert(r != NULL && r->is_Region(), "this phi must have a region"); | ||||||
1905 | assert(r->in(0) == NULL || !r->in(0)->is_Root(), "not a specially hidden merge"); | ||||||
1906 | |||||||
1907 | // Note: During parsing, phis are often transformed before their regions. | ||||||
1908 | // This means we have to use type_or_null to defend against untyped regions. | ||||||
1909 | if( phase->type_or_null(r) == Type::TOP ) // Dead code? | ||||||
1910 | return NULL; // No change | ||||||
1911 | |||||||
1912 | Node *top = phase->C->top(); | ||||||
1913 | bool new_phi = (outcnt() == 0); // transforming new Phi | ||||||
1914 | // No change for igvn if new phi is not hooked | ||||||
1915 | if (new_phi && can_reshape) | ||||||
1916 | return NULL; | ||||||
1917 | |||||||
1918 | // There are 2 situations when only one valid phi's input is left | ||||||
1919 | // (in addition to Region input). | ||||||
1920 | // One: region is not loop - replace phi with this input. | ||||||
1921 | // Two: region is loop - replace phi with top since this data path is dead | ||||||
1922 | // and we need to break the dead data loop. | ||||||
1923 | Node* progress = NULL; // Record if any progress made | ||||||
1924 | for( uint j = 1; j < req(); ++j ){ // For all paths in | ||||||
1925 | // Check unreachable control paths | ||||||
1926 | Node* rc = r->in(j); | ||||||
1927 | Node* n = in(j); // Get the input | ||||||
1928 | if (rc == NULL || phase->type(rc) == Type::TOP) { | ||||||
1929 | if (n != top) { // Not already top? | ||||||
1930 | PhaseIterGVN *igvn = phase->is_IterGVN(); | ||||||
1931 | if (can_reshape && igvn != NULL) { | ||||||
1932 | igvn->_worklist.push(r); | ||||||
1933 | } | ||||||
1934 | // Nuke it down | ||||||
1935 | set_req_X(j, top, phase); | ||||||
1936 | progress = this; // Record progress | ||||||
1937 | } | ||||||
1938 | } | ||||||
1939 | } | ||||||
1940 | |||||||
1941 | if (can_reshape && outcnt() == 0) { | ||||||
1942 | // set_req() above may kill outputs if Phi is referenced | ||||||
1943 | // only by itself on the dead (top) control path. | ||||||
1944 | return top; | ||||||
1945 | } | ||||||
1946 | |||||||
1947 | bool uncasted = false; | ||||||
1948 | Node* uin = unique_input(phase, false); | ||||||
1949 | if (uin == NULL && can_reshape && | ||||||
1950 | // If there is a chance that the region can be optimized out do | ||||||
1951 | // not add a cast node that we can't remove yet. | ||||||
1952 | !wait_for_region_igvn(phase)) { | ||||||
1953 | uncasted = true; | ||||||
1954 | uin = unique_input(phase, true); | ||||||
1955 | } | ||||||
1956 | if (uin == top) { // Simplest case: no alive inputs. | ||||||
1957 | if (can_reshape) // IGVN transformation | ||||||
1958 | return top; | ||||||
1959 | else | ||||||
1960 | return NULL; // Identity will return TOP | ||||||
1961 | } else if (uin != NULL) { | ||||||
1962 | // Only one not-NULL unique input path is left. | ||||||
1963 | // Determine if this input is backedge of a loop. | ||||||
1964 | // (Skip new phis which have no uses and dead regions). | ||||||
1965 | if (outcnt() > 0 && r->in(0) != NULL) { | ||||||
1966 | if (is_data_loop(r->as_Region(), uin, phase)) { | ||||||
1967 | // Break this data loop to avoid creation of a dead loop. | ||||||
1968 | if (can_reshape) { | ||||||
1969 | return top; | ||||||
1970 | } else { | ||||||
1971 | // We can't return top if we are in Parse phase - cut inputs only and | ||||||
1972 | // let Identity handle the case. | ||||||
1973 | replace_edge(uin, top, phase); | ||||||
1974 | return NULL; | ||||||
1975 | } | ||||||
1976 | } | ||||||
1977 | } | ||||||
1978 | |||||||
1979 | if (uncasted) { | ||||||
1980 | // Add cast nodes between the phi to be removed and its unique input. | ||||||
1981 | // Wait until after parsing for the type information to propagate from the casts. | ||||||
1982 | assert(can_reshape, "Invalid during parsing"); | ||||||
1983 | const Type* phi_type = bottom_type(); | ||||||
1984 | // Add casts to carry the control dependency of the Phi that is | ||||||
1985 | // going away | ||||||
1986 | Node* cast = NULL; | ||||||
1987 | if (phi_type->isa_ptr()) { | ||||||
1988 | const Type* uin_type = phase->type(uin); | ||||||
1989 | if (!phi_type->isa_oopptr() && !uin_type->isa_oopptr()) { | ||||||
1990 | cast = ConstraintCastNode::make_cast(Op_CastPP, r, uin, phi_type, ConstraintCastNode::StrongDependency); | ||||||
1991 | } else { | ||||||
1992 | // Use a CastPP for a cast to not null and a CheckCastPP for | ||||||
1993 | // a cast to a new klass (and both if both null-ness and | ||||||
1994 | // klass change). | ||||||
1995 | |||||||
1996 | // If the type of phi is not null but the type of uin may be | ||||||
1997 | // null, uin's type must be casted to not null | ||||||
1998 | if (phi_type->join(TypePtr::NOTNULL) == phi_type->remove_speculative() && | ||||||
1999 | uin_type->join(TypePtr::NOTNULL) != uin_type->remove_speculative()) { | ||||||
2000 | cast = ConstraintCastNode::make_cast(Op_CastPP, r, uin, TypePtr::NOTNULL, ConstraintCastNode::StrongDependency); | ||||||
2001 | } | ||||||
2002 | |||||||
2003 | // If the type of phi and uin, both casted to not null, | ||||||
2004 | // differ the klass of uin must be (check)cast'ed to match | ||||||
2005 | // that of phi | ||||||
2006 | if (phi_type->join_speculative(TypePtr::NOTNULL) != uin_type->join_speculative(TypePtr::NOTNULL)) { | ||||||
2007 | Node* n = uin; | ||||||
2008 | if (cast != NULL) { | ||||||
2009 | cast = phase->transform(cast); | ||||||
2010 | n = cast; | ||||||
2011 | } | ||||||
2012 | cast = ConstraintCastNode::make_cast(Op_CheckCastPP, r, n, phi_type, ConstraintCastNode::StrongDependency); | ||||||
2013 | } | ||||||
2014 | if (cast == NULL) { | ||||||
2015 | cast = ConstraintCastNode::make_cast(Op_CastPP, r, uin, phi_type, ConstraintCastNode::StrongDependency); | ||||||
2016 | } | ||||||
2017 | } | ||||||
2018 | } else { | ||||||
2019 | cast = ConstraintCastNode::make_cast_for_type(r, uin, phi_type, ConstraintCastNode::StrongDependency); | ||||||
2020 | } | ||||||
2021 | assert(cast != NULL, "cast should be set"); | ||||||
2022 | cast = phase->transform(cast); | ||||||
2023 | // set all inputs to the new cast(s) so the Phi is removed by Identity | ||||||
2024 | PhaseIterGVN* igvn = phase->is_IterGVN(); | ||||||
2025 | for (uint i = 1; i < req(); i++) { | ||||||
2026 | set_req_X(i, cast, igvn); | ||||||
2027 | } | ||||||
2028 | uin = cast; | ||||||
2029 | } | ||||||
2030 | |||||||
2031 | // One unique input. | ||||||
2032 | debug_only(Node* ident = Identity(phase)); | ||||||
2033 | // The unique input must eventually be detected by the Identity call. | ||||||
2034 | #ifdef ASSERT | ||||||
2035 | if (ident != uin && !ident->is_top()) { | ||||||
2036 | // print this output before failing assert | ||||||
2037 | r->dump(3); | ||||||
2038 | this->dump(3); | ||||||
2039 | ident->dump(); | ||||||
2040 | uin->dump(); | ||||||
2041 | } | ||||||
2042 | #endif | ||||||
2043 | assert(ident == uin || ident->is_top(), "Identity must clean this up"); | ||||||
2044 | return NULL; | ||||||
2045 | } | ||||||
2046 | |||||||
2047 | Node* opt = NULL; | ||||||
2048 | int true_path = is_diamond_phi(); | ||||||
2049 | if (true_path != 0 && | ||||||
2050 | // If one of the diamond's branches is in the process of dying, then the Phi's input for that branch might transform | ||||||
2051 | // to top. If that happens replacing the Phi with an operation that consumes the Phi's inputs will cause the Phi | ||||||
2052 | // to be replaced by top. To prevent that, delay the transformation until the branch has a chance to be removed. | ||||||
2053 | !(can_reshape && wait_for_region_igvn(phase))) { | ||||||
2054 | // Check for CMove'ing identity. If it would be unsafe, | ||||||
2055 | // handle it here. In the safe case, let Identity handle it. | ||||||
2056 | Node* unsafe_id = is_cmove_id(phase, true_path); | ||||||
2057 | if( unsafe_id != NULL && is_unsafe_data_reference(unsafe_id) ) | ||||||
2058 | opt = unsafe_id; | ||||||
2059 | |||||||
2060 | // Check for simple convert-to-boolean pattern | ||||||
2061 | if( opt == NULL ) | ||||||
2062 | opt = is_x2logic(phase, this, true_path); | ||||||
2063 | |||||||
2064 | // Check for absolute value | ||||||
2065 | if( opt == NULL ) | ||||||
2066 | opt = is_absolute(phase, this, true_path); | ||||||
2067 | |||||||
2068 | // Check for conditional add | ||||||
2069 | if( opt == NULL && can_reshape ) | ||||||
2070 | opt = is_cond_add(phase, this, true_path); | ||||||
2071 | |||||||
2072 | // These 4 optimizations could subsume the phi: | ||||||
2073 | // have to check for a dead data loop creation. | ||||||
2074 | if( opt != NULL ) { | ||||||
2075 | if( opt == unsafe_id || is_unsafe_data_reference(opt) ) { | ||||||
2076 | // Found dead loop. | ||||||
2077 | if( can_reshape ) | ||||||
2078 | return top; | ||||||
2079 | // We can't return top if we are in Parse phase - cut inputs only | ||||||
2080 | // to stop further optimizations for this phi. Identity will return TOP. | ||||||
2081 | assert(req() == 3, "only diamond merge phi here"); | ||||||
2082 | set_req(1, top); | ||||||
2083 | set_req(2, top); | ||||||
2084 | return NULL; | ||||||
2085 | } else { | ||||||
2086 | return opt; | ||||||
2087 | } | ||||||
2088 | } | ||||||
2089 | } | ||||||
2090 | |||||||
2091 | // Check for merging identical values and split flow paths | ||||||
2092 | if (can_reshape) { | ||||||
2093 | opt = split_flow_path(phase, this); | ||||||
2094 | // This optimization only modifies phi - don't need to check for dead loop. | ||||||
2095 | assert(opt == NULL || opt == this, "do not elide phi"); | ||||||
2096 | if (opt != NULL) return opt; | ||||||
2097 | } | ||||||
2098 | |||||||
2099 | if (in(1) != NULL && in(1)->Opcode() == Op_AddP && can_reshape) { | ||||||
2100 | // Try to undo Phi of AddP: | ||||||
2101 | // (Phi (AddP base address offset) (AddP base2 address2 offset2)) | ||||||
2102 | // becomes: | ||||||
2103 | // newbase := (Phi base base2) | ||||||
2104 | // newaddress := (Phi address address2) | ||||||
2105 | // newoffset := (Phi offset offset2) | ||||||
2106 | // (AddP newbase newaddress newoffset) | ||||||
2107 | // | ||||||
2108 | // This occurs as a result of unsuccessful split_thru_phi and | ||||||
2109 | // interferes with taking advantage of addressing modes. See the | ||||||
2110 | // clone_shift_expressions code in matcher.cpp | ||||||
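| // The loop below checks that every input is such an AddP and tracks which of base/address/offset actually differ across paths; only the differing components get new Phis. | ||||||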
2111 | Node* addp = in(1); | ||||||
2112 | Node* base = addp->in(AddPNode::Base); | ||||||
2113 | Node* address = addp->in(AddPNode::Address); | ||||||
2114 | Node* offset = addp->in(AddPNode::Offset); | ||||||
2115 | if (base != NULL && address != NULL && offset != NULL && | ||||||
2116 | !base->is_top() && !address->is_top() && !offset->is_top()) { | ||||||
2117 | const Type* base_type = base->bottom_type(); | ||||||
2118 | const Type* address_type = address->bottom_type(); | ||||||
2119 | // make sure that all the inputs are similar to the first one, | ||||||
2120 | // i.e. AddP with base == address and same offset as first AddP | ||||||
2121 | bool doit = true; | ||||||
2122 | for (uint i = 2; i < req(); i++) { | ||||||
2123 | if (in(i) == NULL || | ||||||
2124 | in(i)->Opcode() != Op_AddP || | ||||||
2125 | in(i)->in(AddPNode::Base) == NULL || | ||||||
2126 | in(i)->in(AddPNode::Address) == NULL || | ||||||
2127 | in(i)->in(AddPNode::Offset) == NULL || | ||||||
2128 | in(i)->in(AddPNode::Base)->is_top() || | ||||||
2129 | in(i)->in(AddPNode::Address)->is_top() || | ||||||
2130 | in(i)->in(AddPNode::Offset)->is_top()) { | ||||||
2131 | doit = false; | ||||||
2132 | break; | ||||||
2133 | } | ||||||
2134 | if (in(i)->in(AddPNode::Base) != base) { | ||||||
2135 | base = NULL; | ||||||
2136 | } | ||||||
2137 | if (in(i)->in(AddPNode::Offset) != offset) { | ||||||
2138 | offset = NULL; | ||||||
2139 | } | ||||||
2140 | if (in(i)->in(AddPNode::Address) != address) { | ||||||
2141 | address = NULL; | ||||||
2142 | } | ||||||
2143 | // Accumulate type for resulting Phi | ||||||
2144 | base_type = base_type->meet_speculative(in(i)->in(AddPNode::Base)->bottom_type()); | ||||||
2145 | address_type = address_type->meet_speculative(in(i)->in(AddPNode::Address)->bottom_type()); | ||||||
2146 | } | ||||||
2147 | if (doit && base == NULL) { | ||||||
2148 | // Check for neighboring AddP nodes in a tree. | ||||||
2149 | // If they have a base, use it. | ||||||
2150 | for (DUIterator_Fast kmax, k = this->fast_outs(kmax); k < kmax; k++) { | ||||||
2151 | Node* u = this->fast_out(k); | ||||||
2152 | if (u->is_AddP()) { | ||||||
2153 | Node* base2 = u->in(AddPNode::Base); | ||||||
2154 | if (base2 != NULL && !base2->is_top()) { | ||||||
2155 | if (base == NULL) | ||||||
2156 | base = base2; | ||||||
2157 | else if (base != base2) | ||||||
2158 | { doit = false; break; } | ||||||
2159 | } | ||||||
2160 | } | ||||||
2161 | } | ||||||
2162 | } | ||||||
2163 | if (doit) { | ||||||
2164 | if (base == NULL) { | ||||||
2165 | base = new PhiNode(in(0), base_type, NULL); | ||||||
2166 | for (uint i = 1; i < req(); i++) { | ||||||
2167 | base->init_req(i, in(i)->in(AddPNode::Base)); | ||||||
2168 | } | ||||||
2169 | phase->is_IterGVN()->register_new_node_with_optimizer(base); | ||||||
2170 | } | ||||||
2171 | if (address == NULL) { | ||||||
2172 | address = new PhiNode(in(0), address_type, NULL); | ||||||
2173 | for (uint i = 1; i < req(); i++) { | ||||||
2174 | address->init_req(i, in(i)->in(AddPNode::Address)); | ||||||
2175 | } | ||||||
2176 | phase->is_IterGVN()->register_new_node_with_optimizer(address); | ||||||
2177 | } | ||||||
2178 | if (offset == NULL) { | ||||||
2179 | offset = new PhiNode(in(0), TypeX_X, NULL); | ||||||
2180 | for (uint i = 1; i < req(); i++) { | ||||||
2181 | offset->init_req(i, in(i)->in(AddPNode::Offset)); | ||||||
2182 | } | ||||||
2183 | phase->is_IterGVN()->register_new_node_with_optimizer(offset); | ||||||
2184 | } | ||||||
2185 | return new AddPNode(base, address, offset); | ||||||
2186 | } | ||||||
2187 | } | ||||||
2188 | } | ||||||
2189 | |||||||
2190 | // Split phis through memory merges, so that the memory merges will go away. | ||||||
2191 | // Piggy-back this transformation on the search for a unique input.... | ||||||
2192 | // It will be as if the merged memory is the unique value of the phi. | ||||||
2193 | // (Do not attempt this optimization unless parsing is complete. | ||||||
2194 | // It would make the parser's memory-merge logic sick.) | ||||||
2195 | // (MergeMemNode is not dead_loop_safe - need to check for dead loop.) | ||||||
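| // Two shapes are handled below: a Phi with a precise alias type selects its own slice out of each MergeMem input, while a bottom-memory Phi is expanded into a MergeMem of per-slice Phis. | ||||||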
2196 | if (progress == NULL && can_reshape && type() == Type::MEMORY) { | ||||||
2197 | // see if this phi should be sliced | ||||||
2198 | uint merge_width = 0; | ||||||
2199 | bool saw_self = false; | ||||||
2200 | for( uint i=1; i<req(); ++i ) {// For all paths in | ||||||
2201 | Node *ii = in(i); | ||||||
2202 | // TOP inputs should not be counted as safe inputs because if the | ||||||
2203 | // Phi references itself through all other inputs then splitting the | ||||||
2204 | // Phi through memory merges would create a dead loop at a later stage. | ||||||
2205 | if (ii == top) { | ||||||
2206 | return NULL; // Delay optimization until graph is cleaned. | ||||||
2207 | } | ||||||
2208 | if (ii->is_MergeMem()) { | ||||||
2209 | MergeMemNode* n = ii->as_MergeMem(); | ||||||
2210 | merge_width = MAX2(merge_width, n->req()); | ||||||
2211 | saw_self = saw_self || (n->base_memory() == this); | ||||||
2212 | } | ||||||
2213 | } | ||||||
2214 | |||||||
2215 | // This restriction is temporarily necessary to ensure termination: | ||||||
2216 | if (!saw_self && adr_type() == TypePtr::BOTTOM) merge_width = 0; | ||||||
2217 | |||||||
2218 | if (merge_width > Compile::AliasIdxRaw) { | ||||||
2219 | // found at least one non-empty MergeMem | ||||||
2220 | const TypePtr* at = adr_type(); | ||||||
2221 | if (at != TypePtr::BOTTOM) { | ||||||
2222 | // Patch the existing phi to select an input from the merge: | ||||||
2223 | // Phi:AT1(...MergeMem(m0, m1, m2)...) into | ||||||
2224 | // Phi:AT1(...m1...) | ||||||
2225 | int alias_idx = phase->C->get_alias_index(at); | ||||||
2226 | for (uint i=1; i<req(); ++i) { | ||||||
2227 | Node *ii = in(i); | ||||||
2228 | if (ii->is_MergeMem()) { | ||||||
2229 | MergeMemNode* n = ii->as_MergeMem(); | ||||||
2230 | // compress paths and change unreachable cycles to TOP | ||||||
2231 | // If not, we can update the input infinitely along a MergeMem cycle | ||||||
2232 | // Equivalent code is in MemNode::Ideal_common | ||||||
2233 | Node *m = phase->transform(n); | ||||||
2234 | if (outcnt() == 0) { // Above transform() may kill us! | ||||||
2235 | return top; | ||||||
2236 | } | ||||||
2237 | // If transformed to a MergeMem, get the desired slice | ||||||
2238 | // Otherwise the returned node represents memory for every slice | ||||||
2239 | Node *new_mem = (m->is_MergeMem()) ? | ||||||
2240 | m->as_MergeMem()->memory_at(alias_idx) : m; | ||||||
2241 | // Update input if it is progress over what we have now | ||||||
2242 | if (new_mem != ii) { | ||||||
2243 | set_req_X(i, new_mem, phase->is_IterGVN()); | ||||||
2244 | progress = this; | ||||||
2245 | } | ||||||
2246 | } | ||||||
2247 | } | ||||||
2248 | } else { | ||||||
2249 | // We know that at least one MergeMem->base_memory() == this | ||||||
2250 | // (saw_self == true). If all other inputs also reference this phi | ||||||
2251 | // (directly or through data nodes) - it is a dead loop. | ||||||
2252 | bool saw_safe_input = false; | ||||||
2253 | for (uint j = 1; j < req(); ++j) { | ||||||
2254 | Node* n = in(j); | ||||||
2255 | if (n->is_MergeMem()) { | ||||||
2256 | MergeMemNode* mm = n->as_MergeMem(); | ||||||
2257 | if (mm->base_memory() == this || mm->base_memory() == mm->empty_memory()) { | ||||||
2258 | // Skip this input if it references back to this phi or if the memory path is dead | ||||||
2259 | continue; | ||||||
2260 | } | ||||||
2261 | } | ||||||
2262 | if (!is_unsafe_data_reference(n)) { | ||||||
2263 | saw_safe_input = true; // found safe input | ||||||
2264 | break; | ||||||
2265 | } | ||||||
2266 | } | ||||||
2267 | if (!saw_safe_input) { | ||||||
2268 | // There is a dead loop: All inputs are either dead or reference back to this phi | ||||||
2269 | return top; | ||||||
2270 | } | ||||||
2271 | |||||||
2272 | // Phi(...MergeMem(m0, m1:AT1, m2:AT2)...) into | ||||||
2273 | // MergeMem(Phi(...m0...), Phi:AT1(...m1...), Phi:AT2(...m2...)) | ||||||
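| // Steps: clone this Phi as the new base memory, create a per-slice Phi for every non-empty slice of the MergeMem inputs, redirect self-loops, then replace this Phi with the resulting MergeMem. | ||||||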
2274 | PhaseIterGVN* igvn = phase->is_IterGVN(); | ||||||
2275 | assert(igvn != NULL, "sanity check"); | ||||||
2276 | Node* hook = new Node(1); | ||||||
2277 | PhiNode* new_base = (PhiNode*) clone(); | ||||||
2278 | // Must eagerly register phis, since they participate in loops. | ||||||
2279 | igvn->register_new_node_with_optimizer(new_base); | ||||||
2280 | hook->add_req(new_base); | ||||||
2281 | |||||||
2282 | MergeMemNode* result = MergeMemNode::make(new_base); | ||||||
2283 | for (uint i = 1; i < req(); ++i) { | ||||||
2284 | Node *ii = in(i); | ||||||
2285 | if (ii->is_MergeMem()) { | ||||||
2286 | MergeMemNode* n = ii->as_MergeMem(); | ||||||
2287 | for (MergeMemStream mms(result, n); mms.next_non_empty2(); ) { | ||||||
2288 | // If we have not seen this slice yet, make a phi for it. | ||||||
2289 | bool made_new_phi = false; | ||||||
2290 | if (mms.is_empty()) { | ||||||
2291 | Node* new_phi = new_base->slice_memory(mms.adr_type(phase->C)); | ||||||
2292 | made_new_phi = true; | ||||||
2293 | igvn->register_new_node_with_optimizer(new_phi); | ||||||
2294 | hook->add_req(new_phi); | ||||||
2295 | mms.set_memory(new_phi); | ||||||
2296 | } | ||||||
2297 | Node* phi = mms.memory(); | ||||||
2298 | assert(made_new_phi || phi->in(i) == n, "replace the i-th merge by a slice"); | ||||||
2299 | phi->set_req(i, mms.memory2()); | ||||||
2300 | } | ||||||
2301 | } | ||||||
2302 | } | ||||||
2303 | // Distribute all self-loops. | ||||||
2304 | { // (Extra braces to hide mms.) | ||||||
2305 | for (MergeMemStream mms(result); mms.next_non_empty(); ) { | ||||||
2306 | Node* phi = mms.memory(); | ||||||
2307 | for (uint i = 1; i < req(); ++i) { | ||||||
2308 | if (phi->in(i) == this) phi->set_req(i, phi); | ||||||
2309 | } | ||||||
2310 | } | ||||||
2311 | } | ||||||
2312 | // Already replace this phi node to cut it off from the graph to not interfere in dead loop checks during the | ||||||
2313 | // transformations of the new phi nodes below. Otherwise, we could wrongly conclude that there is no dead loop | ||||||
2314 | // because we are finding this phi node again. Also set the type of the new MergeMem node in case we are also | ||||||
2315 | // visiting it in the transformations below. | ||||||
2316 | igvn->replace_node(this, result); | ||||||
2317 | igvn->set_type(result, result->bottom_type()); | ||||||
2318 | |||||||
2319 | // now transform the new nodes, and return the mergemem | ||||||
2320 | for (MergeMemStream mms(result); mms.next_non_empty(); ) { | ||||||
2321 | Node* phi = mms.memory(); | ||||||
2322 | mms.set_memory(phase->transform(phi)); | ||||||
2323 | } | ||||||
2324 | hook->destruct(igvn); | ||||||
2325 | // Replace self with the result. | ||||||
2326 | return result; | ||||||
2327 | } | ||||||
2328 | } | ||||||
2329 | // | ||||||
2330 | // Other optimizations on the memory chain | ||||||
2331 | // | ||||||
2332 | const TypePtr* at = adr_type(); | ||||||
2333 | for( uint i=1; i<req(); ++i ) {// For all paths in | ||||||
2334 | Node *ii = in(i); | ||||||
2335 | Node *new_in = MemNode::optimize_memory_chain(ii, at, NULL, phase); | ||||||
2336 | if (ii != new_in ) { | ||||||
2337 | set_req(i, new_in); | ||||||
2338 | progress = this; | ||||||
2339 | } | ||||||
2340 | } | ||||||
2341 | } | ||||||
2342 | |||||||
2343 | #ifdef _LP64 | ||||||
2344 | // Push DecodeN/DecodeNKlass down through phi. | ||||||
2345 | // The rest of the phi graph will then be transformed by splitting EncodeP nodes up through the phis. | ||||||
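| // Shape: Phi (DecodeN n1) (DecodeN n2) ... => DecodeN (Phi n1 n2 ...), with remaining non-Decode inputs wrapped in EncodeP/EncodePKlass. | ||||||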
2346 | if ((UseCompressedOops || UseCompressedClassPointers) && can_reshape && progress == NULL) { | ||||||
2347 | bool may_push = true; | ||||||
2348 | bool has_decodeN = false; | ||||||
2349 | bool is_decodeN = false; | ||||||
2350 | for (uint i=1; i<req(); ++i) {// For all paths in | ||||||
2351 | Node *ii = in(i); | ||||||
2352 | if (ii->is_DecodeNarrowPtr() && ii->bottom_type() == bottom_type()) { | ||||||
2353 | // Do the optimization only if a non-dead path exists. | ||||||
2354 | if (ii->in(1)->bottom_type() != Type::TOP) { | ||||||
2355 | has_decodeN = true; | ||||||
2356 | is_decodeN = ii->is_DecodeN(); | ||||||
2357 | } | ||||||
2358 | } else if (!ii->is_Phi()) { | ||||||
2359 | may_push = false; | ||||||
2360 | } | ||||||
2361 | } | ||||||
2362 | |||||||
2363 | if (has_decodeN && may_push) { | ||||||
2364 | PhaseIterGVN *igvn = phase->is_IterGVN(); | ||||||
2365 | // Make narrow type for new phi. | ||||||
2366 | const Type* narrow_t; | ||||||
2367 | if (is_decodeN) { | ||||||
2368 | narrow_t = TypeNarrowOop::make(this->bottom_type()->is_ptr()); | ||||||
2369 | } else { | ||||||
2370 | narrow_t = TypeNarrowKlass::make(this->bottom_type()->is_ptr()); | ||||||
2371 | } | ||||||
2372 | PhiNode* new_phi = new PhiNode(r, narrow_t); | ||||||
2373 | uint orig_cnt = req(); | ||||||
2374 | for (uint i=1; i<req(); ++i) {// For all paths in | ||||||
2375 | Node *ii = in(i); | ||||||
2376 | Node* new_ii = NULL; | ||||||
2377 | if (ii->is_DecodeNarrowPtr()) { | ||||||
2378 | assert(ii->bottom_type() == bottom_type(), "sanity"); | ||||||
2379 | new_ii = ii->in(1); | ||||||
2380 | } else { | ||||||
2381 | assert(ii->is_Phi(), "sanity"); | ||||||
2382 | if (ii->as_Phi() == this) { | ||||||
2383 | new_ii = new_phi; | ||||||
2384 | } else { | ||||||
2385 | if (is_decodeN) { | ||||||
2386 | new_ii = new EncodePNode(ii, narrow_t); | ||||||
2387 | } else { | ||||||
2388 | new_ii = new EncodePKlassNode(ii, narrow_t); | ||||||
2389 | } | ||||||
2390 | igvn->register_new_node_with_optimizer(new_ii); | ||||||
2391 | } | ||||||
2392 | } | ||||||
2393 | new_phi->set_req(i, new_ii); | ||||||
2394 | } | ||||||
2395 | igvn->register_new_node_with_optimizer(new_phi, this); | ||||||
2396 | if (is_decodeN) { | ||||||
2397 | progress = new DecodeNNode(new_phi, bottom_type()); | ||||||
2398 | } else { | ||||||
2399 | progress = new DecodeNKlassNode(new_phi, bottom_type()); | ||||||
2400 | } | ||||||
2401 | } | ||||||
2402 | } | ||||||
2403 | #endif | ||||||
2404 | |||||||
2405 | // Phi (VB ... VB) => VB (Phi ...) (Phi ...) | ||||||
2406 | if (EnableVectorReboxing && can_reshape && progress == NULL && type()->isa_oopptr()) { | ||||||
2407 | progress = merge_through_phi(this, phase->is_IterGVN()); | ||||||
2408 | } | ||||||
2409 | |||||||
2410 | return progress; // Return any progress | ||||||
2411 | } | ||||||
2412 | |||||||
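| // Clone the Phi structure rooted at root_phi using element type t. Inner Phi nodes are duplicated; each VectorBox leaf is replaced by its input at index c (Box or Value). | ||||||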
2413 | Node* PhiNode::clone_through_phi(Node* root_phi, const Type* t, uint c, PhaseIterGVN* igvn) { | ||||||
2414 | Node_Stack stack(1); | ||||||
2415 | VectorSet visited; | ||||||
2416 | Node_List node_map; | ||||||
2417 | |||||||
2418 | stack.push(root_phi, 1); // ignore control | ||||||
2419 | visited.set(root_phi->_idx); | ||||||
2420 | |||||||
2421 | Node* new_phi = new PhiNode(root_phi->in(0), t); | ||||||
2422 | node_map.map(root_phi->_idx, new_phi); | ||||||
2423 | |||||||
2424 | while (stack.is_nonempty()) { | ||||||
2425 | Node* n = stack.node(); | ||||||
2426 | uint idx = stack.index(); | ||||||
2427 | assert(n->is_Phi(), "not a phi"); | ||||||
2428 | if (idx < n->req()) { | ||||||
2429 | stack.set_index(idx + 1); | ||||||
2430 | Node* def = n->in(idx); | ||||||
2431 | if (def == NULL) { | ||||||
2432 | continue; // ignore dead path | ||||||
2433 | } else if (def->is_Phi()) { // inner node | ||||||
2434 | Node* new_phi = node_map[n->_idx]; | ||||||
2435 | if (!visited.test_set(def->_idx)) { // not visited yet | ||||||
2436 | node_map.map(def->_idx, new PhiNode(def->in(0), t)); | ||||||
2437 | stack.push(def, 1); // ignore control | ||||||
2438 | } | ||||||
2439 | Node* new_in = node_map[def->_idx]; | ||||||
2440 | new_phi->set_req(idx, new_in); | ||||||
2441 | } else if (def->Opcode() == Op_VectorBox) { // leaf | ||||||
2442 | assert(n->is_Phi(), "not a phi"); | ||||||
2443 | Node* new_phi = node_map[n->_idx]; | ||||||
2444 | new_phi->set_req(idx, def->in(c)); | ||||||
2445 | } else { | ||||||
2446 | assert(false, "not optimizeable"); | ||||||
2447 | return NULL; | ||||||
2448 | } | ||||||
2449 | } else { | ||||||
2450 | Node* new_phi = node_map[n->_idx]; | ||||||
2451 | igvn->register_new_node_with_optimizer(new_phi, n); | ||||||
2452 | stack.pop(); | ||||||
2453 | } | ||||||
2454 | } | ||||||
2455 | return new_phi; | ||||||
2456 | } | ||||||
2457 | |||||||
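| // Walk the Phi graph rooted at root_phi. If every leaf is a VectorBox with the same box and vector type, rebuild the graph as a single VectorBox over two cloned Phi graphs: one carrying the box oop and one carrying the vector value. | ||||||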
2458 | Node* PhiNode::merge_through_phi(Node* root_phi, PhaseIterGVN* igvn) { | ||||||
2459 | Node_Stack stack(1); | ||||||
2460 | VectorSet visited; | ||||||
2461 | |||||||
2462 | stack.push(root_phi, 1); // ignore control | ||||||
2463 | visited.set(root_phi->_idx); | ||||||
2464 | |||||||
2465 | VectorBoxNode* cached_vbox = NULL; | ||||||
2466 | while (stack.is_nonempty()) { | ||||||
2467 | Node* n = stack.node(); | ||||||
2468 | uint idx = stack.index(); | ||||||
2469 | if (idx < n->req()) { | ||||||
2470 | stack.set_index(idx + 1); | ||||||
2471 | Node* in = n->in(idx); | ||||||
2472 | if (in == NULL) { | ||||||
2473 | continue; // ignore dead path | ||||||
2474 | } else if (in->isa_Phi()) { | ||||||
2475 | if (!visited.test_set(in->_idx)) { | ||||||
2476 | stack.push(in, 1); // ignore control | ||||||
2477 | } | ||||||
2478 | } else if (in->Opcode() == Op_VectorBox) { | ||||||
2479 | VectorBoxNode* vbox = static_cast<VectorBoxNode*>(in); | ||||||
2480 | if (cached_vbox == NULL) { | ||||||
2481 | cached_vbox = vbox; | ||||||
2482 | } else if (vbox->vec_type() != cached_vbox->vec_type()) { | ||||||
2483 | // TODO: vector type mismatch can be handled with additional reinterpret casts | ||||||
2484 | assert(Type::cmp(vbox->vec_type(), cached_vbox->vec_type()) != 0, "inconsistent"); | ||||||
2485 | return NULL; // not optimizable: vector type mismatch | ||||||
2486 | } else if (vbox->box_type() != cached_vbox->box_type()) { | ||||||
2487 | assert(Type::cmp(vbox->box_type(), cached_vbox->box_type()) != 0, "inconsistent"); | ||||||
2488 | return NULL; // not optimizable: box type mismatch | ||||||
2489 | } | ||||||
2490 | } else { | ||||||
2491 | return NULL; // not optimizable: neither Phi nor VectorBox | ||||||
2492 | } | ||||||
2493 | } else { | ||||||
2494 | stack.pop(); | ||||||
2495 | } | ||||||
2496 | } | ||||||
2497 | assert(cached_vbox != NULL, "sanity"); | ||||||
2498 | const TypeInstPtr* btype = cached_vbox->box_type(); | ||||||
2499 | const TypeVect* vtype = cached_vbox->vec_type(); | ||||||
2500 | Node* new_vbox_phi = clone_through_phi(root_phi, btype, VectorBoxNode::Box, igvn); | ||||||
2501 | Node* new_vect_phi = clone_through_phi(root_phi, vtype, VectorBoxNode::Value, igvn); | ||||||
2502 | return new VectorBoxNode(igvn->C, new_vbox_phi, new_vect_phi, btype, vtype); | ||||||
2503 | } | ||||||
2504 | |||||||
2505 | bool PhiNode::is_data_loop(RegionNode* r, Node* uin, const PhaseGVN* phase) { | ||||||
2506 | // First, take the short cut when we know it is a loop and the EntryControl data path is dead. | ||||||
2507 | // The loop node may only have one input because the entry path was removed in PhaseIdealLoop::Dominators(). | ||||||
2508 | // Then, check if there is a data loop when the phi references itself directly or through other data nodes. | ||||||
2509 | assert(!r->is_Loop() || r->req() <= 3, "Loop node should have 3 or less inputs"); | ||||||
2510 | const bool is_loop = (r->is_Loop() && r->req() == 3); | ||||||
2511 | const Node* top = phase->C->top(); | ||||||
2512 | if (is_loop) { | ||||||
2513 | return !uin->eqv_uncast(in(LoopNode::EntryControl)); | ||||||
2514 | } else { | ||||||
2515 | // We have a data loop either with an unsafe data reference or if a region is unreachable. | ||||||
2516 | return is_unsafe_data_reference(uin) | ||||||
2517 | || (r->req() == 3 && (r->in(1) != top && r->in(2) == top && r->is_unreachable_region(phase))); | ||||||
2518 | } | ||||||
2519 | } | ||||||
2520 | |||||||
2521 | //------------------------------is_tripcount----------------------------------- | ||||||
2522 | bool PhiNode::is_tripcount(BasicType bt) const { | ||||||
2523 | return (in(0) != NULL && in(0)->is_BaseCountedLoop() && | ||||||
2524 | in(0)->as_BaseCountedLoop()->bt() == bt && | ||||||
2525 | in(0)->as_BaseCountedLoop()->phi() == this); | ||||||
2526 | } | ||||||
2527 | |||||||
2528 | //------------------------------in_RegMask------------------------------------ | ||||||
2529 | const RegMask &PhiNode::in_RegMask(uint i) const { | ||||||
2530 | return i ? out_RegMask() : RegMask::Empty; | ||||||
2531 | } | ||||||
2532 | |||||||
2533 | const RegMask &PhiNode::out_RegMask() const { | ||||||
2534 | uint ideal_reg = _type->ideal_reg(); | ||||||
2535 | assert( ideal_reg != Node::NotAMachineReg, "invalid type at Phi" ); | ||||||
2536 | if( ideal_reg == 0 ) return RegMask::Empty; | ||||||
2537 | assert(ideal_reg != Op_RegFlags, "flags register is not spillable"); | ||||||
2538 | return *(Compile::current()->matcher()->idealreg2spillmask[ideal_reg]); | ||||||
2539 | } | ||||||
2540 | |||||||
2541 | #ifndef PRODUCT | ||||||
2542 | void PhiNode::related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const { | ||||||
2543 | // For a PhiNode, the set of related nodes includes all inputs till level 2, | ||||||
2544 | // and all outputs till level 1. In compact mode, inputs till level 1 are | ||||||
2545 | // collected. | ||||||
2546 | this->collect_nodes(in_rel, compact ? 1 : 2, false, false); | ||||||
2547 | this->collect_nodes(out_rel, -1, false, false); | ||||||
2548 | } | ||||||
2549 | |||||||
2550 | void PhiNode::dump_spec(outputStream *st) const { | ||||||
2551 | TypeNode::dump_spec(st); | ||||||
2552 | if (is_tripcount(T_INT) || is_tripcount(T_LONG)) { | ||||||
2553 | st->print(" #tripcount"); | ||||||
2554 | } | ||||||
2555 | } | ||||||
2556 | #endif | ||||||
2557 | |||||||
2558 | |||||||
2559 | //============================================================================= | ||||||
2560 | const Type* GotoNode::Value(PhaseGVN* phase) const { | ||||||
2561 | // If the input is reachable, then we are executed. | ||||||
2562 | // If the input is not reachable, then we are not executed. | ||||||
2563 | return phase->type(in(0)); | ||||||
2564 | } | ||||||
2565 | |||||||
2566 | Node* GotoNode::Identity(PhaseGVN* phase) { | ||||||
2567 | return in(0); // Simple copy of incoming control | ||||||
2568 | } | ||||||
2569 | |||||||
2570 | const RegMask &GotoNode::out_RegMask() const { | ||||||
2571 | return RegMask::Empty; | ||||||
2572 | } | ||||||
2573 | |||||||
2574 | #ifndef PRODUCT | ||||||
2575 | //-----------------------------related----------------------------------------- | ||||||
2576 | // The related nodes of a GotoNode are all inputs at level 1, as well as the | ||||||
2577 | // outputs at level 1. This is regardless of compact mode. | ||||||
2578 | void GotoNode::related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const { | ||||||
2579 | this->collect_nodes(in_rel, 1, false, false); | ||||||
2580 | this->collect_nodes(out_rel, -1, false, false); | ||||||
2581 | } | ||||||
2582 | #endif | ||||||
2583 | |||||||
2584 | |||||||
2585 | //============================================================================= | ||||||
2586 | const RegMask &JumpNode::out_RegMask() const { | ||||||
2587 | return RegMask::Empty; | ||||||
2588 | } | ||||||
2589 | |||||||
2590 | #ifndef PRODUCT | ||||||
2591 | //-----------------------------related----------------------------------------- | ||||||
2592 | // The related nodes of a JumpNode are all inputs at level 1, as well as the | ||||||
2593 | // outputs at level 2 (to include actual jump targets beyond projection nodes). | ||||||
2594 | // This is regardless of compact mode. | ||||||
2595 | void JumpNode::related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const { | ||||||
2596 | this->collect_nodes(in_rel, 1, false, false); | ||||||
2597 | this->collect_nodes(out_rel, -2, false, false); | ||||||
2598 | } | ||||||
2599 | #endif | ||||||
2600 | |||||||
2601 | //============================================================================= | ||||||
2602 | const RegMask &JProjNode::out_RegMask() const { | ||||||
2603 | return RegMask::Empty; | ||||||
2604 | } | ||||||
2605 | |||||||
2606 | //============================================================================= | ||||||
2607 | const RegMask &CProjNode::out_RegMask() const { | ||||||
2608 | return RegMask::Empty; | ||||||
2609 | } | ||||||
2610 | |||||||
2611 | |||||||
2612 | |||||||
2613 | //============================================================================= | ||||||
2614 | |||||||
2615 | uint PCTableNode::hash() const { return Node::hash() + _size; } | ||||||
2616 | bool PCTableNode::cmp( const Node &n ) const | ||||||
2617 | { return _size == ((PCTableNode&)n)._size; } | ||||||
2618 | |||||||
2619 | const Type *PCTableNode::bottom_type() const { | ||||||
2620 | const Type** f = TypeTuple::fields(_size); | ||||||
2621 | for( uint i = 0; i < _size; i++ ) f[i] = Type::CONTROL; | ||||||
2622 | return TypeTuple::make(_size, f); | ||||||
2623 | } | ||||||
2624 | |||||||
2625 | //------------------------------Value------------------------------------------ | ||||||
2626 | // Compute the type of the PCTableNode. If reachable it is a tuple of | ||||||
2627 | // Control, otherwise the table targets are not reachable | ||||||
2628 | const Type* PCTableNode::Value(PhaseGVN* phase) const { | ||||||
2629 | if( phase->type(in(0)) == Type::CONTROL ) | ||||||
2630 | return bottom_type(); | ||||||
2631 | return Type::TOP; // All paths dead? Then so are we | ||||||
2632 | } | ||||||
2633 | |||||||
2634 | //------------------------------Ideal------------------------------------------ | ||||||
2635 | // Return a node which is more "ideal" than the current node. Strip out | ||||||
2636 | // control copies | ||||||
2637 | Node *PCTableNode::Ideal(PhaseGVN *phase, bool can_reshape) { | ||||||
2638 | return remove_dead_region(phase, can_reshape) ? this : NULL__null; | ||||||
2639 | } | ||||||
2640 | |||||||
2641 | //============================================================================= | ||||||
2642 | uint JumpProjNode::hash() const { | ||||||
2643 | return Node::hash() + _dest_bci; | ||||||
2644 | } | ||||||
2645 | |||||||
2646 | bool JumpProjNode::cmp( const Node &n ) const { | ||||||
2647 | return ProjNode::cmp(n) && | ||||||
2648 | _dest_bci == ((JumpProjNode&)n)._dest_bci; | ||||||
2649 | } | ||||||
2650 | |||||||
2651 | #ifndef PRODUCT | ||||||
2652 | void JumpProjNode::dump_spec(outputStream *st) const { | ||||||
2653 | ProjNode::dump_spec(st); | ||||||
2654 | st->print("@bci %d ",_dest_bci); | ||||||
2655 | } | ||||||
2656 | |||||||
2657 | void JumpProjNode::dump_compact_spec(outputStream *st) const { | ||||||
2658 | ProjNode::dump_compact_spec(st); | ||||||
2659 | st->print("(%d)%d@%d", _switch_val, _proj_no, _dest_bci); | ||||||
2660 | } | ||||||
2661 | |||||||
2662 | void JumpProjNode::related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const { | ||||||
2663 | // The related nodes of a JumpProjNode are its inputs and outputs at level 1. | ||||||
2664 | this->collect_nodes(in_rel, 1, false, false); | ||||||
2665 | this->collect_nodes(out_rel, -1, false, false); | ||||||
2666 | } | ||||||
2667 | #endif | ||||||
2668 | |||||||
2669 | //============================================================================= | ||||||
2670 | //------------------------------Value------------------------------------------ | ||||||
2671 | // Check for being unreachable, or for coming from a Rethrow. Rethrows cannot | ||||||
2672 | // have the default "fall_through_index" path. | ||||||
2673 | const Type* CatchNode::Value(PhaseGVN* phase) const { | ||||||
2674 | // Unreachable? Then so are all paths from here. | ||||||
2675 | if( phase->type(in(0)) == Type::TOP ) return Type::TOP; | ||||||
2676 | // First assume all paths are reachable | ||||||
2677 | const Type** f = TypeTuple::fields(_size); | ||||||
2678 | for( uint i = 0; i < _size; i++ ) f[i] = Type::CONTROL; | ||||||
2679 | // Identify cases that will always throw an exception | ||||||
2680 | // () rethrow call | ||||||
2681 | // () virtual or interface call with NULL receiver | ||||||
2682 | // () call is a check cast with incompatible arguments | ||||||
2683 | if( in(1)->is_Proj() ) { | ||||||
2684 | Node *i10 = in(1)->in(0); | ||||||
2685 | if( i10->is_Call() ) { | ||||||
2686 | CallNode *call = i10->as_Call(); | ||||||
2687 | // Rethrows always throw exceptions, never return | ||||||
2688 | if (call->entry_point() == OptoRuntime::rethrow_stub()) { | ||||||
2689 | f[CatchProjNode::fall_through_index] = Type::TOP; | ||||||
2690 | } else if( call->req() > TypeFunc::Parms ) { | ||||||
2691 | const Type *arg0 = phase->type( call->in(TypeFunc::Parms) ); | ||||||
2692 | // Check for null receiver to virtual or interface calls | ||||||
2693 | if( call->is_CallDynamicJava() && | ||||||
2694 | arg0->higher_equal(TypePtr::NULL_PTR) ) { | ||||||
2695 | f[CatchProjNode::fall_through_index] = Type::TOP; | ||||||
2696 | } | ||||||
2697 | } // End of if not a runtime stub | ||||||
2698 | } // End of if have call above me | ||||||
2699 | } // End of slot 1 is not a projection | ||||||
2700 | return TypeTuple::make(_size, f); | ||||||
2701 | } | ||||||
2702 | |||||||
2703 | //============================================================================= | ||||||
2704 | uint CatchProjNode::hash() const { | ||||||
2705 | return Node::hash() + _handler_bci; | ||||||
2706 | } | ||||||
2707 | |||||||
2708 | |||||||
2709 | bool CatchProjNode::cmp( const Node &n ) const { | ||||||
2710 | return ProjNode::cmp(n) && | ||||||
2711 | _handler_bci == ((CatchProjNode&)n)._handler_bci; | ||||||
2712 | } | ||||||
2713 | |||||||
2714 | |||||||
2715 | //------------------------------Identity--------------------------------------- | ||||||
2716 | // If only 1 target is possible, choose it if it is the main control | ||||||
2717 | Node* CatchProjNode::Identity(PhaseGVN* phase) { | ||||||
2718 | // If my value is control and no other value is, then treat as ID | ||||||
2719 | const TypeTuple *t = phase->type(in(0))->is_tuple(); | ||||||
2720 | if (t->field_at(_con) != Type::CONTROL) return this; | ||||||
2721 | // If we remove the last CatchProj and elide the Catch/CatchProj, then we | ||||||
2722 | // also remove any exception table entry. Thus we must know the call | ||||||
2723 | // feeding the Catch will not really throw an exception. This is ok for | ||||||
2724 | // the main fall-thru control (happens when we know a call can never throw | ||||||
2725 | // an exception) or for "rethrow", because a further optimization will | ||||||
2726 | // yank the rethrow (happens when we inline a function that can throw an | ||||||
2727 | // exception and the caller has no handler). Not legal, e.g., for passing | ||||||
2728 | // a NULL receiver to a v-call, or passing bad types to a slow-check-cast. | ||||||
2729 | // These cases MUST throw an exception via the runtime system, so the VM | ||||||
2730 | // will be looking for a table entry. | ||||||
2731 | Node *proj = in(0)->in(1); // Expect a proj feeding CatchNode | ||||||
2732 | CallNode *call; | ||||||
2733 | if (_con != TypeFunc::Control && // Bail out if not the main control. | ||||||
2734 | !(proj->is_Proj() && // AND NOT a rethrow | ||||||
2735 | proj->in(0)->is_Call() && | ||||||
2736 | (call = proj->in(0)->as_Call()) && | ||||||
2737 | call->entry_point() == OptoRuntime::rethrow_stub())) | ||||||
2738 | return this; | ||||||
2739 | |||||||
2740 | // Search for any other path being control | ||||||
2741 | for (uint i = 0; i < t->cnt(); i++) { | ||||||
2742 | if (i != _con && t->field_at(i) == Type::CONTROL) | ||||||
2743 | return this; | ||||||
2744 | } | ||||||
2745 | // Only my path is possible; I am identity on control to the jump | ||||||
2746 | return in(0)->in(0); | ||||||
2747 | } | ||||||
2748 | |||||||
2749 | |||||||
2750 | #ifndef PRODUCT | ||||||
2751 | void CatchProjNode::dump_spec(outputStream *st) const { | ||||||
2752 | ProjNode::dump_spec(st); | ||||||
2753 | st->print("@bci %d ",_handler_bci); | ||||||
2754 | } | ||||||
2755 | #endif | ||||||
2756 | |||||||
2757 | //============================================================================= | ||||||
2758 | //------------------------------Identity--------------------------------------- | ||||||
2759 | // Check for CreateEx being Identity. | ||||||
2760 | Node* CreateExNode::Identity(PhaseGVN* phase) { | ||||||
2761 | if( phase->type(in(1)) == Type::TOP ) return in(1); | ||||||
2762 | if( phase->type(in(0)) == Type::TOP ) return in(0); | ||||||
2763 | // We only come from CatchProj, unless the CatchProj goes away. | ||||||
2764 | // If the CatchProj is optimized away, then we just carry the | ||||||
2765 | // exception oop through. | ||||||
2766 | CallNode *call = in(1)->in(0)->as_Call(); | ||||||
2767 | |||||||
2768 | return ( in(0)->is_CatchProj() && in(0)->in(0)->in(1) == in(1) ) | ||||||
2769 | ? this | ||||||
2770 | : call->in(TypeFunc::Parms); | ||||||
2771 | } | ||||||
2772 | |||||||
2773 | //============================================================================= | ||||||
2774 | //------------------------------Value------------------------------------------ | ||||||
2775 | // Check for being unreachable. | ||||||
2776 | const Type* NeverBranchNode::Value(PhaseGVN* phase) const { | ||||||
2777 | if (!in(0) || in(0)->is_top()) return Type::TOP; | ||||||
2778 | return bottom_type(); | ||||||
2779 | } | ||||||
2780 | |||||||
2781 | //------------------------------Ideal------------------------------------------ | ||||||
2782 | // Check for no longer being part of a loop | ||||||
2783 | Node *NeverBranchNode::Ideal(PhaseGVN *phase, bool can_reshape) { | ||||||
2784 | if (can_reshape && !in(0)->is_Region()) { | ||||||
2785 | // Dead code elimination can sometimes delete this projection so | ||||||
2786 | // if it's not there, there's nothing to do. | ||||||
2787 | Node* fallthru = proj_out_or_null(0); | ||||||
2788 | if (fallthru != NULL) { | ||||||
2789 | phase->is_IterGVN()->replace_node(fallthru, in(0)); | ||||||
2790 | } | ||||||
2791 | return phase->C->top(); | ||||||
2792 | } | ||||||
2793 | return NULL; | ||||||
2794 | } | ||||||
2795 | |||||||
2796 | #ifndef PRODUCT | ||||||
2797 | void NeverBranchNode::format( PhaseRegAlloc *ra_, outputStream *st) const { | ||||||
2798 | st->print("%s", Name()); | ||||||
2799 | } | ||||||
2800 | #endif |
1 | /* |
2 | * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. |
3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 | * |
5 | * This code is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 only, as |
7 | * published by the Free Software Foundation. |
8 | * |
9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
12 | * version 2 for more details (a copy is included in the LICENSE file that |
13 | * accompanied this code). |
14 | * |
15 | * You should have received a copy of the GNU General Public License version |
16 | * 2 along with this work; if not, write to the Free Software Foundation, |
17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
18 | * |
19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
20 | * or visit www.oracle.com if you need additional information or have any |
21 | * questions. |
22 | * |
23 | */ |
24 | |
25 | #ifndef SHARE_OPTO_NODE_HPP |
26 | #define SHARE_OPTO_NODE_HPP |
27 | |
28 | #include "libadt/vectset.hpp" |
29 | #include "opto/compile.hpp" |
30 | #include "opto/type.hpp" |
31 | #include "utilities/copy.hpp" |
32 | |
33 | // Portions of code courtesy of Clifford Click |
34 | |
35 | // Optimization - Graph Style |
36 | |
37 | |
38 | class AbstractLockNode; |
39 | class AddNode; |
40 | class AddPNode; |
41 | class AliasInfo; |
42 | class AllocateArrayNode; |
43 | class AllocateNode; |
44 | class ArrayCopyNode; |
45 | class BaseCountedLoopNode; |
46 | class BaseCountedLoopEndNode; |
47 | class BlackholeNode; |
48 | class Block; |
49 | class BoolNode; |
50 | class BoxLockNode; |
51 | class CMoveNode; |
52 | class CallDynamicJavaNode; |
53 | class CallJavaNode; |
54 | class CallLeafNode; |
55 | class CallLeafNoFPNode; |
56 | class CallNode; |
57 | class CallRuntimeNode; |
58 | class CallNativeNode; |
59 | class CallStaticJavaNode; |
60 | class CastFFNode; |
61 | class CastDDNode; |
62 | class CastVVNode; |
63 | class CastIINode; |
64 | class CastLLNode; |
65 | class CatchNode; |
66 | class CatchProjNode; |
67 | class CheckCastPPNode; |
68 | class ClearArrayNode; |
69 | class CmpNode; |
70 | class CodeBuffer; |
71 | class ConstraintCastNode; |
72 | class ConNode; |
73 | class CompareAndSwapNode; |
74 | class CompareAndExchangeNode; |
75 | class CountedLoopNode; |
76 | class CountedLoopEndNode; |
77 | class DecodeNarrowPtrNode; |
78 | class DecodeNNode; |
79 | class DecodeNKlassNode; |
80 | class EncodeNarrowPtrNode; |
81 | class EncodePNode; |
82 | class EncodePKlassNode; |
83 | class FastLockNode; |
84 | class FastUnlockNode; |
85 | class HaltNode; |
86 | class IfNode; |
87 | class IfProjNode; |
88 | class IfFalseNode; |
89 | class IfTrueNode; |
90 | class InitializeNode; |
91 | class JVMState; |
92 | class JumpNode; |
93 | class JumpProjNode; |
94 | class LoadNode; |
95 | class LoadStoreNode; |
96 | class LoadStoreConditionalNode; |
97 | class LockNode; |
98 | class LongCountedLoopNode; |
99 | class LongCountedLoopEndNode; |
100 | class LoopNode; |
101 | class LShiftNode; |
102 | class MachBranchNode; |
103 | class MachCallDynamicJavaNode; |
104 | class MachCallJavaNode; |
105 | class MachCallLeafNode; |
106 | class MachCallNode; |
107 | class MachCallNativeNode; |
108 | class MachCallRuntimeNode; |
109 | class MachCallStaticJavaNode; |
110 | class MachConstantBaseNode; |
111 | class MachConstantNode; |
112 | class MachGotoNode; |
113 | class MachIfNode; |
114 | class MachJumpNode; |
115 | class MachNode; |
116 | class MachNullCheckNode; |
117 | class MachProjNode; |
118 | class MachReturnNode; |
119 | class MachSafePointNode; |
120 | class MachSpillCopyNode; |
121 | class MachTempNode; |
122 | class MachMergeNode; |
123 | class MachMemBarNode; |
124 | class Matcher; |
125 | class MemBarNode; |
126 | class MemBarStoreStoreNode; |
127 | class MemNode; |
128 | class MergeMemNode; |
129 | class MoveNode; |
130 | class MulNode; |
131 | class MultiNode; |
132 | class MultiBranchNode; |
133 | class NeverBranchNode; |
134 | class Opaque1Node; |
135 | class OuterStripMinedLoopNode; |
136 | class OuterStripMinedLoopEndNode; |
137 | class Node; |
138 | class Node_Array; |
139 | class Node_List; |
140 | class Node_Stack; |
141 | class OopMap; |
142 | class ParmNode; |
143 | class PCTableNode; |
144 | class PhaseCCP; |
145 | class PhaseGVN; |
146 | class PhaseIterGVN; |
147 | class PhaseRegAlloc; |
148 | class PhaseTransform; |
149 | class PhaseValues; |
150 | class PhiNode; |
151 | class Pipeline; |
152 | class ProjNode; |
153 | class RangeCheckNode; |
154 | class RegMask; |
155 | class RegionNode; |
156 | class RootNode; |
157 | class SafePointNode; |
158 | class SafePointScalarObjectNode; |
159 | class StartNode; |
160 | class State; |
161 | class StoreNode; |
162 | class SubNode; |
163 | class SubTypeCheckNode; |
164 | class Type; |
165 | class TypeNode; |
166 | class UnlockNode; |
167 | class VectorNode; |
168 | class LoadVectorNode; |
169 | class LoadVectorMaskedNode; |
170 | class StoreVectorMaskedNode; |
171 | class LoadVectorGatherNode; |
172 | class StoreVectorNode; |
173 | class StoreVectorScatterNode; |
174 | class VectorMaskCmpNode; |
175 | class VectorUnboxNode; |
176 | class VectorSet; |
177 | class VectorReinterpretNode; |
178 | class ShiftVNode; |
179 | |
180 | // The type of all node counts and indexes. |
181 | // It must hold at least 16 bits, but must also be fast to load and store. |
182 | // This type, if less than 32 bits, could limit the number of possible nodes. |
183 | // (To make this type platform-specific, move to globalDefinitions_xxx.hpp.) |
184 | typedef unsigned int node_idx_t; |
185 | |
186 | |
187 | #ifndef OPTO_DU_ITERATOR_ASSERT |
188 | #ifdef ASSERT |
189 | #define OPTO_DU_ITERATOR_ASSERT 1 |
190 | #else |
191 | #define OPTO_DU_ITERATOR_ASSERT 0 |
192 | #endif |
193 | #endif //OPTO_DU_ITERATOR_ASSERT |
194 |  |
195 | #if OPTO_DU_ITERATOR_ASSERT |
196 | class DUIterator; |
197 | class DUIterator_Fast; |
198 | class DUIterator_Last; |
199 | #else |
200 | typedef uint DUIterator; |
201 | typedef Node** DUIterator_Fast; |
202 | typedef Node** DUIterator_Last; |
203 | #endif |
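// Editor's note (illustrative sketch, not part of node.hpp): with or without the
// assert-checked iterator classes, the basic out-edge walk reads the same, e.g.
// assuming 'n' is some Node* in scope:
//
//   for (DUIterator i = n->outs(); n->has_out(i); i++) {
//     Node* use = n->out(i);   // i'th user of n
//     // ... inspect 'use' ...
//   }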
204 | |
205 | // Node Sentinel |
206 | #define NodeSentinel (Node*)-1 |
207 |  |
208 | // Unknown count frequency |
209 | #define COUNT_UNKNOWN (-1.0f) |
210 | |
211 | //------------------------------Node------------------------------------------- |
212 | // Nodes define actions in the program. They create values, which have types. |
213 | // They are both vertices in a directed graph and program primitives. Nodes |
214 | // are labeled; the label is the "opcode", the primitive function in the lambda |
215 | // calculus sense that gives meaning to the Node. Node inputs are ordered (so |
216 | // that "a-b" is different from "b-a"). The inputs to a Node are the inputs to |
217 | // the Node's function. These inputs also define a Type equation for the Node. |
218 | // Solving these Type equations amounts to doing dataflow analysis. |
219 | // Control and data are uniformly represented in the graph. Finally, Nodes |
220 | // have a unique dense integer index which is used to index into side arrays |
221 | // whenever I have phase-specific information. |
222 | |
223 | class Node { |
224 | friend class VMStructs; |
225 | |
226 | // Lots of restrictions on cloning Nodes |
227 | NONCOPYABLE(Node); |
228 | |
229 | public: |
230 | friend class Compile; |
231 | #if OPTO_DU_ITERATOR_ASSERT |
232 | friend class DUIterator_Common; |
233 | friend class DUIterator; |
234 | friend class DUIterator_Fast; |
235 | friend class DUIterator_Last; |
236 | #endif |
237 | |
238 | // Because Nodes come and go, I define an Arena of Node structures to pull |
239 | // from. This should allow fast access to node creation & deletion. This |
240 | // field is a local cache of a value defined in some "program fragment" for |
241 | // which these Nodes are just a part of. |
242 | |
243 | inline void* operator new(size_t x) throw() { |
244 | Compile* C = Compile::current(); |
245 | Node* n = (Node*)C->node_arena()->AmallocWords(x); |
246 | return (void*)n; |
247 | } |
248 | |
249 | // Delete is a NOP |
250 | void operator delete( void *ptr ) {} |
251 | // Fancy destructor; eagerly attempt to reclaim Node numberings and storage |
252 | void destruct(PhaseValues* phase); |
253 | |
254 | // Create a new Node. Required is the number of inputs required for |
255 | // semantic correctness. |
256 | Node( uint required ); |
257 | |
258 | // Create a new Node with given input edges. |
259 | // This version requires use of the "edge-count" new. |
260 | // E.g. new (C,3) FooNode( C, NULL, left, right ); |
261 | Node( Node *n0 ); |
262 | Node( Node *n0, Node *n1 ); |
263 | Node( Node *n0, Node *n1, Node *n2 ); |
264 | Node( Node *n0, Node *n1, Node *n2, Node *n3 ); |
265 | Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4 ); |
266 | Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4, Node *n5 ); |
267 | Node( Node *n0, Node *n1, Node *n2, Node *n3, |
268 | Node *n4, Node *n5, Node *n6 ); |
269 | |
270 | // Clone an inherited Node given only the base Node type. |
271 | Node* clone() const; |
272 | |
273 | // Clone a Node, immediately supplying one or two new edges. |
274 | // The first and second arguments, if non-null, replace in(1) and in(2), |
275 | // respectively. |
276 | Node* clone_with_data_edge(Node* in1, Node* in2 = NULL) const { |
277 | Node* nn = clone(); |
278 | if (in1 != NULL) nn->set_req(1, in1); |
279 | if (in2 != NULL) nn->set_req(2, in2); |
280 | return nn; |
281 | } |
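// Editor's note (illustrative sketch, not part of node.hpp): clone_with_data_edge
// copies a node and immediately rewires its first data inputs; e.g. assuming 'add',
// 'x' and 'y' are hypothetical Node* values already in the graph:
//
//   Node* add2 = add->clone_with_data_edge(x, y);  // copy of 'add' with in(1)=x, in(2)=y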
282 | |
283 | private: |
284 | // Shared setup for the above constructors. |
285 | // Handles all interactions with Compile::current. |
286 | // Puts initial values in all Node fields except _idx. |
287 | // Returns the initial value for _idx, which cannot |
288 | // be initialized by assignment. |
289 | inline int Init(int req); |
290 | |
291 | //----------------- input edge handling |
292 | protected: |
293 | friend class PhaseCFG; // Access to address of _in array elements |
294 | Node **_in; // Array of use-def references to Nodes |
295 | Node **_out; // Array of def-use references to Nodes |
296 | |
297 | // Input edges are split into two categories. Required edges are required |
298 | // for semantic correctness; order is important and NULLs are allowed. |
299 | // Precedence edges are used to help determine execution order and are |
300 | // added, e.g., for scheduling purposes. They are unordered and not |
301 | // duplicated; they have no embedded NULLs. Edges from 0 to _cnt-1 |
302 | // are required, from _cnt to _max-1 are precedence edges. |
303 | node_idx_t _cnt; // Total number of required Node inputs. |
304 | |
305 | node_idx_t _max; // Actual length of input array. |
306 | |
307 | // Output edges are an unordered list of def-use edges which exactly |
308 | // correspond to required input edges which point from other nodes |
309 | // to this one. Thus the count of the output edges is the number of |
310 | // users of this node. |
311 | node_idx_t _outcnt; // Total number of Node outputs. |
312 | |
313 | node_idx_t _outmax; // Actual length of output array. |
314 | |
315 | // Grow the actual input array to the next larger power-of-2 bigger than len. |
316 | void grow( uint len ); |
317 | // Grow the output array to the next larger power-of-2 bigger than len. |
318 | void out_grow( uint len ); |
319 | |
320 | public: |
321 | // Each Node is assigned a unique small/dense number. This number is used |
322 | // to index into auxiliary arrays of data and bit vectors. |
323 | // The field _idx is declared constant to defend against inadvertent assignments, |
324 | // since it is used by clients as a naked field. However, the field's value can be |
325 | // changed using the set_idx() method. |
326 | // |
327 | // The PhaseRenumberLive phase renumbers nodes based on liveness information. |
328 | // Therefore, it updates the value of the _idx field. The parse-time _idx is |
329 | // preserved in _parse_idx. |
330 | const node_idx_t _idx; |
331 | DEBUG_ONLY(const node_idx_t _parse_idx;); |
332 | // IGV node identifier. Two nodes, possibly in different compilation phases, |
333 | // have the same IGV identifier if (and only if) they are the very same node |
334 | // (same memory address) or one is "derived" from the other (by e.g. |
335 | // renumbering or matching). This identifier makes it possible to follow the |
336 | // entire lifetime of a node in IGV even if its C2 identifier (_idx) changes. |
337 | NOT_PRODUCT(node_idx_t _igv_idx;); |
338 | |
339 | // Get the (read-only) number of input edges |
340 | uint req() const { return _cnt; } |
341 | uint len() const { return _max; } |
342 | // Get the (read-only) number of output edges |
343 | uint outcnt() const { return _outcnt; } |
344 | |
345 | #if OPTO_DU_ITERATOR_ASSERT |
346 | // Iterate over the out-edges of this node. Deletions are illegal. |
347 | inline DUIterator outs() const; |
348 | // Use this when the out array might have changed to suppress asserts. |
349 | inline DUIterator& refresh_out_pos(DUIterator& i) const; |
350 | // Does the node have an out at this position? (Used for iteration.) |
351 | inline bool has_out(DUIterator& i) const; |
352 | inline Node* out(DUIterator& i) const; |
353 | // Iterate over the out-edges of this node. All changes are illegal. |
354 | inline DUIterator_Fast fast_outs(DUIterator_Fast& max) const; |
355 | inline Node* fast_out(DUIterator_Fast& i) const; |
356 | // Iterate over the out-edges of this node, deleting one at a time. |
357 | inline DUIterator_Last last_outs(DUIterator_Last& min) const; |
358 | inline Node* last_out(DUIterator_Last& i) const; |
359 | // The inline bodies of all these methods are after the iterator definitions. |
360 | #else |
361 | // Iterate over the out-edges of this node. Deletions are illegal. |
362 | // This iteration uses integral indexes, to decouple from array reallocations. |
363 | DUIterator outs() const { return 0; } |
364 | // Use this when the out array might have changed to suppress asserts. |
365 | DUIterator refresh_out_pos(DUIterator i) const { return i; } |
366 | |
367 | // Reference to the i'th output Node. Error if out of bounds. |
368 | Node* out(DUIterator i) const { assert(i < _outcnt, "oob"); return _out[i]; } |
369 | // Does the node have an out at this position? (Used for iteration.) |
370 | bool has_out(DUIterator i) const { return i < _outcnt; } |
371 | |
372 | // Iterate over the out-edges of this node. All changes are illegal. |
373 | // This iteration uses a pointer internal to the out array. |
374 | DUIterator_Fast fast_outs(DUIterator_Fast& max) const { |
375 | Node** out = _out; |
376 | // Assign a limit pointer to the reference argument: |
377 | max = out + (ptrdiff_t)_outcnt; |
378 | // Return the base pointer: |
379 | return out; |
380 | } |
381 | Node* fast_out(DUIterator_Fast i) const { return *i; } |
382 | // Iterate over the out-edges of this node, deleting one at a time. |
383 | // This iteration uses a pointer internal to the out array. |
384 | DUIterator_Last last_outs(DUIterator_Last& min) const { |
385 | Node** out = _out; |
386 | // Assign a limit pointer to the reference argument: |
387 | min = out; |
388 | // Return the pointer to the start of the iteration: |
389 | return out + (ptrdiff_t)_outcnt - 1; |
390 | } |
391 | Node* last_out(DUIterator_Last i) const { return *i; } |
392 | #endif |
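// Editor's note (illustrative sketch, not part of node.hpp): the fast variant caches
// the iteration limit up front and must not be used while out-edges change, e.g.
// assuming 'n' is some Node* in scope:
//
//   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
//     Node* use = n->fast_out(i);
//     // ... read-only inspection of 'use' ...
//   }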
393 | |
394 | // Reference to the i'th input Node. Error if out of bounds. |
395 | Node* in(uint i) const { assert(i < _max, "oob: i=%d, _max=%d", i, _max); return _in[i]; } |
396 | // Reference to the i'th input Node. NULL if out of bounds. |
397 | Node* lookup(uint i) const { return ((i < _max) ? _in[i] : NULL); } |
398 | // Reference to the i'th output Node. Error if out of bounds. |
399 | // Use this accessor sparingly. We are going to try to use iterators instead. |
400 | Node* raw_out(uint i) const { assert(i < _outcnt,"oob"); return _out[i]; } |
401 | // Return the unique out edge. |
402 | Node* unique_out() const { assert(_outcnt==1,"not unique"); return _out[0]; } |
403 | // Delete out edge at position 'i' by moving last out edge to position 'i' |
404 | void raw_del_out(uint i) { |
405 | assert(i < _outcnt,"oob"); |
406 | assert(_outcnt > 0,"oob"); |
407 | #if OPTO_DU_ITERATOR_ASSERT |
408 | // Record that a change happened here. |
409 | debug_only(_last_del = _out[i]; ++_del_tick); |
410 | #endif |
411 | _out[i] = _out[--_outcnt]; |
412 | // Smash the old edge so it can't be used accidentally. |
413 | debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef); |
414 | } |
415 | |
416 | #ifdef ASSERT |
417 | bool is_dead() const; |
418 | #define is_not_dead(n) ((n) == NULL || !VerifyIterativeGVN || !((n)->is_dead())) |
419 | bool is_reachable_from_root() const; |
420 | #endif |
421 | // Check whether node has become unreachable |
422 | bool is_unreachable(PhaseIterGVN &igvn) const; |
423 | |
424 | // Set a required input edge, also updates corresponding output edge |
425 | void add_req( Node *n ); // Append a NEW required input |
426 | void add_req( Node *n0, Node *n1 ) { |
427 | add_req(n0); add_req(n1); } |
428 | void add_req( Node *n0, Node *n1, Node *n2 ) { |
429 | add_req(n0); add_req(n1); add_req(n2); } |
430 | void add_req_batch( Node* n, uint m ); // Append m NEW required inputs (all n). |
431 | void del_req( uint idx ); // Delete required edge & compact |
432 | void del_req_ordered( uint idx ); // Delete required edge & compact with preserved order |
433 | void ins_req( uint i, Node *n ); // Insert a NEW required input |
434 | void set_req( uint i, Node *n ) { |
435 | assert( is_not_dead(n), "can not use dead node"); |
436 | assert( i < _cnt, "oob: i=%d, _cnt=%d", i, _cnt); |
437 | assert( !VerifyHashTableKeys || _hash_lock == 0, |
438 | "remove node from hash table before modifying it"); |
439 | Node** p = &_in[i]; // cache this._in, across the del_out call |
440 | if (*p != NULL) (*p)->del_out((Node *)this); |
441 | (*p) = n; |
442 | if (n != NULL) n->add_out((Node *)this); |
443 | Compile::current()->record_modified_node(this); |
444 | } |
445 | // Light version of set_req() to init inputs after node creation. |
446 | void init_req( uint i, Node *n ) { |
447 | assert( i == 0 && this == n || |
448 | is_not_dead(n), "can not use dead node"); |
449 | assert( i < _cnt, "oob"); |
450 | assert( !VerifyHashTableKeys || _hash_lock == 0, |
451 | "remove node from hash table before modifying it"); |
452 | assert( _in[i] == NULL, "sanity"); |
453 | _in[i] = n; |
454 | if (n != NULL) n->add_out((Node *)this); |
455 | Compile::current()->record_modified_node(this); |
456 | } |
457 | // Find first occurrence of n among my edges: |
458 | int find_edge(Node* n); |
459 | int find_prec_edge(Node* n) { |
460 | for (uint i = req(); i < len(); i++) { |
461 | if (_in[i] == n) return i; |
462 | if (_in[i] == NULL) { |
463 | DEBUG_ONLY( while ((++i) < len()) assert(_in[i] == NULL, "Gap in prec edges!"); ) |
464 | break; |
465 | } |
466 | } |
467 | return -1; |
468 | } |
469 | int replace_edge(Node* old, Node* neww, PhaseGVN* gvn = NULL); |
470 | int replace_edges_in_range(Node* old, Node* neww, int start, int end, PhaseGVN* gvn); |
471 | // NULL out all inputs to eliminate incoming Def-Use edges. |
472 | void disconnect_inputs(Compile* C); |
473 | |
474 | // Quickly, return true if and only if I am Compile::current()->top(). |
475 | bool is_top() const { |
476 | assert((this == (Node*) Compile::current()->top()) == (_out == NULL), ""); |
477 | return (_out == NULL); |
478 | } |
479 | // Reaffirm invariants for is_top. (Only from Compile::set_cached_top_node.) |
480 | void setup_is_top(); |
481 | |
482 | // Strip away casting. (It is depth-limited.) |
483 | Node* uncast(bool keep_deps = false) const; |
484 | // Return whether two Nodes are equivalent, after stripping casting. |
485 | bool eqv_uncast(const Node* n, bool keep_deps = false) const { |
486 | return (this->uncast(keep_deps) == n->uncast(keep_deps)); |
487 | } |
488 | |
489 | // Find an out of the current node that matches opcode. |
490 | Node* find_out_with(int opcode); |
491 | // Return true if the current node has an out that matches opcode. |
492 | bool has_out_with(int opcode); |
493 | // Return true if the current node has an out that matches any of the opcodes. |
494 | bool has_out_with(int opcode1, int opcode2, int opcode3, int opcode4); |
495 | |
496 | private: |
497 | static Node* uncast_helper(const Node* n, bool keep_deps); |
498 | |
499 | // Add an output edge to the end of the list |
500 | void add_out( Node *n ) { |
501 | if (is_top()) return; |
502 | if( _outcnt == _outmax ) out_grow(_outcnt); |
503 | _out[_outcnt++] = n; |
504 | } |
505 | // Delete an output edge |
506 | void del_out( Node *n ) { |
507 | if (is_top()) return; |
508 | Node** outp = &_out[_outcnt]; |
509 | // Find and remove n |
510 | do { |
511 | assert(outp > _out, "Missing Def-Use edge"); |
512 | } while (*--outp != n); |
513 | *outp = _out[--_outcnt]; |
514 | // Smash the old edge so it can't be used accidentally. |
515 | debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef); |
516 | // Record that a change happened here. |
517 | #if OPTO_DU_ITERATOR_ASSERT |
518 | debug_only(_last_del = n; ++_del_tick); |
519 | #endif |
520 | } |
521 | // Close gap after removing edge. |
522 | void close_prec_gap_at(uint gap) { |
523 | assert(_cnt <= gap && gap < _max, "no valid prec edge"); |
524 | uint i = gap; |
525 | Node *last = NULL; |
526 | for (; i < _max-1; ++i) { |
527 | Node *next = _in[i+1]; |
528 | if (next == NULL) break; |
529 | last = next; |
530 | } |
531 | _in[gap] = last; // Move last slot to empty one. |
532 | _in[i] = NULL; // NULL out last slot. |
533 | } |
534 | |
535 | public: |
536 | // Globally replace this node by a given new node, updating all uses. |
537 | void replace_by(Node* new_node); |
538 | // Globally replace this node by a given new node, updating all uses |
539 | // and cutting input edges of old node. |
540 | void subsume_by(Node* new_node, Compile* c) { |
541 | replace_by(new_node); |
542 | disconnect_inputs(c); |
543 | } |
544 | void set_req_X(uint i, Node *n, PhaseIterGVN *igvn); |
545 | void set_req_X(uint i, Node *n, PhaseGVN *gvn); |
546 | // Find the one non-null required input. RegionNode only |
547 | Node *nonnull_req() const; |
548 | // Add or remove precedence edges |
549 | void add_prec( Node *n ); |
550 | void rm_prec( uint i ); |
551 | |
552 | // Note: prec(i) will not necessarily point to n if edge already exists. |
553 | void set_prec( uint i, Node *n ) { |
554 | assert(i < _max, "oob: i=%d, _max=%d", i, _max); |
555 | assert(is_not_dead(n), "can not use dead node"); |
556 | assert(i >= _cnt, "not a precedence edge"); |
557 | // Avoid spec violation: duplicated prec edge. |
558 | if (_in[i] == n) return; |
559 | if (n == NULL || find_prec_edge(n) != -1) { |
560 | rm_prec(i); |
561 | return; |
562 | } |
563 | if (_in[i] != NULL) _in[i]->del_out((Node *)this); |
564 | _in[i] = n; |
565 | n->add_out((Node *)this); |
566 | } |
567 | |
568 | // Set this node's index, used by cisc_version to replace current node |
569 | void set_idx(uint new_idx) { |
570 | const node_idx_t* ref = &_idx; |
571 | *(node_idx_t*)ref = new_idx; |
572 | } |
573 | // Swap input edge order. (Edge indexes i1 and i2 are usually 1 and 2.) |
574 | void swap_edges(uint i1, uint i2) { |
575 | debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH); |
576 | // Def-Use info is unchanged |
577 | Node* n1 = in(i1); |
578 | Node* n2 = in(i2); |
579 | _in[i1] = n2; |
580 | _in[i2] = n1; |
581 | // If this node is in the hash table, make sure it doesn't need a rehash. |
582 | assert(check_hash == NO_HASH || check_hash == hash(), "edge swap must preserve hash code"); |
583 | } |
584 | |
585 | // Iterators over input Nodes for a Node X are written as: |
586 | // for( i = 0; i < X.req(); i++ ) ... X[i] ... |
587 | // NOTE: Required edges can contain embedded NULL pointers. |
588 | |
589 | //----------------- Other Node Properties |
590 | |
591 | // Generate class IDs for (some) ideal nodes so that it is possible to determine |
592 | // the type of a node using a non-virtual method call (the method is_<Node>() below). |
593 | // |
594 | // A class ID of an ideal node is a set of bits. In a class ID, a single bit determines |
595 | // the type of the node the ID represents; another subset of an ID's bits are reserved |
596 | // for the superclasses of the node represented by the ID. |
597 | // |
598 | // By design, if A is a supertype of B, A.is_B() returns false and B.is_A() |
599 | // returns true. A.is_A() returns true. |
600 | // |
601 | // If two classes, A and B, have the same superclass, a different bit of A's class id |
602 | // is reserved for A's type than for B's type. That bit is specified by the third |
603 | // parameter in the macro DEFINE_CLASS_ID. |
604 | // |
605 | // By convention, classes with deeper hierarchy are declared first. Moreover, |
606 | // classes with the same hierarchy depth are sorted by usage frequency. |
607 | // |
608 | // The query method masks the bits to cut off bits of subclasses and then compares |
609 | // the result with the class id (see the macro DEFINE_CLASS_QUERY below). |
610 | // |
611 | // Class_MachCall=30, ClassMask_MachCall=31 |
612 | // 12 8 4 0 |
613 | // 0 0 0 0 0 0 0 0 1 1 1 1 0 |
614 | // | | | | |
615 | // | | | Bit_Mach=2 |
616 | // | | Bit_MachReturn=4 |
617 | // | Bit_MachSafePoint=8 |
618 | // Bit_MachCall=16 |
619 | // |
620 | // Class_CountedLoop=56, ClassMask_CountedLoop=63 |
621 | // 12 8 4 0 |
622 | // 0 0 0 0 0 0 0 1 1 1 0 0 0 |
623 | // | | | |
624 | // | | Bit_Region=8 |
625 | // | Bit_Loop=16 |
626 | // Bit_CountedLoop=32 |
627 | |
628 | #define DEFINE_CLASS_ID(cl, supcl, subn) \ |
629 | Bit_##cl = (Class_##supcl == 0) ? 1 << subn : (Bit_##supcl) << (1 + subn) , \ |
630 | Class_##cl = Class_##supcl + Bit_##cl , \ |
631 | ClassMask_##cl = ((Bit_##cl << 1) - 1) , |
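// Editor's note (worked example, not part of node.hpp): expanding the first few
// entries of the enum below by hand gives
//   DEFINE_CLASS_ID(Multi, Node, 0)      -> Bit_Multi     = 1, Class_Multi     = 1, ClassMask_Multi     = 1
//   DEFINE_CLASS_ID(SafePoint, Multi, 0) -> Bit_SafePoint = 2, Class_SafePoint = 3, ClassMask_SafePoint = 3
//   DEFINE_CLASS_ID(Call, SafePoint, 0)  -> Bit_Call      = 4, Class_Call      = 7, ClassMask_Call      = 7
// so a CallNode's _class_id has the Multi, SafePoint and Call bits set, and
// (_class_id & ClassMask_SafePoint) == Class_SafePoint makes is_SafePoint() true for it.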
632 | |
633 | // This enum is used only for C2 ideal and mach nodes with is_<node>() methods |
634 | // so that its values fit into 32 bits. |
635 | enum NodeClasses { |
636 | Bit_Node = 0x00000000, |
637 | Class_Node = 0x00000000, |
638 | ClassMask_Node = 0xFFFFFFFF, |
639 | |
640 | DEFINE_CLASS_ID(Multi, Node, 0) |
641 | DEFINE_CLASS_ID(SafePoint, Multi, 0) |
642 | DEFINE_CLASS_ID(Call, SafePoint, 0) |
643 | DEFINE_CLASS_ID(CallJava, Call, 0) |
644 | DEFINE_CLASS_ID(CallStaticJava, CallJava, 0) |
645 | DEFINE_CLASS_ID(CallDynamicJava, CallJava, 1) |
646 | DEFINE_CLASS_ID(CallRuntime, Call, 1) |
647 | DEFINE_CLASS_ID(CallLeaf, CallRuntime, 0) |
648 | DEFINE_CLASS_ID(CallLeafNoFP, CallLeaf, 0) |
649 | DEFINE_CLASS_ID(Allocate, Call, 2) |
650 | DEFINE_CLASS_ID(AllocateArray, Allocate, 0) |
651 | DEFINE_CLASS_ID(AbstractLock, Call, 3) |
652 | DEFINE_CLASS_ID(Lock, AbstractLock, 0) |
653 | DEFINE_CLASS_ID(Unlock, AbstractLock, 1) |
654 | DEFINE_CLASS_ID(ArrayCopy, Call, 4) |
655 | DEFINE_CLASS_ID(CallNative, Call, 5) |
656 | DEFINE_CLASS_ID(MultiBranch, Multi, 1) |
657 | DEFINE_CLASS_ID(PCTable, MultiBranch, 0) |
658 | DEFINE_CLASS_ID(Catch, PCTable, 0) |
659 | DEFINE_CLASS_ID(Jump, PCTable, 1) |
660 | DEFINE_CLASS_ID(If, MultiBranch, 1) |
661 | DEFINE_CLASS_ID(BaseCountedLoopEnd, If, 0) |
662 | DEFINE_CLASS_ID(CountedLoopEnd, BaseCountedLoopEnd, 0) |
663 | DEFINE_CLASS_ID(LongCountedLoopEnd, BaseCountedLoopEnd, 1) |
664 | DEFINE_CLASS_ID(RangeCheck, If, 1) |
665 | DEFINE_CLASS_ID(OuterStripMinedLoopEnd, If, 2) |
666 | DEFINE_CLASS_ID(NeverBranch, MultiBranch, 2) |
667 | DEFINE_CLASS_ID(Start, Multi, 2) |
668 | DEFINE_CLASS_ID(MemBar, Multi, 3) |
669 | DEFINE_CLASS_ID(Initialize, MemBar, 0) |
670 | DEFINE_CLASS_ID(MemBarStoreStore, MemBar, 1) |
671 | |
672 | DEFINE_CLASS_ID(Mach, Node, 1) |
673 | DEFINE_CLASS_ID(MachReturn, Mach, 0) |
674 | DEFINE_CLASS_ID(MachSafePoint, MachReturn, 0) |
675 | DEFINE_CLASS_ID(MachCall, MachSafePoint, 0) |
676 | DEFINE_CLASS_ID(MachCallJava, MachCall, 0) |
677 | DEFINE_CLASS_ID(MachCallStaticJava, MachCallJava, 0) |
678 | DEFINE_CLASS_ID(MachCallDynamicJava, MachCallJava, 1) |
679 | DEFINE_CLASS_ID(MachCallRuntime, MachCall, 1) |
680 | DEFINE_CLASS_ID(MachCallLeaf, MachCallRuntime, 0) |
681 | DEFINE_CLASS_ID(MachCallNative, MachCall, 2) |
682 | DEFINE_CLASS_ID(MachBranch, Mach, 1) |
683 | DEFINE_CLASS_ID(MachIf, MachBranch, 0) |
684 | DEFINE_CLASS_ID(MachGoto, MachBranch, 1) |
685 | DEFINE_CLASS_ID(MachNullCheck, MachBranch, 2) |
686 | DEFINE_CLASS_ID(MachSpillCopy, Mach, 2) |
687 | DEFINE_CLASS_ID(MachTemp, Mach, 3) |
688 | DEFINE_CLASS_ID(MachConstantBase, Mach, 4) |
689 | DEFINE_CLASS_ID(MachConstant, Mach, 5) |
690 | DEFINE_CLASS_ID(MachJump, MachConstant, 0) |
691 | DEFINE_CLASS_ID(MachMerge, Mach, 6) |
692 | DEFINE_CLASS_ID(MachMemBar, Mach, 7) |
693 | |
694 | DEFINE_CLASS_ID(Type, Node, 2) |
695 | DEFINE_CLASS_ID(Phi, Type, 0) |
696 | DEFINE_CLASS_ID(ConstraintCast, Type, 1) |
697 | DEFINE_CLASS_ID(CastII, ConstraintCast, 0) |
698 | DEFINE_CLASS_ID(CheckCastPP, ConstraintCast, 1) |
699 | DEFINE_CLASS_ID(CastLL, ConstraintCast, 2) |
700 | DEFINE_CLASS_ID(CastFF, ConstraintCast, 3) |
701 | DEFINE_CLASS_ID(CastDD, ConstraintCast, 4) |
702 | DEFINE_CLASS_ID(CastVV, ConstraintCast, 5) |
703 | DEFINE_CLASS_ID(CMove, Type, 3) |
704 | DEFINE_CLASS_ID(SafePointScalarObject, Type, 4) |
705 | DEFINE_CLASS_ID(DecodeNarrowPtr, Type, 5) |
706 | DEFINE_CLASS_ID(DecodeN, DecodeNarrowPtr, 0) |
707 | DEFINE_CLASS_ID(DecodeNKlass, DecodeNarrowPtr, 1) |
708 | DEFINE_CLASS_ID(EncodeNarrowPtr, Type, 6) |
709 | DEFINE_CLASS_ID(EncodeP, EncodeNarrowPtr, 0) |
710 | DEFINE_CLASS_ID(EncodePKlass, EncodeNarrowPtr, 1) |
711 | DEFINE_CLASS_ID(Vector, Type, 7) |
712 | DEFINE_CLASS_ID(VectorMaskCmp, Vector, 0) |
713 | DEFINE_CLASS_ID(VectorUnbox, Vector, 1) |
714 | DEFINE_CLASS_ID(VectorReinterpret, Vector, 2) |
715 | DEFINE_CLASS_ID(ShiftV, Vector, 3) |
716 | |
717 | DEFINE_CLASS_ID(Proj, Node, 3) |
718 | DEFINE_CLASS_ID(CatchProj, Proj, 0) |
719 | DEFINE_CLASS_ID(JumpProj, Proj, 1) |
720 | DEFINE_CLASS_ID(IfProj, Proj, 2) |
721 | DEFINE_CLASS_ID(IfTrue, IfProj, 0) |
722 | DEFINE_CLASS_ID(IfFalse, IfProj, 1) |
723 | DEFINE_CLASS_ID(Parm, Proj, 4) |
724 | DEFINE_CLASS_ID(MachProj, Proj, 5) |
725 | |
726 | DEFINE_CLASS_ID(Mem, Node, 4) |
727 | DEFINE_CLASS_ID(Load, Mem, 0) |
728 | DEFINE_CLASS_ID(LoadVector, Load, 0) |
729 | DEFINE_CLASS_ID(LoadVectorGather, LoadVector, 0) |
730 | DEFINE_CLASS_ID(LoadVectorMasked, LoadVector, 1) |
731 | DEFINE_CLASS_ID(Store, Mem, 1) |
732 | DEFINE_CLASS_ID(StoreVector, Store, 0) |
733 | DEFINE_CLASS_ID(StoreVectorScatter, StoreVector, 0) |
734 | DEFINE_CLASS_ID(StoreVectorMasked, StoreVector, 1) |
735 | DEFINE_CLASS_ID(LoadStore, Mem, 2) |
736 | DEFINE_CLASS_ID(LoadStoreConditional, LoadStore, 0) |
737 | DEFINE_CLASS_ID(CompareAndSwap, LoadStoreConditional, 0) |
738 | DEFINE_CLASS_ID(CompareAndExchangeNode, LoadStore, 1) |
739 | |
740 | DEFINE_CLASS_ID(Region, Node, 5) |
741 | DEFINE_CLASS_ID(Loop, Region, 0) |
742 | DEFINE_CLASS_ID(Root, Loop, 0) |
743 | DEFINE_CLASS_ID(BaseCountedLoop, Loop, 1) |
744 | DEFINE_CLASS_ID(CountedLoop, BaseCountedLoop, 0) |
745 | DEFINE_CLASS_ID(LongCountedLoop, BaseCountedLoop, 1) |
746 | DEFINE_CLASS_ID(OuterStripMinedLoop, Loop, 2) |
747 | |
748 | DEFINE_CLASS_ID(Sub, Node, 6) |
749 | DEFINE_CLASS_ID(Cmp, Sub, 0) |
750 | DEFINE_CLASS_ID(FastLock, Cmp, 0) |
751 | DEFINE_CLASS_ID(FastUnlock, Cmp, 1) |
752 | DEFINE_CLASS_ID(SubTypeCheck, Cmp, 2) |
753 | |
754 | DEFINE_CLASS_ID(MergeMem, Node, 7) |
755 | DEFINE_CLASS_ID(Bool, Node, 8) |
756 | DEFINE_CLASS_ID(AddP, Node, 9) |
757 | DEFINE_CLASS_ID(BoxLock, Node, 10) |
758 | DEFINE_CLASS_ID(Add, Node, 11) |
759 | DEFINE_CLASS_ID(Mul, Node, 12) |
760 | DEFINE_CLASS_ID(ClearArray, Node, 14) |
761 | DEFINE_CLASS_ID(Halt, Node, 15) |
762 | DEFINE_CLASS_ID(Opaque1, Node, 16) |
763 | DEFINE_CLASS_ID(Move, Node, 17) |
764 | DEFINE_CLASS_ID(LShift, Node, 18) |
765 | |
766 | _max_classes = ClassMask_Move |
767 | }; |
768 | #undef DEFINE_CLASS_ID |
769 | |
770 | // Flags are sorted by usage frequency. |
771 | enum NodeFlags { |
772 | Flag_is_Copy = 1 << 0, // should be first bit to avoid shift |
773 | Flag_rematerialize = 1 << 1, |
774 | Flag_needs_anti_dependence_check = 1 << 2, |
775 | Flag_is_macro = 1 << 3, |
776 | Flag_is_Con = 1 << 4, |
777 | Flag_is_cisc_alternate = 1 << 5, |
778 | Flag_is_dead_loop_safe = 1 << 6, |
779 | Flag_may_be_short_branch = 1 << 7, |
780 | Flag_avoid_back_to_back_before = 1 << 8, |
781 | Flag_avoid_back_to_back_after = 1 << 9, |
782 | Flag_has_call = 1 << 10, |
783 | Flag_is_reduction = 1 << 11, |
784 | Flag_is_scheduled = 1 << 12, |
785 | Flag_has_vector_mask_set = 1 << 13, |
786 | Flag_is_expensive = 1 << 14, |
787 | Flag_is_predicated_vector = 1 << 15, |
788 | Flag_for_post_loop_opts_igvn = 1 << 16, |
789 | _last_flag = Flag_for_post_loop_opts_igvn |
790 | }; |
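// Editor's note (illustrative sketch, not part of node.hpp): flags are plain bits in
// _flags and are queried and updated through the accessors declared further down, e.g.
// assuming 'n' is some Node* in scope:
//
//   if (n->is_macro()) { /* node still needs macro expansion before matching */ }
//   n->add_flag(Node::Flag_is_expensive);     // set a flag bit
//   n->remove_flag(Node::Flag_is_expensive);  // clear it again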
791 | |
792 | class PD; |
793 | |
794 | private: |
795 | juint _class_id; |
796 | juint _flags; |
797 | |
798 | static juint max_flags(); |
799 | |
800 | protected: |
801 | // These methods should be called from constructors only. |
802 | void init_class_id(juint c) { |
803 | _class_id = c; // cast out const |
804 | } |
805 | void init_flags(uint fl) { |
806 | assert(fl <= max_flags(), "invalid node flag"); |
807 | _flags |= fl; |
808 | } |
809 | void clear_flag(uint fl) { |
810 | assert(fl <= max_flags(), "invalid node flag"); |
811 | _flags &= ~fl; |
812 | } |
813 | |
814 | public: |
815 | const juint class_id() const { return _class_id; } |
816 | |
817 | const juint flags() const { return _flags; } |
818 | |
819 | void add_flag(juint fl) { init_flags(fl); } |
820 | |
821 | void remove_flag(juint fl) { clear_flag(fl); } |
822 | |
823 | // Return a dense integer opcode number |
824 | virtual int Opcode() const; |
825 | |
826 | // Virtual inherited Node size |
827 | virtual uint size_of() const; |
828 | |
829 | // Other interesting Node properties |
830 | #define DEFINE_CLASS_QUERY(type) \ |
831 | bool is_##type() const { \ |
832 | return ((_class_id & ClassMask_##type) == Class_##type); \ |
833 | } \ |
834 | type##Node *as_##type() const { \ |
835 | assert(is_##type(), "invalid node class: %s", Name()); \ |
836 | return (type##Node*)this; \ |
837 | } \ |
838 | type##Node* isa_##type() const { \ |
839 | return (is_##type()) ? as_##type() : NULL; \ |
840 | } |
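// Editor's note (illustrative sketch, not part of node.hpp): the generated queries are
// the usual way to downcast in C2; e.g. assuming 'n' is some Node* in scope:
//
//   if (n->is_Phi()) { PhiNode* phi = n->as_Phi(); /* asserts if 'n' is not a Phi */ }
//   RegionNode* r = n->isa_Region();   // NULL instead of an assert when 'n' is not a Region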
841 | |
842 | DEFINE_CLASS_QUERY(AbstractLock) |
843 | DEFINE_CLASS_QUERY(Add) |
844 | DEFINE_CLASS_QUERY(AddP) |
845 | DEFINE_CLASS_QUERY(Allocate) |
846 | DEFINE_CLASS_QUERY(AllocateArray) |
847 | DEFINE_CLASS_QUERY(ArrayCopy) |
848 | DEFINE_CLASS_QUERY(BaseCountedLoop) |
849 | DEFINE_CLASS_QUERY(BaseCountedLoopEnd) |
850 | DEFINE_CLASS_QUERY(Bool) |
851 | DEFINE_CLASS_QUERY(BoxLock) |
852 | DEFINE_CLASS_QUERY(Call) |
853 | DEFINE_CLASS_QUERY(CallNative) |
854 | DEFINE_CLASS_QUERY(CallDynamicJava) |
855 | DEFINE_CLASS_QUERY(CallJava) |
856 | DEFINE_CLASS_QUERY(CallLeaf) |
857 | DEFINE_CLASS_QUERY(CallLeafNoFP) |
858 | DEFINE_CLASS_QUERY(CallRuntime) |
859 | DEFINE_CLASS_QUERY(CallStaticJava) |
860 | DEFINE_CLASS_QUERY(Catch) |
861 | DEFINE_CLASS_QUERY(CatchProj) |
862 | DEFINE_CLASS_QUERY(CheckCastPP) |
863 | DEFINE_CLASS_QUERY(CastII) |
864 | DEFINE_CLASS_QUERY(CastLL) |
865 | DEFINE_CLASS_QUERY(ConstraintCast) |
866 | DEFINE_CLASS_QUERY(ClearArray) |
867 | DEFINE_CLASS_QUERY(CMove) |
868 | DEFINE_CLASS_QUERY(Cmp) |
869 | DEFINE_CLASS_QUERY(CountedLoop) |
870 | DEFINE_CLASS_QUERY(CountedLoopEnd) |
871 | DEFINE_CLASS_QUERY(DecodeNarrowPtr) |
872 | DEFINE_CLASS_QUERY(DecodeN) |
873 | DEFINE_CLASS_QUERY(DecodeNKlass) |
874 | DEFINE_CLASS_QUERY(EncodeNarrowPtr) |
875 | DEFINE_CLASS_QUERY(EncodeP) |
876 | DEFINE_CLASS_QUERY(EncodePKlass) |
877 | DEFINE_CLASS_QUERY(FastLock) |
878 | DEFINE_CLASS_QUERY(FastUnlock) |
879 | DEFINE_CLASS_QUERY(Halt) |
880 | DEFINE_CLASS_QUERY(If) |
881 | DEFINE_CLASS_QUERY(RangeCheck) |
882 | DEFINE_CLASS_QUERY(IfProj) |
883 | DEFINE_CLASS_QUERY(IfFalse) |
884 | DEFINE_CLASS_QUERY(IfTrue) |
885 | DEFINE_CLASS_QUERY(Initialize) |
886 | DEFINE_CLASS_QUERY(Jump) |
887 | DEFINE_CLASS_QUERY(JumpProj) |
888 | DEFINE_CLASS_QUERY(LongCountedLoop) |
889 | DEFINE_CLASS_QUERY(LongCountedLoopEnd) |
890 | DEFINE_CLASS_QUERY(Load) |
891 | DEFINE_CLASS_QUERY(LoadStore) |
892 | DEFINE_CLASS_QUERY(LoadStoreConditional) |
893 | DEFINE_CLASS_QUERY(Lock) |
894 | DEFINE_CLASS_QUERY(Loop) |
895 | DEFINE_CLASS_QUERY(LShift) |
896 | DEFINE_CLASS_QUERY(Mach) |
897 | DEFINE_CLASS_QUERY(MachBranch) |
898 | DEFINE_CLASS_QUERY(MachCall) |
899 | DEFINE_CLASS_QUERY(MachCallNative) |
900 | DEFINE_CLASS_QUERY(MachCallDynamicJava) |
901 | DEFINE_CLASS_QUERY(MachCallJava) |
902 | DEFINE_CLASS_QUERY(MachCallLeaf) |
903 | DEFINE_CLASS_QUERY(MachCallRuntime) |
904 | DEFINE_CLASS_QUERY(MachCallStaticJava) |
905 | DEFINE_CLASS_QUERY(MachConstantBase) |
906 | DEFINE_CLASS_QUERY(MachConstant) |
907 | DEFINE_CLASS_QUERY(MachGoto) |
908 | DEFINE_CLASS_QUERY(MachIf) |
909 | DEFINE_CLASS_QUERY(MachJump) |
910 | DEFINE_CLASS_QUERY(MachNullCheck) |
911 | DEFINE_CLASS_QUERY(MachProj) |
912 | DEFINE_CLASS_QUERY(MachReturn) |
913 | DEFINE_CLASS_QUERY(MachSafePoint) |
914 | DEFINE_CLASS_QUERY(MachSpillCopy) |
915 | DEFINE_CLASS_QUERY(MachTemp) |
916 | DEFINE_CLASS_QUERY(MachMemBar) |
917 | DEFINE_CLASS_QUERY(MachMerge) |
918 | DEFINE_CLASS_QUERY(Mem) |
919 | DEFINE_CLASS_QUERY(MemBar) |
920 | DEFINE_CLASS_QUERY(MemBarStoreStore) |
921 | DEFINE_CLASS_QUERY(MergeMem) |
922 | DEFINE_CLASS_QUERY(Move) |
923 | DEFINE_CLASS_QUERY(Mul) |
924 | DEFINE_CLASS_QUERY(Multi) |
925 | DEFINE_CLASS_QUERY(MultiBranch) |
926 | DEFINE_CLASS_QUERY(Opaque1) |
927 | DEFINE_CLASS_QUERY(OuterStripMinedLoop) |
928 | DEFINE_CLASS_QUERY(OuterStripMinedLoopEnd) |
929 | DEFINE_CLASS_QUERY(Parm) |
930 | DEFINE_CLASS_QUERY(PCTable) |
931 | DEFINE_CLASS_QUERY(Phi) |
932 | DEFINE_CLASS_QUERY(Proj) |
933 | DEFINE_CLASS_QUERY(Region) |
934 | DEFINE_CLASS_QUERY(Root) |
935 | DEFINE_CLASS_QUERY(SafePoint) |
936 | DEFINE_CLASS_QUERY(SafePointScalarObject) |
937 | DEFINE_CLASS_QUERY(Start) |
938 | DEFINE_CLASS_QUERY(Store) |
939 | DEFINE_CLASS_QUERY(Sub) |
940 | DEFINE_CLASS_QUERY(SubTypeCheck) |
941 | DEFINE_CLASS_QUERY(Type) |
942 | DEFINE_CLASS_QUERY(Vector) |
943 | DEFINE_CLASS_QUERY(VectorMaskCmp) |
944 | DEFINE_CLASS_QUERY(VectorUnbox) |
945 | DEFINE_CLASS_QUERY(VectorReinterpret); |
946 | DEFINE_CLASS_QUERY(LoadVector) |
947 | DEFINE_CLASS_QUERY(LoadVectorGather) |
948 | DEFINE_CLASS_QUERY(StoreVector) |
949 | DEFINE_CLASS_QUERY(StoreVectorScatter) |
950 | DEFINE_CLASS_QUERY(ShiftV) |
951 | DEFINE_CLASS_QUERY(Unlock) |
952 | |
953 | #undef DEFINE_CLASS_QUERY |
954 | |
955 | // duplicate of is_MachSpillCopy() |
956 | bool is_SpillCopy () const { |
957 | return ((_class_id & ClassMask_MachSpillCopy) == Class_MachSpillCopy); |
958 | } |
959 | |
960 | bool is_Con () const { return (_flags & Flag_is_Con) != 0; } |
961 | // The data node which is safe to leave in dead loop during IGVN optimization. |
962 | bool is_dead_loop_safe() const; |
963 | |
964 | // is_Copy() returns copied edge index (0 or 1) |
965 | uint is_Copy() const { return (_flags & Flag_is_Copy); } |
966 | |
967 | virtual bool is_CFG() const { return false; } |
968 | |
969 | // If this node is control-dependent on a test, can it be |
970 | // rerouted to a dominating equivalent test? This is usually |
971 | // true of non-CFG nodes, but can be false for operations which |
972 | // depend for their correct sequencing on more than one test. |
973 | // (In that case, hoisting to a dominating test may silently |
974 | // skip some other important test.) |
975 | virtual bool depends_only_on_test() const { assert(!is_CFG(), ""); return true; }; |
976 | |
977 | // When building basic blocks, I need to have a notion of block beginning |
978 | // Nodes, next block selector Nodes (block enders), and next block |
979 | // projections. These calls need to work on their machine equivalents. The |
980 | // Ideal beginning Nodes are RootNode, RegionNode and StartNode. |
981 | bool is_block_start() const { |
982 | if ( is_Region() ) |
983 | return this == (const Node*)in(0); |
984 | else |
985 | return is_Start(); |
986 | } |
987 | |
988 | // The Ideal control projection Nodes are IfTrue/IfFalse, JumpProjNode, Root, |
989 | // Goto and Return. This call also returns the block ending Node. |
990 | virtual const Node *is_block_proj() const; |
991 | |
992 | // The node is a "macro" node which needs to be expanded before matching |
993 | bool is_macro() const { return (_flags & Flag_is_macro) != 0; } |
994 | // The node is expensive: the best control is set during loop opts |
995 | bool is_expensive() const { return (_flags & Flag_is_expensive) != 0 && in(0) != NULL; } |
996 | |
997 | // An arithmetic node which accumulates data in a loop. |
998 | // It must have the loop's phi as input and provide a def to the phi. |
999 | bool is_reduction() const { return (_flags & Flag_is_reduction) != 0; } |
1000 | |
1001 | bool is_predicated_vector() const { return (_flags & Flag_is_predicated_vector) != 0; } |
1002 | |
1003 | // The node is a CountedLoopEnd with a mask annotation so as to emit a restore context |
1004 | bool has_vector_mask_set() const { return (_flags & Flag_has_vector_mask_set) != 0; } |
1005 | |
1006 | // Used in lcm to mark nodes that have been scheduled |
1007 | bool is_scheduled() const { return (_flags & Flag_is_scheduled) != 0; } |
1008 | |
1009 | bool for_post_loop_opts_igvn() const { return (_flags & Flag_for_post_loop_opts_igvn) != 0; } |
1010 | |
1011 | //----------------- Optimization |
1012 | |
1013 | // Get the worst-case Type output for this Node. |
1014 | virtual const class Type *bottom_type() const; |
1015 | |
1016 | // If we find a better type for a node, try to record it permanently. |
1017 | // Return true if this node actually changed. |
1018 | // Be sure to do the hash_delete game in the "rehash" variant. |
1019 | void raise_bottom_type(const Type* new_type); |
1020 | |
1021 | // Get the address type with which this node uses and/or defs memory, |
1022 | // or NULL if none. The address type is conservatively wide. |
1023 | // Returns non-null for calls, membars, loads, stores, etc. |
1024 | // Returns TypePtr::BOTTOM if the node touches memory "broadly". |
1025 | virtual const class TypePtr *adr_type() const { return NULL; } |
1026 | |
1027 | // Return an existing node which computes the same function as this node. |
1028 | // The optimistic combined algorithm requires this to return a Node which |
1029 | // is a small number of steps away (e.g., one of my inputs). |
1030 | virtual Node* Identity(PhaseGVN* phase); |
1031 | |
1032 | // Return the set of values this Node can take on at runtime. |
1033 | virtual const Type* Value(PhaseGVN* phase) const; |
1034 | |
1035 | // Return a node which is more "ideal" than the current node. |
1036 | // The invariants on this call are subtle. If in doubt, read the |
1037 | // treatise in node.cpp above the default implementation AND TEST WITH |
1038 | // +VerifyIterativeGVN! |
1039 | virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); |
1040 | |
1041 | // Some nodes have specific Ideal subgraph transformations only if they are |
1042 | // unique users of specific nodes. Such nodes should be put on IGVN worklist |
1043 | // for the transformations to happen. |
1044 | bool has_special_unique_user() const; |
1045 | |
1046 | // Skip Proj and CatchProj node chains. Check for Null and Top. |
1047 | Node* find_exact_control(Node* ctrl); |
1048 | |
1049 | // Check if 'this' node dominates or is equal to 'sub'. |
1050 | bool dominates(Node* sub, Node_List &nlist); |
1051 | |
1052 | protected: |
1053 | bool remove_dead_region(PhaseGVN *phase, bool can_reshape); |
1054 | public: |
1055 | |
1056 | // See if there is valid pipeline info |
1057 | static const Pipeline *pipeline_class(); |
1058 | virtual const Pipeline *pipeline() const; |
1059 | |
1060 | // Compute the latency from the def to this instruction of the ith input node |
1061 | uint latency(uint i); |
1062 | |
1063 | // Hash & compare functions, for pessimistic value numbering |
1064 | |
1065 | // If the hash function returns the special sentinel value NO_HASH, |
1066 | // the node is guaranteed never to compare equal to any other node. |
1067 | // If we accidentally generate a hash with value NO_HASH the node |
1068 | // won't go into the table and we'll lose a little optimization. |
1069 | static const uint NO_HASH = 0; |
1070 | virtual uint hash() const; |
1071 | virtual bool cmp( const Node &n ) const; |
1072 | |
1073 | // Operation appears to be iteratively computed (such as an induction variable) |
1074 | // It is possible for this operation to return false for a loop-varying |
1075 | // value, if it appears (by local graph inspection) to be computed by a simple conditional. |
1076 | bool is_iteratively_computed(); |
1077 | |
1078 | // Determine if a node is a counted loop induction variable. |
1079 | // NOTE: The method is defined in "loopnode.cpp". |
1080 | bool is_cloop_ind_var() const; |
1081 | |
1082 | // Return a node with opcode "opc" and same inputs as "this" if one can |
1083 | // be found; otherwise return NULL. |
1084 | Node* find_similar(int opc); |
1085 | |
1086 | // Return the unique control out if only one. Null if none or more than one. |
1087 | Node* unique_ctrl_out() const; |
1088 | |
1089 | // Set control or add control as precedence edge |
1090 | void ensure_control_or_add_prec(Node* c); |
1091 | |
1092 | //----------------- Code Generation |
1093 | |
1094 | // Ideal register class for Matching. Zero means unmatched instruction |
1095 | // (these are cloned instead of converted to machine nodes). |
1096 | virtual uint ideal_reg() const; |
1097 | |
1098 | static const uint NotAMachineReg; // must be > max. machine register |
1099 | |
1100 | // Do we Match on this edge index or not? Generally false for Control |
1101 | // and true for everything else. Weird for calls & returns. |
1102 | virtual uint match_edge(uint idx) const; |
1103 | |
1104 | // Register class output is returned in |
1105 | virtual const RegMask &out_RegMask() const; |
1106 | // Register class input is expected in |
1107 | virtual const RegMask &in_RegMask(uint) const; |
1108 | // Should we clone rather than spill this instruction? |
1109 | bool rematerialize() const; |
1110 | |
1111 | // Return JVM State Object if this Node carries debug info, or NULL otherwise |
1112 | virtual JVMState* jvms() const; |
1113 | |
1114 | // Print as assembly |
1115 | virtual void format( PhaseRegAlloc *, outputStream* st = tty ) const; |
1116 | // Emit bytes starting at parameter 'ptr' |
1117 | // Bump 'ptr' by the number of output bytes |
1118 | virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const; |
1119 | // Size of instruction in bytes |
1120 | virtual uint size(PhaseRegAlloc *ra_) const; |
1121 | |
1122 | // Convenience function to extract an integer constant from a node. |
1123 | // If it is not an integer constant (either Con, CastII, or Mach), |
1124 | // return value_if_unknown. |
1125 | jint find_int_con(jint value_if_unknown) const { |
1126 | const TypeInt* t = find_int_type(); |
1127 | return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown; |
1128 | } |
1129 | // Return the constant, knowing it is an integer constant already |
1130 | jint get_int() const { |
1131 | const TypeInt* t = find_int_type(); |
1132 | guarantee(t != NULL, "must be con"); |
1133 | return t->get_con(); |
1134 | } |
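// Editor's note (illustrative sketch, not part of node.hpp): find_int_con() is the
// usual way to probe for a constant operand with a fallback, e.g. assuming 'shift'
// is a hypothetical Node* holding a shift amount:
//
//   jint con = shift->find_int_con(-1);
//   if (con >= 0) { /* constant shift amount */ } else { /* not a known constant */ }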
1135 | // Here's where the work is done. Can produce non-constant int types too. |
1136 | const TypeInt* find_int_type() const; |
1137 | const TypeInteger* find_integer_type(BasicType bt) const; |
1138 | |
1139 | // Same thing for long (and intptr_t, via type.hpp): |
1140 | jlong get_long() const { |
1141 | const TypeLong* t = find_long_type(); |
1142 | guarantee(t != NULL, "must be con"); |
1143 | return t->get_con(); |
1144 | } |
1145 | jlong find_long_con(jint value_if_unknown) const { |
1146 | const TypeLong* t = find_long_type(); |
1147 | return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown; |
1148 | } |
1149 | const TypeLong* find_long_type() const; |
1150 | |
1151 | jlong get_integer_as_long(BasicType bt) const { |
1152 | const TypeInteger* t = find_integer_type(bt); |
1153 | guarantee(t != NULL, "must be con"); |
1154 | return t->get_con_as_long(bt); |
1155 | } |
1156 | const TypePtr* get_ptr_type() const; |
1157 | |
1158 | // These guys are called by code generated by ADLC: |
1159 | intptr_t get_ptr() const; |
1160 | intptr_t get_narrowcon() const; |
1161 | jdouble getd() const; |
1162 | jfloat getf() const; |
1163 | |
1164 | // Nodes which are pinned into basic blocks |
1165 | virtual bool pinned() const { return false; } |
1166 | |
1167 | // Nodes which use memory without consuming it, hence need antidependences |
1168 | // More specifically, needs_anti_dependence_check returns true iff the node |
1169 | // (a) does a load, and (b) does not perform a store (except perhaps to a |
1170 | // stack slot or some other unaliased location). |
1171 | bool needs_anti_dependence_check() const; |
1172 | |
1173 | // Return which operand this instruction may cisc-spill. In other words, |
1174 | // return operand position that can convert from reg to memory access |
1175 | virtual int cisc_operand() const { return AdlcVMDeps::Not_cisc_spillable; } |
1176 | bool is_cisc_alternate() const { return (_flags & Flag_is_cisc_alternate) != 0; } |
1177 | |
1178 | // Whether this is a memory-writing machine node. |
1179 | bool is_memory_writer() const { return is_Mach() && bottom_type()->has_memory(); } |
1180 | |
1181 | //----------------- Printing, etc |
1182 | #ifndef PRODUCT |
1183 | private: |
1184 | int _indent; |
1185 | |
1186 | public: |
1187 | void set_indent(int indent) { _indent = indent; } |
1188 | |
1189 | private: |
1190 | static bool add_to_worklist(Node* n, Node_List* worklist, Arena* old_arena, VectorSet* old_space, VectorSet* new_space); |
1191 | public: |
1192 | Node* find(int idx, bool only_ctrl = false); // Search the graph for the given idx. |
1193 | Node* find_ctrl(int idx); // Search control ancestors for the given idx. |
1194 | void dump() const { dump("\n"); } // Print this node. |
1195 | void dump(const char* suffix, bool mark = false, outputStream *st = tty) const; // Print this node. |
1196 | void dump(int depth) const; // Print this node, recursively to depth d |
1197 | void dump_ctrl(int depth) const; // Print control nodes, to depth d |
1198 | void dump_comp() const; // Print this node in compact representation. |
1199 | // Print this node in compact representation. |
1200 | void dump_comp(const char* suffix, outputStream *st = tty) const; |
1201 | virtual void dump_req(outputStream *st = tty) const; // Print required-edge info |
1202 | virtual void dump_prec(outputStream *st = tty) const; // Print precedence-edge info |
1203 | virtual void dump_out(outputStream *st = tty) const; // Print the output edge info |
1204 | virtual void dump_spec(outputStream *st) const {}; // Print per-node info |
1205 | // Print compact per-node info |
1206 | virtual void dump_compact_spec(outputStream *st) const { dump_spec(st); } |
1207 | void dump_related() const; // Print related nodes (depends on node at hand). |
1208 | // Print related nodes up to given depths for input and output nodes. |
1209 | void dump_related(uint d_in, uint d_out) const; |
1210 | void dump_related_compact() const; // Print related nodes in compact representation. |
1211 | // Collect related nodes. |
1212 | virtual void related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const; |
1213 | // Collect nodes starting from this node, explicitly including/excluding control and data links. |
1214 | void collect_nodes(GrowableArray<Node*> *ns, int d, bool ctrl, bool data) const; |
1215 | |
1216 | // Node collectors, to be used in implementations of Node::rel(). |
1217 | // Collect the entire data input graph. Include control inputs if requested. |
1218 | void collect_nodes_in_all_data(GrowableArray<Node*> *ns, bool ctrl) const; |
1219 | // Collect the entire control input graph. Include data inputs if requested. |
1220 | void collect_nodes_in_all_ctrl(GrowableArray<Node*> *ns, bool data) const; |
1221 | // Collect the entire output graph until hitting and including control nodes. |
1222 | void collect_nodes_out_all_ctrl_boundary(GrowableArray<Node*> *ns) const; |
1223 | |
1224 | void verify_edges(Unique_Node_List &visited); // Verify bi-directional edges |
1225 | static void verify(int verify_depth, VectorSet& visited, Node_List& worklist); |
1226 | |
1227 | // This call defines a class-unique string used to identify class instances |
1228 | virtual const char *Name() const; |
1229 | |
1230 | void dump_format(PhaseRegAlloc *ra) const; // debug access to MachNode::format(...) |
1231 | // RegMask Print Functions |
1232 | void dump_in_regmask(int idx) { in_RegMask(idx).dump(); } |
1233 | void dump_out_regmask() { out_RegMask().dump(); } |
1234 | static bool in_dump() { return Compile::current()->_in_dump_cnt > 0; } |
1235 | void fast_dump() const { |
1236 | tty->print("%4d: %-17s", _idx, Name()); |
1237 | for (uint i = 0; i < len(); i++) |
1238 | if (in(i)) |
1239 | tty->print(" %4d", in(i)->_idx); |
1240 | else |
1241 | tty->print(" NULL"); |
1242 | tty->print("\n"); |
1243 | } |
1244 | #endif |
1245 | #ifdef ASSERT |
1246 | void verify_construction(); |
1247 | bool verify_jvms(const JVMState* jvms) const; |
1248 | int _debug_idx; // Unique value assigned to every node. |
1249 | int debug_idx() const { return _debug_idx; } |
1250 | void set_debug_idx( int debug_idx ) { _debug_idx = debug_idx; } |
1251 | |
1252 | Node* _debug_orig; // Original version of this, if any. |
1253 | Node* debug_orig() const { return _debug_orig; } |
1254 | void set_debug_orig(Node* orig); // _debug_orig = orig |
1255 | void dump_orig(outputStream *st, bool print_key = true) const; |
1256 | |
1257 | int _hash_lock; // Barrier to modifications of nodes in the hash table |
1258 | void enter_hash_lock() { ++_hash_lock; assert(_hash_lock < 99, "in too many hash tables?"); } |
1259 | void exit_hash_lock() { --_hash_lock; assert(_hash_lock >= 0, "mispaired hash locks"); } |
1260 | |
1261 | static void init_NodeProperty(); |
1262 | |
1263 | #if OPTO_DU_ITERATOR_ASSERT |
1264 | const Node* _last_del; // The last deleted node. |
1265 | uint _del_tick; // Bumped when a deletion happens. |
1266 | #endif |
1267 | #endif |
1268 | }; |
1269 | |
1270 | inline bool not_a_node(const Node* n) { |
1271 | if (n == NULL) return true; |
1272 | if (((intptr_t)n & 1) != 0) return true; // uninitialized, etc. |
1273 | if (*(address*)n == badAddress) return true; // killed by Node::destruct |
1274 | return false; |
1275 | } |
1276 | |
1277 | //----------------------------------------------------------------------------- |
1278 | // Iterators over DU info, and associated Node functions. |
1279 | |
1280 | #if OPTO_DU_ITERATOR_ASSERT |
1281 | |
1282 | // Common code for assertion checking on DU iterators. |
1283 | class DUIterator_Common { |
1284 | #ifdef ASSERT |
1285 | protected: |
1286 | bool _vdui; // cached value of VerifyDUIterators |
1287 | const Node* _node; // the node containing the _out array |
1288 | uint _outcnt; // cached node->_outcnt |
1289 | uint _del_tick; // cached node->_del_tick |
1290 | Node* _last; // last value produced by the iterator |
1291 | |
1292 | void sample(const Node* node); // used by c'tor to set up for verifies |
1293 | void verify(const Node* node, bool at_end_ok = false); |
1294 | void verify_resync(); |
1295 | void reset(const DUIterator_Common& that); |
1296 | |
1297 | // The VDUI_ONLY macro protects code conditionalized on VerifyDUIterators |
1298 | #define I_VDUI_ONLY(i,x) { if ((i)._vdui) { x; } } |
1299 | #else |
1300 | #define I_VDUI_ONLY(i,x) { } |
1301 | #endif //ASSERT |
1302 | }; |
1303 | |
1304 | #define VDUI_ONLY(x) I_VDUI_ONLY(*this, x) |
1305 | |
1306 | // Default DU iterator. Allows appends onto the out array. |
1307 | // Allows deletion from the out array only at the current point. |
1308 | // Usage: |
1309 | // for (DUIterator i = x->outs(); x->has_out(i); i++) { |
1310 | // Node* y = x->out(i); |
1311 | // ... |
1312 | // } |
1313 | // Compiles in product mode to an unsigned integer index, which indexes |
1314 | // onto a repeatedly reloaded base pointer of x->_out. The loop predicate |
1315 | // also reloads x->_outcnt. If you delete, you must perform "--i" just |
1316 | // before continuing the loop. You must delete only the last-produced |
1317 | // edge. You must delete only a single copy of the last-produced edge, |
1318 | // or else you must delete all copies at once (the first time the edge |
1319 | // is produced by the iterator). |
1320 | class DUIterator : public DUIterator_Common { |
1321 | friend class Node; |
1322 | |
1323 | // This is the index which provides the product-mode behavior. |
1324 | // Whatever the product-mode version of the system does to the |
1325 | // DUI index is done to this index. All other fields in |
1326 | // this class are used only for assertion checking. |
1327 | uint _idx; |
1328 | |
1329 | #ifdef ASSERT |
1330 | uint _refresh_tick; // Records the refresh activity. |
1331 | |
1332 | void sample(const Node* node); // Initialize _refresh_tick etc. |
1333 | void verify(const Node* node, bool at_end_ok = false); |
1334 | void verify_increment(); // Verify an increment operation. |
1335 | void verify_resync(); // Verify that we can back up over a deletion. |
1336 | void verify_finish(); // Verify that the loop terminated properly. |
1337 | void refresh(); // Resample verification info. |
1338 | void reset(const DUIterator& that); // Resample after assignment. |
1339 | #endif |
1340 | |
1341 | DUIterator(const Node* node, int dummy_to_avoid_conversion) |
1342 | { _idx = 0; debug_only(sample(node)); } |
1343 | |
1344 | public: |
1345 | // initialize to garbage; clear _vdui to disable asserts |
1346 | DUIterator() |
1347 | { /*initialize to garbage*/ debug_only(_vdui = false); } |
1348 | |
1349 | DUIterator(const DUIterator& that) |
1350 | { _idx = that._idx; debug_only(_vdui = false; reset(that)); } |
1351 | |
1352 | void operator++(int dummy_to_specify_postfix_op) |
1353 | { _idx++; VDUI_ONLY(verify_increment()); } |
1354 | |
1355 | void operator--() |
1356 | { VDUI_ONLY(verify_resync()); --_idx; } |
1357 | |
1358 | ~DUIterator() |
1359 | { VDUI_ONLY(verify_finish()); } |
1360 | |
1361 | void operator=(const DUIterator& that) |
1362 | { _idx = that._idx; debug_only(reset(that)); } |
1363 | }; |
1364 | |
1365 | DUIterator Node::outs() const |
1366 | { return DUIterator(this, 0); } |
1367 | DUIterator& Node::refresh_out_pos(DUIterator& i) const |
1368 | { I_VDUI_ONLY(i, i.refresh()); return i; } |
1369 | bool Node::has_out(DUIterator& i) const |
1370 | { I_VDUI_ONLY(i, i.verify(this,true));return i._idx < _outcnt; } |
1371 | Node* Node::out(DUIterator& i) const |
1372 | { I_VDUI_ONLY(i, i.verify(this)); return debug_only(i._last=) _out[i._idx]; } |
1373 | |
1374 | |
1375 | // Faster DU iterator. Disallows insertions into the out array. |
1376 | // Allows deletion from the out array only at the current point. |
1377 | // Usage: |
1378 | // for (DUIterator_Fast imax, i = x->fast_outs(imax); i < imax; i++) { |
1379 | // Node* y = x->fast_out(i); |
1380 | // ... |
1381 | // } |
1382 | // Compiles in product mode to raw Node** pointer arithmetic, with |
1383 | // no reloading of pointers from the original node x. If you delete, |
1384 | // you must perform "--i; --imax" just before continuing the loop. |
1385 | // If you delete multiple copies of the same edge, you must decrement |
1386 | // imax, but not i, multiple times: "--i, imax -= num_edges". |
1387 | class DUIterator_Fast : public DUIterator_Common { |
1388 | friend class Node; |
1389 | friend class DUIterator_Last; |
1390 | |
1391 | // This is the pointer which provides the product-mode behavior. |
1392 | // Whatever the product-mode version of the system does to the |
1393 | // DUI pointer is done to this pointer. All other fields in |
1394 | // this class are used only for assertion checking. |
1395 | Node** _outp; |
1396 | |
1397 | #ifdef ASSERT |
1398 | void verify(const Node* node, bool at_end_ok = false); |
1399 | void verify_limit(); |
1400 | void verify_resync(); |
1401 | void verify_relimit(uint n); |
1402 | void reset(const DUIterator_Fast& that); |
1403 | #endif |
1404 | |
1405 | // Note: offset must be signed, since -1 is sometimes passed |
1406 | DUIterator_Fast(const Node* node, ptrdiff_t offset) |
1407 | { _outp = node->_out + offset; debug_only(sample(node)); } |
1408 | |
1409 | public: |
1410 | // initialize to garbage; clear _vdui to disable asserts |
1411 | DUIterator_Fast() |
1412 | { /*initialize to garbage*/ debug_only(_vdui = false); } |
1413 | |
1414 | DUIterator_Fast(const DUIterator_Fast& that) |
1415 | { _outp = that._outp; debug_only(_vdui = false; reset(that)); } |
1416 | |
1417 | void operator++(int dummy_to_specify_postfix_op) |
1418 | { _outp++; VDUI_ONLY(verify(_node, true)); } |
1419 | |
1420 | void operator--() |
1421 | { VDUI_ONLY(verify_resync()); --_outp; } |
1422 | |
1423 | void operator-=(uint n) // applied to the limit only |
1424 | { _outp -= n; VDUI_ONLY(verify_relimit(n)); } |
1425 | |
1426 | bool operator<(DUIterator_Fast& limit) { |
1427 | I_VDUI_ONLY(*this, this->verify(_node, true)); |
1428 | I_VDUI_ONLY(limit, limit.verify_limit()); |
1429 | return _outp < limit._outp; |
1430 | } |
1431 | |
1432 | void operator=(const DUIterator_Fast& that) |
1433 | { _outp = that._outp; debug_only(reset(that)); } |
1434 | }; |
1435 | |
1436 | DUIterator_Fast Node::fast_outs(DUIterator_Fast& imax) const { |
1437 | // Assign a limit pointer to the reference argument: |
1438 | imax = DUIterator_Fast(this, (ptrdiff_t)_outcnt); |
1439 | // Return the base pointer: |
1440 | return DUIterator_Fast(this, 0); |
1441 | } |
1442 | Node* Node::fast_out(DUIterator_Fast& i) const { |
1443 | I_VDUI_ONLY(i, i.verify(this)); |
1444 | return debug_only(i._last=) *i._outp; |
1445 | } |
1446 | |
1447 | |
1448 | // Faster DU iterator. Requires each successive edge to be removed. |
1449 | // Does not allow insertion of any edges. |
1450 | // Usage: |
1451 | // for (DUIterator_Last imin, i = x->last_outs(imin); i >= imin; i -= num_edges) { |
1452 | // Node* y = x->last_out(i); |
1453 | // ... |
1454 | // } |
1455 | // Compiles in product mode to raw Node** pointer arithmetic, with |
1456 | // no reloading of pointers from the original node x. |
1457 | class DUIterator_Last : private DUIterator_Fast { |
1458 | friend class Node; |
1459 | |
1460 | #ifdef ASSERT |
1461 | void verify(const Node* node, bool at_end_ok = false); |
1462 | void verify_limit(); |
1463 | void verify_step(uint num_edges); |
1464 | #endif |
1465 | |
1466 | // Note: offset must be signed, since -1 is sometimes passed |
1467 | DUIterator_Last(const Node* node, ptrdiff_t offset) |
1468 | : DUIterator_Fast(node, offset) { } |
1469 | |
1470 | void operator++(int dummy_to_specify_postfix_op) {} // do not use |
1471 | void operator<(int) {} // do not use |
1472 | |
1473 | public: |
1474 | DUIterator_Last() { } |
1475 | // initialize to garbage |
1476 | |
1477 | DUIterator_Last(const DUIterator_Last& that) = default; |
1478 | |
1479 | void operator--() |
1480 | { _outp--; VDUI_ONLY(verify_step(1)); } |
1481 | |
1482 | void operator-=(uint n) |
1483 | { _outp -= n; VDUI_ONLY(verify_step(n)); } |
1484 | |
1485 | bool operator>=(DUIterator_Last& limit) { |
1486 | I_VDUI_ONLY(*this, this->verify(_node, true)); |
1487 | I_VDUI_ONLY(limit, limit.verify_limit()); |
1488 | return _outp >= limit._outp; |
1489 | } |
1490 | |
1491 | DUIterator_Last& operator=(const DUIterator_Last& that) = default; |
1492 | }; |
1493 | |
1494 | DUIterator_Last Node::last_outs(DUIterator_Last& imin) const { |
1495 | // Assign a limit pointer to the reference argument: |
1496 | imin = DUIterator_Last(this, 0); |
1497 | // Return the initial pointer: |
1498 | return DUIterator_Last(this, (ptrdiff_t)_outcnt - 1); |
1499 | } |
1500 | Node* Node::last_out(DUIterator_Last& i) const { |
1501 | I_VDUI_ONLY(i, i.verify(this)); |
1502 | return debug_only(i._last=) *i._outp; |
1503 | } |
1504 | |
1505 | #endif //OPTO_DU_ITERATOR_ASSERT |
1506 | |
1507 | #undef I_VDUI_ONLY |
1508 | #undef VDUI_ONLY |
1509 | |
1510 | // An Iterator that truly follows the iterator pattern. Doesn't |
1511 | // support deletion but could be made to. |
1512 | // |
1513 | // for (SimpleDUIterator i(n); i.has_next(); i.next()) { |
1514 | // Node* m = i.get(); |
1515 | // |
1516 | class SimpleDUIterator : public StackObj { |
1517 | private: |
1518 | Node* node; |
1519 | DUIterator_Fast i; |
1520 | DUIterator_Fast imax; |
1521 | public: |
1522 | SimpleDUIterator(Node* n): node(n), i(n->fast_outs(imax)) {} |
1523 | bool has_next() { return i < imax; } |
1524 | void next() { i++; } |
1525 | Node* get() { return node->fast_out(i); } |
1526 | }; |
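To make the iterator styles above concrete, here is a minimal read-only sketch (not part of the original header); `n` stands for an arbitrary Node* inside a running C2 compilation, and neither loop adds or removes edges:

    // Fast walk over every use of n; fine as long as the out-array is not modified.
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* use = n->fast_out(i);
      // inspect 'use' here
    }

    // The same walk in the simpler iterator-object style provided by SimpleDUIterator.
    for (SimpleDUIterator it(n); it.has_next(); it.next()) {
      Node* use = it.get();
      // inspect 'use' here
    }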
1527 | |
1528 | |
1529 | //----------------------------------------------------------------------------- |
1530 | // Map dense integer indices to Nodes. Uses classic doubling-array trick. |
1531 | // Abstractly provides an infinite array of Node*'s, initialized to NULL. |
1532 | // Note that the constructor just zeros things, and since I use Arena |
1533 | // allocation I do not need a destructor to reclaim storage. |
1534 | class Node_Array : public ResourceObj { |
1535 | friend class VMStructs; |
1536 | protected: |
1537 | Arena* _a; // Arena to allocate in |
1538 | uint _max; |
1539 | Node** _nodes; |
1540 | void grow( uint i ); // Grow array node to fit |
1541 | public: |
1542 | Node_Array(Arena* a, uint max = OptoNodeListSize) : _a(a), _max(max) { |
1543 | _nodes = NEW_ARENA_ARRAY(a, Node*, max); |
1544 | clear(); |
1545 | } |
1546 | |
1547 | Node_Array(Node_Array* na) : _a(na->_a), _max(na->_max), _nodes(na->_nodes) {} |
1548 | Node *operator[] ( uint i ) const // Lookup, or NULL for not mapped |
1549 | { return (i<_max) ? _nodes[i] : (Node*)NULL; } |
1550 | Node* at(uint i) const { assert(i<_max,"oob"); return _nodes[i]; } |
1551 | Node** adr() { return _nodes; } |
1552 | // Extend the mapping: index i maps to Node *n. |
1553 | void map( uint i, Node *n ) { if( i>=_max ) grow(i); _nodes[i] = n; } |
1554 | void insert( uint i, Node *n ); |
1555 | void remove( uint i ); // Remove, preserving order |
1556 | // Clear all entries in _nodes to NULL but keep storage |
1557 | void clear() { |
1558 | Copy::zero_to_bytes(_nodes, _max * sizeof(Node*)); |
1559 | } |
1560 | |
1561 | uint Size() const { return _max; } |
1562 | void dump() const; |
1563 | }; |
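A small usage sketch (illustrative only, assuming an Arena* `arena` and a Node* `n` from the current compilation) of the index-to-node mapping this class provides:

    Node_Array idx_to_node(arena);       // all entries start out NULL
    idx_to_node.map(n->_idx, n);         // map() grows the backing array on demand
    Node* hit  = idx_to_node[n->_idx];   // == n
    Node* miss = idx_to_node[123456];    // NULL for an index that was never mapped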
1564 | |
1565 | class Node_List : public Node_Array { |
1566 | friend class VMStructs; |
1567 | uint _cnt; |
1568 | public: |
1569 | Node_List(uint max = OptoNodeListSize) : Node_Array(Thread::current()->resource_area(), max), _cnt(0) {} |
1570 | Node_List(Arena *a, uint max = OptoNodeListSize) : Node_Array(a, max), _cnt(0) {} |
1571 | bool contains(const Node* n) const { |
1572 | for (uint e = 0; e < size(); e++) { |
1573 | if (at(e) == n) return true; |
1574 | } |
1575 | return false; |
1576 | } |
1577 | void insert( uint i, Node *n ) { Node_Array::insert(i,n); _cnt++; } |
1578 | void remove( uint i ) { Node_Array::remove(i); _cnt--; } |
1579 | void push( Node *b ) { map(_cnt++,b); } |
1580 | void yank( Node *n ); // Find and remove |
1581 | Node *pop() { return _nodes[--_cnt]; } |
1582 | void clear() { _cnt = 0; Node_Array::clear(); } // retain storage |
1583 | void copy(const Node_List& from) { |
1584 | if (from._max > _max) { |
1585 | grow(from._max); |
1586 | } |
1587 | _cnt = from._cnt; |
1588 | Copy::conjoint_words_to_higher((HeapWord*)&from._nodes[0], (HeapWord*)&_nodes[0], from._max * sizeof(Node*)); |
1589 | } |
1590 | |
1591 | uint size() const { return _cnt; } |
1592 | void dump() const; |
1593 | void dump_simple() const; |
1594 | }; |
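For illustration (not part of the header): because push/pop work at the end of the array, a Node_List is often used as a plain LIFO work stack. `arena` and `root` are assumed to come from the surrounding compilation:

    Node_List stack(arena);
    stack.push(root);
    while (stack.size() > 0) {
      Node* cur = stack.pop();
      // process 'cur'; Node_List does no de-duplication, so the same node
      // may be pushed (and processed) more than once
    }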
1595 | |
1596 | //------------------------------Unique_Node_List------------------------------- |
1597 | class Unique_Node_List : public Node_List { |
1598 | friend class VMStructs; |
1599 | VectorSet _in_worklist; |
1600 | uint _clock_index; // Index in list where to pop from next |
1601 | public: |
1602 | Unique_Node_List() : Node_List(), _clock_index(0) {} |
1603 | Unique_Node_List(Arena *a) : Node_List(a), _in_worklist(a), _clock_index(0) {} |
1604 | |
1605 | void remove( Node *n ); |
1606 | bool member( Node *n ) { return _in_worklist.test(n->_idx) != 0; } |
1607 | VectorSet& member_set(){ return _in_worklist; } |
1608 | |
1609 | void push(Node* b) { |
1610 | if( !_in_worklist.test_set(b->_idx) ) |
1611 | Node_List::push(b); |
1612 | } |
1613 | Node *pop() { |
1614 | if( _clock_index >= size() ) _clock_index = 0; |
1615 | Node *b = at(_clock_index); |
1616 | map( _clock_index, Node_List::pop()); |
1617 | if (size() != 0) _clock_index++; // Always start from 0 |
1618 | _in_worklist.remove(b->_idx); |
1619 | return b; |
1620 | } |
1621 | Node *remove(uint i) { |
1622 | Node *b = Node_List::at(i); |
1623 | _in_worklist.remove(b->_idx); |
1624 | map(i,Node_List::pop()); |
1625 | return b; |
1626 | } |
1627 | void yank(Node *n) { |
1628 | _in_worklist.remove(n->_idx); |
1629 | Node_List::yank(n); |
1630 | } |
1631 | void clear() { |
1632 | _in_worklist.clear(); // Discards storage but grows automatically |
1633 | Node_List::clear(); |
1634 | _clock_index = 0; |
1635 | } |
1636 | |
1637 | // Used after parsing to remove useless nodes before Iterative GVN |
1638 | void remove_useless_nodes(VectorSet& useful); |
1639 | |
1640 | bool contains(const Node* n) const { |
1641 | fatal("use faster member() instead")do { (*g_assert_poison) = 'X';; report_fatal(INTERNAL_ERROR, "/home/daniel/Projects/java/jdk/src/hotspot/share/opto/node.hpp" , 1641, "use faster member() instead"); ::breakpoint(); } while (0); |
1642 | return false; |
1643 | } |
1644 | |
1645 | #ifndef PRODUCT |
1646 | void print_set() const { _in_worklist.print(); } |
1647 | #endif |
1648 | }; |
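A sketch of the common transitive-closure idiom built on Unique_Node_List (illustrative; `start` is an assumed root node). Iterating by index instead of popping lets the list double as the visited set, since push() refuses nodes that are already on it:

    Unique_Node_List visited;
    visited.push(start);
    for (uint i = 0; i < visited.size(); i++) {
      Node* cur = visited.at(i);
      for (DUIterator_Fast jmax, j = cur->fast_outs(jmax); j < jmax; j++) {
        visited.push(cur->fast_out(j));   // no-op for nodes already collected
      }
    }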
1649 | |
1650 | // Inline definition of Compile::record_for_igvn must be deferred to this point. |
1651 | inline void Compile::record_for_igvn(Node* n) { |
1652 | _for_igvn->push(n); |
1653 | } |
1654 | |
1655 | //------------------------------Node_Stack------------------------------------- |
1656 | class Node_Stack { |
1657 | friend class VMStructs; |
1658 | protected: |
1659 | struct INode { |
1660 | Node *node; // Processed node |
1661 | uint indx; // Index of next node's child |
1662 | }; |
1663 | INode *_inode_top; // tos, stack grows up |
1664 | INode *_inode_max; // End of _inodes == _inodes + _max |
1665 | INode *_inodes; // Array storage for the stack |
1666 | Arena *_a; // Arena to allocate in |
1667 | void grow(); |
1668 | public: |
1669 | Node_Stack(int size) { |
1670 | size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize; |
1671 | _a = Thread::current()->resource_area(); |
1672 | _inodes = NEW_ARENA_ARRAY( _a, INode, max ); |
1673 | _inode_max = _inodes + max; |
1674 | _inode_top = _inodes - 1; // stack is empty |
1675 | } |
1676 | |
1677 | Node_Stack(Arena *a, int size) : _a(a) { |
1678 | size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize; |
1679 | _inodes = NEW_ARENA_ARRAY( _a, INode, max ); |
1680 | _inode_max = _inodes + max; |
1681 | _inode_top = _inodes - 1; // stack is empty |
1682 | } |
1683 | |
1684 | void pop() { |
1685 | assert(_inode_top >= _inodes, "node stack underflow")do { if (!(_inode_top >= _inodes)) { (*g_assert_poison) = 'X' ;; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/node.hpp" , 1685, "assert(" "_inode_top >= _inodes" ") failed", "node stack underflow" ); ::breakpoint(); } } while (0); |
1686 | --_inode_top; |
1687 | } |
1688 | void push(Node *n, uint i) { |
1689 | ++_inode_top; |
1690 | if (_inode_top >= _inode_max) grow(); |
1691 | INode *top = _inode_top; // optimization |
1692 | top->node = n; |
1693 | top->indx = i; |
1694 | } |
1695 | Node *node() const { |
1696 | return _inode_top->node; |
1697 | } |
1698 | Node* node_at(uint i) const { |
1699 | assert(_inodes + i <= _inode_top, "in range")do { if (!(_inodes + i <= _inode_top)) { (*g_assert_poison ) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/node.hpp" , 1699, "assert(" "_inodes + i <= _inode_top" ") failed", "in range" ); ::breakpoint(); } } while (0); |
1700 | return _inodes[i].node; |
1701 | } |
1702 | uint index() const { |
1703 | return _inode_top->indx; |
1704 | } |
1705 | uint index_at(uint i) const { |
1706 | assert(_inodes + i <= _inode_top, "in range")do { if (!(_inodes + i <= _inode_top)) { (*g_assert_poison ) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/node.hpp" , 1706, "assert(" "_inodes + i <= _inode_top" ") failed", "in range" ); ::breakpoint(); } } while (0); |
1707 | return _inodes[i].indx; |
1708 | } |
1709 | void set_node(Node *n) { |
1710 | _inode_top->node = n; |
1711 | } |
1712 | void set_index(uint i) { |
1713 | _inode_top->indx = i; |
1714 | } |
1715 | uint size_max() const { return (uint)pointer_delta(_inode_max, _inodes, sizeof(INode)); } // Max size |
1716 | uint size() const { return (uint)pointer_delta((_inode_top+1), _inodes, sizeof(INode)); } // Current size |
1717 | bool is_nonempty() const { return (_inode_top >= _inodes); } |
1718 | bool is_empty() const { return (_inode_top < _inodes); } |
1719 | void clear() { _inode_top = _inodes - 1; } // retain storage |
1720 | |
1721 | // Node_Stack is used to map nodes. |
1722 | Node* find(uint idx) const; |
1723 | }; |
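To illustrate the (node, index) pairs, a sketch of an iterative depth-first walk over the input graph (`root` is an assumed starting node; a real traversal would also track visited nodes, e.g. with a VectorSet, to cope with shared inputs and cycles):

    Node_Stack stack(16);
    stack.push(root, 0);
    while (stack.is_nonempty()) {
      Node* cur = stack.node();
      uint  idx = stack.index();
      if (idx < cur->req()) {
        stack.set_index(idx + 1);      // remember how far cur's inputs are processed
        Node* in = cur->in(idx);
        if (in != NULL) {
          stack.push(in, 0);           // descend into the next input
        }
      } else {
        stack.pop();                   // all inputs of 'cur' handled
      }
    }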
1724 | |
1725 | |
1726 | //-----------------------------Node_Notes-------------------------------------- |
1727 | // Debugging or profiling annotations loosely and sparsely associated |
1728 | // with some nodes. See Compile::node_notes_at for the accessor. |
1729 | class Node_Notes { |
1730 | friend class VMStructs; |
1731 | JVMState* _jvms; |
1732 | |
1733 | public: |
1734 | Node_Notes(JVMState* jvms = NULL) { |
1735 | _jvms = jvms; |
1736 | } |
1737 | |
1738 | JVMState* jvms() { return _jvms; } |
1739 | void set_jvms(JVMState* x) { _jvms = x; } |
1740 | |
1741 | // True if there is nothing here. |
1742 | bool is_clear() { |
1743 | return (_jvms == NULL); |
1744 | } |
1745 | |
1746 | // Make there be nothing here. |
1747 | void clear() { |
1748 | _jvms = NULL; |
1749 | } |
1750 | |
1751 | // Make a new, clean node notes. |
1752 | static Node_Notes* make(Compile* C) { |
1753 | Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1); |
1754 | nn->clear(); |
1755 | return nn; |
1756 | } |
1757 | |
1758 | Node_Notes* clone(Compile* C) { |
1759 | Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1); |
1760 | (*nn) = (*this); |
1761 | return nn; |
1762 | } |
1763 | |
1764 | // Absorb any information from source. |
1765 | bool update_from(Node_Notes* source) { |
1766 | bool changed = false; |
1767 | if (source != NULL) { |
1768 | if (source->jvms() != NULL) { |
1769 | set_jvms(source->jvms()); |
1770 | changed = true; |
1771 | } |
1772 | } |
1773 | return changed; |
1774 | } |
1775 | }; |
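A brief sketch of how such a record is created and filled (illustrative; `C` is the current Compile* and `jvms` some JVMState* to be attached as debug info):

    Node_Notes* nn = Node_Notes::make(C);   // arena-allocated and cleared
    assert(nn->is_clear(), "fresh notes carry no JVMState");
    nn->set_jvms(jvms);                     // now update_from() has something to copy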
1776 | |
1777 | // Inlined accessors for Compile::node_notes that require the preceding class: |
1778 | inline Node_Notes* |
1779 | Compile::locate_node_notes(GrowableArray<Node_Notes*>* arr, |
1780 | int idx, bool can_grow) { |
1781 | assert(idx >= 0, "oob")do { if (!(idx >= 0)) { (*g_assert_poison) = 'X';; report_vm_error ("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/node.hpp" , 1781, "assert(" "idx >= 0" ") failed", "oob"); ::breakpoint (); } } while (0); |
1782 | int block_idx = (idx >> _log2_node_notes_block_size); |
1783 | int grow_by = (block_idx - (arr == NULL? 0: arr->length())); |
1784 | if (grow_by >= 0) { |
1785 | if (!can_grow) return NULL; |
1786 | grow_node_notes(arr, grow_by + 1); |
1787 | } |
1788 | if (arr == NULL) return NULL; |
1789 | // (Every element of arr is a sub-array of length _node_notes_block_size.) |
1790 | return arr->at(block_idx) + (idx & (_node_notes_block_size-1)); |
1791 | } |
1792 | |
1793 | inline bool |
1794 | Compile::set_node_notes_at(int idx, Node_Notes* value) { |
1795 | if (value == NULL || value->is_clear()) |
1796 | return false; // nothing to write => write nothing |
1797 | Node_Notes* loc = locate_node_notes(_node_note_array, idx, true); |
1798 | assert(loc != NULL, "")do { if (!(loc != __null)) { (*g_assert_poison) = 'X';; report_vm_error ("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/node.hpp" , 1798, "assert(" "loc != __null" ") failed", ""); ::breakpoint (); } } while (0); |
1799 | return loc->update_from(value); |
1800 | } |
1801 | |
1802 | |
1803 | //------------------------------TypeNode--------------------------------------- |
1804 | // Node with a Type constant. |
1805 | class TypeNode : public Node { |
1806 | protected: |
1807 | virtual uint hash() const; // Check the type |
1808 | virtual bool cmp( const Node &n ) const; |
1809 | virtual uint size_of() const; // Size is bigger |
1810 | const Type* const _type; |
1811 | public: |
1812 | void set_type(const Type* t) { |
1813 | assert(t != NULL, "sanity")do { if (!(t != __null)) { (*g_assert_poison) = 'X';; report_vm_error ("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/node.hpp" , 1813, "assert(" "t != __null" ") failed", "sanity"); ::breakpoint (); } } while (0); |
1814 | debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH)uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH; |
1815 | *(const Type**)&_type = t; // cast away const-ness |
1816 | // If this node is in the hash table, make sure it doesn't need a rehash. |
1817 | assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code"); |
1818 | } |
1819 | const Type* type() const { assert(_type != NULL, "sanity")do { if (!(_type != __null)) { (*g_assert_poison) = 'X';; report_vm_error ("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/node.hpp" , 1819, "assert(" "_type != __null" ") failed", "sanity"); :: breakpoint(); } } while (0); return _type; }; |
1820 | TypeNode( const Type *t, uint required ) : Node(required), _type(t) { |
1821 | init_class_id(Class_Type); |
1822 | } |
1823 | virtual const Type* Value(PhaseGVN* phase) const; |
1824 | virtual const Type *bottom_type() const; |
1825 | virtual uint ideal_reg() const; |
1826 | #ifndef PRODUCT |
1827 | virtual void dump_spec(outputStream *st) const; |
1828 | virtual void dump_compact_spec(outputStream *st) const; |
1829 | #endif |
1830 | }; |
1831 | |
1832 | #include "opto/opcodes.hpp" |
1833 | |
1834 | #define Op_IL(op) \ |
1835 | inline int Op_ ## op(BasicType bt) { \ |
1836 | assert(bt == T_INT || bt == T_LONG, "only for int or longs"); \ |
1837 | if (bt == T_INT) { \ |
1838 | return Op_## op ## I; \ |
1839 | } \ |
1840 | return Op_## op ## L; \ |
1841 | } |
1842 | |
1843 | Op_IL(Add) |
1844 | Op_IL(Sub) |
1845 | Op_IL(Mul) |
1846 | Op_IL(URShift) |
1847 | Op_IL(LShift) |
1848 | Op_IL(Xor) |
1849 | Op_IL(Cmp) |
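By way of example, each Op_IL expansion above produces a helper such that Op_Add(T_INT) == Op_AddI and Op_Add(T_LONG) == Op_AddL, letting code that is generic over int/long select an opcode from a BasicType. A hypothetical use (not part of the header):

    bool is_add_of(const Node* n, BasicType bt) {
      return n->Opcode() == Op_Add(bt);   // e.g. Op_AddL when bt == T_LONG
    }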
1850 | |
1851 | inline int Op_Cmp_unsigned(BasicType bt) { |
1852 | assert(bt == T_INT || bt == T_LONG, "only for int or longs"); |
1853 | if (bt == T_INT) { |
1854 | return Op_CmpU; |
1855 | } |
1856 | return Op_CmpUL; |
1857 | } |
1858 | |
1859 | inline int Op_Cast(BasicType bt) { |
1860 | assert(bt == T_INT || bt == T_LONG, "only for int or longs"); |
1861 | if (bt == T_INT) { |
1862 | return Op_CastII; |
1863 | } |
1864 | return Op_CastLL; |
1865 | } |
1866 | |
1867 | #endif // SHARE_OPTO_NODE_HPP |
1 | /* |
2 | * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. |
3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 | * |
5 | * This code is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 only, as |
7 | * published by the Free Software Foundation. |
8 | * |
9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
12 | * version 2 for more details (a copy is included in the LICENSE file that |
13 | * accompanied this code). |
14 | * |
15 | * You should have received a copy of the GNU General Public License version |
16 | * 2 along with this work; if not, write to the Free Software Foundation, |
17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
18 | * |
19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
20 | * or visit www.oracle.com if you need additional information or have any |
21 | * questions. |
22 | * |
23 | */ |
24 | |
25 | #ifndef SHARE_OPTO_TYPE_HPP |
26 | #define SHARE_OPTO_TYPE_HPP |
27 | |
28 | #include "opto/adlcVMDeps.hpp" |
29 | #include "runtime/handles.hpp" |
30 | |
31 | // Portions of code courtesy of Clifford Click |
32 | |
33 | // Optimization - Graph Style |
34 | |
35 | |
36 | // This class defines a Type lattice. The lattice is used in the constant |
37 | // propagation algorithms, and for some type-checking of the iloc code. |
38 | // Basic types include RSD's (lower bound, upper bound, stride for integers), |
39 | // float & double precision constants, sets of data-labels and code-labels. |
40 | // The complete lattice is described below. Subtypes have no relationship to |
41 | // up or down in the lattice; that is entirely determined by the behavior of |
42 | // the MEET/JOIN functions. |
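As a concrete illustration of the two operations (a sketch only, inside a running compilation, using the TypeInt ranges declared further down in this header): MEET moves down the lattice to the most precise type containing both inputs, while JOIN moves up to (roughly) their intersection.

    // meet([0..5], [3..10]) == [0..10] -- the smallest int range covering both
    const Type* m = TypeInt::make(0, 5, Type::WidenMin)->meet(TypeInt::make(3, 10, Type::WidenMin));
    // join([0..5], [3..10]) == [3..5]  -- the overlap; the filter() variant declared
    // below additionally normalizes empty results to TOP
    const Type* j = TypeInt::make(0, 5, Type::WidenMin)->join(TypeInt::make(3, 10, Type::WidenMin));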
43 | |
44 | class Dict; |
45 | class Type; |
46 | class TypeD; |
47 | class TypeF; |
48 | class TypeInteger; |
49 | class TypeInt; |
50 | class TypeLong; |
51 | class TypeNarrowPtr; |
52 | class TypeNarrowOop; |
53 | class TypeNarrowKlass; |
54 | class TypeAry; |
55 | class TypeTuple; |
56 | class TypeVect; |
57 | class TypeVectA; |
58 | class TypeVectS; |
59 | class TypeVectD; |
60 | class TypeVectX; |
61 | class TypeVectY; |
62 | class TypeVectZ; |
63 | class TypeVectMask; |
64 | class TypePtr; |
65 | class TypeRawPtr; |
66 | class TypeOopPtr; |
67 | class TypeInstPtr; |
68 | class TypeAryPtr; |
69 | class TypeKlassPtr; |
70 | class TypeInstKlassPtr; |
71 | class TypeAryKlassPtr; |
72 | class TypeMetadataPtr; |
73 | |
74 | //------------------------------Type------------------------------------------- |
75 | // Basic Type object, represents a set of primitive Values. |
76 | // Types are hash-cons'd into a private class dictionary, so only one of each |
77 | // different kind of Type exists. Types are never modified after creation, so |
78 | // all their interesting fields are constant. |
79 | class Type { |
80 | friend class VMStructs; |
81 | |
82 | public: |
83 | enum TYPES { |
84 | Bad=0, // Type check |
85 | Control, // Control of code (not in lattice) |
86 | Top, // Top of the lattice |
87 | Int, // Integer range (lo-hi) |
88 | Long, // Long integer range (lo-hi) |
89 | Half, // Placeholder half of doubleword |
90 | NarrowOop, // Compressed oop pointer |
91 | NarrowKlass, // Compressed klass pointer |
92 | |
93 | Tuple, // Method signature or object layout |
94 | Array, // Array types |
95 | |
96 | VectorMask, // Vector predicate/mask type |
97 | VectorA, // (Scalable) Vector types for vector length agnostic |
98 | VectorS, // 32bit Vector types |
99 | VectorD, // 64bit Vector types |
100 | VectorX, // 128bit Vector types |
101 | VectorY, // 256bit Vector types |
102 | VectorZ, // 512bit Vector types |
103 | |
104 | AnyPtr, // Any old raw, klass, inst, or array pointer |
105 | RawPtr, // Raw (non-oop) pointers |
106 | OopPtr, // Any and all Java heap entities |
107 | InstPtr, // Instance pointers (non-array objects) |
108 | AryPtr, // Array pointers |
109 | // (Ptr order matters: See is_ptr, isa_ptr, is_oopptr, isa_oopptr.) |
110 | |
111 | MetadataPtr, // Generic metadata |
112 | KlassPtr, // Klass pointers |
113 | InstKlassPtr, |
114 | AryKlassPtr, |
115 | |
116 | Function, // Function signature |
117 | Abio, // Abstract I/O |
118 | Return_Address, // Subroutine return address |
119 | Memory, // Abstract store |
120 | FloatTop, // No float value |
121 | FloatCon, // Floating point constant |
122 | FloatBot, // Any float value |
123 | DoubleTop, // No double value |
124 | DoubleCon, // Double precision constant |
125 | DoubleBot, // Any double value |
126 | Bottom, // Bottom of lattice |
127 | lastype // Bogus ending type (not in lattice) |
128 | }; |
129 | |
130 | // Signal values for offsets from a base pointer |
131 | enum OFFSET_SIGNALS { |
132 | OffsetTop = -2000000000, // undefined offset |
133 | OffsetBot = -2000000001 // any possible offset |
134 | }; |
135 | |
136 | // Min and max WIDEN values. |
137 | enum WIDEN { |
138 | WidenMin = 0, |
139 | WidenMax = 3 |
140 | }; |
141 | |
142 | private: |
143 | typedef struct { |
144 | TYPES dual_type; |
145 | BasicType basic_type; |
146 | const char* msg; |
147 | bool isa_oop; |
148 | uint ideal_reg; |
149 | relocInfo::relocType reloc; |
150 | } TypeInfo; |
151 | |
152 | // Dictionary of types shared among compilations. |
153 | static Dict* _shared_type_dict; |
154 | static const TypeInfo _type_info[]; |
155 | |
156 | static int uhash( const Type *const t ); |
157 | // Structural equality check. Assumes that cmp() has already compared |
158 | // the _base types and thus knows it can cast 't' appropriately. |
159 | virtual bool eq( const Type *t ) const; |
160 | |
161 | // Top-level hash-table of types |
162 | static Dict *type_dict() { |
163 | return Compile::current()->type_dict(); |
164 | } |
165 | |
166 | // DUAL operation: reflect around lattice centerline. Used instead of |
167 | // join to ensure my lattice is symmetric up and down. Dual is computed |
168 | // lazily, on demand, and cached in _dual. |
169 | const Type *_dual; // Cached dual value |
170 | |
171 | #ifdef ASSERT |
172 | // One type is interface, the other is oop |
173 | virtual bool interface_vs_oop_helper(const Type *t) const; |
174 | #endif |
175 | |
176 | const Type *meet_helper(const Type *t, bool include_speculative) const; |
177 | void check_symmetrical(const Type *t, const Type *mt) const; |
178 | |
179 | protected: |
180 | // Each class of type is also identified by its base. |
181 | const TYPES _base; // Enum of Types type |
182 | |
183 | Type( TYPES t ) : _dual(NULL), _base(t) {} // Simple types |
184 | // ~Type(); // Use fast deallocation |
185 | const Type *hashcons(); // Hash-cons the type |
186 | virtual const Type *filter_helper(const Type *kills, bool include_speculative) const; |
187 | const Type *join_helper(const Type *t, bool include_speculative) const { |
188 | return dual()->meet_helper(t->dual(), include_speculative)->dual(); |
189 | } |
190 | |
191 | public: |
192 | |
193 | inline void* operator new( size_t x ) throw() { |
194 | Compile* compile = Compile::current(); |
195 | compile->set_type_last_size(x); |
196 | return compile->type_arena()->AmallocWords(x); |
197 | } |
198 | inline void operator delete( void* ptr ) { |
199 | Compile* compile = Compile::current(); |
200 | compile->type_arena()->Afree(ptr,compile->type_last_size()); |
201 | } |
202 | |
203 | // Initialize the type system for a particular compilation. |
204 | static void Initialize(Compile* compile); |
205 | |
206 | // Initialize the types shared by all compilations. |
207 | static void Initialize_shared(Compile* compile); |
208 | |
209 | TYPES base() const { |
210 | assert(_base > Bad && _base < lastype, "sanity")do { if (!(_base > Bad && _base < lastype)) { ( *g_assert_poison) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/type.hpp" , 210, "assert(" "_base > Bad && _base < lastype" ") failed", "sanity"); ::breakpoint(); } } while (0); |
211 | return _base; |
212 | } |
213 | |
214 | // Create a new hash-consd type |
215 | static const Type *make(enum TYPES); |
216 | // Test for equivalence of types |
217 | static int cmp( const Type *const t1, const Type *const t2 ); |
218 | // Test for higher or equal in lattice |
219 | // Variant that drops the speculative part of the types |
220 | bool higher_equal(const Type *t) const { |
221 | return !cmp(meet(t),t->remove_speculative()); |
222 | } |
223 | // Variant that keeps the speculative part of the types |
224 | bool higher_equal_speculative(const Type *t) const { |
225 | return !cmp(meet_speculative(t),t); |
226 | } |
227 | |
228 | // MEET operation; lower in lattice. |
229 | // Variant that drops the speculative part of the types |
230 | const Type *meet(const Type *t) const { |
231 | return meet_helper(t, false); |
232 | } |
233 | // Variant that keeps the speculative part of the types |
234 | const Type *meet_speculative(const Type *t) const { |
235 | return meet_helper(t, true)->cleanup_speculative(); |
236 | } |
237 | // WIDEN: 'widens' for Ints and other range types |
238 | virtual const Type *widen( const Type *old, const Type* limit ) const { return this; } |
239 | // NARROW: complement for widen, used by pessimistic phases |
240 | virtual const Type *narrow( const Type *old ) const { return this; } |
241 | |
242 | // DUAL operation: reflect around lattice centerline. Used instead of |
243 | // join to ensure my lattice is symmetric up and down. |
244 | const Type *dual() const { return _dual; } |
245 | |
246 | // Compute meet dependent on base type |
247 | virtual const Type *xmeet( const Type *t ) const; |
248 | virtual const Type *xdual() const; // Compute dual right now. |
249 | |
250 | // JOIN operation; higher in lattice. Done by finding the dual of the |
251 | // meet of the dual of the 2 inputs. |
252 | // Variant that drops the speculative part of the types |
253 | const Type *join(const Type *t) const { |
254 | return join_helper(t, false); |
255 | } |
256 | // Variant that keeps the speculative part of the types |
257 | const Type *join_speculative(const Type *t) const { |
258 | return join_helper(t, true)->cleanup_speculative(); |
259 | } |
260 | |
261 | // Modified version of JOIN adapted to the needs of Node::Value. |
262 | // Normalizes all empty values to TOP. Does not kill _widen bits. |
263 | // Currently, it also works around limitations involving interface types. |
264 | // Variant that drops the speculative part of the types |
265 | const Type *filter(const Type *kills) const { |
266 | return filter_helper(kills, false); |
267 | } |
268 | // Variant that keeps the speculative part of the types |
269 | const Type *filter_speculative(const Type *kills) const { |
270 | return filter_helper(kills, true)->cleanup_speculative(); |
271 | } |
272 | |
273 | #ifdef ASSERT |
274 | // One type is interface, the other is oop |
275 | virtual bool interface_vs_oop(const Type *t) const; |
276 | #endif |
277 | |
278 | // Returns true if this pointer points at memory which contains |
279 | // compressed oop references. |
280 | bool is_ptr_to_narrowoop() const; |
281 | bool is_ptr_to_narrowklass() const; |
282 | |
283 | bool is_ptr_to_boxing_obj() const; |
284 | |
285 | |
286 | // Convenience access |
287 | float getf() const; |
288 | double getd() const; |
289 | |
290 | const TypeInt *is_int() const; |
291 | const TypeInt *isa_int() const; // Returns NULL if not an Int |
292 | const TypeInteger* is_integer(BasicType bt) const; |
293 | const TypeInteger* isa_integer(BasicType bt) const; |
294 | const TypeLong *is_long() const; |
295 | const TypeLong *isa_long() const; // Returns NULL if not a Long |
296 | const TypeD *isa_double() const; // Returns NULL if not a Double{Top,Con,Bot} |
297 | const TypeD *is_double_constant() const; // Asserts it is a DoubleCon |
298 | const TypeD *isa_double_constant() const; // Returns NULL if not a DoubleCon |
299 | const TypeF *isa_float() const; // Returns NULL if not a Float{Top,Con,Bot} |
300 | const TypeF *is_float_constant() const; // Asserts it is a FloatCon |
301 | const TypeF *isa_float_constant() const; // Returns NULL if not a FloatCon |
302 | const TypeTuple *is_tuple() const; // Collection of fields, NOT a pointer |
303 | const TypeAry *is_ary() const; // Array, NOT array pointer |
304 | const TypeAry *isa_ary() const; // Returns NULL if not an ary |
305 | const TypeVect *is_vect() const; // Vector |
306 | const TypeVect *isa_vect() const; // Returns NULL if not a Vector |
307 | const TypeVectMask *is_vectmask() const; // Predicate/Mask Vector |
308 | const TypeVectMask *isa_vectmask() const; // Returns NULL if not a Vector Predicate/Mask |
309 | const TypePtr *is_ptr() const; // Asserts it is a ptr type |
310 | const TypePtr *isa_ptr() const; // Returns NULL if not ptr type |
311 | const TypeRawPtr *isa_rawptr() const; // NOT Java oop |
312 | const TypeRawPtr *is_rawptr() const; // Asserts is rawptr |
313 | const TypeNarrowOop *is_narrowoop() const; // Java-style GC'd pointer |
314 | const TypeNarrowOop *isa_narrowoop() const; // Returns NULL if not oop ptr type |
315 | const TypeNarrowKlass *is_narrowklass() const; // compressed klass pointer |
316 | const TypeNarrowKlass *isa_narrowklass() const;// Returns NULL if not oop ptr type |
317 | const TypeOopPtr *isa_oopptr() const; // Returns NULL if not oop ptr type |
318 | const TypeOopPtr *is_oopptr() const; // Java-style GC'd pointer |
319 | const TypeInstPtr *isa_instptr() const; // Returns NULL if not InstPtr |
320 | const TypeInstPtr *is_instptr() const; // Instance |
321 | const TypeAryPtr *isa_aryptr() const; // Returns NULL if not AryPtr |
322 | const TypeAryPtr *is_aryptr() const; // Array oop |
323 | |
324 | const TypeMetadataPtr *isa_metadataptr() const; // Returns NULL if not oop ptr type |
325 | const TypeMetadataPtr *is_metadataptr() const; // Java-style GC'd pointer |
326 | const TypeKlassPtr *isa_klassptr() const; // Returns NULL if not KlassPtr |
327 | const TypeKlassPtr *is_klassptr() const; // assert if not KlassPtr |
328 | const TypeInstKlassPtr *isa_instklassptr() const; // Returns NULL if not InstKlassPtr |
329 | const TypeInstKlassPtr *is_instklassptr() const; // assert if not InstKlassPtr |
330 | const TypeAryKlassPtr *isa_aryklassptr() const; // Returns NULL if not AryKlassPtr |
331 | const TypeAryKlassPtr *is_aryklassptr() const; // assert if not AryKlassPtr |
332 | |
333 | virtual bool is_finite() const; // Has a finite value |
334 | virtual bool is_nan() const; // Is not a number (NaN) |
335 | |
336 | // Returns this ptr type or the equivalent ptr type for this compressed pointer. |
337 | const TypePtr* make_ptr() const; |
338 | |
339 | // Returns this oopptr type or the equivalent oopptr type for this compressed pointer. |
340 | // Asserts if the underlying type is not an oopptr or narrowoop. |
341 | const TypeOopPtr* make_oopptr() const; |
342 | |
343 | // Returns this compressed pointer or the equivalent compressed version |
344 | // of this pointer type. |
345 | const TypeNarrowOop* make_narrowoop() const; |
346 | |
347 | // Returns this compressed klass pointer or the equivalent |
348 | // compressed version of this pointer type. |
349 | const TypeNarrowKlass* make_narrowklass() const; |
350 | |
351 | // Special test for register pressure heuristic |
352 | bool is_floatingpoint() const; // True if Float or Double base type |
353 | |
354 | // Do you have memory, directly or through a tuple? |
355 | bool has_memory( ) const; |
356 | |
357 | // TRUE if type is a singleton |
358 | virtual bool singleton(void) const; |
359 | |
360 | // TRUE if type is above the lattice centerline, and is therefore vacuous |
361 | virtual bool empty(void) const; |
362 | |
363 | // Return a hash for this type. The hash function is public so ConNode |
364 | // (constants) can hash on their constant, which is represented by a Type. |
365 | virtual int hash() const; |
366 | |
367 | // Map ideal registers (machine types) to ideal types |
368 | static const Type *mreg2type[]; |
369 | |
370 | // Printing, statistics |
371 | #ifndef PRODUCT |
372 | void dump_on(outputStream *st) const; |
373 | void dump() const { |
374 | dump_on(tty); |
375 | } |
376 | virtual void dump2( Dict &d, uint depth, outputStream *st ) const; |
377 | static void dump_stats(); |
378 | // Groups of types, for debugging and visualization only. |
379 | enum class Category { |
380 | Data, |
381 | Memory, |
382 | Mixed, // Tuples with types of different categories. |
383 | Control, |
384 | Other, // {Type::Top, Type::Abio, Type::Bottom}. |
385 | Undef // {Type::Bad, Type::lastype}, for completeness. |
386 | }; |
387 | // Return the category of this type. |
388 | Category category() const; |
389 | |
390 | static const char* str(const Type* t); |
391 | #endif // !PRODUCT |
392 | void typerr(const Type *t) const; // Mixing types error |
393 | |
394 | // Create basic type |
395 | static const Type* get_const_basic_type(BasicType type) { |
396 | assert((uint)type <= T_CONFLICT && _const_basic_type[type] != NULL, "bad type")do { if (!((uint)type <= T_CONFLICT && _const_basic_type [type] != __null)) { (*g_assert_poison) = 'X';; report_vm_error ("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/type.hpp" , 396, "assert(" "(uint)type <= T_CONFLICT && _const_basic_type[type] != __null" ") failed", "bad type"); ::breakpoint(); } } while (0); |
397 | return _const_basic_type[type]; |
398 | } |
399 | |
400 | // For two instance arrays of the same dimension, return the base element types. |
401 | // Otherwise, or if the arrays have different dimensions, return NULL. |
402 | static void get_arrays_base_elements(const Type *a1, const Type *a2, |
403 | const TypeInstPtr **e1, const TypeInstPtr **e2); |
404 | |
405 | // Mapping to the array element's basic type. |
406 | BasicType array_element_basic_type() const; |
407 | |
408 | // Create standard type for a ciType: |
409 | static const Type* get_const_type(ciType* type); |
410 | |
411 | // Create standard zero value: |
412 | static const Type* get_zero_type(BasicType type) { |
413 | assert((uint)type <= T_CONFLICT && _zero_type[type] != NULL, "bad type"); |
414 | return _zero_type[type]; |
415 | } |
416 | |
417 | // Report if this is a zero value (not top). |
418 | bool is_zero_type() const { |
419 | BasicType type = basic_type(); |
420 | if (type == T_VOID || type >= T_CONFLICT) |
421 | return false; |
422 | else |
423 | return (this == _zero_type[type]); |
424 | } |
425 | |
426 | // Convenience common pre-built types. |
427 | static const Type *ABIO; |
428 | static const Type *BOTTOM; |
429 | static const Type *CONTROL; |
430 | static const Type *DOUBLE; |
431 | static const Type *FLOAT; |
432 | static const Type *HALF; |
433 | static const Type *MEMORY; |
434 | static const Type *MULTI; |
435 | static const Type *RETURN_ADDRESS; |
436 | static const Type *TOP; |
437 | |
438 | // Mapping from compiler type to VM BasicType |
439 | BasicType basic_type() const { return _type_info[_base].basic_type; } |
440 | uint ideal_reg() const { return _type_info[_base].ideal_reg; } |
441 | const char* msg() const { return _type_info[_base].msg; } |
442 | bool isa_oop_ptr() const { return _type_info[_base].isa_oop; } |
443 | relocInfo::relocType reloc() const { return _type_info[_base].reloc; } |
444 | |
445 | // Mapping from CI type system to compiler type: |
446 | static const Type* get_typeflow_type(ciType* type); |
447 | |
448 | static const Type* make_from_constant(ciConstant constant, |
449 | bool require_constant = false, |
450 | int stable_dimension = 0, |
451 | bool is_narrow = false, |
452 | bool is_autobox_cache = false); |
453 | |
454 | static const Type* make_constant_from_field(ciInstance* holder, |
455 | int off, |
456 | bool is_unsigned_load, |
457 | BasicType loadbt); |
458 | |
459 | static const Type* make_constant_from_field(ciField* field, |
460 | ciInstance* holder, |
461 | BasicType loadbt, |
462 | bool is_unsigned_load); |
463 | |
464 | static const Type* make_constant_from_array_element(ciArray* array, |
465 | int off, |
466 | int stable_dimension, |
467 | BasicType loadbt, |
468 | bool is_unsigned_load); |
469 | |
470 | // Speculative type helper methods. See TypePtr. |
471 | virtual const TypePtr* speculative() const { return NULL; } |
472 | virtual ciKlass* speculative_type() const { return NULL; } |
473 | virtual ciKlass* speculative_type_not_null() const { return NULL; } |
474 | virtual bool speculative_maybe_null() const { return true; } |
475 | virtual bool speculative_always_null() const { return true; } |
476 | virtual const Type* remove_speculative() const { return this; } |
477 | virtual const Type* cleanup_speculative() const { return this; } |
478 | virtual bool would_improve_type(ciKlass* exact_kls, int inline_depth) const { return exact_kls != NULL; } |
479 | virtual bool would_improve_ptr(ProfilePtrKind ptr_kind) const { return ptr_kind == ProfileAlwaysNull || ptr_kind == ProfileNeverNull; } |
480 | const Type* maybe_remove_speculative(bool include_speculative) const; |
481 | |
482 | virtual bool maybe_null() const { return true; } |
483 | virtual bool is_known_instance() const { return false; } |
484 | |
485 | private: |
486 | // support arrays |
487 | static const Type* _zero_type[T_CONFLICT+1]; |
488 | static const Type* _const_basic_type[T_CONFLICT+1]; |
489 | }; |
490 | |
491 | //------------------------------TypeF------------------------------------------ |
492 | // Class of Float-Constant Types. |
493 | class TypeF : public Type { |
494 | TypeF( float f ) : Type(FloatCon), _f(f) {}; |
495 | public: |
496 | virtual bool eq( const Type *t ) const; |
497 | virtual int hash() const; // Type specific hashing |
498 | virtual bool singleton(void) const; // TRUE if type is a singleton |
499 | virtual bool empty(void) const; // TRUE if type is vacuous |
500 | public: |
501 | const float _f; // Float constant |
502 | |
503 | static const TypeF *make(float f); |
504 | |
505 | virtual bool is_finite() const; // Has a finite value |
506 | virtual bool is_nan() const; // Is not a number (NaN) |
507 | |
508 | virtual const Type *xmeet( const Type *t ) const; |
509 | virtual const Type *xdual() const; // Compute dual right now. |
510 | // Convenience common pre-built types. |
511 | static const TypeF *MAX; |
512 | static const TypeF *MIN; |
513 | static const TypeF *ZERO; // positive zero only |
514 | static const TypeF *ONE; |
515 | static const TypeF *POS_INF; |
516 | static const TypeF *NEG_INF; |
517 | #ifndef PRODUCT |
518 | virtual void dump2( Dict &d, uint depth, outputStream *st ) const; |
519 | #endif |
520 | }; |
521 | |
522 | //------------------------------TypeD------------------------------------------ |
523 | // Class of Double-Constant Types. |
524 | class TypeD : public Type { |
525 | TypeD( double d ) : Type(DoubleCon), _d(d) {}; |
526 | public: |
527 | virtual bool eq( const Type *t ) const; |
528 | virtual int hash() const; // Type specific hashing |
529 | virtual bool singleton(void) const; // TRUE if type is a singleton |
530 | virtual bool empty(void) const; // TRUE if type is vacuous |
531 | public: |
532 | const double _d; // Double constant |
533 | |
534 | static const TypeD *make(double d); |
535 | |
536 | virtual bool is_finite() const; // Has a finite value |
537 | virtual bool is_nan() const; // Is not a number (NaN) |
538 | |
539 | virtual const Type *xmeet( const Type *t ) const; |
540 | virtual const Type *xdual() const; // Compute dual right now. |
541 | // Convenience common pre-built types. |
542 | static const TypeD *MAX; |
543 | static const TypeD *MIN; |
544 | static const TypeD *ZERO; // positive zero only |
545 | static const TypeD *ONE; |
546 | static const TypeD *POS_INF; |
547 | static const TypeD *NEG_INF; |
548 | #ifndef PRODUCT |
549 | virtual void dump2( Dict &d, uint depth, outputStream *st ) const; |
550 | #endif |
551 | }; |
552 | |
553 | class TypeInteger : public Type { |
554 | protected: |
555 | TypeInteger(TYPES t) : Type(t) {} |
556 | |
557 | public: |
558 | virtual jlong hi_as_long() const = 0; |
559 | virtual jlong lo_as_long() const = 0; |
560 | jlong get_con_as_long(BasicType bt) const; |
561 | bool is_con() const { return lo_as_long() == hi_as_long(); } |
562 | |
563 | static const TypeInteger* make(jlong lo, jlong hi, int w, BasicType bt); |
564 | |
565 | static const TypeInteger* bottom(BasicType type); |
566 | static const TypeInteger* zero(BasicType type); |
567 | static const TypeInteger* one(BasicType type); |
568 | static const TypeInteger* minus_1(BasicType type); |
569 | }; |
570 | |
571 | |
572 | |
573 | //------------------------------TypeInt---------------------------------------- |
574 | // Class of integer ranges, the set of integers between a lower bound and an |
575 | // upper bound, inclusive. |
576 | class TypeInt : public TypeInteger { |
577 | TypeInt( jint lo, jint hi, int w ); |
578 | protected: |
579 | virtual const Type *filter_helper(const Type *kills, bool include_speculative) const; |
580 | |
581 | public: |
582 | typedef jint NativeType; |
583 | virtual bool eq( const Type *t ) const; |
584 | virtual int hash() const; // Type specific hashing |
585 | virtual bool singleton(void) const; // TRUE if type is a singleton |
586 | virtual bool empty(void) const; // TRUE if type is vacuous |
587 | const jint _lo, _hi; // Lower bound, upper bound |
588 | const short _widen; // Limit on times we widen this sucker |
589 | |
590 | static const TypeInt *make(jint lo); |
591 | // must always specify w |
592 | static const TypeInt *make(jint lo, jint hi, int w); |
593 | |
594 | // Check for single integer |
595 | bool is_con() const { return _lo==_hi; } |
596 | bool is_con(int i) const { return is_con() && _lo == i; } |
597 | jint get_con() const { assert(is_con(), ""); return _lo; } |
598 | |
599 | virtual bool is_finite() const; // Has a finite value |
600 | |
601 | virtual const Type *xmeet( const Type *t ) const; |
602 | virtual const Type *xdual() const; // Compute dual right now. |
603 | virtual const Type *widen( const Type *t, const Type* limit_type ) const; |
604 | virtual const Type *narrow( const Type *t ) const; |
605 | |
606 | virtual jlong hi_as_long() const { return _hi; } |
607 | virtual jlong lo_as_long() const { return _lo; } |
608 | |
609 | // Do not kill _widen bits. |
610 | // Convenience common pre-built types. |
611 | static const TypeInt *MAX; |
612 | static const TypeInt *MIN; |
613 | static const TypeInt *MINUS_1; |
614 | static const TypeInt *ZERO; |
615 | static const TypeInt *ONE; |
616 | static const TypeInt *BOOL; |
617 | static const TypeInt *CC; |
618 | static const TypeInt *CC_LT; // [-1] == MINUS_1 |
619 | static const TypeInt *CC_GT; // [1] == ONE |
620 | static const TypeInt *CC_EQ; // [0] == ZERO |
621 | static const TypeInt *CC_LE; // [-1,0] |
622 | static const TypeInt *CC_GE; // [0,1] == BOOL (!) |
623 | static const TypeInt *BYTE; |
624 | static const TypeInt *UBYTE; |
625 | static const TypeInt *CHAR; |
626 | static const TypeInt *SHORT; |
627 | static const TypeInt *POS; |
628 | static const TypeInt *POS1; |
629 | static const TypeInt *INT; |
630 | static const TypeInt *SYMINT; // symmetric range [-max_jint..max_jint] |
631 | static const TypeInt *TYPE_DOMAIN; // alias for TypeInt::INT |
632 | |
633 | static const TypeInt *as_self(const Type *t) { return t->is_int(); } |
634 | #ifndef PRODUCT |
635 | virtual void dump2( Dict &d, uint depth, outputStream *st ) const; |
636 | #endif |
637 | }; |
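
As a reading aid, not HotSpot code: the comments above describe TypeInt as an inclusive interval [_lo, _hi] whose meet is the smallest interval covering both inputs, with _widen capping how many times the interval may grow before the analysis gives up and jumps to the full int range. Below is a minimal standalone sketch of that idea; IntRange, widen_toward and the limit of 3 are invented for illustration.

    // Toy model of an inclusive integer range with a bounded widening counter.
    #include <algorithm>
    #include <climits>
    #include <cstdio>

    struct IntRange {
      int lo, hi;   // inclusive bounds, lo <= hi
      int widen;    // how many times this range has already been widened

      bool is_con() const { return lo == hi; }  // single value, like TypeInt::is_con()

      // Meet: the smallest range covering both inputs.
      static IntRange meet(const IntRange& a, const IntRange& b) {
        return { std::min(a.lo, b.lo), std::max(a.hi, b.hi), std::max(a.widen, b.widen) };
      }

      // Widening: if the range keeps growing across iterations, allow it a few
      // times, then snap to the full int range so iteration terminates.
      static IntRange widen_toward(const IntRange& old_r, const IntRange& new_r, int limit = 3) {
        if (new_r.lo >= old_r.lo && new_r.hi <= old_r.hi) return new_r;  // did not grow
        if (old_r.widen >= limit) return { INT_MIN, INT_MAX, limit };    // give up
        return { new_r.lo, new_r.hi, old_r.widen + 1 };
      }
    };

    int main() {
      IntRange a{0, 10, 0}, b{5, 20, 0};
      IntRange m = IntRange::meet(a, b);          // [0, 20]
      IntRange w = IntRange::widen_toward(a, m);  // grew, so the widen count becomes 1
      std::printf("[%d,%d] widen=%d\n", w.lo, w.hi, w.widen);
      return 0;
    }

The real heuristics in TypeInt::widen are more involved; the sketch only shows the terminating-growth idea behind the _widen counter ("Limit on times we widen this sucker").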
638 | |
639 | |
640 | //------------------------------TypeLong--------------------------------------- |
641 | // Class of long integer ranges, the set of integers between a lower bound and |
642 | // an upper bound, inclusive. |
643 | class TypeLong : public TypeInteger { |
644 | TypeLong( jlong lo, jlong hi, int w ); |
645 | protected: |
646 | // Do not kill _widen bits. |
647 | virtual const Type *filter_helper(const Type *kills, bool include_speculative) const; |
648 | public: |
649 | typedef jlong NativeType; |
650 | virtual bool eq( const Type *t ) const; |
651 | virtual int hash() const; // Type specific hashing |
652 | virtual bool singleton(void) const; // TRUE if type is a singleton |
653 | virtual bool empty(void) const; // TRUE if type is vacuous |
654 | public: |
655 | const jlong _lo, _hi; // Lower bound, upper bound |
656 | const short _widen; // Limit on times we widen this sucker |
657 | |
658 | static const TypeLong *make(jlong lo); |
659 | // must always specify w |
660 | static const TypeLong *make(jlong lo, jlong hi, int w); |
661 | |
662 | // Check for single integer |
663 | bool is_con() const { return _lo==_hi; } |
664 | bool is_con(int i) const { return is_con() && _lo == i; } |
665 | jlong get_con() const { assert(is_con(), ""); return _lo; } |
666 | |
667 | // Check for positive 32-bit value. |
668 | int is_positive_int() const { return _lo >= 0 && _hi <= (jlong)max_jint; } |
669 | |
670 | virtual bool is_finite() const; // Has a finite value |
671 | |
672 | virtual jlong hi_as_long() const { return _hi; } |
673 | virtual jlong lo_as_long() const { return _lo; } |
674 | |
675 | virtual const Type *xmeet( const Type *t ) const; |
676 | virtual const Type *xdual() const; // Compute dual right now. |
677 | virtual const Type *widen( const Type *t, const Type* limit_type ) const; |
678 | virtual const Type *narrow( const Type *t ) const; |
679 | // Convenience common pre-built types. |
680 | static const TypeLong *MAX; |
681 | static const TypeLong *MIN; |
682 | static const TypeLong *MINUS_1; |
683 | static const TypeLong *ZERO; |
684 | static const TypeLong *ONE; |
685 | static const TypeLong *POS; |
686 | static const TypeLong *LONG; |
687 | static const TypeLong *INT; // 32-bit subrange [min_jint..max_jint] |
688 | static const TypeLong *UINT; // 32-bit unsigned [0..max_juint] |
689 | static const TypeLong *TYPE_DOMAIN; // alias for TypeLong::LONG |
690 | |
691 | // static convenience methods. |
692 | static const TypeLong *as_self(const Type *t) { return t->is_long(); } |
693 | |
694 | #ifndef PRODUCT |
695 | virtual void dump2( Dict &d, uint, outputStream *st ) const;// Specialized per-Type dumping |
696 | #endif |
697 | }; |
698 | |
699 | //------------------------------TypeTuple-------------------------------------- |
700 | // Class of Tuple Types, essentially type collections for function signatures |
701 | // and class layouts. It happens to also be a fast cache for the HotSpot |
702 | // signature types. |
703 | class TypeTuple : public Type { |
704 | TypeTuple( uint cnt, const Type **fields ) : Type(Tuple), _cnt(cnt), _fields(fields) { } |
705 | |
706 | const uint _cnt; // Count of fields |
707 | const Type ** const _fields; // Array of field types |
708 | |
709 | public: |
710 | virtual bool eq( const Type *t ) const; |
711 | virtual int hash() const; // Type specific hashing |
712 | virtual bool singleton(void) const; // TRUE if type is a singleton |
713 | virtual bool empty(void) const; // TRUE if type is vacuous |
714 | |
715 | // Accessors: |
716 | uint cnt() const { return _cnt; } |
717 | const Type* field_at(uint i) const { |
718 | assert(i < _cnt, "oob")do { if (!(i < _cnt)) { (*g_assert_poison) = 'X';; report_vm_error ("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/type.hpp" , 718, "assert(" "i < _cnt" ") failed", "oob"); ::breakpoint (); } } while (0); |
719 | return _fields[i]; |
720 | } |
721 | void set_field_at(uint i, const Type* t) { |
722 | assert(i < _cnt, "oob")do { if (!(i < _cnt)) { (*g_assert_poison) = 'X';; report_vm_error ("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/type.hpp" , 722, "assert(" "i < _cnt" ") failed", "oob"); ::breakpoint (); } } while (0); |
723 | _fields[i] = t; |
724 | } |
725 | |
726 | static const TypeTuple *make( uint cnt, const Type **fields ); |
727 | static const TypeTuple *make_range(ciSignature *sig); |
728 | static const TypeTuple *make_domain(ciInstanceKlass* recv, ciSignature *sig); |
729 | |
730 | // Subroutine call type with space allocated for argument types |
731 | // Memory for Control, I_O, Memory, FramePtr, and ReturnAdr is allocated implicitly |
732 | static const Type **fields( uint arg_cnt ); |
733 | |
734 | virtual const Type *xmeet( const Type *t ) const; |
735 | virtual const Type *xdual() const; // Compute dual right now. |
736 | // Convenience common pre-built types. |
737 | static const TypeTuple *IFBOTH; |
738 | static const TypeTuple *IFFALSE; |
739 | static const TypeTuple *IFTRUE; |
740 | static const TypeTuple *IFNEITHER; |
741 | static const TypeTuple *LOOPBODY; |
742 | static const TypeTuple *MEMBAR; |
743 | static const TypeTuple *STORECONDITIONAL; |
744 | static const TypeTuple *START_I2C; |
745 | static const TypeTuple *INT_PAIR; |
746 | static const TypeTuple *LONG_PAIR; |
747 | static const TypeTuple *INT_CC_PAIR; |
748 | static const TypeTuple *LONG_CC_PAIR; |
749 | #ifndef PRODUCT |
750 | virtual void dump2( Dict &d, uint, outputStream *st ) const; // Specialized per-Type dumping |
751 | #endif |
752 | }; |
753 | |
754 | //------------------------------TypeAry---------------------------------------- |
755 | // Class of Array Types |
756 | class TypeAry : public Type { |
757 | TypeAry(const Type* elem, const TypeInt* size, bool stable) : Type(Array), |
758 | _elem(elem), _size(size), _stable(stable) {} |
759 | public: |
760 | virtual bool eq( const Type *t ) const; |
761 | virtual int hash() const; // Type specific hashing |
762 | virtual bool singleton(void) const; // TRUE if type is a singleton |
763 | virtual bool empty(void) const; // TRUE if type is vacuous |
764 | |
765 | private: |
766 | const Type *_elem; // Element type of array |
767 | const TypeInt *_size; // Elements in array |
768 | const bool _stable; // Are elements @Stable? |
769 | friend class TypeAryPtr; |
770 | |
771 | public: |
772 | static const TypeAry* make(const Type* elem, const TypeInt* size, bool stable = false); |
773 | |
774 | virtual const Type *xmeet( const Type *t ) const; |
775 | virtual const Type *xdual() const; // Compute dual right now. |
776 | bool ary_must_be_exact() const; // true if arrays of such are never generic |
777 | virtual const Type* remove_speculative() const; |
778 | virtual const Type* cleanup_speculative() const; |
779 | #ifdef ASSERT |
780 | // One type is interface, the other is oop |
781 | virtual bool interface_vs_oop(const Type *t) const; |
782 | #endif |
783 | #ifndef PRODUCT |
784 | virtual void dump2( Dict &d, uint, outputStream *st ) const; // Specialized per-Type dumping |
785 | #endif |
786 | }; |
787 | |
788 | //------------------------------TypeVect--------------------------------------- |
789 | // Class of Vector Types |
790 | class TypeVect : public Type { |
791 | const Type* _elem; // Vector's element type |
792 | const uint _length; // Elements in vector (power of 2) |
793 | |
794 | protected: |
795 | TypeVect(TYPES t, const Type* elem, uint length) : Type(t), |
796 | _elem(elem), _length(length) {} |
797 | |
798 | public: |
799 | const Type* element_type() const { return _elem; } |
800 | BasicType element_basic_type() const { return _elem->array_element_basic_type(); } |
801 | uint length() const { return _length; } |
802 | uint length_in_bytes() const { |
803 | return _length * type2aelembytes(element_basic_type()); |
804 | } |
805 | |
806 | virtual bool eq(const Type *t) const; |
807 | virtual int hash() const; // Type specific hashing |
808 | virtual bool singleton(void) const; // TRUE if type is a singleton |
809 | virtual bool empty(void) const; // TRUE if type is vacuous |
810 | |
811 | static const TypeVect *make(const BasicType elem_bt, uint length, bool is_mask = false) { |
812 | // Use bottom primitive type. |
813 | return make(get_const_basic_type(elem_bt), length, is_mask); |
814 | } |
815 | // Used directly by Replicate nodes to construct singleton vector. |
816 | static const TypeVect *make(const Type* elem, uint length, bool is_mask = false); |
817 | |
818 | static const TypeVect *makemask(const BasicType elem_bt, uint length) { |
819 | // Use bottom primitive type. |
820 | return makemask(get_const_basic_type(elem_bt), length); |
821 | } |
822 | static const TypeVect *makemask(const Type* elem, uint length); |
823 | |
824 | |
825 | virtual const Type *xmeet( const Type *t) const; |
826 | virtual const Type *xdual() const; // Compute dual right now. |
827 | |
828 | static const TypeVect *VECTA; |
829 | static const TypeVect *VECTS; |
830 | static const TypeVect *VECTD; |
831 | static const TypeVect *VECTX; |
832 | static const TypeVect *VECTY; |
833 | static const TypeVect *VECTZ; |
834 | static const TypeVect *VECTMASK; |
835 | |
836 | #ifndef PRODUCT |
837 | virtual void dump2(Dict &d, uint, outputStream *st) const; // Specialized per-Type dumping |
838 | #endif |
839 | }; |
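
The length_in_bytes() accessor above is plain arithmetic: element count times the element's byte size. A tiny standalone check of that arithmetic (type2aelembytes is HotSpot-internal, so the element sizes below are written out by hand and are only examples):

    #include <cstdio>

    int main() {
      const unsigned length       = 8;  // a vector of 8 elements
      const unsigned float_bytes  = 4;  // size of a jfloat
      const unsigned double_bytes = 8;  // size of a jdouble
      std::printf("8 x float  = %u bytes\n", length * float_bytes);   // 32
      std::printf("8 x double = %u bytes\n", length * double_bytes);  // 64
      return 0;
    }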
840 | |
841 | class TypeVectA : public TypeVect { |
842 | friend class TypeVect; |
843 | TypeVectA(const Type* elem, uint length) : TypeVect(VectorA, elem, length) {} |
844 | }; |
845 | |
846 | class TypeVectS : public TypeVect { |
847 | friend class TypeVect; |
848 | TypeVectS(const Type* elem, uint length) : TypeVect(VectorS, elem, length) {} |
849 | }; |
850 | |
851 | class TypeVectD : public TypeVect { |
852 | friend class TypeVect; |
853 | TypeVectD(const Type* elem, uint length) : TypeVect(VectorD, elem, length) {} |
854 | }; |
855 | |
856 | class TypeVectX : public TypeVect { |
857 | friend class TypeVect; |
858 | TypeVectX(const Type* elem, uint length) : TypeVect(VectorX, elem, length) {} |
859 | }; |
860 | |
861 | class TypeVectY : public TypeVect { |
862 | friend class TypeVect; |
863 | TypeVectY(const Type* elem, uint length) : TypeVect(VectorY, elem, length) {} |
864 | }; |
865 | |
866 | class TypeVectZ : public TypeVect { |
867 | friend class TypeVect; |
868 | TypeVectZ(const Type* elem, uint length) : TypeVect(VectorZ, elem, length) {} |
869 | }; |
870 | |
871 | class TypeVectMask : public TypeVect { |
872 | public: |
873 | friend class TypeVect; |
874 | TypeVectMask(const Type* elem, uint length) : TypeVect(VectorMask, elem, length) {} |
875 | virtual bool eq(const Type *t) const; |
876 | virtual const Type *xdual() const; |
877 | static const TypeVectMask* make(const BasicType elem_bt, uint length); |
878 | static const TypeVectMask* make(const Type* elem, uint length); |
879 | }; |
880 | |
881 | //------------------------------TypePtr---------------------------------------- |
882 | // Class of machine Pointer Types: raw data, instances or arrays. |
883 | // If the _base enum is AnyPtr, then this refers to all of the above. |
884 | // Otherwise the _base will indicate which subset of pointers is affected, |
885 | // and the class will be inherited from. |
886 | class TypePtr : public Type { |
887 | friend class TypeNarrowPtr; |
888 | public: |
889 | enum PTR { TopPTR, AnyNull, Constant, Null, NotNull, BotPTR, lastPTR }; |
890 | protected: |
891 | TypePtr(TYPES t, PTR ptr, int offset, |
892 | const TypePtr* speculative = NULL, |
893 | int inline_depth = InlineDepthBottom) : |
894 | Type(t), _speculative(speculative), _inline_depth(inline_depth), _offset(offset), |
895 | _ptr(ptr) {} |
896 | static const PTR ptr_meet[lastPTR][lastPTR]; |
897 | static const PTR ptr_dual[lastPTR]; |
898 | static const char * const ptr_msg[lastPTR]; |
899 | |
900 | enum { |
901 | InlineDepthBottom = INT_MAX, |
902 | InlineDepthTop = -InlineDepthBottom |
903 | }; |
904 | |
905 | // Extra type information profiling gave us. We propagate it the |
906 | // same way the rest of the type info is propagated. If we want to |
907 | // use it, then we have to emit a guard: this part of the type is |
908 | // not something we know but something we speculate about the type. |
909 | const TypePtr* _speculative; |
910 | // For speculative types, we record at what inlining depth the |
911 | // profiling point that provided the data is. We want to favor |
912 | // profile data coming from outer scopes which are likely better for |
913 | // the current compilation. |
914 | int _inline_depth; |
915 | |
916 | // utility methods to work on the speculative part of the type |
917 | const TypePtr* dual_speculative() const; |
918 | const TypePtr* xmeet_speculative(const TypePtr* other) const; |
919 | bool eq_speculative(const TypePtr* other) const; |
920 | int hash_speculative() const; |
921 | const TypePtr* add_offset_speculative(intptr_t offset) const; |
922 | #ifndef PRODUCT |
923 | void dump_speculative(outputStream *st) const; |
924 | #endif |
925 | |
926 | // utility methods to work on the inline depth of the type |
927 | int dual_inline_depth() const; |
928 | int meet_inline_depth(int depth) const; |
929 | #ifndef PRODUCT |
930 | void dump_inline_depth(outputStream *st) const; |
931 | #endif |
932 | |
933 | // TypeInstPtr (TypeAryPtr resp.) and TypeInstKlassPtr (TypeAryKlassPtr resp.) implement very similar meet logic. |
934 | // The logic for meeting 2 instances (2 arrays resp.) is shared in the 2 utility methods below. However the logic for |
935 | // the oop and klass versions can be slightly different and extra logic may have to be executed depending on what |
936 | // exact case the meet falls into. The MeetResult struct is used by the utility methods to communicate what case was |
937 | // encountered so the right logic specific to klasses or oops can be executed. |
938 | enum MeetResult { |
939 | QUICK, |
940 | UNLOADED, |
941 | SUBTYPE, |
942 | NOT_SUBTYPE, |
943 | LCA |
944 | }; |
945 | static MeetResult |
946 | meet_instptr(PTR &ptr, ciKlass* this_klass, ciKlass* tinst_klass, bool this_xk, bool tinst_xk, PTR this_ptr, |
947 | PTR tinst_ptr, ciKlass*&res_klass, bool &res_xk); |
948 | static MeetResult |
949 | meet_aryptr(PTR& ptr, const Type*& elem, ciKlass* this_klass, ciKlass* tap_klass, bool this_xk, bool tap_xk, PTR this_ptr, PTR tap_ptr, ciKlass*& res_klass, bool& res_xk); |
950 | |
951 | public: |
952 | const int _offset; // Offset into oop, with TOP & BOT |
953 | const PTR _ptr; // Pointer equivalence class |
954 | |
955 | const int offset() const { return _offset; } |
956 | const PTR ptr() const { return _ptr; } |
957 | |
958 | static const TypePtr *make(TYPES t, PTR ptr, int offset, |
959 | const TypePtr* speculative = NULL, |
960 | int inline_depth = InlineDepthBottom); |
961 | |
962 | // Return a 'ptr' version of this type |
963 | virtual const Type *cast_to_ptr_type(PTR ptr) const; |
964 | |
965 | virtual intptr_t get_con() const; |
966 | |
967 | int xadd_offset( intptr_t offset ) const; |
968 | virtual const TypePtr *add_offset( intptr_t offset ) const; |
969 | virtual bool eq(const Type *t) const; |
970 | virtual int hash() const; // Type specific hashing |
971 | |
972 | virtual bool singleton(void) const; // TRUE if type is a singleton |
973 | virtual bool empty(void) const; // TRUE if type is vacuous |
974 | virtual const Type *xmeet( const Type *t ) const; |
975 | virtual const Type *xmeet_helper( const Type *t ) const; |
976 | int meet_offset( int offset ) const; |
977 | int dual_offset( ) const; |
978 | virtual const Type *xdual() const; // Compute dual right now. |
979 | |
980 | // meet, dual and join over pointer equivalence sets |
981 | PTR meet_ptr( const PTR in_ptr ) const { return ptr_meet[in_ptr][ptr()]; } |
982 | PTR dual_ptr() const { return ptr_dual[ptr()]; } |
983 | |
984 | // This is textually confusing unless one recalls that |
985 | // join(t) == dual()->meet(t->dual())->dual(). |
986 | PTR join_ptr( const PTR in_ptr ) const { |
987 | return ptr_dual[ ptr_meet[ ptr_dual[in_ptr] ] [ dual_ptr() ] ]; |
988 | } |
989 | |
990 | // Speculative type helper methods. |
991 | virtual const TypePtr* speculative() const { return _speculative; } |
992 | int inline_depth() const { return _inline_depth; } |
993 | virtual ciKlass* speculative_type() const; |
994 | virtual ciKlass* speculative_type_not_null() const; |
995 | virtual bool speculative_maybe_null() const; |
996 | virtual bool speculative_always_null() const; |
997 | virtual const Type* remove_speculative() const; |
998 | virtual const Type* cleanup_speculative() const; |
999 | virtual bool would_improve_type(ciKlass* exact_kls, int inline_depth) const; |
1000 | virtual bool would_improve_ptr(ProfilePtrKind maybe_null) const; |
1001 | virtual const TypePtr* with_inline_depth(int depth) const; |
1002 | |
1003 | virtual bool maybe_null() const { return meet_ptr(Null) == ptr(); } |
1004 | |
1005 | // Tests for relation to centerline of type lattice: |
1006 | static bool above_centerline(PTR ptr) { return (ptr <= AnyNull); } |
1007 | static bool below_centerline(PTR ptr) { return (ptr >= NotNull); } |
1008 | // Convenience common pre-built types. |
1009 | static const TypePtr *NULL_PTR; |
1010 | static const TypePtr *NOTNULL; |
1011 | static const TypePtr *BOTTOM; |
1012 | #ifndef PRODUCT |
1013 | virtual void dump2( Dict &d, uint depth, outputStream *st ) const; |
1014 | #endif |
1015 | }; |
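
The join_ptr() comment above states the standard lattice identity: join(t) == dual()->meet(t->dual())->dual(), i.e. a join can be computed by flipping both operands, meeting, and flipping the result back. A self-contained toy lattice makes this concrete; the four-point lattice and names below are invented for illustration and are not HotSpot's six-element PTR lattice:

    #include <cassert>
    #include <cstdio>

    // Four-point lattice: Top above NotNull and Null, Bottom below both.
    enum Pt { Top, NotNull, Null, Bottom };

    // Meet moves *down* the lattice (toward Bottom), as C2's meet does.
    Pt meet(Pt a, Pt b) {
      if (a == Top) return b;
      if (b == Top) return a;
      if (a == b)   return a;
      return Bottom;             // e.g. NotNull meet Null, or anything meet Bottom
    }

    // Dual flips the lattice upside down; the two middle points are self-dual.
    Pt dual(Pt p) {
      if (p == Top)    return Bottom;
      if (p == Bottom) return Top;
      return p;
    }

    // Join (least upper bound), written exactly as the comment above describes.
    Pt join(Pt a, Pt b) { return dual(meet(dual(a), dual(b))); }

    int main() {
      assert(meet(NotNull, Null) == Bottom);    // no single constraint covers both
      assert(join(NotNull, Null) == Top);       // nothing satisfies both constraints
      assert(join(NotNull, Bottom) == NotNull); // Bottom adds no information to a join
      std::printf("lattice identities hold\n");
      return 0;
    }

This is why the class only stores a meet table (ptr_meet) and a dual table (ptr_dual): joins can always be derived from them.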
1016 | |
1017 | //------------------------------TypeRawPtr------------------------------------- |
1018 | // Class of raw pointers, pointers to things other than Oops. Examples |
1019 | // include the stack pointer, top of heap, card-marking area, handles, etc. |
1020 | class TypeRawPtr : public TypePtr { |
1021 | protected: |
1022 | TypeRawPtr( PTR ptr, address bits ) : TypePtr(RawPtr,ptr,0), _bits(bits){} |
1023 | public: |
1024 | virtual bool eq( const Type *t ) const; |
1025 | virtual int hash() const; // Type specific hashing |
1026 | |
1027 | const address _bits; // Constant value, if applicable |
1028 | |
1029 | static const TypeRawPtr *make( PTR ptr ); |
1030 | static const TypeRawPtr *make( address bits ); |
1031 | |
1032 | // Return a 'ptr' version of this type |
1033 | virtual const TypeRawPtr* cast_to_ptr_type(PTR ptr) const; |
1034 | |
1035 | virtual intptr_t get_con() const; |
1036 | |
1037 | virtual const TypePtr *add_offset( intptr_t offset ) const; |
1038 | |
1039 | virtual const Type *xmeet( const Type *t ) const; |
1040 | virtual const Type *xdual() const; // Compute dual right now. |
1041 | // Convenience common pre-built types. |
1042 | static const TypeRawPtr *BOTTOM; |
1043 | static const TypeRawPtr *NOTNULL; |
1044 | #ifndef PRODUCT |
1045 | virtual void dump2( Dict &d, uint depth, outputStream *st ) const; |
1046 | #endif |
1047 | }; |
1048 | |
1049 | //------------------------------TypeOopPtr------------------------------------- |
1050 | // Some kind of oop (Java pointer), either instance or array. |
1051 | class TypeOopPtr : public TypePtr { |
1052 | protected: |
1053 | TypeOopPtr(TYPES t, PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id, |
1054 | const TypePtr* speculative, int inline_depth); |
1055 | public: |
1056 | virtual bool eq( const Type *t ) const; |
1057 | virtual int hash() const; // Type specific hashing |
1058 | virtual bool singleton(void) const; // TRUE if type is a singleton |
1059 | enum { |
1060 | InstanceTop = -1, // undefined instance |
1061 | InstanceBot = 0 // any possible instance |
1062 | }; |
1063 | protected: |
1064 | |
1065 | // Oop is NULL, unless this is a constant oop. |
1066 | ciObject* _const_oop; // Constant oop |
1067 | // If _klass is NULL, then so is _sig. This is an unloaded klass. |
1068 | ciKlass* _klass; // Klass object |
1069 | // Does the type exclude subclasses of the klass? (Inexact == polymorphic.) |
1070 | bool _klass_is_exact; |
1071 | bool _is_ptr_to_narrowoop; |
1072 | bool _is_ptr_to_narrowklass; |
1073 | bool _is_ptr_to_boxed_value; |
1074 | |
1075 | // If not InstanceTop or InstanceBot, indicates that this is |
1076 | // a particular instance of this type which is distinct. |
1077 | // This is the node index of the allocation node creating this instance. |
1078 | int _instance_id; |
1079 | |
1080 | static const TypeOopPtr* make_from_klass_common(ciKlass* klass, bool klass_change, bool try_for_exact); |
1081 | |
1082 | int dual_instance_id() const; |
1083 | int meet_instance_id(int uid) const; |
1084 | |
1085 | // Do not allow interface-vs.-noninterface joins to collapse to top. |
1086 | virtual const Type *filter_helper(const Type *kills, bool include_speculative) const; |
1087 | |
1088 | public: |
1089 | // Creates a type given a klass. Correctly handles multi-dimensional arrays |
1090 | // Respects UseUniqueSubclasses. |
1091 | // If the klass is final, the resulting type will be exact. |
1092 | static const TypeOopPtr* make_from_klass(ciKlass* klass) { |
1093 | return make_from_klass_common(klass, true, false); |
1094 | } |
1095 | // Same as before, but will produce an exact type, even if |
1096 | // the klass is not final, as long as it has exactly one implementation. |
1097 | static const TypeOopPtr* make_from_klass_unique(ciKlass* klass) { |
1098 | return make_from_klass_common(klass, true, true); |
1099 | } |
1100 | // Same as before, but does not respect UseUniqueSubclasses. |
1101 | // Use this only for creating array element types. |
1102 | static const TypeOopPtr* make_from_klass_raw(ciKlass* klass) { |
1103 | return make_from_klass_common(klass, false, false); |
1104 | } |
1105 | // Creates a singleton type given an object. |
1106 | // If the object cannot be rendered as a constant, |
1107 | // may return a non-singleton type. |
1108 | // If require_constant, produce a NULL if a singleton is not possible. |
1109 | static const TypeOopPtr* make_from_constant(ciObject* o, |
1110 | bool require_constant = false); |
1111 | |
1112 | // Make a generic (unclassed) pointer to an oop. |
1113 | static const TypeOopPtr* make(PTR ptr, int offset, int instance_id, |
1114 | const TypePtr* speculative = NULL, |
1115 | int inline_depth = InlineDepthBottom); |
1116 | |
1117 | ciObject* const_oop() const { return _const_oop; } |
1118 | virtual ciKlass* klass() const { return _klass; } |
1119 | bool klass_is_exact() const { return _klass_is_exact; } |
1120 | |
1121 | // Returns true if this pointer points at memory which contains a |
1122 | // compressed oop reference. |
1123 | bool is_ptr_to_narrowoop_nv() const { return _is_ptr_to_narrowoop; } |
1124 | bool is_ptr_to_narrowklass_nv() const { return _is_ptr_to_narrowklass; } |
1125 | bool is_ptr_to_boxed_value() const { return _is_ptr_to_boxed_value; } |
1126 | bool is_known_instance() const { return _instance_id > 0; } |
1127 | int instance_id() const { return _instance_id; } |
1128 | bool is_known_instance_field() const { return is_known_instance() && _offset >= 0; } |
1129 | |
1130 | virtual intptr_t get_con() const; |
1131 | |
1132 | virtual const TypeOopPtr* cast_to_ptr_type(PTR ptr) const; |
1133 | |
1134 | virtual const Type *cast_to_exactness(bool klass_is_exact) const; |
1135 | |
1136 | virtual const TypeOopPtr *cast_to_instance_id(int instance_id) const; |
1137 | |
1138 | // corresponding pointer to klass, for a given instance |
1139 | virtual const TypeKlassPtr* as_klass_type(bool try_for_exact = false) const; |
1140 | |
1141 | virtual const TypePtr *add_offset( intptr_t offset ) const; |
1142 | |
1143 | // Speculative type helper methods. |
1144 | virtual const Type* remove_speculative() const; |
1145 | virtual const Type* cleanup_speculative() const; |
1146 | virtual bool would_improve_type(ciKlass* exact_kls, int inline_depth) const; |
1147 | virtual const TypePtr* with_inline_depth(int depth) const; |
1148 | |
1149 | virtual const TypePtr* with_instance_id(int instance_id) const; |
1150 | |
1151 | virtual const Type *xdual() const; // Compute dual right now. |
1152 | // the core of the computation of the meet for TypeOopPtr and for its subclasses |
1153 | virtual const Type *xmeet_helper(const Type *t) const; |
1154 | |
1155 | // Convenience common pre-built type. |
1156 | static const TypeOopPtr *BOTTOM; |
1157 | #ifndef PRODUCT |
1158 | virtual void dump2( Dict &d, uint depth, outputStream *st ) const; |
1159 | #endif |
1160 | }; |
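
The InstanceTop/InstanceBot constants and the _instance_id comment above describe a small lattice of their own: "undefined", "one particular allocation" (a positive node index), or "any possible instance". The sketch below is derived from those comments only and is not a copy of the VM's meet code; meet_instance_id here is an illustrative stand-in:

    #include <cassert>

    // Mirrors the enum above: -1 = undefined, 0 = any possible instance,
    // a positive value = the node index of one particular allocation.
    const int InstanceTop = -1;
    const int InstanceBot = 0;

    int meet_instance_id(int a, int b) {
      if (a == InstanceTop) return b;   // "undefined" adds no constraint
      if (b == InstanceTop) return a;
      if (a == b) return a;             // the same known allocation
      return InstanceBot;               // different allocations: could be any instance
    }

    int main() {
      assert(meet_instance_id(InstanceTop, 42) == 42);
      assert(meet_instance_id(42, 42) == 42);
      assert(meet_instance_id(42, 17) == InstanceBot);
      return 0;
    }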
1161 | |
1162 | //------------------------------TypeInstPtr------------------------------------ |
1163 | // Class of Java object pointers, pointing either to non-array Java instances |
1164 | // or to a Klass* (including array klasses). |
1165 | class TypeInstPtr : public TypeOopPtr { |
1166 | TypeInstPtr(PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id, |
1167 | const TypePtr* speculative, int inline_depth); |
1168 | virtual bool eq( const Type *t ) const; |
1169 | virtual int hash() const; // Type specific hashing |
1170 | |
1171 | ciSymbol* _name; // class name |
1172 | |
1173 | public: |
1174 | ciSymbol* name() const { return _name; } |
1175 | |
1176 | bool is_loaded() const { return _klass->is_loaded(); } |
1177 | |
1178 | // Make a pointer to a constant oop. |
1179 | static const TypeInstPtr *make(ciObject* o) { |
1180 | return make(TypePtr::Constant, o->klass(), true, o, 0, InstanceBot); |
1181 | } |
1182 | // Make a pointer to a constant oop with offset. |
1183 | static const TypeInstPtr *make(ciObject* o, int offset) { |
1184 | return make(TypePtr::Constant, o->klass(), true, o, offset, InstanceBot); |
1185 | } |
1186 | |
1187 | // Make a pointer to some value of type klass. |
1188 | static const TypeInstPtr *make(PTR ptr, ciKlass* klass) { |
1189 | return make(ptr, klass, false, NULL, 0, InstanceBot); |
1190 | } |
1191 | |
1192 | // Make a pointer to some non-polymorphic value of exactly type klass. |
1193 | static const TypeInstPtr *make_exact(PTR ptr, ciKlass* klass) { |
1194 | return make(ptr, klass, true, NULL, 0, InstanceBot); |
1195 | } |
1196 | |
1197 | // Make a pointer to some value of type klass with offset. |
1198 | static const TypeInstPtr *make(PTR ptr, ciKlass* klass, int offset) { |
1199 | return make(ptr, klass, false, NULL, offset, InstanceBot); |
1200 | } |
1201 | |
1202 | // Make a pointer to an oop. |
1203 | static const TypeInstPtr *make(PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, |
1204 | int instance_id = InstanceBot, |
1205 | const TypePtr* speculative = NULL, |
1206 | int inline_depth = InlineDepthBottom); |
1207 | |
1208 | /** Create constant type for a constant boxed value */ |
1209 | const Type* get_const_boxed_value() const; |
1210 | |
1211 | // If this is a java.lang.Class constant, return the type for it or NULL. |
1212 | // Pass to Type::get_const_type to turn it to a type, which will usually |
1213 | // be a TypeInstPtr, but may also be a TypeInt::INT for int.class, etc. |
1214 | ciType* java_mirror_type() const; |
1215 | |
1216 | virtual const TypeInstPtr* cast_to_ptr_type(PTR ptr) const; |
1217 | |
1218 | virtual const Type *cast_to_exactness(bool klass_is_exact) const; |
1219 | |
1220 | virtual const TypeOopPtr *cast_to_instance_id(int instance_id) const; |
1221 | |
1222 | virtual const TypePtr *add_offset( intptr_t offset ) const; |
1223 | |
1224 | // Speculative type helper methods. |
1225 | virtual const Type* remove_speculative() const; |
1226 | virtual const TypePtr* with_inline_depth(int depth) const; |
1227 | virtual const TypePtr* with_instance_id(int instance_id) const; |
1228 | |
1229 | // the core of the computation of the meet of 2 types |
1230 | virtual const Type *xmeet_helper(const Type *t) const; |
1231 | virtual const TypeInstPtr *xmeet_unloaded( const TypeInstPtr *t ) const; |
1232 | virtual const Type *xdual() const; // Compute dual right now. |
1233 | |
1234 | const TypeKlassPtr* as_klass_type(bool try_for_exact = false) const; |
1235 | |
1236 | // Convenience common pre-built types. |
1237 | static const TypeInstPtr *NOTNULL; |
1238 | static const TypeInstPtr *BOTTOM; |
1239 | static const TypeInstPtr *MIRROR; |
1240 | static const TypeInstPtr *MARK; |
1241 | static const TypeInstPtr *KLASS; |
1242 | #ifndef PRODUCT |
1243 | virtual void dump2( Dict &d, uint depth, outputStream *st ) const; // Specialized per-Type dumping |
1244 | #endif |
1245 | }; |
1246 | |
1247 | //------------------------------TypeAryPtr------------------------------------- |
1248 | // Class of Java array pointers |
1249 | class TypeAryPtr : public TypeOopPtr { |
1250 | TypeAryPtr( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, |
1251 | int offset, int instance_id, bool is_autobox_cache, |
1252 | const TypePtr* speculative, int inline_depth) |
1253 | : TypeOopPtr(AryPtr,ptr,k,xk,o,offset, instance_id, speculative, inline_depth), |
1254 | _ary(ary), |
1255 | _is_autobox_cache(is_autobox_cache) |
1256 | { |
1257 | #ifdef ASSERT |
1258 | if (k != NULL) { |
1259 | // Verify that specified klass and TypeAryPtr::klass() follow the same rules. |
1260 | ciKlass* ck = compute_klass(true); |
1261 | if (k != ck) { |
1262 | this->dump(); tty->cr(); |
1263 | tty->print(" k: "); |
1264 | k->print(); tty->cr(); |
1265 | tty->print("ck: "); |
1266 | if (ck != NULL) ck->print(); |
1267 | else tty->print("<NULL>"); |
1268 | tty->cr(); |
1269 | assert(false, "unexpected TypeAryPtr::_klass")do { if (!(false)) { (*g_assert_poison) = 'X';; report_vm_error ("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/type.hpp" , 1269, "assert(" "false" ") failed", "unexpected TypeAryPtr::_klass" ); ::breakpoint(); } } while (0); |
1270 | } |
1271 | } |
1272 | #endif |
1273 | } |
1274 | virtual bool eq( const Type *t ) const; |
1275 | virtual int hash() const; // Type specific hashing |
1276 | const TypeAry *_ary; // Array we point into |
1277 | const bool _is_autobox_cache; |
1278 | |
1279 | ciKlass* compute_klass(DEBUG_ONLY(bool verify = false)) const; |
1280 | |
1281 | public: |
1282 | // Accessors |
1283 | ciKlass* klass() const; |
1284 | const TypeAry* ary() const { return _ary; } |
1285 | const Type* elem() const { return _ary->_elem; } |
1286 | const TypeInt* size() const { return _ary->_size; } |
1287 | bool is_stable() const { return _ary->_stable; } |
1288 | |
1289 | bool is_autobox_cache() const { return _is_autobox_cache; } |
1290 | |
1291 | static const TypeAryPtr *make(PTR ptr, const TypeAry *ary, ciKlass* k, bool xk, int offset, |
1292 | int instance_id = InstanceBot, |
1293 | const TypePtr* speculative = NULL, |
1294 | int inline_depth = InlineDepthBottom); |
1295 | // Constant pointer to array |
1296 | static const TypeAryPtr *make(PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset, |
1297 | int instance_id = InstanceBot, |
1298 | const TypePtr* speculative = NULL, |
1299 | int inline_depth = InlineDepthBottom, bool is_autobox_cache = false); |
1300 | |
1301 | // Return a 'ptr' version of this type |
1302 | virtual const TypeAryPtr* cast_to_ptr_type(PTR ptr) const; |
1303 | |
1304 | virtual const Type *cast_to_exactness(bool klass_is_exact) const; |
1305 | |
1306 | virtual const TypeOopPtr *cast_to_instance_id(int instance_id) const; |
1307 | |
1308 | virtual const TypeAryPtr* cast_to_size(const TypeInt* size) const; |
1309 | virtual const TypeInt* narrow_size_type(const TypeInt* size) const; |
1310 | |
1311 | virtual bool empty(void) const; // TRUE if type is vacuous |
1312 | virtual const TypePtr *add_offset( intptr_t offset ) const; |
1313 | |
1314 | // Speculative type helper methods. |
1315 | virtual const Type* remove_speculative() const; |
1316 | virtual const TypePtr* with_inline_depth(int depth) const; |
1317 | virtual const TypePtr* with_instance_id(int instance_id) const; |
1318 | |
1319 | // the core of the computation of the meet of 2 types |
1320 | virtual const Type *xmeet_helper(const Type *t) const; |
1321 | virtual const Type *xdual() const; // Compute dual right now. |
1322 | |
1323 | const TypeAryPtr* cast_to_stable(bool stable, int stable_dimension = 1) const; |
1324 | int stable_dimension() const; |
1325 | |
1326 | const TypeAryPtr* cast_to_autobox_cache() const; |
1327 | |
1328 | static jint max_array_length(BasicType etype) ; |
1329 | virtual const TypeKlassPtr* as_klass_type(bool try_for_exact = false) const; |
1330 | |
1331 | // Convenience common pre-built types. |
1332 | static const TypeAryPtr *RANGE; |
1333 | static const TypeAryPtr *OOPS; |
1334 | static const TypeAryPtr *NARROWOOPS; |
1335 | static const TypeAryPtr *BYTES; |
1336 | static const TypeAryPtr *SHORTS; |
1337 | static const TypeAryPtr *CHARS; |
1338 | static const TypeAryPtr *INTS; |
1339 | static const TypeAryPtr *LONGS; |
1340 | static const TypeAryPtr *FLOATS; |
1341 | static const TypeAryPtr *DOUBLES; |
1342 | // selects one of the above: |
1343 | static const TypeAryPtr *get_array_body_type(BasicType elem) { |
1344 | assert((uint)elem <= T_CONFLICT && _array_body_type[elem] != NULL, "bad elem type"); |
1345 | return _array_body_type[elem]; |
1346 | } |
1347 | static const TypeAryPtr *_array_body_type[T_CONFLICT+1]; |
1348 | // sharpen the type of an int which is used as an array size |
1349 | #ifdef ASSERT |
1350 | // One type is interface, the other is oop |
1351 | virtual bool interface_vs_oop(const Type *t) const; |
1352 | #endif |
1353 | #ifndef PRODUCT |
1354 | virtual void dump2( Dict &d, uint depth, outputStream *st ) const; // Specialized per-Type dumping |
1355 | #endif |
1356 | }; |
1357 | |
1358 | //------------------------------TypeMetadataPtr------------------------------------- |
1359 | // Some kind of metadata, either Method*, MethodData* or CPCacheOop |
1360 | class TypeMetadataPtr : public TypePtr { |
1361 | protected: |
1362 | TypeMetadataPtr(PTR ptr, ciMetadata* metadata, int offset); |
1363 | // Do not allow interface-vs.-noninterface joins to collapse to top. |
1364 | virtual const Type *filter_helper(const Type *kills, bool include_speculative) const; |
1365 | public: |
1366 | virtual bool eq( const Type *t ) const; |
1367 | virtual int hash() const; // Type specific hashing |
1368 | virtual bool singleton(void) const; // TRUE if type is a singleton |
1369 | |
1370 | private: |
1371 | ciMetadata* _metadata; |
1372 | |
1373 | public: |
1374 | static const TypeMetadataPtr* make(PTR ptr, ciMetadata* m, int offset); |
1375 | |
1376 | static const TypeMetadataPtr* make(ciMethod* m); |
1377 | static const TypeMetadataPtr* make(ciMethodData* m); |
1378 | |
1379 | ciMetadata* metadata() const { return _metadata; } |
1380 | |
1381 | virtual const TypeMetadataPtr* cast_to_ptr_type(PTR ptr) const; |
1382 | |
1383 | virtual const TypePtr *add_offset( intptr_t offset ) const; |
1384 | |
1385 | virtual const Type *xmeet( const Type *t ) const; |
1386 | virtual const Type *xdual() const; // Compute dual right now. |
1387 | |
1388 | virtual intptr_t get_con() const; |
1389 | |
1390 | // Convenience common pre-built types. |
1391 | static const TypeMetadataPtr *BOTTOM; |
1392 | |
1393 | #ifndef PRODUCT |
1394 | virtual void dump2( Dict &d, uint depth, outputStream *st ) const; |
1395 | #endif |
1396 | }; |
1397 | |
1398 | //------------------------------TypeKlassPtr----------------------------------- |
1399 | // Class of Java Klass pointers |
1400 | class TypeKlassPtr : public TypePtr { |
1401 | protected: |
1402 | TypeKlassPtr(TYPES t, PTR ptr, ciKlass* klass, int offset); |
1403 | |
1404 | virtual const Type *filter_helper(const Type *kills, bool include_speculative) const; |
1405 | |
1406 | public: |
1407 | virtual bool eq( const Type *t ) const; |
1408 | virtual int hash() const; |
1409 | virtual bool singleton(void) const; // TRUE if type is a singleton |
1410 | virtual bool must_be_exact() const { ShouldNotReachHere(); return false; } |
1411 | |
1412 | protected: |
1413 | |
1414 | ciKlass* _klass; |
1415 | |
1416 | public: |
1417 | |
1418 | virtual ciKlass* klass() const { return _klass; } |
1419 | bool klass_is_exact() const { return _ptr == Constant; } |
1420 | bool is_loaded() const { return klass()->is_loaded(); } |
1421 | |
1422 | static const TypeKlassPtr* make(ciKlass* klass); |
1423 | static const TypeKlassPtr *make(PTR ptr, ciKlass* klass, int offset); |
1424 | |
1425 | |
1426 | virtual const TypePtr* cast_to_ptr_type(PTR ptr) const { ShouldNotReachHere(); return NULL; } |
1427 | |
1428 | virtual const TypeKlassPtr *cast_to_exactness(bool klass_is_exact) const { ShouldNotReachHere(); return NULL; } |
1429 | |
1430 | // corresponding pointer to instance, for a given class |
1431 | virtual const TypeOopPtr* as_instance_type() const { ShouldNotReachHere(); return NULL; } |
1432 | |
1433 | virtual const TypePtr *add_offset( intptr_t offset ) const { ShouldNotReachHere(); return NULL; } |
1434 | virtual const Type *xmeet( const Type *t ) const { ShouldNotReachHere(); return NULL; } |
1435 | virtual const Type *xdual() const { ShouldNotReachHere(); return NULL; } |
1436 | |
1437 | virtual intptr_t get_con() const; |
1438 | |
1439 | virtual const TypeKlassPtr* with_offset(intptr_t offset) const { ShouldNotReachHere(); return NULL; } |
1440 | |
1441 | #ifndef PRODUCT |
1442 | virtual void dump2( Dict &d, uint depth, outputStream *st ) const; // Specialized per-Type dumping |
1443 | #endif |
1444 | }; |
1445 | |
1446 | // Instance klass pointer, mirrors TypeInstPtr |
1447 | class TypeInstKlassPtr : public TypeKlassPtr { |
1448 | |
1449 | TypeInstKlassPtr(PTR ptr, ciKlass* klass, int offset) |
1450 | : TypeKlassPtr(InstKlassPtr, ptr, klass, offset) { |
1451 | } |
1452 | |
1453 | virtual bool must_be_exact() const; |
1454 | |
1455 | public: |
1456 | // Instance klass ignoring any interface |
1457 | ciInstanceKlass* instance_klass() const { return klass()->as_instance_klass(); } |
1458 | |
1459 | static const TypeInstKlassPtr *make(ciKlass* k) { |
1460 | return make(TypePtr::Constant, k, 0); |
1461 | } |
1462 | static const TypeInstKlassPtr *make(PTR ptr, ciKlass* k, int offset); |
1463 | |
1464 | virtual const TypePtr* cast_to_ptr_type(PTR ptr) const; |
1465 | |
1466 | virtual const TypeKlassPtr *cast_to_exactness(bool klass_is_exact) const; |
1467 | |
1468 | // corresponding pointer to instance, for a given class |
1469 | virtual const TypeOopPtr* as_instance_type() const; |
1470 | virtual int hash() const; |
1471 | virtual bool eq(const Type *t) const; |
1472 | |
1473 | virtual const TypePtr *add_offset( intptr_t offset ) const; |
1474 | virtual const Type *xmeet( const Type *t ) const; |
1475 | virtual const Type *xdual() const; |
1476 | virtual const TypeKlassPtr* with_offset(intptr_t offset) const; |
1477 | |
1478 | // Convenience common pre-built types. |
1479 | static const TypeInstKlassPtr* OBJECT; // Not-null object klass or below |
1480 | static const TypeInstKlassPtr* OBJECT_OR_NULL; // Maybe-null version of same |
1481 | }; |
1482 | |
1483 | // Array klass pointer, mirrors TypeAryPtr |
1484 | class TypeAryKlassPtr : public TypeKlassPtr { |
1485 | const Type *_elem; |
1486 | |
1487 | TypeAryKlassPtr(PTR ptr, const Type *elem, ciKlass* klass, int offset) |
1488 | : TypeKlassPtr(AryKlassPtr, ptr, klass, offset), _elem(elem) { |
1489 | } |
1490 | |
1491 | virtual bool must_be_exact() const; |
1492 | |
1493 | public: |
1494 | virtual ciKlass* klass() const; |
1495 | |
1496 | // returns base element type, an instance klass (and not interface) for object arrays |
1497 | const Type* base_element_type(int& dims) const; |
1498 | |
1499 | static const TypeAryKlassPtr *make(PTR ptr, ciKlass* k, int offset); |
1500 | static const TypeAryKlassPtr *make(PTR ptr, const Type *elem, ciKlass* k, int offset); |
1501 | static const TypeAryKlassPtr* make(ciKlass* klass); |
1502 | |
1503 | const Type *elem() const { return _elem; } |
1504 | |
1505 | virtual bool eq(const Type *t) const; |
1506 | virtual int hash() const; // Type specific hashing |
1507 | |
1508 | virtual const TypePtr* cast_to_ptr_type(PTR ptr) const; |
1509 | |
1510 | virtual const TypeKlassPtr *cast_to_exactness(bool klass_is_exact) const; |
1511 | |
1512 | // corresponding pointer to instance, for a given class |
1513 | virtual const TypeOopPtr* as_instance_type() const; |
1514 | |
1515 | virtual const TypePtr *add_offset( intptr_t offset ) const; |
1516 | virtual const Type *xmeet( const Type *t ) const; |
1517 | virtual const Type *xdual() const; // Compute dual right now. |
1518 | |
1519 | virtual const TypeKlassPtr* with_offset(intptr_t offset) const; |
1520 | |
1521 | virtual bool empty(void) const { |
1522 | return TypeKlassPtr::empty() || _elem->empty(); |
1523 | } |
1524 | |
1525 | #ifndef PRODUCT |
1526 | virtual void dump2( Dict &d, uint depth, outputStream *st ) const; // Specialized per-Type dumping |
1527 | #endif |
1528 | }; |
1529 | |
1530 | class TypeNarrowPtr : public Type { |
1531 | protected: |
1532 | const TypePtr* _ptrtype; // Could be TypePtr::NULL_PTR |
1533 | |
1534 | TypeNarrowPtr(TYPES t, const TypePtr* ptrtype): Type(t), |
1535 | _ptrtype(ptrtype) { |
1536 | assert(ptrtype->offset() == 0 || |
1537 | ptrtype->offset() == OffsetBot || |
1538 | ptrtype->offset() == OffsetTop, "no real offsets"); |
1539 | } |
1540 | |
1541 | virtual const TypeNarrowPtr *isa_same_narrowptr(const Type *t) const = 0; |
1542 | virtual const TypeNarrowPtr *is_same_narrowptr(const Type *t) const = 0; |
1543 | virtual const TypeNarrowPtr *make_same_narrowptr(const TypePtr *t) const = 0; |
1544 | virtual const TypeNarrowPtr *make_hash_same_narrowptr(const TypePtr *t) const = 0; |
1545 | // Do not allow interface-vs.-noninterface joins to collapse to top. |
1546 | virtual const Type *filter_helper(const Type *kills, bool include_speculative) const; |
1547 | public: |
1548 | virtual bool eq( const Type *t ) const; |
1549 | virtual int hash() const; // Type specific hashing |
1550 | virtual bool singleton(void) const; // TRUE if type is a singleton |
1551 | |
1552 | virtual const Type *xmeet( const Type *t ) const; |
1553 | virtual const Type *xdual() const; // Compute dual right now. |
1554 | |
1555 | virtual intptr_t get_con() const; |
1556 | |
1557 | virtual bool empty(void) const; // TRUE if type is vacuous |
1558 | |
1559 | // returns the equivalent ptr type for this compressed pointer |
1560 | const TypePtr *get_ptrtype() const { |
1561 | return _ptrtype; |
1562 | } |
1563 | |
1564 | bool is_known_instance() const { |
1565 | return _ptrtype->is_known_instance(); |
1566 | } |
1567 | |
1568 | #ifndef PRODUCT |
1569 | virtual void dump2( Dict &d, uint depth, outputStream *st ) const; |
1570 | #endif |
1571 | }; |
1572 | |
1573 | //------------------------------TypeNarrowOop---------------------------------- |
1574 | // A compressed reference to some kind of Oop. This type wraps around |
1575 | // a preexisting TypeOopPtr and forwards most of its operations to |
1576 | // the underlying type. Its only real purpose is to track the |
1577 | // oopness of the compressed oop value when we expose the conversion |
1578 | // between the normal and the compressed form. |
1579 | class TypeNarrowOop : public TypeNarrowPtr { |
1580 | protected: |
1581 | TypeNarrowOop( const TypePtr* ptrtype): TypeNarrowPtr(NarrowOop, ptrtype) { |
1582 | } |
1583 | |
1584 | virtual const TypeNarrowPtr *isa_same_narrowptr(const Type *t) const { |
1585 | return t->isa_narrowoop(); |
1586 | } |
1587 | |
1588 | virtual const TypeNarrowPtr *is_same_narrowptr(const Type *t) const { |
1589 | return t->is_narrowoop(); |
1590 | } |
1591 | |
1592 | virtual const TypeNarrowPtr *make_same_narrowptr(const TypePtr *t) const { |
1593 | return new TypeNarrowOop(t); |
1594 | } |
1595 | |
1596 | virtual const TypeNarrowPtr *make_hash_same_narrowptr(const TypePtr *t) const { |
1597 | return (const TypeNarrowPtr*)((new TypeNarrowOop(t))->hashcons()); |
1598 | } |
1599 | |
1600 | public: |
1601 | |
1602 | static const TypeNarrowOop *make( const TypePtr* type); |
1603 | |
1604 | static const TypeNarrowOop* make_from_constant(ciObject* con, bool require_constant = false) { |
1605 | return make(TypeOopPtr::make_from_constant(con, require_constant)); |
1606 | } |
1607 | |
1608 | static const TypeNarrowOop *BOTTOM; |
1609 | static const TypeNarrowOop *NULL_PTR; |
1610 | |
1611 | virtual const Type* remove_speculative() const; |
1612 | virtual const Type* cleanup_speculative() const; |
1613 | |
1614 | #ifndef PRODUCT |
1615 | virtual void dump2( Dict &d, uint depth, outputStream *st ) const; |
1616 | #endif |
1617 | }; |
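
The class comment above is about tracking "oopness" across the conversion between the normal and the compressed form; the conversion itself is just base-plus-shifted-offset arithmetic. A standalone sketch of that arithmetic (the heap base and shift are example values, and this is not the VM's encoding code):

    #include <cassert>
    #include <cstdint>

    // Compressed-oop style encoding: a 64-bit address stored as a 32-bit
    // offset from a heap base, scaled by the object-alignment shift.
    const uint64_t heap_base = 0x0000000700000000ull;  // example value only
    const unsigned shift     = 3;                      // 8-byte object alignment

    uint32_t encode(uint64_t addr)   { return (uint32_t)((addr - heap_base) >> shift); }
    uint64_t decode(uint32_t narrow) { return heap_base + ((uint64_t)narrow << shift); }

    int main() {
      uint64_t oop = heap_base + 0x12345678ull * 8;  // some aligned address in the heap
      assert(decode(encode(oop)) == oop);            // round-trips while aligned and in range
      return 0;
    }

TypeNarrowOop itself stores no new information; it wraps the TypePtr for the uncompressed form, which is what get_ptrtype() above returns.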
1618 | |
1619 | //------------------------------TypeNarrowKlass---------------------------------- |
1620 | // A compressed reference to a klass pointer. This type wraps around a |
1621 | // preexisting TypeKlassPtr and forwards most of its operations to |
1622 | // the underlying type. |
1623 | class TypeNarrowKlass : public TypeNarrowPtr { |
1624 | protected: |
1625 | TypeNarrowKlass( const TypePtr* ptrtype): TypeNarrowPtr(NarrowKlass, ptrtype) { |
1626 | } |
1627 | |
1628 | virtual const TypeNarrowPtr *isa_same_narrowptr(const Type *t) const { |
1629 | return t->isa_narrowklass(); |
1630 | } |
1631 | |
1632 | virtual const TypeNarrowPtr *is_same_narrowptr(const Type *t) const { |
1633 | return t->is_narrowklass(); |
1634 | } |
1635 | |
1636 | virtual const TypeNarrowPtr *make_same_narrowptr(const TypePtr *t) const { |
1637 | return new TypeNarrowKlass(t); |
1638 | } |
1639 | |
1640 | virtual const TypeNarrowPtr *make_hash_same_narrowptr(const TypePtr *t) const { |
1641 | return (const TypeNarrowPtr*)((new TypeNarrowKlass(t))->hashcons()); |
1642 | } |
1643 | |
1644 | public: |
1645 | static const TypeNarrowKlass *make( const TypePtr* type); |
1646 | |
1647 | // static const TypeNarrowKlass *BOTTOM; |
1648 | static const TypeNarrowKlass *NULL_PTR; |
1649 | |
1650 | #ifndef PRODUCT |
1651 | virtual void dump2( Dict &d, uint depth, outputStream *st ) const; |
1652 | #endif |
1653 | }; |
1654 | |
1655 | //------------------------------TypeFunc--------------------------------------- |
1656 | // Class of Function Types |
1657 | class TypeFunc : public Type { |
1658 | TypeFunc( const TypeTuple *domain, const TypeTuple *range ) : Type(Function), _domain(domain), _range(range) {} |
1659 | virtual bool eq( const Type *t ) const; |
1660 | virtual int hash() const; // Type specific hashing |
1661 | virtual bool singleton(void) const; // TRUE if type is a singleton |
1662 | virtual bool empty(void) const; // TRUE if type is vacuous |
1663 | |
1664 | const TypeTuple* const _domain; // Domain of inputs |
1665 | const TypeTuple* const _range; // Range of results |
1666 | |
1667 | public: |
1668 | // Constants are shared among ADLC and VM |
1669 | enum { Control = AdlcVMDeps::Control, |
1670 | I_O = AdlcVMDeps::I_O, |
1671 | Memory = AdlcVMDeps::Memory, |
1672 | FramePtr = AdlcVMDeps::FramePtr, |
1673 | ReturnAdr = AdlcVMDeps::ReturnAdr, |
1674 | Parms = AdlcVMDeps::Parms |
1675 | }; |
1676 | |
1677 | |
1678 | // Accessors: |
1679 | const TypeTuple* domain() const { return _domain; } |
1680 | const TypeTuple* range() const { return _range; } |
1681 | |
1682 | static const TypeFunc *make(ciMethod* method); |
1683 | static const TypeFunc *make(ciSignature signature, const Type* extra); |
1684 | static const TypeFunc *make(const TypeTuple* domain, const TypeTuple* range); |
1685 | |
1686 | virtual const Type *xmeet( const Type *t ) const; |
1687 | virtual const Type *xdual() const; // Compute dual right now. |
1688 | |
1689 | BasicType return_type() const; |
1690 | |
1691 | #ifndef PRODUCT |
1692 | virtual void dump2( Dict &d, uint depth, outputStream *st ) const; // Specialized per-Type dumping |
1693 | #endif |
1694 | // Convenience common pre-built types. |
1695 | }; |
1696 | |
1697 | //------------------------------accessors-------------------------------------- |
1698 | inline bool Type::is_ptr_to_narrowoop() const { |
1699 | #ifdef _LP64 |
1700 | return (isa_oopptr() != NULL && is_oopptr()->is_ptr_to_narrowoop_nv()); |
1701 | #else |
1702 | return false; |
1703 | #endif |
1704 | } |
1705 | |
1706 | inline bool Type::is_ptr_to_narrowklass() const { |
1707 | #ifdef _LP64 |
1708 | return (isa_oopptr() != NULL && is_oopptr()->is_ptr_to_narrowklass_nv()); |
1709 | #else |
1710 | return false; |
1711 | #endif |
1712 | } |
1713 | |
1714 | inline float Type::getf() const { |
1715 | assert( _base == FloatCon, "Not a FloatCon" ); |
1716 | return ((TypeF*)this)->_f; |
1717 | } |
1718 | |
1719 | inline double Type::getd() const { |
1720 | assert( _base == DoubleCon, "Not a DoubleCon" ); |
1721 | return ((TypeD*)this)->_d; |
1722 | } |
1723 | |
1724 | inline const TypeInteger *Type::is_integer(BasicType bt) const { |
1725 | assert((bt == T_INT && _base == Int) || (bt == T_LONG && _base == Long), "Not an Int"); |
1726 | return (TypeInteger*)this; |
1727 | } |
1728 | |
1729 | inline const TypeInteger *Type::isa_integer(BasicType bt) const { |
1730 | return (((bt == T_INT && _base == Int) || (bt == T_LONG && _base == Long)) ? (TypeInteger*)this : NULL); |
1731 | } |
1732 | |
1733 | inline const TypeInt *Type::is_int() const { |
1734 | assert( _base == Int, "Not an Int" ); |
1735 | return (TypeInt*)this; |
1736 | } |
1737 | |
1738 | inline const TypeInt *Type::isa_int() const { |
1739 | return ( _base == Int ? (TypeInt*)this : NULL); |
1740 | } |
1741 | |
1742 | inline const TypeLong *Type::is_long() const { |
1743 | assert( _base == Long, "Not a Long" ); |
1744 | return (TypeLong*)this; |
1745 | } |
1746 | |
1747 | inline const TypeLong *Type::isa_long() const { |
1748 | return ( _base == Long ? (TypeLong*)this : NULL); |
1749 | } |
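The casting accessors above follow a single contract: is_X() asserts that _base already carries the expected tag and casts unconditionally (debug builds trap on a mismatch), while isa_X() answers the question and returns NULL when the type is something else. Callers therefore probe with isa_* and reach for is_* only once the kind is established. An illustrative helper (hypothetical, not part of this header), assuming the usual is_con()/get_con() accessors on TypeInt and TypeLong:

  static jlong constant_or_zero(const Type* t) {
    if (const TypeInt* ti = t->isa_int()) {   // query form: NULL when t is not an Int
      return ti->is_con() ? ti->get_con() : 0;
    }
    if (t->isa_long() != NULL) {
      const TypeLong* tl = t->is_long();      // assert form: the kind is already known here
      return tl->is_con() ? tl->get_con() : 0;
    }
    return 0;                                 // neither an Int nor a Long
  }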
1750 | |
1751 | inline const TypeF *Type::isa_float() const { |
1752 | return ((_base == FloatTop || |
1753 | _base == FloatCon || |
1754 | _base == FloatBot) ? (TypeF*)this : NULL); |
1755 | } |
1756 | |
1757 | inline const TypeF *Type::is_float_constant() const { |
1758 | assert( _base == FloatCon, "Not a Float" ); |
1759 | return (TypeF*)this; |
1760 | } |
1761 | |
1762 | inline const TypeF *Type::isa_float_constant() const { |
1763 | return ( _base == FloatCon ? (TypeF*)this : NULL); |
1764 | } |
1765 | |
1766 | inline const TypeD *Type::isa_double() const { |
1767 | return ((_base == DoubleTop || |
1768 | _base == DoubleCon || |
1769 | _base == DoubleBot) ? (TypeD*)this : NULL); |
1770 | } |
1771 | |
1772 | inline const TypeD *Type::is_double_constant() const { |
1773 | assert( _base == DoubleCon, "Not a Double" ); |
1774 | return (TypeD*)this; |
1775 | } |
1776 | |
1777 | inline const TypeD *Type::isa_double_constant() const { |
1778 | return ( _base == DoubleCon ? (TypeD*)this : NULL); |
1779 | } |
1780 | |
1781 | inline const TypeTuple *Type::is_tuple() const { |
1782 | assert( _base == Tuple, "Not a Tuple" ); |
1783 | return (TypeTuple*)this; |
1784 | } |
1785 | |
1786 | inline const TypeAry *Type::is_ary() const { |
1787 | assert( _base == Array , "Not an Array" ); |
1788 | return (TypeAry*)this; |
1789 | } |
1790 | |
1791 | inline const TypeAry *Type::isa_ary() const { |
1792 | return ((_base == Array) ? (TypeAry*)this : NULL); |
1793 | } |
1794 | |
1795 | inline const TypeVectMask *Type::is_vectmask() const { |
1796 | assert( _base == VectorMask, "Not a Vector Mask" ); |
1797 | return (TypeVectMask*)this; |
1798 | } |
1799 | |
1800 | inline const TypeVectMask *Type::isa_vectmask() const { |
1801 | return (_base == VectorMask) ? (TypeVectMask*)this : NULL; |
1802 | } |
1803 | |
1804 | inline const TypeVect *Type::is_vect() const { |
1805 | assert( _base >= VectorMask && _base <= VectorZ, "Not a Vector" ); |
1806 | return (TypeVect*)this; |
1807 | } |
1808 | |
1809 | inline const TypeVect *Type::isa_vect() const { |
1810 | return (_base >= VectorMask && _base <= VectorZ) ? (TypeVect*)this : NULL; |
1811 | } |
1812 | |
1813 | inline const TypePtr *Type::is_ptr() const { |
1814 | // AnyPtr is the first Ptr and KlassPtr the last, with no non-ptrs between. |
1815 | assert(_base >= AnyPtr && _base <= AryKlassPtr, "Not a pointer"); |
1816 | return (TypePtr*)this; |
1817 | } |
1818 | |
1819 | inline const TypePtr *Type::isa_ptr() const { |
1820 | // AnyPtr is the first Ptr and KlassPtr the last, with no non-ptrs between. |
1821 | return (_base >= AnyPtr && _base <= AryKlassPtr) ? (TypePtr*)this : NULL; |
1822 | } |
1823 | |
1824 | inline const TypeOopPtr *Type::is_oopptr() const { |
1825 | // OopPtr is the first and KlassPtr the last, with no non-oops between. |
1826 | assert(_base >= OopPtr && _base <= AryPtr, "Not a Java pointer" ); |
1827 | return (TypeOopPtr*)this; |
1828 | } |
1829 | |
1830 | inline const TypeOopPtr *Type::isa_oopptr() const { |
1831 | // OopPtr is the first and KlassPtr the last, with no non-oops between. |
1832 | return (_base >= OopPtr && _base <= AryPtr) ? (TypeOopPtr*)this : NULL; |
1833 | } |
1834 | |
1835 | inline const TypeRawPtr *Type::isa_rawptr() const { |
1836 | return (_base == RawPtr) ? (TypeRawPtr*)this : NULL; |
1837 | } |
1838 | |
1839 | inline const TypeRawPtr *Type::is_rawptr() const { |
1840 | assert( _base == RawPtr, "Not a raw pointer" ); |
1841 | return (TypeRawPtr*)this; |
1842 | } |
1843 | |
1844 | inline const TypeInstPtr *Type::isa_instptr() const { |
1845 | return (_base == InstPtr) ? (TypeInstPtr*)this : NULL; |
1846 | } |
1847 | |
1848 | inline const TypeInstPtr *Type::is_instptr() const { |
1849 | assert( _base == InstPtr, "Not an object pointer" ); |
1850 | return (TypeInstPtr*)this; |
1851 | } |
1852 | |
1853 | inline const TypeAryPtr *Type::isa_aryptr() const { |
1854 | return (_base == AryPtr) ? (TypeAryPtr*)this : NULL; |
1855 | } |
1856 | |
1857 | inline const TypeAryPtr *Type::is_aryptr() const { |
1858 | assert( _base == AryPtr, "Not an array pointer" ); |
1859 | return (TypeAryPtr*)this; |
1860 | } |
1861 | |
1862 | inline const TypeNarrowOop *Type::is_narrowoop() const { |
1863 | // OopPtr is the first and KlassPtr the last, with no non-oops between. |
1864 | assert(_base == NarrowOop, "Not a narrow oop" ); |
1865 | return (TypeNarrowOop*)this; |
1866 | } |
1867 | |
1868 | inline const TypeNarrowOop *Type::isa_narrowoop() const { |
1869 | // OopPtr is the first and KlassPtr the last, with no non-oops between. |
1870 | return (_base == NarrowOop) ? (TypeNarrowOop*)this : NULL; |
1871 | } |
1872 | |
1873 | inline const TypeNarrowKlass *Type::is_narrowklass() const { |
1874 | assert(_base == NarrowKlass, "Not a narrow klass" ); |
1875 | return (TypeNarrowKlass*)this; |
1876 | } |
1877 | |
1878 | inline const TypeNarrowKlass *Type::isa_narrowklass() const { |
1879 | return (_base == NarrowKlass) ? (TypeNarrowKlass*)this : NULL; |
1880 | } |
1881 | |
1882 | inline const TypeMetadataPtr *Type::is_metadataptr() const { |
1883 | // MetadataPtr is the first and CPCachePtr the last |
1884 | assert(_base == MetadataPtr, "Not a metadata pointer" ); |
1885 | return (TypeMetadataPtr*)this; |
1886 | } |
1887 | |
1888 | inline const TypeMetadataPtr *Type::isa_metadataptr() const { |
1889 | return (_base == MetadataPtr) ? (TypeMetadataPtr*)this : NULL; |
1890 | } |
1891 | |
1892 | inline const TypeKlassPtr *Type::isa_klassptr() const { |
1893 | return (_base >= KlassPtr && _base <= AryKlassPtr ) ? (TypeKlassPtr*)this : NULL; |
1894 | } |
1895 | |
1896 | inline const TypeKlassPtr *Type::is_klassptr() const { |
1897 | assert(_base >= KlassPtr && _base <= AryKlassPtr, "Not a klass pointer"); |
1898 | return (TypeKlassPtr*)this; |
1899 | } |
1900 | |
1901 | inline const TypeInstKlassPtr *Type::isa_instklassptr() const { |
1902 | return (_base == InstKlassPtr) ? (TypeInstKlassPtr*)this : NULL; |
1903 | } |
1904 | |
1905 | inline const TypeInstKlassPtr *Type::is_instklassptr() const { |
1906 | assert(_base == InstKlassPtr, "Not a klass pointer"); |
1907 | return (TypeInstKlassPtr*)this; |
1908 | } |
1909 | |
1910 | inline const TypeAryKlassPtr *Type::isa_aryklassptr() const { |
1911 | return (_base == AryKlassPtr) ? (TypeAryKlassPtr*)this : NULL; |
1912 | } |
1913 | |
1914 | inline const TypeAryKlassPtr *Type::is_aryklassptr() const { |
1915 | assert(_base == AryKlassPtr, "Not a klass pointer"); |
1916 | return (TypeAryKlassPtr*)this; |
1917 | } |
1918 | |
1919 | inline const TypePtr* Type::make_ptr() const { |
1920 | return (_base == NarrowOop) ? is_narrowoop()->get_ptrtype() : |
1921 | ((_base == NarrowKlass) ? is_narrowklass()->get_ptrtype() : |
1922 | isa_ptr()); |
1923 | } |
1924 | |
1925 | inline const TypeOopPtr* Type::make_oopptr() const { |
1926 | return (_base == NarrowOop) ? is_narrowoop()->get_ptrtype()->isa_oopptr() : isa_oopptr(); |
1927 | } |
1928 | |
1929 | inline const TypeNarrowOop* Type::make_narrowoop() const { |
1930 | return (_base == NarrowOop) ? is_narrowoop() : |
1931 | (isa_ptr() ? TypeNarrowOop::make(is_ptr()) : NULL); |
1932 | } |
1933 | |
1934 | inline const TypeNarrowKlass* Type::make_narrowklass() const { |
1935 | return (_base == NarrowKlass) ? is_narrowklass() : |
1936 | (isa_ptr() ? TypeNarrowKlass::make(is_ptr()) : NULL); |
1937 | } |
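The make_ptr()/make_oopptr()/make_narrowoop()/make_narrowklass() helpers above translate between the compressed (narrow) and full-width views of a pointer type, passing unrelated types through as NULL. A small sketch (hypothetical helper, not from this file) of the common "widen if compressed" idiom, spelled out the long way to show what make_oopptr() does:

  static const TypeOopPtr* widen_if_oop(const Type* t) {
    const TypeNarrowOop* narrow = t->isa_narrowoop();
    const TypeOopPtr* wide = (narrow != NULL) ? narrow->get_ptrtype()->isa_oopptr()
                                              : t->isa_oopptr();
    return wide;  // equivalent to t->make_oopptr(); NULL for ints, floats, raw pointers, ...
  }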
1938 | |
1939 | inline bool Type::is_floatingpoint() const { |
1940 | if( (_base == FloatCon) || (_base == FloatBot) || |
1941 | (_base == DoubleCon) || (_base == DoubleBot) ) |
1942 | return true; |
1943 | return false; |
1944 | } |
1945 | |
1946 | inline bool Type::is_ptr_to_boxing_obj() const { |
1947 | const TypeInstPtr* tp = isa_instptr(); |
1948 | return (tp != NULL) && (tp->offset() == 0) && |
1949 | tp->klass()->is_instance_klass() && |
1950 | tp->klass()->as_instance_klass()->is_box_klass(); |
1951 | } |
1952 | |
1953 | |
1954 | // =============================================================== |
1955 | // Things that need to be 64-bits in the 64-bit build but |
1956 | // 32-bits in the 32-bit build. Done this way to get full |
1957 | // optimization AND strong typing. |
1958 | #ifdef _LP64 |
1959 | |
1960 | // For type queries and asserts |
1961 | #define is_intptr_t is_long |
1962 | #define isa_intptr_t isa_long |
1963 | #define find_intptr_t_type find_long_type |
1964 | #define find_intptr_t_con find_long_con |
1965 | #define TypeX TypeLong |
1966 | #define Type_X Type::Long |
1967 | #define TypeX_X TypeLong::LONG |
1968 | #define TypeX_ZERO TypeLong::ZERO |
1969 | // For 'ideal_reg' machine registers |
1970 | #define Op_RegX Op_RegL |
1971 | // For phase->intcon variants |
1972 | #define MakeConX longcon |
1973 | #define ConXNode ConLNode |
1974 | // For array index arithmetic |
1975 | #define MulXNode MulLNode |
1976 | #define AndXNode AndLNode |
1977 | #define OrXNode OrLNode |
1978 | #define CmpXNode CmpLNode |
1979 | #define SubXNode SubLNode |
1980 | #define LShiftXNode LShiftLNode |
1981 | // For object size computation: |
1982 | #define AddXNode AddLNode |
1983 | #define RShiftXNode RShiftLNode |
1984 | // For card marks and hashcodes |
1985 | #define URShiftXNode URShiftLNode |
1986 | // For shenandoahSupport |
1987 | #define LoadXNode LoadLNode |
1988 | #define StoreXNode StoreLNode |
1989 | // Opcodes |
1990 | #define Op_LShiftX Op_LShiftL |
1991 | #define Op_AndX Op_AndL |
1992 | #define Op_AddX Op_AddL |
1993 | #define Op_SubX Op_SubL |
1994 | #define Op_XorX Op_XorL |
1995 | #define Op_URShiftX Op_URShiftL |
1996 | #define Op_LoadX Op_LoadL |
1997 | // conversions |
1998 | #define ConvI2X(x) ConvI2L(x) |
1999 | #define ConvL2X(x) (x) |
2000 | #define ConvX2I(x) ConvL2I(x) |
2001 | #define ConvX2L(x) (x) |
2002 | #define ConvX2UL(x) (x) |
2003 | |
2004 | #else |
2005 | |
2006 | // For type queries and asserts |
2007 | #define is_intptr_t is_int |
2008 | #define isa_intptr_t isa_int |
2009 | #define find_intptr_t_type find_int_type |
2010 | #define find_intptr_t_con find_int_con |
2011 | #define TypeX TypeInt |
2012 | #define Type_X Type::Int |
2013 | #define TypeX_X TypeInt::INT |
2014 | #define TypeX_ZERO TypeInt::ZERO |
2015 | // For 'ideal_reg' machine registers |
2016 | #define Op_RegX Op_RegI |
2017 | // For phase->intcon variants |
2018 | #define MakeConX intcon |
2019 | #define ConXNode ConINode |
2020 | // For array index arithmetic |
2021 | #define MulXNode MulINode |
2022 | #define AndXNode AndINode |
2023 | #define OrXNode OrINode |
2024 | #define CmpXNode CmpINode |
2025 | #define SubXNode SubINode |
2026 | #define LShiftXNode LShiftINode |
2027 | // For object size computation: |
2028 | #define AddXNode AddINode |
2029 | #define RShiftXNode RShiftINode |
2030 | // For card marks and hashcodes |
2031 | #define URShiftXNode URShiftINode |
2032 | // For shenandoahSupport |
2033 | #define LoadXNode LoadINode |
2034 | #define StoreXNode StoreINode |
2035 | // Opcodes |
2036 | #define Op_LShiftX Op_LShiftI |
2037 | #define Op_AndX Op_AndI |
2038 | #define Op_AddX Op_AddI |
2039 | #define Op_SubX Op_SubI |
2040 | #define Op_XorX Op_XorI |
2041 | #define Op_URShiftX Op_URShiftI |
2042 | #define Op_LoadX Op_LoadI |
2043 | // conversions |
2044 | #define ConvI2X(x) (x) |
2045 | #define ConvL2X(x) ConvL2I(x) |
2046 | #define ConvX2I(x) (x) |
2047 | #define ConvX2L(x) ConvI2L(x) |
2048 | #define ConvX2UL(x) ConvI2UL(x) |
2049 | |
2050 | #endif |
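The comment introducing this block explains the intent: pointer-width arithmetic is written once against the X-suffixed names and compiles to long-width nodes on 64-bit builds and int-width nodes on 32-bit builds. A hedged sketch of how such width-agnostic code typically reads (the helper and its parameter names are hypothetical; phase is a PhaseGVN*):

  // byte offset = header_size + (idx << log2_scale), computed at pointer width
  static Node* byte_offset(PhaseGVN* phase, Node* idx, int log2_scale, intptr_t header_size) {
    Node* scaled = phase->transform(new LShiftXNode(idx, phase->intcon(log2_scale)));
    return phase->transform(new AddXNode(scaled, phase->MakeConX(header_size)));
  }

Note that the shift count stays an int node on both builds; only the value-carrying operands change width, which is exactly what the X macros provide.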
2051 | |
2052 | #endif // SHARE_OPTO_TYPE_HPP |