File: jdk/src/hotspot/share/opto/graphKit.cpp
Warning: line 3890, column 5: Value stored to 'header_size_min' is never read
/*
 * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciUtilities.hpp"
#include "classfile/javaClasses.hpp"
#include "ci/ciNativeEntryPoint.hpp"
#include "ci/ciObjArray.hpp"
#include "asm/register.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/intrinsicnode.hpp"
#include "opto/locknode.hpp"
#include "opto/machnode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subtypenode.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/powerOfTwo.hpp"
#include "utilities/growableArray.hpp"

//----------------------------GraphKit-----------------------------------------
// Main utility constructor.
GraphKit::GraphKit(JVMState* jvms)
  : Phase(Phase::Parser),
    _env(C->env()),
    _gvn(*C->initial_gvn()),
    _barrier_set(BarrierSet::barrier_set()->barrier_set_c2())
{
  _exceptions = jvms->map()->next_exception();
  if (_exceptions != NULL) jvms->map()->set_next_exception(NULL);
  set_jvms(jvms);
}

// Private constructor for parser.
GraphKit::GraphKit()
  : Phase(Phase::Parser),
    _env(C->env()),
    _gvn(*C->initial_gvn()),
    _barrier_set(BarrierSet::barrier_set()->barrier_set_c2())
{
  _exceptions = NULL;
  set_map(NULL);
  debug_only(_sp = -99);
  debug_only(set_bci(-99));
}



//---------------------------clean_stack---------------------------------------
// Clear away rubbish from the stack area of the JVM state.
// This destroys any arguments that may be waiting on the stack.
void GraphKit::clean_stack(int from_sp) {
  SafePointNode* map = this->map();
  JVMState* jvms = this->jvms();
  int stk_size = jvms->stk_size();
  int stkoff = jvms->stkoff();
  Node* top = this->top();
  for (int i = from_sp; i < stk_size; i++) {
    if (map->in(stkoff + i) != top) {
      map->set_req(stkoff + i, top);
    }
  }
}


//--------------------------------sync_jvms-----------------------------------
// Make sure our current jvms agrees with our parse state.
JVMState* GraphKit::sync_jvms() const {
  JVMState* jvms = this->jvms();
  jvms->set_bci(bci());  // Record the new bci in the JVMState
  jvms->set_sp(sp());    // Record the new sp in the JVMState
  assert(jvms_in_sync(), "jvms is now in sync");
  return jvms;
}

//--------------------------------sync_jvms_for_reexecute---------------------
// Make sure our current jvms agrees with our parse state. This version
// uses the reexecute_sp for reexecuting bytecodes.
JVMState* GraphKit::sync_jvms_for_reexecute() {
  JVMState* jvms = this->jvms();
  jvms->set_bci(bci());          // Record the new bci in the JVMState
  jvms->set_sp(reexecute_sp());  // Record the new sp in the JVMState
  return jvms;
}

#ifdef ASSERT
bool GraphKit::jvms_in_sync() const {
  Parse* parse = is_Parse();
  if (parse == NULL) {
    if (bci() != jvms()->bci()) return false;
    if (sp() != (int)jvms()->sp()) return false;
    return true;
  }
  if (jvms()->method() != parse->method()) return false;
  if (jvms()->bci() != parse->bci()) return false;
  int jvms_sp = jvms()->sp();
  if (jvms_sp != parse->sp()) return false;
  int jvms_depth = jvms()->depth();
  if (jvms_depth != parse->depth()) return false;
  return true;
}

// Local helper checks for special internal merge points
// used to accumulate and merge exception states.
// They are marked by the region's in(0) edge being the root node.
// Such merge points must never "escape" into the parser at large,
// until they have been handed to gvn.transform.
static bool is_hidden_merge(Node* reg) {
  if (reg == NULL) return false;
  if (reg->is_Phi()) {
    reg = reg->in(0);
    if (reg == NULL) return false;
  }
  return reg->is_Region() && reg->in(0) != NULL && reg->in(0)->is_Root();
}
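
// A minimal sketch of the marking protocol that is_hidden_merge() checks for,
// as set up in combine_exception_states() and undone in use_exception_state()
// further below:
//
//   RegionNode* region = new RegionNode(2);
//   region->set_req(0, root());      // in(0) == root marks a hidden ex-state
//   // ... phis accumulate extra inputs on the region ...
//   region->set_req(0, region);      // unmark: now an ordinary region
//   set_control(_gvn.transform(region));
//
// Only after the mark is cleared and the region transformed may it escape
// into the parser at large.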

void GraphKit::verify_map() const {
  if (map() == NULL) return; // null map is OK
  assert(map()->req() <= jvms()->endoff(), "no extra garbage on map");
  assert(!map()->has_exceptions(), "call add_exception_states_from 1st");
  assert(!is_hidden_merge(control()), "call use_exception_state, not set_map");
}

void GraphKit::verify_exception_state(SafePointNode* ex_map) {
  assert(ex_map->next_exception() == NULL, "not already part of a chain");
  assert(has_saved_ex_oop(ex_map), "every exception state has an ex_oop");
}
#endif

//---------------------------stop_and_kill_map---------------------------------
// Set _map to NULL, signalling a stop to further bytecode execution.
// First smash the current map's control to a constant, to mark it dead.
void GraphKit::stop_and_kill_map() {
  SafePointNode* dead_map = stop();
  if (dead_map != NULL) {
    dead_map->disconnect_inputs(C); // Mark the map as killed.
    assert(dead_map->is_killed(), "must be so marked");
  }
}


//--------------------------------stopped--------------------------------------
// Tell if _map is NULL, or control is top.
bool GraphKit::stopped() {
  if (map() == NULL) return true;
  else if (control() == top()) return true;
  else return false;
}


//-----------------------------has_ex_handler----------------------------------
// Tell if this method or any caller method has exception handlers.
bool GraphKit::has_ex_handler() {
  for (JVMState* jvmsp = jvms(); jvmsp != NULL; jvmsp = jvmsp->caller()) {
    if (jvmsp->has_method() && jvmsp->method()->has_exception_handlers()) {
      return true;
    }
  }
  return false;
}

//------------------------------save_ex_oop------------------------------------
// Save an exception without blowing stack contents or other JVM state.
void GraphKit::set_saved_ex_oop(SafePointNode* ex_map, Node* ex_oop) {
  assert(!has_saved_ex_oop(ex_map), "clear ex-oop before setting again");
  ex_map->add_req(ex_oop);
  debug_only(verify_exception_state(ex_map));
}

inline static Node* common_saved_ex_oop(SafePointNode* ex_map, bool clear_it) {
  assert(GraphKit::has_saved_ex_oop(ex_map), "ex_oop must be there");
  Node* ex_oop = ex_map->in(ex_map->req()-1);
  if (clear_it) ex_map->del_req(ex_map->req()-1);
  return ex_oop;
}

//-----------------------------saved_ex_oop------------------------------------
// Recover a saved exception from its map.
Node* GraphKit::saved_ex_oop(SafePointNode* ex_map) {
  return common_saved_ex_oop(ex_map, false);
}

//--------------------------clear_saved_ex_oop---------------------------------
// Erase a previously saved exception from its map.
Node* GraphKit::clear_saved_ex_oop(SafePointNode* ex_map) {
  return common_saved_ex_oop(ex_map, true);
}

#ifdef ASSERT
//---------------------------has_saved_ex_oop----------------------------------
// Test whether an exception has been saved in this map.
bool GraphKit::has_saved_ex_oop(SafePointNode* ex_map) {
  return ex_map->req() == ex_map->jvms()->endoff()+1;
}
#endif
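
// Layout sketch for the saved-ex_oop convention above: for a map whose
// jvms()->endoff() == E, the edges are
//
//   in(0) .. in(E-1)   ordinary JVMState edges (locals, stack, monitors, ...)
//   in(E)              the saved ex_oop, so req() == E + 1
//
// set_saved_ex_oop() appends that one extra edge, common_saved_ex_oop() reads
// (and optionally deletes) it, and has_saved_ex_oop() tests req() == endoff()+1.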

//-------------------------make_exception_state--------------------------------
// Turn the current JVM state into an exception state, appending the ex_oop.
SafePointNode* GraphKit::make_exception_state(Node* ex_oop) {
  sync_jvms();
  SafePointNode* ex_map = stop();  // do not manipulate this map any more
  set_saved_ex_oop(ex_map, ex_oop);
  return ex_map;
}


//--------------------------add_exception_state--------------------------------
// Add an exception to my list of exceptions.
void GraphKit::add_exception_state(SafePointNode* ex_map) {
  if (ex_map == NULL || ex_map->control() == top()) {
    return;
  }
#ifdef ASSERT
  verify_exception_state(ex_map);
  if (has_exceptions()) {
    assert(ex_map->jvms()->same_calls_as(_exceptions->jvms()), "all collected exceptions must come from the same place");
  }
#endif

  // If there is already an exception of exactly this type, merge with it.
  // In particular, null-checks and other low-level exceptions common up here.
  Node* ex_oop = saved_ex_oop(ex_map);
  const Type* ex_type = _gvn.type(ex_oop);
  if (ex_oop == top()) {
    // No action needed.
    return;
  }
  assert(ex_type->isa_instptr(), "exception must be an instance");
  for (SafePointNode* e2 = _exceptions; e2 != NULL; e2 = e2->next_exception()) {
    const Type* ex_type2 = _gvn.type(saved_ex_oop(e2));
    // We check sp also because call bytecodes can generate exceptions
    // both before and after arguments are popped!
    if (ex_type2 == ex_type
        && e2->_jvms->sp() == ex_map->_jvms->sp()) {
      combine_exception_states(ex_map, e2);
      return;
    }
  }

  // No pre-existing exception of the same type. Chain it on the list.
  push_exception_state(ex_map);
}
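
// A short usage sketch (ex_node here is a placeholder for whatever exception
// oop the caller has in hand): a throw site typically freezes the current
// state and files it for later processing:
//
//   SafePointNode* ex_map = make_exception_state(ex_node); // stops this map
//   add_exception_state(ex_map); // merges with a same-type, same-sp state,
//                                // or else chains a new one on _exceptions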

//-----------------------add_exception_states_from-----------------------------
void GraphKit::add_exception_states_from(JVMState* jvms) {
  SafePointNode* ex_map = jvms->map()->next_exception();
  if (ex_map != NULL) {
    jvms->map()->set_next_exception(NULL);
    for (SafePointNode* next_map; ex_map != NULL; ex_map = next_map) {
      next_map = ex_map->next_exception();
      ex_map->set_next_exception(NULL);
      add_exception_state(ex_map);
    }
  }
}

//-----------------------transfer_exceptions_into_jvms-------------------------
JVMState* GraphKit::transfer_exceptions_into_jvms() {
  if (map() == NULL) {
    // We need a JVMS to carry the exceptions, but the map has gone away.
    // Create a scratch JVMS, cloned from any of the exception states...
    if (has_exceptions()) {
      _map = _exceptions;
      _map = clone_map();
      _map->set_next_exception(NULL);
      clear_saved_ex_oop(_map);
      debug_only(verify_map());
    } else {
      // ...or created from scratch
      JVMState* jvms = new (C) JVMState(_method, NULL);
      jvms->set_bci(_bci);
      jvms->set_sp(_sp);
      jvms->set_map(new SafePointNode(TypeFunc::Parms, jvms));
      set_jvms(jvms);
      for (uint i = 0; i < map()->req(); i++) map()->init_req(i, top());
      set_all_memory(top());
      while (map()->req() < jvms->endoff()) map()->add_req(top());
    }
    // (This is a kludge, in case you didn't notice.)
    set_control(top());
  }
  JVMState* jvms = sync_jvms();
  assert(!jvms->map()->has_exceptions(), "no exceptions on this map yet");
  jvms->map()->set_next_exception(_exceptions);
  _exceptions = NULL; // done with this set of exceptions
  return jvms;
}
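
// A minimal hand-off sketch, assuming a second kit ("caller_kit") that should
// inherit this kit's pending exceptions:
//
//   JVMState* jvms = kit.transfer_exceptions_into_jvms(); // chain onto map
//   // ...
//   caller_kit.add_exception_states_from(jvms);           // unchain, re-file
//
// add_exception_states_from() above is the matching consumer: it detaches
// each state from the map's exception list and re-adds it one at a time.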

static inline void add_n_reqs(Node* dstphi, Node* srcphi) {
  assert(is_hidden_merge(dstphi), "must be a special merge node");
  assert(is_hidden_merge(srcphi), "must be a special merge node");
  uint limit = srcphi->req();
  for (uint i = PhiNode::Input; i < limit; i++) {
    dstphi->add_req(srcphi->in(i));
  }
}
static inline void add_one_req(Node* dstphi, Node* src) {
  assert(is_hidden_merge(dstphi), "must be a special merge node");
  assert(!is_hidden_merge(src), "must not be a special merge node");
  dstphi->add_req(src);
}

//-----------------------combine_exception_states------------------------------
// This helper function combines exception states by building phis on a
// specially marked state-merging region. These regions and phis are
// untransformed, and can build up gradually. The region is marked by
// having a control input of the root node, rather than NULL. Such
// regions do not appear except in this function, and in use_exception_state.
void GraphKit::combine_exception_states(SafePointNode* ex_map, SafePointNode* phi_map) {
  if (failing()) return; // dying anyway...
  JVMState* ex_jvms = ex_map->_jvms;
  assert(ex_jvms->same_calls_as(phi_map->_jvms), "consistent call chains");
  assert(ex_jvms->stkoff() == phi_map->_jvms->stkoff(), "matching locals");
  assert(ex_jvms->sp() == phi_map->_jvms->sp(), "matching stack sizes");
  assert(ex_jvms->monoff() == phi_map->_jvms->monoff(), "matching JVMS");
  assert(ex_jvms->scloff() == phi_map->_jvms->scloff(), "matching scalar replaced objects");
  assert(ex_map->req() == phi_map->req(), "matching maps");
  uint tos = ex_jvms->stkoff() + ex_jvms->sp();
  Node* hidden_merge_mark = root();
  Node* region = phi_map->control();
  MergeMemNode* phi_mem = phi_map->merged_memory();
  MergeMemNode* ex_mem = ex_map->merged_memory();
  if (region->in(0) != hidden_merge_mark) {
    // The control input is not (yet) a specially-marked region in phi_map.
    // Make it so, and build some phis.
    region = new RegionNode(2);
    _gvn.set_type(region, Type::CONTROL);
    region->set_req(0, hidden_merge_mark); // marks an internal ex-state
    region->init_req(1, phi_map->control());
    phi_map->set_control(region);
    Node* io_phi = PhiNode::make(region, phi_map->i_o(), Type::ABIO);
    record_for_igvn(io_phi);
    _gvn.set_type(io_phi, Type::ABIO);
    phi_map->set_i_o(io_phi);
    for (MergeMemStream mms(phi_mem); mms.next_non_empty(); ) {
      Node* m = mms.memory();
      Node* m_phi = PhiNode::make(region, m, Type::MEMORY, mms.adr_type(C));
      record_for_igvn(m_phi);
      _gvn.set_type(m_phi, Type::MEMORY);
      mms.set_memory(m_phi);
    }
  }

  // Either or both of phi_map and ex_map might already be converted into phis.
  Node* ex_control = ex_map->control();
  // if there is special marking on ex_map also, we add multiple edges from src
  bool add_multiple = (ex_control->in(0) == hidden_merge_mark);
  // how wide was the destination phi_map, originally?
  uint orig_width = region->req();

  if (add_multiple) {
    add_n_reqs(region, ex_control);
    add_n_reqs(phi_map->i_o(), ex_map->i_o());
  } else {
    // ex_map has no merges, so we just add single edges everywhere
    add_one_req(region, ex_control);
    add_one_req(phi_map->i_o(), ex_map->i_o());
  }
  for (MergeMemStream mms(phi_mem, ex_mem); mms.next_non_empty2(); ) {
    if (mms.is_empty()) {
      // get a copy of the base memory, and patch some inputs into it
      const TypePtr* adr_type = mms.adr_type(C);
      Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
      assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
      mms.set_memory(phi);
      // Prepare to append interesting stuff onto the newly sliced phi:
      while (phi->req() > orig_width) phi->del_req(phi->req()-1);
    }
    // Append stuff from ex_map:
    if (add_multiple) {
      add_n_reqs(mms.memory(), mms.memory2());
    } else {
      add_one_req(mms.memory(), mms.memory2());
    }
  }
  uint limit = ex_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip everything in the JVMS after tos. (The ex_oop follows.)
    if (i == tos) i = ex_jvms->monoff();
    Node* src = ex_map->in(i);
    Node* dst = phi_map->in(i);
    if (src != dst) {
      PhiNode* phi;
      if (dst->in(0) != region) {
        dst = phi = PhiNode::make(region, dst, _gvn.type(dst));
        record_for_igvn(phi);
        _gvn.set_type(phi, phi->type());
        phi_map->set_req(i, dst);
        // Prepare to append interesting stuff onto the new phi:
        while (dst->req() > orig_width) dst->del_req(dst->req()-1);
      } else {
        assert(dst->is_Phi(), "nobody else uses a hidden region");
        phi = dst->as_Phi();
      }
      if (add_multiple && src->in(0) == ex_control) {
        // Both are phis.
        add_n_reqs(dst, src);
      } else {
        while (dst->req() < region->req()) add_one_req(dst, src);
      }
      const Type* srctype = _gvn.type(src);
      if (phi->type() != srctype) {
        const Type* dsttype = phi->type()->meet_speculative(srctype);
        if (phi->type() != dsttype) {
          phi->set_type(dsttype);
          _gvn.set_type(phi, dsttype);
        }
      }
    }
  }
  phi_map->merge_replaced_nodes_with(ex_map);
}
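
// A worked mini-example of the merge above (illustrative widths only).
// Suppose phi_map is still unmarked and ex_map is a plain state. The first
// combine builds
//
//   region = Region(root, phi_map_ctrl, ex_ctrl)   // in(0) == root marks it
//   i_o    = Phi(region, phi_map_io, ex_io)
//
// plus a Phi(region, dst, src) for every JVMS slot where the maps disagree
// (orig_width == 2 here). A second same-type, same-sp ex_map then appends
// one more input to the region and to each phi, so the hidden region grows
// by exactly one edge per combined exception state.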

//--------------------------use_exception_state--------------------------------
Node* GraphKit::use_exception_state(SafePointNode* phi_map) {
  if (failing()) { stop(); return top(); }
  Node* region = phi_map->control();
  Node* hidden_merge_mark = root();
  assert(phi_map->jvms()->map() == phi_map, "sanity: 1-1 relation");
  Node* ex_oop = clear_saved_ex_oop(phi_map);
  if (region->in(0) == hidden_merge_mark) {
    // Special marking for internal ex-states. Process the phis now.
    region->set_req(0, region); // now it's an ordinary region
    set_jvms(phi_map->jvms());  // ...so now we can use it as a map
    // Note: Setting the jvms also sets the bci and sp.
    set_control(_gvn.transform(region));
    uint tos = jvms()->stkoff() + sp();
    for (uint i = 1; i < tos; i++) {
      Node* x = phi_map->in(i);
      if (x->in(0) == region) {
        assert(x->is_Phi(), "expected a special phi");
        phi_map->set_req(i, _gvn.transform(x));
      }
    }
    for (MergeMemStream mms(merged_memory()); mms.next_non_empty(); ) {
      Node* x = mms.memory();
      if (x->in(0) == region) {
        assert(x->is_Phi(), "nobody else uses a hidden region");
        mms.set_memory(_gvn.transform(x));
      }
    }
    if (ex_oop->in(0) == region) {
      assert(ex_oop->is_Phi(), "expected a special phi");
      ex_oop = _gvn.transform(ex_oop);
    }
  } else {
    set_jvms(phi_map->jvms());
  }

  assert(!is_hidden_merge(phi_map->control()), "hidden ex. states cleared");
  assert(!is_hidden_merge(phi_map->i_o()), "hidden ex. states cleared");
  return ex_oop;
}

//---------------------------------java_bc-------------------------------------
Bytecodes::Code GraphKit::java_bc() const {
  ciMethod* method = this->method();
  int bci = this->bci();
  if (method != NULL && bci != InvocationEntryBci)
    return method->java_code_at_bci(bci);
  else
    return Bytecodes::_illegal;
}

void GraphKit::uncommon_trap_if_should_post_on_exceptions(Deoptimization::DeoptReason reason,
                                                          bool must_throw) {
  // if the exception capability is set, then we will generate code
  // to check the JavaThread.should_post_on_exceptions flag to see
  // if we actually need to report exception events (for this
  // thread). If we don't need to report exception events, we will
  // take the normal fast path provided by add_exception_events. If
  // exception event reporting is enabled for this thread, we will
  // take the uncommon_trap in the BuildCutout below.

  // first must access the should_post_on_exceptions_flag in this thread's JavaThread
  Node* jthread = _gvn.transform(new ThreadLocalNode());
  Node* adr = basic_plus_adr(top(), jthread, in_bytes(JavaThread::should_post_on_exceptions_flag_offset()));
  Node* should_post_flag = make_load(control(), adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw, MemNode::unordered);

  // Test the should_post_on_exceptions_flag vs. 0
  Node* chk = _gvn.transform( new CmpINode(should_post_flag, intcon(0)) );
  Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::eq) );

  // Branch to slow_path if should_post_on_exceptions_flag was true
  { BuildCutout unless(this, tst, PROB_MAX);
    // Do not try anything fancy if we're notifying the VM on every throw.
    // Cf. case Bytecodes::_athrow in parse2.cpp.
    uncommon_trap(reason, Deoptimization::Action_none,
                  (ciKlass*)NULL, (char*)NULL, must_throw);
  }

}
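
// The load-test-cutout shape above generalizes to any per-thread int flag; a
// hedged sketch, where some_flag_offset() and reason stand in for a real
// offset helper and deopt reason:
//
//   Node* jthread = _gvn.transform(new ThreadLocalNode());
//   Node* adr     = basic_plus_adr(top(), jthread, in_bytes(some_flag_offset()));
//   Node* flag    = make_load(control(), adr, TypeInt::INT, T_INT,
//                             Compile::AliasIdxRaw, MemNode::unordered);
//   Node* chk = _gvn.transform(new CmpINode(flag, intcon(0)));
//   Node* tst = _gvn.transform(new BoolNode(chk, BoolTest::eq));
//   { BuildCutout unless(this, tst, PROB_MAX);
//     // reached only when the flag is nonzero; the body must end stopped()
//     uncommon_trap(reason, Deoptimization::Action_none,
//                   (ciKlass*)NULL, (char*)NULL, false /*must_throw*/);
//   }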

//------------------------------builtin_throw----------------------------------
void GraphKit::builtin_throw(Deoptimization::DeoptReason reason, Node* arg) {
  bool must_throw = true;

  // If this particular condition has not yet happened at this
  // bytecode, then use the uncommon trap mechanism, and allow for
  // a future recompilation if several traps occur here.
  // If the throw is hot, try to use a more complicated inline mechanism
  // which keeps execution inside the compiled code.
  bool treat_throw_as_hot = false;
  ciMethodData* md = method()->method_data();

  if (ProfileTraps) {
    if (too_many_traps(reason)) {
      treat_throw_as_hot = true;
    }
    // (If there is no MDO at all, assume it is early in
    // execution, and that any deopts are part of the
    // startup transient, and don't need to be remembered.)

    // Also, if there is a local exception handler, treat all throws
    // as hot if there has been at least one in this method.
    if (C->trap_count(reason) != 0
        && method()->method_data()->trap_count(reason) != 0
        && has_ex_handler()) {
      treat_throw_as_hot = true;
    }
  }

  // If this throw happens frequently, an uncommon trap might cause
  // a performance pothole. If there is a local exception handler,
  // and if this particular bytecode appears to be deoptimizing often,
  // let us handle the throw inline, with a preconstructed instance.
  // Note: If the deopt count has blown up, the uncommon trap
  // runtime is going to flush this nmethod, no matter what.
  if (treat_throw_as_hot && method()->can_omit_stack_trace()) {
    // If the throw is local, we use a pre-existing instance and
    // punt on the backtrace. This would lead to a missing backtrace
    // (a repeat of 4292742) if the backtrace object is ever asked
    // for its backtrace.
    // Fixing this remaining case of 4292742 requires some flavor of
    // escape analysis. Leave that for the future.
    ciInstance* ex_obj = NULL;
    switch (reason) {
    case Deoptimization::Reason_null_check:
      ex_obj = env()->NullPointerException_instance();
      break;
    case Deoptimization::Reason_div0_check:
      ex_obj = env()->ArithmeticException_instance();
      break;
    case Deoptimization::Reason_range_check:
      ex_obj = env()->ArrayIndexOutOfBoundsException_instance();
      break;
    case Deoptimization::Reason_class_check:
      ex_obj = env()->ClassCastException_instance();
      break;
    case Deoptimization::Reason_array_check:
      ex_obj = env()->ArrayStoreException_instance();
      break;
    default:
      break;
    }
    if (failing()) { stop(); return; }  // exception allocation might fail
    if (ex_obj != NULL) {
      if (env()->jvmti_can_post_on_exceptions()) {
        // check if we must post exception events, take uncommon trap if so
        uncommon_trap_if_should_post_on_exceptions(reason, must_throw);
        // here if should_post_on_exceptions is false
        // continue on with the normal codegen
      }

      // Cheat with a preallocated exception object.
      if (C->log() != NULL)
        C->log()->elem("hot_throw preallocated='1' reason='%s'",
                       Deoptimization::trap_reason_name(reason));
      const TypeInstPtr* ex_con = TypeInstPtr::make(ex_obj);
      Node* ex_node = _gvn.transform(ConNode::make(ex_con));

      // Clear the detail message of the preallocated exception object.
      // Weblogic sometimes mutates the detail message of exceptions
      // using reflection.
      int offset = java_lang_Throwable::get_detailMessage_offset();
      const TypePtr* adr_typ = ex_con->add_offset(offset);

      Node *adr = basic_plus_adr(ex_node, ex_node, offset);
      const TypeOopPtr* val_type = TypeOopPtr::make_from_klass(env()->String_klass());
      Node *store = access_store_at(ex_node, adr, adr_typ, null(), val_type, T_OBJECT, IN_HEAP);

      add_exception_state(make_exception_state(ex_node));
      return;
    }
  }

  // %%% Maybe add entry to OptoRuntime which directly throws the exc.?
  // It won't be much cheaper than bailing to the interp., since we'll
  // have to pass up all the debug-info, and the runtime will have to
  // create the stack trace.

  // Usual case: Bail to interpreter.
  // Reserve the right to recompile if we haven't seen anything yet.

  ciMethod* m = Deoptimization::reason_is_speculate(reason) ? C->method() : NULL;
  Deoptimization::DeoptAction action = Deoptimization::Action_maybe_recompile;
  if (treat_throw_as_hot
      && (method()->method_data()->trap_recompiled_at(bci(), m)
          || C->too_many_traps(reason))) {
    // We cannot afford to take more traps here. Suffer in the interpreter.
    if (C->log() != NULL)
      C->log()->elem("hot_throw preallocated='0' reason='%s' mcount='%d'",
                     Deoptimization::trap_reason_name(reason),
                     C->trap_count(reason));
    action = Deoptimization::Action_none;
  }

  // "must_throw" prunes the JVM state to include only the stack, if there
  // are no local exception handlers. This should cut down on register
  // allocation time and code size, by drastically reducing the number
  // of in-edges on the call to the uncommon trap.

  uncommon_trap(reason, action, (ciKlass*)NULL, (char*)NULL, must_throw);
}
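
// Summarizing builtin_throw()'s decision: the throw is inlined with a
// preallocated, message-less exception instance only when (1) profiling
// marks the trap as hot (too_many_traps, or a prior trap plus a local
// handler), (2) the method may omit stack traces, and (3) the deopt reason
// maps to one of the preallocated instances in the switch above. Otherwise
// it falls back to an uncommon trap, downgraded to Action_none once this
// bci has already been recompiled for the same reason.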


//----------------------------PreserveJVMState---------------------------------
PreserveJVMState::PreserveJVMState(GraphKit* kit, bool clone_map) {
  debug_only(kit->verify_map());
  _kit = kit;
  _map = kit->map();  // preserve the map
  _sp = kit->sp();
  kit->set_map(clone_map ? kit->clone_map() : NULL);
#ifdef ASSERT
  _bci = kit->bci();
  Parse* parser = kit->is_Parse();
  int block = (parser == NULL || parser->block() == NULL) ? -1 : parser->block()->rpo();
  _block = block;
#endif
}
PreserveJVMState::~PreserveJVMState() {
  GraphKit* kit = _kit;
#ifdef ASSERT
  assert(kit->bci() == _bci, "bci must not shift");
  Parse* parser = kit->is_Parse();
  int block = (parser == NULL || parser->block() == NULL) ? -1 : parser->block()->rpo();
  assert(block == _block, "block must not shift");
#endif
  kit->set_map(_map);
  kit->set_sp(_sp);
}


//-----------------------------BuildCutout-------------------------------------
BuildCutout::BuildCutout(GraphKit* kit, Node* p, float prob, float cnt)
  : PreserveJVMState(kit)
{
  assert(p->is_Con() || p->is_Bool(), "test must be a bool");
  SafePointNode* outer_map = _map;   // preserved map is caller's
  SafePointNode* inner_map = kit->map();
  IfNode* iff = kit->create_and_map_if(outer_map->control(), p, prob, cnt);
  outer_map->set_control(kit->gvn().transform( new IfTrueNode(iff) ));
  inner_map->set_control(kit->gvn().transform( new IfFalseNode(iff) ));
}
BuildCutout::~BuildCutout() {
  GraphKit* kit = _kit;
  assert(kit->stopped(), "cutout code must stop, throw, return, etc.");
}
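
// A usage sketch of the two RAII helpers above, mirroring the pattern in
// uncommon_trap_if_should_post_on_exceptions():
//
//   { PreserveJVMState pjvms(this);   // save map and sp around a side path
//     // ... emit code on a cloned map ...
//   }                                 // original map and sp restored here
//
//   { BuildCutout unless(this, tst, PROB_MAX);
//     // control here is the unlikely (tst-false) branch; the body must end
//     // stopped(), e.g. via uncommon_trap() or a throw
//   }                                 // caller continues on the likely branch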

//---------------------------PreserveReexecuteState----------------------------
PreserveReexecuteState::PreserveReexecuteState(GraphKit* kit) {
  assert(!kit->stopped(), "must call stopped() before");
  _kit = kit;
  _sp = kit->sp();
  _reexecute = kit->jvms()->_reexecute;
}
PreserveReexecuteState::~PreserveReexecuteState() {
  if (_kit->stopped()) return;
  _kit->jvms()->_reexecute = _reexecute;
  _kit->set_sp(_sp);
}
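
// A hedged sketch of PreserveReexecuteState, assuming an intrinsic-style
// caller that wants the current bytecode re-executed if a deopt happens
// while its arguments are pushed back on the stack:
//
//   { PreserveReexecuteState preexecs(this);
//     jvms()->set_should_reexecute(true);
//     // ... push arguments back, emit possibly-deoptimizing code ...
//   }  // _reexecute and sp restored, unless the kit has stopped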

//------------------------------clone_map--------------------------------------
// Implementation of PreserveJVMState
//
// Only clone_map(...) here. If this function is only used in the
// PreserveJVMState class we may want to get rid of this extra
// function eventually and do it all there.

SafePointNode* GraphKit::clone_map() {
  if (map() == NULL) return NULL;

  // Clone the memory edge first
  Node* mem = MergeMemNode::make(map()->memory());
  gvn().set_type_bottom(mem);

  SafePointNode *clonemap = (SafePointNode*)map()->clone();
  JVMState* jvms = this->jvms();
  JVMState* clonejvms = jvms->clone_shallow(C);
  clonemap->set_memory(mem);
  clonemap->set_jvms(clonejvms);
  clonejvms->set_map(clonemap);
  record_for_igvn(clonemap);
  gvn().set_type_bottom(clonemap);
  return clonemap;
}


//-----------------------------set_map_clone-----------------------------------
void GraphKit::set_map_clone(SafePointNode* m) {
  _map = m;
  _map = clone_map();
  _map->set_next_exception(NULL);
  debug_only(verify_map());
}


//----------------------------kill_dead_locals---------------------------------
// Detect any locals which are known to be dead, and force them to top.
void GraphKit::kill_dead_locals() {
  // Consult the liveness information for the locals. If any
  // of them are unused, then they can be replaced by top(). This
  // should help register allocation time and cut down on the size
  // of the deoptimization information.

  // This call is made from many of the bytecode handling
  // subroutines called from the Big Switch in do_one_bytecode.
  // Every bytecode which might include a slow path is responsible
  // for killing its dead locals. The more consistent we
  // are about killing deads, the fewer useless phis will be
  // constructed for them at various merge points.

  // bci can be -1 (InvocationEntryBci). We return the entry
  // liveness for the method.

  if (method() == NULL || method()->code_size() == 0) {
    // We are building a graph for a call to a native method.
    // All locals are live.
    return;
  }

  ResourceMark rm;

  // Consult the liveness information for the locals. If any
  // of them are unused, then they can be replaced by top(). This
  // should help register allocation time and cut down on the size
  // of the deoptimization information.
  MethodLivenessResult live_locals = method()->liveness_at_bci(bci());

  int len = (int)live_locals.size();
  assert(len <= jvms()->loc_size(), "too many live locals");
  for (int local = 0; local < len; local++) {
    if (!live_locals.at(local)) {
      set_local(local, top());
    }
  }
}
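
// For example: if the liveness query reports only locals {0, 2} live at this
// bci, the loop above rewrites the map so that local(1) == top(), local(3) ==
// top(), and so on, while live slots are left untouched; the deopt info then
// carries only values the interpreter could actually observe.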

#ifdef ASSERT
//-------------------------dead_locals_are_killed------------------------------
// Return true if all dead locals are set to top in the map.
// Used to assert "clean" debug info at various points.
bool GraphKit::dead_locals_are_killed() {
  if (method() == NULL || method()->code_size() == 0) {
    // No locals need to be dead, so all is as it should be.
    return true;
  }

  // Make sure somebody called kill_dead_locals upstream.
  ResourceMark rm;
  for (JVMState* jvms = this->jvms(); jvms != NULL; jvms = jvms->caller()) {
    if (jvms->loc_size() == 0) continue; // no locals to consult
    SafePointNode* map = jvms->map();
    ciMethod* method = jvms->method();
    int bci = jvms->bci();
    if (jvms == this->jvms()) {
      bci = this->bci(); // it might not yet be synched
    }
    MethodLivenessResult live_locals = method->liveness_at_bci(bci);
    int len = (int)live_locals.size();
    if (!live_locals.is_valid() || len == 0)
      // This method is trivial, or is poisoned by a breakpoint.
      return true;
    assert(len == jvms->loc_size(), "live map consistent with locals map");
    for (int local = 0; local < len; local++) {
      if (!live_locals.at(local) && map->local(jvms, local) != top()) {
        if (PrintMiscellaneous && (Verbose || WizardMode)) {
          tty->print_cr("Zombie local %d: ", local);
          jvms->dump();
        }
        return false;
      }
    }
  }
  return true;
}

#endif //ASSERT

// Helper function for enforcing certain bytecodes to reexecute if deoptimization happens.
static bool should_reexecute_implied_by_bytecode(JVMState *jvms, bool is_anewarray) {
  ciMethod* cur_method = jvms->method();
  int cur_bci = jvms->bci();
  if (cur_method != NULL && cur_bci != InvocationEntryBci) {
    Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci);
    return Interpreter::bytecode_should_reexecute(code) ||
           (is_anewarray && code == Bytecodes::_multianewarray);
    // Reexecute _multianewarray bytecode which was replaced with
    // sequence of [a]newarray. See Parse::do_multianewarray().
    //
    // Note: interpreter should not have it set since this optimization
    // is limited by dimensions and guarded by flag so in some cases
    // multianewarray() runtime calls will be generated and
    // the bytecode should not be reexecuted (stack will not be reset).
  } else {
    return false;
  }
}
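
// Concretely, for the special case above: Parse::do_multianewarray() may
// expand a _multianewarray bytecode into a sequence of [a]newarray
// allocations. Deopting in the middle of that sequence can only be made
// consistent by re-running the original _multianewarray from the start, so
// the helper answers true for an AllocateArray sitting at a _multianewarray
// bci.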

// Helper function for adding JVMState and debug information to node
void GraphKit::add_safepoint_edges(SafePointNode* call, bool must_throw) {
  // Add the safepoint edges to the call (or other safepoint).

  // Make sure dead locals are set to top. This
  // should help register allocation time and cut down on the size
  // of the deoptimization information.
  assert(dead_locals_are_killed(), "garbage in debug info before safepoint");

  // Walk the inline list to fill in the correct set of JVMState's
  // Also fill in the associated edges for each JVMState.

  // If the bytecode needs to be reexecuted we need to put
  // the arguments back on the stack.
  const bool should_reexecute = jvms()->should_reexecute();
  JVMState* youngest_jvms = should_reexecute ? sync_jvms_for_reexecute() : sync_jvms();

  // NOTE: set_bci (called from sync_jvms) might reset the reexecute bit to
  // undefined if the bci is different. This is normal for Parse but it
  // should not happen for LibraryCallKit because only one bci is processed.
  assert(!is_LibraryCallKit() || (jvms()->should_reexecute() == should_reexecute),
         "in LibraryCallKit the reexecute bit should not change");

  // If we are guaranteed to throw, we can prune everything but the
  // input to the current bytecode.
  bool can_prune_locals = false;
  uint stack_slots_not_pruned = 0;
  int inputs = 0, depth = 0;
  if (must_throw) {
    assert(method() == youngest_jvms->method(), "sanity");
    if (compute_stack_effects(inputs, depth)) {
      can_prune_locals = true;
      stack_slots_not_pruned = inputs;
    }
  }

  if (env()->should_retain_local_variables()) {
    // At any safepoint, this method can get breakpointed, which would
    // then require an immediate deoptimization.
    can_prune_locals = false;  // do not prune locals
    stack_slots_not_pruned = 0;
  }

  // do not scribble on the input jvms
  JVMState* out_jvms = youngest_jvms->clone_deep(C);
  call->set_jvms(out_jvms); // Start jvms list for call node

  // For a known set of bytecodes, the interpreter should reexecute them if
  // deoptimization happens. We set the reexecute state for them here
  if (out_jvms->is_reexecute_undefined() && //don't change if already specified
      should_reexecute_implied_by_bytecode(out_jvms, call->is_AllocateArray())) {
#ifdef ASSERT
    int inputs = 0, not_used; // initialized by GraphKit::compute_stack_effects()
    assert(method() == youngest_jvms->method(), "sanity");
    assert(compute_stack_effects(inputs, not_used), "unknown bytecode: %s", Bytecodes::name(java_bc()));
    assert(out_jvms->sp() >= (uint)inputs, "not enough operands for reexecution");
#endif // ASSERT
    out_jvms->set_should_reexecute(true); //NOTE: youngest_jvms not changed
  }

  // Presize the call:
  DEBUG_ONLY(uint non_debug_edges = call->req());
  call->add_req_batch(top(), youngest_jvms->debug_depth());
  assert(call->req() == non_debug_edges + youngest_jvms->debug_depth(), "");

  // Set up edges so that the call looks like this:
  //  Call [state:] ctl io mem fptr retadr
  //       [parms:] parm0 ... parmN
  //       [root:]  loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN
  //    [...mid:]   loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN [...]
  //       [young:] loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN
  // Note that caller debug info precedes callee debug info.
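
  // A worked example with hypothetical sizes: for a depth-2 inline chain
  // where the caller has 3 locals and 1 live stack value, and the callee has
  // 2 locals and an empty stack (no monitors, no scalarized objects), the
  // debug edges appended after the fixed inputs are, in order:
  //
  //   [root:]  c_loc0 c_loc1 c_loc2 c_stk0    (caller JVMS)
  //   [young:] e_loc0 e_loc1                  (callee JVMS)
  //
  // debug_ptr starts at call->req() and each iteration below claims the
  // youngest remaining slice, so the offsets recorded with set_locoff() etc.
  // become indices into the call node itself.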
916 | |
917 | // Fill pointer walks backwards from "young:" to "root:" in the diagram above: |
918 | uint debug_ptr = call->req(); |
919 | |
920 | // Loop over the map input edges associated with jvms, add them |
921 | // to the call node, & reset all offsets to match call node array. |
922 | for (JVMState* in_jvms = youngest_jvms; in_jvms != NULL__null; ) { |
923 | uint debug_end = debug_ptr; |
924 | uint debug_start = debug_ptr - in_jvms->debug_size(); |
925 | debug_ptr = debug_start; // back up the ptr |
926 | |
927 | uint p = debug_start; // walks forward in [debug_start, debug_end) |
928 | uint j, k, l; |
929 | SafePointNode* in_map = in_jvms->map(); |
930 | out_jvms->set_map(call); |
931 | |
932 | if (can_prune_locals) { |
933 | assert(in_jvms->method() == out_jvms->method(), "sanity")do { if (!(in_jvms->method() == out_jvms->method())) { ( *g_assert_poison) = 'X';; report_vm_error("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/graphKit.cpp" , 933, "assert(" "in_jvms->method() == out_jvms->method()" ") failed", "sanity"); ::breakpoint(); } } while (0); |
934 | // If the current throw can reach an exception handler in this JVMS, |
935 | // then we must keep everything live that can reach that handler. |
936 | // As a quick and dirty approximation, we look for any handlers at all. |
937 | if (in_jvms->method()->has_exception_handlers()) { |
938 | can_prune_locals = false; |
939 | } |
940 | } |
941 | |
942 | // Add the Locals |
943 | k = in_jvms->locoff(); |
944 | l = in_jvms->loc_size(); |
945 | out_jvms->set_locoff(p); |
946 | if (!can_prune_locals) { |
947 | for (j = 0; j < l; j++) |
948 | call->set_req(p++, in_map->in(k+j)); |
949 | } else { |
950 | p += l; // already set to top above by add_req_batch |
951 | } |
952 | |
953 | // Add the Expression Stack |
954 | k = in_jvms->stkoff(); |
955 | l = in_jvms->sp(); |
956 | out_jvms->set_stkoff(p); |
957 | if (!can_prune_locals) { |
958 | for (j = 0; j < l; j++) |
959 | call->set_req(p++, in_map->in(k+j)); |
960 | } else if (can_prune_locals && stack_slots_not_pruned != 0) { |
961 | // Divide stack into {S0,...,S1}, where S0 is set to top. |
962 | uint s1 = stack_slots_not_pruned; |
963 | stack_slots_not_pruned = 0; // for next iteration |
964 | if (s1 > l) s1 = l; |
965 | uint s0 = l - s1; |
966 | p += s0; // skip the tops preinstalled by add_req_batch |
967 | for (j = s0; j < l; j++) |
968 | call->set_req(p++, in_map->in(k+j)); |
969 | } else { |
970 | p += l; // already set to top above by add_req_batch |
971 | } |
972 | |
973 | // Add the Monitors |
974 | k = in_jvms->monoff(); |
975 | l = in_jvms->mon_size(); |
976 | out_jvms->set_monoff(p); |
977 | for (j = 0; j < l; j++) |
978 | call->set_req(p++, in_map->in(k+j)); |
979 | |
980 | // Copy any scalar object fields. |
981 | k = in_jvms->scloff(); |
982 | l = in_jvms->scl_size(); |
983 | out_jvms->set_scloff(p); |
984 | for (j = 0; j < l; j++) |
985 | call->set_req(p++, in_map->in(k+j)); |
986 | |
987 | // Finish the new jvms. |
988 | out_jvms->set_endoff(p); |
989 | |
990 | assert(out_jvms->endoff() == debug_end, "fill ptr must match");
991 | assert(out_jvms->depth() == in_jvms->depth(), "depth must match");
992 | assert(out_jvms->loc_size() == in_jvms->loc_size(), "size must match");
993 | assert(out_jvms->mon_size() == in_jvms->mon_size(), "size must match");
994 | assert(out_jvms->scl_size() == in_jvms->scl_size(), "size must match");
995 | assert(out_jvms->debug_size() == in_jvms->debug_size(), "size must match");
996 | |
997 | // Update the two tail pointers in parallel. |
998 | out_jvms = out_jvms->caller(); |
999 | in_jvms = in_jvms->caller(); |
1000 | } |
1001 | |
1002 | assert(debug_ptr == non_debug_edges, "debug info must fit exactly");
1003 | |
1004 | // Test the correctness of JVMState::debug_xxx accessors: |
1005 | assert(call->jvms()->debug_start() == non_debug_edges, "");
1006 | assert(call->jvms()->debug_end() == call->req(), "");
1007 | assert(call->jvms()->debug_depth() == call->req() - non_debug_edges, "");
1008 | } |
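     | // Illustrative accounting (a sketch, not code in this file): for a call
     | // inlined one level deep, debug_depth() sums debug_size() over both
     | // JVMState levels, so with non_debug_edges being the call's fixed
     | // inputs plus its parameters:
     | //   call->req() == non_debug_edges
     | //                + caller_jvms->debug_size()   // "root:" frame
     | //                + callee_jvms->debug_size();  // "young:" frame
     | // This is why debug_ptr walks backwards across frames while p walks
     | // forwards within each frame.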
1009 | |
1010 | bool GraphKit::compute_stack_effects(int& inputs, int& depth) { |
1011 | Bytecodes::Code code = java_bc(); |
1012 | if (code == Bytecodes::_wide) { |
1013 | code = method()->java_code_at_bci(bci() + 1); |
1014 | } |
1015 | |
1016 | BasicType rtype = T_ILLEGAL; |
1017 | int rsize = 0; |
1018 | |
1019 | if (code != Bytecodes::_illegal) { |
1020 | depth = Bytecodes::depth(code); // checkcast=0, athrow=-1 |
1021 | rtype = Bytecodes::result_type(code); // checkcast=P, athrow=V |
1022 | if (rtype < T_CONFLICT) |
1023 | rsize = type2size[rtype]; |
1024 | } |
1025 | |
1026 | switch (code) { |
1027 | case Bytecodes::_illegal: |
1028 | return false; |
1029 | |
1030 | case Bytecodes::_ldc: |
1031 | case Bytecodes::_ldc_w: |
1032 | case Bytecodes::_ldc2_w: |
1033 | inputs = 0; |
1034 | break; |
1035 | |
1036 | case Bytecodes::_dup: inputs = 1; break; |
1037 | case Bytecodes::_dup_x1: inputs = 2; break; |
1038 | case Bytecodes::_dup_x2: inputs = 3; break; |
1039 | case Bytecodes::_dup2: inputs = 2; break; |
1040 | case Bytecodes::_dup2_x1: inputs = 3; break; |
1041 | case Bytecodes::_dup2_x2: inputs = 4; break; |
1042 | case Bytecodes::_swap: inputs = 2; break; |
1043 | case Bytecodes::_arraylength: inputs = 1; break; |
1044 | |
1045 | case Bytecodes::_getstatic: |
1046 | case Bytecodes::_putstatic: |
1047 | case Bytecodes::_getfield: |
1048 | case Bytecodes::_putfield: |
1049 | { |
1050 | bool ignored_will_link; |
1051 | ciField* field = method()->get_field_at_bci(bci(), ignored_will_link); |
1052 | int size = field->type()->size(); |
1053 | bool is_get = (depth >= 0), is_static = (depth & 1); |
1054 | inputs = (is_static ? 0 : 1); |
1055 | if (is_get) { |
1056 | depth = size - inputs; |
1057 | } else { |
1058 | inputs += size; // putxxx pops the value from the stack |
1059 | depth = - inputs; |
1060 | } |
1061 | } |
1062 | break; |
1063 | |
1064 | case Bytecodes::_invokevirtual: |
1065 | case Bytecodes::_invokespecial: |
1066 | case Bytecodes::_invokestatic: |
1067 | case Bytecodes::_invokedynamic: |
1068 | case Bytecodes::_invokeinterface: |
1069 | { |
1070 | bool ignored_will_link; |
1071 | ciSignature* declared_signature = NULL;
1072 | ciMethod* ignored_callee = method()->get_method_at_bci(bci(), ignored_will_link, &declared_signature);
1073 | assert(declared_signature != NULL, "cannot be null");
1074 | inputs = declared_signature->arg_size_for_bc(code); |
1075 | int size = declared_signature->return_type()->size(); |
1076 | depth = size - inputs; |
1077 | } |
1078 | break; |
1079 | |
1080 | case Bytecodes::_multianewarray: |
1081 | { |
1082 | ciBytecodeStream iter(method()); |
1083 | iter.reset_to_bci(bci()); |
1084 | iter.next(); |
1085 | inputs = iter.get_dimensions(); |
1086 | assert(rsize == 1, "");
1087 | depth = rsize - inputs; |
1088 | } |
1089 | break; |
1090 | |
1091 | case Bytecodes::_ireturn: |
1092 | case Bytecodes::_lreturn: |
1093 | case Bytecodes::_freturn: |
1094 | case Bytecodes::_dreturn: |
1095 | case Bytecodes::_areturn: |
1096 | assert(rsize == -depth, "");
1097 | inputs = rsize; |
1098 | break; |
1099 | |
1100 | case Bytecodes::_jsr: |
1101 | case Bytecodes::_jsr_w: |
1102 | inputs = 0; |
1103 | depth = 1; // S.B. depth=1, not zero |
1104 | break; |
1105 | |
1106 | default: |
1107 | // bytecode produces a typed result |
1108 | inputs = rsize - depth; |
1109 | assert(inputs >= 0, "");
1110 | break; |
1111 | } |
1112 | |
1113 | #ifdef ASSERT
1114 | // spot check |
1115 | int outputs = depth + inputs; |
1116 | assert(outputs >= 0, "sanity");
1117 | switch (code) { |
1118 | case Bytecodes::_checkcast: assert(inputs == 1 && outputs == 1, ""); break;
1119 | case Bytecodes::_athrow: assert(inputs == 1 && outputs == 0, ""); break;
1120 | case Bytecodes::_aload_0: assert(inputs == 0 && outputs == 1, ""); break;
1121 | case Bytecodes::_return: assert(inputs == 0 && outputs == 0, ""); break;
1122 | case Bytecodes::_drem: assert(inputs == 4 && outputs == 2, ""); break;
1123 | default: break; |
1124 | } |
1125 | #endif //ASSERT |
1126 | |
1127 | return true; |
1128 | } |
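     | // Worked example (illustrative; the generic depth values come from the
     | // Bytecodes table): for a putfield of a long field, the table depth is
     | // -2, so is_get == false and is_static == false ((-2 & 1) == 0). Then
     | //   inputs = 1 /*receiver*/ + 2 /*long value*/ = 3, depth = -3,
     | // i.e. the bytecode pops three stack slots and pushes none.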
1129 | |
1130 | |
1131 | |
1132 | //------------------------------basic_plus_adr--------------------------------- |
1133 | Node* GraphKit::basic_plus_adr(Node* base, Node* ptr, Node* offset) { |
1134 | // short-circuit a common case |
1135 | if (offset == intcon(0)) return ptr; |
1136 | return _gvn.transform( new AddPNode(base, ptr, offset) ); |
1137 | } |
1138 | |
1139 | Node* GraphKit::ConvI2L(Node* offset) { |
1140 | // short-circuit a common case |
1141 | jint offset_con = find_int_con(offset, Type::OffsetBot); |
1142 | if (offset_con != Type::OffsetBot) { |
1143 | return longcon((jlong) offset_con); |
1144 | } |
1145 | return _gvn.transform( new ConvI2LNode(offset)); |
1146 | } |
1147 | |
1148 | Node* GraphKit::ConvI2UL(Node* offset) { |
1149 | juint offset_con = (juint) find_int_con(offset, Type::OffsetBot); |
1150 | if (offset_con != (juint) Type::OffsetBot) { |
1151 | return longcon((julong) offset_con); |
1152 | } |
1153 | Node* conv = _gvn.transform( new ConvI2LNode(offset)); |
1154 | Node* mask = _gvn.transform(ConLNode::make((julong) max_juint)); |
1155 | return _gvn.transform( new AndLNode(conv, mask) ); |
1156 | } |
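     | // Example (illustrative): ConvI2UL zero-extends. An int -1 (0xFFFFFFFF)
     | // becomes the long 4294967295, not -1: the AndL with max_juint clears
     | // the sign bits that the plain ConvI2L would have replicated into the
     | // upper 32 bits.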
1157 | |
1158 | Node* GraphKit::ConvL2I(Node* offset) { |
1159 | // short-circuit a common case |
1160 | jlong offset_con = find_long_con(offset, (jlong)Type::OffsetBot); |
1161 | if (offset_con != (jlong)Type::OffsetBot) { |
1162 | return intcon((int) offset_con); |
1163 | } |
1164 | return _gvn.transform( new ConvL2INode(offset)); |
1165 | } |
1166 | |
1167 | //-------------------------load_object_klass----------------------------------- |
1168 | Node* GraphKit::load_object_klass(Node* obj) { |
1169 | // Special-case a fresh allocation to avoid building nodes: |
1170 | Node* akls = AllocateNode::Ideal_klass(obj, &_gvn); |
1171 | if (akls != NULL) return akls;
1172 | Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
1173 | return _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), k_adr, TypeInstPtr::KLASS));
1174 | } |
1175 | |
1176 | //-------------------------load_array_length----------------------------------- |
1177 | Node* GraphKit::load_array_length(Node* array) { |
1178 | // Special-case a fresh allocation to avoid building nodes: |
1179 | AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(array, &_gvn); |
1180 | Node *alen; |
1181 | if (alloc == NULL) {
1182 | Node *r_adr = basic_plus_adr(array, arrayOopDesc::length_offset_in_bytes()); |
1183 | alen = _gvn.transform( new LoadRangeNode(0, immutable_memory(), r_adr, TypeInt::POS)); |
1184 | } else { |
1185 | alen = array_ideal_length(alloc, _gvn.type(array)->is_oopptr(), false); |
1186 | } |
1187 | return alen; |
1188 | } |
1189 | |
1190 | Node* GraphKit::array_ideal_length(AllocateArrayNode* alloc, |
1191 | const TypeOopPtr* oop_type, |
1192 | bool replace_length_in_map) { |
1193 | Node* length = alloc->Ideal_length(); |
1194 | if (replace_length_in_map == false || map()->find_edge(length) >= 0) { |
1195 | Node* ccast = alloc->make_ideal_length(oop_type, &_gvn); |
1196 | if (ccast != length) { |
1197 | // do not transform ccast here: it might convert to a top node for a
1198 | // negative array length and break assumptions in the parsing stage.
1199 | _gvn.set_type_bottom(ccast); |
1200 | record_for_igvn(ccast); |
1201 | if (replace_length_in_map) { |
1202 | replace_in_map(length, ccast); |
1203 | } |
1204 | return ccast; |
1205 | } |
1206 | } |
1207 | return length; |
1208 | } |
1209 | |
1210 | //------------------------------do_null_check---------------------------------- |
1211 | // Helper function to do a NULL pointer check. The returned value is
1212 | // the incoming address with NULL cast away. You are allowed to use the
1213 | // not-null value only if you are control dependent on the test.
1214 | #ifndef PRODUCT |
1215 | extern int explicit_null_checks_inserted, |
1216 | explicit_null_checks_elided; |
1217 | #endif |
1218 | Node* GraphKit::null_check_common(Node* value, BasicType type, |
1219 | // optional arguments for variations: |
1220 | bool assert_null, |
1221 | Node* *null_control, |
1222 | bool speculative) { |
1223 | assert(!assert_null || null_control == NULL, "not both at once");
1224 | if (stopped()) return top();
1225 | NOT_PRODUCT(explicit_null_checks_inserted++);
1226 | |
1227 | // Construct NULL check |
1228 | Node *chk = NULL;
1229 | switch(type) { |
1230 | case T_LONG : chk = new CmpLNode(value, _gvn.zerocon(T_LONG)); break; |
1231 | case T_INT : chk = new CmpINode(value, _gvn.intcon(0)); break; |
1232 | case T_ARRAY : // fall through |
1233 | type = T_OBJECT; // simplify further tests |
1234 | case T_OBJECT : { |
1235 | const Type *t = _gvn.type( value ); |
1236 | |
1237 | const TypeOopPtr* tp = t->isa_oopptr(); |
1238 | if (tp != NULL && tp->klass() != NULL && !tp->klass()->is_loaded()
1239 | // Only for do_null_check, not any of its siblings:
1240 | && !assert_null && null_control == NULL) {
1241 | // Usually, any field access or invocation on an unloaded oop type |
1242 | // will simply fail to link, since the statically linked class is |
1243 | // likely also to be unloaded. However, in -Xcomp mode, sometimes |
1244 | // the static class is loaded but the sharper oop type is not. |
1245 | // Rather than checking for this obscure case in lots of places, |
1246 | // we simply observe that a null check on an unloaded class |
1247 | // will always be followed by a nonsense operation, so we |
1248 | // can just issue the uncommon trap here. |
1249 | // Our access to the unloaded class will only be correct |
1250 | // after it has been loaded and initialized, which requires |
1251 | // a trip through the interpreter. |
1252 | #ifndef PRODUCT |
1253 | if (WizardMode) { tty->print("Null check of unloaded "); tp->klass()->print(); tty->cr(); } |
1254 | #endif |
1255 | uncommon_trap(Deoptimization::Reason_unloaded, |
1256 | Deoptimization::Action_reinterpret, |
1257 | tp->klass(), "!loaded"); |
1258 | return top(); |
1259 | } |
1260 | |
1261 | if (assert_null) { |
1262 | // See if the type is contained in NULL_PTR. |
1263 | // If so, then the value is already null. |
1264 | if (t->higher_equal(TypePtr::NULL_PTR)) { |
1265 | NOT_PRODUCT(explicit_null_checks_elided++);
1266 | return value; // Elided null assert quickly! |
1267 | } |
1268 | } else { |
1269 | // See if mixing in the NULL pointer changes type. |
1270 | // If so, then the NULL pointer was not allowed in the original |
1271 | // type. In other words, "value" was not-null. |
1272 | if (t->meet(TypePtr::NULL_PTR) != t->remove_speculative()) { |
1273 | // same as: if (!TypePtr::NULL_PTR->higher_equal(t)) ... |
1274 | NOT_PRODUCT(explicit_null_checks_elided++);
1275 | return value; // Elided null check quickly! |
1276 | } |
1277 | } |
1278 | chk = new CmpPNode( value, null() ); |
1279 | break; |
1280 | } |
1281 | |
1282 | default: |
1283 | fatal("unexpected type: %s", type2name(type))do { (*g_assert_poison) = 'X';; report_fatal(INTERNAL_ERROR, "/home/daniel/Projects/java/jdk/src/hotspot/share/opto/graphKit.cpp" , 1283, "unexpected type: %s", type2name(type)); ::breakpoint (); } while (0); |
1284 | } |
1285 | assert(chk != NULL, "sanity check")do { if (!(chk != __null)) { (*g_assert_poison) = 'X';; report_vm_error ("/home/daniel/Projects/java/jdk/src/hotspot/share/opto/graphKit.cpp" , 1285, "assert(" "chk != __null" ") failed", "sanity check") ; ::breakpoint(); } } while (0); |
1286 | chk = _gvn.transform(chk); |
1287 | |
1288 | BoolTest::mask btest = assert_null ? BoolTest::eq : BoolTest::ne; |
1289 | BoolNode *btst = new BoolNode( chk, btest); |
1290 | Node *tst = _gvn.transform( btst ); |
1291 | |
1292 | //----------- |
1293 | // If peephole optimizations occurred, a prior test existed.
1294 | // If that prior test dominates, we can avoid this test altogether.
1295 | if (tst != btst && type == T_OBJECT) { |
1296 | // At this point we want to scan up the CFG to see if we can |
1297 | // find an identical test (and so avoid this test altogether). |
1298 | Node *cfg = control(); |
1299 | int depth = 0; |
1300 | while( depth < 16 ) { // Limit search depth for speed |
1301 | if( cfg->Opcode() == Op_IfTrue && |
1302 | cfg->in(0)->in(1) == tst ) { |
1303 | // Found prior test. Use "cast_not_null" to construct a CastPP
1304 | // identical to (and hence hashing to) the one that already exists
1305 | // for the prior test. Return that cast value.
1306 | if (assert_null) { |
1307 | replace_in_map(value, null()); |
1308 | return null(); // do not issue the redundant test |
1309 | } |
1310 | Node *oldcontrol = control(); |
1311 | set_control(cfg); |
1312 | Node *res = cast_not_null(value); |
1313 | set_control(oldcontrol); |
1314 | NOT_PRODUCT(explicit_null_checks_elided++);
1315 | return res; |
1316 | } |
1317 | cfg = IfNode::up_one_dom(cfg, /*linear_only=*/ true); |
1318 | if (cfg == NULL) break; // Quit at region nodes
1319 | depth++; |
1320 | } |
1321 | } |
1322 | |
1323 | //----------- |
1324 | // Branch to failure if null |
1325 | float ok_prob = PROB_MAX; // a priori estimate: nulls never happen
1326 | Deoptimization::DeoptReason reason; |
1327 | if (assert_null) { |
1328 | reason = Deoptimization::reason_null_assert(speculative); |
1329 | } else if (type == T_OBJECT) { |
1330 | reason = Deoptimization::reason_null_check(speculative); |
1331 | } else { |
1332 | reason = Deoptimization::Reason_div0_check; |
1333 | } |
1334 | // %%% Since Reason_unhandled is not recorded on a per-bytecode basis, |
1335 | // ciMethodData::has_trap_at will return a conservative -1 if any |
1336 | // must-be-null assertion has failed. This could cause performance |
1337 | // problems for a method after its first do_null_assert failure. |
1338 | // Consider using 'Reason_class_check' instead? |
1339 | |
1340 | // To cause an implicit null check, we set the not-null probability |
1341 | // to the maximum (PROB_MAX). For an explicit check the probability |
1342 | // is set to a smaller value. |
1343 | if (null_control != NULL || too_many_traps(reason)) {
1344 | // probability is less likely
1345 | ok_prob = PROB_LIKELY_MAG(3);
1346 | } else if (!assert_null &&
1347 | (ImplicitNullCheckThreshold > 0) &&
1348 | method() != NULL &&
1349 | (method()->method_data()->trap_count(reason)
1350 | >= (uint)ImplicitNullCheckThreshold)) {
1351 | ok_prob = PROB_LIKELY_MAG(3);
1352 | } |
1353 | |
1354 | if (null_control != NULL) {
1355 | IfNode* iff = create_and_map_if(control(), tst, ok_prob, COUNT_UNKNOWN);
1356 | Node* null_true = _gvn.transform( new IfFalseNode(iff)); |
1357 | set_control( _gvn.transform( new IfTrueNode(iff))); |
1358 | #ifndef PRODUCT |
1359 | if (null_true == top()) { |
1360 | explicit_null_checks_elided++; |
1361 | } |
1362 | #endif |
1363 | (*null_control) = null_true; |
1364 | } else { |
1365 | BuildCutout unless(this, tst, ok_prob); |
1366 | // Check for optimizer eliding test at parse time |
1367 | if (stopped()) { |
1368 | // Failure not possible; do not bother making uncommon trap. |
1369 | NOT_PRODUCT(explicit_null_checks_elided++);
1370 | } else if (assert_null) { |
1371 | uncommon_trap(reason, |
1372 | Deoptimization::Action_make_not_entrant, |
1373 | NULL, "assert_null");
1374 | } else { |
1375 | replace_in_map(value, zerocon(type)); |
1376 | builtin_throw(reason); |
1377 | } |
1378 | } |
1379 | |
1380 | // Must throw exception, fall-thru not possible? |
1381 | if (stopped()) { |
1382 | return top(); // No result |
1383 | } |
1384 | |
1385 | if (assert_null) { |
1386 | // Cast obj to null on this path. |
1387 | replace_in_map(value, zerocon(type)); |
1388 | return zerocon(type); |
1389 | } |
1390 | |
1391 | // Cast obj to not-null on this path, if there is no null_control. |
1392 | // (If there is a null_control, a non-null value may come back to haunt us.) |
1393 | if (type == T_OBJECT) { |
1394 | Node* cast = cast_not_null(value, false); |
1395 | if (null_control == NULL || (*null_control) == top())
1396 | replace_in_map(value, cast); |
1397 | value = cast; |
1398 | } |
1399 | |
1400 | return value; |
1401 | } |
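     | // Illustrative use (a sketch; most callers go through the null_check()
     | // convenience wrappers declared in graphKit.hpp, which forward here):
     | //   Node* receiver = argument(0);
     | //   Node* not_null = null_check_common(receiver, T_OBJECT,
     | //                                      false,  // assert_null
     | //                                      NULL,   // no null_control
     | //                                      false); // not speculative
     | // On the fall-through path 'not_null' carries a not-null type and has
     | // replaced 'receiver' in the map; the null path traps or throws.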
1402 | |
1403 | |
1404 | //------------------------------cast_not_null---------------------------------- |
1405 | // Cast obj to not-null on this path |
1406 | Node* GraphKit::cast_not_null(Node* obj, bool do_replace_in_map) { |
1407 | const Type *t = _gvn.type(obj); |
1408 | const Type *t_not_null = t->join_speculative(TypePtr::NOTNULL); |
1409 | // Object is already not-null? |
1410 | if( t == t_not_null ) return obj; |
1411 | |
1412 | Node *cast = new CastPPNode(obj,t_not_null); |
1413 | cast->init_req(0, control()); |
1414 | cast = _gvn.transform( cast ); |
1415 | |
1416 | // Scan for instances of 'obj' in the current JVM mapping. |
1417 | // These instances are known to be not-null after the test. |
1418 | if (do_replace_in_map) |
1419 | replace_in_map(obj, cast); |
1420 | |
1421 | return cast; // Return casted value |
1422 | } |
1423 | |
1424 | // Sometimes in intrinsics, we implicitly know an object is not null |
1425 | // (there's no actual null check) so we can cast it to not null. In |
1426 | // the course of optimizations, the input to the cast can become null. |
1427 | // In that case that data path will die and we need the control path |
1428 | // to become dead as well to keep the graph consistent. So we have to |
1429 | // add a check for null for which one branch can't be taken. It uses |
1430 | // an Opaque4 node that will cause the check to be removed after loop |
1431 | // opts so the test goes away and the compiled code doesn't execute a |
1432 | // useless check. |
1433 | Node* GraphKit::must_be_not_null(Node* value, bool do_replace_in_map) { |
1434 | if (!TypePtr::NULL_PTR->higher_equal(_gvn.type(value))) { |
1435 | return value; |
1436 | } |
1437 | Node* chk = _gvn.transform(new CmpPNode(value, null())); |
1438 | Node *tst = _gvn.transform(new BoolNode(chk, BoolTest::ne)); |
1439 | Node* opaq = _gvn.transform(new Opaque4Node(C, tst, intcon(1))); |
1440 | IfNode *iff = new IfNode(control(), opaq, PROB_MAX, COUNT_UNKNOWN);
1441 | _gvn.set_type(iff, iff->Value(&_gvn)); |
1442 | Node *if_f = _gvn.transform(new IfFalseNode(iff)); |
1443 | Node *frame = _gvn.transform(new ParmNode(C->start(), TypeFunc::FramePtr)); |
1444 | Node* halt = _gvn.transform(new HaltNode(if_f, frame, "unexpected null in intrinsic")); |
1445 | C->root()->add_req(halt); |
1446 | Node *if_t = _gvn.transform(new IfTrueNode(iff)); |
1447 | set_control(if_t); |
1448 | return cast_not_null(value, do_replace_in_map); |
1449 | } |
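     | // Resulting shape (illustrative):
     | //
     | //        If(Opaque4(value != NULL, intcon(1)))
     | //        /                            \
     | //   IfTrue: cast_not_null(value)   IfFalse: Halt "unexpected null"
     | //
     | // After loop opts the Opaque4 node collapses to its constant input, the
     | // If folds to the true side, and the Halt arm goes away; if 'value'
     | // instead becomes NULL, the cast path dies and the Halt arm keeps the
     | // graph consistent.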
1450 | |
1451 | |
1452 | //--------------------------replace_in_map------------------------------------- |
1453 | void GraphKit::replace_in_map(Node* old, Node* neww) { |
1454 | if (old == neww) { |
1455 | return; |
1456 | } |
1457 | |
1458 | map()->replace_edge(old, neww); |
1459 | |
1460 | // Note: This operation potentially replaces any edge |
1461 | // on the map. This includes locals, stack, and monitors |
1462 | // of the current (innermost) JVM state. |
1463 | |
1464 | // don't let inconsistent types from profiling escape this |
1465 | // method |
1466 | |
1467 | const Type* told = _gvn.type(old); |
1468 | const Type* tnew = _gvn.type(neww); |
1469 | |
1470 | if (!tnew->higher_equal(told)) { |
1471 | return; |
1472 | } |
1473 | |
1474 | map()->record_replaced_node(old, neww); |
1475 | } |
1476 | |
1477 | |
1478 | //============================================================================= |
1479 | //--------------------------------memory--------------------------------------- |
1480 | Node* GraphKit::memory(uint alias_idx) { |
1481 | MergeMemNode* mem = merged_memory(); |
1482 | Node* p = mem->memory_at(alias_idx); |
1483 | assert(p != mem->empty_memory(), "empty");
1484 | _gvn.set_type(p, Type::MEMORY); // must be mapped |
1485 | return p; |
1486 | } |
1487 | |
1488 | //-----------------------------reset_memory------------------------------------ |
1489 | Node* GraphKit::reset_memory() { |
1490 | Node* mem = map()->memory(); |
1491 | // do not use this node for any more parsing! |
1492 | debug_only( map()->set_memory((Node*)NULL) );
1493 | return _gvn.transform( mem ); |
1494 | } |
1495 | |
1496 | //------------------------------set_all_memory--------------------------------- |
1497 | void GraphKit::set_all_memory(Node* newmem) { |
1498 | Node* mergemem = MergeMemNode::make(newmem); |
1499 | gvn().set_type_bottom(mergemem); |
1500 | map()->set_memory(mergemem); |
1501 | } |
1502 | |
1503 | //------------------------------set_all_memory_call---------------------------- |
1504 | void GraphKit::set_all_memory_call(Node* call, bool separate_io_proj) { |
1505 | Node* newmem = _gvn.transform( new ProjNode(call, TypeFunc::Memory, separate_io_proj) ); |
1506 | set_all_memory(newmem); |
1507 | } |
1508 | |
1509 | //============================================================================= |
1510 | // |
1511 | // parser factory methods for MemNodes |
1512 | // |
1513 | // These are layered on top of the factory methods in LoadNode and StoreNode, |
1514 | // and integrate with the parser's memory state and _gvn engine. |
1515 | // |
1516 | |
1517 | // factory methods taking an "int adr_idx" alias index
1518 | Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, |
1519 | int adr_idx, |
1520 | MemNode::MemOrd mo, |
1521 | LoadNode::ControlDependency control_dependency, |
1522 | bool require_atomic_access, |
1523 | bool unaligned, |
1524 | bool mismatched, |
1525 | bool unsafe, |
1526 | uint8_t barrier_data) { |
1527 | assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory");
1528 | const TypePtr* adr_type = NULL; // debug-mode-only argument
1529 | debug_only(adr_type = C->get_adr_type(adr_idx));
1530 | Node* mem = memory(adr_idx); |
1531 | Node* ld; |
1532 | if (require_atomic_access && bt == T_LONG) { |
1533 | ld = LoadLNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched, unsafe, barrier_data); |
1534 | } else if (require_atomic_access && bt == T_DOUBLE) { |
1535 | ld = LoadDNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched, unsafe, barrier_data); |
1536 | } else { |
1537 | ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, unaligned, mismatched, unsafe, barrier_data); |
1538 | } |
1539 | ld = _gvn.transform(ld); |
1540 | if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) { |
1541 | // Improve graph before escape analysis and boxing elimination. |
1542 | record_for_igvn(ld); |
1543 | } |
1544 | return ld; |
1545 | } |
1546 | |
1547 | Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt, |
1548 | int adr_idx, |
1549 | MemNode::MemOrd mo, |
1550 | bool require_atomic_access, |
1551 | bool unaligned, |
1552 | bool mismatched, |
1553 | bool unsafe) { |
1554 | assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory");
1555 | const TypePtr* adr_type = NULL;
1556 | debug_only(adr_type = C->get_adr_type(adr_idx));
1557 | Node *mem = memory(adr_idx); |
1558 | Node* st; |
1559 | if (require_atomic_access && bt == T_LONG) { |
1560 | st = StoreLNode::make_atomic(ctl, mem, adr, adr_type, val, mo); |
1561 | } else if (require_atomic_access && bt == T_DOUBLE) { |
1562 | st = StoreDNode::make_atomic(ctl, mem, adr, adr_type, val, mo); |
1563 | } else { |
1564 | st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo); |
1565 | } |
1566 | if (unaligned) { |
1567 | st->as_Store()->set_unaligned_access(); |
1568 | } |
1569 | if (mismatched) { |
1570 | st->as_Store()->set_mismatched_access(); |
1571 | } |
1572 | if (unsafe) { |
1573 | st->as_Store()->set_unsafe_access(); |
1574 | } |
1575 | st = _gvn.transform(st); |
1576 | set_memory(st, adr_idx); |
1577 | // Back-to-back stores can only eliminate the intermediate store with
1578 | // DU info available, so push on the worklist for the optimizer.
1579 | if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address)) |
1580 | record_for_igvn(st); |
1581 | |
1582 | return st; |
1583 | } |
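     | // Illustrative use (a sketch mirroring GraphKit::increment_counter()
     | // later in this file): a raw, unordered load/add/store of a jlong:
     | //   int   alias = Compile::AliasIdxRaw;
     | //   Node* cnt   = make_load(control(), counter_adr, TypeLong::LONG,
     | //                           T_LONG, alias, MemNode::unordered);
     | //   Node* incr  = _gvn.transform(new AddLNode(cnt, _gvn.longcon(1)));
     | //   store_to_memory(control(), counter_adr, incr, T_LONG, alias,
     | //                   MemNode::unordered);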
1584 | |
1585 | Node* GraphKit::access_store_at(Node* obj, |
1586 | Node* adr, |
1587 | const TypePtr* adr_type, |
1588 | Node* val, |
1589 | const Type* val_type, |
1590 | BasicType bt, |
1591 | DecoratorSet decorators) { |
1592 | // Transformation of a value which could be NULL pointer (CastPP #NULL) |
1593 | // could be delayed during Parse (for example, in adjust_map_after_if()). |
1594 | // Execute transformation here to avoid barrier generation in such case. |
1595 | if (_gvn.type(val) == TypePtr::NULL_PTR) { |
1596 | val = _gvn.makecon(TypePtr::NULL_PTR); |
1597 | } |
1598 | |
1599 | if (stopped()) { |
1600 | return top(); // Dead path ? |
1601 | } |
1602 | |
1603 | assert(val != NULL, "not dead path");
1604 | |
1605 | C2AccessValuePtr addr(adr, adr_type); |
1606 | C2AccessValue value(val, val_type); |
1607 | C2ParseAccess access(this, decorators | C2_WRITE_ACCESS, bt, obj, addr); |
1608 | if (access.is_raw()) { |
1609 | return _barrier_set->BarrierSetC2::store_at(access, value); |
1610 | } else { |
1611 | return _barrier_set->store_at(access, value); |
1612 | } |
1613 | } |
1614 | |
1615 | Node* GraphKit::access_load_at(Node* obj, // containing obj |
1616 | Node* adr, // actual address to load from
1617 | const TypePtr* adr_type, |
1618 | const Type* val_type, |
1619 | BasicType bt, |
1620 | DecoratorSet decorators) { |
1621 | if (stopped()) { |
1622 | return top(); // Dead path ? |
1623 | } |
1624 | |
1625 | C2AccessValuePtr addr(adr, adr_type); |
1626 | C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, obj, addr); |
1627 | if (access.is_raw()) { |
1628 | return _barrier_set->BarrierSetC2::load_at(access, val_type); |
1629 | } else { |
1630 | return _barrier_set->load_at(access, val_type); |
1631 | } |
1632 | } |
1633 | |
1634 | Node* GraphKit::access_load(Node* adr, // actual address to load val at
1635 | const Type* val_type, |
1636 | BasicType bt, |
1637 | DecoratorSet decorators) { |
1638 | if (stopped()) { |
1639 | return top(); // Dead path ? |
1640 | } |
1641 | |
1642 | C2AccessValuePtr addr(adr, adr->bottom_type()->is_ptr()); |
1643 | C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, NULL, addr);
1644 | if (access.is_raw()) { |
1645 | return _barrier_set->BarrierSetC2::load_at(access, val_type); |
1646 | } else { |
1647 | return _barrier_set->load_at(access, val_type); |
1648 | } |
1649 | } |
1650 | |
1651 | Node* GraphKit::access_atomic_cmpxchg_val_at(Node* obj, |
1652 | Node* adr, |
1653 | const TypePtr* adr_type, |
1654 | int alias_idx, |
1655 | Node* expected_val, |
1656 | Node* new_val, |
1657 | const Type* value_type, |
1658 | BasicType bt, |
1659 | DecoratorSet decorators) { |
1660 | C2AccessValuePtr addr(adr, adr_type); |
1661 | C2AtomicParseAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS, |
1662 | bt, obj, addr, alias_idx); |
1663 | if (access.is_raw()) { |
1664 | return _barrier_set->BarrierSetC2::atomic_cmpxchg_val_at(access, expected_val, new_val, value_type); |
1665 | } else { |
1666 | return _barrier_set->atomic_cmpxchg_val_at(access, expected_val, new_val, value_type); |
1667 | } |
1668 | } |
1669 | |
1670 | Node* GraphKit::access_atomic_cmpxchg_bool_at(Node* obj, |
1671 | Node* adr, |
1672 | const TypePtr* adr_type, |
1673 | int alias_idx, |
1674 | Node* expected_val, |
1675 | Node* new_val, |
1676 | const Type* value_type, |
1677 | BasicType bt, |
1678 | DecoratorSet decorators) { |
1679 | C2AccessValuePtr addr(adr, adr_type); |
1680 | C2AtomicParseAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS, |
1681 | bt, obj, addr, alias_idx); |
1682 | if (access.is_raw()) { |
1683 | return _barrier_set->BarrierSetC2::atomic_cmpxchg_bool_at(access, expected_val, new_val, value_type); |
1684 | } else { |
1685 | return _barrier_set->atomic_cmpxchg_bool_at(access, expected_val, new_val, value_type); |
1686 | } |
1687 | } |
1688 | |
1689 | Node* GraphKit::access_atomic_xchg_at(Node* obj, |
1690 | Node* adr, |
1691 | const TypePtr* adr_type, |
1692 | int alias_idx, |
1693 | Node* new_val, |
1694 | const Type* value_type, |
1695 | BasicType bt, |
1696 | DecoratorSet decorators) { |
1697 | C2AccessValuePtr addr(adr, adr_type); |
1698 | C2AtomicParseAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS, |
1699 | bt, obj, addr, alias_idx); |
1700 | if (access.is_raw()) { |
1701 | return _barrier_set->BarrierSetC2::atomic_xchg_at(access, new_val, value_type); |
1702 | } else { |
1703 | return _barrier_set->atomic_xchg_at(access, new_val, value_type); |
1704 | } |
1705 | } |
1706 | |
1707 | Node* GraphKit::access_atomic_add_at(Node* obj, |
1708 | Node* adr, |
1709 | const TypePtr* adr_type, |
1710 | int alias_idx, |
1711 | Node* new_val, |
1712 | const Type* value_type, |
1713 | BasicType bt, |
1714 | DecoratorSet decorators) { |
1715 | C2AccessValuePtr addr(adr, adr_type); |
1716 | C2AtomicParseAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS, bt, obj, addr, alias_idx); |
1717 | if (access.is_raw()) { |
1718 | return _barrier_set->BarrierSetC2::atomic_add_at(access, new_val, value_type); |
1719 | } else { |
1720 | return _barrier_set->atomic_add_at(access, new_val, value_type); |
1721 | } |
1722 | } |
1723 | |
1724 | void GraphKit::access_clone(Node* src, Node* dst, Node* size, bool is_array) { |
1725 | return _barrier_set->clone(this, src, dst, size, is_array); |
1726 | } |
1727 | |
1728 | //-------------------------array_element_address------------------------- |
1729 | Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt, |
1730 | const TypeInt* sizetype, Node* ctrl) { |
1731 | uint shift = exact_log2(type2aelembytes(elembt)); |
1732 | uint header = arrayOopDesc::base_offset_in_bytes(elembt); |
1733 | |
1734 | // short-circuit a common case (saves lots of confusing waste motion) |
1735 | jint idx_con = find_int_con(idx, -1); |
1736 | if (idx_con >= 0) { |
1737 | intptr_t offset = header + ((intptr_t)idx_con << shift); |
1738 | return basic_plus_adr(ary, offset); |
1739 | } |
1740 | |
1741 | // must be correct type for alignment purposes |
1742 | Node* base = basic_plus_adr(ary, header); |
1743 | idx = Compile::conv_I2X_index(&_gvn, idx, sizetype, ctrl); |
1744 | Node* scale = _gvn.transform( new LShiftXNode(idx, intcon(shift)) );
1745 | return basic_plus_adr(ary, base, scale); |
1746 | } |
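     | // Worked example (illustrative; the element base offset depends on the
     | // object layout): for a T_INT array, shift == 2. Assuming a 16-byte
     | // base offset, element 5 lives at header + (5 << 2) == 16 + 20 == 36
     | // bytes past the array oop, which the constant-index fast path above
     | // computes directly as a single AddP offset.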
1747 | |
1748 | //-------------------------load_array_element------------------------- |
1749 | Node* GraphKit::load_array_element(Node* ary, Node* idx, const TypeAryPtr* arytype, bool set_ctrl) { |
1750 | const Type* elemtype = arytype->elem(); |
1751 | BasicType elembt = elemtype->array_element_basic_type(); |
1752 | Node* adr = array_element_address(ary, idx, elembt, arytype->size()); |
1753 | if (elembt == T_NARROWOOP) { |
1754 | elembt = T_OBJECT; // To satisfy switch in LoadNode::make() |
1755 | } |
1756 | Node* ld = access_load_at(ary, adr, arytype, elemtype, elembt, |
1757 | IN_HEAP | IS_ARRAY | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0)); |
1758 | return ld; |
1759 | } |
1760 | |
1761 | //-------------------------set_arguments_for_java_call------------------------- |
1762 | // Arguments (pre-popped from the stack) are taken from the JVMS. |
1763 | void GraphKit::set_arguments_for_java_call(CallJavaNode* call) { |
1764 | // Add the call arguments: |
1765 | uint nargs = call->method()->arg_size(); |
1766 | for (uint i = 0; i < nargs; i++) { |
1767 | Node* arg = argument(i); |
1768 | call->init_req(i + TypeFunc::Parms, arg); |
1769 | } |
1770 | } |
1771 | |
1772 | //---------------------------set_edges_for_java_call--------------------------- |
1773 | // Connect a newly created call into the current JVMS. |
1774 | // A return value node (if any) is returned from set_edges_for_java_call. |
1775 | void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw, bool separate_io_proj) { |
1776 | |
1777 | // Add the predefined inputs: |
1778 | call->init_req( TypeFunc::Control, control() ); |
1779 | call->init_req( TypeFunc::I_O , i_o() ); |
1780 | call->init_req( TypeFunc::Memory , reset_memory() ); |
1781 | call->init_req( TypeFunc::FramePtr, frameptr() ); |
1782 | call->init_req( TypeFunc::ReturnAdr, top() ); |
1783 | |
1784 | add_safepoint_edges(call, must_throw); |
1785 | |
1786 | Node* xcall = _gvn.transform(call); |
1787 | |
1788 | if (xcall == top()) { |
1789 | set_control(top()); |
1790 | return; |
1791 | } |
1792 | assert(xcall == call, "call identity is stable");
1793 | |
1794 | // Re-use the current map to produce the result. |
1795 | |
1796 | set_control(_gvn.transform(new ProjNode(call, TypeFunc::Control))); |
1797 | set_i_o( _gvn.transform(new ProjNode(call, TypeFunc::I_O , separate_io_proj))); |
1798 | set_all_memory_call(xcall, separate_io_proj); |
1799 | |
1800 | //return xcall; // no need, caller already has it |
1801 | } |
1802 | |
1803 | Node* GraphKit::set_results_for_java_call(CallJavaNode* call, bool separate_io_proj, bool deoptimize) { |
1804 | if (stopped()) return top(); // maybe the call folded up? |
1805 | |
1806 | // Capture the return value, if any. |
1807 | Node* ret; |
1808 | if (call->method() == NULL ||
1809 | call->method()->return_type()->basic_type() == T_VOID) |
1810 | ret = top(); |
1811 | else ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms)); |
1812 | |
1813 | // Note: Since any out-of-line call can produce an exception, |
1814 | // we always insert an I_O projection from the call into the result. |
1815 | |
1816 | make_slow_call_ex(call, env()->Throwable_klass(), separate_io_proj, deoptimize); |
1817 | |
1818 | if (separate_io_proj) { |
1819 | // The caller requested separate projections be used by the fall |
1820 | // through and exceptional paths, so replace the projections for |
1821 | // the fall through path. |
1822 | set_i_o(_gvn.transform( new ProjNode(call, TypeFunc::I_O) )); |
1823 | set_all_memory(_gvn.transform( new ProjNode(call, TypeFunc::Memory) )); |
1824 | } |
1825 | return ret; |
1826 | } |
1827 | |
1828 | //--------------------set_predefined_input_for_runtime_call-------------------- |
1829 | // Reading and setting the memory state is way conservative here. |
1830 | // The real problem is that I am not doing real Type analysis on memory, |
1831 | // so I cannot distinguish card mark stores from other stores. Across a GC |
1832 | // point the Store Barrier and the card mark memory has to agree. I cannot |
1833 | // have a card mark store and its barrier split across the GC point from |
1834 | // either above or below. Here I get that to happen by reading ALL of memory. |
1835 | // A better answer would be to separate out card marks from other memory. |
1836 | // For now, return the input memory state, so that it can be reused |
1837 | // after the call, if this call has restricted memory effects. |
1838 | Node* GraphKit::set_predefined_input_for_runtime_call(SafePointNode* call, Node* narrow_mem) { |
1839 | // Set fixed predefined input arguments |
1840 | Node* memory = reset_memory(); |
1841 | Node* m = narrow_mem == NULL ? memory : narrow_mem;
1842 | call->init_req( TypeFunc::Control, control() ); |
1843 | call->init_req( TypeFunc::I_O, top() ); // does no i/o |
1844 | call->init_req( TypeFunc::Memory, m ); // may gc ptrs |
1845 | call->init_req( TypeFunc::FramePtr, frameptr() ); |
1846 | call->init_req( TypeFunc::ReturnAdr, top() ); |
1847 | return memory; |
1848 | } |
1849 | |
1850 | //-------------------set_predefined_output_for_runtime_call-------------------- |
1851 | // Set control and memory (not i_o) from the call. |
1852 | // If keep_mem is not NULL, use it for the output state, |
1853 | // except for the RawPtr output of the call, if hook_mem is TypeRawPtr::BOTTOM. |
1854 | // If hook_mem is NULL, this call produces no memory effects at all. |
1855 | // If hook_mem is a Java-visible memory slice (such as arraycopy operands), |
1856 | // then only that memory slice is taken from the call. |
1857 | // In the last case, we must put an appropriate memory barrier before |
1858 | // the call, so as to create the correct anti-dependencies on loads |
1859 | // preceding the call. |
1860 | void GraphKit::set_predefined_output_for_runtime_call(Node* call, |
1861 | Node* keep_mem, |
1862 | const TypePtr* hook_mem) { |
1863 | // no i/o |
1864 | set_control(_gvn.transform( new ProjNode(call,TypeFunc::Control) )); |
1865 | if (keep_mem) { |
1866 | // First clone the existing memory state |
1867 | set_all_memory(keep_mem); |
1868 | if (hook_mem != NULL) {
1869 | // Make memory for the call |
1870 | Node* mem = _gvn.transform( new ProjNode(call, TypeFunc::Memory) ); |
1871 | // Set the RawPtr memory state only. This covers all the heap top/GC stuff |
1872 | // We also use hook_mem to extract specific effects from arraycopy stubs. |
1873 | set_memory(mem, hook_mem); |
1874 | } |
1875 | // ...else the call has NO memory effects. |
1876 | |
1877 | // Make sure the call advertises its memory effects precisely. |
1878 | // This lets us build accurate anti-dependences in gcm.cpp. |
1879 | assert(C->alias_type(call->adr_type()) == C->alias_type(hook_mem),
1880 | "call node must be constructed correctly");
1881 | } else { |
1882 | assert(hook_mem == NULL, "");
1883 | // This is not a "slow path" call; all memory comes from the call. |
1884 | set_all_memory_call(call); |
1885 | } |
1886 | } |
1887 | |
1888 | // Keep track of MergeMems feeding into other MergeMems |
1889 | static void add_mergemem_users_to_worklist(Unique_Node_List& wl, Node* mem) { |
1890 | if (!mem->is_MergeMem()) { |
1891 | return; |
1892 | } |
1893 | for (SimpleDUIterator i(mem); i.has_next(); i.next()) { |
1894 | Node* use = i.get(); |
1895 | if (use->is_MergeMem()) { |
1896 | wl.push(use); |
1897 | } |
1898 | } |
1899 | } |
1900 | |
1901 | // Replace the call with the current state of the kit. |
1902 | void GraphKit::replace_call(CallNode* call, Node* result, bool do_replaced_nodes) { |
1903 | JVMState* ejvms = NULL;
1904 | if (has_exceptions()) { |
1905 | ejvms = transfer_exceptions_into_jvms(); |
1906 | } |
1907 | |
1908 | ReplacedNodes replaced_nodes = map()->replaced_nodes(); |
1909 | ReplacedNodes replaced_nodes_exception; |
1910 | Node* ex_ctl = top(); |
1911 | |
1912 | SafePointNode* final_state = stop(); |
1913 | |
1914 | // Find all the needed outputs of this call |
1915 | CallProjections callprojs; |
1916 | call->extract_projections(&callprojs, true); |
1917 | |
1918 | Unique_Node_List wl; |
1919 | Node* init_mem = call->in(TypeFunc::Memory); |
1920 | Node* final_mem = final_state->in(TypeFunc::Memory); |
1921 | Node* final_ctl = final_state->in(TypeFunc::Control); |
1922 | Node* final_io = final_state->in(TypeFunc::I_O); |
1923 | |
1924 | // Replace all the old call edges with the edges from the inlining result |
1925 | if (callprojs.fallthrough_catchproj != NULL) {
1926 | C->gvn_replace_by(callprojs.fallthrough_catchproj, final_ctl); |
1927 | } |
1928 | if (callprojs.fallthrough_memproj != NULL) {
1929 | if (final_mem->is_MergeMem()) {
1930 | // The parser's exit MergeMem was not transformed yet but may be optimized.
1931 | final_mem = _gvn.transform(final_mem); |
1932 | } |
1933 | C->gvn_replace_by(callprojs.fallthrough_memproj, final_mem); |
1934 | add_mergemem_users_to_worklist(wl, final_mem); |
1935 | } |
1936 | if (callprojs.fallthrough_ioproj != NULL) {
1937 | C->gvn_replace_by(callprojs.fallthrough_ioproj, final_io); |
1938 | } |
1939 | |
1940 | // Replace the result with the new result if it exists and is used |
1941 | if (callprojs.resproj != NULL && result != NULL) {
1942 | C->gvn_replace_by(callprojs.resproj, result); |
1943 | } |
1944 | |
1945 | if (ejvms == NULL) {
1946 | // No exception edges, so simply kill off those paths
1947 | if (callprojs.catchall_catchproj != NULL) {
1948 | C->gvn_replace_by(callprojs.catchall_catchproj, C->top()); |
1949 | } |
1950 | if (callprojs.catchall_memproj != NULL) {
1951 | C->gvn_replace_by(callprojs.catchall_memproj, C->top()); |
1952 | } |
1953 | if (callprojs.catchall_ioproj != NULL) {
1954 | C->gvn_replace_by(callprojs.catchall_ioproj, C->top()); |
1955 | } |
1956 | // Replace the old exception object with top |
1957 | if (callprojs.exobj != NULL) {
1958 | C->gvn_replace_by(callprojs.exobj, C->top()); |
1959 | } |
1960 | } else { |
1961 | GraphKit ekit(ejvms); |
1962 | |
1963 | // Load my combined exception state into the kit, with all phis transformed: |
1964 | SafePointNode* ex_map = ekit.combine_and_pop_all_exception_states(); |
1965 | replaced_nodes_exception = ex_map->replaced_nodes(); |
1966 | |
1967 | Node* ex_oop = ekit.use_exception_state(ex_map); |
1968 | |
1969 | if (callprojs.catchall_catchproj != NULL) {
1970 | C->gvn_replace_by(callprojs.catchall_catchproj, ekit.control()); |
1971 | ex_ctl = ekit.control(); |
1972 | } |
1973 | if (callprojs.catchall_memproj != NULL) {
1974 | Node* ex_mem = ekit.reset_memory(); |
1975 | C->gvn_replace_by(callprojs.catchall_memproj, ex_mem); |
1976 | add_mergemem_users_to_worklist(wl, ex_mem); |
1977 | } |
1978 | if (callprojs.catchall_ioproj != NULL) {
1979 | C->gvn_replace_by(callprojs.catchall_ioproj, ekit.i_o()); |
1980 | } |
1981 | |
1982 | // Replace the old exception object with the newly created one |
1983 | if (callprojs.exobj != NULL) {
1984 | C->gvn_replace_by(callprojs.exobj, ex_oop); |
1985 | } |
1986 | } |
1987 | |
1988 | // Disconnect the call from the graph |
1989 | call->disconnect_inputs(C); |
1990 | C->gvn_replace_by(call, C->top()); |
1991 | |
1992 | // Clean up any MergeMems that feed other MergeMems since the |
1993 | // optimizer doesn't like that. |
1994 | while (wl.size() > 0) { |
1995 | _gvn.transform(wl.pop()); |
1996 | } |
1997 | |
1998 | if (callprojs.fallthrough_catchproj != NULL && !final_ctl->is_top() && do_replaced_nodes) {
1999 | replaced_nodes.apply(C, final_ctl); |
2000 | } |
2001 | if (!ex_ctl->is_top() && do_replaced_nodes) { |
2002 | replaced_nodes_exception.apply(C, ex_ctl); |
2003 | } |
2004 | } |
2005 | |
2006 | |
2007 | //------------------------------increment_counter------------------------------ |
2008 | // for statistics: increment a VM counter by 1 |
2009 | |
2010 | void GraphKit::increment_counter(address counter_addr) { |
2011 | Node* adr1 = makecon(TypeRawPtr::make(counter_addr)); |
2012 | increment_counter(adr1); |
2013 | } |
2014 | |
2015 | void GraphKit::increment_counter(Node* counter_addr) { |
2016 | int adr_type = Compile::AliasIdxRaw; |
2017 | Node* ctrl = control(); |
2018 | Node* cnt = make_load(ctrl, counter_addr, TypeLong::LONG, T_LONG, adr_type, MemNode::unordered); |
2019 | Node* incr = _gvn.transform(new AddLNode(cnt, _gvn.longcon(1))); |
2020 | store_to_memory(ctrl, counter_addr, incr, T_LONG, adr_type, MemNode::unordered); |
2021 | } |
2022 | |
2023 | |
2024 | //------------------------------uncommon_trap---------------------------------- |
2025 | // Bail out to the interpreter in mid-method. Implemented by calling the |
2026 | // uncommon_trap blob. This helper function inserts a runtime call with the |
2027 | // right debug info. |
2028 | void GraphKit::uncommon_trap(int trap_request, |
2029 | ciKlass* klass, const char* comment, |
2030 | bool must_throw, |
2031 | bool keep_exact_action) { |
2032 | if (failing()) stop(); |
2033 | if (stopped()) return; // trap reachable? |
2034 | |
2035 | // Note: If ProfileTraps is true, and if a deopt. actually |
2036 | // occurs here, the runtime will make sure an MDO exists. There is |
2037 | // no need to call method()->ensure_method_data() at this point. |
2038 | |
2039 | // Set the stack pointer to the right value for reexecution: |
2040 | set_sp(reexecute_sp()); |
2041 | |
2042 | #ifdef ASSERT
2043 | if (!must_throw) { |
2044 | // Make sure the stack has at least enough depth to execute |
2045 | // the current bytecode. |
2046 | int inputs, ignored_depth; |
2047 | if (compute_stack_effects(inputs, ignored_depth)) { |
2048 | assert(sp() >= inputs, "must have enough JVMS stack to execute %s: sp=%d, inputs=%d",
2049 | Bytecodes::name(java_bc()), sp(), inputs);
2050 | } |
2051 | } |
2052 | #endif |
2053 | |
2054 | Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request); |
2055 | Deoptimization::DeoptAction action = Deoptimization::trap_request_action(trap_request); |
2056 | |
2057 | switch (action) { |
2058 | case Deoptimization::Action_maybe_recompile: |
2059 | case Deoptimization::Action_reinterpret: |
2060 | // Temporary fix for 6529811 to allow virtual calls to be sure they |
2061 | // get the chance to go from mono->bi->mega |
2062 | if (!keep_exact_action && |
2063 | Deoptimization::trap_request_index(trap_request) < 0 && |
2064 | too_many_recompiles(reason)) { |
2065 | // This BCI is causing too many recompilations. |
2066 | if (C->log() != NULL) {
2067 | C->log()->elem("observe that='trap_action_change' reason='%s' from='%s' to='none'", |
2068 | Deoptimization::trap_reason_name(reason), |
2069 | Deoptimization::trap_action_name(action)); |
2070 | } |
2071 | action = Deoptimization::Action_none; |
2072 | trap_request = Deoptimization::make_trap_request(reason, action); |
2073 | } else { |
2074 | C->set_trap_can_recompile(true); |
2075 | } |
2076 | break; |
2077 | case Deoptimization::Action_make_not_entrant: |
2078 | C->set_trap_can_recompile(true); |
2079 | break; |
2080 | case Deoptimization::Action_none: |
2081 | case Deoptimization::Action_make_not_compilable: |
2082 | break; |
2083 | default: |
2084 | #ifdef ASSERT
2085 | fatal("unknown action %d: %s", action, Deoptimization::trap_action_name(action));
2086 | #endif |
2087 | break; |
2088 | } |
2089 | |
2090 | if (TraceOptoParse) { |
2091 | char buf[100]; |
2092 | tty->print_cr("Uncommon trap %s at bci:%d", |
2093 | Deoptimization::format_trap_request(buf, sizeof(buf), |
2094 | trap_request), bci()); |
2095 | } |
2096 | |
2097 | CompileLog* log = C->log(); |
2098 | if (log != NULL) {
2099 | int kid = (klass == NULL) ? -1 : log->identify(klass);
2100 | log->begin_elem("uncommon_trap bci='%d'", bci()); |
2101 | char buf[100]; |
2102 | log->print(" %s", Deoptimization::format_trap_request(buf, sizeof(buf), |
2103 | trap_request)); |
2104 | if (kid >= 0) log->print(" klass='%d'", kid); |
2105 | if (comment != NULL) log->print(" comment='%s'", comment);
2106 | log->end_elem(); |
2107 | } |
2108 | |
2109 | // Make sure any guarding test views this path as very unlikely |
2110 | Node *i0 = control()->in(0); |
2111 | if (i0 != NULL && i0->is_If()) { // Found a guarding if test?
2112 | IfNode *iff = i0->as_If(); |
2113 | float f = iff->_prob; // Get prob |
2114 | if (control()->Opcode() == Op_IfTrue) { |
2115 | if (f > PROB_UNLIKELY_MAG(4))
2116 | iff->_prob = PROB_MIN;
2117 | } else {
2118 | if (f < PROB_LIKELY_MAG(4))
2119 | iff->_prob = PROB_MAX;
2120 | } |
2121 | } |
2122 | |
2123 | // Clear out dead values from the debug info. |
2124 | kill_dead_locals(); |
2125 | |
2126 | // Now insert the uncommon trap subroutine call |
2127 | address call_addr = SharedRuntime::uncommon_trap_blob()->entry_point(); |
2128 | const TypePtr* no_memory_effects = NULL;
2129 | // Pass the index of the class to be loaded |
2130 | Node* call = make_runtime_call(RC_NO_LEAF | RC_UNCOMMON | |
2131 | (must_throw ? RC_MUST_THROW : 0), |
2132 | OptoRuntime::uncommon_trap_Type(), |
2133 | call_addr, "uncommon_trap", no_memory_effects, |
2134 | intcon(trap_request)); |
2135 | assert(call->as_CallStaticJava()->uncommon_trap_request() == trap_request,
2136 |        "must extract request correctly from the graph");
2137 | assert(trap_request != 0, "zero value reserved by uncommon_trap_request");
2138 | |
2139 | call->set_req(TypeFunc::ReturnAdr, returnadr()); |
2140 | // The debug info is the only real input to this call. |
2141 | |
2142 | // Halt-and-catch fire here. The above call should never return! |
2143 | HaltNode* halt = new HaltNode(control(), frameptr(), "uncommon trap returned which should never happen" |
2144 | PRODUCT_ONLY(COMMA /*reachable*/false)); |
2145 | _gvn.set_type_bottom(halt); |
2146 | root()->add_req(halt); |
2147 | |
2148 | stop_and_kill_map(); |
2149 | } |
2150 | |
2151 | |
2152 | //--------------------------just_allocated_object------------------------------ |
2153 | // Report the object that was just allocated. |
2154 | // It must be the case that there are no intervening safepoints. |
2155 | // We use this to determine if an object is so "fresh" that |
2156 | // it does not require card marks. |
2157 | Node* GraphKit::just_allocated_object(Node* current_control) { |
2158 | Node* ctrl = current_control; |
2159 | // Object::<init> is invoked right after allocation; most such invoke
2160 | // nodes are reduced, but a Region node can remain from parse time. We
2161 | // check for that pattern and skip the Region if it degraded to a copy.
2162 | if (ctrl != NULL && ctrl->is_Region() && ctrl->req() == 2 &&
2163 | ctrl->as_Region()->is_copy()) { |
2164 | ctrl = ctrl->as_Region()->is_copy(); |
2165 | } |
2166 | if (C->recent_alloc_ctl() == ctrl) { |
2167 | return C->recent_alloc_obj(); |
2168 | } |
2169 | return NULL;
2170 | } |
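     | // Usage sketch (hypothetical caller; names are illustrative, not from
     | // this file). A GC barrier expansion can elide a card mark when the
     | // store target is the object just allocated, since no safepoint can
     | // have intervened:
     | //
     | //   Node* fresh = kit.just_allocated_object(kit.control());
     | //   if (fresh != NULL && fresh == store_base) {
     | //     // storing into a brand-new object: skip the post-barrier
     | //   }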
2171 | |
2172 | |
2173 | /** |
2174 | * Record profiling data exact_kls for Node n with the type system so |
2175 | * that it can propagate it (speculation) |
2176 | * |
2177 | * @param n node that the type applies to |
2178 | * @param exact_kls type from profiling |
2179 |  * @param ptr_kind did profiling see null?
2180 | * |
2181 | * @return node with improved type |
2182 | */ |
2183 | Node* GraphKit::record_profile_for_speculation(Node* n, ciKlass* exact_kls, ProfilePtrKind ptr_kind) { |
2184 | const Type* current_type = _gvn.type(n); |
2185 | assert(UseTypeSpeculation, "type speculation must be on");
2186 | |
2187 | const TypePtr* speculative = current_type->speculative(); |
2188 | |
2189 | // Should the klass from the profile be recorded in the speculative type? |
2190 | if (current_type->would_improve_type(exact_kls, jvms()->depth())) { |
2191 | const TypeKlassPtr* tklass = TypeKlassPtr::make(exact_kls); |
2192 | const TypeOopPtr* xtype = tklass->as_instance_type(); |
2193 | assert(xtype->klass_is_exact(), "Should be exact");
2194 | // Any reason to believe n is not null (from this profiling or a previous one)? |
2195 | assert(ptr_kind != ProfileAlwaysNull, "impossible here");
2196 | const TypePtr* ptr = (ptr_kind == ProfileMaybeNull && current_type->speculative_maybe_null()) ? TypePtr::BOTTOM : TypePtr::NOTNULL; |
2197 | // record the new speculative type's depth |
2198 | speculative = xtype->cast_to_ptr_type(ptr->ptr())->is_ptr(); |
2199 | speculative = speculative->with_inline_depth(jvms()->depth()); |
2200 | } else if (current_type->would_improve_ptr(ptr_kind)) { |
2201 | // Profiling reports a consistent null-ness for this value, so the
2202 | // speculative type can be sharpened to always-null or never-null.
2203 | if (ptr_kind == ProfileAlwaysNull) { |
2204 | speculative = TypePtr::NULL_PTR; |
2205 | } else { |
2206 | assert(ptr_kind == ProfileNeverNull, "nothing else is an improvement");
2207 | const TypePtr* ptr = TypePtr::NOTNULL; |
2208 | if (speculative != NULL) {
2209 | speculative = speculative->cast_to_ptr_type(ptr->ptr())->is_ptr(); |
2210 | } else { |
2211 | speculative = ptr; |
2212 | } |
2213 | } |
2214 | } |
2215 | |
2216 | if (speculative != current_type->speculative()) { |
2217 | // Build a type with a speculative type (what we think we know |
2218 | // about the type but will need a guard when we use it) |
2219 | const TypeOopPtr* spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::OffsetBot, TypeOopPtr::InstanceBot, speculative); |
2220 | // We're changing the type, we need a new CheckCast node to carry |
2221 | // the new type. The new type depends on the control: what |
2222 | // profiling tells us is only valid from here as far as we can |
2223 | // tell. |
2224 | Node* cast = new CheckCastPPNode(control(), n, current_type->remove_speculative()->join_speculative(spec_type)); |
2225 | cast = _gvn.transform(cast); |
2226 | replace_in_map(n, cast); |
2227 | n = cast; |
2228 | } |
2229 | |
2230 | return n; |
2231 | } |
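     | // Illustration (hedged; "string_kls" is a hypothetical ciKlass): if
     | // profiling only ever saw java.lang.String at this point, the call
     | //
     | //   n = record_profile_for_speculation(n, string_kls, ProfileNeverNull);
     | //
     | // narrows n's type to carry speculative=String:NotNull at the current
     | // inline depth, so a later checkcast or instanceof can fold to a single
     | // guarded test that deoptimizes if the speculation was wrong.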
2232 | |
2233 | /** |
2234 | * Record profiling data from receiver profiling at an invoke with the |
2235 | * type system so that it can propagate it (speculation) |
2236 | * |
2237 | * @param n receiver node |
2238 | * |
2239 | * @return node with improved type |
2240 | */ |
2241 | Node* GraphKit::record_profiled_receiver_for_speculation(Node* n) { |
2242 | if (!UseTypeSpeculation) { |
2243 | return n; |
2244 | } |
2245 | ciKlass* exact_kls = profile_has_unique_klass(); |
2246 | ProfilePtrKind ptr_kind = ProfileMaybeNull; |
2247 | if ((java_bc() == Bytecodes::_checkcast || |
2248 | java_bc() == Bytecodes::_instanceof || |
2249 | java_bc() == Bytecodes::_aastore) && |
2250 | method()->method_data()->is_mature()) { |
2251 | ciProfileData* data = method()->method_data()->bci_to_data(bci()); |
2252 | if (data != NULL) {
2253 | if (!data->as_BitData()->null_seen()) { |
2254 | ptr_kind = ProfileNeverNull; |
2255 | } else { |
2256 | assert(data->is_ReceiverTypeData(), "bad profile data type");
2257 | ciReceiverTypeData* call = (ciReceiverTypeData*)data->as_ReceiverTypeData(); |
2258 | uint i = 0; |
2259 | for (; i < call->row_limit(); i++) { |
2260 | ciKlass* receiver = call->receiver(i); |
2261 | if (receiver != NULL) {
2262 | break; |
2263 | } |
2264 | } |
2265 | ptr_kind = (i == call->row_limit()) ? ProfileAlwaysNull : ProfileMaybeNull; |
2266 | } |
2267 | } |
2268 | } |
2269 | return record_profile_for_speculation(n, exact_kls, ptr_kind); |
2270 | } |
2271 | |
2272 | /** |
2273 | * Record profiling data from argument profiling at an invoke with the |
2274 | * type system so that it can propagate it (speculation) |
2275 | * |
2276 | * @param dest_method target method for the call |
2277 | * @param bc what invoke bytecode is this? |
2278 | */ |
2279 | void GraphKit::record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc) { |
2280 | if (!UseTypeSpeculation) { |
2281 | return; |
2282 | } |
2283 | const TypeFunc* tf = TypeFunc::make(dest_method); |
2284 | int nargs = tf->domain()->cnt() - TypeFunc::Parms; |
2285 | int skip = Bytecodes::has_receiver(bc) ? 1 : 0; |
2286 | for (int j = skip, i = 0; j < nargs && i < TypeProfileArgsLimit; j++) { |
2287 | const Type *targ = tf->domain()->field_at(j + TypeFunc::Parms); |
2288 | if (is_reference_type(targ->basic_type())) { |
2289 | ProfilePtrKind ptr_kind = ProfileMaybeNull; |
2290 | ciKlass* better_type = NULL;
2291 | if (method()->argument_profiled_type(bci(), i, better_type, ptr_kind)) { |
2292 | record_profile_for_speculation(argument(j), better_type, ptr_kind); |
2293 | } |
2294 | i++; |
2295 | } |
2296 | } |
2297 | } |
2298 | |
2299 | /** |
2300 | * Record profiling data from parameter profiling at an invoke with |
2301 | * the type system so that it can propagate it (speculation) |
2302 | */ |
2303 | void GraphKit::record_profiled_parameters_for_speculation() { |
2304 | if (!UseTypeSpeculation) { |
2305 | return; |
2306 | } |
2307 | for (int i = 0, j = 0; i < method()->arg_size() ; i++) { |
2308 | if (_gvn.type(local(i))->isa_oopptr()) { |
2309 | ProfilePtrKind ptr_kind = ProfileMaybeNull; |
2310 | ciKlass* better_type = NULL;
2311 | if (method()->parameter_profiled_type(j, better_type, ptr_kind)) { |
2312 | record_profile_for_speculation(local(i), better_type, ptr_kind); |
2313 | } |
2314 | j++; |
2315 | } |
2316 | } |
2317 | } |
2318 | |
2319 | /** |
2320 | * Record profiling data from return value profiling at an invoke with |
2321 | * the type system so that it can propagate it (speculation) |
2322 | */ |
2323 | void GraphKit::record_profiled_return_for_speculation() { |
2324 | if (!UseTypeSpeculation) { |
2325 | return; |
2326 | } |
2327 | ProfilePtrKind ptr_kind = ProfileMaybeNull; |
2328 | ciKlass* better_type = NULL;
2329 | if (method()->return_profiled_type(bci(), better_type, ptr_kind)) { |
2330 | // If profiling reports a single type for the return value, |
2331 | // feed it to the type system so it can propagate it as a |
2332 | // speculative type |
2333 | record_profile_for_speculation(stack(sp()-1), better_type, ptr_kind); |
2334 | } |
2335 | } |
2336 | |
2337 | void GraphKit::round_double_arguments(ciMethod* dest_method) { |
2338 | if (Matcher::strict_fp_requires_explicit_rounding) { |
2339 | // (Note: TypeFunc::make has a cache that makes this fast.) |
2340 | const TypeFunc* tf = TypeFunc::make(dest_method); |
2341 | int nargs = tf->domain()->cnt() - TypeFunc::Parms; |
2342 | for (int j = 0; j < nargs; j++) { |
2343 | const Type *targ = tf->domain()->field_at(j + TypeFunc::Parms); |
2344 | if (targ->basic_type() == T_DOUBLE) { |
2345 | // If any parameters are doubles, they must be rounded before
2346 | // the call; dstore_rounding does the gvn.transform.
2347 | Node *arg = argument(j); |
2348 | arg = dstore_rounding(arg); |
2349 | set_argument(j, arg); |
2350 | } |
2351 | } |
2352 | } |
2353 | } |
2354 | |
2355 | // rounding for strict float precision conformance |
2356 | Node* GraphKit::precision_rounding(Node* n) { |
2357 | if (Matcher::strict_fp_requires_explicit_rounding) { |
2358 | #ifdef IA32 |
2359 | if (UseSSE == 0) { |
2360 | return _gvn.transform(new RoundFloatNode(0, n)); |
2361 | } |
2362 | #else |
2363 | Unimplemented();
2364 | #endif // IA32 |
2365 | } |
2366 | return n; |
2367 | } |
2368 | |
2369 | // rounding for strict double precision conformance |
2370 | Node* GraphKit::dprecision_rounding(Node *n) { |
2371 | if (Matcher::strict_fp_requires_explicit_rounding) { |
2372 | #ifdef IA32 |
2373 | if (UseSSE < 2) { |
2374 | return _gvn.transform(new RoundDoubleNode(0, n)); |
2375 | } |
2376 | #else |
2377 | Unimplemented();
2378 | #endif // IA32 |
2379 | } |
2380 | return n; |
2381 | } |
2382 | |
2383 | // rounding for non-strict double stores |
2384 | Node* GraphKit::dstore_rounding(Node* n) { |
2385 | if (Matcher::strict_fp_requires_explicit_rounding) { |
2386 | #ifdef IA32 |
2387 | if (UseSSE < 2) { |
2388 | return _gvn.transform(new RoundDoubleNode(0, n)); |
2389 | } |
2390 | #else |
2391 | Unimplemented();
2392 | #endif // IA32 |
2393 | } |
2394 | return n; |
2395 | } |
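     | // What the Round{Float,Double} nodes above model (a sketch, not VM
     | // code): on pre-SSE2 IA32 the x87 stack computes in 80-bit extended
     | // precision, so strict rounding is a store/reload through a slot of
     | // the declared width:
     | //
     | //   volatile double slot = x87_value;  // spill narrows 80 -> 64 bits
     | //   double rounded = slot;             // reload the rounded value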
2396 | |
2397 | //============================================================================= |
2398 | // Generate a fast path/slow path idiom. Graph looks like: |
2399 | // [foo] indicates that 'foo' is a parameter |
2400 | // |
2401 | // [in] NULL |
2402 | // \ / |
2403 | // CmpP |
2404 | // Bool ne |
2405 | // If |
2406 | // / \ |
2407 | // True False-<2> |
2408 | // / | |
2409 | // / cast_not_null |
2410 | // Load | | ^ |
2411 | // [fast_test] | | |
2412 | // gvn to opt_test | | |
2413 | // / \ | <1> |
2414 | // True False | |
2415 | // | \\ | |
2416 | // [slow_call] \[fast_result] |
2417 | // Ctl Val \ \ |
2418 | // | \ \ |
2419 | // Catch <1> \ \ |
2420 | // / \ ^ \ \ |
2421 | // Ex No_Ex | \ \ |
2422 | // | \ \ | \ <2> \ |
2423 | // ... \ [slow_res] | | \ [null_result] |
2424 | // \ \--+--+--- | | |
2425 | // \ | / \ | / |
2426 | // --------Region Phi |
2427 | // |
2428 | //============================================================================= |
2429 | // Code is structured as a series of driver functions all called 'do_XXX' that |
2430 | // call a set of helper functions. Helper functions first, then drivers. |
2431 | |
2432 | //------------------------------null_check_oop--------------------------------- |
2433 | // Null check oop. Set the null-path control into *null_control.
2434 | // Make the returned cast-not-null value use the not-null control.
2435 | Node* GraphKit::null_check_oop(Node* value, Node* *null_control, |
2436 | bool never_see_null, |
2437 | bool safe_for_replace, |
2438 | bool speculative) { |
2439 | // Initial NULL check taken path |
2440 | (*null_control) = top(); |
2441 | Node* cast = null_check_common(value, T_OBJECT, false, null_control, speculative); |
2442 | |
2443 | // Generate uncommon_trap: |
2444 | if (never_see_null && (*null_control) != top()) { |
2445 | // If we see an unexpected null at a check-cast we record it and force a |
2446 | // recompile; the offending check-cast will be compiled to handle NULLs. |
2447 | // If we see more than one offending BCI, then all checkcasts in the |
2448 | // method will be compiled to handle NULLs. |
2449 | PreserveJVMState pjvms(this); |
2450 | set_control(*null_control); |
2451 | replace_in_map(value, null()); |
2452 | Deoptimization::DeoptReason reason = Deoptimization::reason_null_check(speculative); |
2453 | uncommon_trap(reason, |
2454 | Deoptimization::Action_make_not_entrant); |
2455 | (*null_control) = top(); // NULL path is dead |
2456 | } |
2457 | if ((*null_control) == top() && safe_for_replace) { |
2458 | replace_in_map(value, cast); |
2459 | } |
2460 | |
2461 | // Cast away null-ness on the result |
2462 | return cast; |
2463 | } |
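     | // Typical caller pattern (mirrors gen_instanceof later in this file):
     | //
     | //   Node* null_ctl = top();
     | //   Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null,
     | //                                       safe_for_replace, speculative);
     | //   // null_ctl stays top() if the null path is dead (trap emitted)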
2464 | |
2465 | //------------------------------opt_iff---------------------------------------- |
2466 | // Optimize the fast-check IfNode. Set the fast-path region slot 2. |
2467 | // Return slow-path control. |
2468 | Node* GraphKit::opt_iff(Node* region, Node* iff) { |
2469 | IfNode *opt_iff = _gvn.transform(iff)->as_If(); |
2470 | |
2471 | // Fast path taken; set region slot 2 |
2472 | Node *fast_taken = _gvn.transform( new IfFalseNode(opt_iff) ); |
2473 | region->init_req(2,fast_taken); // Capture fast-control |
2474 | |
2475 | // Fast path not-taken, i.e. slow path |
2476 | Node *slow_taken = _gvn.transform( new IfTrueNode(opt_iff) ); |
2477 | return slow_taken; |
2478 | } |
2479 | |
2480 | //-----------------------------make_runtime_call------------------------------- |
2481 | Node* GraphKit::make_runtime_call(int flags, |
2482 | const TypeFunc* call_type, address call_addr, |
2483 | const char* call_name, |
2484 | const TypePtr* adr_type, |
2485 | // The following parms are all optional. |
2486 | // The first NULL ends the list. |
2487 | Node* parm0, Node* parm1, |
2488 | Node* parm2, Node* parm3, |
2489 | Node* parm4, Node* parm5, |
2490 | Node* parm6, Node* parm7, |
2491 | Node* parm8) { |
2492 | assert(call_addr != NULL, "must not call NULL targets");
2493 | |
2494 | // Slow-path call |
2495 | bool is_leaf = !(flags & RC_NO_LEAF); |
2496 | bool has_io = (!is_leaf && !(flags & RC_NO_IO)); |
2497 | if (call_name == NULL) {
2498 | assert(!is_leaf, "must supply name for leaf");
2499 | call_name = OptoRuntime::stub_name(call_addr); |
2500 | } |
2501 | CallNode* call; |
2502 | if (!is_leaf) { |
2503 | call = new CallStaticJavaNode(call_type, call_addr, call_name, adr_type); |
2504 | } else if (flags & RC_NO_FP) { |
2505 | call = new CallLeafNoFPNode(call_type, call_addr, call_name, adr_type); |
2506 | } else if (flags & RC_VECTOR){ |
2507 | uint num_bits = call_type->range()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte; |
2508 | call = new CallLeafVectorNode(call_type, call_addr, call_name, adr_type, num_bits); |
2509 | } else { |
2510 | call = new CallLeafNode(call_type, call_addr, call_name, adr_type); |
2511 | } |
2512 | |
2513 | // The following is similar to set_edges_for_java_call, |
2514 | // except that the memory effects of the call are restricted to AliasIdxRaw. |
2515 | |
2516 | // Slow path call has no side-effects, uses few values |
2517 | bool wide_in = !(flags & RC_NARROW_MEM); |
2518 | bool wide_out = (C->get_alias_index(adr_type) == Compile::AliasIdxBot); |
2519 | |
2520 | Node* prev_mem = NULL;
2521 | if (wide_in) { |
2522 | prev_mem = set_predefined_input_for_runtime_call(call); |
2523 | } else { |
2524 | assert(!wide_out, "narrow in => narrow out");
2525 | Node* narrow_mem = memory(adr_type); |
2526 | prev_mem = set_predefined_input_for_runtime_call(call, narrow_mem); |
2527 | } |
2528 | |
2529 | // Hook each parm in order. Stop looking at the first NULL. |
2530 | if (parm0 != NULL) { call->init_req(TypeFunc::Parms+0, parm0);
2531 | if (parm1 != NULL) { call->init_req(TypeFunc::Parms+1, parm1);
2532 | if (parm2 != NULL) { call->init_req(TypeFunc::Parms+2, parm2);
2533 | if (parm3 != NULL) { call->init_req(TypeFunc::Parms+3, parm3);
2534 | if (parm4 != NULL) { call->init_req(TypeFunc::Parms+4, parm4);
2535 | if (parm5 != NULL) { call->init_req(TypeFunc::Parms+5, parm5);
2536 | if (parm6 != NULL) { call->init_req(TypeFunc::Parms+6, parm6);
2537 | if (parm7 != NULL) { call->init_req(TypeFunc::Parms+7, parm7);
2538 | if (parm8 != NULL) { call->init_req(TypeFunc::Parms+8, parm8);
2539 | /* close each nested if ===> */ } } } } } } } } } |
2540 | assert(call->in(call->req()-1) != NULL, "must initialize all parms");
2541 | |
2542 | if (!is_leaf) { |
2543 | // Non-leaves can block and take safepoints: |
2544 | add_safepoint_edges(call, ((flags & RC_MUST_THROW) != 0)); |
2545 | } |
2546 | // Non-leaves can throw exceptions: |
2547 | if (has_io) { |
2548 | call->set_req(TypeFunc::I_O, i_o()); |
2549 | } |
2550 | |
2551 | if (flags & RC_UNCOMMON) { |
2552 | // Set the count to a tiny probability. Cf. Estimate_Block_Frequency. |
2553 | // (An "if" probability corresponds roughly to an unconditional count. |
2554 | // Sort of.) |
2555 | call->set_cnt(PROB_UNLIKELY_MAG(4));
2556 | } |
2557 | |
2558 | Node* c = _gvn.transform(call); |
2559 | assert(c == call, "cannot disappear");
2560 | |
2561 | if (wide_out) { |
2562 | // Slow path call has full side-effects. |
2563 | set_predefined_output_for_runtime_call(call); |
2564 | } else { |
2565 | // Slow path call has few side-effects, and/or sets few values. |
2566 | set_predefined_output_for_runtime_call(call, prev_mem, adr_type); |
2567 | } |
2568 | |
2569 | if (has_io) { |
2570 | set_i_o(_gvn.transform(new ProjNode(call, TypeFunc::I_O))); |
2571 | } |
2572 | return call; |
2573 | |
2574 | } |
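     | // Usage sketch (mirrors the uncommon-trap call earlier in this file):
     | //
     | //   Node* call = make_runtime_call(RC_NO_LEAF | RC_UNCOMMON,
     | //                                  OptoRuntime::uncommon_trap_Type(),
     | //                                  call_addr, "uncommon_trap",
     | //                                  NULL /* wide memory effects */,
     | //                                  intcon(trap_request));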
2575 | |
2576 | // i2b |
2577 | Node* GraphKit::sign_extend_byte(Node* in) { |
2578 | Node* tmp = _gvn.transform(new LShiftINode(in, _gvn.intcon(24))); |
2579 | return _gvn.transform(new RShiftINode(tmp, _gvn.intcon(24))); |
2580 | } |
2581 | |
2582 | // i2s |
2583 | Node* GraphKit::sign_extend_short(Node* in) { |
2584 | Node* tmp = _gvn.transform(new LShiftINode(in, _gvn.intcon(16))); |
2585 | return _gvn.transform(new RShiftINode(tmp, _gvn.intcon(16))); |
2586 | } |
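     | // Worked example of the shift idiom above (Java i2b/i2s semantics on
     | // two's-complement ints):
     | //   (0x1FF   << 24) >> 24 == -1       i.e. (byte)0x1FF    == -1
     | //   (0x18000 << 16) >> 16 == -32768   i.e. (short)0x18000 == -32768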
2587 | |
2588 | //-----------------------------make_native_call------------------------------- |
2589 | Node* GraphKit::make_native_call(address call_addr, const TypeFunc* call_type, uint nargs, ciNativeEntryPoint* nep) { |
2590 | // Select just the actual call args to pass on |
2591 | // [MethodHandle fallback, long addr, HALF addr, ... args , NativeEntryPoint nep] |
2592 | // | | |
2593 | // V V |
2594 | // [ ... args ] |
2595 | uint n_filtered_args = nargs - 4; // -fallback, -addr (2), -nep; |
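     | // Worked example of the filtering (hypothetical signature): for Java
     | // args [fallback, addr_lo, addr_hi, int x, long y, HALF, nep],
     | // nargs == 7 gives n_filtered_args == 3, and the args passed on are
     | // [x, y, HALF]; the leading three and the trailing nep are dropped.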
2596 | ResourceMark rm; |
2597 | Node** argument_nodes = NEW_RESOURCE_ARRAY(Node*, n_filtered_args);
2598 | const Type** arg_types = TypeTuple::fields(n_filtered_args); |
2599 | GrowableArray<VMReg> arg_regs(C->comp_arena(), n_filtered_args, n_filtered_args, VMRegImpl::Bad()); |
2600 | |
2601 | VMReg* argRegs = nep->argMoves(); |
2602 | { |
2603 | for (uint vm_arg_pos = 0, java_arg_read_pos = 0; |
2604 | vm_arg_pos < n_filtered_args; vm_arg_pos++) { |
2605 | uint vm_unfiltered_arg_pos = vm_arg_pos + 3; // +3 to skip fallback handle argument and addr (2 since long) |
2606 | Node* node = argument(vm_unfiltered_arg_pos); |
2607 | const Type* type = call_type->domain()->field_at(TypeFunc::Parms + vm_unfiltered_arg_pos); |
2608 | VMReg reg = type == Type::HALF |
2609 | ? VMRegImpl::Bad() |
2610 | : argRegs[java_arg_read_pos++]; |
2611 | |
2612 | argument_nodes[vm_arg_pos] = node; |
2613 | arg_types[TypeFunc::Parms + vm_arg_pos] = type; |
2614 | arg_regs.at_put(vm_arg_pos, reg); |
2615 | } |
2616 | } |
2617 | |
2618 | uint n_returns = call_type->range()->cnt() - TypeFunc::Parms; |
2619 | GrowableArray<VMReg> ret_regs(C->comp_arena(), n_returns, n_returns, VMRegImpl::Bad()); |
2620 | const Type** ret_types = TypeTuple::fields(n_returns); |
2621 | |
2622 | VMReg* retRegs = nep->returnMoves(); |
2623 | { |
2624 | for (uint vm_ret_pos = 0, java_ret_read_pos = 0; |
2625 | vm_ret_pos < n_returns; vm_ret_pos++) { // 0 or 1 |
2626 | const Type* type = call_type->range()->field_at(TypeFunc::Parms + vm_ret_pos); |
2627 | VMReg reg = type == Type::HALF |
2628 | ? VMRegImpl::Bad() |
2629 | : retRegs[java_ret_read_pos++]; |
2630 | |
2631 | ret_regs.at_put(vm_ret_pos, reg); |
2632 | ret_types[TypeFunc::Parms + vm_ret_pos] = type; |
2633 | } |
2634 | } |
2635 | |
2636 | const TypeFunc* new_call_type = TypeFunc::make( |
2637 | TypeTuple::make(TypeFunc::Parms + n_filtered_args, arg_types), |
2638 | TypeTuple::make(TypeFunc::Parms + n_returns, ret_types) |
2639 | ); |
2640 | |
2641 | if (nep->need_transition()) { |
2642 | RuntimeStub* invoker = SharedRuntime::make_native_invoker(call_addr, |
2643 | nep->shadow_space(), |
2644 | arg_regs, ret_regs); |
2645 | if (invoker == NULL) {
2646 | C->record_failure("native invoker not implemented on this platform");
2647 | return NULL;
2648 | } |
2649 | C->add_native_invoker(invoker); |
2650 | call_addr = invoker->code_begin(); |
2651 | } |
2652 | assert(call_addr != NULL, "sanity");
2653 | |
2654 | CallNativeNode* call = new CallNativeNode(new_call_type, call_addr, nep->name(), TypePtr::BOTTOM, |
2655 | arg_regs, |
2656 | ret_regs, |
2657 | nep->shadow_space(), |
2658 | nep->need_transition()); |
2659 | |
2660 | if (call->_need_transition) { |
2661 | add_safepoint_edges(call); |
2662 | } |
2663 | |
2664 | set_predefined_input_for_runtime_call(call); |
2665 | |
2666 | for (uint i = 0; i < n_filtered_args; i++) { |
2667 | call->init_req(i + TypeFunc::Parms, argument_nodes[i]); |
2668 | } |
2669 | |
2670 | Node* c = gvn().transform(call); |
2671 | assert(c == call, "cannot disappear");
2672 | |
2673 | set_predefined_output_for_runtime_call(call); |
2674 | |
2675 | Node* ret; |
2676 | if (method() == NULL || method()->return_type()->basic_type() == T_VOID) {
2677 | ret = top(); |
2678 | } else { |
2679 | ret = gvn().transform(new ProjNode(call, TypeFunc::Parms)); |
2680 | // Unpack native results if needed |
2681 | // Need this method type since it's unerased |
2682 | switch (nep->method_type()->rtype()->basic_type()) { |
2683 | case T_CHAR: |
2684 | ret = _gvn.transform(new AndINode(ret, _gvn.intcon(0xFFFF))); |
2685 | break; |
2686 | case T_BYTE: |
2687 | ret = sign_extend_byte(ret); |
2688 | break; |
2689 | case T_SHORT: |
2690 | ret = sign_extend_short(ret); |
2691 | break; |
2692 | default: // do nothing |
2693 | break; |
2694 | } |
2695 | } |
2696 | |
2697 | push_node(method()->return_type()->basic_type(), ret); |
2698 | |
2699 | return call; |
2700 | } |
2701 | |
2702 | //------------------------------merge_memory----------------------------------- |
2703 | // Merge memory from one path into the current memory state. |
2704 | void GraphKit::merge_memory(Node* new_mem, Node* region, int new_path) { |
2705 | for (MergeMemStream mms(merged_memory(), new_mem->as_MergeMem()); mms.next_non_empty2(); ) { |
2706 | Node* old_slice = mms.force_memory(); |
2707 | Node* new_slice = mms.memory2(); |
2708 | if (old_slice != new_slice) { |
2709 | PhiNode* phi; |
2710 | if (old_slice->is_Phi() && old_slice->as_Phi()->region() == region) { |
2711 | if (mms.is_empty()) { |
2712 | // clone base memory Phi's inputs for this memory slice |
2713 | assert(old_slice == mms.base_memory(), "sanity");
2714 | phi = PhiNode::make(region, NULL, Type::MEMORY, mms.adr_type(C));
2715 | _gvn.set_type(phi, Type::MEMORY); |
2716 | for (uint i = 1; i < phi->req(); i++) { |
2717 | phi->init_req(i, old_slice->in(i)); |
2718 | } |
2719 | } else { |
2720 | phi = old_slice->as_Phi(); // Phi was generated already |
2721 | } |
2722 | } else { |
2723 | phi = PhiNode::make(region, old_slice, Type::MEMORY, mms.adr_type(C)); |
2724 | _gvn.set_type(phi, Type::MEMORY); |
2725 | } |
2726 | phi->set_req(new_path, new_slice); |
2727 | mms.set_memory(phi); |
2728 | } |
2729 | } |
2730 | } |
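     | // Usage sketch (hypothetical names): after joining a fast and a slow
     | // path in a two-input Region, fold the slow path's memory state in as
     | // path 2; Phis are created only for slices that actually differ:
     | //
     | //   RegionNode* region = new RegionNode(3);
     | //   region->init_req(1, fast_ctl);
     | //   region->init_req(2, slow_ctl);
     | //   merge_memory(slow_merged_mem, region, 2);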
2731 | |
2732 | //------------------------------make_slow_call_ex------------------------------ |
2733 | // Make the exception handler hookups for the slow call |
2734 | void GraphKit::make_slow_call_ex(Node* call, ciInstanceKlass* ex_klass, bool separate_io_proj, bool deoptimize) { |
2735 | if (stopped()) return; |
2736 | |
2737 | // Make a catch node with just two handlers: fall-through and catch-all |
2738 | Node* i_o = _gvn.transform( new ProjNode(call, TypeFunc::I_O, separate_io_proj) ); |
2739 | Node* catc = _gvn.transform( new CatchNode(control(), i_o, 2) ); |
2740 | Node* norm = _gvn.transform( new CatchProjNode(catc, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci) ); |
2741 | Node* excp = _gvn.transform( new CatchProjNode(catc, CatchProjNode::catch_all_index, CatchProjNode::no_handler_bci) ); |
2742 | |
2743 | { PreserveJVMState pjvms(this); |
2744 | set_control(excp); |
2745 | set_i_o(i_o); |
2746 | |
2747 | if (excp != top()) { |
2748 | if (deoptimize) { |
2749 | // Deoptimize if an exception is caught. Don't construct exception state in this case. |
2750 | uncommon_trap(Deoptimization::Reason_unhandled, |
2751 | Deoptimization::Action_none); |
2752 | } else { |
2753 | // Create an exception state also. |
2754 | // Use an exact type if the caller has a specific exception. |
2755 | const Type* ex_type = TypeOopPtr::make_from_klass_unique(ex_klass)->cast_to_ptr_type(TypePtr::NotNull); |
2756 | Node* ex_oop = new CreateExNode(ex_type, control(), i_o); |
2757 | add_exception_state(make_exception_state(_gvn.transform(ex_oop))); |
2758 | } |
2759 | } |
2760 | } |
2761 | |
2762 | // Get the no-exception control from the CatchNode. |
2763 | set_control(norm); |
2764 | } |
2765 | |
2766 | static IfNode* gen_subtype_check_compare(Node* ctrl, Node* in1, Node* in2, BoolTest::mask test, float p, PhaseGVN& gvn, BasicType bt) { |
2767 | Node* cmp = NULL;
2768 | switch(bt) { |
2769 | case T_INT: cmp = new CmpINode(in1, in2); break; |
2770 | case T_ADDRESS: cmp = new CmpPNode(in1, in2); break; |
2771 | default: fatal("unexpected comparison type %s", type2name(bt));
2772 | } |
2773 | gvn.transform(cmp); |
2774 | Node* bol = gvn.transform(new BoolNode(cmp, test)); |
2775 | IfNode* iff = new IfNode(ctrl, bol, p, COUNT_UNKNOWN);
2776 | gvn.transform(iff); |
2777 | if (!bol->is_Con()) gvn.record_for_igvn(iff); |
2778 | return iff; |
2779 | } |
2780 | |
2781 | //-------------------------------gen_subtype_check----------------------------- |
2782 | // Generate a subtyping check. Takes as input the subtype and supertype. |
2783 | // Returns 2 values: sets the default control() to the true path and returns |
2784 | // the false path. Only reads invariant memory; sets no (visible) memory. |
2785 | // The PartialSubtypeCheckNode sets the hidden 1-word cache in the encoding |
2786 | // but that's not exposed to the optimizer. This call also doesn't take in an |
2787 | // Object; if you wish to check an Object you need to load the Object's class |
2788 | // prior to coming here. |
2789 | Node* Phase::gen_subtype_check(Node* subklass, Node* superklass, Node** ctrl, Node* mem, PhaseGVN& gvn) { |
2790 | Compile* C = gvn.C; |
2791 | if ((*ctrl)->is_top()) { |
2792 | return C->top(); |
2793 | } |
2794 | |
2795 | // Fast check for identical types, perhaps identical constants. |
2796 | // The types can even be identical non-constants, in cases |
2797 | // involving Array.newInstance, Object.clone, etc. |
2798 | if (subklass == superklass) |
2799 | return C->top(); // false path is dead; no test needed. |
2800 | |
2801 | if (gvn.type(superklass)->singleton()) { |
2802 | ciKlass* superk = gvn.type(superklass)->is_klassptr()->klass(); |
2803 | ciKlass* subk = gvn.type(subklass)->is_klassptr()->klass(); |
2804 | |
2805 | // In the common case of an exact superklass, try to fold up the |
2806 | // test before generating code. You may ask, why not just generate |
2807 | // the code and then let it fold up? The answer is that the generated |
2808 | // code will necessarily include null checks, which do not always |
2809 | // completely fold away. If they are also needless, then they turn |
2810 | // into a performance loss. Example: |
2811 | // Foo[] fa = blah(); Foo x = fa[0]; fa[1] = x; |
2812 | // Here, the type of 'fa' is often exact, so the store check |
2813 | // of fa[1]=x will fold up, without testing the nullness of x. |
2814 | switch (C->static_subtype_check(superk, subk)) { |
2815 | case Compile::SSC_always_false: |
2816 | { |
2817 | Node* always_fail = *ctrl; |
2818 | *ctrl = gvn.C->top(); |
2819 | return always_fail; |
2820 | } |
2821 | case Compile::SSC_always_true: |
2822 | return C->top(); |
2823 | case Compile::SSC_easy_test: |
2824 | { |
2825 | // Just do a direct pointer compare and be done. |
2826 | IfNode* iff = gen_subtype_check_compare(*ctrl, subklass, superklass, BoolTest::eq, PROB_STATIC_FREQUENT, gvn, T_ADDRESS);
2827 | *ctrl = gvn.transform(new IfTrueNode(iff)); |
2828 | return gvn.transform(new IfFalseNode(iff)); |
2829 | } |
2830 | case Compile::SSC_full_test: |
2831 | break; |
2832 | default: |
2833 | ShouldNotReachHere();
2834 | } |
2835 | } |
2836 | |
2837 | // %%% Possible further optimization: Even if the superklass is not exact, |
2838 | // if the subklass is the unique subtype of the superklass, the check |
2839 | // will always succeed. We could leave a dependency behind to ensure this. |
2840 | |
2841 | // First load the super-klass's check-offset |
2842 | Node *p1 = gvn.transform(new AddPNode(superklass, superklass, gvn.MakeConX(in_bytes(Klass::super_check_offset_offset()))));
2843 | Node* m = C->immutable_memory(); |
2844 | Node *chk_off = gvn.transform(new LoadINode(NULL, m, p1, gvn.type(p1)->is_ptr(), TypeInt::INT, MemNode::unordered));
2845 | int cacheoff_con = in_bytes(Klass::secondary_super_cache_offset()); |
2846 | bool might_be_cache = (gvn.find_int_con(chk_off, cacheoff_con) == cacheoff_con); |
2847 | |
2848 | // Load from the sub-klass's super-class display list, or a 1-word cache of |
2849 | // the secondary superclass list, or a failing value with a sentinel offset |
2850 | // if the super-klass is an interface or exceptionally deep in the Java |
2851 | // hierarchy and we have to scan the secondary superclass list the hard way. |
2852 | // Worst-case type is a little odd: NULL is allowed as a result (usually |
2853 | // klass loads can never produce a NULL). |
2854 | Node *chk_off_X = chk_off; |
2855 | #ifdef _LP64
2856 | chk_off_X = gvn.transform(new ConvI2LNode(chk_off_X)); |
2857 | #endif |
2858 | Node *p2 = gvn.transform(new AddPNode(subklass,subklass,chk_off_X)); |
2859 | // For some types like interfaces the following loadKlass is from a 1-word |
2860 | // cache which is mutable so can't use immutable memory. Other |
2861 | // types load from the super-class display table which is immutable. |
2862 | Node *kmem = C->immutable_memory(); |
2863 | // secondary_super_cache is not immutable but can be treated as such because: |
2864 | // - no ideal node writes to it in a way that could cause an |
2865 | // incorrect/missed optimization of the following Load. |
2866 | // - it's a cache so, worse case, not reading the latest value |
2867 | // wouldn't cause incorrect execution |
2868 | if (might_be_cache && mem != NULL) {
2869 | kmem = mem->is_MergeMem() ? mem->as_MergeMem()->memory_at(C->get_alias_index(gvn.type(p2)->is_ptr())) : mem; |
2870 | } |
2871 | Node *nkls = gvn.transform(LoadKlassNode::make(gvn, NULL, kmem, p2, gvn.type(p2)->is_ptr(), TypeInstKlassPtr::OBJECT_OR_NULL));
2872 | |
2873 | // Compile speed common case: ARE a subtype and we canNOT fail |
2874 | if( superklass == nkls ) |
2875 | return C->top(); // false path is dead; no test needed. |
2876 | |
2877 | // See if we get an immediate positive hit. Happens roughly 83% of the |
2878 | // time. Test to see if the value loaded just previously from the subklass |
2879 | // is exactly the superklass. |
2880 | IfNode *iff1 = gen_subtype_check_compare(*ctrl, superklass, nkls, BoolTest::eq, PROB_LIKELY(0.83f), gvn, T_ADDRESS);
2881 | Node *iftrue1 = gvn.transform( new IfTrueNode (iff1)); |
2882 | *ctrl = gvn.transform(new IfFalseNode(iff1)); |
2883 | |
2884 | // Compile speed common case: Check for being deterministic right now. If |
2885 | // chk_off is a constant and not equal to cacheoff then we are NOT a |
2886 | // subklass. In this case we need exactly the 1 test above and we can |
2887 | // return those results immediately. |
2888 | if (!might_be_cache) { |
2889 | Node* not_subtype_ctrl = *ctrl; |
2890 | *ctrl = iftrue1; // We need exactly the 1 test above |
2891 | return not_subtype_ctrl; |
2892 | } |
2893 | |
2894 | // Gather the various success & failures here |
2895 | RegionNode *r_ok_subtype = new RegionNode(4); |
2896 | gvn.record_for_igvn(r_ok_subtype); |
2897 | RegionNode *r_not_subtype = new RegionNode(3); |
2898 | gvn.record_for_igvn(r_not_subtype); |
2899 | |
2900 | r_ok_subtype->init_req(1, iftrue1); |
2901 | |
2902 | // Check for immediate negative hit. Happens roughly 11% of the time (which |
2903 | // is roughly 63% of the remaining cases). Test to see if the loaded |
2904 | // check-offset points into the subklass display list or the 1-element |
2905 | // cache. If it points to the display (and NOT the cache) and the display |
2906 | // missed then it's not a subtype. |
2907 | Node *cacheoff = gvn.intcon(cacheoff_con); |
2908 | IfNode *iff2 = gen_subtype_check_compare(*ctrl, chk_off, cacheoff, BoolTest::ne, PROB_LIKELY(0.63f), gvn, T_INT);
2909 | r_not_subtype->init_req(1, gvn.transform(new IfTrueNode (iff2))); |
2910 | *ctrl = gvn.transform(new IfFalseNode(iff2)); |
2911 | |
2912 | // Check for self. Very rare to get here, but it is taken 1/3 the time. |
2913 | // No performance impact (too rare) but allows sharing of secondary arrays |
2914 | // which has some footprint reduction. |
2915 | IfNode *iff3 = gen_subtype_check_compare(*ctrl, subklass, superklass, BoolTest::eq, PROB_LIKELY(0.36f), gvn, T_ADDRESS);
2916 | r_ok_subtype->init_req(2, gvn.transform(new IfTrueNode(iff3))); |
2917 | *ctrl = gvn.transform(new IfFalseNode(iff3)); |
2918 | |
2919 | // -- Roads not taken here: -- |
2920 | // We could also have chosen to perform the self-check at the beginning |
2921 | // of this code sequence, as the assembler does. This would not pay off |
2922 | // the same way, since the optimizer, unlike the assembler, can perform |
2923 | // static type analysis to fold away many successful self-checks. |
2924 | // Non-foldable self checks work better here in second position, because |
2925 | // the initial primary superclass check subsumes a self-check for most |
2926 | // types. An exception would be a secondary type like array-of-interface, |
2927 | // which does not appear in its own primary supertype display. |
2928 | // Finally, we could have chosen to move the self-check into the |
2929 | // PartialSubtypeCheckNode, and from there out-of-line in a platform |
2930 | // dependent manner. But it is worthwhile to have the check here, |
2931 | // where it can perhaps be optimized. The cost in code space is
2932 | // small (register compare, branch). |
2933 | |
2934 | // Now do a linear scan of the secondary super-klass array. Again, no real |
2935 | // performance impact (too rare) but it's gotta be done. |
2936 | // Since the code is rarely used, there is no penalty for moving it |
2937 | // out of line, and it can only improve I-cache density. |
2938 | // The decision to inline or out-of-line this final check is platform |
2939 | // dependent, and is found in the AD file definition of PartialSubtypeCheck. |
2940 | Node* psc = gvn.transform( |
2941 | new PartialSubtypeCheckNode(*ctrl, subklass, superklass)); |
2942 | |
2943 | IfNode *iff4 = gen_subtype_check_compare(*ctrl, psc, gvn.zerocon(T_OBJECT), BoolTest::ne, PROB_FAIR, gvn, T_ADDRESS);
2944 | r_not_subtype->init_req(2, gvn.transform(new IfTrueNode (iff4))); |
2945 | r_ok_subtype ->init_req(3, gvn.transform(new IfFalseNode(iff4))); |
2946 | |
2947 | // Return false path; set default control to true path. |
2948 | *ctrl = gvn.transform(r_ok_subtype); |
2949 | return gvn.transform(r_not_subtype); |
2950 | } |
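     | // Shape of the full check above, summarized as pseudocode:
     | //
     | //   off = super->super_check_offset
     | //   k   = *(sub + off)
     | //   if (k == super)                     goto ok;    // hit, ~83%
     | //   if (off != secondary_cache_offset)  goto fail;  // display miss
     | //   if (sub == super)                   goto ok;    // self check
     | //   PartialSubtypeCheck(sub, super)                 // scan secondaries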
2951 | |
2952 | Node* GraphKit::gen_subtype_check(Node* obj_or_subklass, Node* superklass) { |
2953 | bool expand_subtype_check = C->post_loop_opts_phase() || // macro node expansion is over |
2954 | ExpandSubTypeCheckAtParseTime; // forced expansion |
2955 | if (expand_subtype_check) { |
2956 | MergeMemNode* mem = merged_memory(); |
2957 | Node* ctrl = control(); |
2958 | Node* subklass = obj_or_subklass; |
2959 | if (!_gvn.type(obj_or_subklass)->isa_klassptr()) { |
2960 | subklass = load_object_klass(obj_or_subklass); |
2961 | } |
2962 | |
2963 | Node* n = Phase::gen_subtype_check(subklass, superklass, &ctrl, mem, _gvn); |
2964 | set_control(ctrl); |
2965 | return n; |
2966 | } |
2967 | |
2968 | Node* check = _gvn.transform(new SubTypeCheckNode(C, obj_or_subklass, superklass)); |
2969 | Node* bol = _gvn.transform(new BoolNode(check, BoolTest::eq)); |
2970 | IfNode* iff = create_and_xform_if(control(), bol, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
2971 | set_control(_gvn.transform(new IfTrueNode(iff))); |
2972 | return _gvn.transform(new IfFalseNode(iff)); |
2973 | } |
2974 | |
2975 | // Profile-driven exact type check: |
2976 | Node* GraphKit::type_check_receiver(Node* receiver, ciKlass* klass, |
2977 | float prob, |
2978 | Node* *casted_receiver) { |
2979 | assert(!klass->is_interface(), "no exact type check on interfaces");
2980 | |
2981 | const TypeKlassPtr* tklass = TypeKlassPtr::make(klass); |
2982 | Node* recv_klass = load_object_klass(receiver); |
2983 | Node* want_klass = makecon(tklass); |
2984 | Node* cmp = _gvn.transform(new CmpPNode(recv_klass, want_klass)); |
2985 | Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq)); |
2986 | IfNode* iff = create_and_xform_if(control(), bol, prob, COUNT_UNKNOWN);
2987 | set_control( _gvn.transform(new IfTrueNode (iff))); |
2988 | Node* fail = _gvn.transform(new IfFalseNode(iff)); |
2989 | |
2990 | if (!stopped()) { |
2991 | const TypeOopPtr* receiver_type = _gvn.type(receiver)->isa_oopptr(); |
2992 | const TypeOopPtr* recvx_type = tklass->as_instance_type(); |
2993 | assert(recvx_type->klass_is_exact(), "");
2994 | |
2995 | if (!receiver_type->higher_equal(recvx_type)) { // ignore redundant casts |
2996 | // Subsume downstream occurrences of receiver with a cast to |
2997 | // recv_xtype, since now we know what the type will be. |
2998 | Node* cast = new CheckCastPPNode(control(), receiver, recvx_type); |
2999 | (*casted_receiver) = _gvn.transform(cast); |
3000 | // (User must make the replace_in_map call.) |
3001 | } |
3002 | } |
3003 | |
3004 | return fail; |
3005 | } |
3006 | |
3007 | //------------------------------subtype_check_receiver------------------------- |
3008 | Node* GraphKit::subtype_check_receiver(Node* receiver, ciKlass* klass, |
3009 | Node** casted_receiver) { |
3010 | const TypeKlassPtr* tklass = TypeKlassPtr::make(klass); |
3011 | Node* want_klass = makecon(tklass); |
3012 | |
3013 | Node* slow_ctl = gen_subtype_check(receiver, want_klass); |
3014 | |
3015 | // Ignore interface type information until interface types are properly tracked. |
3016 | if (!stopped() && !klass->is_interface()) { |
3017 | const TypeOopPtr* receiver_type = _gvn.type(receiver)->isa_oopptr(); |
3018 | const TypeOopPtr* recv_type = tklass->cast_to_exactness(false)->is_klassptr()->as_instance_type(); |
3019 | if (!receiver_type->higher_equal(recv_type)) { // ignore redundant casts |
3020 | Node* cast = new CheckCastPPNode(control(), receiver, recv_type); |
3021 | (*casted_receiver) = _gvn.transform(cast); |
3022 | } |
3023 | } |
3024 | |
3025 | return slow_ctl; |
3026 | } |
3027 | |
3028 | //------------------------------seems_never_null------------------------------- |
3029 | // Use null_seen information if it is available from the profile. |
3030 | // If we see an unexpected null at a type check we record it and force a |
3031 | // recompile; the offending check will be recompiled to handle NULLs. |
3032 | // If we see several offending BCIs, then all checks in the |
3033 | // method will be recompiled. |
3034 | bool GraphKit::seems_never_null(Node* obj, ciProfileData* data, bool& speculating) { |
3035 | speculating = !_gvn.type(obj)->speculative_maybe_null(); |
3036 | Deoptimization::DeoptReason reason = Deoptimization::reason_null_check(speculating); |
3037 | if (UncommonNullCast // Cutout for this technique |
3038 | && obj != null() // And not the -Xcomp stupid case? |
3039 | && !too_many_traps(reason) |
3040 | ) { |
3041 | if (speculating) { |
3042 | return true; |
3043 | } |
3044 | if (data == NULL)
3045 | // Edge case: no mature data. Be optimistic here. |
3046 | return true; |
3047 | // If the profile has not seen a null, assume it won't happen. |
3048 | assert(java_bc() == Bytecodes::_checkcast ||
3049 |        java_bc() == Bytecodes::_instanceof ||
3050 |        java_bc() == Bytecodes::_aastore, "MDO must collect null_seen bit here");
3051 | return !data->as_BitData()->null_seen(); |
3052 | } |
3053 | speculating = false; |
3054 | return false; |
3055 | } |
3056 | |
3057 | void GraphKit::guard_klass_being_initialized(Node* klass) { |
3058 | int init_state_off = in_bytes(InstanceKlass::init_state_offset()); |
3059 | Node* adr = basic_plus_adr(top(), klass, init_state_off); |
3060 | Node* init_state = LoadNode::make(_gvn, NULL, immutable_memory(), adr,
3061 | adr->bottom_type()->is_ptr(), TypeInt::BYTE, |
3062 | T_BYTE, MemNode::unordered); |
3063 | init_state = _gvn.transform(init_state); |
3064 | |
3065 | Node* being_initialized_state = makecon(TypeInt::make(InstanceKlass::being_initialized)); |
3066 | |
3067 | Node* chk = _gvn.transform(new CmpINode(being_initialized_state, init_state)); |
3068 | Node* tst = _gvn.transform(new BoolNode(chk, BoolTest::eq)); |
3069 | |
3070 | { BuildCutout unless(this, tst, PROB_MAX);
3071 | uncommon_trap(Deoptimization::Reason_initialized, Deoptimization::Action_reinterpret); |
3072 | } |
3073 | } |
3074 | |
3075 | void GraphKit::guard_init_thread(Node* klass) { |
3076 | int init_thread_off = in_bytes(InstanceKlass::init_thread_offset()); |
3077 | Node* adr = basic_plus_adr(top(), klass, init_thread_off); |
3078 | |
3079 | Node* init_thread = LoadNode::make(_gvn, NULL, immutable_memory(), adr,
3080 | adr->bottom_type()->is_ptr(), TypePtr::NOTNULL, |
3081 | T_ADDRESS, MemNode::unordered); |
3082 | init_thread = _gvn.transform(init_thread); |
3083 | |
3084 | Node* cur_thread = _gvn.transform(new ThreadLocalNode()); |
3085 | |
3086 | Node* chk = _gvn.transform(new CmpPNode(cur_thread, init_thread)); |
3087 | Node* tst = _gvn.transform(new BoolNode(chk, BoolTest::eq)); |
3088 | |
3089 | { BuildCutout unless(this, tst, PROB_MAX);
3090 | uncommon_trap(Deoptimization::Reason_uninitialized, Deoptimization::Action_none); |
3091 | } |
3092 | } |
3093 | |
3094 | void GraphKit::clinit_barrier(ciInstanceKlass* ik, ciMethod* context) { |
3095 | if (ik->is_being_initialized()) { |
3096 | if (C->needs_clinit_barrier(ik, context)) { |
3097 | Node* klass = makecon(TypeKlassPtr::make(ik)); |
3098 | guard_klass_being_initialized(klass); |
3099 | guard_init_thread(klass); |
3100 | insert_mem_bar(Op_MemBarCPUOrder); |
3101 | } |
3102 | } else if (ik->is_initialized()) { |
3103 | return; // no barrier needed |
3104 | } else { |
3105 | uncommon_trap(Deoptimization::Reason_uninitialized, |
3106 | Deoptimization::Action_reinterpret, |
3107 | NULL);
3108 | } |
3109 | } |
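     | // Guard shape emitted above, as pseudocode: execution proceeds only
     | // while the klass is being initialized by the current thread; any
     | // other state traps back to the interpreter:
     | //
     | //   if (ik->init_state  != being_initialized) uncommon_trap(initialized);
     | //   if (ik->init_thread != current_thread)    uncommon_trap(uninitialized);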
3110 | |
3111 | //------------------------maybe_cast_profiled_receiver------------------------- |
3112 | // If the profile has seen exactly one type, narrow to exactly that type. |
3113 | // Subsequent type checks will always fold up. |
3114 | Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj, |
3115 | ciKlass* require_klass, |
3116 | ciKlass* spec_klass, |
3117 | bool safe_for_replace) { |
3118 | if (!UseTypeProfile || !TypeProfileCasts) return NULL;
3119 | |
3120 | Deoptimization::DeoptReason reason = Deoptimization::reason_class_check(spec_klass != NULL);
3121 | |
3122 | // Make sure we haven't already deoptimized from this tactic. |
3123 | if (too_many_traps_or_recompiles(reason)) |
3124 | return NULL;
3125 | |
3126 | // (No, this isn't a call, but it's enough like a virtual call |
3127 | // to use the same ciMethod accessor to get the profile info...) |
3128 | // If we have a speculative type use it instead of profiling (which |
3129 | // may not help us) |
3130 | ciKlass* exact_kls = spec_klass == NULL ? profile_has_unique_klass() : spec_klass;
3131 | if (exact_kls != NULL) { // no cast failures here
3132 | if (require_klass == NULL ||
3133 | C->static_subtype_check(require_klass, exact_kls) == Compile::SSC_always_true) { |
3134 | // If we narrow the type to match what the type profile sees or |
3135 | // the speculative type, we can then remove the rest of the |
3136 | // cast. |
3137 | // This is a win, even if the exact_kls is very specific, |
3138 | // because downstream operations, such as method calls, |
3139 | // will often benefit from the sharper type. |
3140 | Node* exact_obj = not_null_obj; // will get updated in place... |
3141 | Node* slow_ctl = type_check_receiver(exact_obj, exact_kls, 1.0, |
3142 | &exact_obj); |
3143 | { PreserveJVMState pjvms(this); |
3144 | set_control(slow_ctl); |
3145 | uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile); |
3146 | } |
3147 | if (safe_for_replace) { |
3148 | replace_in_map(not_null_obj, exact_obj); |
3149 | } |
3150 | return exact_obj; |
3151 | } |
3152 | // assert(ssc == Compile::SSC_always_true)... except maybe the profile lied to us. |
3153 | } |
3154 | |
3155 | return NULL;
3156 | } |
3157 | |
3158 | /** |
3159 | * Cast obj to type and emit guard unless we had too many traps here |
3160 | * already |
3161 | * |
3162 | * @param obj node being casted |
3163 | * @param type type to cast the node to |
3164 | * @param not_null true if we know node cannot be null |
3165 | */ |
3166 | Node* GraphKit::maybe_cast_profiled_obj(Node* obj, |
3167 | ciKlass* type, |
3168 | bool not_null) { |
3169 | if (stopped()) { |
3170 | return obj; |
3171 | } |
3172 | |
3173 | // type == NULL if profiling tells us this object is always null |
3174 | if (type != NULL) {
3175 | Deoptimization::DeoptReason class_reason = Deoptimization::Reason_speculate_class_check; |
3176 | Deoptimization::DeoptReason null_reason = Deoptimization::Reason_speculate_null_check; |
3177 | |
3178 | if (!too_many_traps_or_recompiles(null_reason) && |
3179 | !too_many_traps_or_recompiles(class_reason)) { |
3180 | Node* not_null_obj = NULL;
3181 | // not_null is true if we know the object is not null and |
3182 | // there's no need for a null check |
3183 | if (!not_null) { |
3184 | Node* null_ctl = top(); |
3185 | not_null_obj = null_check_oop(obj, &null_ctl, true, true, true); |
3186 | assert(null_ctl->is_top(), "no null control here");
3187 | } else { |
3188 | not_null_obj = obj; |
3189 | } |
3190 | |
3191 | Node* exact_obj = not_null_obj; |
3192 | ciKlass* exact_kls = type; |
3193 | Node* slow_ctl = type_check_receiver(exact_obj, exact_kls, 1.0, |
3194 | &exact_obj); |
3195 | { |
3196 | PreserveJVMState pjvms(this); |
3197 | set_control(slow_ctl); |
3198 | uncommon_trap_exact(class_reason, Deoptimization::Action_maybe_recompile); |
3199 | } |
3200 | replace_in_map(not_null_obj, exact_obj); |
3201 | obj = exact_obj; |
3202 | } |
3203 | } else { |
3204 | if (!too_many_traps_or_recompiles(Deoptimization::Reason_null_assert)) { |
3205 | Node* exact_obj = null_assert(obj); |
3206 | replace_in_map(obj, exact_obj); |
3207 | obj = exact_obj; |
3208 | } |
3209 | } |
3210 | return obj; |
3211 | } |
3212 | |
3213 | //-------------------------------gen_instanceof-------------------------------- |
3214 | // Generate an instance-of idiom. Used by both the instance-of bytecode |
3215 | // and the reflective instance-of call. |
3216 | Node* GraphKit::gen_instanceof(Node* obj, Node* superklass, bool safe_for_replace) { |
3217 | kill_dead_locals(); // Benefit all the uncommon traps |
3218 | assert( !stopped(), "dead parse path should be checked in callers" );
3219 | assert(!TypePtr::NULL_PTR->higher_equal(_gvn.type(superklass)->is_klassptr()),
3220 | "must check for not-null not-dead klass in callers");
3221 | |
3222 | // Make the merge point |
3223 | enum { _obj_path = 1, _fail_path, _null_path, PATH_LIMIT }; |
3224 | RegionNode* region = new RegionNode(PATH_LIMIT); |
3225 | Node* phi = new PhiNode(region, TypeInt::BOOL); |
3226 | C->set_has_split_ifs(true); // Has chance for split-if optimization |
3227 | |
3228 | ciProfileData* data = NULL;
3229 | if (java_bc() == Bytecodes::_instanceof) { // Only for the bytecode |
3230 | data = method()->method_data()->bci_to_data(bci()); |
3231 | } |
3232 | bool speculative_not_null = false; |
3233 | bool never_see_null = (ProfileDynamicTypes // aggressive use of profile |
3234 | && seems_never_null(obj, data, speculative_not_null)); |
3235 | |
3236 | // Null check; get the cast pointer; set region slot 3
3237 | Node* null_ctl = top(); |
3238 | Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null); |
3239 | |
3240 | // If not_null_obj is dead, only null-path is taken |
3241 | if (stopped()) { // Doing instance-of on a NULL? |
3242 | set_control(null_ctl); |
3243 | return intcon(0); |
3244 | } |
3245 | region->init_req(_null_path, null_ctl); |
3246 | phi ->init_req(_null_path, intcon(0)); // Set null path value |
3247 | if (null_ctl == top()) { |
3248 | // Do this eagerly, so that pattern matches like is_diamond_phi |
3249 | // will work even during parsing. |
3250 | assert(_null_path == PATH_LIMIT-1, "delete last");
3251 | region->del_req(_null_path); |
3252 | phi ->del_req(_null_path); |
3253 | } |
3254 | |
3255 | // Do we know the type check always succeeds?
3256 | bool known_statically = false; |
3257 | if (_gvn.type(superklass)->singleton()) { |
3258 | ciKlass* superk = _gvn.type(superklass)->is_klassptr()->klass(); |
3259 | ciKlass* subk = _gvn.type(obj)->is_oopptr()->klass(); |
3260 | if (subk != NULL && subk->is_loaded()) {
3261 | int static_res = C->static_subtype_check(superk, subk); |
3262 | known_statically = (static_res == Compile::SSC_always_true || static_res == Compile::SSC_always_false); |
3263 | } |
3264 | } |
3265 | |
3266 | if (!known_statically) { |
3267 | const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr(); |
3268 | // We may not have profiling here or it may not help us. If we |
3269 | // have a speculative type use it to perform an exact cast. |
3270 | ciKlass* spec_obj_type = obj_type->speculative_type(); |
3271 | if (spec_obj_type != NULL || (ProfileDynamicTypes && data != NULL)) {
3272 | Node* cast_obj = maybe_cast_profiled_receiver(not_null_obj, NULL, spec_obj_type, safe_for_replace);
3273 | if (stopped()) { // Profile disagrees with this path.
3274 | set_control(null_ctl); // Null is the only remaining possibility.
3275 | return intcon(0);
3276 | }
3277 | if (cast_obj != NULL) {
3278 | not_null_obj = cast_obj; |
3279 | } |
3280 | } |
3281 | } |
3282 | |
3283 | // Generate the subtype check |
3284 | Node* not_subtype_ctrl = gen_subtype_check(not_null_obj, superklass); |
3285 | |
3286 | // Plug in the success path to the general merge in slot 1. |
3287 | region->init_req(_obj_path, control()); |
3288 | phi ->init_req(_obj_path, intcon(1)); |
3289 | |
3290 | // Plug in the failing path to the general merge in slot 2. |
3291 | region->init_req(_fail_path, not_subtype_ctrl); |
3292 | phi ->init_req(_fail_path, intcon(0)); |
3293 | |
3294 | // Return final merged results |
3295 | set_control( _gvn.transform(region) ); |
3296 | record_for_igvn(region); |
3297 | |
3298 | // If we know the type check always succeeds then we don't use the |
3299 | // profiling data at this bytecode. Don't lose it, feed it to the |
3300 | // type system as a speculative type. |
3301 | if (safe_for_replace) { |
3302 | Node* casted_obj = record_profiled_receiver_for_speculation(obj); |
3303 | replace_in_map(obj, casted_obj); |
3304 | } |
3305 | |
3306 | return _gvn.transform(phi); |
3307 | } |
3308 | |
3309 | //-------------------------------gen_checkcast--------------------------------- |
3310 | // Generate a checkcast idiom. Used by both the checkcast bytecode and the |
3311 | // array store bytecode. Stack must be as-if BEFORE doing the bytecode so the |
3312 | // uncommon-trap paths work. Adjust stack after this call. |
3313 | // If failure_control is supplied and not null, it is filled in with |
3314 | // the control edge for the cast failure. Otherwise, an appropriate |
3315 | // uncommon trap or exception is thrown. |
3316 | Node* GraphKit::gen_checkcast(Node *obj, Node* superklass, |
3317 | Node* *failure_control) { |
3318 | kill_dead_locals(); // Benefit all the uncommon traps |
3319 | const TypeKlassPtr *tk = _gvn.type(superklass)->is_klassptr(); |
3320 | const Type *toop = TypeOopPtr::make_from_klass(tk->klass()); |
3321 | |
3322 | // Fast cutout: Check the case that the cast is vacuously true. |
3323 | // This detects the common cases where the test will short-circuit |
3324 | // away completely. We do this before we perform the null check, |
3325 | // because if the test is going to turn into zero code, we don't |
3326 | // want a residual null check left around. (Causes a slowdown, |
3327 | // for example, in some objArray manipulations, such as a[i]=a[j].) |
3328 | if (tk->singleton()) { |
3329 | const TypeOopPtr* objtp = _gvn.type(obj)->isa_oopptr(); |
3330 | if (objtp != NULL && objtp->klass() != NULL) {
3331 | switch (C->static_subtype_check(tk->klass(), objtp->klass())) { |
3332 | case Compile::SSC_always_true: |
3333 | // If we know the type check always succeeds then we don't use
3334 | // the profiling data at this bytecode. Don't lose it, feed it |
3335 | // to the type system as a speculative type. |
3336 | return record_profiled_receiver_for_speculation(obj); |
3337 | case Compile::SSC_always_false: |
3338 | // It needs a null check because a null will *pass* the cast check. |
3339 | // A non-null value will always produce an exception. |
3340 | if (!objtp->maybe_null()) { |
3341 | bool is_aastore = (java_bc() == Bytecodes::_aastore); |
3342 | Deoptimization::DeoptReason reason = is_aastore ? |
3343 | Deoptimization::Reason_array_check : Deoptimization::Reason_class_check; |
3344 | builtin_throw(reason, makecon(TypeKlassPtr::make(objtp->klass()))); |
3345 | return top(); |
3346 | } else if (!too_many_traps_or_recompiles(Deoptimization::Reason_null_assert)) { |
3347 | return null_assert(obj); |
3348 | } |
3349 | break; // Fall through to full check |
3350 | } |
3351 | } |
3352 | } |
3353 | |
3354 | ciProfileData* data = NULL;
3355 | bool safe_for_replace = false;
3356 | if (failure_control == NULL) { // use MDO in regular case only
3357 | assert(java_bc() == Bytecodes::_aastore ||
3358 | java_bc() == Bytecodes::_checkcast,
3359 | "interpreter profiles type checks only for these BCs");
3360 | data = method()->method_data()->bci_to_data(bci()); |
3361 | safe_for_replace = true; |
3362 | } |
3363 | |
3364 | // Make the merge point |
3365 | enum { _obj_path = 1, _null_path, PATH_LIMIT }; |
3366 | RegionNode* region = new RegionNode(PATH_LIMIT); |
3367 | Node* phi = new PhiNode(region, toop); |
3368 | C->set_has_split_ifs(true); // Has chance for split-if optimization |
3369 | |
3370 | // Use null-cast information if it is available |
3371 | bool speculative_not_null = false; |
3372 | bool never_see_null = ((failure_control == NULL) // regular case only
3373 | && seems_never_null(obj, data, speculative_not_null)); |
3374 | |
3375 | // Null check; get the cast pointer; set the null-path region slot
3376 | Node* null_ctl = top(); |
3377 | Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null); |
3378 | |
3379 | // If not_null_obj is dead, only null-path is taken |
3380 | if (stopped()) { // Doing checkcast on a NULL?
3381 | set_control(null_ctl); |
3382 | return null(); |
3383 | } |
3384 | region->init_req(_null_path, null_ctl); |
3385 | phi ->init_req(_null_path, null()); // Set null path value |
3386 | if (null_ctl == top()) { |
3387 | // Do this eagerly, so that pattern matches like is_diamond_phi |
3388 | // will work even during parsing. |
3389 | assert(_null_path == PATH_LIMIT-1, "delete last");
3390 | region->del_req(_null_path); |
3391 | phi ->del_req(_null_path); |
3392 | } |
3393 | |
3394 | Node* cast_obj = NULL;
3395 | if (tk->klass_is_exact()) { |
3396 | // The following optimization tries to statically cast the speculative type of the object |
3397 | // (for example obtained during profiling) to the type of the superklass and then do a |
3398 | // dynamic check that the type of the object is what we expect. To work correctly |
3399 | // for checkcast and aastore the type of superklass should be exact. |
3400 | const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr(); |
3401 | // We may not have profiling here or it may not help us. If we have |
3402 | // a speculative type use it to perform an exact cast. |
3403 | ciKlass* spec_obj_type = obj_type->speculative_type(); |
3404 | if (spec_obj_type != NULL || data != NULL) {
3405 | cast_obj = maybe_cast_profiled_receiver(not_null_obj, tk->klass(), spec_obj_type, safe_for_replace);
3406 | if (cast_obj != NULL) {
3407 | if (failure_control != NULL) // failure is now impossible
3408 | (*failure_control) = top(); |
3409 | // adjust the type of the phi to the exact klass: |
3410 | phi->raise_bottom_type(_gvn.type(cast_obj)->meet_speculative(TypePtr::NULL_PTR)); |
3411 | } |
3412 | } |
3413 | } |
3414 | |
3415 | if (cast_obj == NULL) {
3416 | // Generate the subtype check |
3417 | Node* not_subtype_ctrl = gen_subtype_check(not_null_obj, superklass ); |
3418 | |
3419 | // Plug in success path into the merge |
3420 | cast_obj = _gvn.transform(new CheckCastPPNode(control(), not_null_obj, toop)); |
3421 | // Failure path ends in uncommon trap (or may be dead - failure impossible) |
3422 | if (failure_control == NULL) {
3423 | if (not_subtype_ctrl != top()) { // If failure is possible |
3424 | PreserveJVMState pjvms(this); |
3425 | set_control(not_subtype_ctrl); |
3426 | bool is_aastore = (java_bc() == Bytecodes::_aastore); |
3427 | Deoptimization::DeoptReason reason = is_aastore ? |
3428 | Deoptimization::Reason_array_check : Deoptimization::Reason_class_check; |
3429 | builtin_throw(reason, load_object_klass(not_null_obj)); |
3430 | } |
3431 | } else { |
3432 | (*failure_control) = not_subtype_ctrl; |
3433 | } |
3434 | } |
3435 | |
3436 | region->init_req(_obj_path, control()); |
3437 | phi ->init_req(_obj_path, cast_obj); |
3438 | |
3439 | // A merge of NULL or Casted-NotNull obj |
3440 | Node* res = _gvn.transform(phi); |
3441 | |
3442 | // Note I do NOT always 'replace_in_map(obj,result)' here. |
3443 | // if( tk->klass()->can_be_primary_super() ) |
3444 | // This means that if I successfully store an Object into an array-of-String |
3445 | // I 'forget' that the Object is really now known to be a String. I have to |
3446 | // do this because we don't have true union types for interfaces - if I store |
3447 | // a Baz into an array-of-Interface and then tell the optimizer it's an |
3448 | // Interface, I forget that it's also a Baz and cannot do Baz-like field |
3449 | // references to it. FIX THIS WHEN UNION TYPES APPEAR! |
3450 | // replace_in_map( obj, res ); |
3451 | |
3452 | // Return final merged results |
3453 | set_control( _gvn.transform(region) ); |
3454 | record_for_igvn(region); |
3455 | |
3456 | return record_profiled_receiver_for_speculation(res); |
3457 | } |
3458 | |
3459 | //------------------------------next_monitor----------------------------------- |
3460 | // What number should be given to the next monitor? |
3461 | int GraphKit::next_monitor() { |
3462 | int current = jvms()->monitor_depth()* C->sync_stack_slots(); |
3463 | int next = current + C->sync_stack_slots(); |
3464 | // Keep the toplevel high water mark current: |
3465 | if (C->fixed_slots() < next) C->set_fixed_slots(next); |
3466 | return current; |
3467 | } |
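// Editor's sketch (illustrative, assuming the common C->sync_stack_slots() == 1):
// with no monitors held, monitor_depth() == 0, so next_monitor() returns slot 0
// and raises fixed_slots to at least 1; after one monitor has been pushed, the
// next call returns slot 1 and raises fixed_slots to at least 2.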
3468 | |
3469 | //------------------------------insert_mem_bar--------------------------------- |
3470 | // Memory barrier to avoid floating things around |
3471 | // The membar serves as a pinch point between both control and all memory slices. |
3472 | Node* GraphKit::insert_mem_bar(int opcode, Node* precedent) { |
3473 | MemBarNode* mb = MemBarNode::make(C, opcode, Compile::AliasIdxBot, precedent); |
3474 | mb->init_req(TypeFunc::Control, control()); |
3475 | mb->init_req(TypeFunc::Memory, reset_memory()); |
3476 | Node* membar = _gvn.transform(mb); |
3477 | set_control(_gvn.transform(new ProjNode(membar, TypeFunc::Control))); |
3478 | set_all_memory_call(membar); |
3479 | return membar; |
3480 | } |
3481 | |
3482 | //-------------------------insert_mem_bar_volatile---------------------------- |
3483 | // Memory barrier to avoid floating things around |
3484 | // The membar serves as a pinch point between both control and memory(alias_idx). |
3485 | // If you want to make a pinch point on all memory slices, do not use this |
3486 | // function (even with AliasIdxBot); use insert_mem_bar() instead. |
3487 | Node* GraphKit::insert_mem_bar_volatile(int opcode, int alias_idx, Node* precedent) { |
3488 | // When Parse::do_put_xxx updates a volatile field, it appends a series |
3489 | // of MemBarVolatile nodes, one for *each* volatile field alias category. |
3490 | // The first membar is on the same memory slice as the field store opcode. |
3491 | // This forces the membar to follow the store. (Bug 6500685 broke this.) |
3492 | // All the other membars (for other volatile slices, including AliasIdxBot, |
3493 | // which stands for all unknown volatile slices) are control-dependent |
3494 | // on the first membar. This prevents later volatile loads or stores |
3495 | // from sliding up past the just-emitted store. |
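// Editor's sketch (hypothetical slice names): after a volatile store to field f,
// the emitter would first issue insert_mem_bar_volatile(Op_MemBarVolatile, idx_of_f)
// on f's own slice, then one membar per remaining volatile slice plus AliasIdxBot,
// each of which ends up control-dependent on that first membar.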
3496 | |
3497 | MemBarNode* mb = MemBarNode::make(C, opcode, alias_idx, precedent); |
3498 | mb->set_req(TypeFunc::Control,control()); |
3499 | if (alias_idx == Compile::AliasIdxBot) { |
3500 | mb->set_req(TypeFunc::Memory, merged_memory()->base_memory()); |
3501 | } else { |
3502 | assert(!(opcode == Op_Initialize && alias_idx != Compile::AliasIdxRaw), "fix caller");
3503 | mb->set_req(TypeFunc::Memory, memory(alias_idx)); |
3504 | } |
3505 | Node* membar = _gvn.transform(mb); |
3506 | set_control(_gvn.transform(new ProjNode(membar, TypeFunc::Control))); |
3507 | if (alias_idx == Compile::AliasIdxBot) { |
3508 | merged_memory()->set_base_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory))); |
3509 | } else { |
3510 | set_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory)),alias_idx); |
3511 | } |
3512 | return membar; |
3513 | } |
3514 | |
3515 | //------------------------------shared_lock------------------------------------ |
3516 | // Emit locking code. |
3517 | FastLockNode* GraphKit::shared_lock(Node* obj) { |
3518 | // bci is either a monitorenter bc or InvocationEntryBci |
3519 | // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces |
3520 | assert(SynchronizationEntryBCI == InvocationEntryBci, "");
3521 | |
3522 | if( !GenerateSynchronizationCode ) |
3523 | return NULL; // Not locking things?
3524 | if (stopped()) // Dead monitor?
3525 | return NULL;
3526 | |
3527 | assert(dead_locals_are_killed(), "should kill locals before sync. point");
3528 | |
3529 | // Box the stack location |
3530 | Node* box = _gvn.transform(new BoxLockNode(next_monitor())); |
3531 | Node* mem = reset_memory(); |
3532 | |
3533 | FastLockNode * flock = _gvn.transform(new FastLockNode(0, obj, box) )->as_FastLock(); |
3534 | |
3535 | // Create the rtm counters for this fast lock if needed. |
3536 | flock->create_rtm_lock_counter(sync_jvms()); // sync_jvms used to get current bci |
3537 | |
3538 | // Add monitor to debug info for the slow path. If we block inside the |
3539 | // slow path and de-opt, we need the monitor hanging around |
3540 | map()->push_monitor( flock ); |
3541 | |
3542 | const TypeFunc *tf = LockNode::lock_type(); |
3543 | LockNode *lock = new LockNode(C, tf); |
3544 | |
3545 | lock->init_req( TypeFunc::Control, control() ); |
3546 | lock->init_req( TypeFunc::Memory , mem ); |
3547 | lock->init_req( TypeFunc::I_O , top() ) ; // does no i/o |
3548 | lock->init_req( TypeFunc::FramePtr, frameptr() ); |
3549 | lock->init_req( TypeFunc::ReturnAdr, top() ); |
3550 | |
3551 | lock->init_req(TypeFunc::Parms + 0, obj); |
3552 | lock->init_req(TypeFunc::Parms + 1, box); |
3553 | lock->init_req(TypeFunc::Parms + 2, flock); |
3554 | add_safepoint_edges(lock); |
3555 | |
3556 | lock = _gvn.transform( lock )->as_Lock(); |
3557 | |
3558 | // lock has no side-effects, sets few values |
3559 | set_predefined_output_for_runtime_call(lock, mem, TypeRawPtr::BOTTOM); |
3560 | |
3561 | insert_mem_bar(Op_MemBarAcquireLock); |
3562 | |
3563 | // Add this to the worklist so that the lock can be eliminated |
3564 | record_for_igvn(lock); |
3565 | |
3566 | #ifndef PRODUCT |
3567 | if (PrintLockStatistics) { |
3568 | // Update the counter for this lock. Don't bother using an atomic |
3569 | // operation since we don't require absolute accuracy. |
3570 | lock->create_lock_counter(map()->jvms()); |
3571 | increment_counter(lock->counter()->addr()); |
3572 | } |
3573 | #endif |
3574 | |
3575 | return flock; |
3576 | } |
3577 | |
3578 | |
3579 | //------------------------------shared_unlock---------------------------------- |
3580 | // Emit unlocking code. |
3581 | void GraphKit::shared_unlock(Node* box, Node* obj) { |
3582 | // bci is either a monitorenter bc or InvocationEntryBci |
3583 | // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces |
3584 | assert(SynchronizationEntryBCI == InvocationEntryBci, "");
3585 | |
3586 | if( !GenerateSynchronizationCode ) |
3587 | return; |
3588 | if (stopped()) { // Dead monitor? |
3589 | map()->pop_monitor(); // Kill monitor from debug info |
3590 | return; |
3591 | } |
3592 | |
3593 | // Memory barrier to avoid floating things down past the locked region |
3594 | insert_mem_bar(Op_MemBarReleaseLock); |
3595 | |
3596 | const TypeFunc *tf = OptoRuntime::complete_monitor_exit_Type(); |
3597 | UnlockNode *unlock = new UnlockNode(C, tf); |
3598 | #ifdef ASSERT
3599 | unlock->set_dbg_jvms(sync_jvms()); |
3600 | #endif |
3601 | uint raw_idx = Compile::AliasIdxRaw; |
3602 | unlock->init_req( TypeFunc::Control, control() ); |
3603 | unlock->init_req( TypeFunc::Memory , memory(raw_idx) ); |
3604 | unlock->init_req( TypeFunc::I_O , top() ) ; // does no i/o |
3605 | unlock->init_req( TypeFunc::FramePtr, frameptr() ); |
3606 | unlock->init_req( TypeFunc::ReturnAdr, top() ); |
3607 | |
3608 | unlock->init_req(TypeFunc::Parms + 0, obj); |
3609 | unlock->init_req(TypeFunc::Parms + 1, box); |
3610 | unlock = _gvn.transform(unlock)->as_Unlock(); |
3611 | |
3612 | Node* mem = reset_memory(); |
3613 | |
3614 | // unlock has no side-effects, sets few values |
3615 | set_predefined_output_for_runtime_call(unlock, mem, TypeRawPtr::BOTTOM); |
3616 | |
3617 | // Kill monitor from debug info |
3618 | map()->pop_monitor( ); |
3619 | } |
3620 | |
3621 | //-------------------------------get_layout_helper----------------------------- |
3622 | // If the given klass is a constant or known to be an array, |
3623 | // fetch the constant layout helper value into constant_value |
3624 | // and return (Node*)NULL. Otherwise, load the non-constant |
3625 | // layout helper value, and return the node which represents it. |
3626 | // This two-faced routine is useful because allocation sites |
3627 | // almost always feature constant types. |
3628 | Node* GraphKit::get_layout_helper(Node* klass_node, jint& constant_value) { |
3629 | const TypeKlassPtr* inst_klass = _gvn.type(klass_node)->isa_klassptr(); |
3630 | if (!StressReflectiveCode && inst_klass != NULL) {
3631 | ciKlass* klass = inst_klass->klass(); |
3632 | bool xklass = inst_klass->klass_is_exact(); |
3633 | if (xklass || klass->is_array_klass()) { |
3634 | jint lhelper = klass->layout_helper(); |
3635 | if (lhelper != Klass::_lh_neutral_value) { |
3636 | constant_value = lhelper; |
3637 | return (Node*) NULL;
3638 | } |
3639 | } |
3640 | } |
3641 | constant_value = Klass::_lh_neutral_value; // put in a known value |
3642 | Node* lhp = basic_plus_adr(klass_node, klass_node, in_bytes(Klass::layout_helper_offset())); |
3643 | return make_load(NULL, lhp, TypeInt::INT, T_INT, MemNode::unordered);
3644 | } |
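// Editor's sketch of the two-faced contract above (hypothetical caller):
//
//   jint layout_con = Klass::_lh_neutral_value;
//   Node* layout_val = get_layout_helper(klass_node, layout_con);
//   if (layout_val == NULL) {
//     // layout_con now holds the constant layout helper
//   } else {
//     // layout_val loads the helper from the klass at runtime
//   }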
3645 | |
3646 | // We just put in an allocate/initialize with a big raw-memory effect. |
3647 | // Hook selected additional alias categories on the initialization. |
3648 | static void hook_memory_on_init(GraphKit& kit, int alias_idx, |
3649 | MergeMemNode* init_in_merge, |
3650 | Node* init_out_raw) { |
3651 | DEBUG_ONLY(Node* init_in_raw = init_in_merge->base_memory());
3652 | assert(init_in_merge->memory_at(alias_idx) == init_in_raw, "");
3653 | |
3654 | Node* prevmem = kit.memory(alias_idx); |
3655 | init_in_merge->set_memory_at(alias_idx, prevmem); |
3656 | kit.set_memory(init_out_raw, alias_idx); |
3657 | } |
3658 | |
3659 | //---------------------------set_output_for_allocation------------------------- |
3660 | Node* GraphKit::set_output_for_allocation(AllocateNode* alloc, |
3661 | const TypeOopPtr* oop_type, |
3662 | bool deoptimize_on_exception) { |
3663 | int rawidx = Compile::AliasIdxRaw; |
3664 | alloc->set_req( TypeFunc::FramePtr, frameptr() ); |
3665 | add_safepoint_edges(alloc); |
3666 | Node* allocx = _gvn.transform(alloc); |
3667 | set_control( _gvn.transform(new ProjNode(allocx, TypeFunc::Control) ) ); |
3668 | // create memory projection for i_o |
3669 | set_memory ( _gvn.transform( new ProjNode(allocx, TypeFunc::Memory, true) ), rawidx ); |
3670 | make_slow_call_ex(allocx, env()->Throwable_klass(), true, deoptimize_on_exception); |
3671 | |
3672 | // create a memory projection as for the normal control path |
3673 | Node* malloc = _gvn.transform(new ProjNode(allocx, TypeFunc::Memory)); |
3674 | set_memory(malloc, rawidx); |
3675 | |
3676 | // a normal slow-call doesn't change i_o, but an allocation does |
3677 | // we create a separate i_o projection for the normal control path |
3678 | set_i_o(_gvn.transform( new ProjNode(allocx, TypeFunc::I_O, false) ) ); |
3679 | Node* rawoop = _gvn.transform( new ProjNode(allocx, TypeFunc::Parms) ); |
3680 | |
3681 | // put in an initialization barrier |
3682 | InitializeNode* init = insert_mem_bar_volatile(Op_Initialize, rawidx, |
3683 | rawoop)->as_Initialize(); |
3684 | assert(alloc->initialization() == init, "2-way macro link must work");
3685 | assert(init ->allocation() == alloc, "2-way macro link must work");
3686 | { |
3687 | // Extract memory strands which may participate in the new object's |
3688 | // initialization, and source them from the new InitializeNode. |
3689 | // This will allow us to observe initializations when they occur, |
3690 | // and link them properly (as a group) to the InitializeNode. |
3691 | assert(init->in(InitializeNode::Memory) == malloc, "");
3692 | MergeMemNode* minit_in = MergeMemNode::make(malloc); |
3693 | init->set_req(InitializeNode::Memory, minit_in); |
3694 | record_for_igvn(minit_in); // fold it up later, if possible |
3695 | Node* minit_out = memory(rawidx); |
3696 | assert(minit_out->is_Proj() && minit_out->in(0) == init, "");
3697 | // Add an edge in the MergeMem for the header fields so an access |
3698 | // to one of those has correct memory state |
3699 | set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::mark_offset_in_bytes()))); |
3700 | set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::klass_offset_in_bytes()))); |
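// Editor's note (offsets illustrative): on a typical 64-bit VM the mark word
// sits at offset 0 and the klass field at offset 8 (4 with compressed class
// pointers), so the two set_memory calls above give each header field its own
// alias slice fed by the InitializeNode's memory output.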
3701 | if (oop_type->isa_aryptr()) { |
3702 | const TypePtr* telemref = oop_type->add_offset(Type::OffsetBot); |
3703 | int elemidx = C->get_alias_index(telemref); |
3704 | hook_memory_on_init(*this, elemidx, minit_in, minit_out); |
3705 | } else if (oop_type->isa_instptr()) { |
3706 | ciInstanceKlass* ik = oop_type->klass()->as_instance_klass(); |
3707 | for (int i = 0, len = ik->nof_nonstatic_fields(); i < len; i++) { |
3708 | ciField* field = ik->nonstatic_field_at(i); |
3709 | if (field->offset() >= TrackedInitializationLimit * HeapWordSize) |
3710 | continue; // do not bother to track really large numbers of fields |
3711 | // Find (or create) the alias category for this field: |
3712 | int fieldidx = C->alias_type(field)->index(); |
3713 | hook_memory_on_init(*this, fieldidx, minit_in, minit_out); |
3714 | } |
3715 | } |
3716 | } |
3717 | |
3718 | // Cast raw oop to the real thing... |
3719 | Node* javaoop = new CheckCastPPNode(control(), rawoop, oop_type); |
3720 | javaoop = _gvn.transform(javaoop); |
3721 | C->set_recent_alloc(control(), javaoop); |
3722 | assert(just_allocated_object(control()) == javaoop, "just allocated");
3723 | |
3724 | #ifdef ASSERT
3725 | { // Verify that the AllocateNode::Ideal_allocation recognizers work: |
3726 | assert(AllocateNode::Ideal_allocation(rawoop, &_gvn) == alloc,
3727 | "Ideal_allocation works");
3728 | assert(AllocateNode::Ideal_allocation(javaoop, &_gvn) == alloc,
3729 | "Ideal_allocation works");
3730 | if (alloc->is_AllocateArray()) {
3731 | assert(AllocateArrayNode::Ideal_array_allocation(rawoop, &_gvn) == alloc->as_AllocateArray(),
3732 | "Ideal_allocation works");
3733 | assert(AllocateArrayNode::Ideal_array_allocation(javaoop, &_gvn) == alloc->as_AllocateArray(),
3734 | "Ideal_allocation works");
3735 | } else {
3736 | assert(alloc->in(AllocateNode::ALength)->is_top(), "no length, please");
3737 | } |
3738 | } |
3739 | #endif //ASSERT |
3740 | |
3741 | return javaoop; |
3742 | } |
3743 | |
3744 | //---------------------------new_instance-------------------------------------- |
3745 | // This routine takes a klass_node which may be constant (for a static type) |
3746 | // or may be non-constant (for reflective code). It will work equally well |
3747 | // for either, and the graph will fold nicely if the optimizer later reduces |
3748 | // the type to a constant. |
3749 | // The optional arguments are for specialized use by intrinsics: |
3750 | // - If 'extra_slow_test' is not null, it is an extra condition for the slow-path.
3751 | // - If 'return_size_val' is not null, report the total object size to the caller.
3752 | // - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize) |
3753 | Node* GraphKit::new_instance(Node* klass_node, |
3754 | Node* extra_slow_test, |
3755 | Node* *return_size_val, |
3756 | bool deoptimize_on_exception) { |
3757 | // Compute size in doublewords |
3758 | // The size is always an integral number of doublewords, represented |
3759 | // as a positive bytewise size stored in the klass's layout_helper. |
3760 | // The layout_helper also encodes (in a low bit) the need for a slow path. |
3761 | jint layout_con = Klass::_lh_neutral_value; |
3762 | Node* layout_val = get_layout_helper(klass_node, layout_con); |
3763 | int layout_is_con = (layout_val == NULL);
3764 | |
3765 | if (extra_slow_test == NULL) extra_slow_test = intcon(0);
3766 | // Generate the initial go-slow test. It's either ALWAYS (return a |
3767 | // Node for 1) or NEVER (return a NULL) or perhaps (in the reflective |
3768 | // case) a computed value derived from the layout_helper. |
3769 | Node* initial_slow_test = NULL;
3770 | if (layout_is_con) { |
3771 | assert(!StressReflectiveCode, "stress mode does not use these paths");
3772 | bool must_go_slow = Klass::layout_helper_needs_slow_path(layout_con); |
3773 | initial_slow_test = must_go_slow ? intcon(1) : extra_slow_test; |
3774 | } else { // reflective case |
3775 | // This reflective path is used by Unsafe.allocateInstance. |
3776 | // (It may be stress-tested by specifying StressReflectiveCode.) |
3777 | // Basically, we want to get into the VM if there's an illegal argument.
3778 | Node* bit = intcon(Klass::_lh_instance_slow_path_bit); |
3779 | initial_slow_test = _gvn.transform( new AndINode(layout_val, bit) ); |
3780 | if (extra_slow_test != intcon(0)) { |
3781 | initial_slow_test = _gvn.transform( new OrINode(initial_slow_test, extra_slow_test) ); |
3782 | } |
3783 | // (Macro-expander will further convert this to a Bool, if necessary.) |
3784 | } |
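// Editor's note (illustrative): for an ordinary instantiable klass the layout
// helper is the instance size in bytes with its low bit clear, so the AndI
// above folds to 0 and initial_slow_test reduces to extra_slow_test; klasses
// that must be allocated in the VM (e.g. those with finalizers) have
// _lh_instance_slow_path_bit set and always take the slow path.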
3785 | |
3786 | // Find the size in bytes. This is easy; it's the layout_helper. |
3787 | // The size value must be valid even if the slow path is taken. |
3788 | Node* size = NULL;
3789 | if (layout_is_con) {
3790 | size = MakeConX(Klass::layout_helper_size_in_bytes(layout_con));
3791 | } else { // reflective case
3792 | // This reflective path is used by clone and Unsafe.allocateInstance.
3793 | size = ConvI2X(layout_val);
3794 |
3795 | // Clear the low bits to extract layout_helper_size_in_bytes:
3796 | assert((int)Klass::_lh_instance_slow_path_bit < BytesPerLong, "clear bit");
3797 | Node* mask = MakeConX(~ (intptr_t)right_n_bits(LogBytesPerLong));
3798 | size = _gvn.transform( new AndXNode(size, mask) );
3799 | } |
3800 | if (return_size_val != NULL) {
3801 | (*return_size_val) = size; |
3802 | } |
3803 | |
3804 | // This is a precise notnull oop of the klass. |
3805 | // (Actually, it need not be precise if this is a reflective allocation.) |
3806 | // It's what we cast the result to. |
3807 | const TypeKlassPtr* tklass = _gvn.type(klass_node)->isa_klassptr(); |
3808 | if (!tklass) tklass = TypeInstKlassPtr::OBJECT; |
3809 | const TypeOopPtr* oop_type = tklass->as_instance_type(); |
3810 | |
3811 | // Now generate allocation code |
3812 | |
3813 | // The entire memory state is needed for slow path of the allocation |
3814 | // since GC and deoptimization can happen.
3815 | Node *mem = reset_memory(); |
3816 | set_all_memory(mem); // Create new memory state |
3817 | |
3818 | AllocateNode* alloc = new AllocateNode(C, AllocateNode::alloc_type(Type::TOP), |
3819 | control(), mem, i_o(), |
3820 | size, klass_node, |
3821 | initial_slow_test); |
3822 | |
3823 | return set_output_for_allocation(alloc, oop_type, deoptimize_on_exception); |
3824 | } |
3825 | |
3826 | //-------------------------------new_array------------------------------------- |
3827 | // helper for both newarray and anewarray |
3828 | // The 'length' parameter is (obviously) the length of the array. |
3829 | // See comments on new_instance for the meaning of the other arguments. |
3830 | Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable) |
3831 | Node* length, // number of array elements |
3832 | int nargs, // number of arguments to push back for uncommon trap |
3833 | Node* *return_size_val, |
3834 | bool deoptimize_on_exception) { |
3835 | jint layout_con = Klass::_lh_neutral_value; |
3836 | Node* layout_val = get_layout_helper(klass_node, layout_con); |
3837 | int layout_is_con = (layout_val == NULL);
3838 | |
3839 | if (!layout_is_con && !StressReflectiveCode && |
3840 | !too_many_traps(Deoptimization::Reason_class_check)) { |
3841 | // This is a reflective array creation site. |
3842 | // Optimistically assume that it is a subtype of Object[], |
3843 | // so that we can fold up all the address arithmetic. |
3844 | layout_con = Klass::array_layout_helper(T_OBJECT); |
3845 | Node* cmp_lh = _gvn.transform( new CmpINode(layout_val, intcon(layout_con)) ); |
3846 | Node* bol_lh = _gvn.transform( new BoolNode(cmp_lh, BoolTest::eq) ); |
3847 | { BuildCutout unless(this, bol_lh, PROB_MAX);
3848 | inc_sp(nargs); |
3849 | uncommon_trap(Deoptimization::Reason_class_check, |
3850 | Deoptimization::Action_maybe_recompile); |
3851 | } |
3852 | layout_val = NULL;
3853 | layout_is_con = true; |
3854 | } |
3855 | |
3856 | // Generate the initial go-slow test. Make sure we do not overflow |
3857 | // if length is huge (near 2Gig) or negative! We do not need |
3858 | // exact double-words here, just a close approximation of needed |
3859 | // double-words. We can't add any offset or rounding bits, lest we |
3860 | // take a size -1 of bytes and make it positive. Use an unsigned |
3861 | // compare, so negative sizes look hugely positive. |
3862 | int fast_size_limit = FastAllocateSizeLimit; |
3863 | if (layout_is_con) { |
3864 | assert(!StressReflectiveCode, "stress mode does not use these paths");
3865 | // Increase the size limit if we have exact knowledge of array type. |
3866 | int log2_esize = Klass::layout_helper_log2_element_size(layout_con); |
3867 | fast_size_limit <<= (LogBytesPerLong - log2_esize); |
3868 | } |
3869 | |
3870 | Node* initial_slow_cmp = _gvn.transform( new CmpUNode( length, intcon( fast_size_limit ) ) ); |
3871 | Node* initial_slow_test = _gvn.transform( new BoolNode( initial_slow_cmp, BoolTest::gt ) ); |
3872 | |
3873 | // --- Size Computation --- |
3874 | // array_size = round_to_heap(array_header + (length << elem_shift)); |
3875 | // where round_to_heap(x) == align_to(x, MinObjAlignmentInBytes) |
3876 | // and align_to(x, y) == ((x + y-1) & ~(y-1)) |
3877 | // The rounding mask is strength-reduced, if possible. |
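// Editor's worked example (header size and alignment assumed for a typical
// 64-bit VM: 16-byte array header, 8-byte heap alignment): for a jint[10],
// array_size = round_to_heap(16 + (10 << 2)) = align_to(56, 8) = 56 bytes;
// for a jbyte[3], round_to_heap(16 + 3) = align_to(19, 8) = 24 bytes.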
3878 | int round_mask = MinObjAlignmentInBytes - 1; |
3879 | Node* header_size = NULL;
3880 | int header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE); |
3881 | // (T_BYTE has the weakest alignment and size restrictions...) |
3882 | if (layout_is_con) { |
3883 | int hsize = Klass::layout_helper_header_size(layout_con); |
3884 | int eshift = Klass::layout_helper_log2_element_size(layout_con); |
3885 | BasicType etype = Klass::layout_helper_element_type(layout_con); |
3886 | if ((round_mask & ~right_n_bits(eshift)) == 0)
3887 | round_mask = 0; // strength-reduce it if it goes away completely
3888 | assert((hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded");
3889 | assert(header_size_min <= hsize, "generic minimum is smallest");
3890 | header_size_min = hsize; |
3891 | header_size = intcon(hsize + round_mask); |
3892 | } else { |
3893 | Node* hss = intcon(Klass::_lh_header_size_shift); |
3894 | Node* hsm = intcon(Klass::_lh_header_size_mask); |
3895 | Node* hsize = _gvn.transform( new URShiftINode(layout_val, hss) ); |
3896 | hsize = _gvn.transform( new AndINode(hsize, hsm) ); |
3897 | Node* mask = intcon(round_mask); |
3898 | header_size = _gvn.transform( new AddINode(hsize, mask) ); |
3899 | } |
3900 | |
3901 | Node* elem_shift = NULL;
3902 | if (layout_is_con) { |
3903 | int eshift = Klass::layout_helper_log2_element_size(layout_con); |
3904 | if (eshift != 0) |
3905 | elem_shift = intcon(eshift); |
3906 | } else { |
3907 | // There is no need to mask or shift this value. |
3908 | // The semantics of LShiftINode include an implicit mask to 0x1F. |
3909 | assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place");
3910 | elem_shift = layout_val; |
3911 | } |
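// Editor's note: using layout_val directly as the shift count is safe because
// the layout helper keeps log2(element size) in its lowest bits (the assert
// above checks _lh_log2_element_size_shift == 0) and LShiftI's implicit 0x1F
// mask keeps the helper's higher fields out of the shift amount.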
3912 | |
3913 | // Transition to native address size for all offset calculations: |
3914 | Node* lengthx = ConvI2X(length);
3915 | Node* headerx = ConvI2X(header_size);
3916 | #ifdef _LP64
3917 | { const TypeInt* tilen = _gvn.find_int_type(length);
3918 | if (tilen != NULL && tilen->_lo < 0) {
3919 | // Add a manual constraint to a positive range. Cf. array_element_address. |
3920 | jint size_max = fast_size_limit; |
3921 | if (size_max > tilen->_hi) size_max = tilen->_hi; |
3922 | const TypeInt* tlcon = TypeInt::make(0, size_max, Type::WidenMin); |
3923 | |
3924 | // Only do a narrow I2L conversion if the range check passed. |
3925 | IfNode* iff = new IfNode(control(), initial_slow_test, PROB_MIN, COUNT_UNKNOWN);
3926 | _gvn.transform(iff); |
3927 | RegionNode* region = new RegionNode(3); |
3928 | _gvn.set_type(region, Type::CONTROL); |
3929 | lengthx = new PhiNode(region, TypeLong::LONG); |
3930 | _gvn.set_type(lengthx, TypeLong::LONG); |
3931 | |
3932 | // Range check passed. Use ConvI2L node with narrow type. |
3933 | Node* passed = IfFalse(iff); |
3934 | region->init_req(1, passed); |
3935 | // Make I2L conversion control dependent to prevent it from |
3936 | // floating above the range check during loop optimizations. |
3937 | lengthx->init_req(1, C->constrained_convI2L(&_gvn, length, tlcon, passed)); |
3938 | |
3939 | // Range check failed. Use ConvI2L with wide type because length may be invalid. |
3940 | region->init_req(2, IfTrue(iff)); |
3941 | lengthx->init_req(2, ConvI2X(length));
3942 | |
3943 | set_control(region); |
3944 | record_for_igvn(region); |
3945 | record_for_igvn(lengthx); |
3946 | } |
3947 | } |
3948 | #endif |
3949 | |
3950 | // Combine header size (plus rounding) and body size. Then round down. |
3951 | // This computation cannot overflow, because it is used only in two |
3952 | // places, one where the length is sharply limited, and the other |
3953 | // after a successful allocation. |
3954 | Node* abody = lengthx; |
3955 | if (elem_shift != NULL)
3956 | abody = _gvn.transform( new LShiftXNode(lengthx, elem_shift) );
3957 | Node* size = _gvn.transform( new AddXNode(headerx, abody) );
3958 | if (round_mask != 0) {
3959 | Node* mask = MakeConX(~round_mask);
3960 | size = _gvn.transform( new AndXNode(size, mask) );
3961 | } |
3962 | // else if round_mask == 0, the size computation is self-rounding |
3963 | |
3964 | if (return_size_val != NULL) {
3965 | // This is the size |
3966 | (*return_size_val) = size; |
3967 | } |
3968 | |
3969 | // Now generate allocation code |
3970 | |
3971 | // The entire memory state is needed for slow path of the allocation |
3972 | // since GC and deoptimization can happen.
3973 | Node *mem = reset_memory(); |
3974 | set_all_memory(mem); // Create new memory state |
3975 | |
3976 | if (initial_slow_test->is_Bool()) { |
3977 | // Hide it behind a CMoveI, or else PhaseIdealLoop::split_up will get sick. |
3978 | initial_slow_test = initial_slow_test->as_Bool()->as_int_value(&_gvn); |
3979 | } |
3980 | |
3981 | // Create the AllocateArrayNode and its result projections |
3982 | AllocateArrayNode* alloc |
3983 | = new AllocateArrayNode(C, AllocateArrayNode::alloc_type(TypeInt::INT), |
3984 | control(), mem, i_o(), |
3985 | size, klass_node, |
3986 | initial_slow_test, |
3987 | length); |
3988 | |
3989 | // Cast to correct type. Note that the klass_node may be constant or not, |
3990 | // and in the latter case the actual array type will be inexact also. |
3991 | // (This happens via a non-constant argument to inline_native_newArray.) |
3992 | // In any case, the value of klass_node provides the desired array type. |
3993 | const TypeInt* length_type = _gvn.find_int_type(length); |
3994 | const TypeOopPtr* ary_type = _gvn.type(klass_node)->is_klassptr()->as_instance_type(); |
3995 | if (ary_type->isa_aryptr() && length_type != NULL) {
3996 | // Try to get a better type than POS for the size |
3997 | ary_type = ary_type->is_aryptr()->cast_to_size(length_type); |
3998 | } |
3999 | |
4000 | Node* javaoop = set_output_for_allocation(alloc, ary_type, deoptimize_on_exception); |
4001 | |
4002 | array_ideal_length(alloc, ary_type, true); |
4003 | return javaoop; |
4004 | } |
4005 | |
4006 | // The following "Ideal_foo" functions are placed here because they recognize |
4007 | // the graph shapes created by the functions immediately above. |
4008 | |
4009 | //---------------------------Ideal_allocation---------------------------------- |
4010 | // Given an oop pointer or raw pointer, see if it feeds from an AllocateNode. |
4011 | AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase) { |
4012 | if (ptr == NULL) { // reduce dumb test in callers
4013 | return NULL;
4014 | } |
4015 | |
4016 | BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); |
4017 | ptr = bs->step_over_gc_barrier(ptr); |
4018 | |
4019 | if (ptr->is_CheckCastPP()) { // strip only one raw-to-oop cast |
4020 | ptr = ptr->in(1); |
4021 | if (ptr == NULL) return NULL;
4022 | } |
4023 | // Return NULL for allocations with several casts: |
4024 | // j.l.reflect.Array.newInstance(jobject, jint) |
4025 | // Object.clone() |
4026 | // to keep the more precise type from the last cast.
4027 | if (ptr->is_Proj()) { |
4028 | Node* allo = ptr->in(0); |
4029 | if (allo != NULL && allo->is_Allocate()) {
4030 | return allo->as_Allocate(); |
4031 | } |
4032 | } |
4033 | // Report failure to match. |
4034 | return NULL;
4035 | } |
4036 | |
4037 | // Fancy version which also strips off an offset (and reports it to caller). |
4038 | AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase, |
4039 | intptr_t& offset) { |
4040 | Node* base = AddPNode::Ideal_base_and_offset(ptr, phase, offset); |
4041 | if (base == NULL) return NULL;
4042 | return Ideal_allocation(base, phase); |
4043 | } |
4044 | |
4045 | // Trace Initialize <- Proj[Parm] <- Allocate |
4046 | AllocateNode* InitializeNode::allocation() { |
4047 | Node* rawoop = in(InitializeNode::RawAddress); |
4048 | if (rawoop->is_Proj()) { |
4049 | Node* alloc = rawoop->in(0); |
4050 | if (alloc->is_Allocate()) { |
4051 | return alloc->as_Allocate(); |
4052 | } |
4053 | } |
4054 | return NULL;
4055 | } |
4056 | |
4057 | // Trace Allocate -> Proj[Parm] -> Initialize |
4058 | InitializeNode* AllocateNode::initialization() { |
4059 | ProjNode* rawoop = proj_out_or_null(AllocateNode::RawAddress); |
4060 | if (rawoop == NULL) return NULL;
4061 | for (DUIterator_Fast imax, i = rawoop->fast_outs(imax); i < imax; i++) { |
4062 | Node* init = rawoop->fast_out(i); |
4063 | if (init->is_Initialize()) { |
4064 | assert(init->as_Initialize()->allocation() == this, "2-way link");
4065 | return init->as_Initialize(); |
4066 | } |
4067 | } |
4068 | return NULL;
4069 | } |
4070 | |
4071 | //----------------------------- loop predicates --------------------------- |
4072 | |
4073 | //------------------------------add_predicate_impl---------------------------- |
4074 | void GraphKit::add_empty_predicate_impl(Deoptimization::DeoptReason reason, int nargs) { |
4075 | // Too many traps seen? |
4076 | if (too_many_traps(reason)) { |
4077 | #ifdef ASSERT
4078 | if (TraceLoopPredicate) { |
4079 | int tc = C->trap_count(reason); |
4080 | tty->print("too many traps=%s tcount=%d in ", |
4081 | Deoptimization::trap_reason_name(reason), tc); |
4082 | method()->print(); // which method has too many predicate traps |
4083 | tty->cr(); |
4084 | } |
4085 | #endif |
4086 | // We cannot afford to take more traps here, |
4087 | // do not generate predicate. |
4088 | return; |
4089 | } |
4090 | |
4091 | Node *cont = _gvn.intcon(1); |
4092 | Node* opq = _gvn.transform(new Opaque1Node(C, cont)); |
4093 | Node *bol = _gvn.transform(new Conv2BNode(opq)); |
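// Editor's note: the shape built here is If(Conv2B(Opaque1(1))) -- a condition
// that is constant-true but opaque to igvn, so the branch survives parsing and
// loop opts can later insert real predicate checks above its uncommon trap.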
4094 | IfNode* iff = create_and_map_if(control(), bol, PROB_MAX, COUNT_UNKNOWN);
4095 | Node* iffalse = _gvn.transform(new IfFalseNode(iff)); |
4096 | C->add_predicate_opaq(opq); |
4097 | { |
4098 | PreserveJVMState pjvms(this); |
4099 | set_control(iffalse); |
4100 | inc_sp(nargs); |
4101 | uncommon_trap(reason, Deoptimization::Action_maybe_recompile); |
4102 | } |
4103 | Node* iftrue = _gvn.transform(new IfTrueNode(iff)); |
4104 | set_control(iftrue); |
4105 | } |
4106 | |
4107 | //------------------------------add_predicate--------------------------------- |
4108 | void GraphKit::add_empty_predicates(int nargs) { |
4109 | // These loop predicates remain empty. All concrete loop predicates are inserted above the corresponding |
4110 | // empty loop predicate later by 'PhaseIdealLoop::create_new_if_for_predicate'. All concrete loop predicates of |
4111 | // a specific kind (normal, profile or limit check) share the same uncommon trap as the empty loop predicate. |
4112 | if (UseLoopPredicate) { |
4113 | add_empty_predicate_impl(Deoptimization::Reason_predicate, nargs); |
4114 | } |
4115 | if (UseProfiledLoopPredicate) { |
4116 | add_empty_predicate_impl(Deoptimization::Reason_profile_predicate, nargs); |
4117 | } |
4118 | // loop's limit check predicate should be near the loop. |
4119 | add_empty_predicate_impl(Deoptimization::Reason_loop_limit_check, nargs); |
4120 | } |
4121 | |
4122 | void GraphKit::sync_kit(IdealKit& ideal) { |
4123 | set_all_memory(ideal.merged_memory()); |
4124 | set_i_o(ideal.i_o()); |
4125 | set_control(ideal.ctrl()); |
4126 | } |
4127 | |
4128 | void GraphKit::final_sync(IdealKit& ideal) { |
4129 | // Final sync IdealKit and graphKit. |
4130 | sync_kit(ideal); |
4131 | } |
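     | // Typical round trip between GraphKit and IdealKit, as a hedged sketch |
     | // (IdealKit's constructor snapshots this kit's control, i/o and memory): |
     | // |
     | //   IdealKit ideal(this); |
     | //   // ... emit nodes through 'ideal' ... |
     | //   final_sync(ideal);  // copy ideal's final state back into this kit |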
4132 | |
4133 | Node* GraphKit::load_String_length(Node* str, bool set_ctrl) { |
4134 | Node* len = load_array_length(load_String_value(str, set_ctrl)); |
4135 | Node* coder = load_String_coder(str, set_ctrl); |
4136 | // Divide length by 2 if coder is UTF16 |
4137 | return _gvn.transform(new RShiftINode(len, coder)); |
4138 | } |
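     | // The coder value doubles as the shift amount, so no branch is needed. |
     | // Worked example for a 10-byte value array: |
     | // |
     | //   LATIN1: coder = 0 -> 10 >> 0 = 10 chars |
     | //   UTF16:  coder = 1 -> 10 >> 1 = 5 chars |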
4139 | |
4140 | Node* GraphKit::load_String_value(Node* str, bool set_ctrl) { |
4141 | int value_offset = java_lang_String::value_offset(); |
4142 | const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(), |
4143 | false, NULL, 0); |
4144 | const TypePtr* value_field_type = string_type->add_offset(value_offset); |
4145 | const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull, |
4146 | TypeAry::make(TypeInt::BYTE, TypeInt::POS), |
4147 | ciTypeArrayKlass::make(T_BYTE), true, 0); |
4148 | Node* p = basic_plus_adr(str, str, value_offset); |
4149 | Node* load = access_load_at(str, p, value_field_type, value_type, T_OBJECT, |
4150 | IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED); |
4151 | return load; |
4152 | } |
4153 | |
4154 | Node* GraphKit::load_String_coder(Node* str, bool set_ctrl) { |
4155 | if (!CompactStrings) { |
4156 | return intcon(java_lang_String::CODER_UTF16); |
4157 | } |
4158 | int coder_offset = java_lang_String::coder_offset(); |
4159 | const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(), |
4160 | false, NULL, 0); |
4161 | const TypePtr* coder_field_type = string_type->add_offset(coder_offset); |
4162 | |
4163 | Node* p = basic_plus_adr(str, str, coder_offset); |
4164 | Node* load = access_load_at(str, p, coder_field_type, TypeInt::BYTE, T_BYTE, |
4165 | IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED); |
4166 | return load; |
4167 | } |
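     | // With -XX:-CompactStrings every String is UTF16-encoded, so the load above |
     | // folds to a constant and its memory edge disappears. The coder values |
     | // assumed here mirror java.lang.String: |
     | // |
     | //   CODER_LATIN1 = 0  // one byte per char |
     | //   CODER_UTF16  = 1  // two bytes per char |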
4168 | |
4169 | void GraphKit::store_String_value(Node* str, Node* value) { |
4170 | int value_offset = java_lang_String::value_offset(); |
4171 | const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(), |
4172 | false, NULL, 0); |
4173 | const TypePtr* value_field_type = string_type->add_offset(value_offset); |
4174 | |
4175 | access_store_at(str, basic_plus_adr(str, value_offset), value_field_type, |
4176 | value, TypeAryPtr::BYTES, T_OBJECT, IN_HEAP | MO_UNORDERED); |
4177 | } |
4178 | |
4179 | void GraphKit::store_String_coder(Node* str, Node* value) { |
4180 | int coder_offset = java_lang_String::coder_offset(); |
4181 | const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(), |
4182 | false, NULL, 0); |
4183 | const TypePtr* coder_field_type = string_type->add_offset(coder_offset); |
4184 | |
4185 | access_store_at(str, basic_plus_adr(str, coder_offset), coder_field_type, |
4186 | value, TypeInt::BYTE, T_BYTE, IN_HEAP | MO_UNORDERED); |
4187 | } |
4188 | |
4189 | // Capture src and dst memory state with a MergeMemNode |
4190 | Node* GraphKit::capture_memory(const TypePtr* src_type, const TypePtr* dst_type) { |
4191 | if (src_type == dst_type) { |
4192 | // Types are equal; we don't need a MergeMemNode. |
4193 | return memory(src_type); |
4194 | } |
4195 | MergeMemNode* merge = MergeMemNode::make(map()->memory()); |
4196 | record_for_igvn(merge); // fold it up later, if possible |
4197 | int src_idx = C->get_alias_index(src_type); |
4198 | int dst_idx = C->get_alias_index(dst_type); |
4199 | merge->set_memory_at(src_idx, memory(src_idx)); |
4200 | merge->set_memory_at(dst_idx, memory(dst_idx)); |
4201 | return merge; |
4202 | } |
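     | // Hedged usage sketch, assuming byte[] and char[] slices with distinct |
     | // alias indices: |
     | // |
     | //   Node* mem = capture_memory(TypeAryPtr::CHARS, TypeAryPtr::BYTES); |
     | //   // 'mem' is a MergeMem carrying the current state of both slices, so a |
     | //   // node anchored on it observes prior stores to either array kind. |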
4203 | |
4204 | Node* GraphKit::compress_string(Node* src, const TypeAryPtr* src_type, Node* dst, Node* count) { |
4205 | assert(Matcher::match_rule_supported(Op_StrCompressedCopy), "Intrinsic not supported"); |
4206 | assert(src_type == TypeAryPtr::BYTES || src_type == TypeAryPtr::CHARS, "invalid source type"); |
4207 | // If input and output memory types differ, capture both states to preserve |
4208 | // the dependency between preceding and subsequent loads/stores. |
4209 | // For example, the following program: |
4210 | // StoreB |
4211 | // compress_string |
4212 | // LoadB |
4213 | // has this memory graph (use->def): |
4214 | // LoadB -> compress_string -> CharMem |
4215 | // ... -> StoreB -> ByteMem |
4216 | // The intrinsic hides the dependency between LoadB and StoreB, causing |
4217 | // the load to read from memory not containing the result of the StoreB. |
4218 | // The correct memory graph should look like this: |
4219 | // LoadB -> compress_string -> MergeMem(CharMem, StoreB(ByteMem)) |
4220 | Node* mem = capture_memory(src_type, TypeAryPtr::BYTES); |
4221 | StrCompressedCopyNode* str = new StrCompressedCopyNode(control(), mem, src, dst, count); |
4222 | Node* res_mem = _gvn.transform(new SCMemProjNode(_gvn.transform(str))); |
4223 | set_memory(res_mem, TypeAryPtr::BYTES); |
4224 | return str; |
4225 | } |
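     | // Call-site sketch with hypothetical locals, assuming 'src_start' and |
     | // 'dst_start' point at the first element to copy and 'count' is the char |
     | // count; 'res' carries the intrinsic's int result: |
     | // |
     | //   Node* res = compress_string(src_start, TypeAryPtr::CHARS, dst_start, count); |
     | //   // the BYTES memory slice already reflects the copy via SCMemProj |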
4226 | |
4227 | void GraphKit::inflate_string(Node* src, Node* dst, const TypeAryPtr* dst_type, Node* count) { |
4228 | assert(Matcher::match_rule_supported(Op_StrInflatedCopy), "Intrinsic not supported"); |
4229 | assert(dst_type == TypeAryPtr::BYTES || dst_type == TypeAryPtr::CHARS, "invalid dest type"); |
4230 | // Capture src and dst memory (see comment in 'compress_string'). |
4231 | Node* mem = capture_memory(TypeAryPtr::BYTES, dst_type); |
4232 | StrInflatedCopyNode* str = new StrInflatedCopyNode(control(), mem, src, dst, count); |
4233 | set_memory(_gvn.transform(str), dst_type); |
4234 | } |
4235 | |
4236 | void GraphKit::inflate_string_slow(Node* src, Node* dst, Node* start, Node* count) { |
4237 | /** |
4238 | * int i_char = start; |
4239 | * for (int i_byte = 0; i_byte < count; i_byte++) { |
4240 | *   dst[i_char++] = (char)(src[i_byte] & 0xff);  // dst is a byte[]; i_char is a byte offset, stepped by 2 |
4241 | * } |
4242 | */ |
4243 | add_empty_predicates(); |
4244 | C->set_has_loops(true); |
4245 | |
4246 | RegionNode* head = new RegionNode(3); |
4247 | head->init_req(1, control()); |
4248 | gvn().set_type(head, Type::CONTROL); |
4249 | record_for_igvn(head); |
4250 | |
4251 | Node* i_byte = new PhiNode(head, TypeInt::INT); |
4252 | i_byte->init_req(1, intcon(0)); |
4253 | gvn().set_type(i_byte, TypeInt::INT); |
4254 | record_for_igvn(i_byte); |
4255 | |
4256 | Node* i_char = new PhiNode(head, TypeInt::INT); |
4257 | i_char->init_req(1, start); |
4258 | gvn().set_type(i_char, TypeInt::INT); |
4259 | record_for_igvn(i_char); |
4260 | |
4261 | Node* mem = PhiNode::make(head, memory(TypeAryPtr::BYTES), Type::MEMORY, TypeAryPtr::BYTES); |
4262 | gvn().set_type(mem, Type::MEMORY); |
4263 | record_for_igvn(mem); |
4264 | set_control(head); |
4265 | set_memory(mem, TypeAryPtr::BYTES); |
4266 | Node* ch = load_array_element(src, i_byte, TypeAryPtr::BYTES, /* set_ctrl */ true); |
4267 | Node* st = store_to_memory(control(), array_element_address(dst, i_char, T_BYTE), |
4268 | AndI(ch, intcon(0xff)), T_CHAR, TypeAryPtr::BYTES, MemNode::unordered, |
4269 | false, false, true /* mismatched */); |
4270 | |
4271 | IfNode* iff = create_and_map_if(head, Bool(CmpI(i_byte, count), BoolTest::lt), PROB_FAIR, COUNT_UNKNOWN); |
4272 | head->init_req(2, IfTrue(iff)); |
4273 | mem->init_req(2, st); |
4274 | i_byte->init_req(2, AddI(i_byte, intcon(1))); |
4275 | i_char->init_req(2, AddI(i_char, intcon(2))); |
4276 | |
4277 | set_control(IfFalse(iff)); |
4278 | set_memory(st, TypeAryPtr::BYTES); |
4279 | } |
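     | // Shape of the hand-built loop above, as a sketch (req 1 = entry, |
     | // req 2 = backedge): |
     | // |
     | //   head:   Region <- entry,         IfTrue(i_byte < count) |
     | //   i_byte: Phi    <- intcon(0),     AddI(i_byte, 1) |
     | //   i_char: Phi    <- start,         AddI(i_char, 2)   // byte offset |
     | //   mem:    Phi    <- memory(BYTES), mismatched char store |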
4280 | |
4281 | Node* GraphKit::make_constant_from_field(ciField* field, Node* obj) { |
4282 | if (!field->is_constant()) { |
4283 | return NULL; // Field not marked as constant. |
4284 | } |
4285 | ciInstance* holder = NULL; |
4286 | if (!field->is_static()) { |
4287 | ciObject* const_oop = obj->bottom_type()->is_oopptr()->const_oop(); |
4288 | if (const_oop != NULL && const_oop->is_instance()) { |
4289 | holder = const_oop->as_instance(); |
4290 | } |
4291 | } |
4292 | const Type* con_type = Type::make_constant_from_field(field, holder, field->layout_type(), |
4293 | /*is_unsigned_load=*/false); |
4294 | if (con_type != NULL) { |
4295 | return makecon(con_type); |
4296 | } |
4297 | return NULL; |
4298 | } |
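     | // Hedged sketch of a folding call site, assuming 'field' is the resolved |
     | // ciField and 'obj' the (possibly constant) receiver: |
     | // |
     | //   Node* con = make_constant_from_field(field, obj); |
     | //   if (con != NULL) { |
     | //     return con;  // the load folds to a constant |
     | //   } |
     | //   // ... otherwise emit the normal field load ... |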