File: jdk/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp
Warning: line 338, column 70: Called C++ object pointer is null
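
The flagged expression is the rax->as_VMReg() call on line 338 of the listing below. It appears to be the same kind of report the file itself already works around for GCC's -Wnonnull (see the comment and PRAGMA_NONNULL_IGNORED above generate_oop_map): in HotSpot, Register is a pointer typedef whose value is the register's small integer encoding, and rax encodes as 0, so the member call formally goes through a pointer that compares equal to null even though the member function never dereferences it. A minimal sketch of the pattern, using simplified stand-in declarations rather than the real register.hpp/register_x86.hpp definitions:

#include <cstdint>

// Simplified stand-ins for HotSpot's register handles (not the actual declarations).
class RegisterImpl;
typedef RegisterImpl* Register;

class RegisterImpl {
 public:
  // Member functions treat 'this' as an integer encoding and never dereference it.
  int encoding() const { return static_cast<int>(reinterpret_cast<std::intptr_t>(this)); }
};

const Register rax = reinterpret_cast<Register>(0);  // rax encodes as 0, i.e. a "null" pointer value

int main() {
  // Formally a member call through a null pointer: the shape the analyzer
  // reports at line 338 for rax->as_VMReg().
  return rax->encoding();
}
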
1 | /* | |||
2 | * Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved. | |||
3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | |||
4 | * | |||
5 | * This code is free software; you can redistribute it and/or modify it | |||
6 | * under the terms of the GNU General Public License version 2 only, as | |||
7 | * published by the Free Software Foundation. | |||
8 | * | |||
9 | * This code is distributed in the hope that it will be useful, but WITHOUT | |||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |||
12 | * version 2 for more details (a copy is included in the LICENSE file that | |||
13 | * accompanied this code). | |||
14 | * | |||
15 | * You should have received a copy of the GNU General Public License version | |||
16 | * 2 along with this work; if not, write to the Free Software Foundation, | |||
17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |||
18 | * | |||
19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA | |||
20 | * or visit www.oracle.com if you need additional information or have any | |||
21 | * questions. | |||
22 | * | |||
23 | */ | |||
24 | ||||
25 | #include "precompiled.hpp" | |||
26 | #include "asm/assembler.hpp" | |||
27 | #include "c1/c1_Defs.hpp" | |||
28 | #include "c1/c1_MacroAssembler.hpp" | |||
29 | #include "c1/c1_Runtime1.hpp" | |||
30 | #include "ci/ciUtilities.hpp" | |||
31 | #include "compiler/oopMap.hpp" | |||
32 | #include "gc/shared/cardTable.hpp" | |||
33 | #include "gc/shared/cardTableBarrierSet.hpp" | |||
34 | #include "gc/shared/collectedHeap.hpp" | |||
35 | #include "gc/shared/tlab_globals.hpp" | |||
36 | #include "interpreter/interpreter.hpp" | |||
37 | #include "memory/universe.hpp" | |||
38 | #include "nativeInst_x86.hpp" | |||
39 | #include "oops/compiledICHolder.hpp" | |||
40 | #include "oops/oop.inline.hpp" | |||
41 | #include "prims/jvmtiExport.hpp" | |||
42 | #include "register_x86.hpp" | |||
43 | #include "runtime/sharedRuntime.hpp" | |||
44 | #include "runtime/signature.hpp" | |||
45 | #include "runtime/stubRoutines.hpp" | |||
46 | #include "runtime/vframeArray.hpp" | |||
47 | #include "utilities/macros.hpp" | |||
48 | #include "vmreg_x86.inline.hpp" | |||
49 | ||||
50 | // Implementation of StubAssembler | |||
51 | ||||
52 | int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) { | |||
53 | // setup registers | |||
54 | const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); // is callee-saved register (Visual C++ calling conventions)
55 | assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different");
56 | assert(oop_result1 != thread && metadata_result != thread, "registers must be different");
57 | assert(args_size >= 0, "illegal args_size");
58 | bool align_stack = false; | |||
59 | #ifdef _LP64
60 | // At a method handle call, the stack may not be properly aligned | |||
61 | // when returning with an exception. | |||
62 | align_stack = (stub_id() == Runtime1::handle_exception_from_callee_id); | |||
63 | #endif | |||
64 | ||||
65 | #ifdef _LP64
66 | mov(c_rarg0, thread); | |||
67 | set_num_rt_args(0); // Nothing on stack | |||
68 | #else | |||
69 | set_num_rt_args(1 + args_size); | |||
70 | ||||
71 | // push java thread (becomes first argument of C function) | |||
72 | get_thread(thread); | |||
73 | push(thread); | |||
74 | #endif // _LP64 | |||
75 | ||||
76 | int call_offset = -1; | |||
77 | if (!align_stack) { | |||
78 | set_last_Java_frame(thread, noreg, rbp, NULL);
79 | } else { | |||
80 | address the_pc = pc(); | |||
81 | call_offset = offset(); | |||
82 | set_last_Java_frame(thread, noreg, rbp, the_pc); | |||
83 | andptr(rsp, -(StackAlignmentInBytes)); // Align stack | |||
84 | } | |||
85 | ||||
86 | // do the call | |||
87 | call(RuntimeAddress(entry)); | |||
88 | if (!align_stack) { | |||
89 | call_offset = offset(); | |||
90 | } | |||
91 | // verify callee-saved register | |||
92 | #ifdef ASSERT
93 | guarantee(thread != rax, "change this code");
94 | push(rax); | |||
95 | { Label L; | |||
96 | get_thread(rax); | |||
97 | cmpptr(thread, rax); | |||
98 | jcc(Assembler::equal, L); | |||
99 | int3(); | |||
100 | stop("StubAssembler::call_RT: rdi not callee saved?"); | |||
101 | bind(L); | |||
102 | } | |||
103 | pop(rax); | |||
104 | #endif | |||
105 | reset_last_Java_frame(thread, true); | |||
106 | ||||
107 | // discard thread and arguments | |||
108 | NOT_LP64(addptr(rsp, num_rt_args()*BytesPerWord)); | |||
109 | ||||
110 | // check for pending exceptions | |||
111 | { Label L; | |||
112 | cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
113 | jcc(Assembler::equal, L); | |||
114 | // exception pending => remove activation and forward to exception handler | |||
115 | movptr(rax, Address(thread, Thread::pending_exception_offset())); | |||
116 | // make sure that the vm_results are cleared | |||
117 | if (oop_result1->is_valid()) { | |||
118 | movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
119 | } | |||
120 | if (metadata_result->is_valid()) { | |||
121 | movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
122 | } | |||
123 | if (frame_size() == no_frame_size) { | |||
124 | leave(); | |||
125 | jump(RuntimeAddress(StubRoutines::forward_exception_entry())); | |||
126 | } else if (_stub_id == Runtime1::forward_exception_id) { | |||
127 | should_not_reach_here(); | |||
128 | } else { | |||
129 | jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id))); | |||
130 | } | |||
131 | bind(L); | |||
132 | } | |||
133 | // get oop results if there are any and reset the values in the thread | |||
134 | if (oop_result1->is_valid()) { | |||
135 | get_vm_result(oop_result1, thread); | |||
136 | } | |||
137 | if (metadata_result->is_valid()) { | |||
138 | get_vm_result_2(metadata_result, thread); | |||
139 | } | |||
140 | ||||
141 | assert(call_offset >= 0, "Should be set");
142 | return call_offset; | |||
143 | } | |||
144 | ||||
145 | ||||
146 | int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) { | |||
147 | #ifdef _LP64
148 | mov(c_rarg1, arg1); | |||
149 | #else | |||
150 | push(arg1); | |||
151 | #endif // _LP64 | |||
152 | return call_RT(oop_result1, metadata_result, entry, 1); | |||
153 | } | |||
154 | ||||
155 | ||||
156 | int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) { | |||
157 | #ifdef _LP64
158 | if (c_rarg1 == arg2) { | |||
159 | if (c_rarg2 == arg1) { | |||
160 | xchgq(arg1, arg2); | |||
161 | } else { | |||
162 | mov(c_rarg2, arg2); | |||
163 | mov(c_rarg1, arg1); | |||
164 | } | |||
165 | } else { | |||
166 | mov(c_rarg1, arg1); | |||
167 | mov(c_rarg2, arg2); | |||
168 | } | |||
169 | #else | |||
170 | push(arg2); | |||
171 | push(arg1); | |||
172 | #endif // _LP64 | |||
173 | return call_RT(oop_result1, metadata_result, entry, 2); | |||
174 | } | |||
175 | ||||
176 | ||||
177 | int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) { | |||
178 | #ifdef _LP64
179 | // if there is any conflict use the stack | |||
180 | if (arg1 == c_rarg2 || arg1 == c_rarg3 || | |||
181 | arg2 == c_rarg1 || arg2 == c_rarg3 || | |||
182 | arg3 == c_rarg1 || arg3 == c_rarg2) { | |||
183 | push(arg3); | |||
184 | push(arg2); | |||
185 | push(arg1); | |||
186 | pop(c_rarg1); | |||
187 | pop(c_rarg2); | |||
188 | pop(c_rarg3); | |||
189 | } else { | |||
190 | mov(c_rarg1, arg1); | |||
191 | mov(c_rarg2, arg2); | |||
192 | mov(c_rarg3, arg3); | |||
193 | } | |||
194 | #else | |||
195 | push(arg3); | |||
196 | push(arg2); | |||
197 | push(arg1); | |||
198 | #endif // _LP64 | |||
199 | return call_RT(oop_result1, metadata_result, entry, 3); | |||
200 | } | |||
201 | ||||
202 | ||||
203 | // Implementation of StubFrame | |||
204 | ||||
205 | class StubFrame: public StackObj { | |||
206 | private: | |||
207 | StubAssembler* _sasm; | |||
208 | ||||
209 | public: | |||
210 | StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments); | |||
211 | void load_argument(int offset_in_words, Register reg); | |||
212 | ||||
213 | ~StubFrame(); | |||
214 | }; | |||
215 | ||||
216 | void StubAssembler::prologue(const char* name, bool must_gc_arguments) { | |||
217 | set_info(name, must_gc_arguments); | |||
218 | enter(); | |||
219 | } | |||
220 | ||||
221 | void StubAssembler::epilogue() { | |||
222 | leave(); | |||
223 | ret(0); | |||
224 | } | |||
225 | ||||
226 | #define __ _sasm-> | |||
227 | ||||
228 | StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) { | |||
229 | _sasm = sasm; | |||
230 | __ prologue(name, must_gc_arguments); | |||
231 | } | |||
232 | ||||
233 | // load parameters that were stored with LIR_Assembler::store_parameter | |||
234 | // Note: offsets for store_parameter and load_argument must match | |||
235 | void StubFrame::load_argument(int offset_in_words, Register reg) { | |||
236 | __ load_parameter(offset_in_words, reg); | |||
237 | } | |||
238 | ||||
239 | ||||
240 | StubFrame::~StubFrame() { | |||
241 | __ epilogue(); | |||
242 | } | |||
243 | ||||
244 | #undef __ | |||
245 | ||||
246 | ||||
247 | // Implementation of Runtime1 | |||
248 | ||||
249 | const int float_regs_as_doubles_size_in_slots = pd_nof_fpu_regs_frame_map * 2; | |||
250 | const int xmm_regs_as_doubles_size_in_slots = FrameMap::nof_xmm_regs * 2; | |||
251 | ||||
252 | // Stack layout for saving/restoring all the registers needed during a runtime | |||
253 | // call (this includes deoptimization) | |||
254 | // Note: note that users of this frame may well have arguments to some runtime | |||
255 | // while these values are on the stack. These positions neglect those arguments | |||
256 | // but the code in save_live_registers will take the argument count into | |||
257 | // account. | |||
258 | // | |||
259 | #ifdef _LP64
260 | #define SLOT2(x) x,
261 | #define SLOT_PER_WORD 2
262 | #else
263 | #define SLOT2(x)
264 | #define SLOT_PER_WORD 1
265 | #endif // _LP64 | |||
266 | ||||
267 | enum reg_save_layout { | |||
268 | // 64bit needs to keep stack 16 byte aligned. So we add some alignment dummies to make that | |||
269 | // happen and will assert if the stack size we create is misaligned | |||
270 | #ifdef _LP64
271 | align_dummy_0, align_dummy_1, | |||
272 | #endif // _LP64 | |||
273 | #ifdef _WIN64 | |||
274 | // Windows always allocates space for its argument registers (see
275 | // frame::arg_reg_save_area_bytes). | |||
276 | arg_reg_save_1, arg_reg_save_1H, // 0, 4 | |||
277 | arg_reg_save_2, arg_reg_save_2H, // 8, 12 | |||
278 | arg_reg_save_3, arg_reg_save_3H, // 16, 20 | |||
279 | arg_reg_save_4, arg_reg_save_4H, // 24, 28 | |||
280 | #endif // _WIN64 | |||
281 | xmm_regs_as_doubles_off, // 32 | |||
282 | float_regs_as_doubles_off = xmm_regs_as_doubles_off + xmm_regs_as_doubles_size_in_slots, // 160 | |||
283 | fpu_state_off = float_regs_as_doubles_off + float_regs_as_doubles_size_in_slots, // 224 | |||
284 | // fpu_state_end_off is exclusive | |||
285 | fpu_state_end_off = fpu_state_off + (FPUStateSizeInWords / SLOT_PER_WORD), // 352
286 | marker = fpu_state_end_off, SLOT2(markerH) // 352, 356
287 | extra_space_offset, // 360 | |||
288 | #ifdef _LP64
289 | r15_off = extra_space_offset, r15H_off, // 360, 364 | |||
290 | r14_off, r14H_off, // 368, 372 | |||
291 | r13_off, r13H_off, // 376, 380 | |||
292 | r12_off, r12H_off, // 384, 388 | |||
293 | r11_off, r11H_off, // 392, 396 | |||
294 | r10_off, r10H_off, // 400, 404 | |||
295 | r9_off, r9H_off, // 408, 412 | |||
296 | r8_off, r8H_off, // 416, 420 | |||
297 | rdi_off, rdiH_off, // 424, 428 | |||
298 | #else | |||
299 | rdi_off = extra_space_offset, | |||
300 | #endif // _LP64 | |||
301 | rsi_off, SLOT2(rsiH_off) // 432, 436
302 | rbp_off, SLOT2(rbpH_off) // 440, 444
303 | rsp_off, SLOT2(rspH_off) // 448, 452
304 | rbx_off, SLOT2(rbxH_off) // 456, 460
305 | rdx_off, SLOT2(rdxH_off) // 464, 468
306 | rcx_off, SLOT2(rcxH_off) // 472, 476
307 | rax_off, SLOT2(raxH_off) // 480, 484
308 | saved_rbp_off, SLOT2(saved_rbpH_off) // 488, 492
309 | return_off, SLOT2(returnH_off) // 496, 500
310 | reg_save_frame_size // As noted: neglects any parameters to runtime // 504 | |||
311 | }; | |||
312 | ||||
313 | // Save off registers which might be killed by calls into the runtime. | |||
314 | // Tries to be smart about FP registers. In particular we separate
315 | // saving and describing the FPU registers for deoptimization since we | |||
316 | // have to save the FPU registers twice if we describe them and on P4 | |||
317 | // saving FPU registers which don't contain anything appears | |||
318 | // expensive. The deopt blob is the only thing which needs to | |||
319 | // describe FPU registers. In all other cases it should be sufficient | |||
320 | // to simply save their current value. | |||
321 | // | |||
322 | // Register is a class, but it is assigned a numerical value;
323 | // "0" is assigned for rax. Thus we need to ignore -Wnonnull.
324 | PRAGMA_DIAG_PUSH
325 | PRAGMA_NONNULL_IGNORED
326 | static OopMap* generate_oop_map(StubAssembler* sasm, int num_rt_args, | |||
327 | bool save_fpu_registers = true) { | |||
328 | ||||
329 | // In 64bit all the args are in regs so there are no additional stack slots | |||
330 | LP64_ONLY(num_rt_args = 0);
331 | LP64_ONLY(assert((reg_save_frame_size * VMRegImpl::stack_slot_size) % 16 == 0, "must be 16 byte aligned");)
332 | int frame_size_in_slots = reg_save_frame_size + num_rt_args; // args + thread | |||
333 | sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word); | |||
334 | ||||
335 | // record saved value locations in an OopMap | |||
336 | // locations are offsets from sp after runtime call; num_rt_args is number of arguments in call, including thread | |||
337 | OopMap* map = new OopMap(frame_size_in_slots, 0); | |||
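    | // The next line (line 338) is the one the analyzer flags: rax is a Register
    | // handle whose numerical value is 0, so rax->as_VMReg() is formally a member
    | // call through a null pointer, the same kind of report the -Wnonnull pragma
    | // above suppresses for GCC.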
338 | map->set_callee_saved(VMRegImpl::stack2reg(rax_off + num_rt_args), rax->as_VMReg());
339 | map->set_callee_saved(VMRegImpl::stack2reg(rcx_off + num_rt_args), rcx->as_VMReg()); | |||
340 | map->set_callee_saved(VMRegImpl::stack2reg(rdx_off + num_rt_args), rdx->as_VMReg()); | |||
341 | map->set_callee_saved(VMRegImpl::stack2reg(rbx_off + num_rt_args), rbx->as_VMReg()); | |||
342 | map->set_callee_saved(VMRegImpl::stack2reg(rsi_off + num_rt_args), rsi->as_VMReg()); | |||
343 | map->set_callee_saved(VMRegImpl::stack2reg(rdi_off + num_rt_args), rdi->as_VMReg()); | |||
344 | #ifdef _LP64
345 | map->set_callee_saved(VMRegImpl::stack2reg(r8_off + num_rt_args), r8->as_VMReg()); | |||
346 | map->set_callee_saved(VMRegImpl::stack2reg(r9_off + num_rt_args), r9->as_VMReg()); | |||
347 | map->set_callee_saved(VMRegImpl::stack2reg(r10_off + num_rt_args), r10->as_VMReg()); | |||
348 | map->set_callee_saved(VMRegImpl::stack2reg(r11_off + num_rt_args), r11->as_VMReg()); | |||
349 | map->set_callee_saved(VMRegImpl::stack2reg(r12_off + num_rt_args), r12->as_VMReg()); | |||
350 | map->set_callee_saved(VMRegImpl::stack2reg(r13_off + num_rt_args), r13->as_VMReg()); | |||
351 | map->set_callee_saved(VMRegImpl::stack2reg(r14_off + num_rt_args), r14->as_VMReg()); | |||
352 | map->set_callee_saved(VMRegImpl::stack2reg(r15_off + num_rt_args), r15->as_VMReg()); | |||
353 | ||||
354 | // This is stupid but needed. | |||
355 | map->set_callee_saved(VMRegImpl::stack2reg(raxH_off + num_rt_args), rax->as_VMReg()->next()); | |||
356 | map->set_callee_saved(VMRegImpl::stack2reg(rcxH_off + num_rt_args), rcx->as_VMReg()->next()); | |||
357 | map->set_callee_saved(VMRegImpl::stack2reg(rdxH_off + num_rt_args), rdx->as_VMReg()->next()); | |||
358 | map->set_callee_saved(VMRegImpl::stack2reg(rbxH_off + num_rt_args), rbx->as_VMReg()->next()); | |||
359 | map->set_callee_saved(VMRegImpl::stack2reg(rsiH_off + num_rt_args), rsi->as_VMReg()->next()); | |||
360 | map->set_callee_saved(VMRegImpl::stack2reg(rdiH_off + num_rt_args), rdi->as_VMReg()->next()); | |||
361 | ||||
362 | map->set_callee_saved(VMRegImpl::stack2reg(r8H_off + num_rt_args), r8->as_VMReg()->next()); | |||
363 | map->set_callee_saved(VMRegImpl::stack2reg(r9H_off + num_rt_args), r9->as_VMReg()->next()); | |||
364 | map->set_callee_saved(VMRegImpl::stack2reg(r10H_off + num_rt_args), r10->as_VMReg()->next()); | |||
365 | map->set_callee_saved(VMRegImpl::stack2reg(r11H_off + num_rt_args), r11->as_VMReg()->next()); | |||
366 | map->set_callee_saved(VMRegImpl::stack2reg(r12H_off + num_rt_args), r12->as_VMReg()->next()); | |||
367 | map->set_callee_saved(VMRegImpl::stack2reg(r13H_off + num_rt_args), r13->as_VMReg()->next()); | |||
368 | map->set_callee_saved(VMRegImpl::stack2reg(r14H_off + num_rt_args), r14->as_VMReg()->next()); | |||
369 | map->set_callee_saved(VMRegImpl::stack2reg(r15H_off + num_rt_args), r15->as_VMReg()->next()); | |||
370 | #endif // _LP64 | |||
371 | ||||
372 | int xmm_bypass_limit = FrameMap::nof_xmm_regs; | |||
373 | #ifdef _LP64
374 | if (UseAVX < 3) { | |||
375 | xmm_bypass_limit = xmm_bypass_limit / 2; | |||
376 | } | |||
377 | #endif | |||
378 | ||||
379 | if (save_fpu_registers) { | |||
380 | #ifndef _LP64
381 | if (UseSSE < 2) { | |||
382 | int fpu_off = float_regs_as_doubles_off; | |||
383 | for (int n = 0; n < FrameMap::nof_fpu_regs; n++) { | |||
384 | VMReg fpu_name_0 = FrameMap::fpu_regname(n); | |||
385 | map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + num_rt_args), fpu_name_0); | |||
386 | // %%% This is really a waste but we'll keep things as they were for now | |||
387 | if (true) { | |||
388 | map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + 1 + num_rt_args), fpu_name_0->next()); | |||
389 | } | |||
390 | fpu_off += 2; | |||
391 | } | |||
392 | assert(fpu_off == fpu_state_off, "incorrect number of fpu stack slots");
393 | ||||
394 | if (UseSSE == 1) { | |||
395 | int xmm_off = xmm_regs_as_doubles_off; | |||
396 | for (int n = 0; n < FrameMap::nof_fpu_regs; n++) { | |||
397 | VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg(); | |||
398 | map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0); | |||
399 | xmm_off += 2; | |||
400 | } | |||
401 | assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers");
402 | } | |||
403 | } | |||
404 | #endif // !LP64 | |||
405 | ||||
406 | if (UseSSE >= 2) { | |||
407 | int xmm_off = xmm_regs_as_doubles_off; | |||
408 | for (int n = 0; n < FrameMap::nof_xmm_regs; n++) { | |||
409 | if (n < xmm_bypass_limit) { | |||
410 | VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg(); | |||
411 | map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0); | |||
412 | // %%% This is really a waste but we'll keep things as they were for now | |||
413 | if (true) { | |||
414 | map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + 1 + num_rt_args), xmm_name_0->next()); | |||
415 | } | |||
416 | } | |||
417 | xmm_off += 2; | |||
418 | } | |||
419 | assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers");
420 | } | |||
421 | } | |||
422 | ||||
423 | return map; | |||
424 | } | |||
425 | PRAGMA_DIAG_POP
426 | ||||
427 | #define __ this-> | |||
428 | ||||
429 | void C1_MacroAssembler::save_live_registers_no_oop_map(bool save_fpu_registers) { | |||
430 | __ block_comment("save_live_registers"); | |||
431 | ||||
432 | __ pusha(); // integer registers | |||
433 | ||||
434 | // assert(float_regs_as_doubles_off % 2 == 0, "misaligned offset"); | |||
435 | // assert(xmm_regs_as_doubles_off % 2 == 0, "misaligned offset"); | |||
436 | ||||
437 | __ subptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size); | |||
438 | ||||
439 | #ifdef ASSERT
440 | __ movptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef); | |||
441 | #endif | |||
442 | ||||
443 | if (save_fpu_registers) { | |||
444 | #ifndef _LP64
445 | if (UseSSE < 2) { | |||
446 | // save FPU stack | |||
447 | __ fnsave(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size)); | |||
448 | __ fwait(); | |||
449 | ||||
450 | #ifdef ASSERT
451 | Label ok; | |||
452 | __ cmpw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::x86::fpu_cntrl_wrd_std()); | |||
453 | __ jccb(Assembler::equal, ok);
454 | __ stop("corrupted control word detected"); | |||
455 | __ bind(ok); | |||
456 | #endif | |||
457 | ||||
458 | // Reset the control word to guard against exceptions being unmasked | |||
459 | // since fstp_d can cause FPU stack underflow exceptions. Write it | |||
460 | // into the on stack copy and then reload that to make sure that the | |||
461 | // current and future values are correct. | |||
462 | __ movw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::x86::fpu_cntrl_wrd_std()); | |||
463 | __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size)); | |||
464 | ||||
465 | // Save the FPU registers in de-opt-able form | |||
466 | int offset = 0; | |||
467 | for (int n = 0; n < FrameMap::nof_fpu_regs; n++) { | |||
468 | __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset)); | |||
469 | offset += 8; | |||
470 | } | |||
471 | ||||
472 | if (UseSSE == 1) { | |||
473 | // save XMM registers as float because double not supported without SSE2(num MMX == num fpu) | |||
474 | int offset = 0; | |||
475 | for (int n = 0; n < FrameMap::nof_fpu_regs; n++) { | |||
476 | XMMRegister xmm_name = as_XMMRegister(n); | |||
477 | __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset), xmm_name); | |||
478 | offset += 8; | |||
479 | } | |||
480 | } | |||
481 | } | |||
482 | #endif // !_LP64 | |||
483 | ||||
484 | if (UseSSE >= 2) { | |||
485 | // save XMM registers | |||
486 | // XMM registers can contain float or double values, but this is not known here, | |||
487 | // so always save them as doubles. | |||
488 | // note that float values are _not_ converted automatically, so for float values | |||
489 | // the second word contains only garbage data. | |||
490 | int xmm_bypass_limit = FrameMap::nof_xmm_regs; | |||
491 | int offset = 0; | |||
492 | #ifdef _LP64
493 | if (UseAVX < 3) { | |||
494 | xmm_bypass_limit = xmm_bypass_limit / 2; | |||
495 | } | |||
496 | #endif | |||
497 | for (int n = 0; n < xmm_bypass_limit; n++) { | |||
498 | XMMRegister xmm_name = as_XMMRegister(n); | |||
499 | __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset), xmm_name); | |||
500 | offset += 8; | |||
501 | } | |||
502 | } | |||
503 | } | |||
504 | ||||
505 | // FPU stack must be empty now | |||
506 | NOT_LP64( __ verify_FPU(0, "save_live_registers"); ) | |||
507 | } | |||
508 | ||||
509 | #undef __ | |||
510 | #define __ sasm-> | |||
511 | ||||
512 | static void restore_fpu(C1_MacroAssembler* sasm, bool restore_fpu_registers) { | |||
513 | #ifdef _LP64
514 | if (restore_fpu_registers) { | |||
515 | // restore XMM registers | |||
516 | int xmm_bypass_limit = FrameMap::nof_xmm_regs; | |||
517 | if (UseAVX < 3) { | |||
518 | xmm_bypass_limit = xmm_bypass_limit / 2; | |||
519 | } | |||
520 | int offset = 0; | |||
521 | for (int n = 0; n < xmm_bypass_limit; n++) { | |||
522 | XMMRegister xmm_name = as_XMMRegister(n); | |||
523 | __ movdbl(xmm_name, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset)); | |||
524 | offset += 8; | |||
525 | } | |||
526 | } | |||
527 | #else | |||
528 | if (restore_fpu_registers) { | |||
529 | if (UseSSE >= 2) { | |||
530 | // restore XMM registers | |||
531 | int xmm_bypass_limit = FrameMap::nof_xmm_regs; | |||
532 | int offset = 0; | |||
533 | for (int n = 0; n < xmm_bypass_limit; n++) { | |||
534 | XMMRegister xmm_name = as_XMMRegister(n); | |||
535 | __ movdbl(xmm_name, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset)); | |||
536 | offset += 8; | |||
537 | } | |||
538 | } else if (UseSSE == 1) { | |||
539 | // restore XMM registers(num MMX == num fpu) | |||
540 | int offset = 0; | |||
541 | for (int n = 0; n < FrameMap::nof_fpu_regs; n++) { | |||
542 | XMMRegister xmm_name = as_XMMRegister(n); | |||
543 | __ movflt(xmm_name, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset)); | |||
544 | offset += 8; | |||
545 | } | |||
546 | } | |||
547 | ||||
548 | if (UseSSE < 2) { | |||
549 | __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size)); | |||
550 | } else { | |||
551 | // check that FPU stack is really empty | |||
552 | __ verify_FPU(0, "restore_live_registers"); | |||
553 | } | |||
554 | } else { | |||
555 | // check that FPU stack is really empty | |||
556 | __ verify_FPU(0, "restore_live_registers"); | |||
557 | } | |||
558 | #endif // _LP64 | |||
559 | ||||
560 | #ifdef ASSERT
561 | { | |||
562 | Label ok; | |||
563 | __ cmpptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef); | |||
564 | __ jcc(Assembler::equal, ok); | |||
565 | __ stop("bad offsets in frame"); | |||
566 | __ bind(ok); | |||
567 | } | |||
568 | #endif // ASSERT | |||
569 | ||||
570 | __ addptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size); | |||
571 | } | |||
572 | ||||
573 | #undef __ | |||
574 | #define __ this-> | |||
575 | ||||
576 | void C1_MacroAssembler::restore_live_registers(bool restore_fpu_registers) { | |||
577 | __ block_comment("restore_live_registers"); | |||
578 | ||||
579 | restore_fpu(this, restore_fpu_registers); | |||
580 | __ popa(); | |||
581 | } | |||
582 | ||||
583 | ||||
584 | void C1_MacroAssembler::restore_live_registers_except_rax(bool restore_fpu_registers) { | |||
585 | __ block_comment("restore_live_registers_except_rax"); | |||
586 | ||||
587 | restore_fpu(this, restore_fpu_registers); | |||
588 | ||||
589 | #ifdef _LP64
590 | __ movptr(r15, Address(rsp, 0)); | |||
591 | __ movptr(r14, Address(rsp, wordSize)); | |||
592 | __ movptr(r13, Address(rsp, 2 * wordSize)); | |||
593 | __ movptr(r12, Address(rsp, 3 * wordSize)); | |||
594 | __ movptr(r11, Address(rsp, 4 * wordSize)); | |||
595 | __ movptr(r10, Address(rsp, 5 * wordSize)); | |||
596 | __ movptr(r9, Address(rsp, 6 * wordSize)); | |||
597 | __ movptr(r8, Address(rsp, 7 * wordSize)); | |||
598 | __ movptr(rdi, Address(rsp, 8 * wordSize)); | |||
599 | __ movptr(rsi, Address(rsp, 9 * wordSize)); | |||
600 | __ movptr(rbp, Address(rsp, 10 * wordSize)); | |||
601 | // skip rsp | |||
602 | __ movptr(rbx, Address(rsp, 12 * wordSize)); | |||
603 | __ movptr(rdx, Address(rsp, 13 * wordSize)); | |||
604 | __ movptr(rcx, Address(rsp, 14 * wordSize)); | |||
605 | ||||
606 | __ addptr(rsp, 16 * wordSize); | |||
607 | #else | |||
608 | ||||
609 | __ pop(rdi); | |||
610 | __ pop(rsi); | |||
611 | __ pop(rbp); | |||
612 | __ pop(rbx); // skip this value | |||
613 | __ pop(rbx); | |||
614 | __ pop(rdx); | |||
615 | __ pop(rcx); | |||
616 | __ addptr(rsp, BytesPerWord); | |||
617 | #endif // _LP64 | |||
618 | } | |||
619 | ||||
620 | #undef __ | |||
621 | #define __ sasm-> | |||
622 | ||||
623 | static OopMap* save_live_registers(StubAssembler* sasm, int num_rt_args, | |||
624 | bool save_fpu_registers = true) { | |||
625 | __ save_live_registers_no_oop_map(save_fpu_registers); | |||
626 | return generate_oop_map(sasm, num_rt_args, save_fpu_registers); | |||
627 | } | |||
628 | ||||
629 | static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) { | |||
630 | __ restore_live_registers(restore_fpu_registers); | |||
631 | } | |||
632 | ||||
633 | static void restore_live_registers_except_rax(StubAssembler* sasm, bool restore_fpu_registers = true) { | |||
634 | sasm->restore_live_registers_except_rax(restore_fpu_registers); | |||
635 | } | |||
636 | ||||
637 | ||||
638 | void Runtime1::initialize_pd() { | |||
639 | // nothing to do | |||
640 | } | |||
641 | ||||
642 | ||||
643 | // Target: the entry point of the method that creates and posts the exception oop. | |||
644 | // has_argument: true if the exception needs arguments (passed on the stack because | |||
645 | // registers must be preserved). | |||
646 | OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) { | |||
647 | // Preserve all registers. | |||
648 | int num_rt_args = has_argument ? (2 + 1) : 1; | |||
649 | OopMap* oop_map = save_live_registers(sasm, num_rt_args); | |||
650 | ||||
651 | // Now all registers are saved and can be used freely. | |||
652 | // Verify that no old value is used accidentally. | |||
653 | __ invalidate_registers(true, true, true, true, true, true); | |||
654 | ||||
655 | // Registers used by this stub. | |||
656 | const Register temp_reg = rbx; | |||
657 | ||||
658 | // Load arguments for exception that are passed as arguments into the stub. | |||
659 | if (has_argument) { | |||
660 | #ifdef _LP64
661 | __ movptr(c_rarg1, Address(rbp, 2*BytesPerWord)); | |||
662 | __ movptr(c_rarg2, Address(rbp, 3*BytesPerWord)); | |||
663 | #else | |||
664 | __ movptr(temp_reg, Address(rbp, 3*BytesPerWord)); | |||
665 | __ push(temp_reg); | |||
666 | __ movptr(temp_reg, Address(rbp, 2*BytesPerWord)); | |||
667 | __ push(temp_reg); | |||
668 | #endif // _LP64 | |||
669 | } | |||
670 | int call_offset = __ call_RT(noreg, noreg, target, num_rt_args - 1); | |||
671 | ||||
672 | OopMapSet* oop_maps = new OopMapSet(); | |||
673 | oop_maps->add_gc_map(call_offset, oop_map); | |||
674 | ||||
675 | __ stop("should not reach here"); | |||
676 | ||||
677 | return oop_maps; | |||
678 | } | |||
679 | ||||
680 | ||||
681 | OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) { | |||
682 | __ block_comment("generate_handle_exception"); | |||
683 | ||||
684 | // incoming parameters | |||
685 | const Register exception_oop = rax; | |||
686 | const Register exception_pc = rdx; | |||
687 | // other registers used in this stub | |||
688 | const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
689 | ||||
690 | // Save registers, if required. | |||
691 | OopMapSet* oop_maps = new OopMapSet(); | |||
692 | OopMap* oop_map = NULL;
693 | switch (id) { | |||
694 | case forward_exception_id: | |||
695 | // We're handling an exception in the context of a compiled frame. | |||
696 | // The registers have been saved in the standard places. Perform | |||
697 | // an exception lookup in the caller and dispatch to the handler | |||
698 | // if found. Otherwise unwind and dispatch to the callers | |||
699 | // exception handler. | |||
700 | oop_map = generate_oop_map(sasm, 1 /*thread*/); | |||
701 | ||||
702 | // load and clear pending exception oop into RAX | |||
703 | __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset())); | |||
704 | __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
705 | ||||
706 | // load issuing PC (the return address for this stub) into rdx | |||
707 | __ movptr(exception_pc, Address(rbp, 1*BytesPerWord)); | |||
708 | ||||
709 | // make sure that the vm_results are cleared (may be unnecessary) | |||
710 | __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
711 | __ movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
712 | break; | |||
713 | case handle_exception_nofpu_id: | |||
714 | case handle_exception_id: | |||
715 | // At this point all registers MAY be live. | |||
716 | oop_map = save_live_registers(sasm, 1 /*thread*/, id != handle_exception_nofpu_id); | |||
717 | break; | |||
718 | case handle_exception_from_callee_id: { | |||
719 | // At this point all registers except exception oop (RAX) and | |||
720 | // exception pc (RDX) are dead. | |||
721 | const int frame_size = 2 /*BP, return address*/ NOT_LP64(+ 1 /*thread*/) WIN64_ONLY(+ frame::arg_reg_save_area_bytes / BytesPerWord); | |||
722 | oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0); | |||
723 | sasm->set_frame_size(frame_size); | |||
724 | WIN64_ONLY(__ subq(rsp, frame::arg_reg_save_area_bytes)); | |||
725 | break; | |||
726 | } | |||
727 | default: ShouldNotReachHere();
728 | } | |||
729 | ||||
730 | #if !defined(_LP64) && defined(COMPILER2)
731 | if (UseSSE < 2 && !CompilerConfig::is_c1_only_no_jvmci()) { | |||
732 | // C2 can leave the fpu stack dirty | |||
733 | __ empty_FPU_stack(); | |||
734 | } | |||
735 | #endif // !_LP64 && COMPILER2 | |||
736 | ||||
737 | // verify that only rax, and rdx is valid at this time | |||
738 | __ invalidate_registers(false, true, true, false, true, true); | |||
739 | // verify that rax, contains a valid exception | |||
740 | __ verify_not_null_oop(exception_oop); | |||
741 | ||||
742 | // load address of JavaThread object for thread-local data | |||
743 | NOT_LP64(__ get_thread(thread);) | |||
744 | ||||
745 | #ifdef ASSERT
746 | // check that fields in JavaThread for exception oop and issuing pc are | |||
747 | // empty before writing to them | |||
748 | Label oop_empty; | |||
749 | __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t) NULL_WORD);
750 | __ jcc(Assembler::equal, oop_empty); | |||
751 | __ stop("exception oop already set"); | |||
752 | __ bind(oop_empty); | |||
753 | ||||
754 | Label pc_empty; | |||
755 | __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0); | |||
756 | __ jcc(Assembler::equal, pc_empty); | |||
757 | __ stop("exception pc already set"); | |||
758 | __ bind(pc_empty); | |||
759 | #endif | |||
760 | ||||
761 | // save exception oop and issuing pc into JavaThread | |||
762 | // (exception handler will load it from here) | |||
763 | __ movptr(Address(thread, JavaThread::exception_oop_offset()), exception_oop); | |||
764 | __ movptr(Address(thread, JavaThread::exception_pc_offset()), exception_pc); | |||
765 | ||||
766 | // patch throwing pc into return address (has bci & oop map) | |||
767 | __ movptr(Address(rbp, 1*BytesPerWord), exception_pc); | |||
768 | ||||
769 | // compute the exception handler. | |||
770 | // the exception oop and the throwing pc are read from the fields in JavaThread | |||
771 | int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
772 | oop_maps->add_gc_map(call_offset, oop_map); | |||
773 | ||||
774 | // rax: handler address | |||
775 | // will be the deopt blob if nmethod was deoptimized while we looked up | |||
776 | // handler regardless of whether handler existed in the nmethod. | |||
777 | ||||
778 | // only rax, is valid at this time, all other registers have been destroyed by the runtime call | |||
779 | __ invalidate_registers(false, true, true, true, true, true); | |||
780 | ||||
781 | // patch the return address, this stub will directly return to the exception handler | |||
782 | __ movptr(Address(rbp, 1*BytesPerWord), rax); | |||
783 | ||||
784 | switch (id) { | |||
785 | case forward_exception_id: | |||
786 | case handle_exception_nofpu_id: | |||
787 | case handle_exception_id: | |||
788 | // Restore the registers that were saved at the beginning. | |||
789 | restore_live_registers(sasm, id != handle_exception_nofpu_id); | |||
790 | break; | |||
791 | case handle_exception_from_callee_id: | |||
792 | // WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP | |||
793 | // since we do a leave anyway. | |||
794 | ||||
795 | // Pop the return address. | |||
796 | __ leave(); | |||
797 | __ pop(rcx); | |||
798 | __ jmp(rcx); // jump to exception handler | |||
799 | break; | |||
800 | default: ShouldNotReachHere();
801 | } | |||
802 | ||||
803 | return oop_maps; | |||
804 | } | |||
805 | ||||
806 | ||||
807 | void Runtime1::generate_unwind_exception(StubAssembler *sasm) { | |||
808 | // incoming parameters | |||
809 | const Register exception_oop = rax; | |||
810 | // callee-saved copy of exception_oop during runtime call | |||
811 | const Register exception_oop_callee_saved = NOT_LP64(rsi) LP64_ONLY(r14);
812 | // other registers used in this stub | |||
813 | const Register exception_pc = rdx; | |||
814 | const Register handler_addr = rbx; | |||
815 | const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
816 | ||||
817 | // verify that only rax, is valid at this time | |||
818 | __ invalidate_registers(false, true, true, true, true, true); | |||
819 | ||||
820 | #ifdef ASSERT
821 | // check that fields in JavaThread for exception oop and issuing pc are empty | |||
822 | NOT_LP64(__ get_thread(thread);) | |||
823 | Label oop_empty; | |||
824 | __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), 0); | |||
825 | __ jcc(Assembler::equal, oop_empty); | |||
826 | __ stop("exception oop must be empty"); | |||
827 | __ bind(oop_empty); | |||
828 | ||||
829 | Label pc_empty; | |||
830 | __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0); | |||
831 | __ jcc(Assembler::equal, pc_empty); | |||
832 | __ stop("exception pc must be empty"); | |||
833 | __ bind(pc_empty); | |||
834 | #endif | |||
835 | ||||
836 | // clear the FPU stack in case any FPU results are left behind | |||
837 | NOT_LP64( __ empty_FPU_stack(); ) | |||
838 | ||||
839 | // save exception_oop in callee-saved register to preserve it during runtime calls | |||
840 | __ verify_not_null_oop(exception_oop); | |||
841 | __ movptr(exception_oop_callee_saved, exception_oop); | |||
842 | ||||
843 | NOT_LP64(__ get_thread(thread);) | |||
844 | // Get return address (is on top of stack after leave). | |||
845 | __ movptr(exception_pc, Address(rsp, 0)); | |||
846 | ||||
847 | // search the exception handler address of the caller (using the return address) | |||
848 | __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc);
849 | // rax: exception handler address of the caller | |||
850 | ||||
851 | // Only RAX and RSI are valid at this time, all other registers have been destroyed by the call. | |||
852 | __ invalidate_registers(false, true, true, true, false, true); | |||
853 | ||||
854 | // move result of call into correct register | |||
855 | __ movptr(handler_addr, rax); | |||
856 | ||||
857 | // Restore exception oop to RAX (required convention of exception handler). | |||
858 | __ movptr(exception_oop, exception_oop_callee_saved); | |||
859 | ||||
860 | // verify that there is really a valid exception in rax | |||
861 | __ verify_not_null_oop(exception_oop); | |||
862 | ||||
863 | // get throwing pc (= return address). | |||
864 | // rdx has been destroyed by the call, so it must be set again | |||
865 | // the pop is also necessary to simulate the effect of a ret(0) | |||
866 | __ pop(exception_pc); | |||
867 | ||||
868 | // continue at exception handler (return address removed) | |||
869 | // note: do *not* remove arguments when unwinding the | |||
870 | // activation since the caller assumes having | |||
871 | // all arguments on the stack when entering the | |||
872 | // runtime to determine the exception handler | |||
873 | // (GC happens at call site with arguments!) | |||
874 | // rax: exception oop | |||
875 | // rdx: throwing pc | |||
876 | // rbx: exception handler | |||
877 | __ jmp(handler_addr); | |||
878 | } | |||
879 | ||||
880 | ||||
881 | OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) { | |||
882 | // use the maximum number of runtime-arguments here because it is difficult to | |||
883 | // distinguish each RT-Call. | |||
884 | // Note: This number affects also the RT-Call in generate_handle_exception because | |||
885 | // the oop-map is shared for all calls. | |||
886 | const int num_rt_args = 2; // thread + dummy | |||
887 | ||||
888 | DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob(); | |||
889 | assert(deopt_blob != NULL, "deoptimization blob must have been created");
890 | ||||
891 | OopMap* oop_map = save_live_registers(sasm, num_rt_args); | |||
892 | ||||
893 | #ifdef _LP64
894 | const Register thread = r15_thread; | |||
895 | // No need to worry about dummy | |||
896 | __ mov(c_rarg0, thread); | |||
897 | #else | |||
898 | __ push(rax); // push dummy | |||
899 | ||||
900 | const Register thread = rdi; // is callee-saved register (Visual C++ calling conventions) | |||
901 | // push java thread (becomes first argument of C function) | |||
902 | __ get_thread(thread); | |||
903 | __ push(thread); | |||
904 | #endif // _LP64 | |||
905 | __ set_last_Java_frame(thread, noreg, rbp, NULL);
906 | // do the call | |||
907 | __ call(RuntimeAddress(target)); | |||
908 | OopMapSet* oop_maps = new OopMapSet(); | |||
909 | oop_maps->add_gc_map(__ offset(), oop_map); | |||
910 | // verify callee-saved register | |||
911 | #ifdef ASSERT
912 | guarantee(thread != rax, "change this code");
913 | __ push(rax); | |||
914 | { Label L; | |||
915 | __ get_thread(rax); | |||
916 | __ cmpptr(thread, rax); | |||
917 | __ jcc(Assembler::equal, L); | |||
918 | __ stop("StubAssembler::call_RT: rdi/r15 not callee saved?"); | |||
919 | __ bind(L); | |||
920 | } | |||
921 | __ pop(rax); | |||
922 | #endif | |||
923 | __ reset_last_Java_frame(thread, true); | |||
924 | #ifndef _LP64
925 | __ pop(rcx); // discard thread arg | |||
926 | __ pop(rcx); // discard dummy | |||
927 | #endif // _LP64 | |||
928 | ||||
929 | // check for pending exceptions | |||
930 | { Label L; | |||
931 | __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
932 | __ jcc(Assembler::equal, L); | |||
933 | // exception pending => remove activation and forward to exception handler | |||
934 | ||||
935 | __ testptr(rax, rax); // have we deoptimized? | |||
936 | __ jump_cc(Assembler::equal, | |||
937 | RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id))); | |||
938 | ||||
939 | // the deopt blob expects exceptions in the special fields of | |||
940 | // JavaThread, so copy and clear pending exception. | |||
941 | ||||
942 | // load and clear pending exception | |||
943 | __ movptr(rax, Address(thread, Thread::pending_exception_offset())); | |||
944 | __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
945 | ||||
946 | // check that there is really a valid exception | |||
947 | __ verify_not_null_oop(rax); | |||
948 | ||||
949 | // load throwing pc: this is the return address of the stub | |||
950 | __ movptr(rdx, Address(rsp, return_off * VMRegImpl::stack_slot_size)); | |||
951 | ||||
952 | #ifdef ASSERT
953 | // check that fields in JavaThread for exception oop and issuing pc are empty | |||
954 | Label oop_empty; | |||
955 | __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
956 | __ jcc(Assembler::equal, oop_empty); | |||
957 | __ stop("exception oop must be empty"); | |||
958 | __ bind(oop_empty); | |||
959 | ||||
960 | Label pc_empty; | |||
961 | __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
962 | __ jcc(Assembler::equal, pc_empty); | |||
963 | __ stop("exception pc must be empty"); | |||
964 | __ bind(pc_empty); | |||
965 | #endif | |||
966 | ||||
967 | // store exception oop and throwing pc to JavaThread | |||
968 | __ movptr(Address(thread, JavaThread::exception_oop_offset()), rax); | |||
969 | __ movptr(Address(thread, JavaThread::exception_pc_offset()), rdx); | |||
970 | ||||
971 | restore_live_registers(sasm); | |||
972 | ||||
973 | __ leave(); | |||
974 | __ addptr(rsp, BytesPerWord); // remove return address from stack | |||
975 | ||||
976 | // Forward the exception directly to deopt blob. We can blow no
977 | // registers and must leave throwing pc on the stack. A patch may
978 | // have values live in registers, so we use the entry point that expects
979 | // the exception in tls.
980 | __ jump(RuntimeAddress(deopt_blob->unpack_with_exception_in_tls())); | |||
981 | ||||
982 | __ bind(L); | |||
983 | } | |||
984 | ||||
985 | ||||
986 | // Runtime will return true if the nmethod has been deoptimized during | |||
987 | // the patching process. In that case we must do a deopt reexecute instead. | |||
988 | ||||
989 | Label cont; | |||
990 | ||||
991 | __ testptr(rax, rax); // have we deoptimized? | |||
992 | __ jcc(Assembler::equal, cont); // no | |||
993 | ||||
994 | // Will reexecute. Proper return address is already on the stack we just restore | |||
995 | // registers, pop all of our frame but the return address and jump to the deopt blob | |||
996 | restore_live_registers(sasm); | |||
997 | __ leave(); | |||
998 | __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution())); | |||
999 | ||||
1000 | __ bind(cont); | |||
1001 | restore_live_registers(sasm); | |||
1002 | __ leave(); | |||
1003 | __ ret(0); | |||
1004 | ||||
1005 | return oop_maps; | |||
1006 | } | |||
1007 | ||||
1008 | ||||
1009 | OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { | |||
1010 | ||||
1011 | // for better readability | |||
1012 | const bool must_gc_arguments = true; | |||
1013 | const bool dont_gc_arguments = false; | |||
1014 | ||||
1015 | // default value; overwritten for some optimized stubs that are called from methods that do not use the fpu | |||
1016 | bool save_fpu_registers = true; | |||
1017 | ||||
1018 | // stub code & info for the different stubs | |||
1019 | OopMapSet* oop_maps = NULL;
1020 | switch (id) { | |||
| ||||
1021 | case forward_exception_id: | |||
1022 | { | |||
1023 | oop_maps = generate_handle_exception(id, sasm); | |||
1024 | __ leave(); | |||
1025 | __ ret(0); | |||
1026 | } | |||
1027 | break; | |||
1028 | ||||
1029 | case new_instance_id: | |||
1030 | case fast_new_instance_id: | |||
1031 | case fast_new_instance_init_check_id: | |||
1032 | { | |||
1033 | Register klass = rdx; // Incoming | |||
1034 | Register obj = rax; // Result | |||
1035 | ||||
1036 | if (id == new_instance_id) { | |||
1037 | __ set_info("new_instance", dont_gc_arguments); | |||
1038 | } else if (id == fast_new_instance_id) { | |||
1039 | __ set_info("fast new_instance", dont_gc_arguments); | |||
1040 | } else { | |||
1041 | assert(id == fast_new_instance_init_check_id, "bad StubID");
1042 | __ set_info("fast new_instance init check", dont_gc_arguments); | |||
1043 | } | |||
1044 | ||||
1045 | // If TLAB is disabled, see if there is support for inlining contiguous | |||
1046 | // allocations. | |||
1047 | // Otherwise, just go to the slow path. | |||
1048 | if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) && !UseTLAB | |||
1049 | && Universe::heap()->supports_inline_contig_alloc()) { | |||
1050 | Label slow_path; | |||
1051 | Register obj_size = rcx; | |||
1052 | Register t1 = rbx; | |||
1053 | Register t2 = rsi; | |||
1054 | assert_different_registers(klass, obj, obj_size, t1, t2); | |||
1055 | ||||
1056 | __ push(rdi); | |||
1057 | __ push(rbx); | |||
1058 | ||||
1059 | if (id == fast_new_instance_init_check_id) { | |||
1060 | // make sure the klass is initialized | |||
1061 | __ cmpb(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized); | |||
1062 | __ jcc(Assembler::notEqual, slow_path); | |||
1063 | } | |||
1064 | ||||
1065 | #ifdef ASSERT
1066 | // assert object can be fast path allocated | |||
1067 | { | |||
1068 | Label ok, not_ok; | |||
1069 | __ movl(obj_size, Address(klass, Klass::layout_helper_offset())); | |||
1070 | __ cmpl(obj_size, 0); // make sure it's an instance (LH > 0) | |||
1071 | __ jcc(Assembler::lessEqual, not_ok); | |||
1072 | __ testl(obj_size, Klass::_lh_instance_slow_path_bit); | |||
1073 | __ jcc(Assembler::zero, ok); | |||
1074 | __ bind(not_ok); | |||
1075 | __ stop("assert(can be fast path allocated)"); | |||
1076 | __ should_not_reach_here(); | |||
1077 | __ bind(ok); | |||
1078 | } | |||
1079 | #endif // ASSERT | |||
1080 | ||||
1081 | const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
1082 | NOT_LP64(__ get_thread(thread)); | |||
1083 | ||||
1084 | // get the instance size (size is positive so movl is fine for 64bit)
1085 | __ movl(obj_size, Address(klass, Klass::layout_helper_offset())); | |||
1086 | ||||
1087 | __ eden_allocate(thread, obj, obj_size, 0, t1, slow_path); | |||
1088 | ||||
1089 | __ initialize_object(obj, klass, obj_size, 0, t1, t2, /* is_tlab_allocated */ false); | |||
1090 | __ verify_oop(obj);
1091 | __ pop(rbx); | |||
1092 | __ pop(rdi); | |||
1093 | __ ret(0); | |||
1094 | ||||
1095 | __ bind(slow_path); | |||
1096 | __ pop(rbx); | |||
1097 | __ pop(rdi); | |||
1098 | } | |||
1099 | ||||
1100 | __ enter(); | |||
1101 | OopMap* map = save_live_registers(sasm, 2); | |||
1102 | int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
1103 | oop_maps = new OopMapSet(); | |||
1104 | oop_maps->add_gc_map(call_offset, map); | |||
1105 | restore_live_registers_except_rax(sasm); | |||
1106 | __ verify_oop(obj);
1107 | __ leave(); | |||
1108 | __ ret(0); | |||
1109 | ||||
1110 | // rax,: new instance | |||
1111 | } | |||
1112 | ||||
1113 | break; | |||
1114 | ||||
1115 | case counter_overflow_id: | |||
1116 | { | |||
1117 | Register bci = rax, method = rbx; | |||
1118 | __ enter(); | |||
1119 | OopMap* map = save_live_registers(sasm, 3); | |||
1120 | // Retrieve bci | |||
1121 | __ movl(bci, Address(rbp, 2*BytesPerWord)); | |||
1122 | // And a pointer to the Method* | |||
1123 | __ movptr(method, Address(rbp, 3*BytesPerWord)); | |||
1124 | int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
1125 | oop_maps = new OopMapSet(); | |||
1126 | oop_maps->add_gc_map(call_offset, map); | |||
1127 | restore_live_registers(sasm); | |||
1128 | __ leave(); | |||
1129 | __ ret(0); | |||
1130 | } | |||
1131 | break; | |||
1132 | ||||
1133 | case new_type_array_id: | |||
1134 | case new_object_array_id: | |||
1135 | { | |||
1136 | Register length = rbx; // Incoming | |||
1137 | Register klass = rdx; // Incoming | |||
1138 | Register obj = rax; // Result | |||
1139 | ||||
1140 | if (id == new_type_array_id) { | |||
1141 | __ set_info("new_type_array", dont_gc_arguments); | |||
1142 | } else { | |||
1143 | __ set_info("new_object_array", dont_gc_arguments); | |||
1144 | } | |||
1145 | ||||
1146 | #ifdef ASSERT
1147 | // assert object type is really an array of the proper kind | |||
1148 | { | |||
1149 | Label ok; | |||
1150 | Register t0 = obj; | |||
1151 | __ movl(t0, Address(klass, Klass::layout_helper_offset())); | |||
1152 | __ sarl(t0, Klass::_lh_array_tag_shift); | |||
1153 | int tag = ((id == new_type_array_id) | |||
1154 | ? Klass::_lh_array_tag_type_value | |||
1155 | : Klass::_lh_array_tag_obj_value); | |||
1156 | __ cmpl(t0, tag); | |||
1157 | __ jcc(Assembler::equal, ok); | |||
1158 | __ stop("assert(is an array klass)"); | |||
1159 | __ should_not_reach_here(); | |||
1160 | __ bind(ok); | |||
1161 | } | |||
1162 | #endif // ASSERT | |||
1163 | ||||
1164 | // If TLAB is disabled, see if there is support for inlining contiguous | |||
1165 | // allocations. | |||
1166 | // Otherwise, just go to the slow path. | |||
1167 | if (!UseTLAB && Universe::heap()->supports_inline_contig_alloc()) { | |||
1168 | Register arr_size = rsi; | |||
1169 | Register t1 = rcx; // must be rcx for use as shift count | |||
1170 | Register t2 = rdi; | |||
1171 | Label slow_path; | |||
1172 | ||||
1173 | // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F)) | |||
1174 | // since size is positive movl does right thing on 64bit | |||
1175 | __ movl(t1, Address(klass, Klass::layout_helper_offset())); | |||
1176 | // since size is positive movl does right thing on 64bit | |||
1177 | __ movl(arr_size, length); | |||
1178 | assert(t1 == rcx, "fixed register usage"); | |||
1179 | __ shlptr(arr_size /* by t1=rcx, mod 32 */); | |||
1180 | __ shrptr(t1, Klass::_lh_header_size_shift); | |||
1181 | __ andptr(t1, Klass::_lh_header_size_mask); | |||
1182 | __ addptr(arr_size, t1); | |||
1183 | __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up | |||
1184 | __ andptr(arr_size, ~MinObjAlignmentInBytesMask); | |||
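| // Worked example (assuming an LP64 VM with a 16-byte int[] header and | |||
| // 8-byte object alignment): for an int[3], log2 element size is 2, so | |||
| // arr_size = 16 + (3 << 2) = 28, which the two lines above round up to | |||
| // (28 + 7) & ~7 = 32 bytes. | |||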
1185 | ||||
1186 | // Using t2 for non 64-bit. | |||
1187 | const Register thread = NOT_LP64(t2) LP64_ONLY(r15_thread); | |||
1188 | NOT_LP64(__ get_thread(thread)); | |||
1189 | __ eden_allocate(thread, obj, arr_size, 0, t1, slow_path); // preserves arr_size | |||
1190 | ||||
1191 | __ initialize_header(obj, klass, length, t1, t2); | |||
1192 | __ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte))); | |||
1193 | assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise"); | |||
1194 | assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise"); | |||
1195 | __ andptr(t1, Klass::_lh_header_size_mask); | |||
1196 | __ subptr(arr_size, t1); // body length | |||
1197 | __ addptr(t1, obj); // body start | |||
1198 | __ initialize_body(t1, arr_size, 0, t2); | |||
1199 | __ verify_oop(obj); | |||
1200 | __ ret(0); | |||
1201 | ||||
1202 | __ bind(slow_path); | |||
1203 | } | |||
1204 | ||||
1205 | __ enter(); | |||
1206 | OopMap* map = save_live_registers(sasm, 3); | |||
1207 | int call_offset; | |||
1208 | if (id == new_type_array_id) { | |||
1209 | call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length); | |||
1210 | } else { | |||
1211 | call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length); | |||
1212 | } | |||
1213 | ||||
1214 | oop_maps = new OopMapSet(); | |||
1215 | oop_maps->add_gc_map(call_offset, map); | |||
1216 | restore_live_registers_except_rax(sasm); | |||
1217 | ||||
1218 | __ verify_oop(obj); | |||
1219 | __ leave(); | |||
1220 | __ ret(0); | |||
1221 | ||||
1222 | // rax,: new array | |||
1223 | } | |||
1224 | break; | |||
1225 | ||||
1226 | case new_multi_array_id: | |||
1227 | { StubFrame f(sasm, "new_multi_array", dont_gc_arguments); | |||
1228 | // rax,: klass | |||
1229 | // rbx,: rank | |||
1230 | // rcx: address of 1st dimension | |||
1231 | OopMap* map = save_live_registers(sasm, 4); | |||
1232 | int call_offset = __ call_RT(rax, noreg, CAST_FROM_FN_PTR(address, new_multi_array), rax, rbx, rcx); | |||
1233 | ||||
1234 | oop_maps = new OopMapSet(); | |||
1235 | oop_maps->add_gc_map(call_offset, map); | |||
1236 | restore_live_registers_except_rax(sasm); | |||
1237 | ||||
1238 | // rax,: new multi array | |||
1239 | __ verify_oop(rax); | |||
1240 | } | |||
1241 | break; | |||
1242 | ||||
1243 | case register_finalizer_id: | |||
1244 | { | |||
1245 | __ set_info("register_finalizer", dont_gc_arguments); | |||
1246 | ||||
1247 | // This is called via call_runtime so the arguments | |||
1248 | // will be placed in C ABI locations | |||
1249 | ||||
1250 | #ifdef _LP64 | |||
1251 | __ verify_oop(c_rarg0); | |||
1252 | __ mov(rax, c_rarg0); | |||
1253 | #else | |||
1254 | // The object is passed on the stack and we haven't pushed a | |||
1255 | // frame yet so it's one word away from the top of stack. | |||
1256 | __ movptr(rax, Address(rsp, 1 * BytesPerWord)); | |||
1257 | __ verify_oop(rax); | |||
1258 | #endif // _LP64 | |||
1259 | ||||
1260 | // load the klass and check the has-finalizer flag | |||
1261 | Label register_finalizer; | |||
1262 | Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); | |||
1263 | Register t = rsi; | |||
1264 | __ load_klass(t, rax, tmp_load_klass); | |||
1265 | __ movl(t, Address(t, Klass::access_flags_offset())); | |||
1266 | __ testl(t, JVM_ACC_HAS_FINALIZER); | |||
1267 | __ jcc(Assembler::notZero, register_finalizer); | |||
1268 | __ ret(0); | |||
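| // Fast path: if JVM_ACC_HAS_FINALIZER is clear in the klass access flags | |||
| // there is nothing to do and the stub returns immediately; only objects of | |||
| // finalizable classes fall through to the runtime call below. | |||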
1269 | ||||
1270 | __ bind(register_finalizer); | |||
1271 | __ enter(); | |||
1272 | OopMap* oop_map = save_live_registers(sasm, 2 /*num_rt_args */); | |||
1273 | int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), rax); | |||
1274 | oop_maps = new OopMapSet(); | |||
1275 | oop_maps->add_gc_map(call_offset, oop_map); | |||
1276 | ||||
1277 | // Now restore all the live registers | |||
1278 | restore_live_registers(sasm); | |||
1279 | ||||
1280 | __ leave(); | |||
1281 | __ ret(0); | |||
1282 | } | |||
1283 | break; | |||
1284 | ||||
1285 | case throw_range_check_failed_id: | |||
1286 | { StubFrame f(sasm, "range_check_failed", dont_gc_arguments); | |||
1287 | oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true); | |||
1288 | } | |||
1289 | break; | |||
1290 | ||||
1291 | case throw_index_exception_id: | |||
1292 | { StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments); | |||
1293 | oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true); | |||
1294 | } | |||
1295 | break; | |||
1296 | ||||
1297 | case throw_div0_exception_id: | |||
1298 | { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments); | |||
1299 | oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false); | |||
1300 | } | |||
1301 | break; | |||
1302 | ||||
1303 | case throw_null_pointer_exception_id: | |||
1304 | { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments); | |||
1305 | oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false); | |||
1306 | } | |||
1307 | break; | |||
1308 | ||||
1309 | case handle_exception_nofpu_id: | |||
1310 | case handle_exception_id: | |||
1311 | { StubFrame f(sasm, "handle_exception", dont_gc_arguments); | |||
1312 | oop_maps = generate_handle_exception(id, sasm); | |||
1313 | } | |||
1314 | break; | |||
1315 | ||||
1316 | case handle_exception_from_callee_id: | |||
1317 | { StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments); | |||
1318 | oop_maps = generate_handle_exception(id, sasm); | |||
1319 | } | |||
1320 | break; | |||
1321 | ||||
1322 | case unwind_exception_id: | |||
1323 | { __ set_info("unwind_exception", dont_gc_arguments); | |||
1324 | // note: no stubframe since we are about to leave the current | |||
1325 | // activation and we are calling a leaf VM function only. | |||
1326 | generate_unwind_exception(sasm); | |||
1327 | } | |||
1328 | break; | |||
1329 | ||||
1330 | case throw_array_store_exception_id: | |||
1331 | { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments); | |||
1332 | // tos + 0: link | |||
1333 | // + 1: return address | |||
1334 | oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true); | |||
1335 | } | |||
1336 | break; | |||
1337 | ||||
1338 | case throw_class_cast_exception_id: | |||
1339 | { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments); | |||
1340 | oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true); | |||
1341 | } | |||
1342 | break; | |||
1343 | ||||
1344 | case throw_incompatible_class_change_error_id: | |||
1345 | { StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments); | |||
1346 | oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false); | |||
1347 | } | |||
1348 | break; | |||
1349 | ||||
1350 | case slow_subtype_check_id: | |||
1351 | { | |||
1352 | // Typical calling sequence: | |||
1353 | // __ push(klass_RInfo); // object klass or other subclass | |||
1354 | // __ push(sup_k_RInfo); // array element klass or other superclass | |||
1355 | // __ call(slow_subtype_check); | |||
1356 | // Note that the subclass is pushed first, and is therefore deepest. | |||
1357 | // Previous versions of this code reversed the names 'sub' and 'super'. | |||
1358 | // This was operationally harmless but made the code unreadable. | |||
1359 | enum layout { | |||
1360 | rax_off, SLOT2(raxH_off) | |||
1361 | rcx_off, SLOT2(rcxH_off) | |||
1362 | rsi_off, SLOT2(rsiH_off) | |||
1363 | rdi_off, SLOT2(rdiH_off) | |||
1364 | // saved_rbp_off, SLOT2(saved_rbpH_off) | |||
1365 | return_off, SLOT2(returnH_off) | |||
1366 | sup_k_off, SLOT2(sup_kH_off) | |||
1367 | klass_off, SLOT2(superH_off) | |||
1368 | framesize, | |||
1369 | result_off = klass_off // deepest argument is also the return value | |||
1370 | }; | |||
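| // With the four pushes below, this enum describes the stack in 32-bit | |||
| // VMReg slots: the rax/rcx/rsi/rdi saves on top, then the return address, | |||
| // then the two arguments pushed by the caller, with the first-pushed | |||
| // (deepest) argument slot, klass_off, doubling as the result slot. On LP64 | |||
| // each word covers two slots, which is what the SLOT2 entries account for. | |||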
1371 | ||||
1372 | __ set_info("slow_subtype_check", dont_gc_arguments); | |||
1373 | __ push(rdi); | |||
1374 | __ push(rsi); | |||
1375 | __ push(rcx); | |||
1376 | __ push(rax); | |||
1377 | ||||
1378 | // This is called by pushing args and not with C abi | |||
1379 | __ movptr(rsi, Address(rsp, (klass_off) * VMRegImpl::stack_slot_size)); // subclass | |||
1380 | __ movptr(rax, Address(rsp, (sup_k_off) * VMRegImpl::stack_slot_size)); // superclass | |||
1381 | ||||
1382 | Label miss; | |||
1383 | __ check_klass_subtype_slow_path(rsi, rax, rcx, rdi, NULL, &miss); | |||
1384 | ||||
1385 | // fallthrough on success: | |||
1386 | __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), 1); // result | |||
1387 | __ pop(rax); | |||
1388 | __ pop(rcx); | |||
1389 | __ pop(rsi); | |||
1390 | __ pop(rdi); | |||
1391 | __ ret(0); | |||
1392 | ||||
1393 | __ bind(miss); | |||
1394 | __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), NULL_WORD); // result | |||
1395 | __ pop(rax); | |||
1396 | __ pop(rcx); | |||
1397 | __ pop(rsi); | |||
1398 | __ pop(rdi); | |||
1399 | __ ret(0); | |||
1400 | } | |||
1401 | break; | |||
1402 | ||||
1403 | case monitorenter_nofpu_id: | |||
1404 | save_fpu_registers = false; | |||
1405 | // fall through | |||
1406 | case monitorenter_id: | |||
1407 | { | |||
1408 | StubFrame f(sasm, "monitorenter", dont_gc_arguments); | |||
1409 | OopMap* map = save_live_registers(sasm, 3, save_fpu_registers); | |||
1410 | ||||
1411 | // Called with store_parameter and not C abi | |||
1412 | ||||
1413 | f.load_argument(1, rax); // rax,: object | |||
1414 | f.load_argument(0, rbx); // rbx,: lock address | |||
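| // Note: the C1-compiled caller passes these values via store_parameter | |||
| // slots in its own frame rather than through the C ABI; load_argument(i, | |||
| // reg) reads slot i back relative to this StubFrame's rbp. | |||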
1415 | ||||
1416 | int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), rax, rbx); | |||
1417 | ||||
1418 | oop_maps = new OopMapSet(); | |||
1419 | oop_maps->add_gc_map(call_offset, map); | |||
1420 | restore_live_registers(sasm, save_fpu_registers); | |||
1421 | } | |||
1422 | break; | |||
1423 | ||||
1424 | case monitorexit_nofpu_id: | |||
1425 | save_fpu_registers = false; | |||
1426 | // fall through | |||
1427 | case monitorexit_id: | |||
1428 | { | |||
1429 | StubFrame f(sasm, "monitorexit", dont_gc_arguments); | |||
1430 | OopMap* map = save_live_registers(sasm, 2, save_fpu_registers); | |||
1431 | ||||
1432 | // Called with store_parameter and not C abi | |||
1433 | ||||
1434 | f.load_argument(0, rax); // rax,: lock address | |||
1435 | ||||
1436 | // note: really a leaf routine but must set up last java sp | |||
1437 | // => use call_RT for now (speed can be improved by | |||
1438 | // doing last java sp setup manually) | |||
1439 | int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), rax); | |||
1440 | ||||
1441 | oop_maps = new OopMapSet(); | |||
1442 | oop_maps->add_gc_map(call_offset, map); | |||
1443 | restore_live_registers(sasm, save_fpu_registers); | |||
1444 | } | |||
1445 | break; | |||
1446 | ||||
1447 | case deoptimize_id: | |||
1448 | { | |||
1449 | StubFrame f(sasm, "deoptimize", dont_gc_arguments); | |||
1450 | const int num_rt_args = 2; // thread, trap_request | |||
1451 | OopMap* oop_map = save_live_registers(sasm, num_rt_args); | |||
1452 | f.load_argument(0, rax); | |||
1453 | int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), rax); | |||
1454 | oop_maps = new OopMapSet(); | |||
1455 | oop_maps->add_gc_map(call_offset, oop_map); | |||
1456 | restore_live_registers(sasm); | |||
1457 | DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob(); | |||
1458 | assert(deopt_blob != NULL, "deoptimization blob must have been created"); | |||
1459 | __ leave(); | |||
1460 | __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution())); | |||
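| // Note: rax carried the trap_request into the deoptimize runtime entry, | |||
| // which marks the frame for deoptimization; the stub then tail-jumps to the | |||
| // deopt blob's reexecution entry so the trapping bytecode is retried in the | |||
| // interpreter. | |||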
1461 | } | |||
1462 | break; | |||
1463 | ||||
1464 | case access_field_patching_id: | |||
1465 | { StubFrame f(sasm, "access_field_patching", dont_gc_arguments); | |||
1466 | // we should set up register map | |||
1467 | oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching)); | |||
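| // This and the three patching cases below share one shape: generate_patching | |||
| // saves the register state, calls the given runtime entry to resolve the | |||
| // missing field/klass/mirror/appendix information, and arranges for the | |||
| // caller to continue once patching (or deoptimization) has been done. | |||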
1468 | } | |||
1469 | break; | |||
1470 | ||||
1471 | case load_klass_patching_id: | |||
1472 | { StubFrame f(sasm, "load_klass_patching", dont_gc_arguments); | |||
1473 | // we should set up register map | |||
1474 | oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching)); | |||
1475 | } | |||
1476 | break; | |||
1477 | ||||
1478 | case load_mirror_patching_id: | |||
1479 | { StubFrame f(sasm, "load_mirror_patching", dont_gc_arguments); | |||
1480 | // we should set up register map | |||
1481 | oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching)); | |||
1482 | } | |||
1483 | break; | |||
1484 | ||||
1485 | case load_appendix_patching_id: | |||
1486 | { StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments); | |||
1487 | // we should set up register map | |||
1488 | oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching)); | |||
1489 | } | |||
1490 | break; | |||
1491 | ||||
1492 | case dtrace_object_alloc_id: | |||
1493 | { // rax,: object | |||
1494 | StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments); | |||
1495 | // we can't gc here so skip the oopmap but make sure that all | |||
1496 | // the live registers get saved. | |||
1497 | save_live_registers(sasm, 1); | |||
1498 | ||||
1499 | __ NOT_LP64(push(rax)) LP64_ONLY(mov(c_rarg0, rax)); | |||
1500 | __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc)))); | |||
1501 | NOT_LP64(__ pop(rax)); | |||
1502 | ||||
1503 | restore_live_registers(sasm); | |||
1504 | } | |||
1505 | break; | |||
1506 | ||||
1507 | case fpu2long_stub_id: | |||
1508 | { | |||
1509 | #ifdef _LP64 | |||
1510 | Label done; | |||
1511 | __ cvttsd2siq(rax, Address(rsp, wordSize)); | |||
1512 | __ cmp64(rax, ExternalAddress((address) StubRoutines::x86::double_sign_flip())); | |||
1513 | __ jccb(Assembler::notEqual, done); | |||
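| // cvttsd2siq returns the "integer indefinite" value 0x8000000000000000 for | |||
| // NaN or out-of-range inputs; double_sign_flip holds that bit pattern, so | |||
| // only such inputs take the slow d2l_fixup call below. | |||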
1514 | __ movq(rax, Address(rsp, wordSize)); | |||
1515 | __ subptr(rsp, 8); | |||
1516 | __ movq(Address(rsp, 0), rax); | |||
1517 | __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::d2l_fixup()))); | |||
1518 | __ pop(rax); | |||
1519 | __ bind(done); | |||
1520 | __ ret(0); | |||
1521 | #else | |||
1522 | // rax, and rdx are destroyed, but should be free since the result is returned there | |||
1523 | // preserve rsi and rcx | |||
1524 | __ push(rsi); | |||
1525 | __ push(rcx); | |||
1526 | ||||
1527 | // check for NaN | |||
1528 | Label return0, do_return, return_min_jlong, do_convert; | |||
1529 | ||||
1530 | Address value_high_word(rsp, wordSize + 4); | |||
1531 | Address value_low_word(rsp, wordSize); | |||
1532 | Address result_high_word(rsp, 3*wordSize + 4); | |||
1533 | Address result_low_word(rsp, 3*wordSize); | |||
1534 | ||||
1535 | __ subptr(rsp, 32); // more than enough on 32bit | |||
1536 | __ fst_d(value_low_word); | |||
1537 | __ movl(rax, value_high_word); | |||
1538 | __ andl(rax, 0x7ff00000); | |||
1539 | __ cmpl(rax, 0x7ff00000); | |||
1540 | __ jcc(Assembler::notEqual, do_convert); | |||
1541 | __ movl(rax, value_high_word); | |||
1542 | __ andl(rax, 0xfffff); | |||
1543 | __ orl(rax, value_low_word); | |||
1544 | __ jcc(Assembler::notZero, return0); | |||
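| // NaN check: exponent bits all ones (mask 0x7ff00000) with a non-zero | |||
| // mantissa means NaN, which converts to 0; infinities (zero mantissa) fall | |||
| // through to do_convert and are handled by the overflow logic further down. | |||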
1545 | ||||
1546 | __ bind(do_convert); | |||
1547 | __ fnstcw(Address(rsp, 0)); | |||
1548 | __ movzwl(rax, Address(rsp, 0)); | |||
1549 | __ orl(rax, 0xc00); | |||
1550 | __ movw(Address(rsp, 2), rax); | |||
1551 | __ fldcw(Address(rsp, 2)); | |||
1552 | __ fwait(); | |||
1553 | __ fistp_d(result_low_word); | |||
1554 | __ fldcw(Address(rsp, 0)); | |||
1555 | __ fwait(); | |||
1556 | // This gets the entire long in rax on 64bit | |||
1557 | __ movptr(rax, result_low_word); | |||
1558 | // testing of high bits | |||
1559 | __ movl(rdx, result_high_word); | |||
1560 | __ mov(rcx, rax); | |||
1561 | // What the heck is the point of the next instruction??? | |||
1562 | __ xorl(rcx, 0x0); | |||
1563 | __ movl(rsi, 0x80000000); | |||
1564 | __ xorl(rsi, rdx); | |||
1565 | __ orl(rcx, rsi); | |||
1566 | __ jcc(Assembler::notEqual, do_return); | |||
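| // If fistp produced the indefinite value 0x8000000000000000 (overflow), | |||
| // compare the original operand against 0.0: negative inputs saturate to | |||
| // min_jlong, positive ones to max_jlong. | |||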
1567 | __ fldz(); | |||
1568 | __ fcomp_d(value_low_word); | |||
1569 | __ fnstsw_ax(); | |||
1570 | __ sahf(); | |||
1571 | __ jcc(Assembler::above, return_min_jlong); | |||
1572 | // return max_jlong | |||
1573 | __ movl(rdx, 0x7fffffff); | |||
1574 | __ movl(rax, 0xffffffff); | |||
1575 | __ jmp(do_return); | |||
1576 | ||||
1577 | __ bind(return_min_jlong); | |||
1578 | __ movl(rdx, 0x80000000); | |||
1579 | __ xorl(rax, rax); | |||
1580 | __ jmp(do_return); | |||
1581 | ||||
1582 | __ bind(return0); | |||
1583 | __ fpop(); | |||
1584 | __ xorptr(rdx,rdx); | |||
1585 | __ xorptr(rax,rax); | |||
1586 | ||||
1587 | __ bind(do_return); | |||
1588 | __ addptr(rsp, 32); | |||
1589 | __ pop(rcx); | |||
1590 | __ pop(rsi); | |||
1591 | __ ret(0); | |||
1592 | #endif // _LP64 | |||
1593 | } | |||
1594 | break; | |||
1595 | ||||
1596 | case predicate_failed_trap_id: | |||
1597 | { | |||
1598 | StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments); | |||
1599 | ||||
1600 | OopMap* map = save_live_registers(sasm, 1); | |||
1601 | ||||
1602 | int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap)); | |||
1603 | oop_maps = new OopMapSet(); | |||
1604 | oop_maps->add_gc_map(call_offset, map); | |||
1605 | restore_live_registers(sasm); | |||
1606 | __ leave(); | |||
1607 | DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob(); | |||
1608 | assert(deopt_blob != NULL, "deoptimization blob must have been created"); | |||
1609 | ||||
1610 | __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution())); | |||
1611 | } | |||
1612 | break; | |||
1613 | ||||
1614 | default: | |||
1615 | { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments); | |||
1616 | __ movptr(rax, (int)id); | |||
1617 | __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax); | |||
1618 | __ should_not_reach_here(); | |||
1619 | } | |||
1620 | break; | |||
1621 | } | |||
1622 | return oop_maps; | |||
1623 | } | |||
1624 | ||||
1625 | #undef __ | |||
1626 | ||||
1627 | const char *Runtime1::pd_name_for_address(address entry) { | |||
1628 | return "<unknown function>"; | |||
1629 | } |