1 /*
2 * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "c1/c1_CodeStubs.hpp"
27 #include "c1/c1_FrameMap.hpp"
28 #include "c1/c1_LIRAssembler.hpp"
29 #include "c1/c1_MacroAssembler.hpp"
30 #include "c1/c1_Runtime1.hpp"
31 #include "classfile/javaClasses.hpp"
32 #include "nativeInst_x86.hpp"
33 #include "runtime/sharedRuntime.hpp"
34 #include "utilities/align.hpp"
35 #include "utilities/macros.hpp"
36 #include "vmreg_x86.inline.hpp"
37
38
39 #define __ ce->masm()->
40
41 #ifndef _LP64
42 float ConversionStub::float_zero = 0.0;
43 double ConversionStub::double_zero = 0.0;
44
45 void ConversionStub::emit_code(LIR_Assembler* ce) {
46 __ bind(_entry);
47 assert(bytecode() == Bytecodes::_f2i || bytecode() == Bytecodes::_d2i, "other conversions do not require stub");
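  // The fast-path conversion (cvttss2si/cvttsd2si) leaves 0x80000000 in the
  // result register for NaN and out-of-range inputs, so only those cases
  // (and an exact minInt) reach this stub. Compare the input against zero
  // to pick the Java result: keep minInt for negative inputs, return maxInt
  // for positive overflow, and 0 for NaN.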
48
49
50 if (input()->is_single_xmm()) {
51 __ comiss(input()->as_xmm_float_reg(),
52 ExternalAddress((address)&float_zero));
53 } else if (input()->is_double_xmm()) {
54 __ comisd(input()->as_xmm_double_reg(),
55 ExternalAddress((address)&double_zero));
56 } else {
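    // x87 path: the input is on the FPU stack. ftst compares ST(0) against
    // zero; move the FPU condition codes into EFLAGS via AX so the
    // conditional jumps below can test them.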
57 __ push(rax);
58 __ ftst();
59 __ fnstsw_ax();
60 __ sahf();
61 __ pop(rax);
62 }
63
64 Label NaN, do_return;
65 __ jccb(Assembler::parity, NaN);
66 __ jccb(Assembler::below, do_return);
67
68 // input is > 0 -> return maxInt
69 // result register already contains 0x80000000, so subtracting 1 gives 0x7fffffff
70 __ decrement(result()->as_register());
71 __ jmpb(do_return);
72
73 // input is NaN -> return 0
74 __ bind(NaN);
75 __ xorptr(result()->as_register(), result()->as_register());
76
77 __ bind(do_return);
78 __ jmp(_continuation);
79 }
80 #endif // !_LP64
81
82 void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
83 __ bind(_entry);
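  // Pass the method and bci to the runtime; the counter-overflow entry
  // updates the counters and may trigger (re)compilation.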
84 Metadata *m = _method->as_constant_ptr()->as_metadata();
85 ce->store_parameter(m, 1);
86 ce->store_parameter(_bci, 0);
87 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
88 ce->add_call_info_here(_info);
89 ce->verify_oop_map(_info);
90 __ jmp(_continuation);
91 }
92
93 RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
94 : _index(index), _array(array), _throw_index_out_of_bounds_exception(false) {
95 assert(info != NULL, "must have info");
96 _info = new CodeEmitInfo(info);
97 }
98
99 RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index)
100 : _index(index), _array(NULL), _throw_index_out_of_bounds_exception(true) {
101 assert(info != NULL, "must have info");
102 _info = new CodeEmitInfo(info);
103 }
104
105 void RangeCheckStub::emit_code(LIR_Assembler* ce) {
106 __ bind(_entry);
107 if (_info->deoptimize_on_exception()) {
108 address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
109 __ call(RuntimeAddress(a));
110 ce->add_call_info_here(_info);
111 ce->verify_oop_map(_info);
112 debug_only(__ should_not_reach_here());
113 return;
114 }
115
  // pass the array index on the stack because all registers must be preserved
117 if (_index->is_cpu_register()) {
118 ce->store_parameter(_index->as_register(), 0);
119 } else {
120 ce->store_parameter(_index->as_jint(), 0);
121 }
122 Runtime1::StubID stub_id;
123 if (_throw_index_out_of_bounds_exception) {
124 stub_id = Runtime1::throw_index_exception_id;
125 } else {
126 stub_id = Runtime1::throw_range_check_failed_id;
127 ce->store_parameter(_array->as_pointer_register(), 1);
128 }
129 __ call(RuntimeAddress(Runtime1::entry_for(stub_id)));
130 ce->add_call_info_here(_info);
131 ce->verify_oop_map(_info);
132 debug_only(__ should_not_reach_here());
133 }
134
135 PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
136 _info = new CodeEmitInfo(info);
137 }
138
139 void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
140 __ bind(_entry);
141 address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
142 __ call(RuntimeAddress(a));
143 ce->add_call_info_here(_info);
144 ce->verify_oop_map(_info);
145 debug_only(__ should_not_reach_here());
146 }
147
148 void DivByZeroStub::emit_code(LIR_Assembler* ce) {
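  // If the check is implicit (the division instruction itself traps), map
  // the faulting instruction's offset to this stub in the implicit
  // exception table so the signal handler can dispatch here.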
149 if (_offset != -1) {
150 ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
151 }
152 __ bind(_entry);
153 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_div0_exception_id)));
154 ce->add_call_info_here(_info);
155 debug_only(__ should_not_reach_here());
156 }
157
158
159 // Implementation of NewInstanceStub
160
161 NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
162 _result = result;
163 _klass = klass;
164 _klass_reg = klass_reg;
165 _info = new CodeEmitInfo(info);
166 assert(stub_id == Runtime1::new_instance_id ||
167 stub_id == Runtime1::fast_new_instance_id ||
168 stub_id == Runtime1::fast_new_instance_init_check_id,
169 "need new_instance id");
170 _stub_id = stub_id;
171 }
172
173
174 void NewInstanceStub::emit_code(LIR_Assembler* ce) {
175 assert(__ rsp_offset() == 0, "frame size should be fixed");
176 __ bind(_entry);
177 __ movptr(rdx, _klass_reg->as_register());
178 __ call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
179 ce->add_call_info_here(_info);
180 ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
182 __ jmp(_continuation);
183 }
184
185
186 // Implementation of NewTypeArrayStub
187
188 NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
189 _klass_reg = klass_reg;
190 _length = length;
191 _result = result;
192 _info = new CodeEmitInfo(info);
193 }
194
195
196 void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
197 assert(__ rsp_offset() == 0, "frame size should be fixed");
198 __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
201 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
202 ce->add_call_info_here(_info);
203 ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
205 __ jmp(_continuation);
206 }
207
208
209 // Implementation of NewObjectArrayStub
210
211 NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
212 _klass_reg = klass_reg;
213 _result = result;
214 _length = length;
215 _info = new CodeEmitInfo(info);
216 }
217
218
219 void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
220 assert(__ rsp_offset() == 0, "frame size should be fixed");
221 __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
224 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
225 ce->add_call_info_here(_info);
226 ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
228 __ jmp(_continuation);
229 }
230
231
232 // Implementation of MonitorAccessStubs
233
234 MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
235 : MonitorAccessStub(obj_reg, lock_reg)
236 {
237 _info = new CodeEmitInfo(info);
238 }
239
240
241 void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
242 assert(__ rsp_offset() == 0, "frame size should be fixed");
243 __ bind(_entry);
244 ce->store_parameter(_obj_reg->as_register(), 1);
245 ce->store_parameter(_lock_reg->as_register(), 0);
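  // Methods without FPU code can use the _nofpu runtime entry, which skips
  // saving and restoring the FPU state around the call.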
246 Runtime1::StubID enter_id;
247 if (ce->compilation()->has_fpu_code()) {
248 enter_id = Runtime1::monitorenter_id;
249 } else {
250 enter_id = Runtime1::monitorenter_nofpu_id;
251 }
252 __ call(RuntimeAddress(Runtime1::entry_for(enter_id)));
253 ce->add_call_info_here(_info);
254 ce->verify_oop_map(_info);
255 __ jmp(_continuation);
256 }
257
258
259 void MonitorExitStub::emit_code(LIR_Assembler* ce) {
260 __ bind(_entry);
261 if (_compute_lock) {
262 // lock_reg was destroyed by fast unlocking attempt => recompute it
263 ce->monitor_address(_monitor_ix, _lock_reg);
264 }
265 ce->store_parameter(_lock_reg->as_register(), 0);
266 // note: non-blocking leaf routine => no call info needed
267 Runtime1::StubID exit_id;
268 if (ce->compilation()->has_fpu_code()) {
269 exit_id = Runtime1::monitorexit_id;
270 } else {
271 exit_id = Runtime1::monitorexit_nofpu_id;
272 }
273 __ call(RuntimeAddress(Runtime1::entry_for(exit_id)));
274 __ jmp(_continuation);
275 }
276
277
// Implementation of patching:
// - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace the original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing the class, restore the original code, reexecute the instruction
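//
// Stub layout emitted by emit_code() below:
//   - a copy of (or template for) the instruction being patched
//   - a being_initialized check (load_mirror_id only)
//   - a patch record disguised as a movl reg, imm32 (data the runtime reads)
//   - a call into the patching runtime stub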
285
286 int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;
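// The patch record ends exactly one call instruction (5 bytes, the same size
// as a NativeGeneralJump) before the return address of the call into the
// patching stub; emit_code() asserts this invariant.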
287
void PatchingStub::align_patch_site(MacroAssembler* masm) {
  // We're patching a 5-7 byte instruction on Intel and we need to make
  // sure that no thread sees a partially written instruction. It appears
  // mostly impossible on Intel to simply invalidate other processors'
  // caches, and since they may do aggressive prefetch it's very hard to
  // guess what code might be in the icache. Force the instruction to be
  // double word aligned so that it doesn't span a cache line.
  masm->align(align_up((int)NativeGeneralJump::instruction_size, wordSize));
}
298
299 void PatchingStub::emit_code(LIR_Assembler* ce) {
300 assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF, "not enough room for call");
301
302 Label call_patch;
303
  // Static field accesses have special semantics while the class
  // initializer is being run, so we emit a test that checks whether
  // this code is being executed by the initializing thread.
308 address being_initialized_entry = __ pc();
309 if (CommentedAssembly) {
310 __ block_comment(" patch template");
311 }
312 if (_id == load_klass_id) {
313 // produce a copy of the load klass instruction for use by the being initialized case
314 #ifdef ASSERT
315 address start = __ pc();
316 #endif
317 Metadata* o = NULL;
318 __ mov_metadata(_obj, o);
319 #ifdef ASSERT
320 for (int i = 0; i < _bytes_to_copy; i++) {
321 address ptr = (address)(_pc_start + i);
322 int a_byte = (*ptr) & 0xFF;
323 assert(a_byte == *start++, "should be the same code");
324 }
325 #endif
326 } else if (_id == load_mirror_id) {
327 // produce a copy of the load mirror instruction for use by the being
328 // initialized case
329 #ifdef ASSERT
330 address start = __ pc();
331 #endif
332 jobject o = NULL;
333 __ movoop(_obj, o);
334 #ifdef ASSERT
335 for (int i = 0; i < _bytes_to_copy; i++) {
336 address ptr = (address)(_pc_start + i);
337 int a_byte = (*ptr) & 0xFF;
338 assert(a_byte == *start++, "should be the same code");
339 }
340 #endif
341 } else {
    // make a copy of the code which is going to be patched.
343 for (int i = 0; i < _bytes_to_copy; i++) {
344 address ptr = (address)(_pc_start + i);
345 int a_byte = (*ptr) & 0xFF;
346 __ emit_int8(a_byte);
347 *ptr = 0x90; // make the site look like a nop
348 }
349 }
350
351 address end_of_patch = __ pc();
352 int bytes_to_skip = 0;
353 if (_id == load_mirror_id) {
354 int offset = __ offset();
355 if (CommentedAssembly) {
356 __ block_comment(" being_initialized check");
357 }
358 assert(_obj != noreg, "must be a valid register");
359 Register tmp = rax;
360 Register tmp2 = rbx;
361 __ push(tmp);
362 __ push(tmp2);
    // Load without verification to keep code size small. We need it because
    // being_initialized_entry_offset has to fit in a byte. Also, we know it's not null.
365 __ movptr(tmp2, Address(_obj, java_lang_Class::klass_offset()));
366 __ get_thread(tmp);
367 __ cmpptr(tmp, Address(tmp2, InstanceKlass::init_thread_offset()));
368 __ pop(tmp2);
369 __ pop(tmp);
370 __ jcc(Assembler::notEqual, call_patch);
371
372 // access_field patches may execute the patched code before it's
373 // copied back into place so we need to jump back into the main
374 // code of the nmethod to continue execution.
375 __ jmp(_patch_site_continuation);
376
377 // make sure this extra code gets skipped
378 bytes_to_skip += __ offset() - offset;
379 }
380 if (CommentedAssembly) {
381 __ block_comment("patch data encoded as movl");
382 }
383 // Now emit the patch record telling the runtime how to find the
384 // pieces of the patch. We only need 3 bytes but for readability of
385 // the disassembly we make the data look like a movl reg, imm32,
386 // which requires 5 bytes
387 int sizeof_patch_record = 5;
388 bytes_to_skip += sizeof_patch_record;
389
390 // emit the offsets needed to find the code to patch
391 int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;
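  // Fake movl: the 0xB8 opcode followed by four "immediate" bytes, of which
  // the first is padding. The runtime reads the last three: the distance
  // from the end of this record back to being_initialized_entry, the stub
  // bytes to skip, and the number of bytes to copy back to the patch site.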
392
393 __ emit_int8((unsigned char)0xB8);
394 __ emit_int8(0);
395 __ emit_int8(being_initialized_entry_offset);
396 __ emit_int8(bytes_to_skip);
397 __ emit_int8(_bytes_to_copy);
398 address patch_info_pc = __ pc();
399 assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");
400
401 address entry = __ pc();
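  // Overwrite the start of the patch site with an unconditional jump into
  // this stub, so execution reaches the patching code until the real
  // instruction has been put back in place.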
402 NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
403 address target = NULL;
404 relocInfo::relocType reloc_type = relocInfo::none;
405 switch (_id) {
406 case access_field_id: target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
407 case load_klass_id: target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
408 case load_mirror_id: target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
409 case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
410 default: ShouldNotReachHere();
411 }
412 __ bind(call_patch);
413
414 if (CommentedAssembly) {
415 __ block_comment("patch entry point");
416 }
417 __ call(RuntimeAddress(target));
418 assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
419 ce->add_call_info_here(_info);
420 int jmp_off = __ offset();
421 __ jmp(_patch_site_entry);
  // Add enough nops so deoptimization can overwrite the jmp above with a call
  // and not destroy the world. We cannot use fat nops here, since the concurrent
  // code rewrite may transiently create an illegal instruction sequence.
  for (int j = __ offset(); j < jmp_off + 5; j++) {
426 __ nop();
427 }
428 if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
429 CodeSection* cs = __ code_section();
430 RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
431 relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, reloc_type, relocInfo::none);
432 }
433 }
434
435
436 void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
437 __ bind(_entry);
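  // Pass the trap request (encoding the deopt reason and action) to the
  // runtime, which deoptimizes the frame accordingly.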
438 ce->store_parameter(_trap_request, 0);
439 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
440 ce->add_call_info_here(_info);
441 DEBUG_ONLY(__ should_not_reach_here());
442 }
443
444
445 void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
446 address a;
447 if (_info->deoptimize_on_exception()) {
448 // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
449 a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
450 } else {
451 a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
452 }
453
  // Record the faulting instruction's offset in the implicit exception table
  // so the signal handler can dispatch to this stub on a null access.
  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
455 __ bind(_entry);
456 __ call(RuntimeAddress(a));
457 ce->add_call_info_here(_info);
458 ce->verify_oop_map(_info);
459 debug_only(__ should_not_reach_here());
460 }
461
462
463 void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
464 assert(__ rsp_offset() == 0, "frame size should be fixed");
465
466 __ bind(_entry);
  // pass the object on the stack because all registers must be preserved
468 if (_obj->is_cpu_register()) {
469 ce->store_parameter(_obj->as_register(), 0);
470 }
471 __ call(RuntimeAddress(Runtime1::entry_for(_stub)));
472 ce->add_call_info_here(_info);
473 debug_only(__ should_not_reach_here());
474 }
475
476
477 void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
478 //---------------slow case: call to native-----------------
479 __ bind(_entry);
480 // Figure out where the args should go
481 // This should really convert the IntrinsicID to the Method* and signature
482 // but I don't know how to do that.
483 //
484 VMRegPair args[5];
485 BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT};
486 SharedRuntime::java_calling_convention(signature, args, 5, true);
487
488 // push parameters
  // (src, src_pos, dst, dst_pos, length)
490 Register r[5];
491 r[0] = src()->as_register();
492 r[1] = src_pos()->as_register();
493 r[2] = dst()->as_register();
494 r[3] = dst_pos()->as_register();
495 r[4] = length()->as_register();
496
  // Arguments assigned stack slots are stored; register arguments are
  // already in the right place (asserted below).
  for (int i = 0; i < 5; i++) {
499 VMReg r_1 = args[i].first();
500 if (r_1->is_stack()) {
501 int st_off = r_1->reg2stack() * wordSize;
      __ movptr(Address(rsp, st_off), r[i]);
503 } else {
504 assert(r[i] == args[i].first()->as_Register(), "Wrong register for arg ");
505 }
506 }
507
508 ce->align_call(lir_static_call);
509
510 ce->emit_static_call_stub();
511 if (ce->compilation()->bailed_out()) {
512 return; // CodeCache is full
513 }
514 AddressLiteral resolve(SharedRuntime::get_resolve_static_call_stub(),
515 relocInfo::static_call_type);
516 __ call(resolve);
517 ce->add_call_info_here(info());
518
519 #ifndef PRODUCT
520 __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
521 #endif
522
523 __ jmp(_continuation);
524 }
525
526 #undef __