/*
 * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/javaClasses.hpp"
#include "nativeInst_x86.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"


#define __ ce->masm()->

#ifndef _LP64
float ConversionStub::float_zero = 0.0;
double ConversionStub::double_zero = 0.0;

void ConversionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(bytecode() == Bytecodes::_f2i || bytecode() == Bytecodes::_d2i, "other conversions do not require stub");

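  // The main-line code used cvttss2si/cvttsd2si (or the x87 equivalent), which
  // produce the "integer indefinite" value 0x80000000 for NaN and for inputs
  // outside the int range. Compare the input against zero to fix up the result.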
  if (input()->is_single_xmm()) {
    __ comiss(input()->as_xmm_float_reg(),
              ExternalAddress((address)&float_zero));
  } else if (input()->is_double_xmm()) {
    __ comisd(input()->as_xmm_double_reg(),
              ExternalAddress((address)&double_zero));
  } else {
    __ push(rax);
    __ ftst();
    __ fnstsw_ax();
    __ sahf();
    __ pop(rax);
  }
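  // EFLAGS now reflect a compare of the input against zero: PF is set if the
  // input was NaN (unordered), CF if it was below zero (keep minInt then).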

  Label NaN, do_return;
  __ jccb(Assembler::parity, NaN);
  __ jccb(Assembler::below, do_return);

  // input is > 0 -> return maxInt
  // result register already contains 0x80000000, so subtracting 1 gives 0x7fffffff
  __ decrement(result()->as_register());
  __ jmpb(do_return);

  // input is NaN -> return 0
  __ bind(NaN);
  __ xorptr(result()->as_register(), result()->as_register());

  __ bind(do_return);
  __ jmp(_continuation);
}
#endif // !_LP64

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
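  // Pass bci and method in the outgoing stack slots expected by the runtime
  // entry; on x86, store_parameter writes slot n at rsp + n * wordSize.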
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  ce->store_parameter(m, 1);
  ce->store_parameter(_bci, 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
  : _index(index), _array(array), _throw_index_out_of_bounds_exception(false) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index)
  : _index(index), _array(NULL), _throw_index_out_of_bounds_exception(true) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
    __ call(RuntimeAddress(a));
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }

  // pass the array index on stack because all registers must be preserved
  if (_index->is_cpu_register()) {
    ce->store_parameter(_index->as_register(), 0);
  } else {
    ce->store_parameter(_index->as_jint(), 0);
  }
  Runtime1::StubID stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = Runtime1::throw_index_exception_id;
  } else {
    stub_id = Runtime1::throw_range_check_failed_id;
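    // also pass the array so the runtime can include its length in the
    // exception message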
    ce->store_parameter(_array->as_pointer_register(), 1);
  }
  __ call(RuntimeAddress(Runtime1::entry_for(stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  __ call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
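  // If _offset is set, the division at that code offset can trap; record the
  // mapping from the trapping pc to this stub in the implicit exception table.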
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_div0_exception_id)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


// Implementation of LoadFlattenedArrayStub

LoadFlattenedArrayStub::LoadFlattenedArrayStub(LIR_Opr array, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
  _array = array;
  _index = index;
  _result = result;
  // Tell the register allocator that the runtime call will scratch rax.
  _scratch_reg = FrameMap::rax_oop_opr;
  _info = new CodeEmitInfo(info);
}

void LoadFlattenedArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_array->as_register(), 1);
  ce->store_parameter(_index->as_register(), 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::load_flattened_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
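  // the runtime call returns the loaded element in rax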
  if (_result->as_register() != rax) {
    __ movptr(_result->as_register(), rax);
  }
  __ jmp(_continuation);
}


// Implementation of StoreFlattenedArrayStub

StoreFlattenedArrayStub::StoreFlattenedArrayStub(LIR_Opr array, LIR_Opr index, LIR_Opr value, CodeEmitInfo* info) {
  _array = array;
  _index = index;
  _value = value;
  // Tell the register allocator that the runtime call will scratch rax.
  _scratch_reg = FrameMap::rax_oop_opr;
  _info = new CodeEmitInfo(info);
}


void StoreFlattenedArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_array->as_register(), 2);
  ce->store_parameter(_index->as_register(), 1);
  ce->store_parameter(_value->as_register(), 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::store_flattened_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}


// Implementation of SubstitutabilityCheckStub
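// Used to implement acmp on inline types: two value objects compare equal
// iff they are substitutable (same class, field-by-field equal).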

SubstitutabilityCheckStub::SubstitutabilityCheckStub(LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
  _left = left;
  _right = right;
  // Tell the register allocator that the runtime call will scratch rax.
  _scratch_reg = FrameMap::rax_oop_opr;
  _info = new CodeEmitInfo(info);
}

void SubstitutabilityCheckStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_left->as_register(), 1);
  ce->store_parameter(_right->as_register(), 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::substitutability_check_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}


// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id ||
         stub_id == Runtime1::fast_new_instance_id ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}


void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
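  // The Runtime1 new_instance entries expect the klass in rdx and return the
  // new object in rax.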
  __ movptr(rdx, _klass_reg->as_register());
  __ call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result,
                                       CodeEmitInfo* info, bool is_inline_type) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
  _is_inline_type = is_inline_type;
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
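  // Arrays of inline types may be allocated with a flattened layout, so they
  // take a different runtime entry than ordinary object arrays.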
  if (_is_inline_type) {
    __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_flat_array_id)));
  } else {
    __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
  }
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of MonitorAccessStubs

MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info, CodeStub* throw_imse_stub, LIR_Opr scratch_reg)
: MonitorAccessStub(obj_reg, lock_reg)
{
  _info = new CodeEmitInfo(info);
  _throw_imse_stub = throw_imse_stub;
  _scratch_reg = scratch_reg;
  if (_throw_imse_stub != NULL) {
    assert(_scratch_reg != LIR_OprFact::illegalOpr, "must be");
  }
}


void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  if (_throw_imse_stub != NULL) {
    // When we come here, _obj_reg has already been checked to be non-null.
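    // Inline-type instances carry markWord::always_locked_pattern in their
    // mark word; synchronizing on one must throw IllegalMonitorStateException.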
    const int is_value_mask = markWord::always_locked_pattern;
    Register mark = _scratch_reg->as_register();
    __ movptr(mark, Address(_obj_reg->as_register(), oopDesc::mark_offset_in_bytes()));
    __ andptr(mark, is_value_mask);
    __ cmpl(mark, is_value_mask);
    __ jcc(Assembler::equal, *_throw_imse_stub->entry());
  }
  ce->store_parameter(_obj_reg->as_register(), 1);
  ce->store_parameter(_lock_reg->as_register(), 0);
  Runtime1::StubID enter_id;
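  // The _nofpu variants avoid saving and restoring the FPU/XMM state around
  // the runtime call when the method contains no FPU code.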
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(enter_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(exit_id)));
  __ jmp(_continuation);
}


// Implementation of patching:
// - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction
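//
// Illustrative layout of a patched site and its stub (sketch, not to scale):
//
//   main code:  jmp <stub entry>            ; overwrites the original instruction
//   stub:       <copy of original bytes>    ; the "patch template"
//               movl rax, imm32             ; patch record: offsets and sizes
//               call <Runtime1 patching entry>
//               jmp <patch site>            ; reexecute once patched
//
// The patch record ends exactly where the call begins, hence _patch_info_offset
// is minus one jump-instruction size (5 bytes on x86).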

int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
  // We're patching a 5-7 byte instruction on Intel and we need to make sure
  // that other processors never see a partially patched instruction. Reliably
  // invalidating the caches of other processors appears to be mostly
  // impossible on Intel, and since they may prefetch aggressively it is very
  // hard to guess what code might already be in the icache. Force the
  // instruction to be double word aligned so that it doesn't span a cache
  // line.
  masm->align(align_up((int)NativeGeneralJump::instruction_size, wordSize));
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF, "not enough room for call");
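  // _bytes_to_copy must fit in one byte because the patch record below stores
  // it (and the other offsets) as single bytes.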

  Label call_patch;

  // Static field accesses have special semantics while the class
  // initializer is being run, so we emit a test which can be used to
  // check that this code is being executed by the initializing
  // thread.
  address being_initialized_entry = __ pc();
  if (CommentedAssembly) {
    __ block_comment(" patch template");
  }
  if (_id == load_klass_id) {
    // produce a copy of the load klass instruction for use by the being initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    Metadata* o = NULL;
    __ mov_metadata(_obj, o);
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else if (_id == load_mirror_id) {
    // produce a copy of the load mirror instruction for use by the being
    // initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    jobject o = NULL;
    __ movoop(_obj, o);
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // make a copy of the code which is going to be patched
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ emit_int8(a_byte);
      *ptr = 0x90; // make the site look like a nop
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }
    assert(_obj != noreg, "must be a valid register");
    Register tmp = rax;
    Register tmp2 = rbx;
    __ push(tmp);
    __ push(tmp2);
    // Load without verification to keep code size small. We need it because
    // being_initialized_entry_offset has to fit in a byte. Also, we know it's not null.
    __ movptr(tmp2, Address(_obj, java_lang_Class::klass_offset()));
    __ get_thread(tmp);
    __ cmpptr(tmp, Address(tmp2, InstanceKlass::init_thread_offset()));
    __ pop(tmp2);
    __ pop(tmp);
    __ jcc(Assembler::notEqual, call_patch);

    // access_field patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ jmp(_patch_site_continuation);

    // make sure this extra code gets skipped
    bytes_to_skip += __ offset() - offset;
  }
  if (CommentedAssembly) {
    __ block_comment("patch data encoded as movl");
  }
  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch. We only need 3 bytes but for readability of
  // the disassembly we make the data look like a movl reg, imm32,
  // which requires 5 bytes.
  int sizeof_patch_record = 5;
  bytes_to_skip += sizeof_patch_record;

  // emit the offsets needed to find the code to patch
  int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;

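  // Record layout (disassembles as "movl eax, imm32"):
  //   0xB8 | 0x00 | being_initialized_entry_offset | bytes_to_skip | _bytes_to_copy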
  __ emit_int8((unsigned char)0xB8);
  __ emit_int8(0);
  __ emit_int8(being_initialized_entry_offset);
  __ emit_int8(bytes_to_skip);
  __ emit_int8(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
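  // Overwrite the original instruction at the patch site with an unconditional
  // jump to this stub.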
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  __ call(RuntimeAddress(target));
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  int jmp_off = __ offset();
  __ jmp(_patch_site_entry);
  // Add enough nops so deoptimization can overwrite the jmp above with a call
  // and not destroy the world. We cannot use fat nops here, since the concurrent
  // code rewrite may transiently create an illegal instruction sequence.
  for (int j = __ offset(); j < jmp_off + 5; j++) {
    __ nop();
  }
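  // The constant embedded at the patch site is still a dummy, so drop its
  // oop/metadata relocation until the runtime installs the real value.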
  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    CodeSection* cs = __ code_section();
    RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
    relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, reloc_type, relocInfo::none);
  }
}


void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_trap_request, 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}


void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }

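  // _offset is the code offset of the faulting access; record it so the
  // trapping pc can be mapped to this stub.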
  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}


void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");

  __ bind(_entry);
  // pass the object on stack because all registers must be preserved
  if (_obj->is_cpu_register()) {
    ce->store_parameter(_obj->as_register(), 0);
  }
  __ call(RuntimeAddress(Runtime1::entry_for(_stub)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  // Figure out where the args should go.
  // This should really convert the IntrinsicID to the Method* and signature,
  // but there is no convenient way to do that here.
  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT };
  SharedRuntime::java_calling_convention(signature, args, 5, true);
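  // args[i] now describes where argument i lives (register or stack slot)
  // under the Java calling convention for an outgoing call.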

  // push parameters
  // (src, src_pos, dst, dst_pos, length)
  Register r[5];
  r[0] = src()->as_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  // arguments that do not fit in argument registers are stored on the stack
  for (int i = 0; i < 5; i++) {
    VMReg r_1 = args[i].first();
    if (r_1->is_stack()) {
      int st_off = r_1->reg2stack() * wordSize;
      __ movptr(Address(rsp, st_off), r[i]);
    } else {
      assert(r[i] == args[i].first()->as_Register(), "wrong register for arg");
    }
  }

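  // Emit a resolvable static call: align_call keeps the call displacement from
  // spanning a cache line so it can later be patched atomically, and the
  // static call stub provides the metadata needed for lazy resolution.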
  ce->align_call(lir_static_call);

  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }
  AddressLiteral resolve(SharedRuntime::get_resolve_static_call_stub(),
                         relocInfo::static_call_type);
  __ call(resolve);
  ce->add_call_info_here(info());

#ifndef PRODUCT
  __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
#endif

  __ jmp(_continuation);
}

#undef __