1 /*
2 * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "gc/shared/barrierSetAssembler.hpp"
29 #include "interpreter/interpreter.hpp"
30 #include "interpreter/interpreterRuntime.hpp"
31 #include "interpreter/interp_masm.hpp"
32 #include "interpreter/templateTable.hpp"
33 #include "memory/universe.hpp"
34 #include "oops/methodData.hpp"
35 #include "oops/method.hpp"
36 #include "oops/objArrayKlass.hpp"
37 #include "oops/oop.inline.hpp"
38 #include "prims/methodHandles.hpp"
39 #include "runtime/frame.inline.hpp"
40 #include "runtime/sharedRuntime.hpp"
41 #include "runtime/stubRoutines.hpp"
42 #include "runtime/synchronizer.hpp"
43 #include "utilities/powerOfTwo.hpp"
44
45 #define __ _masm->
46
47 // Platform-dependent initialization
48
49 void TemplateTable::pd_initialize() {
50 // No aarch64 specific initialization
51 }
52
53 // Address computation: local variables
54
55 static inline Address iaddress(int n) {
56 return Address(rlocals, Interpreter::local_offset_in_bytes(n));
57 }
58
59 static inline Address laddress(int n) {
60 return iaddress(n + 1);
61 }
62
63 static inline Address faddress(int n) {
64 return iaddress(n);
65 }
66
67 static inline Address daddress(int n) {
68 return laddress(n);
69 }
70
71 static inline Address aaddress(int n) {
72 return iaddress(n);
73 }
74
75 static inline Address iaddress(Register r) {
76 return Address(rlocals, r, Address::lsl(3));
77 }
78
79 static inline Address laddress(Register r, Register scratch,
80 InterpreterMacroAssembler* _masm) {
81 __ lea(scratch, Address(rlocals, r, Address::lsl(3)));
82 return Address(scratch, Interpreter::local_offset_in_bytes(1));
83 }
84
85 static inline Address faddress(Register r) {
86 return iaddress(r);
87 }
88
89 static inline Address daddress(Register r, Register scratch,
90 InterpreterMacroAssembler* _masm) {
91 return laddress(r, scratch, _masm);
92 }
93
94 static inline Address aaddress(Register r) {
95 return iaddress(r);
96 }
97
98 static inline Address at_rsp() {
99 return Address(esp, 0);
100 }
101
102 // At top of Java expression stack which may be different than esp(). It
103 // isn't for category 1 objects.
104 static inline Address at_tos () {
105 return Address(esp, Interpreter::expr_offset_in_bytes(0));
106 }
107
108 static inline Address at_tos_p1() {
109 return Address(esp, Interpreter::expr_offset_in_bytes(1));
110 }
111
112 static inline Address at_tos_p2() {
113 return Address(esp, Interpreter::expr_offset_in_bytes(2));
114 }
115
116 static inline Address at_tos_p3() {
117 return Address(esp, Interpreter::expr_offset_in_bytes(3));
118 }
119
120 static inline Address at_tos_p4() {
121 return Address(esp, Interpreter::expr_offset_in_bytes(4));
122 }
123
124 static inline Address at_tos_p5() {
125 return Address(esp, Interpreter::expr_offset_in_bytes(5));
126 }
127
128 // Condition conversion
129 static Assembler::Condition j_not(TemplateTable::Condition cc) {
130 switch (cc) {
131 case TemplateTable::equal : return Assembler::NE;
132 case TemplateTable::not_equal : return Assembler::EQ;
133 case TemplateTable::less : return Assembler::GE;
134 case TemplateTable::less_equal : return Assembler::GT;
135 case TemplateTable::greater : return Assembler::LE;
136 case TemplateTable::greater_equal: return Assembler::LT;
137 }
138 ShouldNotReachHere();
139 return Assembler::EQ;
140 }
141
142
143 // Miscelaneous helper routines
144 // Store an oop (or NULL) at the Address described by obj.
145 // If val == noreg this means store a NULL
146 static void do_oop_store(InterpreterMacroAssembler* _masm,
147 Address dst,
148 Register val,
149 DecoratorSet decorators) {
150 assert(val == noreg || val == r0, "parameter is just for looks");
151 __ store_heap_oop(dst, val, r10, r1, decorators);
152 }
153
154 static void do_oop_load(InterpreterMacroAssembler* _masm,
155 Address src,
156 Register dst,
157 DecoratorSet decorators) {
158 __ load_heap_oop(dst, src, r10, r1, decorators);
159 }
160
161 Address TemplateTable::at_bcp(int offset) {
162 assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
163 return Address(rbcp, offset);
164 }
165
166 void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
167 Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
168 int byte_no)
169 {
170 if (!RewriteBytecodes) return;
171 Label L_patch_done;
172
173 switch (bc) {
174 case Bytecodes::_fast_aputfield:
175 case Bytecodes::_fast_bputfield:
176 case Bytecodes::_fast_zputfield:
177 case Bytecodes::_fast_cputfield:
178 case Bytecodes::_fast_dputfield:
179 case Bytecodes::_fast_fputfield:
180 case Bytecodes::_fast_iputfield:
181 case Bytecodes::_fast_lputfield:
182 case Bytecodes::_fast_sputfield:
183 {
184 // We skip bytecode quickening for putfield instructions when
185 // the put_code written to the constant pool cache is zero.
186 // This is required so that every execution of this instruction
187 // calls out to InterpreterRuntime::resolve_get_put to do
188 // additional, required work.
189 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
190 assert(load_bc_into_bc_reg, "we use bc_reg as temp");
191 __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1);
192 __ movw(bc_reg, bc);
193 __ cbzw(temp_reg, L_patch_done); // don't patch
194 }
195 break;
196 default:
197 assert(byte_no == -1, "sanity");
198 // the pair bytecodes have already done the load.
199 if (load_bc_into_bc_reg) {
200 __ movw(bc_reg, bc);
201 }
202 }
203
204 if (JvmtiExport::can_post_breakpoint()) {
205 Label L_fast_patch;
206 // if a breakpoint is present we can't rewrite the stream directly
207 __ load_unsigned_byte(temp_reg, at_bcp(0));
208 __ cmpw(temp_reg, Bytecodes::_breakpoint);
209 __ br(Assembler::NE, L_fast_patch);
210 // Let breakpoint table handling rewrite to quicker bytecode
211 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), rmethod, rbcp, bc_reg);
212 __ b(L_patch_done);
213 __ bind(L_fast_patch);
214 }
215
216 #ifdef ASSERT
217 Label L_okay;
218 __ load_unsigned_byte(temp_reg, at_bcp(0));
219 __ cmpw(temp_reg, (int) Bytecodes::java_code(bc));
220 __ br(Assembler::EQ, L_okay);
221 __ cmpw(temp_reg, bc_reg);
222 __ br(Assembler::EQ, L_okay);
223 __ stop("patching the wrong bytecode");
224 __ bind(L_okay);
225 #endif
226
227 // patch bytecode
228 __ strb(bc_reg, at_bcp(0));
229 __ bind(L_patch_done);
230 }
231
232
233 // Individual instructions
234
235 void TemplateTable::nop() {
236 transition(vtos, vtos);
237 // nothing to do
238 }
239
240 void TemplateTable::shouldnotreachhere() {
241 transition(vtos, vtos);
242 __ stop("shouldnotreachhere bytecode");
243 }
244
245 void TemplateTable::aconst_null()
246 {
247 transition(vtos, atos);
248 __ mov(r0, 0);
249 }
250
251 void TemplateTable::iconst(int value)
252 {
253 transition(vtos, itos);
254 __ mov(r0, value);
255 }
256
257 void TemplateTable::lconst(int value)
258 {
259 __ mov(r0, value);
260 }
261
262 void TemplateTable::fconst(int value)
263 {
264 transition(vtos, ftos);
265 switch (value) {
266 case 0:
267 __ fmovs(v0, zr);
268 break;
269 case 1:
270 __ fmovs(v0, 1.0);
271 break;
272 case 2:
273 __ fmovs(v0, 2.0);
274 break;
275 default:
276 ShouldNotReachHere();
277 break;
278 }
279 }
280
281 void TemplateTable::dconst(int value)
282 {
283 transition(vtos, dtos);
284 switch (value) {
285 case 0:
286 __ fmovd(v0, zr);
287 break;
288 case 1:
289 __ fmovd(v0, 1.0);
290 break;
291 case 2:
292 __ fmovd(v0, 2.0);
293 break;
294 default:
295 ShouldNotReachHere();
296 break;
297 }
298 }
299
300 void TemplateTable::bipush()
301 {
302 transition(vtos, itos);
303 __ load_signed_byte32(r0, at_bcp(1));
304 }
305
306 void TemplateTable::sipush()
307 {
308 transition(vtos, itos);
309 __ load_unsigned_short(r0, at_bcp(1));
310 __ revw(r0, r0);
311 __ asrw(r0, r0, 16);
312 }
313
314 void TemplateTable::ldc(bool wide)
315 {
316 transition(vtos, vtos);
317 Label call_ldc, notFloat, notClass, notInt, Done;
318
319 if (wide) {
320 __ get_unsigned_2_byte_index_at_bcp(r1, 1);
321 } else {
322 __ load_unsigned_byte(r1, at_bcp(1));
323 }
324 __ get_cpool_and_tags(r2, r0);
325
326 const int base_offset = ConstantPool::header_size() * wordSize;
327 const int tags_offset = Array<u1>::base_offset_in_bytes();
328
329 // get type
330 __ add(r3, r1, tags_offset);
331 __ lea(r3, Address(r0, r3));
332 __ ldarb(r3, r3);
333
334 // unresolved class - get the resolved class
335 __ cmp(r3, (u1)JVM_CONSTANT_UnresolvedClass);
336 __ br(Assembler::EQ, call_ldc);
337
338 // unresolved class in error state - call into runtime to throw the error
339 // from the first resolution attempt
340 __ cmp(r3, (u1)JVM_CONSTANT_UnresolvedClassInError);
341 __ br(Assembler::EQ, call_ldc);
342
343 // resolved class - need to call vm to get java mirror of the class
344 __ cmp(r3, (u1)JVM_CONSTANT_Class);
345 __ br(Assembler::NE, notClass);
346
347 __ bind(call_ldc);
348 __ mov(c_rarg1, wide);
349 call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), c_rarg1);
350 __ push_ptr(r0);
351 __ verify_oop(r0);
352 __ b(Done);
353
354 __ bind(notClass);
355 __ cmp(r3, (u1)JVM_CONSTANT_Float);
356 __ br(Assembler::NE, notFloat);
357 // ftos
358 __ adds(r1, r2, r1, Assembler::LSL, 3);
359 __ ldrs(v0, Address(r1, base_offset));
360 __ push_f();
361 __ b(Done);
362
363 __ bind(notFloat);
364
365 __ cmp(r3, (u1)JVM_CONSTANT_Integer);
366 __ br(Assembler::NE, notInt);
367
368 // itos
369 __ adds(r1, r2, r1, Assembler::LSL, 3);
370 __ ldrw(r0, Address(r1, base_offset));
371 __ push_i(r0);
372 __ b(Done);
373
374 __ bind(notInt);
375 condy_helper(Done);
376
377 __ bind(Done);
378 }
379
380 // Fast path for caching oop constants.
381 void TemplateTable::fast_aldc(bool wide)
382 {
383 transition(vtos, atos);
384
385 Register result = r0;
386 Register tmp = r1;
387 Register rarg = r2;
388
389 int index_size = wide ? sizeof(u2) : sizeof(u1);
390
391 Label resolved;
392
393 // We are resolved if the resolved reference cache entry contains a
394 // non-null object (String, MethodType, etc.)
395 assert_different_registers(result, tmp);
396 __ get_cache_index_at_bcp(tmp, 1, index_size);
397 __ load_resolved_reference_at_index(result, tmp);
398 __ cbnz(result, resolved);
399
400 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
401
402 // first time invocation - must resolve first
403 __ mov(rarg, (int)bytecode());
404 __ call_VM(result, entry, rarg);
405
406 __ bind(resolved);
407
408 { // Check for the null sentinel.
409 // If we just called the VM, it already did the mapping for us,
410 // but it's harmless to retry.
411 Label notNull;
412
413 // Stash null_sentinel address to get its value later
414 __ movptr(rarg, (uintptr_t)Universe::the_null_sentinel_addr());
415 __ ldr(tmp, Address(rarg));
416 __ cmpoop(result, tmp);
417 __ br(Assembler::NE, notNull);
418 __ mov(result, 0); // NULL object reference
419 __ bind(notNull);
420 }
421
422 if (VerifyOops) {
423 // Safe to call with 0 result
424 __ verify_oop(result);
425 }
426 }
427
428 void TemplateTable::ldc2_w()
429 {
430 transition(vtos, vtos);
431 Label notDouble, notLong, Done;
432 __ get_unsigned_2_byte_index_at_bcp(r0, 1);
433
434 __ get_cpool_and_tags(r1, r2);
435 const int base_offset = ConstantPool::header_size() * wordSize;
436 const int tags_offset = Array<u1>::base_offset_in_bytes();
437
438 // get type
439 __ lea(r2, Address(r2, r0, Address::lsl(0)));
440 __ load_unsigned_byte(r2, Address(r2, tags_offset));
441 __ cmpw(r2, (int)JVM_CONSTANT_Double);
442 __ br(Assembler::NE, notDouble);
443
444 // dtos
445 __ lea (r2, Address(r1, r0, Address::lsl(3)));
446 __ ldrd(v0, Address(r2, base_offset));
447 __ push_d();
448 __ b(Done);
449
450 __ bind(notDouble);
451 __ cmpw(r2, (int)JVM_CONSTANT_Long);
452 __ br(Assembler::NE, notLong);
453
454 // ltos
455 __ lea(r0, Address(r1, r0, Address::lsl(3)));
456 __ ldr(r0, Address(r0, base_offset));
457 __ push_l();
458 __ b(Done);
459
460 __ bind(notLong);
461 condy_helper(Done);
462
463 __ bind(Done);
464 }
465
466 void TemplateTable::condy_helper(Label& Done)
467 {
468 Register obj = r0;
469 Register rarg = r1;
470 Register flags = r2;
471 Register off = r3;
472
473 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
474
475 __ mov(rarg, (int) bytecode());
476 __ call_VM(obj, entry, rarg);
477
478 __ get_vm_result_2(flags, rthread);
479
480 // VMr = obj = base address to find primitive value to push
481 // VMr2 = flags = (tos, off) using format of CPCE::_flags
482 __ mov(off, flags);
483 __ andw(off, off, ConstantPoolCacheEntry::field_index_mask);
484
485 const Address field(obj, off);
486
487 // What sort of thing are we loading?
488 // x86 uses a shift and mask or wings it with a shift plus assert
489 // the mask is not needed. aarch64 just uses bitfield extract
490 __ ubfxw(flags, flags, ConstantPoolCacheEntry::tos_state_shift,
491 ConstantPoolCacheEntry::tos_state_bits);
492
493 switch (bytecode()) {
494 case Bytecodes::_ldc:
495 case Bytecodes::_ldc_w:
496 {
497 // tos in (itos, ftos, stos, btos, ctos, ztos)
498 Label notInt, notFloat, notShort, notByte, notChar, notBool;
499 __ cmpw(flags, itos);
500 __ br(Assembler::NE, notInt);
501 // itos
502 __ ldrw(r0, field);
503 __ push(itos);
504 __ b(Done);
505
506 __ bind(notInt);
507 __ cmpw(flags, ftos);
508 __ br(Assembler::NE, notFloat);
509 // ftos
510 __ load_float(field);
511 __ push(ftos);
512 __ b(Done);
513
514 __ bind(notFloat);
515 __ cmpw(flags, stos);
516 __ br(Assembler::NE, notShort);
517 // stos
518 __ load_signed_short(r0, field);
519 __ push(stos);
520 __ b(Done);
521
522 __ bind(notShort);
523 __ cmpw(flags, btos);
524 __ br(Assembler::NE, notByte);
525 // btos
526 __ load_signed_byte(r0, field);
527 __ push(btos);
528 __ b(Done);
529
530 __ bind(notByte);
531 __ cmpw(flags, ctos);
532 __ br(Assembler::NE, notChar);
533 // ctos
534 __ load_unsigned_short(r0, field);
535 __ push(ctos);
536 __ b(Done);
537
538 __ bind(notChar);
539 __ cmpw(flags, ztos);
540 __ br(Assembler::NE, notBool);
541 // ztos
542 __ load_signed_byte(r0, field);
543 __ push(ztos);
544 __ b(Done);
545
546 __ bind(notBool);
547 break;
548 }
549
550 case Bytecodes::_ldc2_w:
551 {
552 Label notLong, notDouble;
553 __ cmpw(flags, ltos);
554 __ br(Assembler::NE, notLong);
555 // ltos
556 __ ldr(r0, field);
557 __ push(ltos);
558 __ b(Done);
559
560 __ bind(notLong);
561 __ cmpw(flags, dtos);
562 __ br(Assembler::NE, notDouble);
563 // dtos
564 __ load_double(field);
565 __ push(dtos);
566 __ b(Done);
567
568 __ bind(notDouble);
569 break;
570 }
571
572 default:
573 ShouldNotReachHere();
574 }
575
576 __ stop("bad ldc/condy");
577 }
578
579 void TemplateTable::locals_index(Register reg, int offset)
580 {
581 __ ldrb(reg, at_bcp(offset));
582 __ neg(reg, reg);
583 }
584
585 void TemplateTable::iload() {
586 iload_internal();
587 }
588
589 void TemplateTable::nofast_iload() {
590 iload_internal(may_not_rewrite);
591 }
592
593 void TemplateTable::iload_internal(RewriteControl rc) {
594 transition(vtos, itos);
595 if (RewriteFrequentPairs && rc == may_rewrite) {
596 Label rewrite, done;
597 Register bc = r4;
598
599 // get next bytecode
600 __ load_unsigned_byte(r1, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
601
602 // if _iload, wait to rewrite to iload2. We only want to rewrite the
603 // last two iloads in a pair. Comparing against fast_iload means that
604 // the next bytecode is neither an iload or a caload, and therefore
605 // an iload pair.
606 __ cmpw(r1, Bytecodes::_iload);
607 __ br(Assembler::EQ, done);
608
609 // if _fast_iload rewrite to _fast_iload2
610 __ cmpw(r1, Bytecodes::_fast_iload);
611 __ movw(bc, Bytecodes::_fast_iload2);
612 __ br(Assembler::EQ, rewrite);
613
614 // if _caload rewrite to _fast_icaload
615 __ cmpw(r1, Bytecodes::_caload);
616 __ movw(bc, Bytecodes::_fast_icaload);
617 __ br(Assembler::EQ, rewrite);
618
619 // else rewrite to _fast_iload
620 __ movw(bc, Bytecodes::_fast_iload);
621
622 // rewrite
623 // bc: new bytecode
624 __ bind(rewrite);
625 patch_bytecode(Bytecodes::_iload, bc, r1, false);
626 __ bind(done);
627
628 }
629
630 // do iload, get the local value into tos
631 locals_index(r1);
632 __ ldr(r0, iaddress(r1));
633
634 }
635
636 void TemplateTable::fast_iload2()
637 {
638 transition(vtos, itos);
639 locals_index(r1);
640 __ ldr(r0, iaddress(r1));
641 __ push(itos);
642 locals_index(r1, 3);
643 __ ldr(r0, iaddress(r1));
644 }
645
646 void TemplateTable::fast_iload()
647 {
648 transition(vtos, itos);
649 locals_index(r1);
650 __ ldr(r0, iaddress(r1));
651 }
652
653 void TemplateTable::lload()
654 {
655 transition(vtos, ltos);
656 __ ldrb(r1, at_bcp(1));
657 __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
658 __ ldr(r0, Address(r1, Interpreter::local_offset_in_bytes(1)));
659 }
660
661 void TemplateTable::fload()
662 {
663 transition(vtos, ftos);
664 locals_index(r1);
665 // n.b. we use ldrd here because this is a 64 bit slot
666 // this is comparable to the iload case
667 __ ldrd(v0, faddress(r1));
668 }
669
670 void TemplateTable::dload()
671 {
672 transition(vtos, dtos);
673 __ ldrb(r1, at_bcp(1));
674 __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
675 __ ldrd(v0, Address(r1, Interpreter::local_offset_in_bytes(1)));
676 }
677
678 void TemplateTable::aload()
679 {
680 transition(vtos, atos);
681 locals_index(r1);
682 __ ldr(r0, iaddress(r1));
683 }
684
685 void TemplateTable::locals_index_wide(Register reg) {
686 __ ldrh(reg, at_bcp(2));
687 __ rev16w(reg, reg);
688 __ neg(reg, reg);
689 }
690
691 void TemplateTable::wide_iload() {
692 transition(vtos, itos);
693 locals_index_wide(r1);
694 __ ldr(r0, iaddress(r1));
695 }
696
697 void TemplateTable::wide_lload()
698 {
699 transition(vtos, ltos);
700 __ ldrh(r1, at_bcp(2));
701 __ rev16w(r1, r1);
702 __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
703 __ ldr(r0, Address(r1, Interpreter::local_offset_in_bytes(1)));
704 }
705
706 void TemplateTable::wide_fload()
707 {
708 transition(vtos, ftos);
709 locals_index_wide(r1);
710 // n.b. we use ldrd here because this is a 64 bit slot
711 // this is comparable to the iload case
712 __ ldrd(v0, faddress(r1));
713 }
714
715 void TemplateTable::wide_dload()
716 {
717 transition(vtos, dtos);
718 __ ldrh(r1, at_bcp(2));
719 __ rev16w(r1, r1);
720 __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
721 __ ldrd(v0, Address(r1, Interpreter::local_offset_in_bytes(1)));
722 }
723
724 void TemplateTable::wide_aload()
725 {
726 transition(vtos, atos);
727 locals_index_wide(r1);
728 __ ldr(r0, aaddress(r1));
729 }
730
731 void TemplateTable::index_check(Register array, Register index)
732 {
733 // destroys r1, rscratch1
734 // check array
735 __ null_check(array, arrayOopDesc::length_offset_in_bytes());
736 // sign extend index for use by indexed load
737 // __ movl2ptr(index, index);
738 // check index
739 Register length = rscratch1;
740 __ ldrw(length, Address(array, arrayOopDesc::length_offset_in_bytes()));
741 __ cmpw(index, length);
742 if (index != r1) {
743 // ??? convention: move aberrant index into r1 for exception message
744 assert(r1 != array, "different registers");
745 __ mov(r1, index);
746 }
747 Label ok;
748 __ br(Assembler::LO, ok);
749 // ??? convention: move array into r3 for exception message
750 __ mov(r3, array);
751 __ mov(rscratch1, Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);
752 __ br(rscratch1);
753 __ bind(ok);
754 }
755
756 void TemplateTable::iaload()
757 {
758 transition(itos, itos);
759 __ mov(r1, r0);
760 __ pop_ptr(r0);
761 // r0: array
762 // r1: index
763 index_check(r0, r1); // leaves index in r1, kills rscratch1
764 __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_INT) >> 2);
765 __ access_load_at(T_INT, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(2)), noreg, noreg);
766 }
767
768 void TemplateTable::laload()
769 {
770 transition(itos, ltos);
771 __ mov(r1, r0);
772 __ pop_ptr(r0);
773 // r0: array
774 // r1: index
775 index_check(r0, r1); // leaves index in r1, kills rscratch1
776 __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_LONG) >> 3);
777 __ access_load_at(T_LONG, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(3)), noreg, noreg);
778 }
779
780 void TemplateTable::faload()
781 {
782 transition(itos, ftos);
783 __ mov(r1, r0);
784 __ pop_ptr(r0);
785 // r0: array
786 // r1: index
787 index_check(r0, r1); // leaves index in r1, kills rscratch1
788 __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_FLOAT) >> 2);
789 __ access_load_at(T_FLOAT, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(2)), noreg, noreg);
790 }
791
792 void TemplateTable::daload()
793 {
794 transition(itos, dtos);
795 __ mov(r1, r0);
796 __ pop_ptr(r0);
797 // r0: array
798 // r1: index
799 index_check(r0, r1); // leaves index in r1, kills rscratch1
800 __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) >> 3);
801 __ access_load_at(T_DOUBLE, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(3)), noreg, noreg);
802 }
803
804 void TemplateTable::aaload()
805 {
806 transition(itos, atos);
807 __ mov(r1, r0);
808 __ pop_ptr(r0);
809 // r0: array
810 // r1: index
811 index_check(r0, r1); // leaves index in r1, kills rscratch1
812 __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
813 do_oop_load(_masm,
814 Address(r0, r1, Address::uxtw(LogBytesPerHeapOop)),
815 r0,
816 IS_ARRAY);
817 }
818
819 void TemplateTable::baload()
820 {
821 transition(itos, itos);
822 __ mov(r1, r0);
823 __ pop_ptr(r0);
824 // r0: array
825 // r1: index
826 index_check(r0, r1); // leaves index in r1, kills rscratch1
827 __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_BYTE) >> 0);
828 __ access_load_at(T_BYTE, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(0)), noreg, noreg);
829 }
830
831 void TemplateTable::caload()
832 {
833 transition(itos, itos);
834 __ mov(r1, r0);
835 __ pop_ptr(r0);
836 // r0: array
837 // r1: index
838 index_check(r0, r1); // leaves index in r1, kills rscratch1
839 __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1);
840 __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
841 }
842
843 // iload followed by caload frequent pair
844 void TemplateTable::fast_icaload()
845 {
846 transition(vtos, itos);
847 // load index out of locals
848 locals_index(r2);
849 __ ldr(r1, iaddress(r2));
850
851 __ pop_ptr(r0);
852
853 // r0: array
854 // r1: index
855 index_check(r0, r1); // leaves index in r1, kills rscratch1
856 __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1);
857 __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
858 }
859
860 void TemplateTable::saload()
861 {
862 transition(itos, itos);
863 __ mov(r1, r0);
864 __ pop_ptr(r0);
865 // r0: array
866 // r1: index
867 index_check(r0, r1); // leaves index in r1, kills rscratch1
868 __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_SHORT) >> 1);
869 __ access_load_at(T_SHORT, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
870 }
871
872 void TemplateTable::iload(int n)
873 {
874 transition(vtos, itos);
875 __ ldr(r0, iaddress(n));
876 }
877
878 void TemplateTable::lload(int n)
879 {
880 transition(vtos, ltos);
881 __ ldr(r0, laddress(n));
882 }
883
884 void TemplateTable::fload(int n)
885 {
886 transition(vtos, ftos);
887 __ ldrs(v0, faddress(n));
888 }
889
890 void TemplateTable::dload(int n)
891 {
892 transition(vtos, dtos);
893 __ ldrd(v0, daddress(n));
894 }
895
896 void TemplateTable::aload(int n)
897 {
898 transition(vtos, atos);
899 __ ldr(r0, iaddress(n));
900 }
901
902 void TemplateTable::aload_0() {
903 aload_0_internal();
904 }
905
906 void TemplateTable::nofast_aload_0() {
907 aload_0_internal(may_not_rewrite);
908 }
909
910 void TemplateTable::aload_0_internal(RewriteControl rc) {
911 // According to bytecode histograms, the pairs:
912 //
913 // _aload_0, _fast_igetfield
914 // _aload_0, _fast_agetfield
915 // _aload_0, _fast_fgetfield
916 //
917 // occur frequently. If RewriteFrequentPairs is set, the (slow)
918 // _aload_0 bytecode checks if the next bytecode is either
919 // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
920 // rewrites the current bytecode into a pair bytecode; otherwise it
921 // rewrites the current bytecode into _fast_aload_0 that doesn't do
922 // the pair check anymore.
923 //
924 // Note: If the next bytecode is _getfield, the rewrite must be
925 // delayed, otherwise we may miss an opportunity for a pair.
926 //
927 // Also rewrite frequent pairs
928 // aload_0, aload_1
929 // aload_0, iload_1
930 // These bytecodes with a small amount of code are most profitable
931 // to rewrite
932 if (RewriteFrequentPairs && rc == may_rewrite) {
933 Label rewrite, done;
934 const Register bc = r4;
935
936 // get next bytecode
937 __ load_unsigned_byte(r1, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));
938
939 // if _getfield then wait with rewrite
940 __ cmpw(r1, Bytecodes::Bytecodes::_getfield);
941 __ br(Assembler::EQ, done);
942
943 // if _igetfield then rewrite to _fast_iaccess_0
944 assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
945 __ cmpw(r1, Bytecodes::_fast_igetfield);
946 __ movw(bc, Bytecodes::_fast_iaccess_0);
947 __ br(Assembler::EQ, rewrite);
948
949 // if _agetfield then rewrite to _fast_aaccess_0
950 assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
951 __ cmpw(r1, Bytecodes::_fast_agetfield);
952 __ movw(bc, Bytecodes::_fast_aaccess_0);
953 __ br(Assembler::EQ, rewrite);
954
955 // if _fgetfield then rewrite to _fast_faccess_0
956 assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
957 __ cmpw(r1, Bytecodes::_fast_fgetfield);
958 __ movw(bc, Bytecodes::_fast_faccess_0);
959 __ br(Assembler::EQ, rewrite);
960
961 // else rewrite to _fast_aload0
962 assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
963 __ movw(bc, Bytecodes::Bytecodes::_fast_aload_0);
964
965 // rewrite
966 // bc: new bytecode
967 __ bind(rewrite);
968 patch_bytecode(Bytecodes::_aload_0, bc, r1, false);
969
970 __ bind(done);
971 }
972
973 // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop).
974 aload(0);
975 }
976
977 void TemplateTable::istore()
978 {
979 transition(itos, vtos);
980 locals_index(r1);
981 // FIXME: We're being very pernickerty here storing a jint in a
982 // local with strw, which costs an extra instruction over what we'd
983 // be able to do with a simple str. We should just store the whole
984 // word.
985 __ lea(rscratch1, iaddress(r1));
986 __ strw(r0, Address(rscratch1));
987 }
988
989 void TemplateTable::lstore()
990 {
991 transition(ltos, vtos);
992 locals_index(r1);
993 __ str(r0, laddress(r1, rscratch1, _masm));
994 }
995
996 void TemplateTable::fstore() {
997 transition(ftos, vtos);
998 locals_index(r1);
999 __ lea(rscratch1, iaddress(r1));
1000 __ strs(v0, Address(rscratch1));
1001 }
1002
1003 void TemplateTable::dstore() {
1004 transition(dtos, vtos);
1005 locals_index(r1);
1006 __ strd(v0, daddress(r1, rscratch1, _masm));
1007 }
1008
1009 void TemplateTable::astore()
1010 {
1011 transition(vtos, vtos);
1012 __ pop_ptr(r0);
1013 locals_index(r1);
1014 __ str(r0, aaddress(r1));
1015 }
1016
1017 void TemplateTable::wide_istore() {
1018 transition(vtos, vtos);
1019 __ pop_i();
1020 locals_index_wide(r1);
1021 __ lea(rscratch1, iaddress(r1));
1022 __ strw(r0, Address(rscratch1));
1023 }
1024
1025 void TemplateTable::wide_lstore() {
1026 transition(vtos, vtos);
1027 __ pop_l();
1028 locals_index_wide(r1);
1029 __ str(r0, laddress(r1, rscratch1, _masm));
1030 }
1031
1032 void TemplateTable::wide_fstore() {
1033 transition(vtos, vtos);
1034 __ pop_f();
1035 locals_index_wide(r1);
1036 __ lea(rscratch1, faddress(r1));
1037 __ strs(v0, rscratch1);
1038 }
1039
1040 void TemplateTable::wide_dstore() {
1041 transition(vtos, vtos);
1042 __ pop_d();
1043 locals_index_wide(r1);
1044 __ strd(v0, daddress(r1, rscratch1, _masm));
1045 }
1046
1047 void TemplateTable::wide_astore() {
1048 transition(vtos, vtos);
1049 __ pop_ptr(r0);
1050 locals_index_wide(r1);
1051 __ str(r0, aaddress(r1));
1052 }
1053
1054 void TemplateTable::iastore() {
1055 transition(itos, vtos);
1056 __ pop_i(r1);
1057 __ pop_ptr(r3);
1058 // r0: value
1059 // r1: index
1060 // r3: array
1061 index_check(r3, r1); // prefer index in r1
1062 __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_INT) >> 2);
1063 __ access_store_at(T_INT, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(2)), r0, noreg, noreg);
1064 }
1065
1066 void TemplateTable::lastore() {
1067 transition(ltos, vtos);
1068 __ pop_i(r1);
1069 __ pop_ptr(r3);
1070 // r0: value
1071 // r1: index
1072 // r3: array
1073 index_check(r3, r1); // prefer index in r1
1074 __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_LONG) >> 3);
1075 __ access_store_at(T_LONG, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(3)), r0, noreg, noreg);
1076 }
1077
1078 void TemplateTable::fastore() {
1079 transition(ftos, vtos);
1080 __ pop_i(r1);
1081 __ pop_ptr(r3);
1082 // v0: value
1083 // r1: index
1084 // r3: array
1085 index_check(r3, r1); // prefer index in r1
1086 __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_FLOAT) >> 2);
1087 __ access_store_at(T_FLOAT, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(2)), noreg /* ftos */, noreg, noreg);
1088 }
1089
1090 void TemplateTable::dastore() {
1091 transition(dtos, vtos);
1092 __ pop_i(r1);
1093 __ pop_ptr(r3);
1094 // v0: value
1095 // r1: index
1096 // r3: array
1097 index_check(r3, r1); // prefer index in r1
1098 __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) >> 3);
1099 __ access_store_at(T_DOUBLE, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(3)), noreg /* dtos */, noreg, noreg);
1100 }
1101
1102 void TemplateTable::aastore() {
1103 Label is_null, ok_is_subtype, done;
1104 transition(vtos, vtos);
1105 // stack: ..., array, index, value
1106 __ ldr(r0, at_tos()); // value
1107 __ ldr(r2, at_tos_p1()); // index
1108 __ ldr(r3, at_tos_p2()); // array
1109
1110 Address element_address(r3, r4, Address::uxtw(LogBytesPerHeapOop));
1111
1112 index_check(r3, r2); // kills r1
1113 __ add(r4, r2, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
1114
1115 // do array store check - check for NULL value first
1116 __ cbz(r0, is_null);
1117
1118 // Move subklass into r1
1119 __ load_klass(r1, r0);
1120 // Move superklass into r0
1121 __ load_klass(r0, r3);
1122 __ ldr(r0, Address(r0,
1123 ObjArrayKlass::element_klass_offset()));
1124 // Compress array + index*oopSize + 12 into a single register. Frees r2.
1125
1126 // Generate subtype check. Blows r2, r5
1127 // Superklass in r0. Subklass in r1.
1128 __ gen_subtype_check(r1, ok_is_subtype);
1129
1130 // Come here on failure
1131 // object is at TOS
1132 __ b(Interpreter::_throw_ArrayStoreException_entry);
1133
1134 // Come here on success
1135 __ bind(ok_is_subtype);
1136
1137 // Get the value we will store
1138 __ ldr(r0, at_tos());
1139 // Now store using the appropriate barrier
1140 do_oop_store(_masm, element_address, r0, IS_ARRAY);
1141 __ b(done);
1142
1143 // Have a NULL in r0, r3=array, r2=index. Store NULL at ary[idx]
1144 __ bind(is_null);
1145 __ profile_null_seen(r2);
1146
1147 // Store a NULL
1148 do_oop_store(_masm, element_address, noreg, IS_ARRAY);
1149
1150 // Pop stack arguments
1151 __ bind(done);
1152 __ add(esp, esp, 3 * Interpreter::stackElementSize);
1153 }
1154
1155 void TemplateTable::bastore()
1156 {
1157 transition(itos, vtos);
1158 __ pop_i(r1);
1159 __ pop_ptr(r3);
1160 // r0: value
1161 // r1: index
1162 // r3: array
1163 index_check(r3, r1); // prefer index in r1
1164
1165 // Need to check whether array is boolean or byte
1166 // since both types share the bastore bytecode.
1167 __ load_klass(r2, r3);
1168 __ ldrw(r2, Address(r2, Klass::layout_helper_offset()));
1169 int diffbit_index = exact_log2(Klass::layout_helper_boolean_diffbit());
1170 Label L_skip;
1171 __ tbz(r2, diffbit_index, L_skip);
1172 __ andw(r0, r0, 1); // if it is a T_BOOLEAN array, mask the stored value to 0/1
1173 __ bind(L_skip);
1174
1175 __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_BYTE) >> 0);
1176 __ access_store_at(T_BYTE, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(0)), r0, noreg, noreg);
1177 }
1178
1179 void TemplateTable::castore()
1180 {
1181 transition(itos, vtos);
1182 __ pop_i(r1);
1183 __ pop_ptr(r3);
1184 // r0: value
1185 // r1: index
1186 // r3: array
1187 index_check(r3, r1); // prefer index in r1
1188 __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1);
1189 __ access_store_at(T_CHAR, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(1)), r0, noreg, noreg);
1190 }
1191
1192 void TemplateTable::sastore()
1193 {
1194 castore();
1195 }
1196
1197 void TemplateTable::istore(int n)
1198 {
1199 transition(itos, vtos);
1200 __ str(r0, iaddress(n));
1201 }
1202
1203 void TemplateTable::lstore(int n)
1204 {
1205 transition(ltos, vtos);
1206 __ str(r0, laddress(n));
1207 }
1208
1209 void TemplateTable::fstore(int n)
1210 {
1211 transition(ftos, vtos);
1212 __ strs(v0, faddress(n));
1213 }
1214
1215 void TemplateTable::dstore(int n)
1216 {
1217 transition(dtos, vtos);
1218 __ strd(v0, daddress(n));
1219 }
1220
1221 void TemplateTable::astore(int n)
1222 {
1223 transition(vtos, vtos);
1224 __ pop_ptr(r0);
1225 __ str(r0, iaddress(n));
1226 }
1227
1228 void TemplateTable::pop()
1229 {
1230 transition(vtos, vtos);
1231 __ add(esp, esp, Interpreter::stackElementSize);
1232 }
1233
1234 void TemplateTable::pop2()
1235 {
1236 transition(vtos, vtos);
1237 __ add(esp, esp, 2 * Interpreter::stackElementSize);
1238 }
1239
1240 void TemplateTable::dup()
1241 {
1242 transition(vtos, vtos);
1243 __ ldr(r0, Address(esp, 0));
1244 __ push(r0);
1245 // stack: ..., a, a
1246 }
1247
1248 void TemplateTable::dup_x1()
1249 {
1250 transition(vtos, vtos);
1251 // stack: ..., a, b
1252 __ ldr(r0, at_tos()); // load b
1253 __ ldr(r2, at_tos_p1()); // load a
1254 __ str(r0, at_tos_p1()); // store b
1255 __ str(r2, at_tos()); // store a
1256 __ push(r0); // push b
1257 // stack: ..., b, a, b
1258 }
1259
1260 void TemplateTable::dup_x2()
1261 {
1262 transition(vtos, vtos);
1263 // stack: ..., a, b, c
1264 __ ldr(r0, at_tos()); // load c
1265 __ ldr(r2, at_tos_p2()); // load a
1266 __ str(r0, at_tos_p2()); // store c in a
1267 __ push(r0); // push c
1268 // stack: ..., c, b, c, c
1269 __ ldr(r0, at_tos_p2()); // load b
1270 __ str(r2, at_tos_p2()); // store a in b
1271 // stack: ..., c, a, c, c
1272 __ str(r0, at_tos_p1()); // store b in c
1273 // stack: ..., c, a, b, c
1274 }
1275
1276 void TemplateTable::dup2()
1277 {
1278 transition(vtos, vtos);
1279 // stack: ..., a, b
1280 __ ldr(r0, at_tos_p1()); // load a
1281 __ push(r0); // push a
1282 __ ldr(r0, at_tos_p1()); // load b
1283 __ push(r0); // push b
1284 // stack: ..., a, b, a, b
1285 }
1286
1287 void TemplateTable::dup2_x1()
1288 {
1289 transition(vtos, vtos);
1290 // stack: ..., a, b, c
1291 __ ldr(r2, at_tos()); // load c
1292 __ ldr(r0, at_tos_p1()); // load b
1293 __ push(r0); // push b
1294 __ push(r2); // push c
1295 // stack: ..., a, b, c, b, c
1296 __ str(r2, at_tos_p3()); // store c in b
1297 // stack: ..., a, c, c, b, c
1298 __ ldr(r2, at_tos_p4()); // load a
1299 __ str(r2, at_tos_p2()); // store a in 2nd c
1300 // stack: ..., a, c, a, b, c
1301 __ str(r0, at_tos_p4()); // store b in a
1302 // stack: ..., b, c, a, b, c
1303 }
1304
1305 void TemplateTable::dup2_x2()
1306 {
1307 transition(vtos, vtos);
1308 // stack: ..., a, b, c, d
1309 __ ldr(r2, at_tos()); // load d
1310 __ ldr(r0, at_tos_p1()); // load c
1311 __ push(r0) ; // push c
1312 __ push(r2); // push d
1313 // stack: ..., a, b, c, d, c, d
1314 __ ldr(r0, at_tos_p4()); // load b
1315 __ str(r0, at_tos_p2()); // store b in d
1316 __ str(r2, at_tos_p4()); // store d in b
1317 // stack: ..., a, d, c, b, c, d
1318 __ ldr(r2, at_tos_p5()); // load a
1319 __ ldr(r0, at_tos_p3()); // load c
1320 __ str(r2, at_tos_p3()); // store a in c
1321 __ str(r0, at_tos_p5()); // store c in a
1322 // stack: ..., c, d, a, b, c, d
1323 }
1324
1325 void TemplateTable::swap()
1326 {
1327 transition(vtos, vtos);
1328 // stack: ..., a, b
1329 __ ldr(r2, at_tos_p1()); // load a
1330 __ ldr(r0, at_tos()); // load b
1331 __ str(r2, at_tos()); // store a in b
1332 __ str(r0, at_tos_p1()); // store b in a
1333 // stack: ..., b, a
1334 }
1335
1336 void TemplateTable::iop2(Operation op)
1337 {
1338 transition(itos, itos);
1339 // r0 <== r1 op r0
1340 __ pop_i(r1);
1341 switch (op) {
1342 case add : __ addw(r0, r1, r0); break;
1343 case sub : __ subw(r0, r1, r0); break;
1344 case mul : __ mulw(r0, r1, r0); break;
1345 case _and : __ andw(r0, r1, r0); break;
1346 case _or : __ orrw(r0, r1, r0); break;
1347 case _xor : __ eorw(r0, r1, r0); break;
1348 case shl : __ lslvw(r0, r1, r0); break;
1349 case shr : __ asrvw(r0, r1, r0); break;
1350 case ushr : __ lsrvw(r0, r1, r0);break;
1351 default : ShouldNotReachHere();
1352 }
1353 }
1354
1355 void TemplateTable::lop2(Operation op)
1356 {
1357 transition(ltos, ltos);
1358 // r0 <== r1 op r0
1359 __ pop_l(r1);
1360 switch (op) {
1361 case add : __ add(r0, r1, r0); break;
1362 case sub : __ sub(r0, r1, r0); break;
1363 case mul : __ mul(r0, r1, r0); break;
1364 case _and : __ andr(r0, r1, r0); break;
1365 case _or : __ orr(r0, r1, r0); break;
1366 case _xor : __ eor(r0, r1, r0); break;
1367 default : ShouldNotReachHere();
1368 }
1369 }
1370
1371 void TemplateTable::idiv()
1372 {
1373 transition(itos, itos);
1374 // explicitly check for div0
1375 Label no_div0;
1376 __ cbnzw(r0, no_div0);
1377 __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
1378 __ br(rscratch1);
1379 __ bind(no_div0);
1380 __ pop_i(r1);
1381 // r0 <== r1 idiv r0
1382 __ corrected_idivl(r0, r1, r0, /* want_remainder */ false);
1383 }
1384
1385 void TemplateTable::irem()
1386 {
1387 transition(itos, itos);
1388 // explicitly check for div0
1389 Label no_div0;
1390 __ cbnzw(r0, no_div0);
1391 __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
1392 __ br(rscratch1);
1393 __ bind(no_div0);
1394 __ pop_i(r1);
1395 // r0 <== r1 irem r0
1396 __ corrected_idivl(r0, r1, r0, /* want_remainder */ true);
1397 }
1398
1399 void TemplateTable::lmul()
1400 {
1401 transition(ltos, ltos);
1402 __ pop_l(r1);
1403 __ mul(r0, r0, r1);
1404 }
1405
1406 void TemplateTable::ldiv()
1407 {
1408 transition(ltos, ltos);
1409 // explicitly check for div0
1410 Label no_div0;
1411 __ cbnz(r0, no_div0);
1412 __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
1413 __ br(rscratch1);
1414 __ bind(no_div0);
1415 __ pop_l(r1);
1416 // r0 <== r1 ldiv r0
1417 __ corrected_idivq(r0, r1, r0, /* want_remainder */ false);
1418 }
1419
1420 void TemplateTable::lrem()
1421 {
1422 transition(ltos, ltos);
1423 // explicitly check for div0
1424 Label no_div0;
1425 __ cbnz(r0, no_div0);
1426 __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
1427 __ br(rscratch1);
1428 __ bind(no_div0);
1429 __ pop_l(r1);
1430 // r0 <== r1 lrem r0
1431 __ corrected_idivq(r0, r1, r0, /* want_remainder */ true);
1432 }
1433
1434 void TemplateTable::lshl()
1435 {
1436 transition(itos, ltos);
1437 // shift count is in r0
1438 __ pop_l(r1);
1439 __ lslv(r0, r1, r0);
1440 }
1441
1442 void TemplateTable::lshr()
1443 {
1444 transition(itos, ltos);
1445 // shift count is in r0
1446 __ pop_l(r1);
1447 __ asrv(r0, r1, r0);
1448 }
1449
1450 void TemplateTable::lushr()
1451 {
1452 transition(itos, ltos);
1453 // shift count is in r0
1454 __ pop_l(r1);
1455 __ lsrv(r0, r1, r0);
1456 }
1457
1458 void TemplateTable::fop2(Operation op)
1459 {
1460 transition(ftos, ftos);
1461 switch (op) {
1462 case add:
1463 // n.b. use ldrd because this is a 64 bit slot
1464 __ pop_f(v1);
1465 __ fadds(v0, v1, v0);
1466 break;
1467 case sub:
1468 __ pop_f(v1);
1469 __ fsubs(v0, v1, v0);
1470 break;
1471 case mul:
1472 __ pop_f(v1);
1473 __ fmuls(v0, v1, v0);
1474 break;
1475 case div:
1476 __ pop_f(v1);
1477 __ fdivs(v0, v1, v0);
1478 break;
1479 case rem:
1480 __ fmovs(v1, v0);
1481 __ pop_f(v0);
1482 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
1483 break;
1484 default:
1485 ShouldNotReachHere();
1486 break;
1487 }
1488 }
1489
1490 void TemplateTable::dop2(Operation op)
1491 {
1492 transition(dtos, dtos);
1493 switch (op) {
1494 case add:
1495 // n.b. use ldrd because this is a 64 bit slot
1496 __ pop_d(v1);
1497 __ faddd(v0, v1, v0);
1498 break;
1499 case sub:
1500 __ pop_d(v1);
1501 __ fsubd(v0, v1, v0);
1502 break;
1503 case mul:
1504 __ pop_d(v1);
1505 __ fmuld(v0, v1, v0);
1506 break;
1507 case div:
1508 __ pop_d(v1);
1509 __ fdivd(v0, v1, v0);
1510 break;
1511 case rem:
1512 __ fmovd(v1, v0);
1513 __ pop_d(v0);
1514 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
1515 break;
1516 default:
1517 ShouldNotReachHere();
1518 break;
1519 }
1520 }
1521
1522 void TemplateTable::ineg()
1523 {
1524 transition(itos, itos);
1525 __ negw(r0, r0);
1526
1527 }
1528
1529 void TemplateTable::lneg()
1530 {
1531 transition(ltos, ltos);
1532 __ neg(r0, r0);
1533 }
1534
1535 void TemplateTable::fneg()
1536 {
1537 transition(ftos, ftos);
1538 __ fnegs(v0, v0);
1539 }
1540
1541 void TemplateTable::dneg()
1542 {
1543 transition(dtos, dtos);
1544 __ fnegd(v0, v0);
1545 }
1546
1547 void TemplateTable::iinc()
1548 {
1549 transition(vtos, vtos);
1550 __ load_signed_byte(r1, at_bcp(2)); // get constant
1551 locals_index(r2);
1552 __ ldr(r0, iaddress(r2));
1553 __ addw(r0, r0, r1);
1554 __ str(r0, iaddress(r2));
1555 }
1556
1557 void TemplateTable::wide_iinc()
1558 {
1559 transition(vtos, vtos);
1560 // __ mov(r1, zr);
1561 __ ldrw(r1, at_bcp(2)); // get constant and index
1562 __ rev16(r1, r1);
1563 __ ubfx(r2, r1, 0, 16);
1564 __ neg(r2, r2);
1565 __ sbfx(r1, r1, 16, 16);
1566 __ ldr(r0, iaddress(r2));
1567 __ addw(r0, r0, r1);
1568 __ str(r0, iaddress(r2));
1569 }
1570
1571 void TemplateTable::convert()
1572 {
1573 // Checking
1574 #ifdef ASSERT
1575 {
1576 TosState tos_in = ilgl;
1577 TosState tos_out = ilgl;
1578 switch (bytecode()) {
1579 case Bytecodes::_i2l: // fall through
1580 case Bytecodes::_i2f: // fall through
1581 case Bytecodes::_i2d: // fall through
1582 case Bytecodes::_i2b: // fall through
1583 case Bytecodes::_i2c: // fall through
1584 case Bytecodes::_i2s: tos_in = itos; break;
1585 case Bytecodes::_l2i: // fall through
1586 case Bytecodes::_l2f: // fall through
1587 case Bytecodes::_l2d: tos_in = ltos; break;
1588 case Bytecodes::_f2i: // fall through
1589 case Bytecodes::_f2l: // fall through
1590 case Bytecodes::_f2d: tos_in = ftos; break;
1591 case Bytecodes::_d2i: // fall through
1592 case Bytecodes::_d2l: // fall through
1593 case Bytecodes::_d2f: tos_in = dtos; break;
1594 default : ShouldNotReachHere();
1595 }
1596 switch (bytecode()) {
1597 case Bytecodes::_l2i: // fall through
1598 case Bytecodes::_f2i: // fall through
1599 case Bytecodes::_d2i: // fall through
1600 case Bytecodes::_i2b: // fall through
1601 case Bytecodes::_i2c: // fall through
1602 case Bytecodes::_i2s: tos_out = itos; break;
1603 case Bytecodes::_i2l: // fall through
1604 case Bytecodes::_f2l: // fall through
1605 case Bytecodes::_d2l: tos_out = ltos; break;
1606 case Bytecodes::_i2f: // fall through
1607 case Bytecodes::_l2f: // fall through
1608 case Bytecodes::_d2f: tos_out = ftos; break;
1609 case Bytecodes::_i2d: // fall through
1610 case Bytecodes::_l2d: // fall through
1611 case Bytecodes::_f2d: tos_out = dtos; break;
1612 default : ShouldNotReachHere();
1613 }
1614 transition(tos_in, tos_out);
1615 }
1616 #endif // ASSERT
1617 // static const int64_t is_nan = 0x8000000000000000L;
1618
1619 // Conversion
1620 switch (bytecode()) {
1621 case Bytecodes::_i2l:
1622 __ sxtw(r0, r0);
1623 break;
1624 case Bytecodes::_i2f:
1625 __ scvtfws(v0, r0);
1626 break;
1627 case Bytecodes::_i2d:
1628 __ scvtfwd(v0, r0);
1629 break;
1630 case Bytecodes::_i2b:
1631 __ sxtbw(r0, r0);
1632 break;
1633 case Bytecodes::_i2c:
1634 __ uxthw(r0, r0);
1635 break;
1636 case Bytecodes::_i2s:
1637 __ sxthw(r0, r0);
1638 break;
1639 case Bytecodes::_l2i:
1640 __ uxtw(r0, r0);
1641 break;
1642 case Bytecodes::_l2f:
1643 __ scvtfs(v0, r0);
1644 break;
1645 case Bytecodes::_l2d:
1646 __ scvtfd(v0, r0);
1647 break;
1648 case Bytecodes::_f2i:
1649 {
1650 Label L_Okay;
1651 __ clear_fpsr();
1652 __ fcvtzsw(r0, v0);
1653 __ get_fpsr(r1);
1654 __ cbzw(r1, L_Okay);
1655 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i));
1656 __ bind(L_Okay);
1657 }
1658 break;
1659 case Bytecodes::_f2l:
1660 {
1661 Label L_Okay;
1662 __ clear_fpsr();
1663 __ fcvtzs(r0, v0);
1664 __ get_fpsr(r1);
1665 __ cbzw(r1, L_Okay);
1666 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l));
1667 __ bind(L_Okay);
1668 }
1669 break;
1670 case Bytecodes::_f2d:
1671 __ fcvts(v0, v0);
1672 break;
1673 case Bytecodes::_d2i:
1674 {
1675 Label L_Okay;
1676 __ clear_fpsr();
1677 __ fcvtzdw(r0, v0);
1678 __ get_fpsr(r1);
1679 __ cbzw(r1, L_Okay);
1680 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i));
1681 __ bind(L_Okay);
1682 }
1683 break;
1684 case Bytecodes::_d2l:
1685 {
1686 Label L_Okay;
1687 __ clear_fpsr();
1688 __ fcvtzd(r0, v0);
1689 __ get_fpsr(r1);
1690 __ cbzw(r1, L_Okay);
1691 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
1692 __ bind(L_Okay);
1693 }
1694 break;
1695 case Bytecodes::_d2f:
1696 __ fcvtd(v0, v0);
1697 break;
1698 default:
1699 ShouldNotReachHere();
1700 }
1701 }
1702
1703 void TemplateTable::lcmp()
1704 {
1705 transition(ltos, itos);
1706 Label done;
1707 __ pop_l(r1);
1708 __ cmp(r1, r0);
1709 __ mov(r0, (u_int64_t)-1L);
1710 __ br(Assembler::LT, done);
1711 // __ mov(r0, 1UL);
1712 // __ csel(r0, r0, zr, Assembler::NE);
1713 // and here is a faster way
1714 __ csinc(r0, zr, zr, Assembler::EQ);
1715 __ bind(done);
1716 }
1717
1718 void TemplateTable::float_cmp(bool is_float, int unordered_result)
1719 {
1720 Label done;
1721 if (is_float) {
1722 // XXX get rid of pop here, use ... reg, mem32
1723 __ pop_f(v1);
1724 __ fcmps(v1, v0);
1725 } else {
1726 // XXX get rid of pop here, use ... reg, mem64
1727 __ pop_d(v1);
1728 __ fcmpd(v1, v0);
1729 }
1730 if (unordered_result < 0) {
1731 // we want -1 for unordered or less than, 0 for equal and 1 for
1732 // greater than.
1733 __ mov(r0, (u_int64_t)-1L);
1734 // for FP LT tests less than or unordered
1735 __ br(Assembler::LT, done);
1736 // install 0 for EQ otherwise 1
1737 __ csinc(r0, zr, zr, Assembler::EQ);
1738 } else {
1739 // we want -1 for less than, 0 for equal and 1 for unordered or
1740 // greater than.
1741 __ mov(r0, 1L);
1742 // for FP HI tests greater than or unordered
1743 __ br(Assembler::HI, done);
1744 // install 0 for EQ otherwise ~0
1745 __ csinv(r0, zr, zr, Assembler::EQ);
1746
1747 }
1748 __ bind(done);
1749 }
1750
1751 void TemplateTable::branch(bool is_jsr, bool is_wide)
1752 {
1753 // We might be moving to a safepoint. The thread which calls
1754 // Interpreter::notice_safepoints() will effectively flush its cache
1755 // when it makes a system call, but we need to do something to
1756 // ensure that we see the changed dispatch table.
1757 __ membar(MacroAssembler::LoadLoad);
1758
1759 __ profile_taken_branch(r0, r1);
1760 const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
1761 InvocationCounter::counter_offset();
1762 const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
1763 InvocationCounter::counter_offset();
1764
1765 // load branch displacement
1766 if (!is_wide) {
1767 __ ldrh(r2, at_bcp(1));
1768 __ rev16(r2, r2);
1769 // sign extend the 16 bit value in r2
1770 __ sbfm(r2, r2, 0, 15);
1771 } else {
1772 __ ldrw(r2, at_bcp(1));
1773 __ revw(r2, r2);
1774 // sign extend the 32 bit value in r2
1775 __ sbfm(r2, r2, 0, 31);
1776 }
1777
1778 // Handle all the JSR stuff here, then exit.
1779 // It's much shorter and cleaner than intermingling with the non-JSR
1780 // normal-branch stuff occurring below.
1781
1782 if (is_jsr) {
1783 // Pre-load the next target bytecode into rscratch1
1784 __ load_unsigned_byte(rscratch1, Address(rbcp, r2));
1785 // compute return address as bci
1786 __ ldr(rscratch2, Address(rmethod, Method::const_offset()));
1787 __ add(rscratch2, rscratch2,
1788 in_bytes(ConstMethod::codes_offset()) - (is_wide ? 5 : 3));
1789 __ sub(r1, rbcp, rscratch2);
1790 __ push_i(r1);
1791 // Adjust the bcp by the 16-bit displacement in r2
1792 __ add(rbcp, rbcp, r2);
1793 __ dispatch_only(vtos, /*generate_poll*/true);
1794 return;
1795 }
1796
1797 // Normal (non-jsr) branch handling
1798
1799 // Adjust the bcp by the displacement in r2
1800 __ add(rbcp, rbcp, r2);
1801
1802 assert(UseLoopCounter || !UseOnStackReplacement,
1803 "on-stack-replacement requires loop counters");
1804 Label backedge_counter_overflow;
1805 Label profile_method;
1806 Label dispatch;
1807 if (UseLoopCounter) {
1808 // increment backedge counter for backward branches
1809 // r0: MDO
1810 // w1: MDO bumped taken-count
1811 // r2: target offset
1812 __ cmp(r2, zr);
1813 __ br(Assembler::GT, dispatch); // count only if backward branch
1814
1815 // ECN: FIXME: This code smells
1816 // check if MethodCounters exists
1817 Label has_counters;
1818 __ ldr(rscratch1, Address(rmethod, Method::method_counters_offset()));
1819 __ cbnz(rscratch1, has_counters);
1820 __ push(r0);
1821 __ push(r1);
1822 __ push(r2);
1823 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
1824 InterpreterRuntime::build_method_counters), rmethod);
1825 __ pop(r2);
1826 __ pop(r1);
1827 __ pop(r0);
1828 __ ldr(rscratch1, Address(rmethod, Method::method_counters_offset()));
1829 __ cbz(rscratch1, dispatch); // No MethodCounters allocated, OutOfMemory
1830 __ bind(has_counters);
1831
1832 if (TieredCompilation) {
1833 Label no_mdo;
1834 int increment = InvocationCounter::count_increment;
1835 if (ProfileInterpreter) {
1836 // Are we profiling?
1837 __ ldr(r1, Address(rmethod, in_bytes(Method::method_data_offset())));
1838 __ cbz(r1, no_mdo);
1839 // Increment the MDO backedge counter
1840 const Address mdo_backedge_counter(r1, in_bytes(MethodData::backedge_counter_offset()) +
1841 in_bytes(InvocationCounter::counter_offset()));
1842 const Address mask(r1, in_bytes(MethodData::backedge_mask_offset()));
1843 __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
1844 r0, rscratch1, false, Assembler::EQ,
1845 UseOnStackReplacement ? &backedge_counter_overflow : &dispatch);
1846 __ b(dispatch);
1847 }
1848 __ bind(no_mdo);
1849 // Increment backedge counter in MethodCounters*
1850 __ ldr(rscratch1, Address(rmethod, Method::method_counters_offset()));
1851 const Address mask(rscratch1, in_bytes(MethodCounters::backedge_mask_offset()));
1852 __ increment_mask_and_jump(Address(rscratch1, be_offset), increment, mask,
1853 r0, rscratch2, false, Assembler::EQ,
1854 UseOnStackReplacement ? &backedge_counter_overflow : &dispatch);
1855 } else { // not TieredCompilation
1856 // increment counter
1857 __ ldr(rscratch2, Address(rmethod, Method::method_counters_offset()));
1858 __ ldrw(r0, Address(rscratch2, be_offset)); // load backedge counter
1859 __ addw(rscratch1, r0, InvocationCounter::count_increment); // increment counter
1860 __ strw(rscratch1, Address(rscratch2, be_offset)); // store counter
1861
1862 __ ldrw(r0, Address(rscratch2, inv_offset)); // load invocation counter
1863 __ andw(r0, r0, (unsigned)InvocationCounter::count_mask_value); // and the status bits
1864 __ addw(r0, r0, rscratch1); // add both counters
1865
1866 if (ProfileInterpreter) {
1867 // Test to see if we should create a method data oop
1868 __ ldrw(rscratch1, Address(rscratch2, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
1869 __ cmpw(r0, rscratch1);
1870 __ br(Assembler::LT, dispatch);
1871
1872 // if no method data exists, go to profile method
1873 __ test_method_data_pointer(r0, profile_method);
1874
1875 if (UseOnStackReplacement) {
1876 // check for overflow against w1 which is the MDO taken count
1877 __ ldrw(rscratch1, Address(rscratch2, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
1878 __ cmpw(r1, rscratch1);
1879 __ br(Assembler::LO, dispatch); // Intel == Assembler::below
1880
1881 // When ProfileInterpreter is on, the backedge_count comes
1882 // from the MethodData*, which value does not get reset on
1883 // the call to frequency_counter_overflow(). To avoid
1884 // excessive calls to the overflow routine while the method is
1885 // being compiled, add a second test to make sure the overflow
1886 // function is called only once every overflow_frequency.
1887 const int overflow_frequency = 1024;
1888 __ andsw(r1, r1, overflow_frequency - 1);
1889 __ br(Assembler::EQ, backedge_counter_overflow);
1890
1891 }
1892 } else {
1893 if (UseOnStackReplacement) {
1894 // check for overflow against w0, which is the sum of the
1895 // counters
1896 __ ldrw(rscratch1, Address(rscratch2, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
1897 __ cmpw(r0, rscratch1);
1898 __ br(Assembler::HS, backedge_counter_overflow); // Intel == Assembler::aboveEqual
1899 }
1900 }
1901 }
1902 __ bind(dispatch);
1903 }
1904
1905 // Pre-load the next target bytecode into rscratch1
1906 __ load_unsigned_byte(rscratch1, Address(rbcp, 0));
1907
1908 // continue with the bytecode @ target
1909 // rscratch1: target bytecode
1910 // rbcp: target bcp
1911 __ dispatch_only(vtos, /*generate_poll*/true);
1912
1913 if (UseLoopCounter) {
1914 if (ProfileInterpreter) {
1915 // Out-of-line code to allocate method data oop.
1916 __ bind(profile_method);
1917 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
1918 __ load_unsigned_byte(r1, Address(rbcp, 0)); // restore target bytecode
1919 __ set_method_data_pointer_for_bcp();
1920 __ b(dispatch);
1921 }
1922
1923 if (UseOnStackReplacement) {
1924 // invocation counter overflow
1925 __ bind(backedge_counter_overflow);
1926 __ neg(r2, r2);
1927 __ add(r2, r2, rbcp); // branch bcp
1928 // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
1929 __ call_VM(noreg,
1930 CAST_FROM_FN_PTR(address,
1931 InterpreterRuntime::frequency_counter_overflow),
1932 r2);
1933 __ load_unsigned_byte(r1, Address(rbcp, 0)); // restore target bytecode
1934
1935 // r0: osr nmethod (osr ok) or NULL (osr not possible)
1936 // w1: target bytecode
1937 // r2: scratch
1938 __ cbz(r0, dispatch); // test result -- no osr if null
1939 // nmethod may have been invalidated (VM may block upon call_VM return)
1940 __ ldrb(r2, Address(r0, nmethod::state_offset()));
1941 if (nmethod::in_use != 0)
1942 __ sub(r2, r2, nmethod::in_use);
1943 __ cbnz(r2, dispatch);
1944
1945 // We have the address of an on stack replacement routine in r0
1946 // We need to prepare to execute the OSR method. First we must
1947 // migrate the locals and monitors off of the stack.
1948
1949 __ mov(r19, r0); // save the nmethod
1950
1951 call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
1952
1953 // r0 is OSR buffer, move it to expected parameter location
1954 __ mov(j_rarg0, r0);
1955
1956 // remove activation
1957 // get sender esp
1958 __ ldr(esp,
1959 Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
1960 // remove frame anchor
1961 __ leave();
1962 // Ensure compiled code always sees stack at proper alignment
1963 __ andr(sp, esp, -16);
1964
1965 // and begin the OSR nmethod
1966 __ ldr(rscratch1, Address(r19, nmethod::osr_entry_point_offset()));
1967 __ br(rscratch1);
1968 }
1969 }
1970 }
1971
1972
1973 void TemplateTable::if_0cmp(Condition cc)
1974 {
1975 transition(itos, vtos);
1976 // assume branch is more often taken than not (loops use backward branches)
1977 Label not_taken;
1978 if (cc == equal)
1979 __ cbnzw(r0, not_taken);
1980 else if (cc == not_equal)
1981 __ cbzw(r0, not_taken);
1982 else {
1983 __ andsw(zr, r0, r0);
1984 __ br(j_not(cc), not_taken);
1985 }
1986
1987 branch(false, false);
1988 __ bind(not_taken);
1989 __ profile_not_taken_branch(r0);
1990 }
1991
1992 void TemplateTable::if_icmp(Condition cc)
1993 {
1994 transition(itos, vtos);
1995 // assume branch is more often taken than not (loops use backward branches)
1996 Label not_taken;
1997 __ pop_i(r1);
1998 __ cmpw(r1, r0, Assembler::LSL);
1999 __ br(j_not(cc), not_taken);
2000 branch(false, false);
2001 __ bind(not_taken);
2002 __ profile_not_taken_branch(r0);
2003 }
2004
2005 void TemplateTable::if_nullcmp(Condition cc)
2006 {
2007 transition(atos, vtos);
2008 // assume branch is more often taken than not (loops use backward branches)
2009 Label not_taken;
2010 if (cc == equal)
2011 __ cbnz(r0, not_taken);
2012 else
2013 __ cbz(r0, not_taken);
2014 branch(false, false);
2015 __ bind(not_taken);
2016 __ profile_not_taken_branch(r0);
2017 }
2018
2019 void TemplateTable::if_acmp(Condition cc)
2020 {
2021 transition(atos, vtos);
2022 // assume branch is more often taken than not (loops use backward branches)
2023 Label not_taken;
2024 __ pop_ptr(r1);
2025 __ cmpoop(r1, r0);
2026 __ br(j_not(cc), not_taken);
2027 branch(false, false);
2028 __ bind(not_taken);
2029 __ profile_not_taken_branch(r0);
2030 }
2031
2032 void TemplateTable::ret() {
2033 transition(vtos, vtos);
2034 // We might be moving to a safepoint. The thread which calls
2035 // Interpreter::notice_safepoints() will effectively flush its cache
2036 // when it makes a system call, but we need to do something to
2037 // ensure that we see the changed dispatch table.
2038 __ membar(MacroAssembler::LoadLoad);
2039
2040 locals_index(r1);
2041 __ ldr(r1, aaddress(r1)); // get return bci, compute return bcp
2042 __ profile_ret(r1, r2);
2043 __ ldr(rbcp, Address(rmethod, Method::const_offset()));
2044 __ lea(rbcp, Address(rbcp, r1));
2045 __ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset()));
2046 __ dispatch_next(vtos, 0, /*generate_poll*/true);
2047 }
2048
2049 void TemplateTable::wide_ret() {
2050 transition(vtos, vtos);
2051 locals_index_wide(r1);
2052 __ ldr(r1, aaddress(r1)); // get return bci, compute return bcp
2053 __ profile_ret(r1, r2);
2054 __ ldr(rbcp, Address(rmethod, Method::const_offset()));
2055 __ lea(rbcp, Address(rbcp, r1));
2056 __ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset()));
2057 __ dispatch_next(vtos, 0, /*generate_poll*/true);
2058 }
2059
2060
2061 void TemplateTable::tableswitch() {
2062 Label default_case, continue_execution;
2063 transition(itos, vtos);
2064 // align rbcp
2065 __ lea(r1, at_bcp(BytesPerInt));
2066 __ andr(r1, r1, -BytesPerInt);
2067 // load lo & hi
2068 __ ldrw(r2, Address(r1, BytesPerInt));
2069 __ ldrw(r3, Address(r1, 2 * BytesPerInt));
2070 __ rev32(r2, r2);
2071 __ rev32(r3, r3);
2072 // check against lo & hi
2073 __ cmpw(r0, r2);
2074 __ br(Assembler::LT, default_case);
2075 __ cmpw(r0, r3);
2076 __ br(Assembler::GT, default_case);
2077 // lookup dispatch offset
2078 __ subw(r0, r0, r2);
2079 __ lea(r3, Address(r1, r0, Address::uxtw(2)));
2080 __ ldrw(r3, Address(r3, 3 * BytesPerInt));
2081 __ profile_switch_case(r0, r1, r2);
2082 // continue execution
2083 __ bind(continue_execution);
2084 __ rev32(r3, r3);
2085 __ load_unsigned_byte(rscratch1, Address(rbcp, r3, Address::sxtw(0)));
2086 __ add(rbcp, rbcp, r3, ext::sxtw);
2087 __ dispatch_only(vtos, /*generate_poll*/true);
2088 // handle default
2089 __ bind(default_case);
2090 __ profile_switch_default(r0);
2091 __ ldrw(r3, Address(r1, 0));
2092 __ b(continue_execution);
2093 }
2094
2095 void TemplateTable::lookupswitch() {
2096 transition(itos, itos);
2097 __ stop("lookupswitch bytecode should have been rewritten");
2098 }
2099
2100 void TemplateTable::fast_linearswitch() {
2101 transition(itos, vtos);
2102 Label loop_entry, loop, found, continue_execution;
2103 // bswap r0 so we can avoid bswapping the table entries
2104 __ rev32(r0, r0);
2105 // align rbcp
2106 __ lea(r19, at_bcp(BytesPerInt)); // btw: should be able to get rid of
2107 // this instruction (change offsets
2108 // below)
2109 __ andr(r19, r19, -BytesPerInt);
2110 // set counter
2111 __ ldrw(r1, Address(r19, BytesPerInt));
2112 __ rev32(r1, r1);
2113 __ b(loop_entry);
2114 // table search
2115 __ bind(loop);
2116 __ lea(rscratch1, Address(r19, r1, Address::lsl(3)));
2117 __ ldrw(rscratch1, Address(rscratch1, 2 * BytesPerInt));
2118 __ cmpw(r0, rscratch1);
2119 __ br(Assembler::EQ, found);
2120 __ bind(loop_entry);
2121 __ subs(r1, r1, 1);
2122 __ br(Assembler::PL, loop);
2123 // default case
2124 __ profile_switch_default(r0);
2125 __ ldrw(r3, Address(r19, 0));
2126 __ b(continue_execution);
2127 // entry found -> get offset
2128 __ bind(found);
2129 __ lea(rscratch1, Address(r19, r1, Address::lsl(3)));
2130 __ ldrw(r3, Address(rscratch1, 3 * BytesPerInt));
2131 __ profile_switch_case(r1, r0, r19);
2132 // continue execution
2133 __ bind(continue_execution);
2134 __ rev32(r3, r3);
2135 __ add(rbcp, rbcp, r3, ext::sxtw);
2136 __ ldrb(rscratch1, Address(rbcp, 0));
2137 __ dispatch_only(vtos, /*generate_poll*/true);
2138 }
2139
2140 void TemplateTable::fast_binaryswitch() {
2141 transition(itos, vtos);
2142 // Implementation using the following core algorithm:
2143 //
2144 // int binary_search(int key, LookupswitchPair* array, int n) {
2145 // // Binary search according to "Methodik des Programmierens" by
2146 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
2147 // int i = 0;
2148 // int j = n;
2149 // while (i+1 < j) {
2150 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
2151 // // with Q: for all i: 0 <= i < n: key < a[i]
2152 // // where a stands for the array and assuming that the (inexisting)
2153 // // element a[n] is infinitely big.
2154 // int h = (i + j) >> 1;
2155 // // i < h < j
2156 // if (key < array[h].fast_match()) {
2157 // j = h;
2158 // } else {
2159 // i = h;
2160 // }
2161 // }
2162 // // R: a[i] <= key < a[i+1] or Q
2163 // // (i.e., if key is within array, i is the correct index)
2164 // return i;
2165 // }
2166
2167 // Register allocation
2168 const Register key = r0; // already set (tosca)
2169 const Register array = r1;
2170 const Register i = r2;
2171 const Register j = r3;
2172 const Register h = rscratch1;
2173 const Register temp = rscratch2;
2174
2175 // Find array start
2176 __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to
2177 // get rid of this
2178 // instruction (change
2179 // offsets below)
2180 __ andr(array, array, -BytesPerInt);
2181
2182 // Initialize i & j
2183 __ mov(i, 0); // i = 0;
2184 __ ldrw(j, Address(array, -BytesPerInt)); // j = length(array);
2185
2186 // Convert j into native byteordering
2187 __ rev32(j, j);
2188
2189 // And start
2190 Label entry;
2191 __ b(entry);
2192
2193 // binary search loop
2194 {
2195 Label loop;
2196 __ bind(loop);
2197 // int h = (i + j) >> 1;
2198 __ addw(h, i, j); // h = i + j;
2199 __ lsrw(h, h, 1); // h = (i + j) >> 1;
2200 // if (key < array[h].fast_match()) {
2201 // j = h;
2202 // } else {
2203 // i = h;
2204 // }
2205 // Convert array[h].match to native byte-ordering before compare
2206 __ ldr(temp, Address(array, h, Address::lsl(3)));
2207 __ rev32(temp, temp);
2208 __ cmpw(key, temp);
2209 // j = h if (key < array[h].fast_match())
2210 __ csel(j, h, j, Assembler::LT);
2211 // i = h if (key >= array[h].fast_match())
2212 __ csel(i, h, i, Assembler::GE);
2213 // while (i+1 < j)
2214 __ bind(entry);
2215 __ addw(h, i, 1); // i+1
2216 __ cmpw(h, j); // i+1 < j
2217 __ br(Assembler::LT, loop);
2218 }
2219
2220 // end of binary search, result index is i (must check again!)
2221 Label default_case;
2222 // Convert array[i].match to native byte-ordering before compare
2223 __ ldr(temp, Address(array, i, Address::lsl(3)));
2224 __ rev32(temp, temp);
2225 __ cmpw(key, temp);
2226 __ br(Assembler::NE, default_case);
2227
2228 // entry found -> j = offset
2229 __ add(j, array, i, ext::uxtx, 3);
2230 __ ldrw(j, Address(j, BytesPerInt));
2231 __ profile_switch_case(i, key, array);
2232 __ rev32(j, j);
2233 __ load_unsigned_byte(rscratch1, Address(rbcp, j, Address::sxtw(0)));
2234 __ lea(rbcp, Address(rbcp, j, Address::sxtw(0)));
2235 __ dispatch_only(vtos, /*generate_poll*/true);
2236
2237 // default case -> j = default offset
2238 __ bind(default_case);
2239 __ profile_switch_default(i);
2240 __ ldrw(j, Address(array, -2 * BytesPerInt));
2241 __ rev32(j, j);
2242 __ load_unsigned_byte(rscratch1, Address(rbcp, j, Address::sxtw(0)));
2243 __ lea(rbcp, Address(rbcp, j, Address::sxtw(0)));
2244 __ dispatch_only(vtos, /*generate_poll*/true);
2245 }
2246
2247
2248 void TemplateTable::_return(TosState state)
2249 {
2250 transition(state, state);
2251 assert(_desc->calls_vm(),
2252 "inconsistent calls_vm information"); // call in remove_activation
2253
2254 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2255 assert(state == vtos, "only valid state");
2256
2257 __ ldr(c_rarg1, aaddress(0));
2258 __ load_klass(r3, c_rarg1);
2259 __ ldrw(r3, Address(r3, Klass::access_flags_offset()));
2260 Label skip_register_finalizer;
2261 __ tbz(r3, exact_log2(JVM_ACC_HAS_FINALIZER), skip_register_finalizer);
2262
2263 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), c_rarg1);
2264
2265 __ bind(skip_register_finalizer);
2266 }
2267
2268 // Issue a StoreStore barrier after all stores but before return
2269 // from any constructor for any class with a final field. We don't
2270 // know if this is a finalizer, so we always do so.
2271 if (_desc->bytecode() == Bytecodes::_return)
2272 __ membar(MacroAssembler::StoreStore);
2273
2274 // Narrow result if state is itos but result type is smaller.
2275 // Need to narrow in the return bytecode rather than in generate_return_entry
2276 // since compiled code callers expect the result to already be narrowed.
2277 if (state == itos) {
2278 __ narrow(r0);
2279 }
2280
2281 __ remove_activation(state);
2282 __ ret(lr);
2283 }
2284
2285 // ----------------------------------------------------------------------------
2286 // Volatile variables demand their effects be made known to all CPU's
2287 // in order. Store buffers on most chips allow reads & writes to
2288 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
2289 // without some kind of memory barrier (i.e., it's not sufficient that
2290 // the interpreter does not reorder volatile references, the hardware
2291 // also must not reorder them).
2292 //
2293 // According to the new Java Memory Model (JMM):
2294 // (1) All volatiles are serialized wrt to each other. ALSO reads &
2295 // writes act as aquire & release, so:
2296 // (2) A read cannot let unrelated NON-volatile memory refs that
2297 // happen after the read float up to before the read. It's OK for
2298 // non-volatile memory refs that happen before the volatile read to
2299 // float down below it.
2300 // (3) Similar a volatile write cannot let unrelated NON-volatile
2301 // memory refs that happen BEFORE the write float down to after the
2302 // write. It's OK for non-volatile memory refs that happen after the
2303 // volatile write to float up before it.
2304 //
2305 // We only put in barriers around volatile refs (they are expensive),
2306 // not _between_ memory refs (that would require us to track the
2307 // flavor of the previous memory refs). Requirements (2) and (3)
2308 // require some barriers before volatile stores and after volatile
2309 // loads. These nearly cover requirement (1) but miss the
2310 // volatile-store-volatile-load case. This final case is placed after
2311 // volatile-stores although it could just as well go before
2312 // volatile-loads.
2313
2314 void TemplateTable::resolve_cache_and_index(int byte_no,
2315 Register Rcache,
2316 Register index,
2317 size_t index_size) {
2318 const Register temp = r19;
2319 assert_different_registers(Rcache, index, temp);
2320
2321 Label resolved, clinit_barrier_slow;
2322
2323 Bytecodes::Code code = bytecode();
2324 switch (code) {
2325 case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
2326 case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
2327 default: break;
2328 }
2329
2330 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2331 __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
2332 __ subs(zr, temp, (int) code); // have we resolved this bytecode?
2333 __ br(Assembler::EQ, resolved);
2334
2335 // resolve first time through
2336 // Class initialization barrier slow path lands here as well.
2337 __ bind(clinit_barrier_slow);
2338 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2339 __ mov(temp, (int) code);
2340 __ call_VM(noreg, entry, temp);
2341
2342 // Update registers with resolved info
2343 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2344 // n.b. unlike x86 Rcache is now rcpool plus the indexed offset
2345 // so all clients ofthis method must be modified accordingly
2346 __ bind(resolved);
2347
2348 // Class initialization barrier for static methods
2349 if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
2350 __ load_resolved_method_at_index(byte_no, temp, Rcache);
2351 __ load_method_holder(temp, temp);
2352 __ clinit_barrier(temp, rscratch1, NULL, &clinit_barrier_slow);
2353 }
2354 }
2355
2356 // The Rcache and index registers must be set before call
2357 // n.b unlike x86 cache already includes the index offset
2358 void TemplateTable::load_field_cp_cache_entry(Register obj,
2359 Register cache,
2360 Register index,
2361 Register off,
2362 Register flags,
2363 bool is_static = false) {
2364 assert_different_registers(cache, index, flags, off);
2365
2366 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2367 // Field offset
2368 __ ldr(off, Address(cache, in_bytes(cp_base_offset +
2369 ConstantPoolCacheEntry::f2_offset())));
2370 // Flags
2371 __ ldrw(flags, Address(cache, in_bytes(cp_base_offset +
2372 ConstantPoolCacheEntry::flags_offset())));
2373
2374 // klass overwrite register
2375 if (is_static) {
2376 __ ldr(obj, Address(cache, in_bytes(cp_base_offset +
2377 ConstantPoolCacheEntry::f1_offset())));
2378 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2379 __ ldr(obj, Address(obj, mirror_offset));
2380 __ resolve_oop_handle(obj);
2381 }
2382 }
2383
2384 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2385 Register method,
2386 Register itable_index,
2387 Register flags,
2388 bool is_invokevirtual,
2389 bool is_invokevfinal, /*unused*/
2390 bool is_invokedynamic) {
2391 // setup registers
2392 const Register cache = rscratch2;
2393 const Register index = r4;
2394 assert_different_registers(method, flags);
2395 assert_different_registers(method, cache, index);
2396 assert_different_registers(itable_index, flags);
2397 assert_different_registers(itable_index, cache, index);
2398 // determine constant pool cache field offsets
2399 assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2400 const int method_offset = in_bytes(
2401 ConstantPoolCache::base_offset() +
2402 (is_invokevirtual
2403 ? ConstantPoolCacheEntry::f2_offset()
2404 : ConstantPoolCacheEntry::f1_offset()));
2405 const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
2406 ConstantPoolCacheEntry::flags_offset());
2407 // access constant pool cache fields
2408 const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
2409 ConstantPoolCacheEntry::f2_offset());
2410
2411 size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
2412 resolve_cache_and_index(byte_no, cache, index, index_size);
2413 __ ldr(method, Address(cache, method_offset));
2414
2415 if (itable_index != noreg) {
2416 __ ldr(itable_index, Address(cache, index_offset));
2417 }
2418 __ ldrw(flags, Address(cache, flags_offset));
2419 }
2420
2421
2422 // The registers cache and index expected to be set before call.
2423 // Correct values of the cache and index registers are preserved.
2424 void TemplateTable::jvmti_post_field_access(Register cache, Register index,
2425 bool is_static, bool has_tos) {
2426 // do the JVMTI work here to avoid disturbing the register state below
2427 // We use c_rarg registers here because we want to use the register used in
2428 // the call to the VM
2429 if (JvmtiExport::can_post_field_access()) {
2430 // Check to see if a field access watch has been set before we
2431 // take the time to call into the VM.
2432 Label L1;
2433 assert_different_registers(cache, index, r0);
2434 __ lea(rscratch1, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2435 __ ldrw(r0, Address(rscratch1));
2436 __ cbzw(r0, L1);
2437
2438 __ get_cache_and_index_at_bcp(c_rarg2, c_rarg3, 1);
2439 __ lea(c_rarg2, Address(c_rarg2, in_bytes(ConstantPoolCache::base_offset())));
2440
2441 if (is_static) {
2442 __ mov(c_rarg1, zr); // NULL object reference
2443 } else {
2444 __ ldr(c_rarg1, at_tos()); // get object pointer without popping it
2445 __ verify_oop(c_rarg1);
2446 }
2447 // c_rarg1: object pointer or NULL
2448 // c_rarg2: cache entry pointer
2449 // c_rarg3: jvalue object on the stack
2450 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
2451 InterpreterRuntime::post_field_access),
2452 c_rarg1, c_rarg2, c_rarg3);
2453 __ get_cache_and_index_at_bcp(cache, index, 1);
2454 __ bind(L1);
2455 }
2456 }
2457
2458 void TemplateTable::pop_and_check_object(Register r)
2459 {
2460 __ pop_ptr(r);
2461 __ null_check(r); // for field access must check obj.
2462 __ verify_oop(r);
2463 }
2464
2465 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc)
2466 {
2467 const Register cache = r2;
2468 const Register index = r3;
2469 const Register obj = r4;
2470 const Register off = r19;
2471 const Register flags = r0;
2472 const Register raw_flags = r6;
2473 const Register bc = r4; // uses same reg as obj, so don't mix them
2474
2475 resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2476 jvmti_post_field_access(cache, index, is_static, false);
2477 load_field_cp_cache_entry(obj, cache, index, off, raw_flags, is_static);
2478
2479 if (!is_static) {
2480 // obj is on the stack
2481 pop_and_check_object(obj);
2482 }
2483
2484 // 8179954: We need to make sure that the code generated for
2485 // volatile accesses forms a sequentially-consistent set of
2486 // operations when combined with STLR and LDAR. Without a leading
2487 // membar it's possible for a simple Dekker test to fail if loads
2488 // use LDR;DMB but stores use STLR. This can happen if C2 compiles
2489 // the stores in one method and we interpret the loads in another.
2490 if (!is_c1_or_interpreter_only()){
2491 Label notVolatile;
2492 __ tbz(raw_flags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
2493 __ membar(MacroAssembler::AnyAny);
2494 __ bind(notVolatile);
2495 }
2496
2497 const Address field(obj, off);
2498
2499 Label Done, notByte, notBool, notInt, notShort, notChar,
2500 notLong, notFloat, notObj, notDouble;
2501
2502 // x86 uses a shift and mask or wings it with a shift plus assert
2503 // the mask is not needed. aarch64 just uses bitfield extract
2504 __ ubfxw(flags, raw_flags, ConstantPoolCacheEntry::tos_state_shift,
2505 ConstantPoolCacheEntry::tos_state_bits);
2506
2507 assert(btos == 0, "change code, btos != 0");
2508 __ cbnz(flags, notByte);
2509
2510 // Don't rewrite getstatic, only getfield
2511 if (is_static) rc = may_not_rewrite;
2512
2513 // btos
2514 __ access_load_at(T_BYTE, IN_HEAP, r0, field, noreg, noreg);
2515 __ push(btos);
2516 // Rewrite bytecode to be faster
2517 if (rc == may_rewrite) {
2518 patch_bytecode(Bytecodes::_fast_bgetfield, bc, r1);
2519 }
2520 __ b(Done);
2521
2522 __ bind(notByte);
2523 __ cmp(flags, (u1)ztos);
2524 __ br(Assembler::NE, notBool);
2525
2526 // ztos (same code as btos)
2527 __ access_load_at(T_BOOLEAN, IN_HEAP, r0, field, noreg, noreg);
2528 __ push(ztos);
2529 // Rewrite bytecode to be faster
2530 if (rc == may_rewrite) {
2531 // use btos rewriting, no truncating to t/f bit is needed for getfield.
2532 patch_bytecode(Bytecodes::_fast_bgetfield, bc, r1);
2533 }
2534 __ b(Done);
2535
2536 __ bind(notBool);
2537 __ cmp(flags, (u1)atos);
2538 __ br(Assembler::NE, notObj);
2539 // atos
2540 do_oop_load(_masm, field, r0, IN_HEAP);
2541 __ push(atos);
2542 if (rc == may_rewrite) {
2543 patch_bytecode(Bytecodes::_fast_agetfield, bc, r1);
2544 }
2545 __ b(Done);
2546
2547 __ bind(notObj);
2548 __ cmp(flags, (u1)itos);
2549 __ br(Assembler::NE, notInt);
2550 // itos
2551 __ access_load_at(T_INT, IN_HEAP, r0, field, noreg, noreg);
2552 __ push(itos);
2553 // Rewrite bytecode to be faster
2554 if (rc == may_rewrite) {
2555 patch_bytecode(Bytecodes::_fast_igetfield, bc, r1);
2556 }
2557 __ b(Done);
2558
2559 __ bind(notInt);
2560 __ cmp(flags, (u1)ctos);
2561 __ br(Assembler::NE, notChar);
2562 // ctos
2563 __ access_load_at(T_CHAR, IN_HEAP, r0, field, noreg, noreg);
2564 __ push(ctos);
2565 // Rewrite bytecode to be faster
2566 if (rc == may_rewrite) {
2567 patch_bytecode(Bytecodes::_fast_cgetfield, bc, r1);
2568 }
2569 __ b(Done);
2570
2571 __ bind(notChar);
2572 __ cmp(flags, (u1)stos);
2573 __ br(Assembler::NE, notShort);
2574 // stos
2575 __ access_load_at(T_SHORT, IN_HEAP, r0, field, noreg, noreg);
2576 __ push(stos);
2577 // Rewrite bytecode to be faster
2578 if (rc == may_rewrite) {
2579 patch_bytecode(Bytecodes::_fast_sgetfield, bc, r1);
2580 }
2581 __ b(Done);
2582
2583 __ bind(notShort);
2584 __ cmp(flags, (u1)ltos);
2585 __ br(Assembler::NE, notLong);
2586 // ltos
2587 __ access_load_at(T_LONG, IN_HEAP, r0, field, noreg, noreg);
2588 __ push(ltos);
2589 // Rewrite bytecode to be faster
2590 if (rc == may_rewrite) {
2591 patch_bytecode(Bytecodes::_fast_lgetfield, bc, r1);
2592 }
2593 __ b(Done);
2594
2595 __ bind(notLong);
2596 __ cmp(flags, (u1)ftos);
2597 __ br(Assembler::NE, notFloat);
2598 // ftos
2599 __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
2600 __ push(ftos);
2601 // Rewrite bytecode to be faster
2602 if (rc == may_rewrite) {
2603 patch_bytecode(Bytecodes::_fast_fgetfield, bc, r1);
2604 }
2605 __ b(Done);
2606
2607 __ bind(notFloat);
2608 #ifdef ASSERT
2609 __ cmp(flags, (u1)dtos);
2610 __ br(Assembler::NE, notDouble);
2611 #endif
2612 // dtos
2613 __ access_load_at(T_DOUBLE, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
2614 __ push(dtos);
2615 // Rewrite bytecode to be faster
2616 if (rc == may_rewrite) {
2617 patch_bytecode(Bytecodes::_fast_dgetfield, bc, r1);
2618 }
2619 #ifdef ASSERT
2620 __ b(Done);
2621
2622 __ bind(notDouble);
2623 __ stop("Bad state");
2624 #endif
2625
2626 __ bind(Done);
2627
2628 Label notVolatile;
2629 __ tbz(raw_flags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
2630 __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
2631 __ bind(notVolatile);
2632 }
2633
2634
2635 void TemplateTable::getfield(int byte_no)
2636 {
2637 getfield_or_static(byte_no, false);
2638 }
2639
2640 void TemplateTable::nofast_getfield(int byte_no) {
2641 getfield_or_static(byte_no, false, may_not_rewrite);
2642 }
2643
2644 void TemplateTable::getstatic(int byte_no)
2645 {
2646 getfield_or_static(byte_no, true);
2647 }
2648
2649 // The registers cache and index expected to be set before call.
2650 // The function may destroy various registers, just not the cache and index registers.
2651 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
2652 transition(vtos, vtos);
2653
2654 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2655
2656 if (JvmtiExport::can_post_field_modification()) {
2657 // Check to see if a field modification watch has been set before
2658 // we take the time to call into the VM.
2659 Label L1;
2660 assert_different_registers(cache, index, r0);
2661 __ lea(rscratch1, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2662 __ ldrw(r0, Address(rscratch1));
2663 __ cbz(r0, L1);
2664
2665 __ get_cache_and_index_at_bcp(c_rarg2, rscratch1, 1);
2666
2667 if (is_static) {
2668 // Life is simple. Null out the object pointer.
2669 __ mov(c_rarg1, zr);
2670 } else {
2671 // Life is harder. The stack holds the value on top, followed by
2672 // the object. We don't know the size of the value, though; it
2673 // could be one or two words depending on its type. As a result,
2674 // we must find the type to determine where the object is.
2675 __ ldrw(c_rarg3, Address(c_rarg2,
2676 in_bytes(cp_base_offset +
2677 ConstantPoolCacheEntry::flags_offset())));
2678 __ lsr(c_rarg3, c_rarg3,
2679 ConstantPoolCacheEntry::tos_state_shift);
2680 ConstantPoolCacheEntry::verify_tos_state_shift();
2681 Label nope2, done, ok;
2682 __ ldr(c_rarg1, at_tos_p1()); // initially assume a one word jvalue
2683 __ cmpw(c_rarg3, ltos);
2684 __ br(Assembler::EQ, ok);
2685 __ cmpw(c_rarg3, dtos);
2686 __ br(Assembler::NE, nope2);
2687 __ bind(ok);
2688 __ ldr(c_rarg1, at_tos_p2()); // ltos (two word jvalue)
2689 __ bind(nope2);
2690 }
2691 // cache entry pointer
2692 __ add(c_rarg2, c_rarg2, in_bytes(cp_base_offset));
2693 // object (tos)
2694 __ mov(c_rarg3, esp);
2695 // c_rarg1: object pointer set up above (NULL if static)
2696 // c_rarg2: cache entry pointer
2697 // c_rarg3: jvalue object on the stack
2698 __ call_VM(noreg,
2699 CAST_FROM_FN_PTR(address,
2700 InterpreterRuntime::post_field_modification),
2701 c_rarg1, c_rarg2, c_rarg3);
2702 __ get_cache_and_index_at_bcp(cache, index, 1);
2703 __ bind(L1);
2704 }
2705 }
2706
2707 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
2708 transition(vtos, vtos);
2709
2710 const Register cache = r2;
2711 const Register index = r3;
2712 const Register obj = r2;
2713 const Register off = r19;
2714 const Register flags = r0;
2715 const Register bc = r4;
2716
2717 resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2718 jvmti_post_field_mod(cache, index, is_static);
2719 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2720
2721 Label Done;
2722 __ mov(r5, flags);
2723
2724 {
2725 Label notVolatile;
2726 __ tbz(r5, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
2727 __ membar(MacroAssembler::StoreStore | MacroAssembler::LoadStore);
2728 __ bind(notVolatile);
2729 }
2730
2731 // field address
2732 const Address field(obj, off);
2733
2734 Label notByte, notBool, notInt, notShort, notChar,
2735 notLong, notFloat, notObj, notDouble;
2736
2737 // x86 uses a shift and mask or wings it with a shift plus assert
2738 // the mask is not needed. aarch64 just uses bitfield extract
2739 __ ubfxw(flags, flags, ConstantPoolCacheEntry::tos_state_shift, ConstantPoolCacheEntry::tos_state_bits);
2740
2741 assert(btos == 0, "change code, btos != 0");
2742 __ cbnz(flags, notByte);
2743
2744 // Don't rewrite putstatic, only putfield
2745 if (is_static) rc = may_not_rewrite;
2746
2747 // btos
2748 {
2749 __ pop(btos);
2750 if (!is_static) pop_and_check_object(obj);
2751 __ access_store_at(T_BYTE, IN_HEAP, field, r0, noreg, noreg);
2752 if (rc == may_rewrite) {
2753 patch_bytecode(Bytecodes::_fast_bputfield, bc, r1, true, byte_no);
2754 }
2755 __ b(Done);
2756 }
2757
2758 __ bind(notByte);
2759 __ cmp(flags, (u1)ztos);
2760 __ br(Assembler::NE, notBool);
2761
2762 // ztos
2763 {
2764 __ pop(ztos);
2765 if (!is_static) pop_and_check_object(obj);
2766 __ access_store_at(T_BOOLEAN, IN_HEAP, field, r0, noreg, noreg);
2767 if (rc == may_rewrite) {
2768 patch_bytecode(Bytecodes::_fast_zputfield, bc, r1, true, byte_no);
2769 }
2770 __ b(Done);
2771 }
2772
2773 __ bind(notBool);
2774 __ cmp(flags, (u1)atos);
2775 __ br(Assembler::NE, notObj);
2776
2777 // atos
2778 {
2779 __ pop(atos);
2780 if (!is_static) pop_and_check_object(obj);
2781 // Store into the field
2782 do_oop_store(_masm, field, r0, IN_HEAP);
2783 if (rc == may_rewrite) {
2784 patch_bytecode(Bytecodes::_fast_aputfield, bc, r1, true, byte_no);
2785 }
2786 __ b(Done);
2787 }
2788
2789 __ bind(notObj);
2790 __ cmp(flags, (u1)itos);
2791 __ br(Assembler::NE, notInt);
2792
2793 // itos
2794 {
2795 __ pop(itos);
2796 if (!is_static) pop_and_check_object(obj);
2797 __ access_store_at(T_INT, IN_HEAP, field, r0, noreg, noreg);
2798 if (rc == may_rewrite) {
2799 patch_bytecode(Bytecodes::_fast_iputfield, bc, r1, true, byte_no);
2800 }
2801 __ b(Done);
2802 }
2803
2804 __ bind(notInt);
2805 __ cmp(flags, (u1)ctos);
2806 __ br(Assembler::NE, notChar);
2807
2808 // ctos
2809 {
2810 __ pop(ctos);
2811 if (!is_static) pop_and_check_object(obj);
2812 __ access_store_at(T_CHAR, IN_HEAP, field, r0, noreg, noreg);
2813 if (rc == may_rewrite) {
2814 patch_bytecode(Bytecodes::_fast_cputfield, bc, r1, true, byte_no);
2815 }
2816 __ b(Done);
2817 }
2818
2819 __ bind(notChar);
2820 __ cmp(flags, (u1)stos);
2821 __ br(Assembler::NE, notShort);
2822
2823 // stos
2824 {
2825 __ pop(stos);
2826 if (!is_static) pop_and_check_object(obj);
2827 __ access_store_at(T_SHORT, IN_HEAP, field, r0, noreg, noreg);
2828 if (rc == may_rewrite) {
2829 patch_bytecode(Bytecodes::_fast_sputfield, bc, r1, true, byte_no);
2830 }
2831 __ b(Done);
2832 }
2833
2834 __ bind(notShort);
2835 __ cmp(flags, (u1)ltos);
2836 __ br(Assembler::NE, notLong);
2837
2838 // ltos
2839 {
2840 __ pop(ltos);
2841 if (!is_static) pop_and_check_object(obj);
2842 __ access_store_at(T_LONG, IN_HEAP, field, r0, noreg, noreg);
2843 if (rc == may_rewrite) {
2844 patch_bytecode(Bytecodes::_fast_lputfield, bc, r1, true, byte_no);
2845 }
2846 __ b(Done);
2847 }
2848
2849 __ bind(notLong);
2850 __ cmp(flags, (u1)ftos);
2851 __ br(Assembler::NE, notFloat);
2852
2853 // ftos
2854 {
2855 __ pop(ftos);
2856 if (!is_static) pop_and_check_object(obj);
2857 __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg);
2858 if (rc == may_rewrite) {
2859 patch_bytecode(Bytecodes::_fast_fputfield, bc, r1, true, byte_no);
2860 }
2861 __ b(Done);
2862 }
2863
2864 __ bind(notFloat);
2865 #ifdef ASSERT
2866 __ cmp(flags, (u1)dtos);
2867 __ br(Assembler::NE, notDouble);
2868 #endif
2869
2870 // dtos
2871 {
2872 __ pop(dtos);
2873 if (!is_static) pop_and_check_object(obj);
2874 __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos */, noreg, noreg);
2875 if (rc == may_rewrite) {
2876 patch_bytecode(Bytecodes::_fast_dputfield, bc, r1, true, byte_no);
2877 }
2878 }
2879
2880 #ifdef ASSERT
2881 __ b(Done);
2882
2883 __ bind(notDouble);
2884 __ stop("Bad state");
2885 #endif
2886
2887 __ bind(Done);
2888
2889 {
2890 Label notVolatile;
2891 __ tbz(r5, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
2892 __ membar(MacroAssembler::StoreLoad | MacroAssembler::StoreStore);
2893 __ bind(notVolatile);
2894 }
2895 }
2896
2897 void TemplateTable::putfield(int byte_no)
2898 {
2899 putfield_or_static(byte_no, false);
2900 }
2901
2902 void TemplateTable::nofast_putfield(int byte_no) {
2903 putfield_or_static(byte_no, false, may_not_rewrite);
2904 }
2905
2906 void TemplateTable::putstatic(int byte_no) {
2907 putfield_or_static(byte_no, true);
2908 }
2909
2910 void TemplateTable::jvmti_post_fast_field_mod()
2911 {
2912 if (JvmtiExport::can_post_field_modification()) {
2913 // Check to see if a field modification watch has been set before
2914 // we take the time to call into the VM.
2915 Label L2;
2916 __ lea(rscratch1, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2917 __ ldrw(c_rarg3, Address(rscratch1));
2918 __ cbzw(c_rarg3, L2);
2919 __ pop_ptr(r19); // copy the object pointer from tos
2920 __ verify_oop(r19);
2921 __ push_ptr(r19); // put the object pointer back on tos
2922 // Save tos values before call_VM() clobbers them. Since we have
2923 // to do it for every data type, we use the saved values as the
2924 // jvalue object.
2925 switch (bytecode()) { // load values into the jvalue object
2926 case Bytecodes::_fast_aputfield: __ push_ptr(r0); break;
2927 case Bytecodes::_fast_bputfield: // fall through
2928 case Bytecodes::_fast_zputfield: // fall through
2929 case Bytecodes::_fast_sputfield: // fall through
2930 case Bytecodes::_fast_cputfield: // fall through
2931 case Bytecodes::_fast_iputfield: __ push_i(r0); break;
2932 case Bytecodes::_fast_dputfield: __ push_d(); break;
2933 case Bytecodes::_fast_fputfield: __ push_f(); break;
2934 case Bytecodes::_fast_lputfield: __ push_l(r0); break;
2935
2936 default:
2937 ShouldNotReachHere();
2938 }
2939 __ mov(c_rarg3, esp); // points to jvalue on the stack
2940 // access constant pool cache entry
2941 __ get_cache_entry_pointer_at_bcp(c_rarg2, r0, 1);
2942 __ verify_oop(r19);
2943 // r19: object pointer copied above
2944 // c_rarg2: cache entry pointer
2945 // c_rarg3: jvalue object on the stack
2946 __ call_VM(noreg,
2947 CAST_FROM_FN_PTR(address,
2948 InterpreterRuntime::post_field_modification),
2949 r19, c_rarg2, c_rarg3);
2950
2951 switch (bytecode()) { // restore tos values
2952 case Bytecodes::_fast_aputfield: __ pop_ptr(r0); break;
2953 case Bytecodes::_fast_bputfield: // fall through
2954 case Bytecodes::_fast_zputfield: // fall through
2955 case Bytecodes::_fast_sputfield: // fall through
2956 case Bytecodes::_fast_cputfield: // fall through
2957 case Bytecodes::_fast_iputfield: __ pop_i(r0); break;
2958 case Bytecodes::_fast_dputfield: __ pop_d(); break;
2959 case Bytecodes::_fast_fputfield: __ pop_f(); break;
2960 case Bytecodes::_fast_lputfield: __ pop_l(r0); break;
2961 default: break;
2962 }
2963 __ bind(L2);
2964 }
2965 }
2966
2967 void TemplateTable::fast_storefield(TosState state)
2968 {
2969 transition(state, vtos);
2970
2971 ByteSize base = ConstantPoolCache::base_offset();
2972
2973 jvmti_post_fast_field_mod();
2974
2975 // access constant pool cache
2976 __ get_cache_and_index_at_bcp(r2, r1, 1);
2977
2978 // Must prevent reordering of the following cp cache loads with bytecode load
2979 __ membar(MacroAssembler::LoadLoad);
2980
2981 // test for volatile with r3
2982 __ ldrw(r3, Address(r2, in_bytes(base +
2983 ConstantPoolCacheEntry::flags_offset())));
2984
2985 // replace index with field offset from cache entry
2986 __ ldr(r1, Address(r2, in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
2987
2988 {
2989 Label notVolatile;
2990 __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
2991 __ membar(MacroAssembler::StoreStore | MacroAssembler::LoadStore);
2992 __ bind(notVolatile);
2993 }
2994
2995 Label notVolatile;
2996
2997 // Get object from stack
2998 pop_and_check_object(r2);
2999
3000 // field address
3001 const Address field(r2, r1);
3002
3003 // access field
3004 switch (bytecode()) {
3005 case Bytecodes::_fast_aputfield:
3006 do_oop_store(_masm, field, r0, IN_HEAP);
3007 break;
3008 case Bytecodes::_fast_lputfield:
3009 __ access_store_at(T_LONG, IN_HEAP, field, r0, noreg, noreg);
3010 break;
3011 case Bytecodes::_fast_iputfield:
3012 __ access_store_at(T_INT, IN_HEAP, field, r0, noreg, noreg);
3013 break;
3014 case Bytecodes::_fast_zputfield:
3015 __ access_store_at(T_BOOLEAN, IN_HEAP, field, r0, noreg, noreg);
3016 break;
3017 case Bytecodes::_fast_bputfield:
3018 __ access_store_at(T_BYTE, IN_HEAP, field, r0, noreg, noreg);
3019 break;
3020 case Bytecodes::_fast_sputfield:
3021 __ access_store_at(T_SHORT, IN_HEAP, field, r0, noreg, noreg);
3022 break;
3023 case Bytecodes::_fast_cputfield:
3024 __ access_store_at(T_CHAR, IN_HEAP, field, r0, noreg, noreg);
3025 break;
3026 case Bytecodes::_fast_fputfield:
3027 __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg);
3028 break;
3029 case Bytecodes::_fast_dputfield:
3030 __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos */, noreg, noreg);
3031 break;
3032 default:
3033 ShouldNotReachHere();
3034 }
3035
3036 {
3037 Label notVolatile;
3038 __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3039 __ membar(MacroAssembler::StoreLoad | MacroAssembler::StoreStore);
3040 __ bind(notVolatile);
3041 }
3042 }
3043
3044
3045 void TemplateTable::fast_accessfield(TosState state)
3046 {
3047 transition(atos, state);
3048 // Do the JVMTI work here to avoid disturbing the register state below
3049 if (JvmtiExport::can_post_field_access()) {
3050 // Check to see if a field access watch has been set before we
3051 // take the time to call into the VM.
3052 Label L1;
3053 __ lea(rscratch1, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
3054 __ ldrw(r2, Address(rscratch1));
3055 __ cbzw(r2, L1);
3056 // access constant pool cache entry
3057 __ get_cache_entry_pointer_at_bcp(c_rarg2, rscratch2, 1);
3058 __ verify_oop(r0);
3059 __ push_ptr(r0); // save object pointer before call_VM() clobbers it
3060 __ mov(c_rarg1, r0);
3061 // c_rarg1: object pointer copied above
3062 // c_rarg2: cache entry pointer
3063 __ call_VM(noreg,
3064 CAST_FROM_FN_PTR(address,
3065 InterpreterRuntime::post_field_access),
3066 c_rarg1, c_rarg2);
3067 __ pop_ptr(r0); // restore object pointer
3068 __ bind(L1);
3069 }
3070
3071 // access constant pool cache
3072 __ get_cache_and_index_at_bcp(r2, r1, 1);
3073
3074 // Must prevent reordering of the following cp cache loads with bytecode load
3075 __ membar(MacroAssembler::LoadLoad);
3076
3077 __ ldr(r1, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
3078 ConstantPoolCacheEntry::f2_offset())));
3079 __ ldrw(r3, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
3080 ConstantPoolCacheEntry::flags_offset())));
3081
3082 // r0: object
3083 __ verify_oop(r0);
3084 __ null_check(r0);
3085 const Address field(r0, r1);
3086
3087 // 8179954: We need to make sure that the code generated for
3088 // volatile accesses forms a sequentially-consistent set of
3089 // operations when combined with STLR and LDAR. Without a leading
3090 // membar it's possible for a simple Dekker test to fail if loads
3091 // use LDR;DMB but stores use STLR. This can happen if C2 compiles
3092 // the stores in one method and we interpret the loads in another.
3093 if (!is_c1_or_interpreter_only()) {
3094 Label notVolatile;
3095 __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3096 __ membar(MacroAssembler::AnyAny);
3097 __ bind(notVolatile);
3098 }
3099
3100 // access field
3101 switch (bytecode()) {
3102 case Bytecodes::_fast_agetfield:
3103 do_oop_load(_masm, field, r0, IN_HEAP);
3104 __ verify_oop(r0);
3105 break;
3106 case Bytecodes::_fast_lgetfield:
3107 __ access_load_at(T_LONG, IN_HEAP, r0, field, noreg, noreg);
3108 break;
3109 case Bytecodes::_fast_igetfield:
3110 __ access_load_at(T_INT, IN_HEAP, r0, field, noreg, noreg);
3111 break;
3112 case Bytecodes::_fast_bgetfield:
3113 __ access_load_at(T_BYTE, IN_HEAP, r0, field, noreg, noreg);
3114 break;
3115 case Bytecodes::_fast_sgetfield:
3116 __ access_load_at(T_SHORT, IN_HEAP, r0, field, noreg, noreg);
3117 break;
3118 case Bytecodes::_fast_cgetfield:
3119 __ access_load_at(T_CHAR, IN_HEAP, r0, field, noreg, noreg);
3120 break;
3121 case Bytecodes::_fast_fgetfield:
3122 __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
3123 break;
3124 case Bytecodes::_fast_dgetfield:
3125 __ access_load_at(T_DOUBLE, IN_HEAP, noreg /* dtos */, field, noreg, noreg);
3126 break;
3127 default:
3128 ShouldNotReachHere();
3129 }
3130 {
3131 Label notVolatile;
3132 __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3133 __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
3134 __ bind(notVolatile);
3135 }
3136 }
3137
3138 void TemplateTable::fast_xaccess(TosState state)
3139 {
3140 transition(vtos, state);
3141
3142 // get receiver
3143 __ ldr(r0, aaddress(0));
3144 // access constant pool cache
3145 __ get_cache_and_index_at_bcp(r2, r3, 2);
3146 __ ldr(r1, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
3147 ConstantPoolCacheEntry::f2_offset())));
3148
3149 // 8179954: We need to make sure that the code generated for
3150 // volatile accesses forms a sequentially-consistent set of
3151 // operations when combined with STLR and LDAR. Without a leading
3152 // membar it's possible for a simple Dekker test to fail if loads
3153 // use LDR;DMB but stores use STLR. This can happen if C2 compiles
3154 // the stores in one method and we interpret the loads in another.
3155 if (!is_c1_or_interpreter_only()) {
3156 Label notVolatile;
3157 __ ldrw(r3, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
3158 ConstantPoolCacheEntry::flags_offset())));
3159 __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3160 __ membar(MacroAssembler::AnyAny);
3161 __ bind(notVolatile);
3162 }
3163
3164 // make sure exception is reported in correct bcp range (getfield is
3165 // next instruction)
3166 __ increment(rbcp);
3167 __ null_check(r0);
3168 switch (state) {
3169 case itos:
3170 __ access_load_at(T_INT, IN_HEAP, r0, Address(r0, r1, Address::lsl(0)), noreg, noreg);
3171 break;
3172 case atos:
3173 do_oop_load(_masm, Address(r0, r1, Address::lsl(0)), r0, IN_HEAP);
3174 __ verify_oop(r0);
3175 break;
3176 case ftos:
3177 __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, Address(r0, r1, Address::lsl(0)), noreg, noreg);
3178 break;
3179 default:
3180 ShouldNotReachHere();
3181 }
3182
3183 {
3184 Label notVolatile;
3185 __ ldrw(r3, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
3186 ConstantPoolCacheEntry::flags_offset())));
3187 __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3188 __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
3189 __ bind(notVolatile);
3190 }
3191
3192 __ decrement(rbcp);
3193 }
3194
3195
3196
3197 //-----------------------------------------------------------------------------
3198 // Calls
3199
3200 void TemplateTable::count_calls(Register method, Register temp)
3201 {
3202 __ call_Unimplemented();
3203 }
3204
3205 void TemplateTable::prepare_invoke(int byte_no,
3206 Register method, // linked method (or i-klass)
3207 Register index, // itable index, MethodType, etc.
3208 Register recv, // if caller wants to see it
3209 Register flags // if caller wants to test it
3210 ) {
3211 // determine flags
3212 Bytecodes::Code code = bytecode();
3213 const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
3214 const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
3215 const bool is_invokehandle = code == Bytecodes::_invokehandle;
3216 const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
3217 const bool is_invokespecial = code == Bytecodes::_invokespecial;
3218 const bool load_receiver = (recv != noreg);
3219 const bool save_flags = (flags != noreg);
3220 assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
3221 assert(save_flags == (is_invokeinterface || is_invokevirtual), "need flags for vfinal");
3222 assert(flags == noreg || flags == r3, "");
3223 assert(recv == noreg || recv == r2, "");
3224
3225 // setup registers & access constant pool cache
3226 if (recv == noreg) recv = r2;
3227 if (flags == noreg) flags = r3;
3228 assert_different_registers(method, index, recv, flags);
3229
3230 // save 'interpreter return address'
3231 __ save_bcp();
3232
3233 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
3234
3235 // maybe push appendix to arguments (just before return address)
3236 if (is_invokedynamic || is_invokehandle) {
3237 Label L_no_push;
3238 __ tbz(flags, ConstantPoolCacheEntry::has_appendix_shift, L_no_push);
3239 // Push the appendix as a trailing parameter.
3240 // This must be done before we get the receiver,
3241 // since the parameter_size includes it.
3242 __ push(r19);
3243 __ mov(r19, index);
3244 __ load_resolved_reference_at_index(index, r19);
3245 __ pop(r19);
3246 __ push(index); // push appendix (MethodType, CallSite, etc.)
3247 __ bind(L_no_push);
3248 }
3249
3250 // load receiver if needed (note: no return address pushed yet)
3251 if (load_receiver) {
3252 __ andw(recv, flags, ConstantPoolCacheEntry::parameter_size_mask);
3253 // FIXME -- is this actually correct? looks like it should be 2
3254 // const int no_return_pc_pushed_yet = -1; // argument slot correction before we push return address
3255 // const int receiver_is_at_end = -1; // back off one slot to get receiver
3256 // Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end);
3257 // __ movptr(recv, recv_addr);
3258 __ add(rscratch1, esp, recv, ext::uxtx, 3); // FIXME: uxtb here?
3259 __ ldr(recv, Address(rscratch1, -Interpreter::expr_offset_in_bytes(1)));
3260 __ verify_oop(recv);
3261 }
3262
3263 // compute return type
3264 // x86 uses a shift and mask or wings it with a shift plus assert
3265 // the mask is not needed. aarch64 just uses bitfield extract
3266 __ ubfxw(rscratch2, flags, ConstantPoolCacheEntry::tos_state_shift, ConstantPoolCacheEntry::tos_state_bits);
3267 // load return address
3268 {
3269 const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
3270 __ mov(rscratch1, table_addr);
3271 __ ldr(lr, Address(rscratch1, rscratch2, Address::lsl(3)));
3272 }
3273 }
3274
3275
3276 void TemplateTable::invokevirtual_helper(Register index,
3277 Register recv,
3278 Register flags)
3279 {
3280 // Uses temporary registers r0, r3
3281 assert_different_registers(index, recv, r0, r3);
3282 // Test for an invoke of a final method
3283 Label notFinal;
3284 __ tbz(flags, ConstantPoolCacheEntry::is_vfinal_shift, notFinal);
3285
3286 const Register method = index; // method must be rmethod
3287 assert(method == rmethod,
3288 "methodOop must be rmethod for interpreter calling convention");
3289
3290 // do the call - the index is actually the method to call
3291 // that is, f2 is a vtable index if !is_vfinal, else f2 is a Method*
3292
3293 // It's final, need a null check here!
3294 __ null_check(recv);
3295
3296 // profile this call
3297 __ profile_final_call(r0);
3298 __ profile_arguments_type(r0, method, r4, true);
3299
3300 __ jump_from_interpreted(method, r0);
3301
3302 __ bind(notFinal);
3303
3304 // get receiver klass
3305 __ null_check(recv, oopDesc::klass_offset_in_bytes());
3306 __ load_klass(r0, recv);
3307
3308 // profile this call
3309 __ profile_virtual_call(r0, rlocals, r3);
3310
3311 // get target methodOop & entry point
3312 __ lookup_virtual_method(r0, index, method);
3313 __ profile_arguments_type(r3, method, r4, true);
3314 // FIXME -- this looks completely redundant. is it?
3315 // __ ldr(r3, Address(method, Method::interpreter_entry_offset()));
3316 __ jump_from_interpreted(method, r3);
3317 }
3318
3319 void TemplateTable::invokevirtual(int byte_no)
3320 {
3321 transition(vtos, vtos);
3322 assert(byte_no == f2_byte, "use this argument");
3323
3324 prepare_invoke(byte_no, rmethod, noreg, r2, r3);
3325
3326 // rmethod: index (actually a Method*)
3327 // r2: receiver
3328 // r3: flags
3329
3330 invokevirtual_helper(rmethod, r2, r3);
3331 }
3332
3333 void TemplateTable::invokespecial(int byte_no)
3334 {
3335 transition(vtos, vtos);
3336 assert(byte_no == f1_byte, "use this argument");
3337
3338 prepare_invoke(byte_no, rmethod, noreg, // get f1 Method*
3339 r2); // get receiver also for null check
3340 __ verify_oop(r2);
3341 __ null_check(r2);
3342 // do the call
3343 __ profile_call(r0);
3344 __ profile_arguments_type(r0, rmethod, rbcp, false);
3345 __ jump_from_interpreted(rmethod, r0);
3346 }
3347
3348 void TemplateTable::invokestatic(int byte_no)
3349 {
3350 transition(vtos, vtos);
3351 assert(byte_no == f1_byte, "use this argument");
3352
3353 prepare_invoke(byte_no, rmethod); // get f1 Method*
3354 // do the call
3355 __ profile_call(r0);
3356 __ profile_arguments_type(r0, rmethod, r4, false);
3357 __ jump_from_interpreted(rmethod, r0);
3358 }
3359
3360 void TemplateTable::fast_invokevfinal(int byte_no)
3361 {
3362 __ call_Unimplemented();
3363 }
3364
3365 void TemplateTable::invokeinterface(int byte_no) {
3366 transition(vtos, vtos);
3367 assert(byte_no == f1_byte, "use this argument");
3368
3369 prepare_invoke(byte_no, r0, rmethod, // get f1 Klass*, f2 Method*
3370 r2, r3); // recv, flags
3371
3372 // r0: interface klass (from f1)
3373 // rmethod: method (from f2)
3374 // r2: receiver
3375 // r3: flags
3376
3377 // First check for Object case, then private interface method,
3378 // then regular interface method.
3379
3380 // Special case of invokeinterface called for virtual method of
3381 // java.lang.Object. See cpCache.cpp for details.
3382 Label notObjectMethod;
3383 __ tbz(r3, ConstantPoolCacheEntry::is_forced_virtual_shift, notObjectMethod);
3384
3385 invokevirtual_helper(rmethod, r2, r3);
3386 __ bind(notObjectMethod);
3387
3388 Label no_such_interface;
3389
3390 // Check for private method invocation - indicated by vfinal
3391 Label notVFinal;
3392 __ tbz(r3, ConstantPoolCacheEntry::is_vfinal_shift, notVFinal);
3393
3394 // Get receiver klass into r3 - also a null check
3395 __ null_check(r2, oopDesc::klass_offset_in_bytes());
3396 __ load_klass(r3, r2);
3397
3398 Label subtype;
3399 __ check_klass_subtype(r3, r0, r4, subtype);
3400 // If we get here the typecheck failed
3401 __ b(no_such_interface);
3402 __ bind(subtype);
3403
3404 __ profile_final_call(r0);
3405 __ profile_arguments_type(r0, rmethod, r4, true);
3406 __ jump_from_interpreted(rmethod, r0);
3407
3408 __ bind(notVFinal);
3409
3410 // Get receiver klass into r3 - also a null check
3411 __ restore_locals();
3412 __ null_check(r2, oopDesc::klass_offset_in_bytes());
3413 __ load_klass(r3, r2);
3414
3415 Label no_such_method;
3416
3417 // Preserve method for throw_AbstractMethodErrorVerbose.
3418 __ mov(r16, rmethod);
3419 // Receiver subtype check against REFC.
3420 // Superklass in r0. Subklass in r3. Blows rscratch2, r13
3421 __ lookup_interface_method(// inputs: rec. class, interface, itable index
3422 r3, r0, noreg,
3423 // outputs: scan temp. reg, scan temp. reg
3424 rscratch2, r13,
3425 no_such_interface,
3426 /*return_method=*/false);
3427
3428 // profile this call
3429 __ profile_virtual_call(r3, r13, r19);
3430
3431 // Get declaring interface class from method, and itable index
3432
3433 __ load_method_holder(r0, rmethod);
3434 __ ldrw(rmethod, Address(rmethod, Method::itable_index_offset()));
3435 __ subw(rmethod, rmethod, Method::itable_index_max);
3436 __ negw(rmethod, rmethod);
3437
3438 // Preserve recvKlass for throw_AbstractMethodErrorVerbose.
3439 __ mov(rlocals, r3);
3440 __ lookup_interface_method(// inputs: rec. class, interface, itable index
3441 rlocals, r0, rmethod,
3442 // outputs: method, scan temp. reg
3443 rmethod, r13,
3444 no_such_interface);
3445
3446 // rmethod,: methodOop to call
3447 // r2: receiver
3448 // Check for abstract method error
3449 // Note: This should be done more efficiently via a throw_abstract_method_error
3450 // interpreter entry point and a conditional jump to it in case of a null
3451 // method.
3452 __ cbz(rmethod, no_such_method);
3453
3454 __ profile_arguments_type(r3, rmethod, r13, true);
3455
3456 // do the call
3457 // r2: receiver
3458 // rmethod,: methodOop
3459 __ jump_from_interpreted(rmethod, r3);
3460 __ should_not_reach_here();
3461
3462 // exception handling code follows...
3463 // note: must restore interpreter registers to canonical
3464 // state for exception handling to work correctly!
3465
3466 __ bind(no_such_method);
3467 // throw exception
3468 __ restore_bcp(); // bcp must be correct for exception handler (was destroyed)
3469 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3470 // Pass arguments for generating a verbose error message.
3471 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorVerbose), r3, r16);
3472 // the call_VM checks for exception, so we should never return here.
3473 __ should_not_reach_here();
3474
3475 __ bind(no_such_interface);
3476 // throw exception
3477 __ restore_bcp(); // bcp must be correct for exception handler (was destroyed)
3478 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3479 // Pass arguments for generating a verbose error message.
3480 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3481 InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose), r3, r0);
3482 // the call_VM checks for exception, so we should never return here.
3483 __ should_not_reach_here();
3484 return;
3485 }
3486
3487 void TemplateTable::invokehandle(int byte_no) {
3488 transition(vtos, vtos);
3489 assert(byte_no == f1_byte, "use this argument");
3490
3491 prepare_invoke(byte_no, rmethod, r0, r2);
3492 __ verify_method_ptr(r2);
3493 __ verify_oop(r2);
3494 __ null_check(r2);
3495
3496 // FIXME: profile the LambdaForm also
3497
3498 // r13 is safe to use here as a scratch reg because it is about to
3499 // be clobbered by jump_from_interpreted().
3500 __ profile_final_call(r13);
3501 __ profile_arguments_type(r13, rmethod, r4, true);
3502
3503 __ jump_from_interpreted(rmethod, r0);
3504 }
3505
3506 void TemplateTable::invokedynamic(int byte_no) {
3507 transition(vtos, vtos);
3508 assert(byte_no == f1_byte, "use this argument");
3509
3510 prepare_invoke(byte_no, rmethod, r0);
3511
3512 // r0: CallSite object (from cpool->resolved_references[])
3513 // rmethod: MH.linkToCallSite method (from f2)
3514
3515 // Note: r0_callsite is already pushed by prepare_invoke
3516
3517 // %%% should make a type profile for any invokedynamic that takes a ref argument
3518 // profile this call
3519 __ profile_call(rbcp);
3520 __ profile_arguments_type(r3, rmethod, r13, false);
3521
3522 __ verify_oop(r0);
3523
3524 __ jump_from_interpreted(rmethod, r0);
3525 }
3526
3527
3528 //-----------------------------------------------------------------------------
3529 // Allocation
3530
3531 void TemplateTable::_new() {
3532 transition(vtos, atos);
3533
3534 __ get_unsigned_2_byte_index_at_bcp(r3, 1);
3535 Label slow_case;
3536 Label done;
3537 Label initialize_header;
3538 Label initialize_object; // including clearing the fields
3539
3540 __ get_cpool_and_tags(r4, r0);
3541 // Make sure the class we're about to instantiate has been resolved.
3542 // This is done before loading InstanceKlass to be consistent with the order
3543 // how Constant Pool is updated (see ConstantPool::klass_at_put)
3544 const int tags_offset = Array<u1>::base_offset_in_bytes();
3545 __ lea(rscratch1, Address(r0, r3, Address::lsl(0)));
3546 __ lea(rscratch1, Address(rscratch1, tags_offset));
3547 __ ldarb(rscratch1, rscratch1);
3548 __ cmp(rscratch1, (u1)JVM_CONSTANT_Class);
3549 __ br(Assembler::NE, slow_case);
3550
3551 // get InstanceKlass
3552 __ load_resolved_klass_at_offset(r4, r3, r4, rscratch1);
3553
3554 // make sure klass is initialized & doesn't have finalizer
3555 // make sure klass is fully initialized
3556 __ ldrb(rscratch1, Address(r4, InstanceKlass::init_state_offset()));
3557 __ cmp(rscratch1, (u1)InstanceKlass::fully_initialized);
3558 __ br(Assembler::NE, slow_case);
3559
3560 // get instance_size in InstanceKlass (scaled to a count of bytes)
3561 __ ldrw(r3,
3562 Address(r4,
3563 Klass::layout_helper_offset()));
3564 // test to see if it has a finalizer or is malformed in some way
3565 __ tbnz(r3, exact_log2(Klass::_lh_instance_slow_path_bit), slow_case);
3566
3567 // Allocate the instance:
3568 // If TLAB is enabled:
3569 // Try to allocate in the TLAB.
3570 // If fails, go to the slow path.
3571 // Else If inline contiguous allocations are enabled:
3572 // Try to allocate in eden.
3573 // If fails due to heap end, go to slow path.
3574 //
3575 // If TLAB is enabled OR inline contiguous is enabled:
3576 // Initialize the allocation.
3577 // Exit.
3578 //
3579 // Go to slow path.
3580 const bool allow_shared_alloc =
3581 Universe::heap()->supports_inline_contig_alloc();
3582
3583 if (UseTLAB) {
3584 __ tlab_allocate(r0, r3, 0, noreg, r1, slow_case);
3585
3586 if (ZeroTLAB) {
3587 // the fields have been already cleared
3588 __ b(initialize_header);
3589 } else {
3590 // initialize both the header and fields
3591 __ b(initialize_object);
3592 }
3593 } else {
3594 // Allocation in the shared Eden, if allowed.
3595 //
3596 // r3: instance size in bytes
3597 if (allow_shared_alloc) {
3598 __ eden_allocate(r0, r3, 0, r10, slow_case);
3599 }
3600 }
3601
3602 // If UseTLAB or allow_shared_alloc are true, the object is created above and
3603 // there is an initialize need. Otherwise, skip and go to the slow path.
3604 if (UseTLAB || allow_shared_alloc) {
3605 // The object is initialized before the header. If the object size is
3606 // zero, go directly to the header initialization.
3607 __ bind(initialize_object);
3608 __ sub(r3, r3, sizeof(oopDesc));
3609 __ cbz(r3, initialize_header);
3610
3611 // Initialize object fields
3612 {
3613 __ add(r2, r0, sizeof(oopDesc));
3614 Label loop;
3615 __ bind(loop);
3616 __ str(zr, Address(__ post(r2, BytesPerLong)));
3617 __ sub(r3, r3, BytesPerLong);
3618 __ cbnz(r3, loop);
3619 }
3620
3621 // initialize object header only.
3622 __ bind(initialize_header);
3623 if (UseBiasedLocking) {
3624 __ ldr(rscratch1, Address(r4, Klass::prototype_header_offset()));
3625 } else {
3626 __ mov(rscratch1, (intptr_t)markWord::prototype().value());
3627 }
3628 __ str(rscratch1, Address(r0, oopDesc::mark_offset_in_bytes()));
3629 __ store_klass_gap(r0, zr); // zero klass gap for compressed oops
3630 __ store_klass(r0, r4); // store klass last
3631
3632 {
3633 SkipIfEqual skip(_masm, &DTraceAllocProbes, false);
3634 // Trigger dtrace event for fastpath
3635 __ push(atos); // save the return value
3636 __ call_VM_leaf(
3637 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), r0);
3638 __ pop(atos); // restore the return value
3639
3640 }
3641 __ b(done);
3642 }
3643
3644 // slow case
3645 __ bind(slow_case);
3646 __ get_constant_pool(c_rarg1);
3647 __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
3648 call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), c_rarg1, c_rarg2);
3649 __ verify_oop(r0);
3650
3651 // continue
3652 __ bind(done);
3653 // Must prevent reordering of stores for object initialization with stores that publish the new object.
3654 __ membar(Assembler::StoreStore);
3655 }
3656
3657 void TemplateTable::newarray() {
3658 transition(itos, atos);
3659 __ load_unsigned_byte(c_rarg1, at_bcp(1));
3660 __ mov(c_rarg2, r0);
3661 call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
3662 c_rarg1, c_rarg2);
3663 // Must prevent reordering of stores for object initialization with stores that publish the new object.
3664 __ membar(Assembler::StoreStore);
3665 }
3666
3667 void TemplateTable::anewarray() {
3668 transition(itos, atos);
3669 __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
3670 __ get_constant_pool(c_rarg1);
3671 __ mov(c_rarg3, r0);
3672 call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
3673 c_rarg1, c_rarg2, c_rarg3);
3674 // Must prevent reordering of stores for object initialization with stores that publish the new object.
3675 __ membar(Assembler::StoreStore);
3676 }
3677
3678 void TemplateTable::arraylength() {
3679 transition(atos, itos);
3680 __ null_check(r0, arrayOopDesc::length_offset_in_bytes());
3681 __ ldrw(r0, Address(r0, arrayOopDesc::length_offset_in_bytes()));
3682 }
3683
3684 void TemplateTable::checkcast()
3685 {
3686 transition(atos, atos);
3687 Label done, is_null, ok_is_subtype, quicked, resolved;
3688 __ cbz(r0, is_null);
3689
3690 // Get cpool & tags index
3691 __ get_cpool_and_tags(r2, r3); // r2=cpool, r3=tags array
3692 __ get_unsigned_2_byte_index_at_bcp(r19, 1); // r19=index
3693 // See if bytecode has already been quicked
3694 __ add(rscratch1, r3, Array<u1>::base_offset_in_bytes());
3695 __ lea(r1, Address(rscratch1, r19));
3696 __ ldarb(r1, r1);
3697 __ cmp(r1, (u1)JVM_CONSTANT_Class);
3698 __ br(Assembler::EQ, quicked);
3699
3700 __ push(atos); // save receiver for result, and for GC
3701 call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3702 // vm_result_2 has metadata result
3703 __ get_vm_result_2(r0, rthread);
3704 __ pop(r3); // restore receiver
3705 __ b(resolved);
3706
3707 // Get superklass in r0 and subklass in r3
3708 __ bind(quicked);
3709 __ mov(r3, r0); // Save object in r3; r0 needed for subtype check
3710 __ load_resolved_klass_at_offset(r2, r19, r0, rscratch1); // r0 = klass
3711
3712 __ bind(resolved);
3713 __ load_klass(r19, r3);
3714
3715 // Generate subtype check. Blows r2, r5. Object in r3.
3716 // Superklass in r0. Subklass in r19.
3717 __ gen_subtype_check(r19, ok_is_subtype);
3718
3719 // Come here on failure
3720 __ push(r3);
3721 // object is at TOS
3722 __ b(Interpreter::_throw_ClassCastException_entry);
3723
3724 // Come here on success
3725 __ bind(ok_is_subtype);
3726 __ mov(r0, r3); // Restore object in r3
3727
3728 // Collect counts on whether this test sees NULLs a lot or not.
3729 if (ProfileInterpreter) {
3730 __ b(done);
3731 __ bind(is_null);
3732 __ profile_null_seen(r2);
3733 } else {
3734 __ bind(is_null); // same as 'done'
3735 }
3736 __ bind(done);
3737 }
3738
3739 void TemplateTable::instanceof() {
3740 transition(atos, itos);
3741 Label done, is_null, ok_is_subtype, quicked, resolved;
3742 __ cbz(r0, is_null);
3743
3744 // Get cpool & tags index
3745 __ get_cpool_and_tags(r2, r3); // r2=cpool, r3=tags array
3746 __ get_unsigned_2_byte_index_at_bcp(r19, 1); // r19=index
3747 // See if bytecode has already been quicked
3748 __ add(rscratch1, r3, Array<u1>::base_offset_in_bytes());
3749 __ lea(r1, Address(rscratch1, r19));
3750 __ ldarb(r1, r1);
3751 __ cmp(r1, (u1)JVM_CONSTANT_Class);
3752 __ br(Assembler::EQ, quicked);
3753
3754 __ push(atos); // save receiver for result, and for GC
3755 call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3756 // vm_result_2 has metadata result
3757 __ get_vm_result_2(r0, rthread);
3758 __ pop(r3); // restore receiver
3759 __ verify_oop(r3);
3760 __ load_klass(r3, r3);
3761 __ b(resolved);
3762
3763 // Get superklass in r0 and subklass in r3
3764 __ bind(quicked);
3765 __ load_klass(r3, r0);
3766 __ load_resolved_klass_at_offset(r2, r19, r0, rscratch1);
3767
3768 __ bind(resolved);
3769
3770 // Generate subtype check. Blows r2, r5
3771 // Superklass in r0. Subklass in r3.
3772 __ gen_subtype_check(r3, ok_is_subtype);
3773
3774 // Come here on failure
3775 __ mov(r0, 0);
3776 __ b(done);
3777 // Come here on success
3778 __ bind(ok_is_subtype);
3779 __ mov(r0, 1);
3780
3781 // Collect counts on whether this test sees NULLs a lot or not.
3782 if (ProfileInterpreter) {
3783 __ b(done);
3784 __ bind(is_null);
3785 __ profile_null_seen(r2);
3786 } else {
3787 __ bind(is_null); // same as 'done'
3788 }
3789 __ bind(done);
3790 // r0 = 0: obj == NULL or obj is not an instanceof the specified klass
3791 // r0 = 1: obj != NULL and obj is an instanceof the specified klass
3792 }
3793
3794 //-----------------------------------------------------------------------------
3795 // Breakpoints
3796 void TemplateTable::_breakpoint() {
3797 // Note: We get here even if we are single stepping..
3798 // jbug inists on setting breakpoints at every bytecode
3799 // even if we are in single step mode.
3800
3801 transition(vtos, vtos);
3802
3803 // get the unpatched byte code
3804 __ get_method(c_rarg1);
3805 __ call_VM(noreg,
3806 CAST_FROM_FN_PTR(address,
3807 InterpreterRuntime::get_original_bytecode_at),
3808 c_rarg1, rbcp);
3809 __ mov(r19, r0);
3810
3811 // post the breakpoint event
3812 __ call_VM(noreg,
3813 CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint),
3814 rmethod, rbcp);
3815
3816 // complete the execution of original bytecode
3817 __ mov(rscratch1, r19);
3818 __ dispatch_only_normal(vtos);
3819 }
3820
3821 //-----------------------------------------------------------------------------
3822 // Exceptions
3823
3824 void TemplateTable::athrow() {
3825 transition(atos, vtos);
3826 __ null_check(r0);
3827 __ b(Interpreter::throw_exception_entry());
3828 }
3829
3830 //-----------------------------------------------------------------------------
3831 // Synchronization
3832 //
3833 // Note: monitorenter & exit are symmetric routines; which is reflected
3834 // in the assembly code structure as well
3835 //
3836 // Stack layout:
3837 //
3838 // [expressions ] <--- esp = expression stack top
3839 // ..
3840 // [expressions ]
3841 // [monitor entry] <--- monitor block top = expression stack bot
3842 // ..
3843 // [monitor entry]
3844 // [frame data ] <--- monitor block bot
3845 // ...
3846 // [saved rbp ] <--- rbp
3847 void TemplateTable::monitorenter()
3848 {
3849 transition(atos, vtos);
3850
3851 // check for NULL object
3852 __ null_check(r0);
3853
3854 __ resolve(IS_NOT_NULL, r0);
3855
3856 const Address monitor_block_top(
3857 rfp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3858 const Address monitor_block_bot(
3859 rfp, frame::interpreter_frame_initial_sp_offset * wordSize);
3860 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
3861
3862 Label allocated;
3863
3864 // initialize entry pointer
3865 __ mov(c_rarg1, zr); // points to free slot or NULL
3866
3867 // find a free slot in the monitor block (result in c_rarg1)
3868 {
3869 Label entry, loop, exit;
3870 __ ldr(c_rarg3, monitor_block_top); // points to current entry,
3871 // starting with top-most entry
3872 __ lea(c_rarg2, monitor_block_bot); // points to word before bottom
3873
3874 __ b(entry);
3875
3876 __ bind(loop);
3877 // check if current entry is used
3878 // if not used then remember entry in c_rarg1
3879 __ ldr(rscratch1, Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()));
3880 __ cmp(zr, rscratch1);
3881 __ csel(c_rarg1, c_rarg3, c_rarg1, Assembler::EQ);
3882 // check if current entry is for same object
3883 __ cmp(r0, rscratch1);
3884 // if same object then stop searching
3885 __ br(Assembler::EQ, exit);
3886 // otherwise advance to next entry
3887 __ add(c_rarg3, c_rarg3, entry_size);
3888 __ bind(entry);
3889 // check if bottom reached
3890 __ cmp(c_rarg3, c_rarg2);
3891 // if not at bottom then check this entry
3892 __ br(Assembler::NE, loop);
3893 __ bind(exit);
3894 }
3895
3896 __ cbnz(c_rarg1, allocated); // check if a slot has been found and
3897 // if found, continue with that on
3898
3899 // allocate one if there's no free slot
3900 {
3901 Label entry, loop;
3902 // 1. compute new pointers // rsp: old expression stack top
3903 __ ldr(c_rarg1, monitor_block_bot); // c_rarg1: old expression stack bottom
3904 __ sub(esp, esp, entry_size); // move expression stack top
3905 __ sub(c_rarg1, c_rarg1, entry_size); // move expression stack bottom
3906 __ mov(c_rarg3, esp); // set start value for copy loop
3907 __ str(c_rarg1, monitor_block_bot); // set new monitor block bottom
3908
3909 __ sub(sp, sp, entry_size); // make room for the monitor
3910
3911 __ b(entry);
3912 // 2. move expression stack contents
3913 __ bind(loop);
3914 __ ldr(c_rarg2, Address(c_rarg3, entry_size)); // load expression stack
3915 // word from old location
3916 __ str(c_rarg2, Address(c_rarg3, 0)); // and store it at new location
3917 __ add(c_rarg3, c_rarg3, wordSize); // advance to next word
3918 __ bind(entry);
3919 __ cmp(c_rarg3, c_rarg1); // check if bottom reached
3920 __ br(Assembler::NE, loop); // if not at bottom then
3921 // copy next word
3922 }
3923
3924 // call run-time routine
3925 // c_rarg1: points to monitor entry
3926 __ bind(allocated);
3927
3928 // Increment bcp to point to the next bytecode, so exception
3929 // handling for async. exceptions work correctly.
3930 // The object has already been poped from the stack, so the
3931 // expression stack looks correct.
3932 __ increment(rbcp);
3933
3934 // store object
3935 __ str(r0, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
3936 __ lock_object(c_rarg1);
3937
3938 // check to make sure this monitor doesn't cause stack overflow after locking
3939 __ save_bcp(); // in case of exception
3940 __ generate_stack_overflow_check(0);
3941
3942 // The bcp has already been incremented. Just need to dispatch to
3943 // next instruction.
3944 __ dispatch_next(vtos);
3945 }
3946
3947
3948 void TemplateTable::monitorexit()
3949 {
3950 transition(atos, vtos);
3951
3952 // check for NULL object
3953 __ null_check(r0);
3954
3955 __ resolve(IS_NOT_NULL, r0);
3956
3957 const Address monitor_block_top(
3958 rfp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3959 const Address monitor_block_bot(
3960 rfp, frame::interpreter_frame_initial_sp_offset * wordSize);
3961 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
3962
3963 Label found;
3964
3965 // find matching slot
3966 {
3967 Label entry, loop;
3968 __ ldr(c_rarg1, monitor_block_top); // points to current entry,
3969 // starting with top-most entry
3970 __ lea(c_rarg2, monitor_block_bot); // points to word before bottom
3971 // of monitor block
3972 __ b(entry);
3973
3974 __ bind(loop);
3975 // check if current entry is for same object
3976 __ ldr(rscratch1, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
3977 __ cmp(r0, rscratch1);
3978 // if same object then stop searching
3979 __ br(Assembler::EQ, found);
3980 // otherwise advance to next entry
3981 __ add(c_rarg1, c_rarg1, entry_size);
3982 __ bind(entry);
3983 // check if bottom reached
3984 __ cmp(c_rarg1, c_rarg2);
3985 // if not at bottom then check this entry
3986 __ br(Assembler::NE, loop);
3987 }
3988
3989 // error handling. Unlocking was not block-structured
3990 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3991 InterpreterRuntime::throw_illegal_monitor_state_exception));
3992 __ should_not_reach_here();
3993
3994 // call run-time routine
3995 __ bind(found);
3996 __ push_ptr(r0); // make sure object is on stack (contract with oopMaps)
3997 __ unlock_object(c_rarg1);
3998 __ pop_ptr(r0); // discard object
3999 }
4000
4001
4002 // Wide instructions
4003 void TemplateTable::wide()
4004 {
4005 __ load_unsigned_byte(r19, at_bcp(1));
4006 __ mov(rscratch1, (address)Interpreter::_wentry_point);
4007 __ ldr(rscratch1, Address(rscratch1, r19, Address::uxtw(3)));
4008 __ br(rscratch1);
4009 }
4010
4011
4012 // Multi arrays
4013 void TemplateTable::multianewarray() {
4014 transition(vtos, atos);
4015 __ load_unsigned_byte(r0, at_bcp(3)); // get number of dimensions
4016 // last dim is on top of stack; we want address of first one:
4017 // first_addr = last_addr + (ndims - 1) * wordSize
4018 __ lea(c_rarg1, Address(esp, r0, Address::uxtw(3)));
4019 __ sub(c_rarg1, c_rarg1, wordSize);
4020 call_VM(r0,
4021 CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray),
4022 c_rarg1);
4023 __ load_unsigned_byte(r1, at_bcp(3));
4024 __ lea(esp, Address(esp, r1, Address::uxtw(3)));
4025 }