2425 intptr_t the_pc = (intptr_t) __ pc();
// Register an oop map at the current code offset; the same pc becomes the
// last-Java-frame pc below so the runtime can walk the stack across the
// upcoming runtime calls and the native call.
2426 oop_maps->add_gc_map(the_pc - start, map);
2427
2428 __ set_last_Java_frame(rsp, noreg, (address)the_pc);
2429
2430
2431 // We have all of the arguments set up at this point. We must not touch any
2432 // argument registers here (oop-safety of save/restore is the concern -- TODO confirm).
2433
// DTrace method-entry probe; emitted code is skipped at runtime when
// DTraceMethodProbes is false (see SkipIfEqual).
2434 {
2435 SkipIfEqual skip(masm, &DTraceMethodProbes, false);
2436 // protect the args we've loaded
2437 save_args(masm, total_c_args, c_arg, out_regs);
2438 __ mov_metadata(c_rarg1, method());
2439 __ call_VM_leaf(
2440 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2441 r15_thread, c_rarg1);
2442 restore_args(masm, total_c_args, c_arg, out_regs);
2443 }
2444
2445 // RedefineClasses() tracing support for obsolete method entry
2446 if (log_is_enabled(Trace, redefine, class, obsolete)) {
2447 // protect the args we've loaded
2448 save_args(masm, total_c_args, c_arg, out_regs);
2449 __ mov_metadata(c_rarg1, method());
2450 __ call_VM_leaf(
2451 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2452 r15_thread, c_rarg1);
2453 restore_args(masm, total_c_args, c_arg, out_regs);
2454 }
2455
2456 // Lock a synchronized method
2457
2458 // Register definitions used by locking and unlocking
2459
2460 const Register swap_reg = rax; // Must use rax for cmpxchg instruction
2461 const Register obj_reg = rbx; // Will contain the oop
2462 const Register lock_reg = r13; // Address of compiler lock object (BasicLock)
2463 const Register old_hdr = r13; // value of old header at unlock time
// NOTE: lock_reg and old_hdr deliberately share r13; they are live in
// different phases (lock vs. unlock).
2464
// NOTE(review): original lines 2465-2503 (the initial mark-word load and
// cmpxchg fast-path attempt) are not visible in this excerpt.
2504
2505 // Test if the oopMark is an obvious stack pointer, i.e.,
2506 // 1) (mark & 3) == 0, and
2507 // 2) rsp <= mark < mark + os::pagesize()
2508 // These 3 tests can be done by evaluating the following
2509 // expression: ((mark - rsp) & (3 - os::vm_page_size())),
2510 // assuming both stack pointer and pagesize have their
2511 // least significant 2 bits clear.
2512 // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
2513
2514 __ subptr(swap_reg, rsp);
2515 __ andptr(swap_reg, 3 - os::vm_page_size());
2516
2517 // Save the test result, for recursive case, the result is zero
2518 __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
// The movptr above does not modify flags; the jcc below still tests the
// result of the andptr.
2519 __ jcc(Assembler::notEqual, slow_path_lock);
2520
2521 // Slow path will re-enter here
2522
2523 __ bind(lock_done);
2524 }
2525
2526
2527 // Finally just about ready to make the JNI call
2528
2529
2530 // get JNIEnv* which is first argument to native
// Critical natives do not receive a JNIEnv*; c_rarg0 is left untouched.
2531 if (!is_critical_native) {
2532 __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
2533 }
2534
2535 // Now set thread in native
2536 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
2537
2538 __ call(RuntimeAddress(native_func));
2539
2540 // Verify or restore cpu control state after JNI call
// Guards against the native code leaving FPU/MXCSR state the VM does not expect.
2541 __ restore_cpu_control_state_after_jni();
2542
2543 // Unpack native results.
2639 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);
2640 __ bind(after_transition);
2641
// If a stack overflow disabled the yellow/reserved guard pages, jump to the
// reguard stub to re-enable them before returning (stub not visible in this
// excerpt; it re-enters at reguard_done).
2642 Label reguard;
2643 Label reguard_done;
2644 __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_reserved_disabled);
2645 __ jcc(Assembler::equal, reguard);
2646 __ bind(reguard_done);
2647
2648 // native result if any is live
2649
2650 // Unlock
2651 Label unlock_done;
2652 Label slow_path_unlock;
2653 if (method->is_synchronized()) {
2654
2655 // Get locked oop from the handle we passed to jni
2656 __ movptr(obj_reg, Address(oop_handle_reg, 0));
2657 __ resolve(IS_NOT_NULL, obj_reg);
2658
2659 Label done;
2660
2661 if (UseBiasedLocking) {
2662 __ biased_locking_exit(obj_reg, old_hdr, done);
2663 }
2664
2665 // Simple recursive lock?
// A zero displaced header in the on-stack lock slot marks a recursive
// enter (see the lock path: "for recursive case, the result is zero"),
// so there is nothing to unlock.
2666
2667 __ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), (int32_t)NULL_WORD);
2668 __ jcc(Assembler::equal, done);
2669
2670 // Must save rax if it is live now because cmpxchg must use it
2671 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2672 save_native_result(masm, ret_type, stack_slots);
2673 }
2674
2675
2676 // get address of the stack lock
2677 __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2678 // get old displaced header
2679 __ movptr(old_hdr, Address(rax, 0));
2680
2681 // Atomic swap old header if oop still contains the stack lock
2682 __ lock();
2683 __ cmpxchgptr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2684 __ jcc(Assembler::notEqual, slow_path_unlock);
2685
2686 // slow path re-enters here
2687 __ bind(unlock_done);
2688 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2689 restore_native_result(masm, ret_type, stack_slots);
2690 }
2691
2692 __ bind(done);
2693
2694 }
// DTrace method-exit probe; the native result (if any) is saved and
// restored around the runtime call.
2695 {
2696 SkipIfEqual skip(masm, &DTraceMethodProbes, false);
2697 save_native_result(masm, ret_type, stack_slots);
2698 __ mov_metadata(c_rarg1, method());
2699 __ call_VM_leaf(
2700 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2701 r15_thread, c_rarg1);
2702 restore_native_result(masm, ret_type, stack_slots);
2703 }
2704
// Tear down the frame anchor established before the native call.
// NOTE(review): the bool argument presumably controls whether rbp is also
// cleared -- confirm against MacroAssembler::reset_last_Java_frame.
2705 __ reset_last_Java_frame(false);
2706
2707 // Unbox oop result, e.g. JNIHandles::resolve value.
2708 if (is_reference_type(ret_type)) {
2709 __ resolve_jobject(rax /* value */,
2710 r15_thread /* thread */,
2711 rcx /* tmp */);
2712 }
2713
2714 if (CheckJNICalls) {
|
2425 intptr_t the_pc = (intptr_t) __ pc();
// Register an oop map at the current code offset; the same pc becomes the
// last-Java-frame pc below so the runtime can walk the stack across the
// upcoming runtime calls and the native call.
2426 oop_maps->add_gc_map(the_pc - start, map);
2427
2428 __ set_last_Java_frame(rsp, noreg, (address)the_pc);
2429
2430
2431 // We have all of the arguments set up at this point. We must not touch any
2432 // argument registers here (oop-safety of save/restore is the concern -- TODO confirm).
2433
// DTrace method-entry probe; emitted code is skipped at runtime when
// DTraceMethodProbes is false (see SkipIfEqual).
2434 {
2435 SkipIfEqual skip(masm, &DTraceMethodProbes, false);
2436 // protect the args we've loaded
2437 save_args(masm, total_c_args, c_arg, out_regs);
2438 __ mov_metadata(c_rarg1, method());
2439 __ call_VM_leaf(
2440 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2441 r15_thread, c_rarg1);
2442 restore_args(masm, total_c_args, c_arg, out_regs);
2443 }
2444
// ThreadSanitizer method-entry event. TSAN_RUNTIME_ONLY's definition is not
// visible here -- presumably this emits only when TSan support is active.
// NOTE(review): this probe uses call_VM (which can GC/throw) while the
// neighboring probes use call_VM_leaf; confirm the args spilled by save_args
// are safe across a potential safepoint here.
2445 TSAN_RUNTIME_ONLY(
2446 // protect the args we've loaded
2447 save_args(masm, total_c_args, c_arg, out_regs);
2448 __ call_VM(noreg,
2449 CAST_FROM_FN_PTR(address, SharedRuntime::tsan_interp_method_entry),
2450 r15_thread);
2451 restore_args(masm, total_c_args, c_arg, out_regs);
2452 );
2453
2454 // RedefineClasses() tracing support for obsolete method entry
2455 if (log_is_enabled(Trace, redefine, class, obsolete)) {
2456 // protect the args we've loaded
2457 save_args(masm, total_c_args, c_arg, out_regs);
2458 __ mov_metadata(c_rarg1, method());
2459 __ call_VM_leaf(
2460 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2461 r15_thread, c_rarg1);
2462 restore_args(masm, total_c_args, c_arg, out_regs);
2463 }
2464
2465 // Lock a synchronized method
2466
2467 // Register definitions used by locking and unlocking
2468
2469 const Register swap_reg = rax; // Must use rax for cmpxchg instruction
2470 const Register obj_reg = rbx; // Will contain the oop
2471 const Register lock_reg = r13; // Address of compiler lock object (BasicLock)
2472 const Register old_hdr = r13; // value of old header at unlock time
// NOTE: lock_reg and old_hdr deliberately share r13; they are live in
// different phases (lock vs. unlock).
2473
// NOTE(review): original lines 2474-2512 (the initial mark-word load and
// cmpxchg fast-path attempt) are not visible in this excerpt.
2513
2514 // Test if the oopMark is an obvious stack pointer, i.e.,
2515 // 1) (mark & 3) == 0, and
2516 // 2) rsp <= mark < mark + os::pagesize()
2517 // These 3 tests can be done by evaluating the following
2518 // expression: ((mark - rsp) & (3 - os::vm_page_size())),
2519 // assuming both stack pointer and pagesize have their
2520 // least significant 2 bits clear.
2521 // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
2522
2523 __ subptr(swap_reg, rsp);
2524 __ andptr(swap_reg, 3 - os::vm_page_size());
2525
2526 // Save the test result, for recursive case, the result is zero
2527 __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
// The movptr above does not modify flags; the jcc below still tests the
// result of the andptr.
2528 __ jcc(Assembler::notEqual, slow_path_lock);
2529
2530 // Slow path will re-enter here
2531
2532 __ bind(lock_done);
2533
// Notify TSan that obj_reg has been locked, after both fast and slow paths
// converge at lock_done. pusha/popa preserve all general-purpose registers
// around the runtime call.
2534 TSAN_RUNTIME_ONLY(
2535 __ pusha();
2536 __ call_VM(noreg,
2537 CAST_FROM_FN_PTR(address, SharedRuntime::tsan_oop_lock),
2538 obj_reg);
2539 __ popa();
2540 );
2541 }
2542
2543
2544 // Finally just about ready to make the JNI call
2545
2546
2547 // get JNIEnv* which is first argument to native
// Critical natives do not receive a JNIEnv*; c_rarg0 is left untouched.
2548 if (!is_critical_native) {
2549 __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
2550 }
2551
2552 // Now set thread in native
2553 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
2554
2555 __ call(RuntimeAddress(native_func));
2556
2557 // Verify or restore cpu control state after JNI call
// Guards against the native code leaving FPU/MXCSR state the VM does not expect.
2558 __ restore_cpu_control_state_after_jni();
2559
2560 // Unpack native results.
2656 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);
2657 __ bind(after_transition);
2658
// If a stack overflow disabled the yellow/reserved guard pages, jump to the
// reguard stub to re-enable them before returning (stub not visible in this
// excerpt; it re-enters at reguard_done).
2659 Label reguard;
2660 Label reguard_done;
2661 __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_reserved_disabled);
2662 __ jcc(Assembler::equal, reguard);
2663 __ bind(reguard_done);
2664
2665 // native result if any is live
2666
2667 // Unlock
2668 Label unlock_done;
2669 Label slow_path_unlock;
2670 if (method->is_synchronized()) {
2671
2672 // Get locked oop from the handle we passed to jni
2673 __ movptr(obj_reg, Address(oop_handle_reg, 0));
2674 __ resolve(IS_NOT_NULL, obj_reg);
2675
// Report the unlock to TSan before the actual monitor exit below; all
// general-purpose registers (including the native result in rax, if any)
// are preserved by pusha/popa.
2676 TSAN_RUNTIME_ONLY(
2677 __ pusha();
2678 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
2679 SharedRuntime::tsan_oop_unlock),
2680 obj_reg);
2681 __ popa();
2682 );
2683
2684 Label done;
2685
2686 if (UseBiasedLocking) {
2687 __ biased_locking_exit(obj_reg, old_hdr, done);
2688 }
2689
2690 // Simple recursive lock?
// A zero displaced header in the on-stack lock slot marks a recursive
// enter (see the lock path: "for recursive case, the result is zero"),
// so there is nothing to unlock.
2691
2692 __ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), (int32_t)NULL_WORD);
2693 __ jcc(Assembler::equal, done);
2694
2695 // Must save rax if it is live now because cmpxchg must use it
2696 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2697 save_native_result(masm, ret_type, stack_slots);
2698 }
2699
2700
2701 // get address of the stack lock
2702 __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2703 // get old displaced header
2704 __ movptr(old_hdr, Address(rax, 0));
2705
2706 // Atomic swap old header if oop still contains the stack lock
2707 __ lock();
2708 __ cmpxchgptr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2709 __ jcc(Assembler::notEqual, slow_path_unlock);
2710
2711 // slow path re-enters here
2712 __ bind(unlock_done);
2713 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2714 restore_native_result(masm, ret_type, stack_slots);
2715 }
2716
2717 __ bind(done);
2718
2719 }
2720
// ThreadSanitizer method-exit event; the native result is preserved around
// the call.
// NOTE(review): unlike tsan_interp_method_entry above, this is a call_VM_leaf
// with no thread/method arguments -- confirm the entry point takes none.
2721 TSAN_RUNTIME_ONLY(
2722 save_native_result(masm, ret_type, stack_slots);
2723 __ call_VM_leaf(
2724 CAST_FROM_FN_PTR(address, SharedRuntime::tsan_interp_method_exit));
2725 restore_native_result(masm, ret_type, stack_slots);
2726 );
2727
// DTrace method-exit probe; the native result (if any) is saved and
// restored around the runtime call.
2728 {
2729 SkipIfEqual skip(masm, &DTraceMethodProbes, false);
2730 save_native_result(masm, ret_type, stack_slots);
2731 __ mov_metadata(c_rarg1, method());
2732 __ call_VM_leaf(
2733 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2734 r15_thread, c_rarg1);
2735 restore_native_result(masm, ret_type, stack_slots);
2736 }
2737
// Tear down the frame anchor established before the native call.
// NOTE(review): the bool argument presumably controls whether rbp is also
// cleared -- confirm against MacroAssembler::reset_last_Java_frame.
2738 __ reset_last_Java_frame(false);
2739
2740 // Unbox oop result, e.g. JNIHandles::resolve value.
2741 if (is_reference_type(ret_type)) {
2742 __ resolve_jobject(rax /* value */,
2743 r15_thread /* thread */,
2744 rcx /* tmp */);
2745 }
2746
2747 if (CheckJNICalls) {
|