src/hotspot/cpu/x86/templateTable_x86.cpp
__ mov(NOT_LP64(rax) LP64_ONLY(c_rarg1), array);
__ jump(ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
__ bind(skip);
}
void TemplateTable::iaload() {
transition(itos, itos);
// rax: index
// rdx: array
index_check(rdx, rax); // kills rbx
! __ access_load_at(T_INT, IN_HEAP | IS_ARRAY, rax,
! Address(rdx, rax, Address::times_4,
! arrayOopDesc::base_offset_in_bytes(T_INT)),
! noreg, noreg);
}
void TemplateTable::laload() {
transition(itos, ltos);
// rax: index
// rdx: array
index_check(rdx, rax); // kills rbx
NOT_LP64(__ mov(rbx, rax));
// rbx: index
! __ access_load_at(T_LONG, IN_HEAP | IS_ARRAY, noreg /* ltos */,
! Address(rdx, rbx, Address::times_8,
! arrayOopDesc::base_offset_in_bytes(T_LONG)),
! noreg, noreg);
}
void TemplateTable::faload() {
transition(itos, ftos);
// rax: index
// rdx: array
index_check(rdx, rax); // kills rbx
! __ access_load_at(T_FLOAT, IN_HEAP | IS_ARRAY, noreg /* ftos */,
! Address(rdx, rax,
! Address::times_4,
! arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
! noreg, noreg);
}
void TemplateTable::daload() {
transition(itos, dtos);
// rax: index
// rdx: array
index_check(rdx, rax); // kills rbx
! __ access_load_at(T_DOUBLE, IN_HEAP | IS_ARRAY, noreg /* dtos */,
! Address(rdx, rax,
! Address::times_8,
! arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
! noreg, noreg);
}
void TemplateTable::aaload() {
transition(itos, atos);
// rax: index
// rdx: array
index_check(rdx, rax); // kills rbx
! do_oop_load(_masm,
! Address(rdx, rax,
! UseCompressedOops ? Address::times_4 : Address::times_ptr,
! arrayOopDesc::base_offset_in_bytes(T_OBJECT)),
! rax,
! IS_ARRAY);
}
void TemplateTable::baload() {
transition(itos, itos);
// rax: index
// rdx: array
index_check(rdx, rax); // kills rbx
! __ access_load_at(T_BYTE, IN_HEAP | IS_ARRAY, rax,
! Address(rdx, rax, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)),
! noreg, noreg);
}
void TemplateTable::caload() {
transition(itos, itos);
// rax: index
// rdx: array
index_check(rdx, rax); // kills rbx
! __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, rax,
! Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)),
! noreg, noreg);
}
// iload followed by caload frequent pair
void TemplateTable::fast_icaload() {
transition(vtos, itos);
__ mov(NOT_LP64(rax) LP64_ONLY(c_rarg1), array);
__ jump(ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
__ bind(skip);
}
+ #if INCLUDE_TSAN
+
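+ // Map a TSAN read instrumentation function to SharedRuntime::tsan_acquire
+ // and a TSAN write instrumentation function to SharedRuntime::tsan_release,
+ // so that volatile accesses can be reported to the TSAN runtime with the
+ // matching acquire/release semantics.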
+ TemplateTable::TsanMemoryReleaseAcquireFunction TemplateTable::tsan_release_acquire_method(
+ TsanMemoryReadWriteFunction tsan_function) {
+ if (tsan_function == SharedRuntime::tsan_read1
+ || tsan_function == SharedRuntime::tsan_read2
+ || tsan_function == SharedRuntime::tsan_read4
+ || tsan_function == SharedRuntime::tsan_read8) {
+ return SharedRuntime::tsan_acquire;
+ } else if (tsan_function == SharedRuntime::tsan_write1
+ || tsan_function == SharedRuntime::tsan_write2
+ || tsan_function == SharedRuntime::tsan_write4
+ || tsan_function == SharedRuntime::tsan_write8) {
+ return SharedRuntime::tsan_release;
+ }
+ ShouldNotReachHere();
+ return NULL;
+ }
+
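+ // Instrument a field get or put. Expects the ConstantPoolCacheEntry flags
+ // in rdx (the callers copy them there under TSAN_RUNTIME_ONLY). Volatile
+ // accesses, and oop accesses annotated to be ignored, are reported to the
+ // TSAN runtime as acquire/release. Other accesses are reported as plain
+ // reads/writes only when ThreadSanitizerJavaMemory is enabled, and final
+ // or tsan-ignored fields are never reported as plain accesses.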
+ void TemplateTable::tsan_observe_get_or_put(
+ const Address &field,
+ Register flags,
+ TsanMemoryReadWriteFunction tsan_function,
+ TosState tos) {
+ assert(flags == rdx, "flags should be in rdx register");
+ assert(ThreadSanitizer, "ThreadSanitizer should be set");
+
+ TsanMemoryReleaseAcquireFunction releaseAcquireFunction =
+ tsan_release_acquire_method(tsan_function);
+
+ Label done, notAcquireRelease;
+
+ // We could save some instructions by only saving the registers we need.
+ __ pusha();
+ // pusha() doesn't save xmm0, which tsan_function clobbers and the
+ // interpreter still needs.
+ // This really only needs to be done for some of the float/double accesses,
+ // but it's here because it's cleaner.
+ __ push_d(xmm0);
+ DEBUG_ONLY(
+ __ pusha();
+ __ movptr(c_rarg0, field.base());
+ __ leaq(c_rarg1, field);
+ __ subq(c_rarg1, field.base());
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::verify_oop_index),
+ c_rarg0 /* oop */, c_rarg1 /* index */);
+ __ popa();
+ );
+ // For volatile reads/writes, use an acquire/release.
+ // If a reference is annotated to be ignored, assume it is safe to access
+ // the object it refers to, and create a happens-before relation between
+ // the accesses to the reference.
+ int32_t acquire_release_mask = 1 << ConstantPoolCacheEntry::is_volatile_shift |
+ ((tos == atos) ? 1 << ConstantPoolCacheEntry::is_tsan_ignore_shift : 0);
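+ // For an atos access, both mask bits may be set; either bit being set
+ // sends the access down the acquire/release path below.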
+ __ testl(flags, acquire_release_mask);
+ __ jcc(Assembler::zero, notAcquireRelease);
+
+ __ leaq(c_rarg0, field);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, releaseAcquireFunction), c_rarg0);
+ if (ThreadSanitizerJavaMemory) {
+ __ jmp(done);
+
+ __ bind(notAcquireRelease);
+ // Ignore reads/writes to final fields. They can't be racy.
+ int32_t ignore_mask = 1 << ConstantPoolCacheEntry::is_final_shift |
+ 1 << ConstantPoolCacheEntry::is_tsan_ignore_shift;
+ __ testl(flags, ignore_mask);
+ __ jcc(Assembler::notZero, done);
+
+ __ leaq(c_rarg0, field);
+ __ get_method(c_rarg1);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, tsan_function),
+ c_rarg0 /* addr */, c_rarg1 /* method */, rbcp /* bcp */);
+
+ __ bind(done);
+ } else {
+ __ bind(notAcquireRelease);
+ }
+ __ pop_d(xmm0);
+ __ popa();
+ }
+
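+ // Instrument an array element load or store. Array elements have no access
+ // flags to inspect, so this unconditionally reports a plain read or write;
+ // it is a no-op unless ThreadSanitizerJavaMemory is enabled.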
+ void TemplateTable::tsan_observe_load_or_store(
+ const Address& field, TsanMemoryReadWriteFunction tsan_function) {
+ assert(ThreadSanitizer, "ThreadSanitizer should be set");
+ if (!ThreadSanitizerJavaMemory) {
+ return;
+ }
+ // We could save some instructions by only saving the registers we need.
+ __ pusha();
+ // pusha() doesn't save xmm0, which tsan_function clobbers and the
+ // interpreter still needs.
+ // This really only needs to be done for some of the float/double accesses,
+ // but it's here because it's cleaner.
+ __ push_d(xmm0);
+ DEBUG_ONLY(
+ __ pusha();
+ __ movptr(c_rarg0, field.base());
+ __ leaq(c_rarg1, field);
+ __ subq(c_rarg1, field.base());
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::verify_oop_index),
+ c_rarg0 /* oop */, c_rarg1 /* index */);
+ __ popa();
+ );
+ __ leaq(c_rarg0, field);
+ __ get_method(c_rarg1);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, tsan_function),
+ c_rarg0 /* addr */, c_rarg1 /* method */, rbcp /* bcp */);
+ __ pop_d(xmm0);
+ __ popa();
+ }
+
+ #endif // INCLUDE_TSAN
+
void TemplateTable::iaload() {
transition(itos, itos);
// rax: index
// rdx: array
index_check(rdx, rax); // kills rbx
! Address addr(rdx, rax, Address::times_4,
! arrayOopDesc::base_offset_in_bytes(T_INT));
! TSAN_RUNTIME_ONLY(tsan_observe_load_or_store(addr, SharedRuntime::tsan_read4));
! __ access_load_at(T_INT, IN_HEAP | IS_ARRAY, rax, addr, noreg, noreg);
}
void TemplateTable::laload() {
transition(itos, ltos);
// rax: index
// rdx: array
index_check(rdx, rax); // kills rbx
NOT_LP64(__ mov(rbx, rax));
// rbx: index
! Address addr(rdx, rbx, Address::times_8,
! arrayOopDesc::base_offset_in_bytes(T_LONG));
! TSAN_RUNTIME_ONLY(tsan_observe_load_or_store(addr, SharedRuntime::tsan_read8));
! __ access_load_at(T_LONG, IN_HEAP | IS_ARRAY, noreg /* ltos */, addr, noreg,
+ noreg);
}
void TemplateTable::faload() {
transition(itos, ftos);
// rax: index
// rdx: array
index_check(rdx, rax); // kills rbx
! Address addr(rdx, rax, Address::times_4,
! arrayOopDesc::base_offset_in_bytes(T_FLOAT));
! TSAN_RUNTIME_ONLY(tsan_observe_load_or_store(addr, SharedRuntime::tsan_read4));
! __ access_load_at(T_FLOAT, IN_HEAP | IS_ARRAY, noreg /* ftos */, addr, noreg,
! noreg);
}
void TemplateTable::daload() {
transition(itos, dtos);
// rax: index
// rdx: array
index_check(rdx, rax); // kills rbx
! Address addr(rdx, rax, Address::times_8,
! arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
! TSAN_RUNTIME_ONLY(tsan_observe_load_or_store(addr, SharedRuntime::tsan_read8));
! __ access_load_at(T_DOUBLE, IN_HEAP | IS_ARRAY, noreg /* dtos */, addr, noreg,
! noreg);
}
void TemplateTable::aaload() {
transition(itos, atos);
// rax: index
// rdx: array
index_check(rdx, rax); // kills rbx
! Address addr(rdx, rax,
! UseCompressedOops ? Address::times_4 : Address::times_ptr,
! arrayOopDesc::base_offset_in_bytes(T_OBJECT));
! TSAN_RUNTIME_ONLY(tsan_observe_load_or_store(
! addr, UseCompressedOops ? SharedRuntime::tsan_read4
! : SharedRuntime::tsan_read8));
+ do_oop_load(_masm, addr, rax, IS_ARRAY);
}
void TemplateTable::baload() {
transition(itos, itos);
// rax: index
// rdx: array
index_check(rdx, rax); // kills rbx
! Address addr(rdx, rax, Address::times_1,
! arrayOopDesc::base_offset_in_bytes(T_BYTE));
! TSAN_RUNTIME_ONLY(tsan_observe_load_or_store(addr, SharedRuntime::tsan_read1));
+ __ access_load_at(T_BYTE, IN_HEAP | IS_ARRAY, rax, addr, noreg, noreg);
}
void TemplateTable::caload() {
transition(itos, itos);
// rax: index
// rdx: array
index_check(rdx, rax); // kills rbx
! Address addr(rdx, rax, Address::times_2,
! arrayOopDesc::base_offset_in_bytes(T_CHAR));
! TSAN_RUNTIME_ONLY(tsan_observe_load_or_store(addr, SharedRuntime::tsan_read2));
+ __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, rax, addr, noreg, noreg);
}
// iload followed by caload frequent pair
void TemplateTable::fast_icaload() {
transition(vtos, itos);
void TemplateTable::saload() {
transition(itos, itos);
// rax: index
// rdx: array
index_check(rdx, rax); // kills rbx
! __ access_load_at(T_SHORT, IN_HEAP | IS_ARRAY, rax,
! Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_SHORT)),
! noreg, noreg);
}
void TemplateTable::iload(int n) {
transition(vtos, itos);
__ movl(rax, iaddress(n));
void TemplateTable::saload() {
transition(itos, itos);
// rax: index
// rdx: array
index_check(rdx, rax); // kills rbx
! Address addr(rdx, rax, Address::times_2,
! arrayOopDesc::base_offset_in_bytes(T_SHORT));
! TSAN_RUNTIME_ONLY(tsan_observe_load_or_store(addr, SharedRuntime::tsan_read2));
+ __ access_load_at(T_SHORT, IN_HEAP | IS_ARRAY, rax, addr, noreg, noreg);
}
void TemplateTable::iload(int n) {
transition(vtos, itos);
__ movl(rax, iaddress(n));
__ pop_i(rbx);
// rax: value
// rbx: index
// rdx: array
index_check(rdx, rbx); // prefer index in rbx
! __ access_store_at(T_INT, IN_HEAP | IS_ARRAY,
! Address(rdx, rbx, Address::times_4,
! arrayOopDesc::base_offset_in_bytes(T_INT)),
! rax, noreg, noreg);
}
void TemplateTable::lastore() {
transition(ltos, vtos);
__ pop_i(rbx);
// rax: low(value)
// rcx: array
// rdx: high(value)
index_check(rcx, rbx); // prefer index in rbx
// rbx: index
! __ access_store_at(T_LONG, IN_HEAP | IS_ARRAY,
! Address(rcx, rbx, Address::times_8,
! arrayOopDesc::base_offset_in_bytes(T_LONG)),
! noreg /* ltos */, noreg, noreg);
}
void TemplateTable::fastore() {
transition(ftos, vtos);
__ pop_i(rbx);
// value is in UseSSE >= 1 ? xmm0 : ST(0)
// rbx: index
// rdx: array
index_check(rdx, rbx); // prefer index in rbx
! __ access_store_at(T_FLOAT, IN_HEAP | IS_ARRAY,
! Address(rdx, rbx, Address::times_4,
! arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
! noreg /* ftos */, noreg, noreg);
}
void TemplateTable::dastore() {
transition(dtos, vtos);
__ pop_i(rbx);
// value is in UseSSE >= 2 ? xmm0 : ST(0)
// rbx: index
// rdx: array
index_check(rdx, rbx); // prefer index in rbx
! __ access_store_at(T_DOUBLE, IN_HEAP | IS_ARRAY,
! Address(rdx, rbx, Address::times_8,
! arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
! noreg /* dtos */, noreg, noreg);
}
void TemplateTable::aastore() {
Label is_null, ok_is_subtype, done;
transition(vtos, vtos);
__ pop_i(rbx);
// rax: value
// rbx: index
// rdx: array
index_check(rdx, rbx); // prefer index in rbx
! Address addr(rdx, rbx, Address::times_4,
! arrayOopDesc::base_offset_in_bytes(T_INT));
! TSAN_RUNTIME_ONLY(tsan_observe_load_or_store(addr, SharedRuntime::tsan_write4));
! __ access_store_at(T_INT, IN_HEAP | IS_ARRAY, addr, rax, noreg, noreg);
}
void TemplateTable::lastore() {
transition(ltos, vtos);
__ pop_i(rbx);
// rax: low(value)
// rcx: array
// rdx: high(value)
index_check(rcx, rbx); // prefer index in rbx
// rbx: index
! Address addr(rcx, rbx, Address::times_8,
! arrayOopDesc::base_offset_in_bytes(T_LONG));
! TSAN_RUNTIME_ONLY(tsan_observe_load_or_store(addr, SharedRuntime::tsan_write8));
! __ access_store_at(T_LONG, IN_HEAP | IS_ARRAY, addr, noreg /* ltos */, noreg,
+ noreg);
}
void TemplateTable::fastore() {
transition(ftos, vtos);
__ pop_i(rbx);
// value is in UseSSE >= 1 ? xmm0 : ST(0)
// rbx: index
// rdx: array
index_check(rdx, rbx); // prefer index in rbx
! Address addr(rdx, rbx, Address::times_4,
! arrayOopDesc::base_offset_in_bytes(T_FLOAT));
! TSAN_RUNTIME_ONLY(tsan_observe_load_or_store(addr, SharedRuntime::tsan_write4));
! __ access_store_at(T_FLOAT, IN_HEAP | IS_ARRAY, addr, noreg /* ftos */, noreg,
+ noreg);
}
void TemplateTable::dastore() {
transition(dtos, vtos);
__ pop_i(rbx);
// value is in UseSSE >= 2 ? xmm0 : ST(0)
// rbx: index
// rdx: array
index_check(rdx, rbx); // prefer index in rbx
! Address addr(rdx, rbx, Address::times_8,
! arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
! TSAN_RUNTIME_ONLY(tsan_observe_load_or_store(addr, SharedRuntime::tsan_write8));
! __ access_store_at(T_DOUBLE, IN_HEAP | IS_ARRAY, addr, noreg /* dtos */,
+ noreg, noreg);
}
void TemplateTable::aastore() {
Label is_null, ok_is_subtype, done;
transition(vtos, vtos);
Address element_address(rdx, rcx,
UseCompressedOops ? Address::times_4 : Address::times_ptr,
arrayOopDesc::base_offset_in_bytes(T_OBJECT));
+ TSAN_RUNTIME_ONLY(tsan_observe_load_or_store(
+ element_address, UseCompressedOops ? SharedRuntime::tsan_write4
+ : SharedRuntime::tsan_write8));
+
index_check_without_pop(rdx, rcx); // kills rbx
__ testptr(rax, rax);
__ jcc(Assembler::zero, is_null);
// Move subklass into rbx
__ testl(rcx, diffbit);
Label L_skip;
__ jccb(Assembler::zero, L_skip);
__ andl(rax, 1); // if it is a T_BOOLEAN array, mask the stored value to 0/1
__ bind(L_skip);
! __ access_store_at(T_BYTE, IN_HEAP | IS_ARRAY,
! Address(rdx, rbx, Address::times_1,
! arrayOopDesc::base_offset_in_bytes(T_BYTE)),
! rax, noreg, noreg);
}
void TemplateTable::castore() {
transition(itos, vtos);
__ pop_i(rbx);
// rax: value
// rbx: index
// rdx: array
index_check(rdx, rbx); // prefer index in rbx
! __ access_store_at(T_CHAR, IN_HEAP | IS_ARRAY,
! Address(rdx, rbx, Address::times_2,
! arrayOopDesc::base_offset_in_bytes(T_CHAR)),
! rax, noreg, noreg);
}
void TemplateTable::sastore() {
castore();
__ testl(rcx, diffbit);
Label L_skip;
__ jccb(Assembler::zero, L_skip);
__ andl(rax, 1); // if it is a T_BOOLEAN array, mask the stored value to 0/1
__ bind(L_skip);
! Address addr(rdx, rbx, Address::times_1,
! arrayOopDesc::base_offset_in_bytes(T_BYTE));
! TSAN_RUNTIME_ONLY(tsan_observe_load_or_store(addr, SharedRuntime::tsan_write1));
! __ access_store_at(T_BYTE, IN_HEAP | IS_ARRAY, addr, rax, noreg, noreg);
}
void TemplateTable::castore() {
transition(itos, vtos);
__ pop_i(rbx);
// rax: value
// rbx: index
// rdx: array
index_check(rdx, rbx); // prefer index in rbx
! Address addr(rdx, rbx, Address::times_2,
! arrayOopDesc::base_offset_in_bytes(T_CHAR));
! TSAN_RUNTIME_ONLY(tsan_observe_load_or_store(addr, SharedRuntime::tsan_write2));
! __ access_store_at(T_CHAR, IN_HEAP | IS_ARRAY, addr, rax, noreg, noreg);
}
void TemplateTable::sastore() {
castore();
in_bytes(cp_base_offset +
ConstantPoolCacheEntry::f1_offset())));
const int mirror_offset = in_bytes(Klass::java_mirror_offset());
__ movptr(obj, Address(obj, mirror_offset));
__ resolve_oop_handle(obj);
+ TSAN_RUNTIME_ONLY(
+ // Draw a happens-before edge from the class's static initializer to
+ // this lookup.
+
+ // java_lang_Class::_init_lock_offset may not be initialized yet when
+ // this code is generated, but it will be by the time the code runs,
+ // so compute its address here and load the offset at runtime.
+ __ pusha();
+ __ movq(c_rarg0, obj);
+ AddressLiteral init_lock_offset_address(
+ (address) java_lang_Class::init_lock_offset_addr(),
+ relocInfo::none);
+ __ lea(rax, init_lock_offset_address);
+ __ movl(rax, Address(rax, 0));
+ __ addq(c_rarg0, rax);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address,
+ SharedRuntime::tsan_acquire),
+ c_rarg0);
+ __ popa();
+ );
}
}
void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
Register method,
if (!is_static) pop_and_check_object(obj);
const Address field(obj, off, Address::times_1, 0*wordSize);
+ // During a TSAN-instrumented run, move the flags into rdx so that
+ // tsan_observe_get_or_put can later check whether the field is volatile
+ // or has been annotated to be ignored by TSan.
+ TSAN_RUNTIME_ONLY(__ movl(rdx, flags));
+
Label Done, notByte, notBool, notInt, notShort, notChar, notLong, notFloat, notObj;
__ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
// Make sure we don't need to mask edx after the above shift
assert(btos == 0, "change code, btos != 0");
__ jcc(Assembler::notZero, notByte);
// btos
__ access_load_at(T_BYTE, IN_HEAP, rax, field, noreg, noreg);
__ push(btos);
+ TSAN_RUNTIME_ONLY(tsan_observe_get_or_put(
+ field, rdx, SharedRuntime::tsan_read1, btos));
// Rewrite bytecode to be faster
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
}
__ jmp(Done);
__ jcc(Assembler::notEqual, notBool);
// ztos (same code as btos)
__ access_load_at(T_BOOLEAN, IN_HEAP, rax, field, noreg, noreg);
__ push(ztos);
+ TSAN_RUNTIME_ONLY(tsan_observe_get_or_put(
+ field, rdx, SharedRuntime::tsan_read1, ztos));
// Rewrite bytecode to be faster
if (!is_static && rc == may_rewrite) {
// use btos rewriting, no truncating to t/f bit is needed for getfield.
patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
}
__ cmpl(flags, atos);
__ jcc(Assembler::notEqual, notObj);
// atos
do_oop_load(_masm, field, rax);
__ push(atos);
+ TSAN_RUNTIME_ONLY(tsan_observe_get_or_put(
+ field, rdx, UseCompressedOops ? SharedRuntime::tsan_read4
+ : SharedRuntime::tsan_read8,
+ atos));
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx);
}
__ jmp(Done);
__ cmpl(flags, itos);
__ jcc(Assembler::notEqual, notInt);
// itos
__ access_load_at(T_INT, IN_HEAP, rax, field, noreg, noreg);
__ push(itos);
+ TSAN_RUNTIME_ONLY(tsan_observe_get_or_put(
+ field, rdx, SharedRuntime::tsan_read4, itos));
// Rewrite bytecode to be faster
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_igetfield, bc, rbx);
}
__ jmp(Done);
__ cmpl(flags, ctos);
__ jcc(Assembler::notEqual, notChar);
// ctos
__ access_load_at(T_CHAR, IN_HEAP, rax, field, noreg, noreg);
__ push(ctos);
+ TSAN_RUNTIME_ONLY(tsan_observe_get_or_put(
+ field, rdx, SharedRuntime::tsan_read2, ctos));
// Rewrite bytecode to be faster
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_cgetfield, bc, rbx);
}
__ jmp(Done);
__ cmpl(flags, stos);
__ jcc(Assembler::notEqual, notShort);
// stos
__ access_load_at(T_SHORT, IN_HEAP, rax, field, noreg, noreg);
__ push(stos);
+ TSAN_RUNTIME_ONLY(tsan_observe_get_or_put(
+ field, rdx, SharedRuntime::tsan_read2, stos));
// Rewrite bytecode to be faster
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_sgetfield, bc, rbx);
}
__ jmp(Done);
// ltos
// Generate code as if volatile (x86_32). There just aren't enough registers to
// save that information and this code is faster than the test.
__ access_load_at(T_LONG, IN_HEAP | MO_RELAXED, noreg /* ltos */, field, noreg, noreg);
__ push(ltos);
+ TSAN_RUNTIME_ONLY(tsan_observe_get_or_put(
+ field, rdx, SharedRuntime::tsan_read8, ltos));
// Rewrite bytecode to be faster
LP64_ONLY(if (!is_static && rc == may_rewrite) patch_bytecode(Bytecodes::_fast_lgetfield, bc, rbx));
__ jmp(Done);
__ bind(notLong);
__ jcc(Assembler::notEqual, notFloat);
// ftos
__ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
__ push(ftos);
+ TSAN_RUNTIME_ONLY(tsan_observe_get_or_put(
+ field, rdx, SharedRuntime::tsan_read4, ftos));
// Rewrite bytecode to be faster
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_fgetfield, bc, rbx);
}
__ jmp(Done);
#endif
// dtos
// MO_RELAXED: even for a volatile field this adds no extra work in the underlying implementation
__ access_load_at(T_DOUBLE, IN_HEAP | MO_RELAXED, noreg /* dtos */, field, noreg, noreg);
__ push(dtos);
+ TSAN_RUNTIME_ONLY(tsan_observe_get_or_put(
+ field, rdx, SharedRuntime::tsan_read8, dtos));
// Rewrite bytecode to be faster
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_dgetfield, bc, rbx);
}
#ifdef ASSERT
// volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
// Assembler::StoreStore));
Label notVolatile, Done;
__ movl(rdx, flags);
- __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
- __ andl(rdx, 0x1);
// Check for volatile store
! __ testl(rdx, 1 << ConstantPoolCacheEntry::is_volatile_shift);
__ jcc(Assembler::zero, notVolatile);
// btos
{
__ pop(btos);
if (!is_static) pop_and_check_object(obj);
+ TSAN_RUNTIME_ONLY(tsan_observe_get_or_put(
+ field, rdx, SharedRuntime::tsan_write1, btos));
__ access_store_at(T_BYTE, IN_HEAP, field, rax, noreg, noreg);
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx, true, byte_no);
}
__ jmp(Done);
// ztos
{
__ pop(ztos);
if (!is_static) pop_and_check_object(obj);
+ TSAN_RUNTIME_ONLY(tsan_observe_get_or_put(
+ field, rdx, SharedRuntime::tsan_write1, ztos));
__ access_store_at(T_BOOLEAN, IN_HEAP, field, rax, noreg, noreg);
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_zputfield, bc, rbx, true, byte_no);
}
__ jmp(Done);
// atos
{
__ pop(atos);
if (!is_static) pop_and_check_object(obj);
+ TSAN_RUNTIME_ONLY(tsan_observe_get_or_put(field, rdx,
+ UseCompressedOops ? SharedRuntime::tsan_write4
+ : SharedRuntime::tsan_write8,
+ atos));
// Store into the field
do_oop_store(_masm, field, rax);
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx, true, byte_no);
}
// itos
{
__ pop(itos);
if (!is_static) pop_and_check_object(obj);
+ TSAN_RUNTIME_ONLY(tsan_observe_get_or_put(
+ field, rdx, SharedRuntime::tsan_write4, itos));
__ access_store_at(T_INT, IN_HEAP, field, rax, noreg, noreg);
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx, true, byte_no);
}
__ jmp(Done);
// ctos
{
__ pop(ctos);
if (!is_static) pop_and_check_object(obj);
+ TSAN_RUNTIME_ONLY(tsan_observe_get_or_put(
+ field, rdx, SharedRuntime::tsan_write2, ctos));
__ access_store_at(T_CHAR, IN_HEAP, field, rax, noreg, noreg);
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx, true, byte_no);
}
__ jmp(Done);
// stos
{
__ pop(stos);
if (!is_static) pop_and_check_object(obj);
+ TSAN_RUNTIME_ONLY(tsan_observe_get_or_put(
+ field, rdx, SharedRuntime::tsan_write2, stos));
__ access_store_at(T_SHORT, IN_HEAP, field, rax, noreg, noreg);
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx, true, byte_no);
}
__ jmp(Done);
// ltos
{
__ pop(ltos);
if (!is_static) pop_and_check_object(obj);
+ TSAN_RUNTIME_ONLY(tsan_observe_get_or_put(
+ field, rdx, SharedRuntime::tsan_write8, ltos));
// MO_RELAXED: generate atomic store for the case of volatile field (important for x86_32)
__ access_store_at(T_LONG, IN_HEAP | MO_RELAXED, field, noreg /* ltos*/, noreg, noreg);
#ifdef _LP64
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx, true, byte_no);
// ftos
{
__ pop(ftos);
if (!is_static) pop_and_check_object(obj);
+ TSAN_RUNTIME_ONLY(tsan_observe_get_or_put(
+ field, rdx, SharedRuntime::tsan_write4, ftos));
__ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg);
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx, true, byte_no);
}
__ jmp(Done);
// dtos
{
__ pop(dtos);
if (!is_static) pop_and_check_object(obj);
+ TSAN_RUNTIME_ONLY(tsan_observe_get_or_put(
+ field, rdx, SharedRuntime::tsan_write8, dtos));
+ // MO_RELAXED: even for a volatile field this adds no extra work in the underlying implementation
__ access_store_at(T_DOUBLE, IN_HEAP | MO_RELAXED, field, noreg /* dtos */, noreg, noreg);
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx, true, byte_no);
}
__ call_VM_leaf(
CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
__ pop(atos);
}
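+ // Report the freshly allocated object to the TSAN runtime
+ // (SharedRuntime::tsan_track_obj).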
+ TSAN_RUNTIME_ONLY(
+ // The newly allocated oop is in rax; preserve it across the call.
+ __ push(atos);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::tsan_track_obj),
+ rax);
+ __ pop(atos);
+ );
+
__ jmp(done);
}
// slow case
__ bind(slow_case);