13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "c1/c1_CodeStubs.hpp"
27 #include "c1/c1_FrameMap.hpp"
28 #include "c1/c1_LIRAssembler.hpp"
29 #include "c1/c1_MacroAssembler.hpp"
30 #include "c1/c1_Runtime1.hpp"
31 #include "classfile/javaClasses.hpp"
32 #include "nativeInst_x86.hpp"
33 #include "runtime/sharedRuntime.hpp"
34 #include "utilities/align.hpp"
35 #include "utilities/macros.hpp"
36 #include "vmreg_x86.inline.hpp"
37
38
39 #define __ ce->masm()->
40
41 #ifndef _LP64
42 float ConversionStub::float_zero = 0.0;
43 double ConversionStub::double_zero = 0.0;
44
45 void ConversionStub::emit_code(LIR_Assembler* ce) {
46 __ bind(_entry);
47 assert(bytecode() == Bytecodes::_f2i || bytecode() == Bytecodes::_d2i, "other conversions do not require stub");
48
49
50 if (input()->is_single_xmm()) {
51 __ comiss(input()->as_xmm_float_reg(),
52 ExternalAddress((address)&float_zero));
// Slow path taken when a predicate inserted by loop optimizations fails:
// call into the runtime to deoptimize. Control does not return here.
void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  // Runtime entry that triggers the deoptimization trap for a failed predicate.
  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  __ call(RuntimeAddress(a));
  ce->add_call_info_here(_info);  // record debug info at the call site
  ce->verify_oop_map(_info);
  // The call deoptimizes; execution must never fall through this point.
  debug_only(__ should_not_reach_here());
}
147
// Slow path that throws ArithmeticException for an integer division
// (or remainder) by zero.
void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    // Division was compiled with an implicit exception check: map the
    // faulting instruction's offset to this stub's code offset.
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_div0_exception_id)));
  ce->add_call_info_here(_info);  // debug info for the throwing call
  // The runtime stub unwinds to an exception handler; no fall-through.
  debug_only(__ should_not_reach_here());
}
157
158
159 // Implementation of NewInstanceStub
160
161 NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
162 _result = result;
163 _klass = klass;
164 _klass_reg = klass_reg;
165 _info = new CodeEmitInfo(info);
166 assert(stub_id == Runtime1::new_instance_id ||
167 stub_id == Runtime1::fast_new_instance_id ||
168 stub_id == Runtime1::fast_new_instance_init_check_id,
169 "need new_instance id");
170 _stub_id = stub_id;
171 }
172
173
174 void NewInstanceStub::emit_code(LIR_Assembler* ce) {
175 assert(__ rsp_offset() == 0, "frame size should be fixed");
176 __ bind(_entry);
177 __ movptr(rdx, _klass_reg->as_register());
178 __ call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
191 _result = result;
192 _info = new CodeEmitInfo(info);
193 }
194
195
// Slow-path allocation of a primitive-type array via the Runtime1 stub.
// Register convention is fixed by the runtime stub: length in rbx,
// klass in rdx, result returned in rax.
void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must in rbx,");
  assert(_klass_reg->as_register() == rdx, "klass_reg must in rdx");
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
  ce->add_call_info_here(_info);  // debug info for possible GC/exception
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must in rax,");
  __ jmp(_continuation);  // resume at the fast-path continuation
}
207
208
209 // Implementation of NewObjectArrayStub
210
211 NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
212 _klass_reg = klass_reg;
213 _result = result;
214 _length = length;
215 _info = new CodeEmitInfo(info);
216 }
217
218
// Slow-path allocation of an object array via the Runtime1 stub.
// Register convention is fixed by the runtime stub: length in rbx,
// klass in rdx, result returned in rax.
void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must in rbx,");
  assert(_klass_reg->as_register() == rdx, "klass_reg must in rdx");
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
  ce->add_call_info_here(_info);  // debug info for possible GC/exception
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must in rax,");
  __ jmp(_continuation);  // resume at the fast-path continuation
}
230
231
232 // Implementation of MonitorAccessStubs
233
234 MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
235 : MonitorAccessStub(obj_reg, lock_reg)
236 {
237 _info = new CodeEmitInfo(info);
238 }
239
240
241 void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
242 assert(__ rsp_offset() == 0, "frame size should be fixed");
243 __ bind(_entry);
244 ce->store_parameter(_obj_reg->as_register(), 1);
245 ce->store_parameter(_lock_reg->as_register(), 0);
246 Runtime1::StubID enter_id;
247 if (ce->compilation()->has_fpu_code()) {
248 enter_id = Runtime1::monitorenter_id;
249 } else {
250 enter_id = Runtime1::monitorenter_nofpu_id;
251 }
252 __ call(RuntimeAddress(Runtime1::entry_for(enter_id)));
253 ce->add_call_info_here(_info);
254 ce->verify_oop_map(_info);
255 __ jmp(_continuation);
256 }
257
258
259 void MonitorExitStub::emit_code(LIR_Assembler* ce) {
260 __ bind(_entry);
261 if (_compute_lock) {
262 // lock_reg was destroyed by fast unlocking attempt => recompute it
263 ce->monitor_address(_monitor_ix, _lock_reg);
|
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "c1/c1_CodeStubs.hpp"
27 #include "c1/c1_FrameMap.hpp"
28 #include "c1/c1_LIRAssembler.hpp"
29 #include "c1/c1_MacroAssembler.hpp"
30 #include "c1/c1_Runtime1.hpp"
31 #include "classfile/javaClasses.hpp"
32 #include "nativeInst_x86.hpp"
33 #include "oops/objArrayKlass.hpp"
34 #include "runtime/sharedRuntime.hpp"
35 #include "utilities/align.hpp"
36 #include "utilities/macros.hpp"
37 #include "vmreg_x86.inline.hpp"
38
39
40 #define __ ce->masm()->
41
42 #ifndef _LP64
43 float ConversionStub::float_zero = 0.0;
44 double ConversionStub::double_zero = 0.0;
45
46 void ConversionStub::emit_code(LIR_Assembler* ce) {
47 __ bind(_entry);
48 assert(bytecode() == Bytecodes::_f2i || bytecode() == Bytecodes::_d2i, "other conversions do not require stub");
49
50
51 if (input()->is_single_xmm()) {
52 __ comiss(input()->as_xmm_float_reg(),
53 ExternalAddress((address)&float_zero));
// Slow path taken when a predicate inserted by loop optimizations fails:
// call into the runtime to deoptimize. Control does not return here.
void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  // Runtime entry that triggers the deoptimization trap for a failed predicate.
  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  __ call(RuntimeAddress(a));
  ce->add_call_info_here(_info);  // record debug info at the call site
  ce->verify_oop_map(_info);
  // The call deoptimizes; execution must never fall through this point.
  debug_only(__ should_not_reach_here());
}
148
// Slow path that throws ArithmeticException for an integer division
// (or remainder) by zero.
void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    // Division was compiled with an implicit exception check: map the
    // faulting instruction's offset to this stub's code offset.
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_div0_exception_id)));
  ce->add_call_info_here(_info);  // debug info for the throwing call
  // The runtime stub unwinds to an exception handler; no fall-through.
  debug_only(__ should_not_reach_here());
}
158
159
160 // Implementation of LoadFlattenedArrayStub
161
162 LoadFlattenedArrayStub::LoadFlattenedArrayStub(LIR_Opr array, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
163 _array = array;
164 _index = index;
165 _result = result;
166 // Tell the register allocator that the runtime call will scratch rax.
167 _scratch_reg = FrameMap::rax_oop_opr;
168 _info = new CodeEmitInfo(info);
169 }
170
// Calls the runtime to load an element from a flattened array. Arguments
// are passed on the stack via store_parameter; the runtime's return value
// arrives in rax and is moved to the allocated result register if needed.
void LoadFlattenedArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  // Stack-passed arguments: array in slot 1, index in slot 0.
  ce->store_parameter(_array->as_register(), 1);
  ce->store_parameter(_index->as_register(), 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::load_flattened_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  // Move the result out of rax if the allocator picked a different register.
  if (_result->as_register() != rax) {
    __ movptr(_result->as_register(), rax);
  }
  __ jmp(_continuation);  // resume at the fast-path continuation
}
184
185
186 // Implementation of StoreFlattenedArrayStub
187
188 StoreFlattenedArrayStub::StoreFlattenedArrayStub(LIR_Opr array, LIR_Opr index, LIR_Opr value, CodeEmitInfo* info) {
189 _array = array;
190 _index = index;
191 _value = value;
192 // Tell the register allocator that the runtime call will scratch rax.
193 _scratch_reg = FrameMap::rax_oop_opr;
194 _info = new CodeEmitInfo(info);
195 }
196
197
// Calls the runtime to store a value into a flattened array. All three
// arguments are passed on the stack via store_parameter.
void StoreFlattenedArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  // Stack-passed arguments: array in slot 2, index in slot 1, value in slot 0.
  ce->store_parameter(_array->as_register(), 2);
  ce->store_parameter(_index->as_register(), 1);
  ce->store_parameter(_value->as_register(), 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::store_flattened_array_id)));
  ce->add_call_info_here(_info);  // debug info for possible GC/exception
  ce->verify_oop_map(_info);
  __ jmp(_continuation);  // resume at the fast-path continuation
}
209
210
211 // Implementation of SubstitutabilityCheckStub
212
213 SubstitutabilityCheckStub::SubstitutabilityCheckStub(LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
214 _left = left;
215 _right = right;
216 // Tell the register allocator that the runtime call will scratch rax.
217 _scratch_reg = FrameMap::rax_oop_opr;
218 _info = new CodeEmitInfo(info);
219 }
220
// Calls the runtime to perform the substitutability check on the two
// operands; both are passed on the stack via store_parameter.
void SubstitutabilityCheckStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  // Stack-passed arguments: left in slot 1, right in slot 0.
  ce->store_parameter(_left->as_register(), 1);
  ce->store_parameter(_right->as_register(), 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::substitutability_check_id)));
  ce->add_call_info_here(_info);  // debug info for possible GC/exception
  ce->verify_oop_map(_info);
  __ jmp(_continuation);  // resume at the fast-path continuation
}
231
232
233 // Implementation of NewInstanceStub
234
235 NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
236 _result = result;
237 _klass = klass;
238 _klass_reg = klass_reg;
239 _info = new CodeEmitInfo(info);
240 assert(stub_id == Runtime1::new_instance_id ||
241 stub_id == Runtime1::fast_new_instance_id ||
242 stub_id == Runtime1::fast_new_instance_init_check_id,
243 "need new_instance id");
244 _stub_id = stub_id;
245 }
246
247
248 void NewInstanceStub::emit_code(LIR_Assembler* ce) {
249 assert(__ rsp_offset() == 0, "frame size should be fixed");
250 __ bind(_entry);
251 __ movptr(rdx, _klass_reg->as_register());
252 __ call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
265 _result = result;
266 _info = new CodeEmitInfo(info);
267 }
268
269
// Slow-path allocation of a primitive-type array via the Runtime1 stub.
// Register convention is fixed by the runtime stub: length in rbx,
// klass in rdx, result returned in rax.
void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must in rbx,");
  assert(_klass_reg->as_register() == rdx, "klass_reg must in rdx");
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
  ce->add_call_info_here(_info);  // debug info for possible GC/exception
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must in rax,");
  __ jmp(_continuation);  // resume at the fast-path continuation
}
281
282
283 // Implementation of NewObjectArrayStub
284
285 NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result,
286 CodeEmitInfo* info, bool is_inline_type) {
287 _klass_reg = klass_reg;
288 _result = result;
289 _length = length;
290 _info = new CodeEmitInfo(info);
291 _is_inline_type = is_inline_type;
292 }
293
294
// Slow-path allocation of an object array. Register convention is fixed
// by the runtime stubs: length in rbx, klass in rdx, result in rax.
// Arrays with inline-type elements use the flat-array allocation stub.
void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must in rbx,");
  assert(_klass_reg->as_register() == rdx, "klass_reg must in rdx");
  if (_is_inline_type) {
    __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_flat_array_id)));
  } else {
    __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
  }
  ce->add_call_info_here(_info);  // debug info for possible GC/exception
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must in rax,");
  __ jmp(_continuation);  // resume at the fast-path continuation
}
310
311
312 // Implementation of MonitorAccessStubs
313
314 MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info, CodeStub* throw_imse_stub, LIR_Opr scratch_reg)
315 : MonitorAccessStub(obj_reg, lock_reg)
316 {
317 _info = new CodeEmitInfo(info);
318 _throw_imse_stub = throw_imse_stub;
319 _scratch_reg = scratch_reg;
320 if (_throw_imse_stub != NULL) {
321 assert(_scratch_reg != LIR_OprFact::illegalOpr, "must be");
322 }
323 }
324
325
// Slow-path monitorenter. If an IMSE stub was supplied, first test the
// object's mark word for the "always locked" bit pattern that marks
// inline types (which cannot be synchronized on) and branch to the
// IllegalMonitorStateException stub when it matches.
void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  if (_throw_imse_stub != NULL) {
    // When we come here, _obj_reg has already been checked to be non-null.
    const int is_value_mask = markWord::always_locked_pattern;
    Register mark = _scratch_reg->as_register();
    __ movptr(mark, Address(_obj_reg->as_register(), oopDesc::mark_offset_in_bytes()));
    // Isolate the pattern bits and compare: all bits set => inline type.
    __ andptr(mark, is_value_mask);
    __ cmpl(mark, is_value_mask);
    __ jcc(Assembler::equal, *_throw_imse_stub->entry());
  }
  // Stack-passed arguments: object in slot 1, BasicLock in slot 0.
  ce->store_parameter(_obj_reg->as_register(), 1);
  ce->store_parameter(_lock_reg->as_register(), 0);
  // The _nofpu variant presumably skips FPU state saving — selected when
  // the compiled method contains no FPU code.
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(enter_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);  // resume at the fast-path continuation
}
351
352
353 void MonitorExitStub::emit_code(LIR_Assembler* ce) {
354 __ bind(_entry);
355 if (_compute_lock) {
356 // lock_reg was destroyed by fast unlocking attempt => recompute it
357 ce->monitor_address(_monitor_ix, _lock_reg);
|