1 /**
2  * \file
3  * Convert CIL to the JIT internal representation
4  *
5  * Author:
6  *   Paolo Molaro (lupus@ximian.com)
7  *   Dietmar Maurer (dietmar@ximian.com)
8  *
9  * (C) 2002 Ximian, Inc.
10  * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
11  * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
12  * Licensed under the MIT license. See LICENSE file in the project root for full license information.
13  */
14 
15 #include <config.h>
16 #include <mono/utils/mono-compiler.h>
17 #include "mini.h"
18 
19 #ifndef DISABLE_JIT
20 
21 #include <signal.h>
22 
23 #ifdef HAVE_UNISTD_H
24 #include <unistd.h>
25 #endif
26 
27 #include <math.h>
28 #include <string.h>
29 #include <ctype.h>
30 
31 #ifdef HAVE_SYS_TIME_H
32 #include <sys/time.h>
33 #endif
34 
35 #ifdef HAVE_ALLOCA_H
36 #include <alloca.h>
37 #endif
38 
39 #include <mono/utils/memcheck.h>
40 #include <mono/metadata/abi-details.h>
41 #include <mono/metadata/assembly.h>
42 #include <mono/metadata/attrdefs.h>
43 #include <mono/metadata/loader.h>
44 #include <mono/metadata/tabledefs.h>
45 #include <mono/metadata/class.h>
46 #include <mono/metadata/object.h>
47 #include <mono/metadata/exception.h>
48 #include <mono/metadata/opcodes.h>
49 #include <mono/metadata/mono-endian.h>
50 #include <mono/metadata/tokentype.h>
51 #include <mono/metadata/tabledefs.h>
52 #include <mono/metadata/marshal.h>
53 #include <mono/metadata/debug-helpers.h>
54 #include <mono/metadata/debug-internals.h>
55 #include <mono/metadata/gc-internals.h>
56 #include <mono/metadata/security-manager.h>
57 #include <mono/metadata/threads-types.h>
58 #include <mono/metadata/security-core-clr.h>
59 #include <mono/metadata/profiler-private.h>
60 #include <mono/metadata/profiler.h>
61 #include <mono/metadata/monitor.h>
62 #include <mono/utils/mono-memory-model.h>
63 #include <mono/utils/mono-error-internals.h>
64 #include <mono/metadata/mono-basic-block.h>
65 #include <mono/metadata/reflection-internals.h>
66 #include <mono/utils/mono-threads-coop.h>
67 
68 #include "trace.h"
69 
70 #include "ir-emit.h"
71 
72 #include "jit-icalls.h"
73 #include "jit.h"
74 #include "debugger-agent.h"
75 #include "seq-points.h"
76 #include "aot-compiler.h"
77 #include "mini-llvm.h"
78 #include "mini-runtime.h"
79 
80 #define BRANCH_COST 10
81 #define INLINE_LENGTH_LIMIT 20
82 
83 /* These have 'cfg' as an implicit argument */
84 #define INLINE_FAILURE(msg) do {									\
85 	if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
86 		inline_failure (cfg, msg);										\
87 		goto exception_exit;											\
88 	} \
89 	} while (0)
90 #define CHECK_CFG_EXCEPTION do {\
91 		if (cfg->exception_type != MONO_EXCEPTION_NONE)	\
92 			goto exception_exit;						\
93 	} while (0)
94 #define FIELD_ACCESS_FAILURE(method, field) do {					\
95 		field_access_failure ((cfg), (method), (field));			\
96 		goto exception_exit;	\
97 	} while (0)
98 #define GENERIC_SHARING_FAILURE(opcode) do {		\
99 		if (cfg->gshared) {									\
100 			gshared_failure (cfg, opcode, __FILE__, __LINE__);	\
101 			goto exception_exit;	\
102 		}			\
103 	} while (0)
104 #define GSHAREDVT_FAILURE(opcode) do {		\
105 	if (cfg->gsharedvt) {												\
106 		gsharedvt_failure (cfg, opcode, __FILE__, __LINE__);			\
107 		goto exception_exit;											\
108 	}																	\
109 	} while (0)
110 #define OUT_OF_MEMORY_FAILURE do {	\
111 		mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);		\
112 		mono_error_set_out_of_memory (&cfg->error, "");					\
113 		goto exception_exit;	\
114 	} while (0)
115 #define DISABLE_AOT(cfg) do { \
116 		if ((cfg)->verbose_level >= 2)						  \
117 			printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__);	\
118 		(cfg)->disable_aot = TRUE;							  \
119 	} while (0)
120 #define LOAD_ERROR do { \
121 		break_on_unverified ();								\
122 		mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
123 		goto exception_exit;									\
124 	} while (0)
125 
126 #define TYPE_LOAD_ERROR(klass) do { \
127 		cfg->exception_ptr = klass; \
128 		LOAD_ERROR;					\
129 	} while (0)
130 
131 #define CHECK_CFG_ERROR do {\
132 		if (!mono_error_ok (&cfg->error)) { \
133 			mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);	\
134 			goto mono_error_exit; \
135 		} \
136 	} while (0)
137 
/* Determine whether 'ins' represents a load of the 'this' argument */
139 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
140 
141 static int ldind_to_load_membase (int opcode);
142 static int stind_to_store_membase (int opcode);
143 
144 int mono_op_to_op_imm (int opcode);
145 int mono_op_to_op_imm_noemul (int opcode);
146 
147 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
148 						  guchar *ip, guint real_offset, gboolean inline_always);
149 static MonoInst*
150 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp);
151 
152 /* helper methods signatures */
153 static MonoMethodSignature *helper_sig_domain_get;
154 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
155 static MonoMethodSignature *helper_sig_llvmonly_imt_trampoline;
156 static MonoMethodSignature *helper_sig_jit_thread_attach;
157 static MonoMethodSignature *helper_sig_get_tls_tramp;
158 static MonoMethodSignature *helper_sig_set_tls_tramp;
159 
160 /* type loading helpers */
161 static GENERATE_GET_CLASS_WITH_CACHE (runtime_helpers, "System.Runtime.CompilerServices", "RuntimeHelpers")
162 static GENERATE_TRY_GET_CLASS_WITH_CACHE (debuggable_attribute, "System.Diagnostics", "DebuggableAttribute")
163 
164 /*
165  * Instruction metadata
166  */
167 #ifdef MINI_OP
168 #undef MINI_OP
169 #endif
170 #ifdef MINI_OP3
171 #undef MINI_OP3
172 #endif
173 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
174 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
175 #define NONE ' '
176 #define IREG 'i'
177 #define FREG 'f'
178 #define VREG 'v'
179 #define XREG 'x'
180 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
181 #define LREG IREG
182 #else
183 #define LREG 'l'
184 #endif
185 /* keep in sync with the enum in mini.h */
186 const char
187 ins_info[] = {
188 #include "mini-ops.h"
189 };
190 #undef MINI_OP
191 #undef MINI_OP3
192 
193 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
194 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
195 /*
196  * This should contain the index of the last sreg + 1. This is not the same
197  * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
198  */
199 const gint8 ins_sreg_counts[] = {
200 #include "mini-ops.h"
201 };
202 #undef MINI_OP
203 #undef MINI_OP3
204 
guint32
mono_alloc_ireg (MonoCompile *cfg)
{
	/* Public wrapper over the inline alloc_ireg () helper: allocate a fresh integer vreg. */
	return alloc_ireg (cfg);
}
210 
guint32
mono_alloc_lreg (MonoCompile *cfg)
{
	/* Public wrapper over the inline alloc_lreg () helper: allocate a fresh long (64 bit) vreg. */
	return alloc_lreg (cfg);
}
216 
guint32
mono_alloc_freg (MonoCompile *cfg)
{
	/* Public wrapper over the inline alloc_freg () helper: allocate a fresh floating point vreg. */
	return alloc_freg (cfg);
}
222 
guint32
mono_alloc_preg (MonoCompile *cfg)
{
	/* Public wrapper over the inline alloc_preg () helper: allocate a fresh pointer-sized vreg. */
	return alloc_preg (cfg);
}
228 
guint32
mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
{
	/* Public wrapper over the inline alloc_dreg () helper: allocate a destination
	 * vreg whose register class matches STACK_TYPE. */
	return alloc_dreg (cfg, stack_type);
}
234 
/*
 * mono_alloc_ireg_ref:
 *
 *   Allocate an IREG, and mark it as holding a GC ref.
 */
guint32
mono_alloc_ireg_ref (MonoCompile *cfg)
{
	/* GC-ref-tracked variant of mono_alloc_ireg (). */
	return alloc_ireg_ref (cfg);
}
245 
/*
 * mono_alloc_ireg_mp:
 *
 *   Allocate an IREG, and mark it as holding a managed pointer.
 */
guint32
mono_alloc_ireg_mp (MonoCompile *cfg)
{
	/* Managed-pointer-tracked variant of mono_alloc_ireg (). */
	return alloc_ireg_mp (cfg);
}
256 
257 /*
258  * mono_alloc_ireg_copy:
259  *
260  *   Allocate an IREG with the same GC type as VREG.
261  */
262 guint32
mono_alloc_ireg_copy(MonoCompile * cfg,guint32 vreg)263 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
264 {
265 	if (vreg_is_ref (cfg, vreg))
266 		return alloc_ireg_ref (cfg);
267 	else if (vreg_is_mp (cfg, vreg))
268 		return alloc_ireg_mp (cfg);
269 	else
270 		return alloc_ireg (cfg);
271 }
272 
273 guint
mono_type_to_regmove(MonoCompile * cfg,MonoType * type)274 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
275 {
276 	if (type->byref)
277 		return OP_MOVE;
278 
279 	type = mini_get_underlying_type (type);
280 handle_enum:
281 	switch (type->type) {
282 	case MONO_TYPE_I1:
283 	case MONO_TYPE_U1:
284 		return OP_MOVE;
285 	case MONO_TYPE_I2:
286 	case MONO_TYPE_U2:
287 		return OP_MOVE;
288 	case MONO_TYPE_I4:
289 	case MONO_TYPE_U4:
290 		return OP_MOVE;
291 	case MONO_TYPE_I:
292 	case MONO_TYPE_U:
293 	case MONO_TYPE_PTR:
294 	case MONO_TYPE_FNPTR:
295 		return OP_MOVE;
296 	case MONO_TYPE_CLASS:
297 	case MONO_TYPE_STRING:
298 	case MONO_TYPE_OBJECT:
299 	case MONO_TYPE_SZARRAY:
300 	case MONO_TYPE_ARRAY:
301 		return OP_MOVE;
302 	case MONO_TYPE_I8:
303 	case MONO_TYPE_U8:
304 #if SIZEOF_REGISTER == 8
305 		return OP_MOVE;
306 #else
307 		return OP_LMOVE;
308 #endif
309 	case MONO_TYPE_R4:
310 		return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
311 	case MONO_TYPE_R8:
312 		return OP_FMOVE;
313 	case MONO_TYPE_VALUETYPE:
314 		if (type->data.klass->enumtype) {
315 			type = mono_class_enum_basetype (type->data.klass);
316 			goto handle_enum;
317 		}
318 		if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
319 			return OP_XMOVE;
320 		return OP_VMOVE;
321 	case MONO_TYPE_TYPEDBYREF:
322 		return OP_VMOVE;
323 	case MONO_TYPE_GENERICINST:
324 		if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
325 			return OP_XMOVE;
326 		type = &type->data.generic_class->container_class->byval_arg;
327 		goto handle_enum;
328 	case MONO_TYPE_VAR:
329 	case MONO_TYPE_MVAR:
330 		g_assert (cfg->gshared);
331 		if (mini_type_var_is_vt (type))
332 			return OP_VMOVE;
333 		else
334 			return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
335 	default:
336 		g_error ("unknown type 0x%02x in type_to_regstore", type->type);
337 	}
338 	return -1;
339 }
340 
341 void
mono_print_bb(MonoBasicBlock * bb,const char * msg)342 mono_print_bb (MonoBasicBlock *bb, const char *msg)
343 {
344 	int i;
345 	MonoInst *tree;
346 	GString *str = g_string_new ("");
347 
348 	g_string_append_printf (str, "%s %d: [IN: ", msg, bb->block_num);
349 	for (i = 0; i < bb->in_count; ++i)
350 		g_string_append_printf (str, " BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
351 	g_string_append_printf (str, ", OUT: ");
352 	for (i = 0; i < bb->out_count; ++i)
353 		g_string_append_printf (str, " BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
354 	g_string_append_printf (str, " ]\n");
355 
356 	g_print ("%s", str->str);
357 	g_string_free (str, TRUE);
358 
359 	for (tree = bb->code; tree; tree = tree->next)
360 		mono_print_ins_index (-1, tree);
361 }
362 
void
mono_create_helper_signatures (void)
{
	/*
	 * Pre-create the cached signatures stored in the helper_sig_* globals
	 * above.  The strings are in mono_create_icall_signature () format:
	 * return type followed by the parameter types.
	 */
	helper_sig_domain_get = mono_create_icall_signature ("ptr");
	helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
	helper_sig_llvmonly_imt_trampoline = mono_create_icall_signature ("ptr ptr ptr");
	helper_sig_jit_thread_attach = mono_create_icall_signature ("ptr ptr");
	helper_sig_get_tls_tramp = mono_create_icall_signature ("ptr");
	helper_sig_set_tls_tramp = mono_create_icall_signature ("void ptr");
}
373 
static MONO_NEVER_INLINE void
break_on_unverified (void)
{
	/* Debugging aid: trap into the debugger when unverifiable IL is hit,
	 * if the 'break-on-unverified' debug option is enabled. */
	if (mini_get_debug_options ()->break_on_unverified)
		G_BREAKPOINT ();
}
380 
static MONO_NEVER_INLINE void
field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
{
	/* Mark the compilation as failed with a FieldAccessException naming
	 * FIELD and the METHOD that tried to access it. */
	char *method_fname = mono_method_full_name (method, TRUE);
	char *field_fname = mono_field_full_name (field);
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
	mono_error_set_generic_error (&cfg->error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
	g_free (method_fname);
	g_free (field_fname);
}
391 
static MONO_NEVER_INLINE void
inline_failure (MonoCompile *cfg, const char *msg)
{
	/* Mark the compilation as failed because inlining was aborted; MSG says why.
	 * Used via the INLINE_FAILURE macro, which also jumps to exception_exit. */
	if (cfg->verbose_level >= 2)
		printf ("inline failed: %s\n", msg);
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
}
399 
400 static MONO_NEVER_INLINE void
gshared_failure(MonoCompile * cfg,int opcode,const char * file,int line)401 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
402 {
403 	if (cfg->verbose_level > 2)											\
404 		printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
405 	mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
406 }
407 
static MONO_NEVER_INLINE void
gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
{
	/* Record that gsharedvt compilation cannot handle OPCODE and mark the
	 * compilation as failed; callers fall back to a normal instantiation.
	 * NOTE(review): a previously set cfg->exception_message appears to be
	 * overwritten here without being freed — confirm ownership rules. */
	cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
	if (cfg->verbose_level >= 2)
		printf ("%s\n", cfg->exception_message);
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
}
416 
417 /*
 * When using gsharedvt, some instantiations might be verifiable, and some might not be. i.e.
419  * foo<T> (int i) { ldarg.0; box T; }
420  */
421 #define UNVERIFIED do { \
422 	if (cfg->gsharedvt) { \
423 		if (cfg->verbose_level > 2)									\
424 			printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
425 		mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
426 		goto exception_exit;											\
427 	}																	\
428 	break_on_unverified ();												\
429 	goto unverified;													\
430 } while (0)
431 
432 #define GET_BBLOCK(cfg,tblock,ip) do {	\
433 		(tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
434 		if (!(tblock)) {	\
435 			if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
436             NEW_BBLOCK (cfg, (tblock)); \
437 			(tblock)->cil_code = (ip);	\
438 			ADD_BBLOCK (cfg, (tblock));	\
439 		} \
440 	} while (0)
441 
442 #if defined(TARGET_X86) || defined(TARGET_AMD64)
443 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
444 		MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
445 		(dest)->dreg = alloc_ireg_mp ((cfg)); \
446 		(dest)->sreg1 = (sr1); \
447 		(dest)->sreg2 = (sr2); \
448 		(dest)->inst_imm = (imm); \
449 		(dest)->backend.shift_amount = (shift); \
450 		MONO_ADD_INS ((cfg)->cbb, (dest)); \
451 	} while (0)
452 #endif
453 
/* Emit conversions so both operands of a binary opcode are of the same type */
static void
add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
{
	MonoInst *arg1 = *arg1_ref;
	MonoInst *arg2 = *arg2_ref;

	/* With r4fp, r4 and r8 are distinct stack types: promote the r4 operand
	 * to r8 so INS sees two operands of the same type. */
	if (cfg->r4fp &&
		((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
		 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
		MonoInst *conv;

		/* Mixing r4/r8 is allowed by the spec */
		if (arg1->type == STACK_R4) {
			int dreg = alloc_freg (cfg);

			EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
			conv->type = STACK_R8;
			ins->sreg1 = dreg;
			*arg1_ref = conv;
		}
		if (arg2->type == STACK_R4) {
			int dreg = alloc_freg (cfg);

			EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
			conv->type = STACK_R8;
			ins->sreg2 = dreg;
			*arg2_ref = conv;
		}
	}

#if SIZEOF_REGISTER == 8
	/* FIXME: Need to add many more cases */
	/* On 64 bit registers, sign-extend the i4 operand when mixed with a
	 * pointer-sized one, and route the widened vreg into INS. */
	if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
		MonoInst *widen;

		int dr = alloc_preg (cfg);
		EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
		(ins)->sreg2 = widen->dreg;
	}
#endif
}
496 
497 #define ADD_BINOP(op) do {	\
498 		MONO_INST_NEW (cfg, ins, (op));	\
499 		sp -= 2;	\
500 		ins->sreg1 = sp [0]->dreg;	\
501 		ins->sreg2 = sp [1]->dreg;	\
502 		type_from_op (cfg, ins, sp [0], sp [1]);	\
503 		CHECK_TYPE (ins);	\
504 		/* Have to insert a widening op */		 \
505         add_widen_op (cfg, ins, &sp [0], &sp [1]);		 \
506         ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
507         MONO_ADD_INS ((cfg)->cbb, (ins)); \
508         *sp++ = mono_decompose_opcode ((cfg), (ins));	\
509 	} while (0)
510 
511 #define ADD_UNOP(op) do {	\
512 		MONO_INST_NEW (cfg, ins, (op));	\
513 		sp--;	\
514 		ins->sreg1 = sp [0]->dreg;	\
515 		type_from_op (cfg, ins, sp [0], NULL);	\
516 		CHECK_TYPE (ins);	\
517         (ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
518         MONO_ADD_INS ((cfg)->cbb, (ins)); \
519 		*sp++ = mono_decompose_opcode (cfg, ins);	\
520 	} while (0)
521 
522 #define ADD_BINCOND(next_block) do {	\
523 		MonoInst *cmp;	\
524 		sp -= 2; \
525 		MONO_INST_NEW(cfg, cmp, OP_COMPARE);	\
526 		cmp->sreg1 = sp [0]->dreg;	\
527 		cmp->sreg2 = sp [1]->dreg;	\
528 		type_from_op (cfg, cmp, sp [0], sp [1]);	\
529 		CHECK_TYPE (cmp);	\
530 		add_widen_op (cfg, cmp, &sp [0], &sp [1]);						\
531 		type_from_op (cfg, ins, sp [0], sp [1]);							\
532 		ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);	\
533 		GET_BBLOCK (cfg, tblock, target);		\
534 		link_bblock (cfg, cfg->cbb, tblock);	\
535 		ins->inst_true_bb = tblock;	\
536 		if ((next_block)) {	\
537 			link_bblock (cfg, cfg->cbb, (next_block));	\
538 			ins->inst_false_bb = (next_block);	\
539 			start_new_bblock = 1;	\
540 		} else {	\
541 			GET_BBLOCK (cfg, tblock, ip);		\
542 			link_bblock (cfg, cfg->cbb, tblock);	\
543 			ins->inst_false_bb = tblock;	\
544 			start_new_bblock = 2;	\
545 		}	\
546 		if (sp != stack_start) {									\
547 		    handle_stack_args (cfg, stack_start, sp - stack_start); \
548 			CHECK_UNVERIFIABLE (cfg); \
549 		} \
550         MONO_ADD_INS (cfg->cbb, cmp); \
551 		MONO_ADD_INS (cfg->cbb, ins);	\
552 	} while (0)
553 
/**
 * link_bblock: Links two basic blocks
 *
 * Links two basic blocks in the control flow graph: the 'from'
 * argument is the starting block and the 'to' argument is the block
 * that control flow reaches after 'from'.
 */
561 static void
link_bblock(MonoCompile * cfg,MonoBasicBlock * from,MonoBasicBlock * to)562 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
563 {
564 	MonoBasicBlock **newa;
565 	int i, found;
566 
567 #if 0
568 	if (from->cil_code) {
569 		if (to->cil_code)
570 			printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
571 		else
572 			printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
573 	} else {
574 		if (to->cil_code)
575 			printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
576 		else
577 			printf ("edge from entry to exit\n");
578 	}
579 #endif
580 
581 	found = FALSE;
582 	for (i = 0; i < from->out_count; ++i) {
583 		if (to == from->out_bb [i]) {
584 			found = TRUE;
585 			break;
586 		}
587 	}
588 	if (!found) {
589 		newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
590 		for (i = 0; i < from->out_count; ++i) {
591 			newa [i] = from->out_bb [i];
592 		}
593 		newa [i] = to;
594 		from->out_count++;
595 		from->out_bb = newa;
596 	}
597 
598 	found = FALSE;
599 	for (i = 0; i < to->in_count; ++i) {
600 		if (from == to->in_bb [i]) {
601 			found = TRUE;
602 			break;
603 		}
604 	}
605 	if (!found) {
606 		newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
607 		for (i = 0; i < to->in_count; ++i) {
608 			newa [i] = to->in_bb [i];
609 		}
610 		newa [i] = from;
611 		to->in_count++;
612 		to->in_bb = newa;
613 	}
614 }
615 
void
mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
{
	/* Public entry point for link_bblock (): add a CFG edge FROM -> TO. */
	link_bblock (cfg, from, to);
}
621 
622 /**
623  * mono_find_block_region:
624  *
625  *   We mark each basic block with a region ID. We use that to avoid BB
626  *   optimizations when blocks are in different regions.
627  *
628  * Returns:
629  *   A region token that encodes where this region is, and information
630  *   about the clause owner for this block.
631  *
632  *   The region encodes the try/catch/filter clause that owns this block
633  *   as well as the type.  -1 is a special value that represents a block
634  *   that is in none of try/catch/filter.
635  */
636 static int
mono_find_block_region(MonoCompile * cfg,int offset)637 mono_find_block_region (MonoCompile *cfg, int offset)
638 {
639 	MonoMethodHeader *header = cfg->header;
640 	MonoExceptionClause *clause;
641 	int i;
642 
643 	for (i = 0; i < header->num_clauses; ++i) {
644 		clause = &header->clauses [i];
645 		if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
646 		    (offset < (clause->handler_offset)))
647 			return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
648 
649 		if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
650 			if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
651 				return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
652 			else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
653 				return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
654 			else
655 				return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
656 		}
657 	}
658 	for (i = 0; i < header->num_clauses; ++i) {
659 		clause = &header->clauses [i];
660 
661 		if (MONO_OFFSET_IN_CLAUSE (clause, offset))
662 			return ((i + 1) << 8) | clause->flags;
663 	}
664 
665 	return -1;
666 }
667 
668 static gboolean
ip_in_finally_clause(MonoCompile * cfg,int offset)669 ip_in_finally_clause (MonoCompile *cfg, int offset)
670 {
671 	MonoMethodHeader *header = cfg->header;
672 	MonoExceptionClause *clause;
673 	int i;
674 
675 	for (i = 0; i < header->num_clauses; ++i) {
676 		clause = &header->clauses [i];
677 		if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FAULT)
678 			continue;
679 
680 		if (MONO_OFFSET_IN_HANDLER (clause, offset))
681 			return TRUE;
682 	}
683 	return FALSE;
684 }
685 
686 /* Find clauses between ip and target, from inner to outer */
687 static GList*
mono_find_leave_clauses(MonoCompile * cfg,unsigned char * ip,unsigned char * target)688 mono_find_leave_clauses (MonoCompile *cfg, unsigned char *ip, unsigned char *target)
689 {
690 	MonoMethodHeader *header = cfg->header;
691 	MonoExceptionClause *clause;
692 	int i;
693 	GList *res = NULL;
694 
695 	for (i = 0; i < header->num_clauses; ++i) {
696 		clause = &header->clauses [i];
697 		if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
698 		    (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
699 			res = g_list_append_mempool (cfg->mempool, res, clause);
700 		}
701 	}
702 	return res;
703 }
704 
705 static void
mono_create_spvar_for_region(MonoCompile * cfg,int region)706 mono_create_spvar_for_region (MonoCompile *cfg, int region)
707 {
708 	MonoInst *var;
709 
710 	var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
711 	if (var)
712 		return;
713 
714 	var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
715 	/* prevent it from being register allocated */
716 	var->flags |= MONO_INST_VOLATILE;
717 
718 	g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
719 }
720 
MonoInst *
mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
{
	/* Return the exception variable created for the handler at OFFSET,
	 * or NULL if mono_create_exvar_for_offset () was never called for it. */
	return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
}
726 
727 static MonoInst*
mono_create_exvar_for_offset(MonoCompile * cfg,int offset)728 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
729 {
730 	MonoInst *var;
731 
732 	var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
733 	if (var)
734 		return var;
735 
736 	var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
737 	/* prevent it from being register allocated */
738 	var->flags |= MONO_INST_VOLATILE;
739 
740 	g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
741 
742 	return var;
743 }
744 
745 /*
746  * Returns the type used in the eval stack when @type is loaded.
747  * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
748  */
void
type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
{
	MonoClass *klass;

	type = mini_get_underlying_type (type);
	inst->klass = klass = mono_class_from_mono_type (type);
	/* Byref values are managed pointers regardless of the pointee type. */
	if (type->byref) {
		inst->type = STACK_MP;
		return;
	}

handle_enum:
	switch (type->type) {
	case MONO_TYPE_VOID:
		inst->type = STACK_INV;
		return;
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
		inst->type = STACK_I4;
		return;
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_PTR:
	case MONO_TYPE_FNPTR:
		inst->type = STACK_PTR;
		return;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		inst->type = STACK_OBJ;
		return;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
		inst->type = STACK_I8;
		return;
	case MONO_TYPE_R4:
		/* Depends on cfg->r4fp; see r4_stack_type setup elsewhere.
		 * NOTE(review): this case uses 'break' while the others 'return';
		 * nothing follows the switch, so the effect is identical. */
		inst->type = cfg->r4_stack_type;
		break;
	case MONO_TYPE_R8:
		inst->type = STACK_R8;
		return;
	case MONO_TYPE_VALUETYPE:
		if (type->data.klass->enumtype) {
			/* Enums behave like their underlying integral type. */
			type = mono_class_enum_basetype (type->data.klass);
			goto handle_enum;
		} else {
			inst->klass = klass;
			inst->type = STACK_VTYPE;
			return;
		}
	case MONO_TYPE_TYPEDBYREF:
		inst->klass = mono_defaults.typed_reference_class;
		inst->type = STACK_VTYPE;
		return;
	case MONO_TYPE_GENERICINST:
		/* Re-dispatch on the open generic container type. */
		type = &type->data.generic_class->container_class->byval_arg;
		goto handle_enum;
	case MONO_TYPE_VAR:
	case MONO_TYPE_MVAR:
		/* Type variables only occur under generic sharing. */
		g_assert (cfg->gshared);
		if (mini_is_gsharedvt_type (type)) {
			g_assert (cfg->gsharedvt);
			inst->type = STACK_VTYPE;
		} else {
			type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
		}
		return;
	default:
		g_error ("unknown type 0x%02x in eval stack type", type->type);
	}
}
827 
828 /*
829  * The following tables are used to quickly validate the IL code in type_from_op ().
830  */
831 static const char
832 bin_num_table [STACK_MAX] [STACK_MAX] = {
833 	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
834 	{STACK_INV, STACK_I4,  STACK_INV, STACK_PTR, STACK_INV, STACK_MP,  STACK_INV, STACK_INV},
835 	{STACK_INV, STACK_INV, STACK_I8,  STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
836 	{STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP,  STACK_INV, STACK_INV},
837 	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8,  STACK_INV, STACK_INV, STACK_INV, STACK_R8},
838 	{STACK_INV, STACK_MP,  STACK_INV, STACK_MP,  STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
839 	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
840 	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
841 	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
842 };
843 
844 static const char
845 neg_table [] = {
846 	STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
847 };
848 
849 /* reduce the size of this table */
850 static const char
851 bin_int_table [STACK_MAX] [STACK_MAX] = {
852 	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
853 	{STACK_INV, STACK_I4,  STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
854 	{STACK_INV, STACK_INV, STACK_I8,  STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
855 	{STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
856 	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
857 	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
858 	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
859 	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
860 };
861 
862 static const char
863 bin_comp_table [STACK_MAX] [STACK_MAX] = {
864 /*	Inv i  L  p  F  &  O  vt r4 */
865 	{0},
866 	{0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
867 	{0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
868 	{0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
869 	{0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
870 	{0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
871 	{0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
872 	{0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
873 	{0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
874 };
875 
876 /* reduce the size of this table */
877 static const char
878 shift_table [STACK_MAX] [STACK_MAX] = {
879 	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
880 	{STACK_INV, STACK_I4,  STACK_INV, STACK_I4,  STACK_INV, STACK_INV, STACK_INV, STACK_INV},
881 	{STACK_INV, STACK_I8,  STACK_INV, STACK_I8,  STACK_INV, STACK_INV, STACK_INV, STACK_INV},
882 	{STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
883 	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
884 	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
885 	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
886 	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
887 };
888 
889 /*
890  * Tables to map from the non-specific opcode to the matching
891  * type-specific opcode.
892  */
893 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
894 static const guint16
895 binops_op_map [STACK_MAX] = {
896 	0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
897 };
898 
899 /* handles from CEE_NEG to CEE_CONV_U8 */
900 static const guint16
901 unops_op_map [STACK_MAX] = {
902 	0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
903 };
904 
/* handles from CEE_CONV_U2 to CEE_SUB_OVF */
static const guint16
ovfops_op_map [STACK_MAX] = {
	/* Per-stack-type opcode offset for narrowing conversions and
	 * overflow-checked arithmetic (indexed by the source operand type). */
	0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
};
910 
/* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
static const guint16
ovf2ops_op_map [STACK_MAX] = {
	/* Per-stack-type opcode offset for unsigned overflow-checked conversions. */
	0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
};
916 
/* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
static const guint16
ovf3ops_op_map [STACK_MAX] = {
	/* Per-stack-type opcode offset for signed overflow-checked conversions. */
	0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
};
922 
/* handles from CEE_BEQ to CEE_BLT_UN */
static const guint16
beqops_op_map [STACK_MAX] = {
	/* Per-stack-type opcode offset for conditional branches. */
	0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
};
928 
/* handles from CEE_CEQ to CEE_CLT_UN */
static const guint16
ceqops_op_map [STACK_MAX] = {
	/* Per-stack-type opcode offset for compare-and-set opcodes. */
	0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
};
934 
935 /*
936  * Sets ins->type (the type on the eval stack) according to the
937  * type of the opcode and the arguments to it.
938  * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
939  *
940  * FIXME: this function sets ins->type unconditionally in some cases, but
941  * it should set it to invalid for some types (a conv.x on an object)
942  */
943 static void
type_from_op(MonoCompile * cfg,MonoInst * ins,MonoInst * src1,MonoInst * src2)944 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
945 {
946 	switch (ins->opcode) {
947 	/* binops */
948 	case CEE_ADD:
949 	case CEE_SUB:
950 	case CEE_MUL:
951 	case CEE_DIV:
952 	case CEE_REM:
953 		/* FIXME: check unverifiable args for STACK_MP */
954 		ins->type = bin_num_table [src1->type] [src2->type];
955 		ins->opcode += binops_op_map [ins->type];
956 		break;
957 	case CEE_DIV_UN:
958 	case CEE_REM_UN:
959 	case CEE_AND:
960 	case CEE_OR:
961 	case CEE_XOR:
962 		ins->type = bin_int_table [src1->type] [src2->type];
963 		ins->opcode += binops_op_map [ins->type];
964 		break;
965 	case CEE_SHL:
966 	case CEE_SHR:
967 	case CEE_SHR_UN:
968 		ins->type = shift_table [src1->type] [src2->type];
969 		ins->opcode += binops_op_map [ins->type];
970 		break;
971 	case OP_COMPARE:
972 	case OP_LCOMPARE:
973 	case OP_ICOMPARE:
974 		ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
975 		if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
976 			ins->opcode = OP_LCOMPARE;
977 		else if (src1->type == STACK_R4)
978 			ins->opcode = OP_RCOMPARE;
979 		else if (src1->type == STACK_R8)
980 			ins->opcode = OP_FCOMPARE;
981 		else
982 			ins->opcode = OP_ICOMPARE;
983 		break;
984 	case OP_ICOMPARE_IMM:
985 		ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
986 		if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
987 			ins->opcode = OP_LCOMPARE_IMM;
988 		break;
989 	case CEE_BEQ:
990 	case CEE_BGE:
991 	case CEE_BGT:
992 	case CEE_BLE:
993 	case CEE_BLT:
994 	case CEE_BNE_UN:
995 	case CEE_BGE_UN:
996 	case CEE_BGT_UN:
997 	case CEE_BLE_UN:
998 	case CEE_BLT_UN:
999 		ins->opcode += beqops_op_map [src1->type];
1000 		break;
1001 	case OP_CEQ:
1002 		ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
1003 		ins->opcode += ceqops_op_map [src1->type];
1004 		break;
1005 	case OP_CGT:
1006 	case OP_CGT_UN:
1007 	case OP_CLT:
1008 	case OP_CLT_UN:
1009 		ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
1010 		ins->opcode += ceqops_op_map [src1->type];
1011 		break;
1012 	/* unops */
1013 	case CEE_NEG:
1014 		ins->type = neg_table [src1->type];
1015 		ins->opcode += unops_op_map [ins->type];
1016 		break;
1017 	case CEE_NOT:
1018 		if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
1019 			ins->type = src1->type;
1020 		else
1021 			ins->type = STACK_INV;
1022 		ins->opcode += unops_op_map [ins->type];
1023 		break;
1024 	case CEE_CONV_I1:
1025 	case CEE_CONV_I2:
1026 	case CEE_CONV_I4:
1027 	case CEE_CONV_U4:
1028 		ins->type = STACK_I4;
1029 		ins->opcode += unops_op_map [src1->type];
1030 		break;
1031 	case CEE_CONV_R_UN:
1032 		ins->type = STACK_R8;
1033 		switch (src1->type) {
1034 		case STACK_I4:
1035 		case STACK_PTR:
1036 			ins->opcode = OP_ICONV_TO_R_UN;
1037 			break;
1038 		case STACK_I8:
1039 			ins->opcode = OP_LCONV_TO_R_UN;
1040 			break;
1041 		}
1042 		break;
1043 	case CEE_CONV_OVF_I1:
1044 	case CEE_CONV_OVF_U1:
1045 	case CEE_CONV_OVF_I2:
1046 	case CEE_CONV_OVF_U2:
1047 	case CEE_CONV_OVF_I4:
1048 	case CEE_CONV_OVF_U4:
1049 		ins->type = STACK_I4;
1050 		ins->opcode += ovf3ops_op_map [src1->type];
1051 		break;
1052 	case CEE_CONV_OVF_I_UN:
1053 	case CEE_CONV_OVF_U_UN:
1054 		ins->type = STACK_PTR;
1055 		ins->opcode += ovf2ops_op_map [src1->type];
1056 		break;
1057 	case CEE_CONV_OVF_I1_UN:
1058 	case CEE_CONV_OVF_I2_UN:
1059 	case CEE_CONV_OVF_I4_UN:
1060 	case CEE_CONV_OVF_U1_UN:
1061 	case CEE_CONV_OVF_U2_UN:
1062 	case CEE_CONV_OVF_U4_UN:
1063 		ins->type = STACK_I4;
1064 		ins->opcode += ovf2ops_op_map [src1->type];
1065 		break;
1066 	case CEE_CONV_U:
1067 		ins->type = STACK_PTR;
1068 		switch (src1->type) {
1069 		case STACK_I4:
1070 			ins->opcode = OP_ICONV_TO_U;
1071 			break;
1072 		case STACK_PTR:
1073 		case STACK_MP:
1074 		case STACK_OBJ:
1075 #if SIZEOF_VOID_P == 8
1076 			ins->opcode = OP_LCONV_TO_U;
1077 #else
1078 			ins->opcode = OP_MOVE;
1079 #endif
1080 			break;
1081 		case STACK_I8:
1082 			ins->opcode = OP_LCONV_TO_U;
1083 			break;
1084 		case STACK_R8:
1085 			ins->opcode = OP_FCONV_TO_U;
1086 			break;
1087 		}
1088 		break;
1089 	case CEE_CONV_I8:
1090 	case CEE_CONV_U8:
1091 		ins->type = STACK_I8;
1092 		ins->opcode += unops_op_map [src1->type];
1093 		break;
1094 	case CEE_CONV_OVF_I8:
1095 	case CEE_CONV_OVF_U8:
1096 		ins->type = STACK_I8;
1097 		ins->opcode += ovf3ops_op_map [src1->type];
1098 		break;
1099 	case CEE_CONV_OVF_U8_UN:
1100 	case CEE_CONV_OVF_I8_UN:
1101 		ins->type = STACK_I8;
1102 		ins->opcode += ovf2ops_op_map [src1->type];
1103 		break;
1104 	case CEE_CONV_R4:
1105 		ins->type = cfg->r4_stack_type;
1106 		ins->opcode += unops_op_map [src1->type];
1107 		break;
1108 	case CEE_CONV_R8:
1109 		ins->type = STACK_R8;
1110 		ins->opcode += unops_op_map [src1->type];
1111 		break;
1112 	case OP_CKFINITE:
1113 		ins->type = STACK_R8;
1114 		break;
1115 	case CEE_CONV_U2:
1116 	case CEE_CONV_U1:
1117 		ins->type = STACK_I4;
1118 		ins->opcode += ovfops_op_map [src1->type];
1119 		break;
1120 	case CEE_CONV_I:
1121 	case CEE_CONV_OVF_I:
1122 	case CEE_CONV_OVF_U:
1123 		ins->type = STACK_PTR;
1124 		ins->opcode += ovfops_op_map [src1->type];
1125 		break;
1126 	case CEE_ADD_OVF:
1127 	case CEE_ADD_OVF_UN:
1128 	case CEE_MUL_OVF:
1129 	case CEE_MUL_OVF_UN:
1130 	case CEE_SUB_OVF:
1131 	case CEE_SUB_OVF_UN:
1132 		ins->type = bin_num_table [src1->type] [src2->type];
1133 		ins->opcode += ovfops_op_map [src1->type];
1134 		if (ins->type == STACK_R8)
1135 			ins->type = STACK_INV;
1136 		break;
1137 	case OP_LOAD_MEMBASE:
1138 		ins->type = STACK_PTR;
1139 		break;
1140 	case OP_LOADI1_MEMBASE:
1141 	case OP_LOADU1_MEMBASE:
1142 	case OP_LOADI2_MEMBASE:
1143 	case OP_LOADU2_MEMBASE:
1144 	case OP_LOADI4_MEMBASE:
1145 	case OP_LOADU4_MEMBASE:
1146 		ins->type = STACK_PTR;
1147 		break;
1148 	case OP_LOADI8_MEMBASE:
1149 		ins->type = STACK_I8;
1150 		break;
1151 	case OP_LOADR4_MEMBASE:
1152 		ins->type = cfg->r4_stack_type;
1153 		break;
1154 	case OP_LOADR8_MEMBASE:
1155 		ins->type = STACK_R8;
1156 		break;
1157 	default:
1158 		g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1159 		break;
1160 	}
1161 
1162 	if (ins->type == STACK_MP)
1163 		ins->klass = mono_defaults.object_class;
1164 }
1165 
1166 static MonoClass*
ldind_to_type(int op)1167 ldind_to_type (int op)
1168 {
1169 	switch (op) {
1170 	case CEE_LDIND_I1: return mono_defaults.sbyte_class;
1171 	case CEE_LDIND_U1: return mono_defaults.byte_class;
1172 	case CEE_LDIND_I2: return mono_defaults.int16_class;
1173 	case CEE_LDIND_U2: return mono_defaults.uint16_class;
1174 	case CEE_LDIND_I4: return mono_defaults.int32_class;
1175 	case CEE_LDIND_U4: return mono_defaults.uint32_class;
1176 	case CEE_LDIND_I8: return mono_defaults.int64_class;
1177 	case CEE_LDIND_I: return mono_defaults.int_class;
1178 	case CEE_LDIND_R4: return mono_defaults.single_class;
1179 	case CEE_LDIND_R8: return mono_defaults.double_class;
1180 	case CEE_LDIND_REF:return mono_defaults.object_class; //FIXME we should try to return a more specific type
1181 	default: g_error ("Unknown ldind type %d", op);
1182 	}
1183 }
1184 
#if 0

/* Dead code: this table and the checker below are compiled out via #if 0
 * and kept only as a reference implementation of argument/signature
 * compatibility checking. */
static const char
param_table [STACK_MAX] [STACK_MAX] = {
	{0},
};

static int
check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
{
	int i;

	if (sig->hasthis) {
		/* 'this' must be a pointer-like stack value */
		switch (args->type) {
		case STACK_I4:
		case STACK_I8:
		case STACK_R8:
		case STACK_VTYPE:
		case STACK_INV:
			return 0;
		}
		args++;
	}
	for (i = 0; i < sig->param_count; ++i) {
		switch (args [i].type) {
		case STACK_INV:
			return 0;
		case STACK_MP:
			/* managed pointers are only valid for byref parameters */
			if (!sig->params [i]->byref)
				return 0;
			continue;
		case STACK_OBJ:
			if (sig->params [i]->byref)
				return 0;
			switch (sig->params [i]->type) {
			case MONO_TYPE_CLASS:
			case MONO_TYPE_STRING:
			case MONO_TYPE_OBJECT:
			case MONO_TYPE_SZARRAY:
			case MONO_TYPE_ARRAY:
				break;
			default:
				return 0;
			}
			continue;
		case STACK_R8:
			if (sig->params [i]->byref)
				return 0;
			if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
				return 0;
			continue;
		case STACK_PTR:
		case STACK_I4:
		case STACK_I8:
		case STACK_VTYPE:
			break;
		}
		/*if (!param_table [args [i].type] [sig->params [i]->type])
			return 0;*/
	}
	return 1;
}
#endif
1248 
1249 /*
1250  * When we need a pointer to the current domain many times in a method, we
1251  * call mono_domain_get() once and we store the result in a local variable.
1252  * This function returns the variable that represents the MonoDomain*.
1253  */
1254 inline static MonoInst *
mono_get_domainvar(MonoCompile * cfg)1255 mono_get_domainvar (MonoCompile *cfg)
1256 {
1257 	if (!cfg->domainvar)
1258 		cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1259 	return cfg->domainvar;
1260 }
1261 
1262 /*
1263  * The got_var contains the address of the Global Offset Table when AOT
1264  * compiling.
1265  */
1266 MonoInst *
mono_get_got_var(MonoCompile * cfg)1267 mono_get_got_var (MonoCompile *cfg)
1268 {
1269 	if (!cfg->compile_aot || !cfg->backend->need_got_var || cfg->llvm_only)
1270 		return NULL;
1271 	if (!cfg->got_var) {
1272 		cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1273 	}
1274 	return cfg->got_var;
1275 }
1276 
1277 static void
mono_create_rgctx_var(MonoCompile * cfg)1278 mono_create_rgctx_var (MonoCompile *cfg)
1279 {
1280 	if (!cfg->rgctx_var) {
1281 		cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1282 		/* force the var to be stack allocated */
1283 		cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1284 	}
1285 }
1286 
1287 static MonoInst *
mono_get_vtable_var(MonoCompile * cfg)1288 mono_get_vtable_var (MonoCompile *cfg)
1289 {
1290 	g_assert (cfg->gshared);
1291 
1292 	mono_create_rgctx_var (cfg);
1293 
1294 	return cfg->rgctx_var;
1295 }
1296 
1297 static MonoType*
type_from_stack_type(MonoInst * ins)1298 type_from_stack_type (MonoInst *ins) {
1299 	switch (ins->type) {
1300 	case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1301 	case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1302 	case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1303 	case STACK_R4: return &mono_defaults.single_class->byval_arg;
1304 	case STACK_R8: return &mono_defaults.double_class->byval_arg;
1305 	case STACK_MP:
1306 		return &ins->klass->this_arg;
1307 	case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1308 	case STACK_VTYPE: return &ins->klass->byval_arg;
1309 	default:
1310 		g_error ("stack type %d to monotype not handled\n", ins->type);
1311 	}
1312 	return NULL;
1313 }
1314 
1315 static G_GNUC_UNUSED int
type_to_stack_type(MonoCompile * cfg,MonoType * t)1316 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1317 {
1318 	t = mono_type_get_underlying_type (t);
1319 	switch (t->type) {
1320 	case MONO_TYPE_I1:
1321 	case MONO_TYPE_U1:
1322 	case MONO_TYPE_I2:
1323 	case MONO_TYPE_U2:
1324 	case MONO_TYPE_I4:
1325 	case MONO_TYPE_U4:
1326 		return STACK_I4;
1327 	case MONO_TYPE_I:
1328 	case MONO_TYPE_U:
1329 	case MONO_TYPE_PTR:
1330 	case MONO_TYPE_FNPTR:
1331 		return STACK_PTR;
1332 	case MONO_TYPE_CLASS:
1333 	case MONO_TYPE_STRING:
1334 	case MONO_TYPE_OBJECT:
1335 	case MONO_TYPE_SZARRAY:
1336 	case MONO_TYPE_ARRAY:
1337 		return STACK_OBJ;
1338 	case MONO_TYPE_I8:
1339 	case MONO_TYPE_U8:
1340 		return STACK_I8;
1341 	case MONO_TYPE_R4:
1342 		return cfg->r4_stack_type;
1343 	case MONO_TYPE_R8:
1344 		return STACK_R8;
1345 	case MONO_TYPE_VALUETYPE:
1346 	case MONO_TYPE_TYPEDBYREF:
1347 		return STACK_VTYPE;
1348 	case MONO_TYPE_GENERICINST:
1349 		if (mono_type_generic_inst_is_valuetype (t))
1350 			return STACK_VTYPE;
1351 		else
1352 			return STACK_OBJ;
1353 		break;
1354 	default:
1355 		g_assert_not_reached ();
1356 	}
1357 
1358 	return -1;
1359 }
1360 
1361 static MonoClass*
array_access_to_klass(int opcode)1362 array_access_to_klass (int opcode)
1363 {
1364 	switch (opcode) {
1365 	case CEE_LDELEM_U1:
1366 		return mono_defaults.byte_class;
1367 	case CEE_LDELEM_U2:
1368 		return mono_defaults.uint16_class;
1369 	case CEE_LDELEM_I:
1370 	case CEE_STELEM_I:
1371 		return mono_defaults.int_class;
1372 	case CEE_LDELEM_I1:
1373 	case CEE_STELEM_I1:
1374 		return mono_defaults.sbyte_class;
1375 	case CEE_LDELEM_I2:
1376 	case CEE_STELEM_I2:
1377 		return mono_defaults.int16_class;
1378 	case CEE_LDELEM_I4:
1379 	case CEE_STELEM_I4:
1380 		return mono_defaults.int32_class;
1381 	case CEE_LDELEM_U4:
1382 		return mono_defaults.uint32_class;
1383 	case CEE_LDELEM_I8:
1384 	case CEE_STELEM_I8:
1385 		return mono_defaults.int64_class;
1386 	case CEE_LDELEM_R4:
1387 	case CEE_STELEM_R4:
1388 		return mono_defaults.single_class;
1389 	case CEE_LDELEM_R8:
1390 	case CEE_STELEM_R8:
1391 		return mono_defaults.double_class;
1392 	case CEE_LDELEM_REF:
1393 	case CEE_STELEM_REF:
1394 		return mono_defaults.object_class;
1395 	default:
1396 		g_assert_not_reached ();
1397 	}
1398 	return NULL;
1399 }
1400 
1401 /*
1402  * We try to share variables when possible
1403  */
1404 static MonoInst *
mono_compile_get_interface_var(MonoCompile * cfg,int slot,MonoInst * ins)1405 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1406 {
1407 	MonoInst *res;
1408 	int pos, vnum;
1409 
1410 	/* inlining can result in deeper stacks */
1411 	if (slot >= cfg->header->max_stack)
1412 		return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1413 
1414 	pos = ins->type - 1 + slot * STACK_MAX;
1415 
1416 	switch (ins->type) {
1417 	case STACK_I4:
1418 	case STACK_I8:
1419 	case STACK_R8:
1420 	case STACK_PTR:
1421 	case STACK_MP:
1422 	case STACK_OBJ:
1423 		if ((vnum = cfg->intvars [pos]))
1424 			return cfg->varinfo [vnum];
1425 		res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1426 		cfg->intvars [pos] = res->inst_c0;
1427 		break;
1428 	default:
1429 		res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1430 	}
1431 	return res;
1432 }
1433 
1434 static void
mono_save_token_info(MonoCompile * cfg,MonoImage * image,guint32 token,gpointer key)1435 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1436 {
1437 	/*
1438 	 * Don't use this if a generic_context is set, since that means AOT can't
1439 	 * look up the method using just the image+token.
1440 	 * table == 0 means this is a reference made from a wrapper.
1441 	 */
1442 	if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1443 		MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1444 		jump_info_token->image = image;
1445 		jump_info_token->token = token;
1446 		g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1447 	}
1448 }
1449 
1450 /*
1451  * This function is called to handle items that are left on the evaluation stack
1452  * at basic block boundaries. What happens is that we save the values to local variables
1453  * and we reload them later when first entering the target basic block (with the
1454  * handle_loaded_temps () function).
1455  * A single joint point will use the same variables (stored in the array bb->out_stack or
1456  * bb->in_stack, if the basic block is before or after the joint point).
1457  *
1458  * This function needs to be called _before_ emitting the last instruction of
1459  * the bb (i.e. before emitting a branch).
1460  * If the stack merge fails at a join point, cfg->unverifiable is set.
1461  */
1462 static void
handle_stack_args(MonoCompile * cfg,MonoInst ** sp,int count)1463 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1464 {
1465 	int i, bindex;
1466 	MonoBasicBlock *bb = cfg->cbb;
1467 	MonoBasicBlock *outb;
1468 	MonoInst *inst, **locals;
1469 	gboolean found;
1470 
1471 	if (!count)
1472 		return;
1473 	if (cfg->verbose_level > 3)
1474 		printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1475 	if (!bb->out_scount) {
1476 		bb->out_scount = count;
1477 		//printf ("bblock %d has out:", bb->block_num);
1478 		found = FALSE;
1479 		for (i = 0; i < bb->out_count; ++i) {
1480 			outb = bb->out_bb [i];
1481 			/* exception handlers are linked, but they should not be considered for stack args */
1482 			if (outb->flags & BB_EXCEPTION_HANDLER)
1483 				continue;
1484 			//printf (" %d", outb->block_num);
1485 			if (outb->in_stack) {
1486 				found = TRUE;
1487 				bb->out_stack = outb->in_stack;
1488 				break;
1489 			}
1490 		}
1491 		//printf ("\n");
1492 		if (!found) {
1493 			bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1494 			for (i = 0; i < count; ++i) {
1495 				/*
1496 				 * try to reuse temps already allocated for this purpouse, if they occupy the same
1497 				 * stack slot and if they are of the same type.
1498 				 * This won't cause conflicts since if 'local' is used to
1499 				 * store one of the values in the in_stack of a bblock, then
1500 				 * the same variable will be used for the same outgoing stack
1501 				 * slot as well.
1502 				 * This doesn't work when inlining methods, since the bblocks
1503 				 * in the inlined methods do not inherit their in_stack from
1504 				 * the bblock they are inlined to. See bug #58863 for an
1505 				 * example.
1506 				 */
1507 				if (cfg->inlined_method)
1508 					bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1509 				else
1510 					bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
1511 			}
1512 		}
1513 	}
1514 
1515 	for (i = 0; i < bb->out_count; ++i) {
1516 		outb = bb->out_bb [i];
1517 		/* exception handlers are linked, but they should not be considered for stack args */
1518 		if (outb->flags & BB_EXCEPTION_HANDLER)
1519 			continue;
1520 		if (outb->in_scount) {
1521 			if (outb->in_scount != bb->out_scount) {
1522 				cfg->unverifiable = TRUE;
1523 				return;
1524 			}
1525 			continue; /* check they are the same locals */
1526 		}
1527 		outb->in_scount = count;
1528 		outb->in_stack = bb->out_stack;
1529 	}
1530 
1531 	locals = bb->out_stack;
1532 	cfg->cbb = bb;
1533 	for (i = 0; i < count; ++i) {
1534 		EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1535 		inst->cil_code = sp [i]->cil_code;
1536 		sp [i] = locals [i];
1537 		if (cfg->verbose_level > 3)
1538 			printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1539 	}
1540 
1541 	/*
1542 	 * It is possible that the out bblocks already have in_stack assigned, and
1543 	 * the in_stacks differ. In this case, we will store to all the different
1544 	 * in_stacks.
1545 	 */
1546 
1547 	found = TRUE;
1548 	bindex = 0;
1549 	while (found) {
1550 		/* Find a bblock which has a different in_stack */
1551 		found = FALSE;
1552 		while (bindex < bb->out_count) {
1553 			outb = bb->out_bb [bindex];
1554 			/* exception handlers are linked, but they should not be considered for stack args */
1555 			if (outb->flags & BB_EXCEPTION_HANDLER) {
1556 				bindex++;
1557 				continue;
1558 			}
1559 			if (outb->in_stack != locals) {
1560 				for (i = 0; i < count; ++i) {
1561 					EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1562 					inst->cil_code = sp [i]->cil_code;
1563 					sp [i] = locals [i];
1564 					if (cfg->verbose_level > 3)
1565 						printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1566 				}
1567 				locals = outb->in_stack;
1568 				found = TRUE;
1569 				break;
1570 			}
1571 			bindex ++;
1572 		}
1573 	}
1574 }
1575 
/*
 * mini_emit_runtime_constant:
 *
 *   Emit IR loading a runtime constant described by PATCH_TYPE/DATA.
 * When AOT compiling this becomes a patchable constant resolved at load
 * time; when JITting the patch is resolved immediately and the value is
 * embedded as a pointer constant. Returns the emitted instruction.
 */
MonoInst*
mini_emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
{
	MonoInst *ins;

	if (cfg->compile_aot) {
		EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
	} else {
		MonoJumpInfo ji;
		gpointer target;
		MonoError error;

		ji.type = patch_type;
		ji.data.target = data;
		/* Resolve now: failure here indicates an internal error, not bad IL. */
		target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE, &error);
		mono_error_assert_ok (&error);

		EMIT_NEW_PCONST (cfg, ins, target);
	}
	return ins;
}
1597 
1598 static MonoInst*
mono_create_fast_tls_getter(MonoCompile * cfg,MonoTlsKey key)1599 mono_create_fast_tls_getter (MonoCompile *cfg, MonoTlsKey key)
1600 {
1601 	int tls_offset = mono_tls_get_tls_offset (key);
1602 
1603 	if (cfg->compile_aot)
1604 		return NULL;
1605 
1606 	if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
1607 		MonoInst *ins;
1608 		MONO_INST_NEW (cfg, ins, OP_TLS_GET);
1609 		ins->dreg = mono_alloc_preg (cfg);
1610 		ins->inst_offset = tls_offset;
1611 		return ins;
1612 	}
1613 	return NULL;
1614 }
1615 
1616 static MonoInst*
mono_create_fast_tls_setter(MonoCompile * cfg,MonoInst * value,MonoTlsKey key)1617 mono_create_fast_tls_setter (MonoCompile *cfg, MonoInst* value, MonoTlsKey key)
1618 {
1619 	int tls_offset = mono_tls_get_tls_offset (key);
1620 
1621 	if (cfg->compile_aot)
1622 		return NULL;
1623 
1624 	if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
1625 		MonoInst *ins;
1626 		MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1627 		ins->sreg1 = value->dreg;
1628 		ins->inst_offset = tls_offset;
1629 		return ins;
1630 	}
1631 	return NULL;
1632 }
1633 
1634 
/*
 * mono_create_tls_get:
 *
 *   Emit IR reading the TLS slot KEY and return the instruction producing
 * the value. Prefers a direct OP_TLS_GET; falls back to a call through a
 * trampoline (AOT) or the JIT icall getter.
 */
MonoInst*
mono_create_tls_get (MonoCompile *cfg, MonoTlsKey key)
{
	MonoInst *fast_tls = NULL;

	/* the fallback-tls debug option forces the slow getter paths below */
	if (!mini_get_debug_options ()->use_fallback_tls)
		fast_tls = mono_create_fast_tls_getter (cfg, key);

	if (fast_tls) {
		MONO_ADD_INS (cfg->cbb, fast_tls);
		return fast_tls;
	}

	if (cfg->compile_aot) {
		MonoInst *addr;
		/*
		 * tls getters are critical pieces of code and we don't want to resolve them
		 * through the standard plt/tramp mechanism since we might expose ourselves
		 * to crashes and infinite recursions.
		 */
		EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GET_TLS_TRAMP, (void*)key);
		return mini_emit_calli (cfg, helper_sig_get_tls_tramp, NULL, addr, NULL, NULL);
	} else {
		gpointer getter = mono_tls_get_tls_getter (key, FALSE);
		return mono_emit_jit_icall (cfg, getter, NULL);
	}
}
1662 
1663 static MonoInst*
mono_create_tls_set(MonoCompile * cfg,MonoInst * value,MonoTlsKey key)1664 mono_create_tls_set (MonoCompile *cfg, MonoInst *value, MonoTlsKey key)
1665 {
1666 	MonoInst *fast_tls = NULL;
1667 
1668 	if (!mini_get_debug_options ()->use_fallback_tls)
1669 		fast_tls = mono_create_fast_tls_setter (cfg, value, key);
1670 
1671 	if (fast_tls) {
1672 		MONO_ADD_INS (cfg->cbb, fast_tls);
1673 		return fast_tls;
1674 	}
1675 
1676 	if (cfg->compile_aot) {
1677 		MonoInst *addr;
1678 		EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_SET_TLS_TRAMP, (void*)key);
1679 		return mini_emit_calli (cfg, helper_sig_set_tls_tramp, &value, addr, NULL, NULL);
1680 	} else {
1681 		gpointer setter = mono_tls_get_tls_setter (key, FALSE);
1682 		return mono_emit_jit_icall (cfg, setter, &value);
1683 	}
1684 }
1685 
1686 /*
1687  * emit_push_lmf:
1688  *
1689  *   Emit IR to push the current LMF onto the LMF stack.
1690  */
1691 static void
emit_push_lmf(MonoCompile * cfg)1692 emit_push_lmf (MonoCompile *cfg)
1693 {
1694 	/*
1695 	 * Emit IR to push the LMF:
1696 	 * lmf_addr = <lmf_addr from tls>
1697 	 * lmf->lmf_addr = lmf_addr
1698 	 * lmf->prev_lmf = *lmf_addr
1699 	 * *lmf_addr = lmf
1700 	 */
1701 	MonoInst *ins, *lmf_ins;
1702 
1703 	if (!cfg->lmf_ir)
1704 		return;
1705 
1706 	int lmf_reg, prev_lmf_reg;
1707 	/*
1708 	 * Store lmf_addr in a variable, so it can be allocated to a global register.
1709 	 */
1710 	if (!cfg->lmf_addr_var)
1711 		cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1712 
1713 #ifdef HOST_WIN32
1714 	ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
1715 	g_assert (ins);
1716 	int jit_tls_dreg = ins->dreg;
1717 
1718 	lmf_reg = alloc_preg (cfg);
1719 	EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
1720 #else
1721 	lmf_ins = mono_create_tls_get (cfg, TLS_KEY_LMF_ADDR);
1722 	g_assert (lmf_ins);
1723 #endif
1724 	lmf_ins->dreg = cfg->lmf_addr_var->dreg;
1725 
1726 	EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1727 	lmf_reg = ins->dreg;
1728 
1729 	prev_lmf_reg = alloc_preg (cfg);
1730 	/* Save previous_lmf */
1731 	EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
1732 	EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
1733 	/* Set new lmf */
1734 	EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
1735 }
1736 
1737 /*
1738  * emit_pop_lmf:
1739  *
1740  *   Emit IR to pop the current LMF from the LMF stack.
1741  */
1742 static void
emit_pop_lmf(MonoCompile * cfg)1743 emit_pop_lmf (MonoCompile *cfg)
1744 {
1745 	int lmf_reg, lmf_addr_reg;
1746 	MonoInst *ins;
1747 
1748 	if (!cfg->lmf_ir)
1749 		return;
1750 
1751  	EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1752  	lmf_reg = ins->dreg;
1753 
1754 	int prev_lmf_reg;
1755 	/*
1756 	 * Emit IR to pop the LMF:
1757 	 * *(lmf->lmf_addr) = lmf->prev_lmf
1758 	 */
1759 	/* This could be called before emit_push_lmf () */
1760 	if (!cfg->lmf_addr_var)
1761 		cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1762 	lmf_addr_reg = cfg->lmf_addr_var->dreg;
1763 
1764 	prev_lmf_reg = alloc_preg (cfg);
1765 	EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
1766 	EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
1767 }
1768 
/*
 * ret_type_to_call_opcode:
 *
 *   Return the call opcode matching the return type TYPE. CALLI selects the
 * indirect (_REG) variant, VIRT the virtual (_MEMBASE) variant; otherwise the
 * plain opcode is returned. Enums and generic instances are unwrapped and
 * re-dispatched via the handle_enum label.
 */
static int
ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
{
handle_enum:
	type = mini_get_underlying_type (type);
	switch (type->type) {
	case MONO_TYPE_VOID:
		return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
		return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_PTR:
	case MONO_TYPE_FNPTR:
		return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
		return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
	case MONO_TYPE_R4:
		/* r4fp backends keep R4 results in float registers (RCALL) */
		if (cfg->r4fp)
			return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
		else
			return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
	case MONO_TYPE_R8:
		return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
	case MONO_TYPE_VALUETYPE:
		if (type->data.klass->enumtype) {
			/* enums are handled as their underlying integral type */
			type = mono_class_enum_basetype (type->data.klass);
			goto handle_enum;
		} else
			return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
	case MONO_TYPE_TYPEDBYREF:
		return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
	case MONO_TYPE_GENERICINST:
		type = &type->data.generic_class->container_class->byval_arg;
		goto handle_enum;
	case MONO_TYPE_VAR:
	case MONO_TYPE_MVAR:
		/* gsharedvt */
		return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
	default:
		g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
	}
	return -1;
}
1825 
1826 //XXX this ignores if t is byref
1827 #define MONO_TYPE_IS_PRIMITIVE_SCALAR(t) ((((((t)->type >= MONO_TYPE_BOOLEAN && (t)->type <= MONO_TYPE_U8) || ((t)->type >= MONO_TYPE_I && (t)->type <= MONO_TYPE_U)))))
1828 
1829 /*
1830  * target_type_is_incompatible:
1831  * @cfg: MonoCompile context
1832  *
1833  * Check that the item @arg on the evaluation stack can be stored
1834  * in the target type (can be a local, or field, etc).
1835  * The cfg arg can be used to check if we need verification or just
1836  * validity checks.
1837  *
1838  * Returns: non-0 value if arg can't be stored on a target.
1839  */
1840 static int
target_type_is_incompatible(MonoCompile * cfg,MonoType * target,MonoInst * arg)1841 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1842 {
1843 	MonoType *simple_type;
1844 	MonoClass *klass;
1845 
1846 	if (target->byref) {
1847 		/* FIXME: check that the pointed to types match */
1848 		if (arg->type == STACK_MP) {
1849 			/* This is needed to handle gshared types + ldaddr. We lower the types so we can handle enums and other typedef-like types. */
1850 			MonoClass *target_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&mono_class_from_mono_type (target)->byval_arg));
1851 			MonoClass *source_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg));
1852 
1853 			/* if the target is native int& or X* or same type */
1854 			if (target->type == MONO_TYPE_I || target->type == MONO_TYPE_PTR || target_class_lowered == source_class_lowered)
1855 				return 0;
1856 
1857 			/* Both are primitive type byrefs and the source points to a larger type that the destination */
1858 			if (MONO_TYPE_IS_PRIMITIVE_SCALAR (&target_class_lowered->byval_arg) && MONO_TYPE_IS_PRIMITIVE_SCALAR (&source_class_lowered->byval_arg) &&
1859 				mono_class_instance_size (target_class_lowered) <= mono_class_instance_size (source_class_lowered))
1860 				return 0;
1861 			return 1;
1862 		}
1863 		if (arg->type == STACK_PTR)
1864 			return 0;
1865 		return 1;
1866 	}
1867 
1868 	simple_type = mini_get_underlying_type (target);
1869 	switch (simple_type->type) {
1870 	case MONO_TYPE_VOID:
1871 		return 1;
1872 	case MONO_TYPE_I1:
1873 	case MONO_TYPE_U1:
1874 	case MONO_TYPE_I2:
1875 	case MONO_TYPE_U2:
1876 	case MONO_TYPE_I4:
1877 	case MONO_TYPE_U4:
1878 		if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1879 			return 1;
1880 		return 0;
1881 	case MONO_TYPE_PTR:
1882 		/* STACK_MP is needed when setting pinned locals */
1883 		if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1884 			return 1;
1885 		return 0;
1886 	case MONO_TYPE_I:
1887 	case MONO_TYPE_U:
1888 	case MONO_TYPE_FNPTR:
1889 		/*
1890 		 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
1891 		 * in native int. (#688008).
1892 		 */
1893 		if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1894 			return 1;
1895 		return 0;
1896 	case MONO_TYPE_CLASS:
1897 	case MONO_TYPE_STRING:
1898 	case MONO_TYPE_OBJECT:
1899 	case MONO_TYPE_SZARRAY:
1900 	case MONO_TYPE_ARRAY:
1901 		if (arg->type != STACK_OBJ)
1902 			return 1;
1903 		/* FIXME: check type compatibility */
1904 		return 0;
1905 	case MONO_TYPE_I8:
1906 	case MONO_TYPE_U8:
1907 		if (arg->type != STACK_I8)
1908 			return 1;
1909 		return 0;
1910 	case MONO_TYPE_R4:
1911 		if (arg->type != cfg->r4_stack_type)
1912 			return 1;
1913 		return 0;
1914 	case MONO_TYPE_R8:
1915 		if (arg->type != STACK_R8)
1916 			return 1;
1917 		return 0;
1918 	case MONO_TYPE_VALUETYPE:
1919 		if (arg->type != STACK_VTYPE)
1920 			return 1;
1921 		klass = mono_class_from_mono_type (simple_type);
1922 		if (klass != arg->klass)
1923 			return 1;
1924 		return 0;
1925 	case MONO_TYPE_TYPEDBYREF:
1926 		if (arg->type != STACK_VTYPE)
1927 			return 1;
1928 		klass = mono_class_from_mono_type (simple_type);
1929 		if (klass != arg->klass)
1930 			return 1;
1931 		return 0;
1932 	case MONO_TYPE_GENERICINST:
1933 		if (mono_type_generic_inst_is_valuetype (simple_type)) {
1934 			MonoClass *target_class;
1935 			if (arg->type != STACK_VTYPE)
1936 				return 1;
1937 			klass = mono_class_from_mono_type (simple_type);
1938 			target_class = mono_class_from_mono_type (target);
1939 			/* The second cases is needed when doing partial sharing */
1940 			if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg)))
1941 				return 1;
1942 			return 0;
1943 		} else {
1944 			if (arg->type != STACK_OBJ)
1945 				return 1;
1946 			/* FIXME: check type compatibility */
1947 			return 0;
1948 		}
1949 	case MONO_TYPE_VAR:
1950 	case MONO_TYPE_MVAR:
1951 		g_assert (cfg->gshared);
1952 		if (mini_type_var_is_vt (simple_type)) {
1953 			if (arg->type != STACK_VTYPE)
1954 				return 1;
1955 		} else {
1956 			if (arg->type != STACK_OBJ)
1957 				return 1;
1958 		}
1959 		return 0;
1960 	default:
1961 		g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1962 	}
1963 	return 1;
1964 }
1965 
1966 /*
1967  * Prepare arguments for passing to a function call.
1968  * Return a non-zero value if the arguments can't be passed to the given
1969  * signature.
1970  * The type checks are not yet complete and some conversions may need
1971  * casts on 32 or 64 bit architectures.
1972  *
1973  * FIXME: implement this using target_type_is_incompatible ()
1974  */
1975 static int
check_call_signature(MonoCompile * cfg,MonoMethodSignature * sig,MonoInst ** args)1976 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1977 {
1978 	MonoType *simple_type;
1979 	int i;
1980 
1981 	if (sig->hasthis) {
1982 		if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1983 			return 1;
1984 		args++;
1985 	}
1986 	for (i = 0; i < sig->param_count; ++i) {
1987 		if (sig->params [i]->byref) {
1988 			if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1989 				return 1;
1990 			continue;
1991 		}
1992 		simple_type = mini_get_underlying_type (sig->params [i]);
1993 handle_enum:
1994 		switch (simple_type->type) {
1995 		case MONO_TYPE_VOID:
1996 			return 1;
1997 			continue;
1998 		case MONO_TYPE_I1:
1999 		case MONO_TYPE_U1:
2000 		case MONO_TYPE_I2:
2001 		case MONO_TYPE_U2:
2002 		case MONO_TYPE_I4:
2003 		case MONO_TYPE_U4:
2004 			if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2005 				return 1;
2006 			continue;
2007 		case MONO_TYPE_I:
2008 		case MONO_TYPE_U:
2009 		case MONO_TYPE_PTR:
2010 		case MONO_TYPE_FNPTR:
2011 			if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2012 				return 1;
2013 			continue;
2014 		case MONO_TYPE_CLASS:
2015 		case MONO_TYPE_STRING:
2016 		case MONO_TYPE_OBJECT:
2017 		case MONO_TYPE_SZARRAY:
2018 		case MONO_TYPE_ARRAY:
2019 			if (args [i]->type != STACK_OBJ)
2020 				return 1;
2021 			continue;
2022 		case MONO_TYPE_I8:
2023 		case MONO_TYPE_U8:
2024 			if (args [i]->type != STACK_I8)
2025 				return 1;
2026 			continue;
2027 		case MONO_TYPE_R4:
2028 			if (args [i]->type != cfg->r4_stack_type)
2029 				return 1;
2030 			continue;
2031 		case MONO_TYPE_R8:
2032 			if (args [i]->type != STACK_R8)
2033 				return 1;
2034 			continue;
2035 		case MONO_TYPE_VALUETYPE:
2036 			if (simple_type->data.klass->enumtype) {
2037 				simple_type = mono_class_enum_basetype (simple_type->data.klass);
2038 				goto handle_enum;
2039 			}
2040 			if (args [i]->type != STACK_VTYPE)
2041 				return 1;
2042 			continue;
2043 		case MONO_TYPE_TYPEDBYREF:
2044 			if (args [i]->type != STACK_VTYPE)
2045 				return 1;
2046 			continue;
2047 		case MONO_TYPE_GENERICINST:
2048 			simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2049 			goto handle_enum;
2050 		case MONO_TYPE_VAR:
2051 		case MONO_TYPE_MVAR:
2052 			/* gsharedvt */
2053 			if (args [i]->type != STACK_VTYPE)
2054 				return 1;
2055 			continue;
2056 		default:
2057 			g_error ("unknown type 0x%02x in check_call_signature",
2058 				 simple_type->type);
2059 		}
2060 	}
2061 	return 0;
2062 }
2063 
2064 static int
callvirt_to_call(int opcode)2065 callvirt_to_call (int opcode)
2066 {
2067 	switch (opcode) {
2068 	case OP_CALL_MEMBASE:
2069 		return OP_CALL;
2070 	case OP_VOIDCALL_MEMBASE:
2071 		return OP_VOIDCALL;
2072 	case OP_FCALL_MEMBASE:
2073 		return OP_FCALL;
2074 	case OP_RCALL_MEMBASE:
2075 		return OP_RCALL;
2076 	case OP_VCALL_MEMBASE:
2077 		return OP_VCALL;
2078 	case OP_LCALL_MEMBASE:
2079 		return OP_LCALL;
2080 	default:
2081 		g_assert_not_reached ();
2082 	}
2083 
2084 	return -1;
2085 }
2086 
2087 static int
callvirt_to_call_reg(int opcode)2088 callvirt_to_call_reg (int opcode)
2089 {
2090 	switch (opcode) {
2091 	case OP_CALL_MEMBASE:
2092 		return OP_CALL_REG;
2093 	case OP_VOIDCALL_MEMBASE:
2094 		return OP_VOIDCALL_REG;
2095 	case OP_FCALL_MEMBASE:
2096 		return OP_FCALL_REG;
2097 	case OP_RCALL_MEMBASE:
2098 		return OP_RCALL_REG;
2099 	case OP_VCALL_MEMBASE:
2100 		return OP_VCALL_REG;
2101 	case OP_LCALL_MEMBASE:
2102 		return OP_LCALL_REG;
2103 	default:
2104 		g_assert_not_reached ();
2105 	}
2106 
2107 	return -1;
2108 }
2109 
2110 /* Either METHOD or IMT_ARG needs to be set */
2111 static void
emit_imt_argument(MonoCompile * cfg,MonoCallInst * call,MonoMethod * method,MonoInst * imt_arg)2112 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2113 {
2114 	int method_reg;
2115 
2116 	if (COMPILE_LLVM (cfg)) {
2117 		if (imt_arg) {
2118 			method_reg = alloc_preg (cfg);
2119 			MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2120 		} else {
2121 			MonoInst *ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2122 			method_reg = ins->dreg;
2123 		}
2124 
2125 #ifdef ENABLE_LLVM
2126 		call->imt_arg_reg = method_reg;
2127 #endif
2128 		mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2129 		return;
2130 	}
2131 
2132 	if (imt_arg) {
2133 		method_reg = alloc_preg (cfg);
2134 		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2135 	} else {
2136 		MonoInst *ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2137 		method_reg = ins->dreg;
2138 	}
2139 
2140 	mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2141 }
2142 
2143 static MonoJumpInfo *
mono_patch_info_new(MonoMemPool * mp,int ip,MonoJumpInfoType type,gconstpointer target)2144 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2145 {
2146 	MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2147 
2148 	ji->ip.i = ip;
2149 	ji->type = type;
2150 	ji->data.target = target;
2151 
2152 	return ji;
2153 }
2154 
2155 int
mini_class_check_context_used(MonoCompile * cfg,MonoClass * klass)2156 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2157 {
2158 	if (cfg->gshared)
2159 		return mono_class_check_context_used (klass);
2160 	else
2161 		return 0;
2162 }
2163 
2164 static int
mini_method_check_context_used(MonoCompile * cfg,MonoMethod * method)2165 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2166 {
2167 	if (cfg->gshared)
2168 		return mono_method_check_context_used (method);
2169 	else
2170 		return 0;
2171 }
2172 
2173 /*
2174  * check_method_sharing:
2175  *
2176  *   Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
2177  */
2178 static void
check_method_sharing(MonoCompile * cfg,MonoMethod * cmethod,gboolean * out_pass_vtable,gboolean * out_pass_mrgctx)2179 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2180 {
2181 	gboolean pass_vtable = FALSE;
2182 	gboolean pass_mrgctx = FALSE;
2183 
2184 	if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2185 		(mono_class_is_ginst (cmethod->klass) || mono_class_is_gtd (cmethod->klass))) {
2186 		gboolean sharable = FALSE;
2187 
2188 		if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2189 			sharable = TRUE;
2190 
2191 		/*
2192 		 * Pass vtable iff target method might
2193 		 * be shared, which means that sharing
2194 		 * is enabled for its class and its
2195 		 * context is sharable (and it's not a
2196 		 * generic method).
2197 		 */
2198 		if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
2199 			pass_vtable = TRUE;
2200 	}
2201 
2202 	if (mini_method_get_context (cmethod) &&
2203 		mini_method_get_context (cmethod)->method_inst) {
2204 		g_assert (!pass_vtable);
2205 
2206 		if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
2207 			pass_mrgctx = TRUE;
2208 		} else {
2209 			if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
2210 				pass_mrgctx = TRUE;
2211 		}
2212 	}
2213 
2214 	if (out_pass_vtable)
2215 		*out_pass_vtable = pass_vtable;
2216 	if (out_pass_mrgctx)
2217 		*out_pass_mrgctx = pass_mrgctx;
2218 }
2219 
/*
 * mono_emit_call_args:
 *
 *   Create a MonoCallInst for a call with signature SIG and arguments ARGS and
 * run the arch-specific argument lowering on it. CALLI/VIRTUAL_/TAIL select the
 * call opcode; RGCTX and UNBOX_TRAMPOLINE are recorded on the call instruction;
 * TARGET is only used for the profiler tail-call event.
 * The call instruction is NOT added to the current basic block by this function.
 */
inline static MonoCallInst *
mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
					 MonoInst **args, int calli, int virtual_, int tail, int rgctx, int unbox_trampoline, MonoMethod *target)
{
	MonoType *sig_ret;
	MonoCallInst *call;
#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
	int i;
#endif

	/* llvm-only mode does not support real tail calls */
	if (cfg->llvm_only)
		tail = FALSE;

	if (tail) {
		mini_profiler_emit_tail_call (cfg, target);

		MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
	} else
		MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual_));

	call->args = args;
	call->signature = sig;
	call->rgctx_reg = rgctx;
	sig_ret = mini_get_underlying_type (sig->ret);

	type_to_eval_stack_type ((cfg), sig_ret, &call->inst);

	if (tail) {
		if (mini_type_is_vtype (sig_ret)) {
			call->vret_var = cfg->vret_addr;
			//g_assert_not_reached ();
		}
	} else if (mini_type_is_vtype (sig_ret)) {
		/* Valuetype return: allocate a local to hold it and pass its address out of band */
		MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
		MonoInst *loada;

		temp->backend.is_pinvoke = sig->pinvoke;

		/*
		 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
		 * address of return value to increase optimization opportunities.
		 * Before vtype decomposition, the dreg of the call ins itself represents the
		 * fact the call modifies the return value. After decomposition, the call will
		 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
		 * will be transformed into an LDADDR.
		 */
		MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
		loada->dreg = alloc_preg (cfg);
		loada->inst_p0 = temp;
		/* We reference the call too since call->dreg could change during optimization */
		loada->inst_p1 = call;
		MONO_ADD_INS (cfg->cbb, loada);

		call->inst.dreg = temp->dreg;

		call->vret_var = loada;
	} else if (!MONO_TYPE_IS_VOID (sig_ret))
		call->inst.dreg = alloc_dreg (cfg, (MonoStackType)call->inst.type);

#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
	if (COMPILE_SOFT_FLOAT (cfg)) {
		/*
		 * If the call has a float argument, we would need to do an r8->r4 conversion using
		 * an icall, but that cannot be done during the call sequence since it would clobber
		 * the call registers + the stack. So we do it before emitting the call.
		 */
		for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
			MonoType *t;
			MonoInst *in = call->args [i];

			/* args [0] is 'this' when hasthis; it is a native int, not a parameter type */
			if (i >= sig->hasthis)
				t = sig->params [i - sig->hasthis];
			else
				t = &mono_defaults.int_class->byval_arg;
			t = mono_type_get_underlying_type (t);

			if (!t->byref && t->type == MONO_TYPE_R4) {
				MonoInst *iargs [1];
				MonoInst *conv;

				iargs [0] = in;
				conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);

				/* The result will be in an int vreg */
				call->args [i] = conv;
			}
		}
	}
#endif

	call->need_unbox_trampoline = unbox_trampoline;

#ifdef ENABLE_LLVM
	if (COMPILE_LLVM (cfg))
		mono_llvm_emit_call (cfg, call);
	else
		mono_arch_emit_call (cfg, call);
#else
	mono_arch_emit_call (cfg, call);
#endif

	/* Track the largest outgoing parameter area and note that this method makes calls */
	cfg->param_area = MAX (cfg->param_area, call->stack_usage);
	cfg->flags |= MONO_CFG_HAS_CALLS;

	return call;
}
2326 
/*
 * set_rgctx_arg:
 *
 *   Register RGCTX_REG as the out-of-band rgctx argument of CALL, passed in
 * MONO_ARCH_RGCTX_REG.
 * NOTE(review): the rgctx_arg parameter is unused here; callers have already
 * copied its value into rgctx_reg before calling this.
 */
static void
set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
{
	mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
	cfg->uses_rgctx_reg = TRUE;
	/* Here call->rgctx_reg acts as a boolean "has rgctx" flag, not a vreg number */
	call->rgctx_reg = TRUE;
#ifdef ENABLE_LLVM
	call->rgctx_arg_reg = rgctx_reg;
#endif
}
2337 
/*
 * mini_emit_calli:
 *
 *   Emit an indirect call through ADDR with signature SIG and arguments ARGS.
 * IMT_ARG and RGCTX_ARG, when non-NULL, are passed out of band in the
 * architecture IMT/RGCTX registers. For pinvoke wrappers the stack pointer may
 * additionally be checked around the call (cfg->check_pinvoke_callconv).
 * Returns the call instruction.
 */
MonoInst*
mini_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
{
	MonoCallInst *call;
	MonoInst *ins;
	int rgctx_reg = -1;
	gboolean check_sp = FALSE;

	/* Only verify the native stack pointer for pinvoke wrappers */
	if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
		WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);

		if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
			check_sp = TRUE;
	}

	/* Copy the rgctx value into a fresh vreg before argument lowering can clobber it */
	if (rgctx_arg) {
		rgctx_reg = mono_alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
	}

	if (check_sp) {
		if (!cfg->stack_inbalance_var)
			cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);

		/* Save the stack pointer before the call */
		MONO_INST_NEW (cfg, ins, OP_GET_SP);
		ins->dreg = cfg->stack_inbalance_var->dreg;
		MONO_ADD_INS (cfg->cbb, ins);
	}

	call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE, NULL);

	/* The call target address is the source operand of the indirect call */
	call->inst.sreg1 = addr->dreg;

	if (imt_arg)
		emit_imt_argument (cfg, call, NULL, imt_arg);

	MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

	if (check_sp) {
		int sp_reg;

		sp_reg = mono_alloc_preg (cfg);

		/* Read the stack pointer after the call to compare with the saved one */
		MONO_INST_NEW (cfg, ins, OP_GET_SP);
		ins->dreg = sp_reg;
		MONO_ADD_INS (cfg->cbb, ins);

		/* Restore the stack so we don't crash when throwing the exception */
		MONO_INST_NEW (cfg, ins, OP_SET_SP);
		ins->sreg1 = cfg->stack_inbalance_var->dreg;
		MONO_ADD_INS (cfg->cbb, ins);

		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
	}

	if (rgctx_arg)
		set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);

	return (MonoInst*)call;
}
2399 
2400 static MonoInst*
2401 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2402 
2403 static MonoInst*
mono_emit_method_call_full(MonoCompile * cfg,MonoMethod * method,MonoMethodSignature * sig,gboolean tail,MonoInst ** args,MonoInst * this_ins,MonoInst * imt_arg,MonoInst * rgctx_arg)2404 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2405 							MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
2406 {
2407 #ifndef DISABLE_REMOTING
2408 	gboolean might_be_remote = FALSE;
2409 #endif
2410 	gboolean virtual_ = this_ins != NULL;
2411 	gboolean enable_for_aot = TRUE;
2412 	int context_used;
2413 	MonoCallInst *call;
2414 	MonoInst *call_target = NULL;
2415 	int rgctx_reg = 0;
2416 	gboolean need_unbox_trampoline;
2417 
2418 	if (!sig)
2419 		sig = mono_method_signature (method);
2420 
2421 	if (cfg->llvm_only && (mono_class_is_interface (method->klass)))
2422 		g_assert_not_reached ();
2423 
2424 	if (rgctx_arg) {
2425 		rgctx_reg = mono_alloc_preg (cfg);
2426 		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2427 	}
2428 
2429 	if (method->string_ctor) {
2430 		/* Create the real signature */
2431 		/* FIXME: Cache these */
2432 		MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2433 		ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2434 
2435 		sig = ctor_sig;
2436 	}
2437 
2438 	context_used = mini_method_check_context_used (cfg, method);
2439 
2440 #ifndef DISABLE_REMOTING
2441 	might_be_remote = this_ins && sig->hasthis &&
2442 		(mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2443 		!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
2444 
2445 	if (might_be_remote && context_used) {
2446 		MonoInst *addr;
2447 
2448 		g_assert (cfg->gshared);
2449 
2450 		addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2451 
2452 		return mini_emit_calli (cfg, sig, args, addr, NULL, NULL);
2453 	}
2454 #endif
2455 
2456 	if (cfg->llvm_only && !call_target && virtual_ && (method->flags & METHOD_ATTRIBUTE_VIRTUAL))
2457 		return emit_llvmonly_virtual_call (cfg, method, sig, 0, args);
2458 
2459 	need_unbox_trampoline = method->klass == mono_defaults.object_class || mono_class_is_interface (method->klass);
2460 
2461 	call = mono_emit_call_args (cfg, sig, args, FALSE, virtual_, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline, method);
2462 
2463 #ifndef DISABLE_REMOTING
2464 	if (might_be_remote)
2465 		call->method = mono_marshal_get_remoting_invoke_with_check (method);
2466 	else
2467 #endif
2468 		call->method = method;
2469 	call->inst.flags |= MONO_INST_HAS_METHOD;
2470 	call->inst.inst_left = this_ins;
2471 	call->tail_call = tail;
2472 
2473 	if (virtual_) {
2474 		int vtable_reg, slot_reg, this_reg;
2475 		int offset;
2476 
2477 		this_reg = this_ins->dreg;
2478 
2479 		if (!cfg->llvm_only && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2480 			MonoInst *dummy_use;
2481 
2482 			MONO_EMIT_NULL_CHECK (cfg, this_reg);
2483 
2484 			/* Make a call to delegate->invoke_impl */
2485 			call->inst.inst_basereg = this_reg;
2486 			call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2487 			MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2488 
2489 			/* We must emit a dummy use here because the delegate trampoline will
2490 			replace the 'this' argument with the delegate target making this activation
2491 			no longer a root for the delegate.
2492 			This is an issue for delegates that target collectible code such as dynamic
2493 			methods of GC'able assemblies.
2494 
2495 			For a test case look into #667921.
2496 
2497 			FIXME: a dummy use is not the best way to do it as the local register allocator
2498 			will put it on a caller save register and spil it around the call.
2499 			Ideally, we would either put it on a callee save register or only do the store part.
2500 			 */
2501 			EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2502 
2503 			return (MonoInst*)call;
2504 		}
2505 
2506 		if ((!cfg->compile_aot || enable_for_aot) &&
2507 			(!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2508 			 (MONO_METHOD_IS_FINAL (method) &&
2509 			  method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2510 			!(mono_class_is_marshalbyref (method->klass) && context_used)) {
2511 			/*
2512 			 * the method is not virtual, we just need to ensure this is not null
2513 			 * and then we can call the method directly.
2514 			 */
2515 #ifndef DISABLE_REMOTING
2516 			if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2517 				/*
2518 				 * The check above ensures method is not gshared, this is needed since
2519 				 * gshared methods can't have wrappers.
2520 				 */
2521 				method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2522 			}
2523 #endif
2524 
2525 			if (!method->string_ctor)
2526 				MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2527 
2528 			call->inst.opcode = callvirt_to_call (call->inst.opcode);
2529 		} else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2530 			/*
2531 			 * the method is virtual, but we can statically dispatch since either
2532 			 * it's class or the method itself are sealed.
2533 			 * But first we need to ensure it's not a null reference.
2534 			 */
2535 			MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2536 
2537 			call->inst.opcode = callvirt_to_call (call->inst.opcode);
2538 		} else if (call_target) {
2539 			vtable_reg = alloc_preg (cfg);
2540 			MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2541 
2542 			call->inst.opcode = callvirt_to_call_reg (call->inst.opcode);
2543 			call->inst.sreg1 = call_target->dreg;
2544 			call->inst.flags &= !MONO_INST_HAS_METHOD;
2545 		} else {
2546 			vtable_reg = alloc_preg (cfg);
2547 			MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2548 			if (mono_class_is_interface (method->klass)) {
2549 				guint32 imt_slot = mono_method_get_imt_slot (method);
2550 				emit_imt_argument (cfg, call, call->method, imt_arg);
2551 				slot_reg = vtable_reg;
2552 				offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2553 			} else {
2554 				slot_reg = vtable_reg;
2555 				offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2556 					((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2557 				if (imt_arg) {
2558 					g_assert (mono_method_signature (method)->generic_param_count);
2559 					emit_imt_argument (cfg, call, call->method, imt_arg);
2560 				}
2561 			}
2562 
2563 			call->inst.sreg1 = slot_reg;
2564 			call->inst.inst_offset = offset;
2565 			call->is_virtual = TRUE;
2566 		}
2567 	}
2568 
2569 	MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2570 
2571 	if (rgctx_arg)
2572 		set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2573 
2574 	return (MonoInst*)call;
2575 }
2576 
/*
 * mono_emit_method_call:
 *
 *   Convenience wrapper: emit a non-tail call to METHOD using its own
 * signature, with no out-of-band IMT or rgctx arguments.
 */
MonoInst*
mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
{
	return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this_ins, NULL, NULL);
}
2582 
2583 MonoInst*
mono_emit_native_call(MonoCompile * cfg,gconstpointer func,MonoMethodSignature * sig,MonoInst ** args)2584 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2585 					   MonoInst **args)
2586 {
2587 	MonoCallInst *call;
2588 
2589 	g_assert (sig);
2590 
2591 	call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE, NULL);
2592 	call->fptr = func;
2593 
2594 	MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2595 
2596 	return (MonoInst*)call;
2597 }
2598 
2599 MonoInst*
mono_emit_jit_icall(MonoCompile * cfg,gconstpointer func,MonoInst ** args)2600 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2601 {
2602 	MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2603 
2604 	g_assert (info);
2605 
2606 	return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2607 }
2608 
2609 /*
2610  * mono_emit_abs_call:
2611  *
2612  *   Emit a call to the runtime function described by PATCH_TYPE and DATA.
2613  */
2614 inline static MonoInst*
mono_emit_abs_call(MonoCompile * cfg,MonoJumpInfoType patch_type,gconstpointer data,MonoMethodSignature * sig,MonoInst ** args)2615 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2616 					MonoMethodSignature *sig, MonoInst **args)
2617 {
2618 	MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2619 	MonoInst *ins;
2620 
2621 	/*
2622 	 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2623 	 * handle it.
2624 	 */
2625 	if (cfg->abs_patches == NULL)
2626 		cfg->abs_patches = g_hash_table_new (NULL, NULL);
2627 	g_hash_table_insert (cfg->abs_patches, ji, ji);
2628 	ins = mono_emit_native_call (cfg, ji, sig, args);
2629 	((MonoCallInst*)ins)->fptr_is_patch = TRUE;
2630 	return ins;
2631 }
2632 
2633 static MonoMethodSignature*
sig_to_rgctx_sig(MonoMethodSignature * sig)2634 sig_to_rgctx_sig (MonoMethodSignature *sig)
2635 {
2636 	// FIXME: memory allocation
2637 	MonoMethodSignature *res;
2638 	int i;
2639 
2640 	res = (MonoMethodSignature *)g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
2641 	memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
2642 	res->param_count = sig->param_count + 1;
2643 	for (i = 0; i < sig->param_count; ++i)
2644 		res->params [i] = sig->params [i];
2645 	res->params [sig->param_count] = &mono_defaults.int_class->this_arg;
2646 	return res;
2647 }
2648 
/*
 * emit_extra_arg_calli:
 *
 *   Make an indirect call to CALL_TARGET with signature FSIG, passing the
 * value in ARG_REG as an additional trailing argument. ORIG_ARGS holds the
 * original arguments (including 'this' if FSIG has it).
 */
static MonoInst*
emit_extra_arg_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **orig_args, int arg_reg, MonoInst *call_target)
{
	MonoMethodSignature *csig;
	MonoInst *args_buf [16];
	MonoInst **args;
	int i, pindex, tmp_reg;

	/* Make a call with an rgctx/extra arg */
	/* Need hasthis + param_count + 1 slots; use the stack buffer when it fits */
	if (fsig->param_count + 2 < 16)
		args = args_buf;
	else
		args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
	pindex = 0;
	if (fsig->hasthis)
		args [pindex ++] = orig_args [0];
	for (i = 0; i < fsig->param_count; ++i)
		args [pindex ++] = orig_args [fsig->hasthis + i];
	/* The extra argument is appended last, copied into a fresh vreg */
	tmp_reg = alloc_preg (cfg);
	EMIT_NEW_UNALU (cfg, args [pindex], OP_MOVE, tmp_reg, arg_reg);
	csig = sig_to_rgctx_sig (fsig);
	return mini_emit_calli (cfg, csig, args, call_target, NULL, NULL);
}
2673 
2674 /* Emit an indirect call to the function descriptor ADDR */
2675 static MonoInst*
emit_llvmonly_calli(MonoCompile * cfg,MonoMethodSignature * fsig,MonoInst ** args,MonoInst * addr)2676 emit_llvmonly_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoInst *addr)
2677 {
2678 	int addr_reg, arg_reg;
2679 	MonoInst *call_target;
2680 
2681 	g_assert (cfg->llvm_only);
2682 
2683 	/*
2684 	 * addr points to a <addr, arg> pair, load both of them, and
2685 	 * make a call to addr, passing arg as an extra arg.
2686 	 */
2687 	addr_reg = alloc_preg (cfg);
2688 	EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, addr->dreg, 0);
2689 	arg_reg = alloc_preg (cfg);
2690 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, addr->dreg, sizeof (gpointer));
2691 
2692 	return emit_extra_arg_calli (cfg, fsig, args, arg_reg, call_target);
2693 }
2694 
/*
 * direct_icalls_enabled:
 *
 *   Whether icalls may be called directly instead of through their wrappers.
 * NOTE(review): the unconditional return below disables direct icalls
 * entirely, so the checks after it are currently unreachable — presumably a
 * deliberate kill switch; confirm before re-enabling.
 */
static gboolean
direct_icalls_enabled (MonoCompile *cfg)
{
	return FALSE;

	/* LLVM on amd64 can't handle calls to non-32 bit addresses */
#ifdef TARGET_AMD64
	if (cfg->compile_llvm && !cfg->llvm_only)
		return FALSE;
#endif
	if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
		return FALSE;
	return TRUE;
}
2709 
/*
 * mono_emit_jit_icall_by_info:
 *
 *   Emit a call to the JIT icall described by INFO, either by inlining its
 * wrapper (when direct icalls are enabled) or by calling the wrapper.
 * IL_OFFSET is used for inlining bookkeeping. Returns the result instruction.
 */
MonoInst*
mono_emit_jit_icall_by_info (MonoCompile *cfg, int il_offset, MonoJitICallInfo *info, MonoInst **args)
{
	/*
	 * Call the jit icall without a wrapper if possible.
	 * The wrapper is needed to be able to do stack walks for asynchronously suspended
	 * threads when debugging.
	 */
	if (direct_icalls_enabled (cfg)) {
		char *name;
		int costs;

		/* Lazily create and publish the wrapper; the barrier orders the store */
		if (!info->wrapper_method) {
			name = g_strdup_printf ("__icall_wrapper_%s", info->name);
			info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
			g_free (name);
			mono_memory_barrier ();
		}

		/*
		 * Inline the wrapper method, which is basically a call to the C icall, and
		 * an exception check.
		 */
		costs = inline_method (cfg, info->wrapper_method, NULL,
							   args, NULL, il_offset, TRUE);
		g_assert (costs > 0);
		/* This path only supports icalls that produce a value */
		g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));

		/* inline_method leaves the result in args [0] */
		return args [0];
	} else {
		return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
	}
}
2743 
2744 static MonoInst*
mono_emit_widen_call_res(MonoCompile * cfg,MonoInst * ins,MonoMethodSignature * fsig)2745 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2746 {
2747 	if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2748 		if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2749 			int widen_op = -1;
2750 
2751 			/*
2752 			 * Native code might return non register sized integers
2753 			 * without initializing the upper bits.
2754 			 */
2755 			switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2756 			case OP_LOADI1_MEMBASE:
2757 				widen_op = OP_ICONV_TO_I1;
2758 				break;
2759 			case OP_LOADU1_MEMBASE:
2760 				widen_op = OP_ICONV_TO_U1;
2761 				break;
2762 			case OP_LOADI2_MEMBASE:
2763 				widen_op = OP_ICONV_TO_I2;
2764 				break;
2765 			case OP_LOADU2_MEMBASE:
2766 				widen_op = OP_ICONV_TO_U2;
2767 				break;
2768 			default:
2769 				break;
2770 			}
2771 
2772 			if (widen_op != -1) {
2773 				int dreg = alloc_preg (cfg);
2774 				MonoInst *widen;
2775 
2776 				EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2777 				widen->type = ins->type;
2778 				ins = widen;
2779 			}
2780 		}
2781 	}
2782 
2783 	return ins;
2784 }
2785 
2786 
2787 static void
emit_method_access_failure(MonoCompile * cfg,MonoMethod * caller,MonoMethod * callee)2788 emit_method_access_failure (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
2789 {
2790 	MonoInst *args [16];
2791 
2792 	args [0] = emit_get_rgctx_method (cfg, mono_method_check_context_used (caller), caller, MONO_RGCTX_INFO_METHOD);
2793 	args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (callee), callee, MONO_RGCTX_INFO_METHOD);
2794 
2795 	mono_emit_jit_icall (cfg, mono_throw_method_access, args);
2796 }
2797 
2798 MonoMethod*
mini_get_memcpy_method(void)2799 mini_get_memcpy_method (void)
2800 {
2801 	static MonoMethod *memcpy_method = NULL;
2802 	if (!memcpy_method) {
2803 		memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2804 		if (!memcpy_method)
2805 			g_error ("Old corlib found. Install a new one");
2806 	}
2807 	return memcpy_method;
2808 }
2809 
/*
 * mini_emit_write_barrier:
 *
 *   Emit a GC write barrier for the store of the object VALUE through the
 * pointer PTR. Does nothing when the compile does not require write barriers.
 */
void
mini_emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
{
	int card_table_shift_bits;
	gpointer card_table_mask;
	guint8 *card_table;
	MonoInst *dummy_use;
	int nursery_shift_bits;
	size_t nursery_size;

	if (!cfg->gen_write_barriers)
		return;

	//method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1])

	card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);

	mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);

	if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
		MonoInst *wbarrier;

		/* Use the backend's dedicated card table write barrier opcode. */
		MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
		wbarrier->sreg1 = ptr->dreg;
		wbarrier->sreg2 = value->dreg;
		MONO_ADD_INS (cfg->cbb, wbarrier);
	} else if (card_table) {
		int offset_reg = alloc_preg (cfg);
		int card_reg;
		MonoInst *ins;

		/*
		 * We emit a fast light weight write barrier. This always marks cards as in the concurrent
		 * collector case, so, for the serial collector, it might slightly slow down nursery
		 * collections. We also expect that the host system and the target system have the same card
		 * table configuration, which is the case if they have the same pointer size.
		 */

		/* offset = (ptr >> shift) [& mask] indexes the card byte. */
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
		if (card_table_mask)
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);

		/*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
		 * IMM's larger than 32bits.
		 */
		ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
		card_reg = ins->dreg;

		/* Mark the card: card_table [offset] = 1. */
		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
		MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
	} else {
		/* Fall back to calling the GC-provided write barrier method. */
		MonoMethod *write_barrier = mono_gc_get_write_barrier ();
		mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
	}

	/* Keep VALUE alive until after the barrier. */
	EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
}
2867 
2868 MonoMethod*
mini_get_memset_method(void)2869 mini_get_memset_method (void)
2870 {
2871 	static MonoMethod *memset_method = NULL;
2872 	if (!memset_method) {
2873 		memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2874 		if (!memset_method)
2875 			g_error ("Old corlib found. Install a new one");
2876 	}
2877 	return memset_method;
2878 }
2879 
/*
 * mini_emit_initobj:
 *
 *   Emit IR to zero-initialize the value of type KLASS stored at DEST.
 * IP is unused here. For gsharedvt classes the size is only known at runtime,
 * so a managed bzero helper is called with the size from the rgctx.
 */
void
mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
{
	MonoInst *iargs [3];
	int n;
	guint32 align;
	MonoMethod *memset_method;
	MonoInst *size_ins = NULL;
	MonoInst *bzero_ins = NULL;
	static MonoMethod *bzero_method;

	/* FIXME: Optimize this for the case when dest is an LDADDR */
	mono_class_init (klass);
	if (mini_is_gsharedvt_klass (klass)) {
		/* Load size and bzero helper address from the gsharedvt info. */
		size_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
		bzero_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
		if (!bzero_method)
			bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
		g_assert (bzero_method);
		iargs [0] = dest;
		iargs [1] = size_ins;
		mini_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
		return;
	}

	klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));

	n = mono_class_value_size (klass, &align);

	/* Small values are cleared with inline stores, larger ones with a call
	 * to the managed memset helper. */
	if (n <= sizeof (gpointer) * 8) {
		mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
	}
	else {
		memset_method = mini_get_memset_method ();
		iargs [0] = dest;
		EMIT_NEW_ICONST (cfg, iargs [1], 0);
		EMIT_NEW_ICONST (cfg, iargs [2], n);
		mono_emit_method_call (cfg, memset_method, iargs, NULL);
	}
}
2920 
2921 /*
2922  * emit_get_rgctx:
2923  *
2924  *   Emit IR to return either the this pointer for instance method,
2925  * or the mrgctx for static methods.
2926  */
static MonoInst*
emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
{
	MonoInst *this_ins = NULL;

	g_assert (cfg->gshared);

	/* Load 'this' only when the rgctx can be reached through the vtable of a
	 * reference-type instance (non-static, no method context, not a valuetype). */
	if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
			!(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
		!method->klass->valuetype)
		EMIT_NEW_VARLOAD (cfg, this_ins, cfg->this_arg, &mono_defaults.object_class->byval_arg);

	if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
		MonoInst *mrgctx_loc, *mrgctx_var;

		/* Method generic context used: load the mrgctx from its variable. */
		g_assert (!this_ins);
		g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);

		mrgctx_loc = mono_get_vtable_var (cfg);
		EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);

		return mrgctx_var;
	} else if (MONO_CLASS_IS_INTERFACE (cfg->method->klass)) {
		MonoInst *mrgctx_loc, *mrgctx_var;

		/* Default interface methods need an mrgctx since the vtable at runtime points at an implementing class */
		mrgctx_loc = mono_get_vtable_var (cfg);
		EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);

		g_assert (mono_method_needs_static_rgctx_invoke (cfg->method, TRUE));

		return mrgctx_var;
	} else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
		MonoInst *vtable_loc, *vtable_var;

		g_assert (!this_ins);

		vtable_loc = mono_get_vtable_var (cfg);
		EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);

		if (method->is_inflated && mono_method_get_context (method)->method_inst) {
			/* The variable actually holds an mrgctx; load the vtable out of it. */
			MonoInst *mrgctx_var = vtable_var;
			int vtable_reg;

			vtable_reg = alloc_preg (cfg);
			EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
			vtable_var->type = STACK_PTR;
		}

		return vtable_var;
	} else {
		MonoInst *ins;
		int vtable_reg;

		/* Instance method on a reference type: the rgctx is reached through
		 * this->vtable. */
		vtable_reg = alloc_preg (cfg);
		EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
		return ins;
	}
}
2986 
2987 static MonoJumpInfoRgctxEntry *
mono_patch_info_rgctx_entry_new(MonoMemPool * mp,MonoMethod * method,gboolean in_mrgctx,MonoJumpInfoType patch_type,gconstpointer patch_data,MonoRgctxInfoType info_type)2988 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
2989 {
2990 	MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2991 	res->method = method;
2992 	res->in_mrgctx = in_mrgctx;
2993 	res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2994 	res->data->type = patch_type;
2995 	res->data->data.target = patch_data;
2996 	res->info_type = info_type;
2997 
2998 	return res;
2999 }
3000 
/*
 * emit_rgctx_fetch_inline:
 *
 *   Emit IR to fetch the rgctx entry ENTRY from RGCTX without going through
 * the lazy fetch trampoline: the slot index is resolved via an AOT constant
 * and the fill icall is invoked directly. Despite the name, no inline
 * fastpath is emitted (see the FIXMEs below and in the disabled code).
 */
static inline MonoInst*
emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
{
	MonoInst *args [16];
	MonoInst *call;

	// FIXME: No fastpath since the slot is not a compile time constant
	args [0] = rgctx;
	EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
	if (entry->in_mrgctx)
		call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
	else
		call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
	return call;
#if 0
	/*
	 * FIXME: This can be called during decompose, which is a problem since it creates
	 * new bblocks.
	 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
	 */
	int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
	gboolean mrgctx;
	MonoBasicBlock *is_null_bb, *end_bb;
	MonoInst *res, *ins, *call;
	MonoInst *args[16];

	slot = mini_get_rgctx_entry_slot (entry);

	mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
	index = MONO_RGCTX_SLOT_INDEX (slot);
	if (mrgctx)
		index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
	for (depth = 0; ; ++depth) {
		int size = mono_class_rgctx_get_array_size (depth, mrgctx);

		if (index < size - 1)
			break;
		index -= size - 1;
	}

	NEW_BBLOCK (cfg, end_bb);
	NEW_BBLOCK (cfg, is_null_bb);

	if (mrgctx) {
		rgctx_reg = rgctx->dreg;
	} else {
		rgctx_reg = alloc_preg (cfg);

		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
		// FIXME: Avoid this check by allocating the table when the vtable is created etc.
		NEW_BBLOCK (cfg, is_null_bb);

		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
	}

	for (i = 0; i < depth; ++i) {
		int array_reg = alloc_preg (cfg);

		/* load ptr to next array */
		if (mrgctx && i == 0)
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
		else
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
		rgctx_reg = array_reg;
		/* is the ptr null? */
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
		/* if yes, jump to actual trampoline */
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
	}

	/* fetch slot */
	val_reg = alloc_preg (cfg);
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
	/* is the slot null? */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
	/* if yes, jump to actual trampoline */
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);

	/* Fastpath */
	res_reg = alloc_preg (cfg);
	MONO_INST_NEW (cfg, ins, OP_MOVE);
	ins->dreg = res_reg;
	ins->sreg1 = val_reg;
	MONO_ADD_INS (cfg->cbb, ins);
	res = ins;
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	/* Slowpath */
	MONO_START_BB (cfg, is_null_bb);
	args [0] = rgctx;
	EMIT_NEW_ICONST (cfg, args [1], index);
	if (mrgctx)
		call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
	else
		call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
	MONO_INST_NEW (cfg, ins, OP_MOVE);
	ins->dreg = res_reg;
	ins->sreg1 = call->dreg;
	MONO_ADD_INS (cfg->cbb, ins);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	MONO_START_BB (cfg, end_bb);

	return res;
#endif
}
3108 
3109 /*
3110  * emit_rgctx_fetch:
3111  *
3112  *   Emit IR to load the value of the rgctx entry ENTRY from the rgctx
3113  * given by RGCTX.
3114  */
3115 static inline MonoInst*
emit_rgctx_fetch(MonoCompile * cfg,MonoInst * rgctx,MonoJumpInfoRgctxEntry * entry)3116 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3117 {
3118 	if (cfg->llvm_only)
3119 		return emit_rgctx_fetch_inline (cfg, rgctx, entry);
3120 	else
3121 		return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
3122 }
3123 
3124 MonoInst*
mini_emit_get_rgctx_klass(MonoCompile * cfg,int context_used,MonoClass * klass,MonoRgctxInfoType rgctx_type)3125 mini_emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3126 					  MonoClass *klass, MonoRgctxInfoType rgctx_type)
3127 {
3128 	MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3129 	MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3130 
3131 	return emit_rgctx_fetch (cfg, rgctx, entry);
3132 }
3133 
3134 static MonoInst*
emit_get_rgctx_sig(MonoCompile * cfg,int context_used,MonoMethodSignature * sig,MonoRgctxInfoType rgctx_type)3135 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3136 					MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3137 {
3138 	MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3139 	MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3140 
3141 	return emit_rgctx_fetch (cfg, rgctx, entry);
3142 }
3143 
3144 static MonoInst*
emit_get_rgctx_gsharedvt_call(MonoCompile * cfg,int context_used,MonoMethodSignature * sig,MonoMethod * cmethod,MonoRgctxInfoType rgctx_type)3145 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3146 							   MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3147 {
3148 	MonoJumpInfoGSharedVtCall *call_info;
3149 	MonoJumpInfoRgctxEntry *entry;
3150 	MonoInst *rgctx;
3151 
3152 	call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3153 	call_info->sig = sig;
3154 	call_info->method = cmethod;
3155 
3156 	entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3157 	rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3158 
3159 	return emit_rgctx_fetch (cfg, rgctx, entry);
3160 }
3161 
3162 /*
3163  * emit_get_rgctx_virt_method:
3164  *
3165  *   Return data for method VIRT_METHOD for a receiver of type KLASS.
3166  */
3167 static MonoInst*
emit_get_rgctx_virt_method(MonoCompile * cfg,int context_used,MonoClass * klass,MonoMethod * virt_method,MonoRgctxInfoType rgctx_type)3168 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3169 							MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3170 {
3171 	MonoJumpInfoVirtMethod *info;
3172 	MonoJumpInfoRgctxEntry *entry;
3173 	MonoInst *rgctx;
3174 
3175 	info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3176 	info->klass = klass;
3177 	info->method = virt_method;
3178 
3179 	entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3180 	rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3181 
3182 	return emit_rgctx_fetch (cfg, rgctx, entry);
3183 }
3184 
3185 static MonoInst*
emit_get_rgctx_gsharedvt_method(MonoCompile * cfg,int context_used,MonoMethod * cmethod,MonoGSharedVtMethodInfo * info)3186 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3187 								 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3188 {
3189 	MonoJumpInfoRgctxEntry *entry;
3190 	MonoInst *rgctx;
3191 
3192 	entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3193 	rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3194 
3195 	return emit_rgctx_fetch (cfg, rgctx, entry);
3196 }
3197 
3198 /*
3199  * emit_get_rgctx_method:
3200  *
3201  *   Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3202  * normal constants, else emit a load from the rgctx.
3203  */
3204 static MonoInst*
emit_get_rgctx_method(MonoCompile * cfg,int context_used,MonoMethod * cmethod,MonoRgctxInfoType rgctx_type)3205 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3206 					   MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3207 {
3208 	if (!context_used) {
3209 		MonoInst *ins;
3210 
3211 		switch (rgctx_type) {
3212 		case MONO_RGCTX_INFO_METHOD:
3213 			EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3214 			return ins;
3215 		case MONO_RGCTX_INFO_METHOD_RGCTX:
3216 			EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3217 			return ins;
3218 		default:
3219 			g_assert_not_reached ();
3220 		}
3221 	} else {
3222 		MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3223 		MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3224 
3225 		return emit_rgctx_fetch (cfg, rgctx, entry);
3226 	}
3227 }
3228 
3229 static MonoInst*
emit_get_rgctx_field(MonoCompile * cfg,int context_used,MonoClassField * field,MonoRgctxInfoType rgctx_type)3230 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3231 					  MonoClassField *field, MonoRgctxInfoType rgctx_type)
3232 {
3233 	MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3234 	MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3235 
3236 	return emit_rgctx_fetch (cfg, rgctx, entry);
3237 }
3238 
3239 static int
get_gsharedvt_info_slot(MonoCompile * cfg,gpointer data,MonoRgctxInfoType rgctx_type)3240 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3241 {
3242 	MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3243 	MonoRuntimeGenericContextInfoTemplate *template_;
3244 	int i, idx;
3245 
3246 	g_assert (info);
3247 
3248 	for (i = 0; i < info->num_entries; ++i) {
3249 		MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3250 
3251 		if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
3252 			return i;
3253 	}
3254 
3255 	if (info->num_entries == info->count_entries) {
3256 		MonoRuntimeGenericContextInfoTemplate *new_entries;
3257 		int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3258 
3259 		new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3260 
3261 		memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3262 		info->entries = new_entries;
3263 		info->count_entries = new_count_entries;
3264 	}
3265 
3266 	idx = info->num_entries;
3267 	template_ = &info->entries [idx];
3268 	template_->info_type = rgctx_type;
3269 	template_->data = data;
3270 
3271 	info->num_entries ++;
3272 
3273 	return idx;
3274 }
3275 
3276 /*
3277  * emit_get_gsharedvt_info:
3278  *
3279  *   This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3280  */
3281 static MonoInst*
emit_get_gsharedvt_info(MonoCompile * cfg,gpointer data,MonoRgctxInfoType rgctx_type)3282 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3283 {
3284 	MonoInst *ins;
3285 	int idx, dreg;
3286 
3287 	idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3288 	/* Load info->entries [idx] */
3289 	dreg = alloc_preg (cfg);
3290 	EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
3291 
3292 	return ins;
3293 }
3294 
3295 MonoInst*
mini_emit_get_gsharedvt_info_klass(MonoCompile * cfg,MonoClass * klass,MonoRgctxInfoType rgctx_type)3296 mini_emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3297 {
3298 	return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3299 }
3300 
3301 /*
3302  * On return the caller must check @klass for load errors.
3303  */
static void
emit_class_init (MonoCompile *cfg, MonoClass *klass)
{
	MonoInst *vtable_arg;
	int context_used;

	context_used = mini_class_check_context_used (cfg, klass);

	/* Obtain the vtable: from the rgctx in shared code, as a constant otherwise. */
	if (context_used) {
		vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
										   klass, MONO_RGCTX_INFO_VTABLE);
	} else {
		MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);

		/* A NULL vtable indicates a load error; the caller checks KLASS. */
		if (!vtable)
			return;
		EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
	}

	if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
		MonoInst *ins;

		/*
		 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
		 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
		 */
		MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
		ins->sreg1 = vtable_arg->dreg;
		MONO_ADD_INS (cfg->cbb, ins);
	} else {
		int inited_reg;
		MonoBasicBlock *inited_bb;
		MonoInst *args [16];

		inited_reg = alloc_ireg (cfg);

		/* Fast path: skip the init icall when vtable->initialized is set. */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, inited_reg, vtable_arg->dreg, MONO_STRUCT_OFFSET (MonoVTable, initialized));

		NEW_BBLOCK (cfg, inited_bb);

		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);

		/* Slow path: call the generic class init icall. */
		args [0] = vtable_arg;
		mono_emit_jit_icall (cfg, mono_generic_class_init, args);

		MONO_START_BB (cfg, inited_bb);
	}
}
3353 
3354 static void
emit_seq_point(MonoCompile * cfg,MonoMethod * method,guint8 * ip,gboolean intr_loc,gboolean nonempty_stack)3355 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3356 {
3357 	MonoInst *ins;
3358 
3359 	if (cfg->gen_seq_points && cfg->method == method) {
3360 		NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3361 		if (nonempty_stack)
3362 			ins->flags |= MONO_INST_NONEMPTY_STACK;
3363 		MONO_ADD_INS (cfg->cbb, ins);
3364 	}
3365 }
3366 
/*
 * mini_save_cast_details:
 *
 *   When --debug=casts is enabled, emit IR which records into TLS the class
 * of the object in OBJ_REG and the target class KLASS, so a failing cast can
 * report both. If NULL_CHECK is set, the recording is skipped for a null object.
 */
void
mini_save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
{
	if (mini_get_debug_options ()->better_cast_details) {
		int vtable_reg = alloc_preg (cfg);
		int klass_reg = alloc_preg (cfg);
		MonoBasicBlock *is_null_bb = NULL;
		MonoInst *tls_get;
		int to_klass_reg, context_used;

		if (null_check) {
			/* Branch over the recording when the object is null. */
			NEW_BBLOCK (cfg, is_null_bb);

			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
		}

		tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
		if (!tls_get) {
			fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
			exit (1);
		}

		/* class_cast_from = object->vtable->klass */
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));

		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);

		/* class_cast_to = KLASS (fetched from the rgctx in shared code). */
		context_used = mini_class_check_context_used (cfg, klass);
		if (context_used) {
			MonoInst *class_ins;

			class_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
			to_klass_reg = class_ins->dreg;
		} else {
			to_klass_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
		}
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);

		if (null_check)
			MONO_START_BB (cfg, is_null_bb);
	}
}
3411 
3412 void
mini_reset_cast_details(MonoCompile * cfg)3413 mini_reset_cast_details (MonoCompile *cfg)
3414 {
3415 	/* Reset the variables holding the cast details */
3416 	if (mini_get_debug_options ()->better_cast_details) {
3417 		MonoInst *tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
3418 		/* It is enough to reset the from field */
3419 		MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3420 	}
3421 }
3422 
3423 /*
3424  * On return the caller must check @array_class for load errors
3425  */
static void
mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
{
	int vtable_reg = alloc_preg (cfg);
	int context_used;

	context_used = mini_class_check_context_used (cfg, array_class);

	mini_save_cast_details (cfg, array_class, obj->dreg, FALSE);

	/* Load the vtable; the _FAULT load doubles as the null check. */
	MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));

	if (cfg->opt & MONO_OPT_SHARED) {
		/* MONO_OPT_SHARED: compare class pointers instead of vtables. */
		int class_reg = alloc_preg (cfg);
		MonoInst *ins;

		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
		ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
	} else if (context_used) {
		MonoInst *vtable_ins;

		/* Shared code: fetch the expected vtable from the rgctx. */
		vtable_ins = mini_emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
	} else {
		if (cfg->compile_aot) {
			int vt_reg;
			MonoVTable *vtable;

			/* A NULL vtable indicates a load error; the caller checks ARRAY_CLASS. */
			if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
				return;
			vt_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
			MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
		} else {
			MonoVTable *vtable;
			if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
				return;
			/* JIT: the vtable address can be embedded as an immediate. */
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
		}
	}

	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");

	mini_reset_cast_details (cfg);
}
3472 
3473 /**
3474  * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3475  * generic code is generated.
3476  */
static MonoInst*
handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
{
	/* Unboxing a Nullable<T> is done by calling its Unbox method on the boxed value. */
	MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);

	if (context_used) {
		MonoInst *rgctx, *addr;

		/* FIXME: What if the class is shared?  We might not
		   have to get the address of the method from the
		   RGCTX. */
		addr = emit_get_rgctx_method (cfg, context_used, method,
									  MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
		if (cfg->llvm_only) {
			/* NOTE(review): the signature is recorded in cfg->signatures —
			 * presumably consumed by the llvm-only backend; confirm. */
			cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, mono_method_signature (method));
			return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
		} else {
			rgctx = emit_get_rgctx (cfg, cfg->method, context_used);

			return mini_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
		}
	} else {
		gboolean pass_vtable, pass_mrgctx;
		MonoInst *rgctx_arg = NULL;

		check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
		g_assert (!pass_mrgctx);

		if (pass_vtable) {
			/* The shared Unbox method needs its class vtable as an extra argument. */
			MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);

			g_assert (vtable);
			EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
		}

		return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
	}
}
3515 
/*
 * handle_unbox:
 *
 *   Emit IR to unbox the object in SP [0] to the value type KLASS, returning
 * the address of the value data (object + sizeof (MonoObject)). Emits
 * InvalidCastException checks against the object's rank and element class.
 */
static MonoInst*
handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
{
	MonoInst *add;
	int obj_reg;
	int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
	int klass_reg = alloc_dreg (cfg ,STACK_PTR);
	int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
	int rank_reg = alloc_dreg (cfg ,STACK_I4);

	obj_reg = sp [0]->dreg;
	/* The _FAULT load doubles as the null check. */
	MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));

	/* FIXME: generics */
	g_assert (klass->rank == 0);

	// Check rank == 0
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");

	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));

	if (context_used) {
		MonoInst *element_class;

		/* This assertion is from the unboxcast insn */
		g_assert (klass->rank == 0);

		/* Shared code: compare against the element class fetched from the rgctx. */
		element_class = mini_emit_get_rgctx_klass (cfg, context_used,
				klass, MONO_RGCTX_INFO_ELEMENT_KLASS);

		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
	} else {
		mini_save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
		mini_emit_class_check (cfg, eclass_reg, klass->element_class);
		mini_reset_cast_details (cfg);
	}

	/* The unboxed value lives right after the object header. */
	NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
	MONO_ADD_INS (cfg->cbb, add);
	add->type = STACK_MP;
	add->klass = klass;

	return add;
}
3564 
/*
 * handle_unbox_gsharedvt:
 *
 *   Emit IR to unbox OBJ as an instance of the gsharedvt class KLASS.  KLASS
 * can be instantiated with either a reference type, a valuetype or a nullable
 * type at runtime, so branch on the MONO_RGCTX_INFO_CLASS_BOX_TYPE value and
 * handle each case separately; all three paths leave the address of the value
 * in ADDR_REG, from which the result is finally loaded.
 */
static MonoInst*
handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
{
	MonoInst *addr, *klass_inst, *is_ref, *args[16];
	MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
	MonoInst *ins;
	int dreg, addr_reg;

	klass_inst = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);

	/* obj */
	args [0] = obj;

	/* klass */
	args [1] = klass_inst;

	/* CASTCLASS */
	obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);

	NEW_BBLOCK (cfg, is_ref_bb);
	NEW_BBLOCK (cfg, is_nullable_bb);
	NEW_BBLOCK (cfg, end_bb);
	is_ref = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);

	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);

	/* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
	addr_reg = alloc_dreg (cfg, STACK_MP);

	/* Non-ref case */
	/* UNBOX: the payload sits right after the object header */
	NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
	MONO_ADD_INS (cfg->cbb, addr);

	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	/* Ref case */
	MONO_START_BB (cfg, is_ref_bb);

	/* Save the ref to a temporary */
	dreg = alloc_ireg (cfg);
	EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
	addr->dreg = addr_reg;
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	/* Nullable case */
	MONO_START_BB (cfg, is_nullable_bb);

	{
		MonoInst *addr = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
		MonoInst *unbox_call;
		MonoMethodSignature *unbox_sig;

		/* Build the object -> KLASS signature for the nullable unbox helper by hand */
		unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
		unbox_sig->ret = &klass->byval_arg;
		unbox_sig->param_count = 1;
		unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;

		if (cfg->llvm_only)
			unbox_call = emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
		else
			unbox_call = mini_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);

		/* Store the result in a temporary whose address goes into ADDR_REG */
		EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
		addr->dreg = addr_reg;
	}

	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	/* End */
	MONO_START_BB (cfg, end_bb);

	/* LDOBJ */
	EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);

	return ins;
}
3646 
3647 /*
3648  * Returns NULL and set the cfg exception on error.
3649  */
3650 static MonoInst*
handle_alloc(MonoCompile * cfg,MonoClass * klass,gboolean for_box,int context_used)3651 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3652 {
3653 	MonoInst *iargs [2];
3654 	void *alloc_ftn;
3655 
3656 	if (context_used) {
3657 		MonoInst *data;
3658 		MonoRgctxInfoType rgctx_info;
3659 		MonoInst *iargs [2];
3660 		gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
3661 
3662 		MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
3663 
3664 		if (cfg->opt & MONO_OPT_SHARED)
3665 			rgctx_info = MONO_RGCTX_INFO_KLASS;
3666 		else
3667 			rgctx_info = MONO_RGCTX_INFO_VTABLE;
3668 		data = mini_emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3669 
3670 		if (cfg->opt & MONO_OPT_SHARED) {
3671 			EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3672 			iargs [1] = data;
3673 			alloc_ftn = ves_icall_object_new;
3674 		} else {
3675 			iargs [0] = data;
3676 			alloc_ftn = ves_icall_object_new_specific;
3677 		}
3678 
3679 		if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
3680 			if (known_instance_size) {
3681 				int size = mono_class_instance_size (klass);
3682 				if (size < sizeof (MonoObject))
3683 					g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
3684 
3685 				EMIT_NEW_ICONST (cfg, iargs [1], size);
3686 			}
3687 			return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3688 		}
3689 
3690 		return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3691 	}
3692 
3693 	if (cfg->opt & MONO_OPT_SHARED) {
3694 		EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3695 		EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3696 
3697 		alloc_ftn = ves_icall_object_new;
3698 	} else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !mono_class_is_ginst (klass)) {
3699 		/* This happens often in argument checking code, eg. throw new FooException... */
3700 		/* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3701 		EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3702 		alloc_ftn = mono_helper_newobj_mscorlib;
3703 	} else {
3704 		MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3705 		MonoMethod *managed_alloc = NULL;
3706 
3707 		if (!vtable) {
3708 			mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3709 			cfg->exception_ptr = klass;
3710 			return NULL;
3711 		}
3712 
3713 		managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
3714 
3715 		if (managed_alloc) {
3716 			int size = mono_class_instance_size (klass);
3717 			if (size < sizeof (MonoObject))
3718 				g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
3719 
3720 			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3721 			EMIT_NEW_ICONST (cfg, iargs [1], size);
3722 			return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3723 		}
3724 		alloc_ftn = ves_icall_object_new_specific;
3725 		EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3726 	}
3727 
3728 	return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3729 }
3730 
/*
 * handle_box:
 *
 *   Emit IR which boxes VAL, an instance of KLASS, and return the instruction
 * holding the resulting object reference.  CONTEXT_USED is nonzero when
 * compiling shared generic code.  Nullable types and gsharedvt types are
 * handled specially; everything else is a plain allocation plus a store.
 * Returns NULL and set the cfg exception on error.
 */
static MonoInst*
handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
{
	MonoInst *alloc, *ins;

	/* Nullable<T> boxes to either null or a boxed T, so defer to Nullable<T>.Box () */
	if (mono_class_is_nullable (klass)) {
		MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);

		if (context_used) {
			if (cfg->llvm_only && cfg->gsharedvt) {
				MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
														MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
				return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
			} else {
				/* FIXME: What if the class is shared?  We might not
				   have to get the method address from the RGCTX. */
				MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
														MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
				MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);

				return mini_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
			}
		} else {
			gboolean pass_vtable, pass_mrgctx;
			MonoInst *rgctx_arg = NULL;

			check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
			g_assert (!pass_mrgctx);

			if (pass_vtable) {
				MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);

				g_assert (vtable);
				EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
			}

			return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
		}
	}

	if (mini_is_gsharedvt_klass (klass)) {
		/* KLASS might be instantiated as a ref type, a vtype or a nullable at
		   runtime, so branch on the runtime MONO_RGCTX_INFO_CLASS_BOX_TYPE value */
		MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
		MonoInst *res, *is_ref, *src_var, *addr;
		int dreg;

		dreg = alloc_ireg (cfg);

		NEW_BBLOCK (cfg, is_ref_bb);
		NEW_BBLOCK (cfg, is_nullable_bb);
		NEW_BBLOCK (cfg, end_bb);
		is_ref = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);

		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);

		/* Non-ref case: allocate and copy the vtype payload after the object header */
		alloc = handle_alloc (cfg, klass, TRUE, context_used);
		if (!alloc)
			return NULL;
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
		ins->opcode = OP_STOREV_MEMBASE;

		EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
		res->type = STACK_OBJ;
		res->klass = klass;
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

		/* Ref case: boxing a ref is the identity, just reload the value */
		MONO_START_BB (cfg, is_ref_bb);

		/* val is a vtype, so has to load the value manually */
		src_var = get_vreg_to_inst (cfg, val->dreg);
		if (!src_var)
			src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
		EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

		/* Nullable case */
		MONO_START_BB (cfg, is_nullable_bb);

		{
			MonoInst *addr = mini_emit_get_gsharedvt_info_klass (cfg, klass,
													MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
			MonoInst *box_call;
			MonoMethodSignature *box_sig;

			/*
			 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
			 * construct that method at JIT time, so have to do things by hand.
			 */
			box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
			box_sig->ret = &mono_defaults.object_class->byval_arg;
			box_sig->param_count = 1;
			box_sig->params [0] = &klass->byval_arg;

			if (cfg->llvm_only)
				box_call = emit_llvmonly_calli (cfg, box_sig, &val, addr);
			else
				box_call = mini_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
			EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
			res->type = STACK_OBJ;
			res->klass = klass;
		}

		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

		MONO_START_BB (cfg, end_bb);

		return res;
	} else {
		/* Plain valuetype: allocate the box and store the payload after the header */
		alloc = handle_alloc (cfg, klass, TRUE, context_used);
		if (!alloc)
			return NULL;

		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
		return alloc;
	}
}
3855 
3856 static gboolean
method_needs_stack_walk(MonoCompile * cfg,MonoMethod * cmethod)3857 method_needs_stack_walk (MonoCompile *cfg, MonoMethod *cmethod)
3858 {
3859 	if (cmethod->klass == mono_defaults.systemtype_class) {
3860 		if (!strcmp (cmethod->name, "GetType"))
3861 			return TRUE;
3862 	}
3863 	return FALSE;
3864 }
3865 
/*
 * handle_enum_has_flag:
 *
 *   Emit IR which computes Enum.HasFlag () without boxing, i.e.
 * ((*enum_this) & enum_flag) == enum_flag, for an enum of class KLASS.
 * ENUM_THIS is the address of the enum value, ENUM_FLAG is the flag value.
 * Returns an instruction producing the boolean result as STACK_I4.
 */
static G_GNUC_UNUSED MonoInst*
handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
{
	MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
	guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
	gboolean is_i4;

	/* Pick 32 bit or 64 bit opcodes depending on the underlying enum type */
	switch (enum_type->type) {
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
#if SIZEOF_REGISTER == 8
	case MONO_TYPE_I:
	case MONO_TYPE_U:
#endif
		is_i4 = FALSE;
		break;
	default:
		is_i4 = TRUE;
		break;
	}

	{
		MonoInst *load, *and_, *cmp, *ceq;
		int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
		int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
		int dest_reg = alloc_ireg (cfg);

		EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
		EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
		EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
		EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);

		ceq->type = STACK_I4;

		/* 64 bit ops might not be supported directly, decompose them here */
		if (!is_i4) {
			load = mono_decompose_opcode (cfg, load);
			and_ = mono_decompose_opcode (cfg, and_);
			cmp = mono_decompose_opcode (cfg, cmp);
			ceq = mono_decompose_opcode (cfg, ceq);
		}

		return ceq;
	}
}
3910 
/*
 * handle_delegate_ctor:
 *
 *   Emit IR which creates a delegate of class KLASS wrapping METHOD with the
 * receiver TARGET, inlining the work done by mono_delegate_ctor ().
 * CONTEXT_USED is nonzero when compiling shared generic code, VIRTUAL_ is
 * TRUE for virtual delegates.  Returns the instruction holding the new
 * delegate object.
 * Returns NULL and set the cfg exception on error.
 */
static G_GNUC_UNUSED MonoInst*
handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual_)
{
	MonoInst *ptr;
	int dreg;
	gpointer trampoline;
	MonoInst *obj, *method_ins, *tramp_ins;
	MonoDomain *domain;
	guint8 **code_slot;

	/* Bail out early if no virtual invoke impl is available for this signature */
	if (virtual_ && !cfg->llvm_only) {
		MonoMethod *invoke = mono_get_delegate_invoke (klass);
		g_assert (invoke);

		if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
			return NULL;
	}

	obj = handle_alloc (cfg, klass, FALSE, mono_class_check_context_used (klass));
	if (!obj)
		return NULL;

	/* Inline the contents of mono_delegate_ctor */

	/* Set target field */
	/* Optimize away setting of NULL target */
	if (!MONO_INS_IS_PCONST_NULL (target)) {
		if (!(method->flags & METHOD_ATTRIBUTE_STATIC)) {
			/* Instance methods need a non-null receiver */
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target->dreg, 0);
			MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
		}
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
		if (cfg->gen_write_barriers) {
			dreg = alloc_preg (cfg);
			EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
			mini_emit_write_barrier (cfg, ptr, target);
		}
	}

	/* Set method field */
	method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);

	/*
	 * To avoid looking up the compiled code belonging to the target method
	 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
	 * store it, and we fill it after the method has been compiled.
	 */
	if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
		MonoInst *code_slot_ins;

		if (context_used) {
			code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
		} else {
			domain = mono_domain_get ();
			mono_domain_lock (domain);
			if (!domain_jit_info (domain)->method_code_hash)
				domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
			code_slot = (guint8 **)g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
			if (!code_slot) {
				code_slot = (guint8 **)mono_domain_alloc0 (domain, sizeof (gpointer));
				g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
			}
			mono_domain_unlock (domain);

			code_slot_ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
		}
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
	}

	/* llvm-only codegen initializes the delegate through a runtime helper instead of trampolines */
 	if (cfg->llvm_only) {
		MonoInst *args [16];

		if (virtual_) {
			args [0] = obj;
			args [1] = target;
			args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
			mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate_virtual, args);
		} else {
			args [0] = obj;
			mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate, args);
		}

		return obj;
	}

	if (cfg->compile_aot) {
		/* AOT: emit a patch which is resolved to the trampoline at load time */
		MonoDelegateClassMethodPair *del_tramp;

		del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
		del_tramp->klass = klass;
		del_tramp->method = context_used ? NULL : method;
		del_tramp->is_virtual = virtual_;
		EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
	} else {
		if (virtual_)
			trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
		else
			trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
		EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
	}

	/* Set invoke_impl field */
	if (virtual_) {
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
	} else {
		/* tramp_ins points to a MonoDelegateTrampInfo, copy its fields into the delegate */
		dreg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);

		dreg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
	}

	dreg = alloc_preg (cfg);
	MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0);
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);

	/* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */

	return obj;
}
4037 
4038 static MonoInst*
handle_array_new(MonoCompile * cfg,int rank,MonoInst ** sp,unsigned char * ip)4039 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4040 {
4041 	MonoJitICallInfo *info;
4042 
4043 	/* Need to register the icall so it gets an icall wrapper */
4044 	info = mono_get_array_new_va_icall (rank);
4045 
4046 	cfg->flags |= MONO_CFG_HAS_VARARGS;
4047 
4048 	/* mono_array_new_va () needs a vararg calling convention */
4049 	cfg->exception_message = g_strdup ("array-new");
4050 	cfg->disable_llvm = TRUE;
4051 
4052 	/* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4053 	return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
4054 }
4055 
/*
 * handle_constrained_gsharedvt_call:
 *
 *   Handle constrained calls where the receiver is a gsharedvt type.
 * CMETHOD/FSIG describe the called method, SP holds the receiver and the
 * arguments, CONSTRAINED_CLASS is the constraint.  *REF_EMIT_WIDEN is set to
 * FALSE when the caller should not widen the call result.
 * Return the instruction representing the call. Set the cfg exception on failure.
 */
static MonoInst*
handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
								   gboolean *ref_emit_widen)
{
	MonoInst *ins = NULL;
	gboolean emit_widen = *ref_emit_widen;

	/*
	 * Constrained calls need to behave differently at runtime dependending on whenever the receiver is instantiated as ref type or as a vtype.
	 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
	 * pack the arguments into an array, and do the rest of the work in an icall.
	 */
	/* Only a restricted set of methods/signatures is supported; everything else falls back to GSHAREDVT_FAILURE */
	if (((cmethod->klass == mono_defaults.object_class) || mono_class_is_interface (cmethod->klass) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
		(MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mono_class_is_enum (mono_class_from_mono_type (fsig->ret)) || mini_is_gsharedvt_type (fsig->ret)) &&
		(fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
		MonoInst *args [16];

		/*
		 * This case handles calls to
		 * - object:ToString()/Equals()/GetHashCode(),
		 * - System.IComparable<T>:CompareTo()
		 * - System.IEquatable<T>:Equals ()
		 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
		 */

		/* args [0] = receiver, args [1] = method, args [2] = constrained class */
		args [0] = sp [0];
		if (mono_method_check_context_used (cmethod))
			args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
		else
			EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
		args [2] = mini_emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);

		/* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
		if (fsig->hasthis && fsig->param_count) {
			/* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
			MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
			ins->dreg = alloc_preg (cfg);
			ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
			MONO_ADD_INS (cfg->cbb, ins);
			args [4] = ins;

			if (mini_is_gsharedvt_type (fsig->params [0])) {
				int addr_reg, deref_arg_reg;

				ins = mini_emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
				deref_arg_reg = alloc_preg (cfg);
				/* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
				EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);

				EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
				addr_reg = ins->dreg;
				EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
			} else {
				EMIT_NEW_ICONST (cfg, args [3], 0);
				EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
			}
		} else {
			EMIT_NEW_ICONST (cfg, args [3], 0);
			EMIT_NEW_ICONST (cfg, args [4], 0);
		}
		ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
		emit_widen = FALSE;

		/* The icall returns a boxed result; unbox it when the caller expects a value */
		if (mini_is_gsharedvt_type (fsig->ret)) {
			ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
		} else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mono_class_is_enum (mono_class_from_mono_type (fsig->ret))) {
			MonoInst *add;

			/* Unbox */
			NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
			MONO_ADD_INS (cfg->cbb, add);
			/* Load value */
			NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
			MONO_ADD_INS (cfg->cbb, ins);
			/* ins represents the call result */
		}
	} else {
		/* NOTE(review): GSHAREDVT_FAILURE appears to record the failure and jump to exception_exit below — confirm against the macro definition */
		GSHAREDVT_FAILURE (CEE_CALLVIRT);
	}

	*ref_emit_widen = emit_widen;

	return ins;

 exception_exit:
	return NULL;
}
4149 
/*
 * mono_emit_load_got_addr:
 *
 *   Emit an OP_LOAD_GOTADDR instruction which materializes the GOT address
 * into cfg->got_var, inserting it at the very start of the entry basic block
 * so it dominates all uses.  A no-op if there is no got_var or it was already
 * allocated.
 */
static void
mono_emit_load_got_addr (MonoCompile *cfg)
{
	MonoInst *getaddr, *dummy_use;

	if (!cfg->got_var || cfg->got_var_allocated)
		return;

	MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
	getaddr->cil_code = cfg->header->code;
	getaddr->dreg = cfg->got_var->dreg;

	/* Add it to the start of the first bblock */
	if (cfg->bb_entry->code) {
		/* Splice it in front of the existing instruction list by hand */
		getaddr->next = cfg->bb_entry->code;
		cfg->bb_entry->code = getaddr;
	}
	else
		MONO_ADD_INS (cfg->bb_entry, getaddr);

	cfg->got_var_allocated = TRUE;

	/*
	 * Add a dummy use to keep the got_var alive, since real uses might
	 * only be generated by the back ends.
	 * Add it to end_bblock, so the variable's lifetime covers the whole
	 * method.
	 * It would be better to make the usage of the got var explicit in all
	 * cases when the backend needs it (i.e. calls, throw etc.), so this
	 * wouldn't be needed.
	 */
	NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
	MONO_ADD_INS (cfg->bb_exit, dummy_use);
}
4184 
/* Maximum IL code size (in bytes) a method may have to be eligible for inlining.
 * Lazily initialized by mono_method_check_inlining () from the MONO_INLINELIMIT
 * env var, defaulting to INLINE_LENGTH_LIMIT. */
static int inline_limit;
static gboolean inline_limit_inited;
4187 
/*
 * mono_method_check_inlining:
 *
 *   Return TRUE if METHOD is a candidate for inlining into the method being
 * compiled by CFG.  Checks method/header attributes, the inline depth and
 * size limits, the class cctor situation, soft-float restrictions and
 * profiler instrumentation.
 */
static gboolean
mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
{
	MonoMethodHeaderSummary header;
	MonoVTable *vtable;
#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
	MonoMethodSignature *sig = mono_method_signature (method);
	int i;
#endif

	if (cfg->disable_inline)
		return FALSE;
	if (cfg->gsharedvt)
		return FALSE;

	/* Limit recursion depth of nested inlining */
	if (cfg->inline_depth > 10)
		return FALSE;

	if (!mono_method_get_header_summary (method, &header))
		return FALSE;

	/*runtime, icall and pinvoke are checked by summary call*/
	if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
	    (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
	    (mono_class_is_marshalbyref (method->klass)) ||
	    header.has_clauses)
		return FALSE;

	/* also consider num_locals? */
	/* Do the size check early to avoid creating vtables */
	if (!inline_limit_inited) {
		/* Lazily read the size limit from the MONO_INLINELIMIT env var */
		char *inlinelimit;
		if ((inlinelimit = g_getenv ("MONO_INLINELIMIT"))) {
			inline_limit = atoi (inlinelimit);
			g_free (inlinelimit);
		} else
			inline_limit = INLINE_LENGTH_LIMIT;
		inline_limit_inited = TRUE;
	}
	/* AggressiveInlining overrides the size limit */
	if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
		return FALSE;

	/*
	 * if we can initialize the class of the method right away, we do,
	 * otherwise we don't allow inlining if the class needs initialization,
	 * since it would mean inserting a call to mono_runtime_class_init()
	 * inside the inlined code
	 */
	if (cfg->gshared && method->klass->has_cctor && mini_class_check_context_used (cfg, method->klass))
		return FALSE;

	if (!(cfg->opt & MONO_OPT_SHARED)) {
		/* The AggressiveInlining hint is a good excuse to force that cctor to run. */
		if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
			if (method->klass->has_cctor) {
				vtable = mono_class_vtable (cfg->domain, method->klass);
				if (!vtable)
					return FALSE;
				if (!cfg->compile_aot) {
					MonoError error;
					if (!mono_runtime_class_init_full (vtable, &error)) {
						mono_error_cleanup (&error);
						return FALSE;
					}
				}
			}
		} else if (mono_class_is_before_field_init (method->klass)) {
			if (cfg->run_cctors && method->klass->has_cctor) {
				/*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
				if (!method->klass->runtime_info)
					/* No vtable created yet */
					return FALSE;
				vtable = mono_class_vtable (cfg->domain, method->klass);
				if (!vtable)
					return FALSE;
				/* This makes so that inline cannot trigger */
				/* .cctors: too many apps depend on them */
				/* running with a specific order... */
				if (! vtable->initialized)
					return FALSE;
				MonoError error;
				if (!mono_runtime_class_init_full (vtable, &error)) {
					mono_error_cleanup (&error);
					return FALSE;
				}
			}
		} else if (mono_class_needs_cctor_run (method->klass, NULL)) {
			if (!method->klass->runtime_info)
				/* No vtable created yet */
				return FALSE;
			vtable = mono_class_vtable (cfg->domain, method->klass);
			if (!vtable)
				return FALSE;
			if (!vtable->initialized)
				return FALSE;
		}
	} else {
		/*
		 * If we're compiling for shared code
		 * the cctor will need to be run at aot method load time, for example,
		 * or at the end of the compilation of the inlining method.
		 */
		if (mono_class_needs_cctor_run (method->klass, NULL) && !mono_class_is_before_field_init (method->klass))
			return FALSE;
	}

#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
	if (mono_arch_is_soft_float ()) {
		/* FIXME: */
		if (sig->ret && sig->ret->type == MONO_TYPE_R4)
			return FALSE;
		for (i = 0; i < sig->param_count; ++i)
			if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
				return FALSE;
	}
#endif

	/* Caller explicitly asked not to inline this method */
	if (g_list_find (cfg->dont_inline, method))
		return FALSE;

	/* Inlining would hide the method from the profiler's enter/leave callbacks */
	if (mono_profiler_get_call_instrumentation_flags (method))
		return FALSE;

	return TRUE;
}
4313 
4314 static gboolean
mini_field_access_needs_cctor_run(MonoCompile * cfg,MonoMethod * method,MonoClass * klass,MonoVTable * vtable)4315 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
4316 {
4317 	if (!cfg->compile_aot) {
4318 		g_assert (vtable);
4319 		if (vtable->initialized)
4320 			return FALSE;
4321 	}
4322 
4323 	if (mono_class_is_before_field_init (klass)) {
4324 		if (cfg->method == method)
4325 			return FALSE;
4326 	}
4327 
4328 	if (!mono_class_needs_cctor_run (klass, method))
4329 		return FALSE;
4330 
4331 	if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
4332 		/* The initialization is already done before the method is called */
4333 		return FALSE;
4334 
4335 	return TRUE;
4336 }
4337 
MonoInst*
mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
{
	/*
	 * Emit IR computing the address of element INDEX of the one-dimensional
	 * array ARR whose elements have type KLASS (ldelema, rank 1).  When
	 * BCHECK is TRUE an array bounds check is emitted first.  Returns the
	 * address instruction (type STACK_MP), already added to cfg->cbb.
	 */
	MonoInst *ins;
	guint32 size;
	int mult_reg, add_reg, array_reg, index_reg, index2_reg;
	int context_used;

	if (mini_is_gsharedvt_variable_klass (klass)) {
		/* Element size is only known at runtime; (guint32)-1 flags that case below. */
		size = -1;
	} else {
		mono_class_init (klass);
		size = mono_class_array_element_size (klass);
	}

	mult_reg = alloc_preg (cfg);
	array_reg = arr->dreg;
	index_reg = index->dreg;

#if SIZEOF_REGISTER == 8
	/* The array reg is 64 bits but the index reg is only 32 */
	if (COMPILE_LLVM (cfg)) {
		/*
		 * abcrem can't handle the OP_SEXT_I4, so add this after abcrem,
		 * during OP_BOUNDS_CHECK decomposition, and in the implementation
		 * of OP_X86_LEA for llvm.
		 */
		index2_reg = index_reg;
	} else {
		index2_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
	}
#else
	if (index->type == STACK_I8) {
		/* 32-bit target with a 64-bit index: truncate to 32 bits. */
		index2_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
	} else {
		index2_reg = index_reg;
	}
#endif

	if (bcheck)
		MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);

#if defined(TARGET_X86) || defined(TARGET_AMD64)
	/* Power-of-two element sizes can be folded into a single LEA on x86/amd64. */
	if (size == 1 || size == 2 || size == 4 || size == 8) {
		static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };

		EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
		ins->klass = mono_class_get_element_class (klass);
		ins->type = STACK_MP;

		return ins;
	}
#endif

	add_reg = alloc_ireg_mp (cfg);

	/* size == (guint32)-1 marks the gsharedvt variable-size case set above. */
	if (size == -1) {
		MonoInst *rgctx_ins;

		/* gsharedvt */
		g_assert (cfg->gshared);
		context_used = mini_class_check_context_used (cfg, klass);
		g_assert (context_used);
		/* Load the element size from the runtime generic context. */
		rgctx_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
		MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
	} else {
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
	}
	/* addr = arr + index * size + offsetof (MonoArray, vector) */
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
	ins->klass = mono_class_get_element_class (klass);
	ins->type = STACK_MP;
	MONO_ADD_INS (cfg->cbb, ins);

	return ins;
}
4416 
static MonoInst*
mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
{
	/*
	 * Emit IR computing the address of element [INDEX_INS1, INDEX_INS2] of the
	 * two-dimensional array ARR with element type KLASS, including the
	 * per-dimension lower-bound adjustment and range checks against the
	 * MonoArrayBounds records.  Returns the address instruction (STACK_MP),
	 * added to cfg->cbb.
	 */
	int bounds_reg = alloc_preg (cfg);
	int add_reg = alloc_ireg_mp (cfg);
	int mult_reg = alloc_preg (cfg);
	int mult2_reg = alloc_preg (cfg);
	int low1_reg = alloc_preg (cfg);
	int low2_reg = alloc_preg (cfg);
	int high1_reg = alloc_preg (cfg);
	int high2_reg = alloc_preg (cfg);
	int realidx1_reg = alloc_preg (cfg);
	int realidx2_reg = alloc_preg (cfg);
	int sum_reg = alloc_preg (cfg);
	int index1, index2, tmpreg;
	MonoInst *ins;
	guint32 size;

	mono_class_init (klass);
	size = mono_class_array_element_size (klass);

	index1 = index_ins1->dreg;
	index2 = index_ins2->dreg;

#if SIZEOF_REGISTER == 8
	/* The array reg is 64 bits but the index reg is only 32 */
	if (COMPILE_LLVM (cfg)) {
		/* Not needed */
	} else {
		/* Sign-extend both 32-bit indexes to pointer width. */
		tmpreg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
		index1 = tmpreg;
		tmpreg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
		index2 = tmpreg;
	}
#else
	// FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
	tmpreg = -1;
#endif

	/* range checking */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
				       arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));

	/* Dimension 0: realidx1 = index1 - lower_bound; check realidx1 < length. */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
				       bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
				       bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
	/* Unsigned compare so a negative adjusted index also traps. */
	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");

	/* Dimension 1: bounds [1] lives sizeof (MonoArrayBounds) past bounds [0]. */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
				       bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
				       bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");

	/* addr = arr + ((realidx1 * dim1_length + realidx2) * size) + vector offset */
	MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));

	ins->type = STACK_MP;
	ins->klass = klass;
	MONO_ADD_INS (cfg->cbb, ins);

	return ins;
}
4490 
4491 static MonoInst*
mini_emit_ldelema_ins(MonoCompile * cfg,MonoMethod * cmethod,MonoInst ** sp,unsigned char * ip,gboolean is_set)4492 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4493 {
4494 	int rank;
4495 	MonoInst *addr;
4496 	MonoMethod *addr_method;
4497 	int element_size;
4498 	MonoClass *eclass = cmethod->klass->element_class;
4499 
4500 	rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4501 
4502 	if (rank == 1)
4503 		return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
4504 
4505 	/* emit_ldelema_2 depends on OP_LMUL */
4506 	if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
4507 		return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
4508 	}
4509 
4510 	if (mini_is_gsharedvt_variable_klass (eclass))
4511 		element_size = 0;
4512 	else
4513 		element_size = mono_class_array_element_size (eclass);
4514 	addr_method = mono_marshal_get_array_address (rank, element_size);
4515 	addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
4516 
4517 	return addr;
4518 }
4519 
/* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
static MonoInst*
emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
{
	/*
	 * ARGS: [0] = array, [1] = index, [2] = address of the value slot.
	 * IS_SET selects SetGenericValueImpl (copy *args [2] into the array
	 * element) versus GetGenericValueImpl (copy the element into *args [2]).
	 * Returns the store instruction.
	 */
	MonoInst *addr, *store, *load;
	MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);

	/* the bounds check is already done by the callers */
	addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
	if (is_set) {
		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
		/* Storing a reference into the array requires a GC write barrier. */
		if (mini_type_is_reference (&eklass->byval_arg))
			mini_emit_write_barrier (cfg, addr, load);
	} else {
		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
	}
	return store;
}
4540 
4541 
4542 static gboolean
generic_class_is_reference_type(MonoCompile * cfg,MonoClass * klass)4543 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4544 {
4545 	return mini_type_is_reference (&klass->byval_arg);
4546 }
4547 
static MonoInst*
emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
{
	/*
	 * Emit a store of value SP [2] into element SP [1] of array SP [0]
	 * (stelem) with element type KLASS.  SAFETY_CHECKS enables bounds checks
	 * and, for reference elements, the array-covariance type check.
	 * Returns the emitted instruction, or NULL when the stack types are
	 * unsuitable for the helper-call path.
	 */
	if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
		!(MONO_INS_IS_PCONST_NULL (sp [2]))) {
		/*
		 * Checked reference store: go through the virtual stelemref helper
		 * (storing null needs no covariance check, hence the PCONST test).
		 */
		MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
		MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
		MonoInst *iargs [3];

		if (!helper->slot)
			mono_class_setup_vtable (obj_array);
		g_assert (helper->slot);

		if (sp [0]->type != STACK_OBJ)
			return NULL;
		if (sp [2]->type != STACK_OBJ)
			return NULL;

		iargs [2] = sp [2];
		iargs [1] = sp [1];
		iargs [0] = sp [0];

		/* Virtual call so the helper specialized for the array type runs. */
		return mono_emit_method_call (cfg, helper, iargs, sp [0]);
	} else {
		MonoInst *ins;

		if (mini_is_gsharedvt_variable_klass (klass)) {
			MonoInst *addr;

			// FIXME-VT: OP_ICONST optimization
			addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
			EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
			ins->opcode = OP_STOREV_MEMBASE;
		} else if (sp [1]->opcode == OP_ICONST) {
			/* Constant index: fold the offset computation at compile time. */
			int array_reg = sp [0]->dreg;
			int index_reg = sp [1]->dreg;
			int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);

			if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
				MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);

			if (safety_checks)
				MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
			EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
		} else {
			/* General case: compute the element address, then store. */
			MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
			EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
			if (generic_class_is_reference_type (cfg, klass))
				mini_emit_write_barrier (cfg, addr, sp [2]);
		}
		return ins;
	}
}
4601 
4602 static MonoInst*
emit_array_unsafe_access(MonoCompile * cfg,MonoMethodSignature * fsig,MonoInst ** args,int is_set)4603 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4604 {
4605 	MonoClass *eklass;
4606 
4607 	if (is_set)
4608 		eklass = mono_class_from_mono_type (fsig->params [2]);
4609 	else
4610 		eklass = mono_class_from_mono_type (fsig->ret);
4611 
4612 	if (is_set) {
4613 		return emit_array_store (cfg, eklass, args, FALSE);
4614 	} else {
4615 		MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4616 		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
4617 		return ins;
4618 	}
4619 }
4620 
4621 static gboolean
is_unsafe_mov_compatible(MonoCompile * cfg,MonoClass * param_klass,MonoClass * return_klass)4622 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
4623 {
4624 	uint32_t align;
4625 	int param_size, return_size;
4626 
4627 	param_klass = mono_class_from_mono_type (mini_get_underlying_type (&param_klass->byval_arg));
4628 	return_klass = mono_class_from_mono_type (mini_get_underlying_type (&return_klass->byval_arg));
4629 
4630 	if (cfg->verbose_level > 3)
4631 		printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", return_klass->name, param_klass->name);
4632 
4633 	//Don't allow mixing reference types with value types
4634 	if (param_klass->valuetype != return_klass->valuetype) {
4635 		if (cfg->verbose_level > 3)
4636 			printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is a valuetype and the other is not\n");
4637 		return FALSE;
4638 	}
4639 
4640 	if (!param_klass->valuetype) {
4641 		if (cfg->verbose_level > 3)
4642 			printf ("[UNSAFE-MOV-INTRISIC]\targs are reference types\n");
4643 		return TRUE;
4644 	}
4645 
4646 	//That are blitable
4647 	if (param_klass->has_references || return_klass->has_references)
4648 		return FALSE;
4649 
4650 	/* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
4651 	if ((MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
4652 		(!MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg))) {
4653 			if (cfg->verbose_level > 3)
4654 				printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n");
4655 		return FALSE;
4656 	}
4657 
4658 	if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
4659 		return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8) {
4660 		if (cfg->verbose_level > 3)
4661 			printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n");
4662 		return FALSE;
4663 	}
4664 
4665 	param_size = mono_class_value_size (param_klass, &align);
4666 	return_size = mono_class_value_size (return_klass, &align);
4667 
4668 	//We can do it if sizes match
4669 	if (param_size == return_size) {
4670 		if (cfg->verbose_level > 3)
4671 			printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n");
4672 		return TRUE;
4673 	}
4674 
4675 	//No simple way to handle struct if sizes don't match
4676 	if (MONO_TYPE_ISSTRUCT (&param_klass->byval_arg)) {
4677 		if (cfg->verbose_level > 3)
4678 			printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n");
4679 		return FALSE;
4680 	}
4681 
4682 	/*
4683 	 * Same reg size category.
4684 	 * A quick note on why we don't require widening here.
4685 	 * The intrinsic is "R Array.UnsafeMov<S,R> (S s)".
4686 	 *
4687 	 * Since the source value comes from a function argument, the JIT will already have
4688 	 * the value in a VREG and performed any widening needed before (say, when loading from a field).
4689 	 */
4690 	if (param_size <= 4 && return_size <= 4) {
4691 		if (cfg->verbose_level > 3)
4692 			printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n");
4693 		return TRUE;
4694 	}
4695 
4696 	return FALSE;
4697 }
4698 
4699 static MonoInst*
emit_array_unsafe_mov(MonoCompile * cfg,MonoMethodSignature * fsig,MonoInst ** args)4700 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
4701 {
4702 	MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
4703 	MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
4704 
4705 	if (mini_is_gsharedvt_variable_type (fsig->ret))
4706 		return NULL;
4707 
4708 	//Valuetypes that are semantically equivalent or numbers than can be widened to
4709 	if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
4710 		return args [0];
4711 
4712 	//Arrays of valuetypes that are semantically equivalent
4713 	if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
4714 		return args [0];
4715 
4716 	return NULL;
4717 }
4718 
4719 static MonoInst*
mini_emit_inst_for_ctor(MonoCompile * cfg,MonoMethod * cmethod,MonoMethodSignature * fsig,MonoInst ** args)4720 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4721 {
4722 #ifdef MONO_ARCH_SIMD_INTRINSICS
4723 	MonoInst *ins = NULL;
4724 
4725 	if (cfg->opt & MONO_OPT_SIMD) {
4726 		ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4727 		if (ins)
4728 			return ins;
4729 	}
4730 #endif
4731 
4732 	return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
4733 }
4734 
4735 MonoInst*
mini_emit_memory_barrier(MonoCompile * cfg,int kind)4736 mini_emit_memory_barrier (MonoCompile *cfg, int kind)
4737 {
4738 	MonoInst *ins = NULL;
4739 	MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4740 	MONO_ADD_INS (cfg->cbb, ins);
4741 	ins->backend.memory_barrier_kind = kind;
4742 
4743 	return ins;
4744 }
4745 
4746 static MonoInst*
llvm_emit_inst_for_method(MonoCompile * cfg,MonoMethod * cmethod,MonoMethodSignature * fsig,MonoInst ** args)4747 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4748 {
4749 	MonoInst *ins = NULL;
4750 	int opcode = 0;
4751 
4752 	/* The LLVM backend supports these intrinsics */
4753 	if (cmethod->klass == mono_defaults.math_class) {
4754 		if (strcmp (cmethod->name, "Sin") == 0) {
4755 			opcode = OP_SIN;
4756 		} else if (strcmp (cmethod->name, "Cos") == 0) {
4757 			opcode = OP_COS;
4758 		} else if (strcmp (cmethod->name, "Sqrt") == 0) {
4759 			opcode = OP_SQRT;
4760 		} else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
4761 			opcode = OP_ABS;
4762 		}
4763 
4764 		if (opcode && fsig->param_count == 1) {
4765 			MONO_INST_NEW (cfg, ins, opcode);
4766 			ins->type = STACK_R8;
4767 			ins->dreg = mono_alloc_dreg (cfg, ins->type);
4768 			ins->sreg1 = args [0]->dreg;
4769 			MONO_ADD_INS (cfg->cbb, ins);
4770 		}
4771 
4772 		opcode = 0;
4773 		if (cfg->opt & MONO_OPT_CMOV) {
4774 			if (strcmp (cmethod->name, "Min") == 0) {
4775 				if (fsig->params [0]->type == MONO_TYPE_I4)
4776 					opcode = OP_IMIN;
4777 				if (fsig->params [0]->type == MONO_TYPE_U4)
4778 					opcode = OP_IMIN_UN;
4779 				else if (fsig->params [0]->type == MONO_TYPE_I8)
4780 					opcode = OP_LMIN;
4781 				else if (fsig->params [0]->type == MONO_TYPE_U8)
4782 					opcode = OP_LMIN_UN;
4783 			} else if (strcmp (cmethod->name, "Max") == 0) {
4784 				if (fsig->params [0]->type == MONO_TYPE_I4)
4785 					opcode = OP_IMAX;
4786 				if (fsig->params [0]->type == MONO_TYPE_U4)
4787 					opcode = OP_IMAX_UN;
4788 				else if (fsig->params [0]->type == MONO_TYPE_I8)
4789 					opcode = OP_LMAX;
4790 				else if (fsig->params [0]->type == MONO_TYPE_U8)
4791 					opcode = OP_LMAX_UN;
4792 			}
4793 		}
4794 
4795 		if (opcode && fsig->param_count == 2) {
4796 			MONO_INST_NEW (cfg, ins, opcode);
4797 			ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
4798 			ins->dreg = mono_alloc_dreg (cfg, ins->type);
4799 			ins->sreg1 = args [0]->dreg;
4800 			ins->sreg2 = args [1]->dreg;
4801 			MONO_ADD_INS (cfg->cbb, ins);
4802 		}
4803 	}
4804 
4805 	return ins;
4806 }
4807 
4808 static MonoInst*
mini_emit_inst_for_sharable_method(MonoCompile * cfg,MonoMethod * cmethod,MonoMethodSignature * fsig,MonoInst ** args)4809 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4810 {
4811 	if (cmethod->klass == mono_defaults.array_class) {
4812 		if (strcmp (cmethod->name, "UnsafeStore") == 0)
4813 			return emit_array_unsafe_access (cfg, fsig, args, TRUE);
4814 		else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
4815 			return emit_array_unsafe_access (cfg, fsig, args, FALSE);
4816 		else if (strcmp (cmethod->name, "UnsafeMov") == 0)
4817 			return emit_array_unsafe_mov (cfg, fsig, args);
4818 	}
4819 
4820 	return NULL;
4821 }
4822 
4823 
4824 static gboolean
mono_type_is_native_blittable(MonoType * t)4825 mono_type_is_native_blittable (MonoType *t)
4826 {
4827 	if (MONO_TYPE_IS_REFERENCE (t))
4828 		return FALSE;
4829 
4830 	if (MONO_TYPE_IS_PRIMITIVE_SCALAR (t))
4831 		return TRUE;
4832 
4833 	MonoClass *klass = mono_class_from_mono_type (t);
4834 
4835 	//MonoClass::blitable depends on mono_class_setup_fields being done.
4836 	mono_class_setup_fields (klass);
4837 	if (!klass->blittable)
4838 		return FALSE;
4839 
4840 	// If the native marshal size is different we can't convert PtrToStructure to a type load
4841 	if (mono_class_native_size (klass, NULL) != mono_class_value_size (klass, NULL))
4842 		return FALSE;
4843 
4844 	return TRUE;
4845 }
4846 
4847 
4848 static MonoInst*
mini_emit_inst_for_method(MonoCompile * cfg,MonoMethod * cmethod,MonoMethodSignature * fsig,MonoInst ** args)4849 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4850 {
4851 	MonoInst *ins = NULL;
4852 	MonoClass *runtime_helpers_class = mono_class_get_runtime_helpers_class ();
4853 
4854 	if (cmethod->klass == mono_defaults.string_class) {
4855 		if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
4856 			int dreg = alloc_ireg (cfg);
4857 			int index_reg = alloc_preg (cfg);
4858 			int add_reg = alloc_preg (cfg);
4859 
4860 #if SIZEOF_REGISTER == 8
4861 			if (COMPILE_LLVM (cfg)) {
4862 				MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, args [1]->dreg);
4863 			} else {
4864 				/* The array reg is 64 bits but the index reg is only 32 */
4865 				MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4866 			}
4867 #else
4868 			index_reg = args [1]->dreg;
4869 #endif
4870 			MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4871 
4872 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4873 			EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
4874 			add_reg = ins->dreg;
4875 			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4876 								   add_reg, 0);
4877 #else
4878 			int mult_reg = alloc_preg (cfg);
4879 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4880 			MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4881 			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4882 								   add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
4883 #endif
4884 			type_from_op (cfg, ins, NULL, NULL);
4885 			return ins;
4886 		} else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
4887 			int dreg = alloc_ireg (cfg);
4888 			/* Decompose later to allow more optimizations */
4889 			EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
4890 			ins->type = STACK_I4;
4891 			ins->flags |= MONO_INST_FAULT;
4892 			cfg->cbb->has_array_access = TRUE;
4893 			cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
4894 
4895 			return ins;
4896 		} else
4897 			return NULL;
4898 	} else if (cmethod->klass == mono_defaults.object_class) {
4899 		if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
4900 			int dreg = alloc_ireg_ref (cfg);
4901 			int vt_reg = alloc_preg (cfg);
4902 			MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4903 			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
4904 			type_from_op (cfg, ins, NULL, NULL);
4905 
4906 			return ins;
4907 		} else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
4908 			int dreg = alloc_ireg (cfg);
4909 			int t1 = alloc_ireg (cfg);
4910 
4911 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4912 			EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4913 			ins->type = STACK_I4;
4914 
4915 			return ins;
4916 		} else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
4917  			MONO_INST_NEW (cfg, ins, OP_NOP);
4918 			MONO_ADD_INS (cfg->cbb, ins);
4919 			return ins;
4920 		} else
4921 			return NULL;
4922 	} else if (cmethod->klass == mono_defaults.array_class) {
4923 		if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
4924 			return emit_array_generic_access (cfg, fsig, args, FALSE);
4925 		else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
4926 			return emit_array_generic_access (cfg, fsig, args, TRUE);
4927 
4928 #ifndef MONO_BIG_ARRAYS
4929 		/*
4930 		 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
4931 		 * Array methods.
4932 		 */
4933 		else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
4934 		         (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
4935 		         args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
4936 			int dreg = alloc_ireg (cfg);
4937 			int bounds_reg = alloc_ireg_mp (cfg);
4938 			MonoBasicBlock *end_bb, *szarray_bb;
4939 			gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
4940 
4941 			NEW_BBLOCK (cfg, end_bb);
4942 			NEW_BBLOCK (cfg, szarray_bb);
4943 
4944 			EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
4945 										 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4946 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4947 			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
4948 			/* Non-szarray case */
4949 			if (get_length)
4950 				EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4951 									   bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4952 			else
4953 				EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4954 									   bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4955 			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4956 			MONO_START_BB (cfg, szarray_bb);
4957 			/* Szarray case */
4958 			if (get_length)
4959 				EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4960 									   args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
4961 			else
4962 				MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4963 			MONO_START_BB (cfg, end_bb);
4964 
4965 			EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
4966 			ins->type = STACK_I4;
4967 
4968 			return ins;
4969 		}
4970 #endif
4971 
4972  		if (cmethod->name [0] != 'g')
4973  			return NULL;
4974 
4975 		if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
4976 			int dreg = alloc_ireg (cfg);
4977 			int vtable_reg = alloc_preg (cfg);
4978 			MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4979 												 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4980 			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4981 								   vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4982 			type_from_op (cfg, ins, NULL, NULL);
4983 
4984 			return ins;
4985 		} else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
4986 			int dreg = alloc_ireg (cfg);
4987 
4988 			EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4989 										 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
4990 			type_from_op (cfg, ins, NULL, NULL);
4991 
4992 			return ins;
4993 		} else
4994 			return NULL;
4995 	} else if (cmethod->klass == runtime_helpers_class) {
4996 		if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
4997 			EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
4998 			return ins;
4999 		} else if (strcmp (cmethod->name, "IsReferenceOrContainsReferences") == 0 && fsig->param_count == 0) {
5000 			MonoGenericContext *ctx = mono_method_get_context (cmethod);
5001 			g_assert (ctx);
5002 			g_assert (ctx->method_inst);
5003 			g_assert (ctx->method_inst->type_argc == 1);
5004 			MonoType *arg_type = ctx->method_inst->type_argv [0];
5005 			MonoType *t;
5006 			MonoClass *klass;
5007 
5008 			ins = NULL;
5009 
5010 			/* Resolve the argument class as possible so we can handle common cases fast */
5011 			t = mini_get_underlying_type (arg_type);
5012 			klass = mono_class_from_mono_type (t);
5013 			mono_class_init (klass);
5014 			if (MONO_TYPE_IS_REFERENCE (t))
5015 				EMIT_NEW_ICONST (cfg, ins, 1);
5016 			else if (MONO_TYPE_IS_PRIMITIVE (t))
5017 				EMIT_NEW_ICONST (cfg, ins, 0);
5018 			else if (cfg->gshared && (t->type == MONO_TYPE_VAR || t->type == MONO_TYPE_MVAR) && !mini_type_var_is_vt (t))
5019 				EMIT_NEW_ICONST (cfg, ins, 1);
5020 			else if (!cfg->gshared || !mini_class_check_context_used (cfg, klass))
5021 				EMIT_NEW_ICONST (cfg, ins, klass->has_references ? 1 : 0);
5022 			else {
5023 				g_assert (cfg->gshared);
5024 
5025 				/* Have to use the original argument class here */
5026 				MonoClass *arg_class = mono_class_from_mono_type (arg_type);
5027 				int context_used = mini_class_check_context_used (cfg, arg_class);
5028 
5029 				/* This returns 1 or 2 */
5030 				MonoInst *info = mini_emit_get_rgctx_klass (cfg, context_used, arg_class, MONO_RGCTX_INFO_CLASS_IS_REF_OR_CONTAINS_REFS);
5031 				int dreg = alloc_ireg (cfg);
5032 				EMIT_NEW_BIALU_IMM (cfg, ins, OP_ISUB_IMM, dreg, info->dreg, 1);
5033 			}
5034 
5035 			return ins;
5036 		} else
5037 			return NULL;
5038 	} else if (cmethod->klass == mono_defaults.monitor_class) {
5039 		gboolean is_enter = FALSE;
5040 		gboolean is_v4 = FALSE;
5041 
5042 		if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 2 && fsig->params [1]->byref) {
5043 			is_enter = TRUE;
5044 			is_v4 = TRUE;
5045 		}
5046 		if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 1)
5047 			is_enter = TRUE;
5048 
5049 		if (is_enter) {
5050 			/*
5051 			 * To make async stack traces work, icalls which can block should have a wrapper.
5052 			 * For Monitor.Enter, emit two calls: a fastpath which doesn't have a wrapper, and a slowpath, which does.
5053 			 */
5054 			MonoBasicBlock *end_bb;
5055 
5056 			NEW_BBLOCK (cfg, end_bb);
5057 
5058 			ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_fast : (gpointer)mono_monitor_enter_fast, args);
5059 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, ins->dreg, 0);
5060 			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, end_bb);
5061 			ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_internal : (gpointer)mono_monitor_enter_internal, args);
5062 			MONO_START_BB (cfg, end_bb);
5063 			return ins;
5064 		}
5065 	} else if (cmethod->klass == mono_defaults.thread_class) {
5066 		if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
5067 			MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5068 			MONO_ADD_INS (cfg->cbb, ins);
5069 			return ins;
5070 		} else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
5071 			return mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5072 		} else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
5073 			guint32 opcode = 0;
5074 			gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5075 
5076 			if (fsig->params [0]->type == MONO_TYPE_I1)
5077 				opcode = OP_LOADI1_MEMBASE;
5078 			else if (fsig->params [0]->type == MONO_TYPE_U1)
5079 				opcode = OP_LOADU1_MEMBASE;
5080 			else if (fsig->params [0]->type == MONO_TYPE_I2)
5081 				opcode = OP_LOADI2_MEMBASE;
5082 			else if (fsig->params [0]->type == MONO_TYPE_U2)
5083 				opcode = OP_LOADU2_MEMBASE;
5084 			else if (fsig->params [0]->type == MONO_TYPE_I4)
5085 				opcode = OP_LOADI4_MEMBASE;
5086 			else if (fsig->params [0]->type == MONO_TYPE_U4)
5087 				opcode = OP_LOADU4_MEMBASE;
5088 			else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5089 				opcode = OP_LOADI8_MEMBASE;
5090 			else if (fsig->params [0]->type == MONO_TYPE_R4)
5091 				opcode = OP_LOADR4_MEMBASE;
5092 			else if (fsig->params [0]->type == MONO_TYPE_R8)
5093 				opcode = OP_LOADR8_MEMBASE;
5094 			else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5095 				opcode = OP_LOAD_MEMBASE;
5096 
5097 			if (opcode) {
5098 				MONO_INST_NEW (cfg, ins, opcode);
5099 				ins->inst_basereg = args [0]->dreg;
5100 				ins->inst_offset = 0;
5101 				MONO_ADD_INS (cfg->cbb, ins);
5102 
5103 				switch (fsig->params [0]->type) {
5104 				case MONO_TYPE_I1:
5105 				case MONO_TYPE_U1:
5106 				case MONO_TYPE_I2:
5107 				case MONO_TYPE_U2:
5108 				case MONO_TYPE_I4:
5109 				case MONO_TYPE_U4:
5110 					ins->dreg = mono_alloc_ireg (cfg);
5111 					ins->type = STACK_I4;
5112 					break;
5113 				case MONO_TYPE_I8:
5114 				case MONO_TYPE_U8:
5115 					ins->dreg = mono_alloc_lreg (cfg);
5116 					ins->type = STACK_I8;
5117 					break;
5118 				case MONO_TYPE_I:
5119 				case MONO_TYPE_U:
5120 					ins->dreg = mono_alloc_ireg (cfg);
5121 #if SIZEOF_REGISTER == 8
5122 					ins->type = STACK_I8;
5123 #else
5124 					ins->type = STACK_I4;
5125 #endif
5126 					break;
5127 				case MONO_TYPE_R4:
5128 				case MONO_TYPE_R8:
5129 					ins->dreg = mono_alloc_freg (cfg);
5130 					ins->type = STACK_R8;
5131 					break;
5132 				default:
5133 					g_assert (mini_type_is_reference (fsig->params [0]));
5134 					ins->dreg = mono_alloc_ireg_ref (cfg);
5135 					ins->type = STACK_OBJ;
5136 					break;
5137 				}
5138 
5139 				if (opcode == OP_LOADI8_MEMBASE)
5140 					ins = mono_decompose_opcode (cfg, ins);
5141 
5142 				mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5143 
5144 				return ins;
5145 			}
5146 		} else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
5147 			guint32 opcode = 0;
5148 			gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5149 
5150 			if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
5151 				opcode = OP_STOREI1_MEMBASE_REG;
5152 			else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
5153 				opcode = OP_STOREI2_MEMBASE_REG;
5154 			else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
5155 				opcode = OP_STOREI4_MEMBASE_REG;
5156 			else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5157 				opcode = OP_STOREI8_MEMBASE_REG;
5158 			else if (fsig->params [0]->type == MONO_TYPE_R4)
5159 				opcode = OP_STORER4_MEMBASE_REG;
5160 			else if (fsig->params [0]->type == MONO_TYPE_R8)
5161 				opcode = OP_STORER8_MEMBASE_REG;
5162 			else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5163 				opcode = OP_STORE_MEMBASE_REG;
5164 
5165 			if (opcode) {
5166 				mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5167 
5168 				MONO_INST_NEW (cfg, ins, opcode);
5169 				ins->sreg1 = args [1]->dreg;
5170 				ins->inst_destbasereg = args [0]->dreg;
5171 				ins->inst_offset = 0;
5172 				MONO_ADD_INS (cfg->cbb, ins);
5173 
5174 				if (opcode == OP_STOREI8_MEMBASE_REG)
5175 					ins = mono_decompose_opcode (cfg, ins);
5176 
5177 				return ins;
5178 			}
5179 		}
5180 	} else if (cmethod->klass->image == mono_defaults.corlib &&
5181 			   (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5182 			   (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5183 		ins = NULL;
5184 
5185 #if SIZEOF_REGISTER == 8
5186 		if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5187 			if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
5188 				MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
5189 				ins->dreg = mono_alloc_preg (cfg);
5190 				ins->sreg1 = args [0]->dreg;
5191 				ins->type = STACK_I8;
5192 				ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
5193 				MONO_ADD_INS (cfg->cbb, ins);
5194 			} else {
5195 				MonoInst *load_ins;
5196 
5197 				mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5198 
5199 				/* 64 bit reads are already atomic */
5200 				MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
5201 				load_ins->dreg = mono_alloc_preg (cfg);
5202 				load_ins->inst_basereg = args [0]->dreg;
5203 				load_ins->inst_offset = 0;
5204 				load_ins->type = STACK_I8;
5205 				MONO_ADD_INS (cfg->cbb, load_ins);
5206 
5207 				mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5208 
5209 				ins = load_ins;
5210 			}
5211 		}
5212 #endif
5213 
5214 		if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
5215 			MonoInst *ins_iconst;
5216 			guint32 opcode = 0;
5217 
5218 			if (fsig->params [0]->type == MONO_TYPE_I4) {
5219 				opcode = OP_ATOMIC_ADD_I4;
5220 				cfg->has_atomic_add_i4 = TRUE;
5221 			}
5222 #if SIZEOF_REGISTER == 8
5223 			else if (fsig->params [0]->type == MONO_TYPE_I8)
5224 				opcode = OP_ATOMIC_ADD_I8;
5225 #endif
5226 			if (opcode) {
5227 				if (!mono_arch_opcode_supported (opcode))
5228 					return NULL;
5229 				MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5230 				ins_iconst->inst_c0 = 1;
5231 				ins_iconst->dreg = mono_alloc_ireg (cfg);
5232 				MONO_ADD_INS (cfg->cbb, ins_iconst);
5233 
5234 				MONO_INST_NEW (cfg, ins, opcode);
5235 				ins->dreg = mono_alloc_ireg (cfg);
5236 				ins->inst_basereg = args [0]->dreg;
5237 				ins->inst_offset = 0;
5238 				ins->sreg2 = ins_iconst->dreg;
5239 				ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5240 				MONO_ADD_INS (cfg->cbb, ins);
5241 			}
5242 		} else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
5243 			MonoInst *ins_iconst;
5244 			guint32 opcode = 0;
5245 
5246 			if (fsig->params [0]->type == MONO_TYPE_I4) {
5247 				opcode = OP_ATOMIC_ADD_I4;
5248 				cfg->has_atomic_add_i4 = TRUE;
5249 			}
5250 #if SIZEOF_REGISTER == 8
5251 			else if (fsig->params [0]->type == MONO_TYPE_I8)
5252 				opcode = OP_ATOMIC_ADD_I8;
5253 #endif
5254 			if (opcode) {
5255 				if (!mono_arch_opcode_supported (opcode))
5256 					return NULL;
5257 				MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5258 				ins_iconst->inst_c0 = -1;
5259 				ins_iconst->dreg = mono_alloc_ireg (cfg);
5260 				MONO_ADD_INS (cfg->cbb, ins_iconst);
5261 
5262 				MONO_INST_NEW (cfg, ins, opcode);
5263 				ins->dreg = mono_alloc_ireg (cfg);
5264 				ins->inst_basereg = args [0]->dreg;
5265 				ins->inst_offset = 0;
5266 				ins->sreg2 = ins_iconst->dreg;
5267 				ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5268 				MONO_ADD_INS (cfg->cbb, ins);
5269 			}
5270 		} else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
5271 			guint32 opcode = 0;
5272 
5273 			if (fsig->params [0]->type == MONO_TYPE_I4) {
5274 				opcode = OP_ATOMIC_ADD_I4;
5275 				cfg->has_atomic_add_i4 = TRUE;
5276 			}
5277 #if SIZEOF_REGISTER == 8
5278 			else if (fsig->params [0]->type == MONO_TYPE_I8)
5279 				opcode = OP_ATOMIC_ADD_I8;
5280 #endif
5281 			if (opcode) {
5282 				if (!mono_arch_opcode_supported (opcode))
5283 					return NULL;
5284 				MONO_INST_NEW (cfg, ins, opcode);
5285 				ins->dreg = mono_alloc_ireg (cfg);
5286 				ins->inst_basereg = args [0]->dreg;
5287 				ins->inst_offset = 0;
5288 				ins->sreg2 = args [1]->dreg;
5289 				ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5290 				MONO_ADD_INS (cfg->cbb, ins);
5291 			}
5292 		}
5293 		else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
5294 			MonoInst *f2i = NULL, *i2f;
5295 			guint32 opcode, f2i_opcode, i2f_opcode;
5296 			gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5297 			gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
5298 
5299 			if (fsig->params [0]->type == MONO_TYPE_I4 ||
5300 			    fsig->params [0]->type == MONO_TYPE_R4) {
5301 				opcode = OP_ATOMIC_EXCHANGE_I4;
5302 				f2i_opcode = OP_MOVE_F_TO_I4;
5303 				i2f_opcode = OP_MOVE_I4_TO_F;
5304 				cfg->has_atomic_exchange_i4 = TRUE;
5305 			}
5306 #if SIZEOF_REGISTER == 8
5307 			else if (is_ref ||
5308 			         fsig->params [0]->type == MONO_TYPE_I8 ||
5309 			         fsig->params [0]->type == MONO_TYPE_R8 ||
5310 			         fsig->params [0]->type == MONO_TYPE_I) {
5311 				opcode = OP_ATOMIC_EXCHANGE_I8;
5312 				f2i_opcode = OP_MOVE_F_TO_I8;
5313 				i2f_opcode = OP_MOVE_I8_TO_F;
5314 			}
5315 #else
5316 			else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
5317 				opcode = OP_ATOMIC_EXCHANGE_I4;
5318 				cfg->has_atomic_exchange_i4 = TRUE;
5319 			}
5320 #endif
5321 			else
5322 				return NULL;
5323 
5324 			if (!mono_arch_opcode_supported (opcode))
5325 				return NULL;
5326 
5327 			if (is_float) {
5328 				/* TODO: Decompose these opcodes instead of bailing here. */
5329 				if (COMPILE_SOFT_FLOAT (cfg))
5330 					return NULL;
5331 
5332 				MONO_INST_NEW (cfg, f2i, f2i_opcode);
5333 				f2i->dreg = mono_alloc_ireg (cfg);
5334 				f2i->sreg1 = args [1]->dreg;
5335 				if (f2i_opcode == OP_MOVE_F_TO_I4)
5336 					f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5337 				MONO_ADD_INS (cfg->cbb, f2i);
5338 			}
5339 
5340 			MONO_INST_NEW (cfg, ins, opcode);
5341 			ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5342 			ins->inst_basereg = args [0]->dreg;
5343 			ins->inst_offset = 0;
5344 			ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
5345 			MONO_ADD_INS (cfg->cbb, ins);
5346 
5347 			switch (fsig->params [0]->type) {
5348 			case MONO_TYPE_I4:
5349 				ins->type = STACK_I4;
5350 				break;
5351 			case MONO_TYPE_I8:
5352 				ins->type = STACK_I8;
5353 				break;
5354 			case MONO_TYPE_I:
5355 #if SIZEOF_REGISTER == 8
5356 				ins->type = STACK_I8;
5357 #else
5358 				ins->type = STACK_I4;
5359 #endif
5360 				break;
5361 			case MONO_TYPE_R4:
5362 			case MONO_TYPE_R8:
5363 				ins->type = STACK_R8;
5364 				break;
5365 			default:
5366 				g_assert (mini_type_is_reference (fsig->params [0]));
5367 				ins->type = STACK_OBJ;
5368 				break;
5369 			}
5370 
5371 			if (is_float) {
5372 				MONO_INST_NEW (cfg, i2f, i2f_opcode);
5373 				i2f->dreg = mono_alloc_freg (cfg);
5374 				i2f->sreg1 = ins->dreg;
5375 				i2f->type = STACK_R8;
5376 				if (i2f_opcode == OP_MOVE_I4_TO_F)
5377 					i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5378 				MONO_ADD_INS (cfg->cbb, i2f);
5379 
5380 				ins = i2f;
5381 			}
5382 
5383 			if (cfg->gen_write_barriers && is_ref)
5384 				mini_emit_write_barrier (cfg, args [0], args [1]);
5385 		}
5386 		else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
5387 			MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
5388 			guint32 opcode, f2i_opcode, i2f_opcode;
5389 			gboolean is_ref = mini_type_is_reference (fsig->params [1]);
5390 			gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
5391 
5392 			if (fsig->params [1]->type == MONO_TYPE_I4 ||
5393 			    fsig->params [1]->type == MONO_TYPE_R4) {
5394 				opcode = OP_ATOMIC_CAS_I4;
5395 				f2i_opcode = OP_MOVE_F_TO_I4;
5396 				i2f_opcode = OP_MOVE_I4_TO_F;
5397 				cfg->has_atomic_cas_i4 = TRUE;
5398 			}
5399 #if SIZEOF_REGISTER == 8
5400 			else if (is_ref ||
5401 			         fsig->params [1]->type == MONO_TYPE_I8 ||
5402 			         fsig->params [1]->type == MONO_TYPE_R8 ||
5403 			         fsig->params [1]->type == MONO_TYPE_I) {
5404 				opcode = OP_ATOMIC_CAS_I8;
5405 				f2i_opcode = OP_MOVE_F_TO_I8;
5406 				i2f_opcode = OP_MOVE_I8_TO_F;
5407 			}
5408 #else
5409 			else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
5410 				opcode = OP_ATOMIC_CAS_I4;
5411 				cfg->has_atomic_cas_i4 = TRUE;
5412 			}
5413 #endif
5414 			else
5415 				return NULL;
5416 
5417 			if (!mono_arch_opcode_supported (opcode))
5418 				return NULL;
5419 
5420 			if (is_float) {
5421 				/* TODO: Decompose these opcodes instead of bailing here. */
5422 				if (COMPILE_SOFT_FLOAT (cfg))
5423 					return NULL;
5424 
5425 				MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
5426 				f2i_new->dreg = mono_alloc_ireg (cfg);
5427 				f2i_new->sreg1 = args [1]->dreg;
5428 				if (f2i_opcode == OP_MOVE_F_TO_I4)
5429 					f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5430 				MONO_ADD_INS (cfg->cbb, f2i_new);
5431 
5432 				MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
5433 				f2i_cmp->dreg = mono_alloc_ireg (cfg);
5434 				f2i_cmp->sreg1 = args [2]->dreg;
5435 				if (f2i_opcode == OP_MOVE_F_TO_I4)
5436 					f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5437 				MONO_ADD_INS (cfg->cbb, f2i_cmp);
5438 			}
5439 
5440 			MONO_INST_NEW (cfg, ins, opcode);
5441 			ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5442 			ins->sreg1 = args [0]->dreg;
5443 			ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
5444 			ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
5445 			MONO_ADD_INS (cfg->cbb, ins);
5446 
5447 			switch (fsig->params [1]->type) {
5448 			case MONO_TYPE_I4:
5449 				ins->type = STACK_I4;
5450 				break;
5451 			case MONO_TYPE_I8:
5452 				ins->type = STACK_I8;
5453 				break;
5454 			case MONO_TYPE_I:
5455 #if SIZEOF_REGISTER == 8
5456 				ins->type = STACK_I8;
5457 #else
5458 				ins->type = STACK_I4;
5459 #endif
5460 				break;
5461 			case MONO_TYPE_R4:
5462 				ins->type = cfg->r4_stack_type;
5463 				break;
5464 			case MONO_TYPE_R8:
5465 				ins->type = STACK_R8;
5466 				break;
5467 			default:
5468 				g_assert (mini_type_is_reference (fsig->params [1]));
5469 				ins->type = STACK_OBJ;
5470 				break;
5471 			}
5472 
5473 			if (is_float) {
5474 				MONO_INST_NEW (cfg, i2f, i2f_opcode);
5475 				i2f->dreg = mono_alloc_freg (cfg);
5476 				i2f->sreg1 = ins->dreg;
5477 				i2f->type = STACK_R8;
5478 				if (i2f_opcode == OP_MOVE_I4_TO_F)
5479 					i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5480 				MONO_ADD_INS (cfg->cbb, i2f);
5481 
5482 				ins = i2f;
5483 			}
5484 
5485 			if (cfg->gen_write_barriers && is_ref)
5486 				mini_emit_write_barrier (cfg, args [0], args [1]);
5487 		}
5488 		else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
5489 		         fsig->params [1]->type == MONO_TYPE_I4) {
5490 			MonoInst *cmp, *ceq;
5491 
5492 			if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
5493 				return NULL;
5494 
5495 			/* int32 r = CAS (location, value, comparand); */
5496 			MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5497 			ins->dreg = alloc_ireg (cfg);
5498 			ins->sreg1 = args [0]->dreg;
5499 			ins->sreg2 = args [1]->dreg;
5500 			ins->sreg3 = args [2]->dreg;
5501 			ins->type = STACK_I4;
5502 			MONO_ADD_INS (cfg->cbb, ins);
5503 
5504 			/* bool result = r == comparand; */
5505 			MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
5506 			cmp->sreg1 = ins->dreg;
5507 			cmp->sreg2 = args [2]->dreg;
5508 			cmp->type = STACK_I4;
5509 			MONO_ADD_INS (cfg->cbb, cmp);
5510 
5511 			MONO_INST_NEW (cfg, ceq, OP_ICEQ);
5512 			ceq->dreg = alloc_ireg (cfg);
5513 			ceq->type = STACK_I4;
5514 			MONO_ADD_INS (cfg->cbb, ceq);
5515 
5516 			/* *success = result; */
5517 			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
5518 
5519 			cfg->has_atomic_cas_i4 = TRUE;
5520 		}
5521 		else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
5522 			ins = mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5523 
5524 		if (ins)
5525 			return ins;
5526 	} else if (cmethod->klass->image == mono_defaults.corlib &&
5527 			   (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5528 			   (strcmp (cmethod->klass->name, "Volatile") == 0)) {
5529 		ins = NULL;
5530 
5531 		if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
5532 			guint32 opcode = 0;
5533 			MonoType *t = fsig->params [0];
5534 			gboolean is_ref;
5535 			gboolean is_float = t->type == MONO_TYPE_R4 || t->type == MONO_TYPE_R8;
5536 
5537 			g_assert (t->byref);
5538 			/* t is a byref type, so the reference check is more complicated */
5539 			is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
5540 			if (t->type == MONO_TYPE_I1)
5541 				opcode = OP_ATOMIC_LOAD_I1;
5542 			else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
5543 				opcode = OP_ATOMIC_LOAD_U1;
5544 			else if (t->type == MONO_TYPE_I2)
5545 				opcode = OP_ATOMIC_LOAD_I2;
5546 			else if (t->type == MONO_TYPE_U2)
5547 				opcode = OP_ATOMIC_LOAD_U2;
5548 			else if (t->type == MONO_TYPE_I4)
5549 				opcode = OP_ATOMIC_LOAD_I4;
5550 			else if (t->type == MONO_TYPE_U4)
5551 				opcode = OP_ATOMIC_LOAD_U4;
5552 			else if (t->type == MONO_TYPE_R4)
5553 				opcode = OP_ATOMIC_LOAD_R4;
5554 			else if (t->type == MONO_TYPE_R8)
5555 				opcode = OP_ATOMIC_LOAD_R8;
5556 #if SIZEOF_REGISTER == 8
5557 			else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
5558 				opcode = OP_ATOMIC_LOAD_I8;
5559 			else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
5560 				opcode = OP_ATOMIC_LOAD_U8;
5561 #else
5562 			else if (t->type == MONO_TYPE_I)
5563 				opcode = OP_ATOMIC_LOAD_I4;
5564 			else if (is_ref || t->type == MONO_TYPE_U)
5565 				opcode = OP_ATOMIC_LOAD_U4;
5566 #endif
5567 
5568 			if (opcode) {
5569 				if (!mono_arch_opcode_supported (opcode))
5570 					return NULL;
5571 
5572 				MONO_INST_NEW (cfg, ins, opcode);
5573 				ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
5574 				ins->sreg1 = args [0]->dreg;
5575 				ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
5576 				MONO_ADD_INS (cfg->cbb, ins);
5577 
5578 				switch (t->type) {
5579 				case MONO_TYPE_BOOLEAN:
5580 				case MONO_TYPE_I1:
5581 				case MONO_TYPE_U1:
5582 				case MONO_TYPE_I2:
5583 				case MONO_TYPE_U2:
5584 				case MONO_TYPE_I4:
5585 				case MONO_TYPE_U4:
5586 					ins->type = STACK_I4;
5587 					break;
5588 				case MONO_TYPE_I8:
5589 				case MONO_TYPE_U8:
5590 					ins->type = STACK_I8;
5591 					break;
5592 				case MONO_TYPE_I:
5593 				case MONO_TYPE_U:
5594 #if SIZEOF_REGISTER == 8
5595 					ins->type = STACK_I8;
5596 #else
5597 					ins->type = STACK_I4;
5598 #endif
5599 					break;
5600 				case MONO_TYPE_R4:
5601 					ins->type = cfg->r4_stack_type;
5602 					break;
5603 				case MONO_TYPE_R8:
5604 					ins->type = STACK_R8;
5605 					break;
5606 				default:
5607 					g_assert (is_ref);
5608 					ins->type = STACK_OBJ;
5609 					break;
5610 				}
5611 			}
5612 		}
5613 
5614 		if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
5615 			guint32 opcode = 0;
5616 			MonoType *t = fsig->params [0];
5617 			gboolean is_ref;
5618 
5619 			g_assert (t->byref);
5620 			is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
5621 			if (t->type == MONO_TYPE_I1)
5622 				opcode = OP_ATOMIC_STORE_I1;
5623 			else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
5624 				opcode = OP_ATOMIC_STORE_U1;
5625 			else if (t->type == MONO_TYPE_I2)
5626 				opcode = OP_ATOMIC_STORE_I2;
5627 			else if (t->type == MONO_TYPE_U2)
5628 				opcode = OP_ATOMIC_STORE_U2;
5629 			else if (t->type == MONO_TYPE_I4)
5630 				opcode = OP_ATOMIC_STORE_I4;
5631 			else if (t->type == MONO_TYPE_U4)
5632 				opcode = OP_ATOMIC_STORE_U4;
5633 			else if (t->type == MONO_TYPE_R4)
5634 				opcode = OP_ATOMIC_STORE_R4;
5635 			else if (t->type == MONO_TYPE_R8)
5636 				opcode = OP_ATOMIC_STORE_R8;
5637 #if SIZEOF_REGISTER == 8
5638 			else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
5639 				opcode = OP_ATOMIC_STORE_I8;
5640 			else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
5641 				opcode = OP_ATOMIC_STORE_U8;
5642 #else
5643 			else if (t->type == MONO_TYPE_I)
5644 				opcode = OP_ATOMIC_STORE_I4;
5645 			else if (is_ref || t->type == MONO_TYPE_U)
5646 				opcode = OP_ATOMIC_STORE_U4;
5647 #endif
5648 
5649 			if (opcode) {
5650 				if (!mono_arch_opcode_supported (opcode))
5651 					return NULL;
5652 
5653 				MONO_INST_NEW (cfg, ins, opcode);
5654 				ins->dreg = args [0]->dreg;
5655 				ins->sreg1 = args [1]->dreg;
5656 				ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
5657 				MONO_ADD_INS (cfg->cbb, ins);
5658 
5659 				if (cfg->gen_write_barriers && is_ref)
5660 					mini_emit_write_barrier (cfg, args [0], args [1]);
5661 			}
5662 		}
5663 
5664 		if (ins)
5665 			return ins;
5666 	} else if (cmethod->klass->image == mono_defaults.corlib &&
5667 			   (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
5668 			   (strcmp (cmethod->klass->name, "Debugger") == 0)) {
5669 		if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
5670 			if (mini_should_insert_breakpoint (cfg->method)) {
5671 				ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
5672 			} else {
5673 				MONO_INST_NEW (cfg, ins, OP_NOP);
5674 				MONO_ADD_INS (cfg->cbb, ins);
5675 			}
5676 			return ins;
5677 		}
5678 	} else if (cmethod->klass->image == mono_defaults.corlib &&
5679 	           (strcmp (cmethod->klass->name_space, "System") == 0) &&
5680 	           (strcmp (cmethod->klass->name, "Environment") == 0)) {
5681 		if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
5682 #ifdef TARGET_WIN32
5683 			EMIT_NEW_ICONST (cfg, ins, 1);
5684 #else
5685 			EMIT_NEW_ICONST (cfg, ins, 0);
5686 #endif
5687 		}
5688 	} else if (cmethod->klass->image == mono_defaults.corlib &&
5689 			   (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
5690 			   (strcmp (cmethod->klass->name, "Assembly") == 0)) {
5691 		if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
5692 			/* No stack walks are currently available, so implement this as an intrinsic */
5693 			MonoInst *assembly_ins;
5694 
5695 			EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, cfg->method->klass->image);
5696 			ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
5697 			return ins;
5698 		}
5699 	} else if (cmethod->klass->image == mono_defaults.corlib &&
5700 			   (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
5701 			   (strcmp (cmethod->klass->name, "MethodBase") == 0)) {
5702 		if (cfg->llvm_only && !strcmp (cmethod->name, "GetCurrentMethod")) {
5703 			/* No stack walks are currently available, so implement this as an intrinsic */
5704 			MonoInst *method_ins;
5705 			MonoMethod *declaring = cfg->method;
5706 
5707 			/* This returns the declaring generic method */
5708 			if (declaring->is_inflated)
5709 				declaring = ((MonoMethodInflated*)cfg->method)->declaring;
5710 			EMIT_NEW_AOTCONST (cfg, method_ins, MONO_PATCH_INFO_METHODCONST, declaring);
5711 			ins = mono_emit_jit_icall (cfg, mono_get_method_object, &method_ins);
5712 			cfg->no_inline = TRUE;
5713 			if (cfg->method != cfg->current_method)
5714 				inline_failure (cfg, "MethodBase:GetCurrentMethod ()");
5715 			return ins;
5716 		}
5717 	} else if (cmethod->klass == mono_defaults.math_class) {
5718 		/*
5719 		 * There is general branchless code for Min/Max, but it does not work for
5720 		 * all inputs:
5721 		 * http://everything2.com/?node_id=1051618
5722 		 */
5723 	} else if (cmethod->klass == mono_defaults.systemtype_class && !strcmp (cmethod->name, "op_Equality")) {
5724 		EMIT_NEW_BIALU (cfg, ins, OP_COMPARE, -1, args [0]->dreg, args [1]->dreg);
5725 		MONO_INST_NEW (cfg, ins, OP_PCEQ);
5726 		ins->dreg = alloc_preg (cfg);
5727 		ins->type = STACK_I4;
5728 		MONO_ADD_INS (cfg->cbb, ins);
5729 		return ins;
5730 	} else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
5731 	            !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
5732 				!strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
5733 				!strcmp (cmethod->klass->name, "Selector")) ||
5734 			   ((!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") ||
5735 				 !strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.Mac")) &&
5736 				!strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
5737 				!strcmp (cmethod->klass->name, "Selector"))
5738 			   ) {
5739 		if ((cfg->backend->have_objc_get_selector || cfg->compile_llvm) &&
5740 			!strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
5741 		    (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
5742 		    cfg->compile_aot) {
5743 			MonoInst *pi;
5744 			MonoJumpInfoToken *ji;
5745 			char *s;
5746 
5747 			if (args [0]->opcode == OP_GOT_ENTRY) {
5748 				pi = (MonoInst *)args [0]->inst_p1;
5749 				g_assert (pi->opcode == OP_PATCH_INFO);
5750 				g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
5751 				ji = (MonoJumpInfoToken *)pi->inst_p0;
5752 			} else {
5753 				g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
5754 				ji = (MonoJumpInfoToken *)args [0]->inst_p0;
5755 			}
5756 
5757 			NULLIFY_INS (args [0]);
5758 
5759 			s = mono_ldstr_utf8 (ji->image, mono_metadata_token_index (ji->token), &cfg->error);
5760 			return_val_if_nok (&cfg->error, NULL);
5761 
5762 			MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
5763 			ins->dreg = mono_alloc_ireg (cfg);
5764 			// FIXME: Leaks
5765 			ins->inst_p0 = s;
5766 			MONO_ADD_INS (cfg->cbb, ins);
5767 			return ins;
5768 		}
5769 	} else if (cmethod->klass->image == mono_defaults.corlib &&
5770 			(strcmp (cmethod->klass->name_space, "System.Runtime.InteropServices") == 0) &&
5771 			(strcmp (cmethod->klass->name, "Marshal") == 0)) {
5772 		//Convert Marshal.PtrToStructure<T> of blittable T to direct loads
5773 		if (strcmp (cmethod->name, "PtrToStructure") == 0 &&
5774 				cmethod->is_inflated &&
5775 				fsig->param_count == 1 &&
5776 				!mini_method_check_context_used (cfg, cmethod)) {
5777 
5778 			MonoGenericContext *method_context = mono_method_get_context (cmethod);
5779 			MonoType *arg0 = method_context->method_inst->type_argv [0];
5780 			if (mono_type_is_native_blittable (arg0))
5781 				return mini_emit_memory_load (cfg, arg0, args [0], 0, 0);
5782 		}
5783 	}
5784 
5785 #ifdef MONO_ARCH_SIMD_INTRINSICS
5786 	if (cfg->opt & MONO_OPT_SIMD) {
5787 		ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5788 		if (ins)
5789 			return ins;
5790 	}
5791 #endif
5792 
5793 	ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
5794 	if (ins)
5795 		return ins;
5796 
5797 	if (COMPILE_LLVM (cfg)) {
5798 		ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
5799 		if (ins)
5800 			return ins;
5801 	}
5802 
5803 	return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
5804 }
5805 
5806 /*
5807  * This entry point could be used later for arbitrary method
5808  * redirection.
5809  */
5810 inline static MonoInst*
mini_redirect_call(MonoCompile * cfg,MonoMethod * method,MonoMethodSignature * signature,MonoInst ** args,MonoInst * this_ins)5811 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
5812 					MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
5813 {
5814 	if (method->klass == mono_defaults.string_class) {
5815 		/* managed string allocation support */
5816 		if (strcmp (method->name, "InternalAllocateStr") == 0 && !(cfg->opt & MONO_OPT_SHARED)) {
5817 			MonoInst *iargs [2];
5818 			MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
5819 			MonoMethod *managed_alloc = NULL;
5820 
5821 			g_assert (vtable); /*Should not fail since it System.String*/
5822 #ifndef MONO_CROSS_COMPILE
5823 			managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
5824 #endif
5825 			if (!managed_alloc)
5826 				return NULL;
5827 			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
5828 			iargs [1] = args [0];
5829 			return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
5830 		}
5831 	}
5832 	return NULL;
5833 }
5834 
5835 static void
mono_save_args(MonoCompile * cfg,MonoMethodSignature * sig,MonoInst ** sp)5836 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
5837 {
5838 	MonoInst *store, *temp;
5839 	int i;
5840 
5841 	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
5842 		MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
5843 
5844 		/*
5845 		 * FIXME: We should use *args++ = sp [0], but that would mean the arg
5846 		 * would be different than the MonoInst's used to represent arguments, and
5847 		 * the ldelema implementation can't deal with that.
5848 		 * Solution: When ldelema is used on an inline argument, create a var for
5849 		 * it, emit ldelema on that var, and emit the saving code below in
5850 		 * inline_method () if needed.
5851 		 */
5852 		temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
5853 		cfg->args [i] = temp;
5854 		/* This uses cfg->args [i] which is set by the preceeding line */
5855 		EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
5856 		store->cil_code = sp [0]->cil_code;
5857 		sp++;
5858 	}
5859 }
5860 
5861 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
5862 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
5863 
#if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * Debug helper: restrict inlining to callees whose full name starts with
 * the prefix given by the MONO_INLINE_CALLED_METHOD_NAME_LIMIT env var.
 * Returns TRUE when the method may be inlined.
 */
static gboolean
check_inline_called_method_name_limit (MonoMethod *called_method)
{
	static const char *limit;
	char *full_name;
	gboolean matches;

	/* Cache the env lookup; an empty string means "no limit configured". */
	if (limit == NULL) {
		const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");

		limit = limit_string ? limit_string : "";
	}

	if (limit [0] == '\0')
		return TRUE;

	full_name = mono_method_full_name (called_method, TRUE);
	/* Prefix match only: the callee's name must begin with the limit string. */
	matches = strncmp (full_name, limit, strlen (limit)) == 0;
	g_free (full_name);

	return matches;
}
#endif
5893 
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * Debug helper: restrict inlining to call sites whose caller's full name
 * starts with the prefix given by the MONO_INLINE_CALLER_METHOD_NAME_LIMIT
 * env var. Returns TRUE when inlining into this caller is allowed.
 */
static gboolean
check_inline_caller_method_name_limit (MonoMethod *caller_method)
{
	static const char *limit;
	char *full_name;
	gboolean matches;

	/* Cache the env lookup; an empty string means "no limit configured". */
	if (limit == NULL) {
		const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");

		limit = limit_string ? limit_string : "";
	}

	if (limit [0] == '\0')
		return TRUE;

	full_name = mono_method_full_name (caller_method, TRUE);
	/* Prefix match only: the caller's name must begin with the limit string. */
	matches = strncmp (full_name, limit, strlen (limit)) == 0;
	g_free (full_name);

	return matches;
}
#endif
5923 
/*
 * emit_init_rvar:
 *
 *   Emit an instruction into the current bblock which initializes the vreg
 * DREG to the zero value of type RTYPE (NULL for pointers, 0 for integers,
 * 0.0 for floats, a zeroed struct for value types).
 */
static void
emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
{
	/* Backing storage for the fp zero constants: R4CONST/R8CONST take a pointer to the value */
	static double r8_0 = 0.0;
	static float r4_0 = 0.0;
	MonoInst *ins;
	int t;

	rtype = mini_get_underlying_type (rtype);
	t = rtype->type;

	if (rtype->byref) {
		/* Managed pointer: initialize to NULL */
		MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
	} else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
		/* All small int types up to 32 bits */
		MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
	} else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
		MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
	} else if (cfg->r4fp && t == MONO_TYPE_R4) {
		/* r4fp mode keeps R4 values in single precision; must come before the generic fp case below */
		MONO_INST_NEW (cfg, ins, OP_R4CONST);
		ins->type = STACK_R4;
		ins->inst_p0 = (void*)&r4_0;
		ins->dreg = dreg;
		MONO_ADD_INS (cfg->cbb, ins);
	} else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
		MONO_INST_NEW (cfg, ins, OP_R8CONST);
		ins->type = STACK_R8;
		ins->inst_p0 = (void*)&r8_0;
		ins->dreg = dreg;
		MONO_ADD_INS (cfg->cbb, ins);
	} else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
		   ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
		/* Value types are zeroed as a block */
		MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
	} else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
		/* Generic type params constrained to value types behave like value types */
		MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
	} else {
		/* Everything else (reference types, open generic params) is a null pointer */
		MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
	}
}
5962 
/*
 * emit_dummy_init_rvar:
 *
 *   Like emit_init_rvar (), but emit OP_DUMMY_* instructions which only mark
 * DREG as defined (keeping the IR/SSA valid) without generating real code.
 * Type cases the dummy opcodes cannot express fall back to a real init.
 */
static void
emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
{
	int t;

	rtype = mini_get_underlying_type (rtype);
	t = rtype->type;

	if (rtype->byref) {
		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
	} else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
	} else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
	} else if (cfg->r4fp && t == MONO_TYPE_R4) {
		/* Must precede the generic fp case, mirroring emit_init_rvar () */
		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
	} else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
	} else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
		   ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
	} else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
	} else {
		/* No dummy opcode for this case: emit a real initialization */
		emit_init_rvar (cfg, dreg, rtype);
	}
}
5990 
5991 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
5992 static void
emit_init_local(MonoCompile * cfg,int local,MonoType * type,gboolean init)5993 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
5994 {
5995 	MonoInst *var = cfg->locals [local];
5996 	if (COMPILE_SOFT_FLOAT (cfg)) {
5997 		MonoInst *store;
5998 		int reg = alloc_dreg (cfg, (MonoStackType)var->type);
5999 		emit_init_rvar (cfg, reg, type);
6000 		EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6001 	} else {
6002 		if (init)
6003 			emit_init_rvar (cfg, var->dreg, type);
6004 		else
6005 			emit_dummy_init_rvar (cfg, var->dreg, type);
6006 	}
6007 }
6008 
/*
 * mini_inline_method:
 *
 *   Public entry point wrapping the file-local inline_method (); same
 * contract: returns the cost of inlining CMETHOD, or zero if it was not inlined.
 */
int
mini_inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, guchar *ip, guint real_offset, gboolean inline_always)
{
	return inline_method (cfg, cmethod, fsig, sp, ip, real_offset, inline_always);
}
6014 
6015 /*
6016  * inline_method:
6017  *
6018  * Return the cost of inlining CMETHOD, or zero if it should not be inlined.
6019  */
6020 static int
inline_method(MonoCompile * cfg,MonoMethod * cmethod,MonoMethodSignature * fsig,MonoInst ** sp,guchar * ip,guint real_offset,gboolean inline_always)6021 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6022 	       guchar *ip, guint real_offset, gboolean inline_always)
6023 {
6024 	MonoError error;
6025 	MonoInst *ins, *rvar = NULL;
6026 	MonoMethodHeader *cheader;
6027 	MonoBasicBlock *ebblock, *sbblock;
6028 	int i, costs;
6029 	MonoMethod *prev_inlined_method;
6030 	MonoInst **prev_locals, **prev_args;
6031 	MonoType **prev_arg_types;
6032 	guint prev_real_offset;
6033 	GHashTable *prev_cbb_hash;
6034 	MonoBasicBlock **prev_cil_offset_to_bb;
6035 	MonoBasicBlock *prev_cbb;
6036 	const unsigned char *prev_ip;
6037 	unsigned char *prev_cil_start;
6038 	guint32 prev_cil_offset_to_bb_len;
6039 	MonoMethod *prev_current_method;
6040 	MonoGenericContext *prev_generic_context;
6041 	gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual_ = FALSE;
6042 
6043 	g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
6044 
6045 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6046 	if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6047 		return 0;
6048 #endif
6049 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6050 	if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6051 		return 0;
6052 #endif
6053 
6054 	if (!fsig)
6055 		fsig = mono_method_signature (cmethod);
6056 
6057 	if (cfg->verbose_level > 2)
6058 		printf ("INLINE START %p %s -> %s\n", cmethod,  mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6059 
6060 	if (!cmethod->inline_info) {
6061 		cfg->stat_inlineable_methods++;
6062 		cmethod->inline_info = 1;
6063 	}
6064 
6065 	/* allocate local variables */
6066 	cheader = mono_method_get_header_checked (cmethod, &error);
6067 	if (!cheader) {
6068 		if (inline_always) {
6069 			mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
6070 			mono_error_move (&cfg->error, &error);
6071 		} else {
6072 			mono_error_cleanup (&error);
6073 		}
6074 		return 0;
6075 	}
6076 
6077 	/*Must verify before creating locals as it can cause the JIT to assert.*/
6078 	if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6079 		mono_metadata_free_mh (cheader);
6080 		return 0;
6081 	}
6082 
6083 	/* allocate space to store the return value */
6084 	if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6085 		rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
6086 	}
6087 
6088 	prev_locals = cfg->locals;
6089 	cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6090 	for (i = 0; i < cheader->num_locals; ++i)
6091 		cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6092 
6093 	/* allocate start and end blocks */
6094 	/* This is needed so if the inline is aborted, we can clean up */
6095 	NEW_BBLOCK (cfg, sbblock);
6096 	sbblock->real_offset = real_offset;
6097 
6098 	NEW_BBLOCK (cfg, ebblock);
6099 	ebblock->block_num = cfg->num_bblocks++;
6100 	ebblock->real_offset = real_offset;
6101 
6102 	prev_args = cfg->args;
6103 	prev_arg_types = cfg->arg_types;
6104 	prev_inlined_method = cfg->inlined_method;
6105 	prev_ret_var_set = cfg->ret_var_set;
6106 	prev_real_offset = cfg->real_offset;
6107 	prev_cbb_hash = cfg->cbb_hash;
6108 	prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6109 	prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6110 	prev_cil_start = cfg->cil_start;
6111 	prev_ip = cfg->ip;
6112 	prev_cbb = cfg->cbb;
6113 	prev_current_method = cfg->current_method;
6114 	prev_generic_context = cfg->generic_context;
6115 	prev_disable_inline = cfg->disable_inline;
6116 
6117 	cfg->inlined_method = cmethod;
6118 	cfg->ret_var_set = FALSE;
6119 	cfg->inline_depth ++;
6120 
6121 	if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
6122 		virtual_ = TRUE;
6123 
6124 	costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual_);
6125 
6126 	ret_var_set = cfg->ret_var_set;
6127 
6128 	cfg->inlined_method = prev_inlined_method;
6129 	cfg->real_offset = prev_real_offset;
6130 	cfg->cbb_hash = prev_cbb_hash;
6131 	cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6132 	cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6133 	cfg->cil_start = prev_cil_start;
6134 	cfg->ip = prev_ip;
6135 	cfg->locals = prev_locals;
6136 	cfg->args = prev_args;
6137 	cfg->arg_types = prev_arg_types;
6138 	cfg->current_method = prev_current_method;
6139 	cfg->generic_context = prev_generic_context;
6140 	cfg->ret_var_set = prev_ret_var_set;
6141 	cfg->disable_inline = prev_disable_inline;
6142 	cfg->inline_depth --;
6143 
6144 	if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
6145 		if (cfg->verbose_level > 2)
6146 			printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6147 
6148 		cfg->stat_inlined_methods++;
6149 
6150 		/* always add some code to avoid block split failures */
6151 		MONO_INST_NEW (cfg, ins, OP_NOP);
6152 		MONO_ADD_INS (prev_cbb, ins);
6153 
6154 		prev_cbb->next_bb = sbblock;
6155 		link_bblock (cfg, prev_cbb, sbblock);
6156 
6157 		/*
6158 		 * Get rid of the begin and end bblocks if possible to aid local
6159 		 * optimizations.
6160 		 */
6161 		if (prev_cbb->out_count == 1)
6162 			mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6163 
6164 		if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6165 			mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6166 
6167 		if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6168 			MonoBasicBlock *prev = ebblock->in_bb [0];
6169 
6170 			if (prev->next_bb == ebblock) {
6171 				mono_merge_basic_blocks (cfg, prev, ebblock);
6172 				cfg->cbb = prev;
6173 				if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6174 					mono_merge_basic_blocks (cfg, prev_cbb, prev);
6175 					cfg->cbb = prev_cbb;
6176 				}
6177 			} else {
6178 				/* There could be a bblock after 'prev', and making 'prev' the current bb could cause problems */
6179 				cfg->cbb = ebblock;
6180 			}
6181 		} else {
6182 			/*
6183 			 * Its possible that the rvar is set in some prev bblock, but not in others.
6184 			 * (#1835).
6185 			 */
6186 			if (rvar) {
6187 				MonoBasicBlock *bb;
6188 
6189 				for (i = 0; i < ebblock->in_count; ++i) {
6190 					bb = ebblock->in_bb [i];
6191 
6192 					if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
6193 						cfg->cbb = bb;
6194 
6195 						emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6196 					}
6197 				}
6198 			}
6199 
6200 			cfg->cbb = ebblock;
6201 		}
6202 
6203 		if (rvar) {
6204 			/*
6205 			 * If the inlined method contains only a throw, then the ret var is not
6206 			 * set, so set it to a dummy value.
6207 			 */
6208 			if (!ret_var_set)
6209 				emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6210 
6211 			EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6212 			*sp++ = ins;
6213 		}
6214 		cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6215 		return costs + 1;
6216 	} else {
6217 		if (cfg->verbose_level > 2)
6218 			printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6219 		cfg->exception_type = MONO_EXCEPTION_NONE;
6220 
6221 		/* This gets rid of the newly added bblocks */
6222 		cfg->cbb = prev_cbb;
6223 	}
6224 	cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6225 	return 0;
6226 }
6227 
6228 /*
6229  * Some of these comments may well be out-of-date.
6230  * Design decisions: we do a single pass over the IL code (and we do bblock
6231  * splitting/merging in the few cases when it's required: a back jump to an IL
6232  * address that was not already seen as bblock starting point).
6233  * Code is validated as we go (full verification is still better left to metadata/verify.c).
6234  * Complex operations are decomposed in simpler ones right away. We need to let the
6235  * arch-specific code peek and poke inside this process somehow (except when the
6236  * optimizations can take advantage of the full semantic info of coarse opcodes).
6237  * All the opcodes of the form opcode.s are 'normalized' to opcode.
6238  * MonoInst->opcode initially is the IL opcode or some simplification of that
6239  * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6240  * opcode with value bigger than OP_LAST.
6241  * At this point the IR can be handed over to an interpreter, a dumb code generator
6242  * or to the optimizing code generator that will translate it to SSA form.
6243  *
6244  * Profiling directed optimizations.
6245  * We may compile by default with few or no optimizations and instrument the code
6246  * or the user may indicate what methods to optimize the most either in a config file
6247  * or through repeated runs where the compiler applies offline the optimizations to
6248  * each method and then decides if it was worth it.
6249  */
6250 
/*
 * Verification/sanity checks used while emitting IR from IL. On failure each
 * macro bails out of the enclosing function through UNVERIFIED or
 * TYPE_LOAD_ERROR (see the `unverified:` / `exception_exit:` labels in the
 * functions below), so they may only be used where those labels exist.
 */
#define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
#define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
#define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
#define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
#define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
#define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
#define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
#define CHECK_TYPELOAD(klass) if (!(klass) || mono_class_has_failure (klass)) TYPE_LOAD_ERROR ((klass))

/* offset from br.s -> br like opcodes */
#define BIG_BRANCH_OFFSET 13
6262 
6263 static gboolean
ip_in_bb(MonoCompile * cfg,MonoBasicBlock * bb,const guint8 * ip)6264 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
6265 {
6266 	MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
6267 
6268 	return b == NULL || b == bb;
6269 }
6270 
/*
 * get_basic_blocks:
 *
 *   Scan the IL stream [START, END) and create a bblock (via GET_BBLOCK) at
 * every branch target and at the instruction following each branch, so the
 * main IR emission pass knows where basic blocks begin. Returns 0 on
 * success; on invalid IL returns 1 and stores the offending ip in *POS.
 */
static int
get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
{
	unsigned char *ip = start;
	unsigned char *target;
	int i;
	guint cli_addr;
	MonoBasicBlock *bblock;
	const MonoOpcode *opcode;

	while (ip < end) {
		cli_addr = ip - start;
		i = mono_opcode_value ((const guint8 **)&ip, end);
		if (i < 0)
			UNVERIFIED;
		opcode = &mono_opcodes [i];
		/* Advance ip past the operand; size depends on the operand kind */
		switch (opcode->argument) {
		case MonoInlineNone:
			ip++;
			break;
		case MonoInlineString:
		case MonoInlineType:
		case MonoInlineField:
		case MonoInlineMethod:
		case MonoInlineTok:
		case MonoInlineSig:
		case MonoShortInlineR:
		case MonoInlineI:
			/* opcode byte + 4-byte operand */
			ip += 5;
			break;
		case MonoInlineVar:
			ip += 3;
			break;
		case MonoShortInlineVar:
		case MonoShortInlineI:
			ip += 2;
			break;
		case MonoShortInlineBrTarget:
			/* Signed 8-bit displacement relative to the next instruction */
			target = start + cli_addr + 2 + (signed char)ip [1];
			GET_BBLOCK (cfg, bblock, target);
			ip += 2;
			/* The fall-through point after a branch also starts a bblock */
			if (ip < end)
				GET_BBLOCK (cfg, bblock, ip);
			break;
		case MonoInlineBrTarget:
			/* Signed 32-bit displacement relative to the next instruction */
			target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
			GET_BBLOCK (cfg, bblock, target);
			ip += 5;
			if (ip < end)
				GET_BBLOCK (cfg, bblock, ip);
			break;
		case MonoInlineSwitch: {
			/* N 32-bit displacements follow the count; all are relative to the end of the switch */
			guint32 n = read32 (ip + 1);
			guint32 j;
			ip += 5;
			cli_addr += 5 + 4 * n;
			target = start + cli_addr;
			GET_BBLOCK (cfg, bblock, target);

			for (j = 0; j < n; ++j) {
				target = start + cli_addr + (gint32)read32 (ip);
				GET_BBLOCK (cfg, bblock, target);
				ip += 4;
			}
			break;
		}
		case MonoInlineR:
		case MonoInlineI8:
			/* opcode byte + 8-byte operand */
			ip += 9;
			break;
		default:
			g_assert_not_reached ();
		}

		if (i == CEE_THROW) {
			unsigned char *bb_start = ip - 1;

			/* Find the start of the bblock containing the throw */
			bblock = NULL;
			while ((bb_start >= start) && !bblock) {
				bblock = cfg->cil_offset_to_bb [(bb_start) - start];
				bb_start --;
			}
			/* Mark throw-containing bblocks as cold so they can be laid out out of line */
			if (bblock)
				bblock->out_of_line = 1;
		}
	}
	return 0;
/* Failure targets of the UNVERIFIED / GET_BBLOCK macros above */
unverified:
exception_exit:
	*pos = ip;
	return 1;
}
6364 
6365 static inline MonoMethod *
mini_get_method_allow_open(MonoMethod * m,guint32 token,MonoClass * klass,MonoGenericContext * context,MonoError * error)6366 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context, MonoError *error)
6367 {
6368 	MonoMethod *method;
6369 
6370 	error_init (error);
6371 
6372 	if (m->wrapper_type != MONO_WRAPPER_NONE) {
6373 		method = (MonoMethod *)mono_method_get_wrapper_data (m, token);
6374 		if (context) {
6375 			method = mono_class_inflate_generic_method_checked (method, context, error);
6376 		}
6377 	} else {
6378 		method = mono_get_method_checked (m->klass->image, token, klass, context, error);
6379 	}
6380 
6381 	return method;
6382 }
6383 
6384 static inline MonoMethod *
mini_get_method(MonoCompile * cfg,MonoMethod * m,guint32 token,MonoClass * klass,MonoGenericContext * context)6385 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6386 {
6387 	MonoError error;
6388 	MonoMethod *method = mini_get_method_allow_open (m, token, klass, context, cfg ? &cfg->error : &error);
6389 
6390 	if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg)) {
6391 		mono_error_set_bad_image (&cfg->error, cfg->method->klass->image, "Method with open type while not compiling gshared");
6392 		method = NULL;
6393 	}
6394 
6395 	if (!method && !cfg)
6396 		mono_error_cleanup (&error); /* FIXME don't swallow the error */
6397 
6398 	return method;
6399 }
6400 
6401 static inline MonoMethodSignature*
mini_get_signature(MonoMethod * method,guint32 token,MonoGenericContext * context,MonoError * error)6402 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context, MonoError *error)
6403 {
6404 	MonoMethodSignature *fsig;
6405 
6406 	error_init (error);
6407 	if (method->wrapper_type != MONO_WRAPPER_NONE) {
6408 		fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6409 	} else {
6410 		fsig = mono_metadata_parse_signature_checked (method->klass->image, token, error);
6411 		return_val_if_nok (error, NULL);
6412 	}
6413 	if (context) {
6414 		fsig = mono_inflate_generic_signature(fsig, context, error);
6415 	}
6416 	return fsig;
6417 }
6418 
/*
 * throw_exception:
 *
 *   Return the managed SecurityManager::ThrowException (exception) method,
 * looked up lazily and cached in a static.
 * NOTE(review): the lazy initialization is not interlocked — presumably
 * benign because the lookup is idempotent, but verify callers are serialized.
 */
static MonoMethod*
throw_exception (void)
{
	static MonoMethod *method = NULL;

	if (!method) {
		MonoSecurityManager *secman = mono_security_manager_get_methods ();
		method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
	}
	g_assert (method);
	return method;
}
6431 
6432 static void
emit_throw_exception(MonoCompile * cfg,MonoException * ex)6433 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6434 {
6435 	MonoMethod *thrower = throw_exception ();
6436 	MonoInst *args [1];
6437 
6438 	EMIT_NEW_PCONST (cfg, args [0], ex);
6439 	mono_emit_method_call (cfg, thrower, args, NULL);
6440 }
6441 
6442 /*
6443  * Return the original method is a wrapper is specified. We can only access
6444  * the custom attributes from the original method.
6445  */
6446 static MonoMethod*
get_original_method(MonoMethod * method)6447 get_original_method (MonoMethod *method)
6448 {
6449 	if (method->wrapper_type == MONO_WRAPPER_NONE)
6450 		return method;
6451 
6452 	/* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6453 	if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6454 		return NULL;
6455 
6456 	/* in other cases we need to find the original method */
6457 	return mono_marshal_method_from_wrapper (method);
6458 }
6459 
6460 static void
ensure_method_is_allowed_to_access_field(MonoCompile * cfg,MonoMethod * caller,MonoClassField * field)6461 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
6462 {
6463 	/* we can't get the coreclr security level on wrappers since they don't have the attributes */
6464 	MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6465 	if (ex)
6466 		emit_throw_exception (cfg, ex);
6467 }
6468 
6469 static void
ensure_method_is_allowed_to_call_method(MonoCompile * cfg,MonoMethod * caller,MonoMethod * callee)6470 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
6471 {
6472 	/* we can't get the coreclr security level on wrappers since they don't have the attributes */
6473 	MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6474 	if (ex)
6475 		emit_throw_exception (cfg, ex);
6476 }
6477 
6478 /*
6479  * Check that the IL instructions at ip are the array initialization
6480  * sequence and return the pointer to the data and the size.
6481  */
6482 static const char*
initialize_array_data(MonoMethod * method,gboolean aot,unsigned char * ip,MonoClass * klass,guint32 len,int * out_size,guint32 * out_field_token)6483 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
6484 {
6485 	/*
6486 	 * newarr[System.Int32]
6487 	 * dup
6488 	 * ldtoken field valuetype ...
6489 	 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
6490 	 */
6491 	if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
6492 		MonoError error;
6493 		guint32 token = read32 (ip + 7);
6494 		guint32 field_token = read32 (ip + 2);
6495 		guint32 field_index = field_token & 0xffffff;
6496 		guint32 rva;
6497 		const char *data_ptr;
6498 		int size = 0;
6499 		MonoMethod *cmethod;
6500 		MonoClass *dummy_class;
6501 		MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
6502 		int dummy_align;
6503 
6504 		if (!field) {
6505 			mono_error_cleanup (&error); /* FIXME don't swallow the error */
6506 			return NULL;
6507 		}
6508 
6509 		*out_field_token = field_token;
6510 
6511 		cmethod = mini_get_method (NULL, method, token, NULL, NULL);
6512 		if (!cmethod)
6513 			return NULL;
6514 		if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
6515 			return NULL;
6516 		switch (mini_get_underlying_type (&klass->byval_arg)->type) {
6517 		case MONO_TYPE_I1:
6518 		case MONO_TYPE_U1:
6519 			size = 1; break;
6520 		/* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6521 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
6522 		case MONO_TYPE_I2:
6523 		case MONO_TYPE_U2:
6524 			size = 2; break;
6525 		case MONO_TYPE_I4:
6526 		case MONO_TYPE_U4:
6527 		case MONO_TYPE_R4:
6528 			size = 4; break;
6529 		case MONO_TYPE_R8:
6530 		case MONO_TYPE_I8:
6531 		case MONO_TYPE_U8:
6532 			size = 8; break;
6533 #endif
6534 		default:
6535 			return NULL;
6536 		}
6537 		size *= len;
6538 		if (size > mono_type_size (field->type, &dummy_align))
6539 		    return NULL;
6540 		*out_size = size;
6541 		/*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6542 		if (!image_is_dynamic (method->klass->image)) {
6543 			field_index = read32 (ip + 2) & 0xffffff;
6544 			mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6545 			data_ptr = mono_image_rva_map (method->klass->image, rva);
6546 			/*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6547 			/* for aot code we do the lookup on load */
6548 			if (aot && data_ptr)
6549 				return (const char *)GUINT_TO_POINTER (rva);
6550 		} else {
6551 			/*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
6552 			g_assert (!aot);
6553 			data_ptr = mono_field_get_data (field);
6554 		}
6555 		return data_ptr;
6556 	}
6557 	return NULL;
6558 }
6559 
6560 static void
set_exception_type_from_invalid_il(MonoCompile * cfg,MonoMethod * method,unsigned char * ip)6561 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6562 {
6563 	MonoError error;
6564 	char *method_fname = mono_method_full_name (method, TRUE);
6565 	char *method_code;
6566 	MonoMethodHeader *header = mono_method_get_header_checked (method, &error);
6567 
6568 	if (!header) {
6569 		method_code = g_strdup_printf ("could not parse method body due to %s", mono_error_get_message (&error));
6570 		mono_error_cleanup (&error);
6571 	} else if (header->code_size == 0)
6572 		method_code = g_strdup ("method body is empty.");
6573 	else
6574 		method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6575 	mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code));
6576  	g_free (method_fname);
6577  	g_free (method_code);
6578 	cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
6579 }
6580 
6581 guint32
mono_type_to_stloc_coerce(MonoType * type)6582 mono_type_to_stloc_coerce (MonoType *type)
6583 {
6584 	if (type->byref)
6585 		return 0;
6586 
6587 	type = mini_get_underlying_type (type);
6588 handle_enum:
6589 	switch (type->type) {
6590 	case MONO_TYPE_I1:
6591 		return OP_ICONV_TO_I1;
6592 	case MONO_TYPE_U1:
6593 		return OP_ICONV_TO_U1;
6594 	case MONO_TYPE_I2:
6595 		return OP_ICONV_TO_I2;
6596 	case MONO_TYPE_U2:
6597 		return OP_ICONV_TO_U2;
6598 	case MONO_TYPE_I4:
6599 	case MONO_TYPE_U4:
6600 	case MONO_TYPE_I:
6601 	case MONO_TYPE_U:
6602 	case MONO_TYPE_PTR:
6603 	case MONO_TYPE_FNPTR:
6604 	case MONO_TYPE_CLASS:
6605 	case MONO_TYPE_STRING:
6606 	case MONO_TYPE_OBJECT:
6607 	case MONO_TYPE_SZARRAY:
6608 	case MONO_TYPE_ARRAY:
6609 	case MONO_TYPE_I8:
6610 	case MONO_TYPE_U8:
6611 	case MONO_TYPE_R4:
6612 	case MONO_TYPE_R8:
6613 	case MONO_TYPE_TYPEDBYREF:
6614 	case MONO_TYPE_GENERICINST:
6615 		return 0;
6616 	case MONO_TYPE_VALUETYPE:
6617 		if (type->data.klass->enumtype) {
6618 			type = mono_class_enum_basetype (type->data.klass);
6619 			goto handle_enum;
6620 		}
6621 		return 0;
6622 	case MONO_TYPE_VAR:
6623 	case MONO_TYPE_MVAR: //TODO I believe we don't need to handle gsharedvt as there won't be match and, for example, u1 is not covariant to u32
6624 		return 0;
6625 	default:
6626 		g_error ("unknown type 0x%02x in mono_type_to_stloc_coerce", type->type);
6627 	}
6628 	return -1;
6629 }
6630 
6631 static void
emit_stloc_ir(MonoCompile * cfg,MonoInst ** sp,MonoMethodHeader * header,int n)6632 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6633 {
6634 	MonoInst *ins;
6635 	guint32 coerce_op = mono_type_to_stloc_coerce (header->locals [n]);
6636 
6637 	if (coerce_op) {
6638 		if (cfg->cbb->last_ins == sp [0] && sp [0]->opcode == coerce_op) {
6639 			if (cfg->verbose_level > 2)
6640 				printf ("Found existing coercing is enough for stloc\n");
6641 		} else {
6642 			MONO_INST_NEW (cfg, ins, coerce_op);
6643 			ins->dreg = alloc_ireg (cfg);
6644 			ins->sreg1 = sp [0]->dreg;
6645 			ins->type = STACK_I4;
6646 			ins->klass = mono_class_from_mono_type (header->locals [n]);
6647 			MONO_ADD_INS (cfg->cbb, ins);
6648 			*sp = mono_decompose_opcode (cfg, ins);
6649 		}
6650 	}
6651 
6652 
6653 	guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
6654 	if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0]  &&
6655 			((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6656 		/* Optimize reg-reg moves away */
6657 		/*
6658 		 * Can't optimize other opcodes, since sp[0] might point to
6659 		 * the last ins of a decomposed opcode.
6660 		 */
6661 		sp [0]->dreg = (cfg)->locals [n]->dreg;
6662 	} else {
6663 		EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
6664 	}
6665 }
6666 
6667 static void
emit_starg_ir(MonoCompile * cfg,MonoInst ** sp,int n)6668 emit_starg_ir (MonoCompile *cfg, MonoInst **sp, int n)
6669 {
6670 	MonoInst *ins;
6671 	guint32 coerce_op = mono_type_to_stloc_coerce (cfg->arg_types [n]);
6672 
6673 	if (coerce_op) {
6674 		if (cfg->cbb->last_ins == sp [0] && sp [0]->opcode == coerce_op) {
6675 			if (cfg->verbose_level > 2)
6676 				printf ("Found existing coercing is enough for starg\n");
6677 		} else {
6678 			MONO_INST_NEW (cfg, ins, coerce_op);
6679 			ins->dreg = alloc_ireg (cfg);
6680 			ins->sreg1 = sp [0]->dreg;
6681 			ins->type = STACK_I4;
6682 			ins->klass = mono_class_from_mono_type (cfg->arg_types [n]);
6683 			MONO_ADD_INS (cfg->cbb, ins);
6684 			*sp = mono_decompose_opcode (cfg, ins);
6685 		}
6686 	}
6687 
6688 	EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
6689 }
6690 
6691 /*
6692  * ldloca inhibits many optimizations so try to get rid of it in common
6693  * cases.
6694  */
6695 static inline unsigned char *
emit_optimized_ldloca_ir(MonoCompile * cfg,unsigned char * ip,unsigned char * end,int size)6696 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
6697 {
6698 	int local, token;
6699 	MonoClass *klass;
6700 	MonoType *type;
6701 
6702 	if (size == 1) {
6703 		local = ip [1];
6704 		ip += 2;
6705 	} else {
6706 		local = read16 (ip + 2);
6707 		ip += 4;
6708 	}
6709 
6710 	if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
6711 		/* From the INITOBJ case */
6712 		token = read32 (ip + 2);
6713 		klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
6714 		CHECK_TYPELOAD (klass);
6715 		type = mini_get_underlying_type (&klass->byval_arg);
6716 		emit_init_local (cfg, local, type, TRUE);
6717 		return ip + 6;
6718 	}
6719  exception_exit:
6720 	return NULL;
6721 }
6722 
/*
 * emit_llvmonly_virtual_call:
 *
 *   Emit IR for a virtual or interface call to CMETHOD in llvm-only mode.
 * In this mode, vtable and IMT slots contain function descriptors
 * (an <addr, arg> pair) instead of raw method addresses/trampolines, so
 * every path below ends in an indirect call through a descriptor.
 * SP [0] is the receiver; FSIG is the call signature; CONTEXT_USED is the
 * generic sharing context (see emit_get_rgctx_method).
 * Returns the call instruction.
 */
static MonoInst*
emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp)
{
	MonoInst *icall_args [16];
	MonoInst *call_target, *ins, *vtable_ins;
	int arg_reg, this_reg, vtable_reg;
	gboolean is_iface = mono_class_is_interface (cmethod->klass);
	gboolean is_gsharedvt = cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig);
	gboolean variant_iface = FALSE;
	guint32 slot;
	int offset;
	gboolean special_array_interface = cmethod->klass->is_array_special_interface;

	/*
	 * In llvm-only mode, vtables contain function descriptors instead of
	 * method addresses/trampolines.
	 */
	MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);

	/* Interfaces are dispatched through the IMT, classes through the vtable proper. */
	if (is_iface)
		slot = mono_method_get_imt_slot (cmethod);
	else
		slot = mono_method_get_vtable_index (cmethod);

	this_reg = sp [0]->dreg;

	/* Variant interfaces need the runtime to resolve the actual target, see below. */
	if (is_iface && mono_class_has_variant_generic_params (cmethod->klass))
		variant_iface = TRUE;

	if (!fsig->generic_param_count && !is_iface && !is_gsharedvt) {
		/*
		 * The simplest case, a normal virtual call.
		 */
		int slot_reg = alloc_preg (cfg);
		int addr_reg = alloc_preg (cfg);
		int arg_reg = alloc_preg (cfg);
		MonoBasicBlock *non_null_bb;

		vtable_reg = alloc_preg (cfg);
		EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
		offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);

		/* Load the vtable slot, which contains a function descriptor. */
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);

		NEW_BBLOCK (cfg, non_null_bb);

		/* A NULL slot means it has not been initialized yet; branch to the slow path. */
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
		cfg->cbb->last_ins->flags |= MONO_INST_LIKELY;
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_null_bb);

		/* Slow path */
		// FIXME: Make the wrapper use the preserveall cconv
		// FIXME: Use one icall per slot for small slot numbers ?
		icall_args [0] = vtable_ins;
		EMIT_NEW_ICONST (cfg, icall_args [1], slot);
		/* Make the icall return the vtable slot value to save some code space */
		ins = mono_emit_jit_icall (cfg, mono_init_vtable_slot, icall_args);
		/* Both paths leave the descriptor in slot_reg so the join block can use it. */
		ins->dreg = slot_reg;
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, non_null_bb);

		/* Fastpath */
		MONO_START_BB (cfg, non_null_bb);
		/* Load the address + arg from the vtable slot */
		EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, slot_reg, SIZEOF_VOID_P);

		return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
	}

	if (!fsig->generic_param_count && is_iface && !variant_iface && !is_gsharedvt && !special_array_interface) {
		/*
		 * A simple interface call
		 *
		 * We make a call through an imt slot to obtain the function descriptor we need to call.
		 * The imt slot contains a function descriptor for a runtime function + arg.
		 */
		int slot_reg = alloc_preg (cfg);
		int addr_reg = alloc_preg (cfg);
		int arg_reg = alloc_preg (cfg);
		MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;

		vtable_reg = alloc_preg (cfg);
		EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
		/* IMT slots live at negative offsets from the vtable pointer. */
		offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;

		/*
		 * The slot is already initialized when the vtable is created so there is no need
		 * to check it here.
		 */

		/* Load the imt slot, which contains a function descriptor. */
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);

		/* Load the address + arg of the imt thunk from the imt slot */
		EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
		EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
		/*
		 * IMT thunks in llvm-only mode are C functions which take an info argument
		 * plus the imt method and return the ftndesc to call.
		 */
		icall_args [0] = thunk_arg_ins;
		icall_args [1] = emit_get_rgctx_method (cfg, context_used,
												cmethod, MONO_RGCTX_INFO_METHOD);
		ftndesc_ins = mini_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);

		return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
	}

	if ((fsig->generic_param_count || variant_iface || special_array_interface) && !is_gsharedvt) {
		/*
		 * This is similar to the interface case, the vtable slot points to an imt thunk which is
		 * dynamically extended as more instantiations are discovered.
		 * This handles generic virtual methods both on classes and interfaces.
		 */
		int slot_reg = alloc_preg (cfg);
		int addr_reg = alloc_preg (cfg);
		int arg_reg = alloc_preg (cfg);
		int ftndesc_reg = alloc_preg (cfg);
		MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
		MonoBasicBlock *slowpath_bb, *end_bb;

		NEW_BBLOCK (cfg, slowpath_bb);
		NEW_BBLOCK (cfg, end_bb);

		vtable_reg = alloc_preg (cfg);
		EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
		if (is_iface)
			offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
		else
			offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);

		/* Load the slot, which contains a function descriptor. */
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);

		/* These slots are not initialized, so fall back to the slow path until they are initialized */
		/* That happens when mono_method_add_generic_virtual_invocation () creates an IMT thunk */
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);

		/* Fastpath */
		/* Same as with iface calls */
		EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
		EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
		icall_args [0] = thunk_arg_ins;
		icall_args [1] = emit_get_rgctx_method (cfg, context_used,
												cmethod, MONO_RGCTX_INFO_METHOD);
		ftndesc_ins = mini_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
		/* Force both paths to produce the descriptor in the same vreg for the join block. */
		ftndesc_ins->dreg = ftndesc_reg;
		/*
		 * Unlike normal iface calls, these imt thunks can return NULL, i.e. when they are passed an instantiation
		 * they don't know about yet. Fall back to the slowpath in that case.
		 */
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ftndesc_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);

		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

		/* Slowpath */
		MONO_START_BB (cfg, slowpath_bb);
		icall_args [0] = vtable_ins;
		EMIT_NEW_ICONST (cfg, icall_args [1], slot);
		icall_args [2] = emit_get_rgctx_method (cfg, context_used,
												cmethod, MONO_RGCTX_INFO_METHOD);
		if (is_iface)
			ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_iface_call, icall_args);
		else
			ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_call, icall_args);
		ftndesc_ins->dreg = ftndesc_reg;
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

		/* Common case */
		MONO_START_BB (cfg, end_bb);
		return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
	}

	/*
	 * Non-optimized cases
	 */
	icall_args [0] = sp [0];
	EMIT_NEW_ICONST (cfg, icall_args [1], slot);

	icall_args [2] = emit_get_rgctx_method (cfg, context_used,
											cmethod, MONO_RGCTX_INFO_METHOD);

	/* Pass the address of a NULL-initialized pointer; the icall fills in the extra arg. */
	arg_reg = alloc_preg (cfg);
	MONO_EMIT_NEW_PCONST (cfg, arg_reg, NULL);
	EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], arg_reg, &mono_defaults.int_class->byval_arg);

	/* All other cases were handled above, so only gsharedvt calls reach here. */
	g_assert (is_gsharedvt);
	if (is_iface)
		call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call_gsharedvt, icall_args);
	else
		call_target = mono_emit_jit_icall (cfg, mono_resolve_vcall_gsharedvt, icall_args);

	/*
	 * Pass the extra argument even if the callee doesn't receive it, most
	 * calling conventions allow this.
	 */
	return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
}
6924 
6925 static gboolean
is_exception_class(MonoClass * klass)6926 is_exception_class (MonoClass *klass)
6927 {
6928 	while (klass) {
6929 		if (klass == mono_defaults.exception_class)
6930 			return TRUE;
6931 		klass = klass->parent;
6932 	}
6933 	return FALSE;
6934 }
6935 
6936 /*
6937  * is_jit_optimizer_disabled:
6938  *
6939  *   Determine whether M's assembly has a DebuggableAttribute with the
6940  * IsJITOptimizerDisabled flag set.
6941  */
static gboolean
is_jit_optimizer_disabled (MonoMethod *m)
{
	MonoError error;
	MonoAssembly *ass = m->klass->image->assembly;
	MonoCustomAttrInfo* attrs;
	MonoClass *klass;
	int i;
	gboolean val = FALSE;

	g_assert (ass);
	/*
	 * The result is cached on the assembly. The inited flag is published
	 * after the value with a memory barrier in between, so racing readers
	 * never observe the flag without the value.
	 */
	if (ass->jit_optimizer_disabled_inited)
		return ass->jit_optimizer_disabled;

	klass = mono_class_try_get_debuggable_attribute_class ();

	if (!klass) {
		/* Linked away */
		ass->jit_optimizer_disabled = FALSE;
		mono_memory_barrier ();
		ass->jit_optimizer_disabled_inited = TRUE;
		return FALSE;
	}

	attrs = mono_custom_attrs_from_assembly_checked (ass, FALSE, &error);
	mono_error_cleanup (&error); /* FIXME don't swallow the error */
	if (attrs) {
		/* Scan the assembly's custom attributes for DebuggableAttribute. */
		for (i = 0; i < attrs->num_attrs; ++i) {
			MonoCustomAttrEntry *attr = &attrs->attrs [i];
			const gchar *p;
			MonoMethodSignature *sig;

			if (!attr->ctor || attr->ctor->klass != klass)
				continue;
			/* Decode the attribute. See reflection.c */
			p = (const char*)attr->data;
			/* Custom attribute blobs start with a 16 bit 0x0001 prolog. */
			g_assert (read16 (p) == 0x0001);
			p += 2;

			// FIXME: Support named parameters
			/* Only the (bool, bool) ctor overload is handled here. */
			sig = mono_method_signature (attr->ctor);
			if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
				continue;
			/* Two boolean arguments */
			/* Skip the first bool, read the second (presumably IsJITOptimizerDisabled — matches the ctor's argument order). */
			p ++;
			val = *p;
		}
		mono_custom_attrs_free (attrs);
	}

	/* Cache the result; see the barrier comment above. */
	ass->jit_optimizer_disabled = val;
	mono_memory_barrier ();
	ass->jit_optimizer_disabled_inited = TRUE;

	return val;
}
6998 
6999 static gboolean
is_supported_tail_call(MonoCompile * cfg,MonoMethod * method,MonoMethod * cmethod,MonoMethodSignature * fsig,int call_opcode)7000 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7001 {
7002 	gboolean supported_tail_call;
7003 	int i;
7004 
7005 	supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7006 
7007 	for (i = 0; i < fsig->param_count; ++i) {
7008 		if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7009 			/* These can point to the current method's stack */
7010 			supported_tail_call = FALSE;
7011 	}
7012 	if (fsig->hasthis && cmethod->klass->valuetype)
7013 		/* this might point to the current method's stack */
7014 		supported_tail_call = FALSE;
7015 	if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7016 		supported_tail_call = FALSE;
7017 	if (cfg->method->save_lmf)
7018 		supported_tail_call = FALSE;
7019 	if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7020 		supported_tail_call = FALSE;
7021 	if (call_opcode != CEE_CALL)
7022 		supported_tail_call = FALSE;
7023 
7024 	/* Debugging support */
7025 #if 0
7026 	if (supported_tail_call) {
7027 		if (!mono_debug_count ())
7028 			supported_tail_call = FALSE;
7029 	}
7030 #endif
7031 
7032 	return supported_tail_call;
7033 }
7034 
7035 /*
7036  * is_adressable_valuetype_load
7037  *
7038  *    Returns true if a previous load can be done without doing an extra copy, given the new instruction ip and the type of the object being loaded ldtype
7039  */
7040 static gboolean
is_adressable_valuetype_load(MonoCompile * cfg,guint8 * ip,MonoType * ldtype)7041 is_adressable_valuetype_load (MonoCompile* cfg, guint8* ip, MonoType* ldtype)
7042 {
7043 	/* Avoid loading a struct just to load one of its fields */
7044 	gboolean is_load_instruction = (*ip == CEE_LDFLD);
7045 	gboolean is_in_previous_bb = ip_in_bb(cfg, cfg->cbb, ip);
7046 	gboolean is_struct = MONO_TYPE_ISSTRUCT(ldtype);
7047 	return is_load_instruction && is_in_previous_bb && is_struct;
7048 }
7049 
7050 /*
7051  * handle_ctor_call:
7052  *
7053  *   Handle calls made to ctors from NEWOBJ opcodes.
7054  */
static void
handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
				  MonoInst **sp, guint8 *ip, int *inline_costs)
{
	MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;

	/*
	 * For shared generic valuetype ctors, compute the extra rgctx/vtable
	 * argument the callee needs. NOTE(review): CHECK_TYPELOAD branches to
	 * exception_exit below on a type-load failure.
	 */
	if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
					mono_method_is_generic_sharable (cmethod, TRUE)) {
		if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
			/* Method-inflated: pass a method rgctx. */
			mono_class_vtable (cfg->domain, cmethod->klass);
			CHECK_TYPELOAD (cmethod->klass);

			vtable_arg = emit_get_rgctx_method (cfg, context_used,
												cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
		} else {
			if (context_used) {
				/* Look the vtable up through the rgctx at runtime. */
				vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
												   cmethod->klass, MONO_RGCTX_INFO_VTABLE);
			} else {
				/* Fully known class: embed the vtable as a constant. */
				MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);

				CHECK_TYPELOAD (cmethod->klass);
				EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
			}
		}
	}

	/* Avoid virtual calls to ctors if possible */
	if (mono_class_is_marshalbyref (cmethod->klass))
		callvirt_this_arg = sp [0];

	if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
		/* The ctor was replaced by an intrinsic; ctors return void. */
		g_assert (MONO_TYPE_IS_VOID (fsig->ret));
		CHECK_CFG_EXCEPTION;
	} else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
			   mono_method_check_inlining (cfg, cmethod) &&
			   !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
		int costs;

		if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
			/* Inlined: account for the 5-byte call site in the offsets/costs. */
			cfg->real_offset += 5;

			*inline_costs += costs - 5;
		} else {
			INLINE_FAILURE ("inline failure");
			// FIXME-VT: Clean this up
			if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
				GSHAREDVT_FAILURE(*ip);
			mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
		}
	} else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
		/* gsharedvt signature: call through the gsharedvt out trampoline. */
		MonoInst *addr;

		addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);

		if (cfg->llvm_only) {
			// FIXME: Avoid initializing vtable_arg
			emit_llvmonly_calli (cfg, fsig, sp, addr);
		} else {
			mini_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
		}
	} else if (context_used &&
			   ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
				 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
		MonoInst *cmethod_addr;

		/* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */

		if (cfg->llvm_only) {
			MonoInst *addr = emit_get_rgctx_method (cfg, context_used, cmethod,
													MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
			emit_llvmonly_calli (cfg, fsig, sp, addr);
		} else {
			cmethod_addr = emit_get_rgctx_method (cfg, context_used,
												  cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);

			mini_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
		}
	} else {
		/* Plain direct call to the ctor. */
		INLINE_FAILURE ("ctor call");
		ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
										  callvirt_this_arg, NULL, vtable_arg);
	}
 exception_exit:
	return;
}
7141 
/*
 * emit_setret:
 *
 *   Emit IR storing VAL as the return value of the current method (CEE_RET).
 */
static void
emit_setret (MonoCompile *cfg, MonoInst *val)
{
	MonoType *ret_type = mini_get_underlying_type (mono_method_signature (cfg->method)->ret);
	MonoInst *ins;

	if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
		/* Valuetype return: store through a variable or the hidden return-address arg. */
		MonoInst *ret_addr;

		if (!cfg->vret_addr) {
			EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
		} else {
			/* The caller passed the address where the result should be stored. */
			EMIT_NEW_RETLOADA (cfg, ret_addr);

			EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
			ins->klass = mono_class_from_mono_type (ret_type);
		}
	} else {
#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
		if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
			/* Soft-float: convert the R4 value through an icall before the arch setret. */
			MonoInst *iargs [1];
			MonoInst *conv;

			iargs [0] = val;
			conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
			mono_arch_emit_setret (cfg, cfg->method, conv);
		} else {
			mono_arch_emit_setret (cfg, cfg->method, val);
		}
#else
		/* Scalar return: let the backend move VAL into the return register(s). */
		mono_arch_emit_setret (cfg, cfg->method, val);
#endif
	}
}
7176 
7177 /*
7178  * mono_method_to_ir:
7179  *
7180  * Translate the .net IL into linear IR.
7181  *
7182  * @start_bblock: if not NULL, the starting basic block, used during inlining.
7183  * @end_bblock: if not NULL, the ending basic block, used during inlining.
7184  * @return_var: if not NULL, the place where the return value is stored, used during inlining.
7185  * @inline_args: if not NULL, contains the arguments to the inline call
7186  * @inline_offset: if not zero, the real offset from the inline call, or zero otherwise.
7187  * @is_virtual_call: whether this method is being called as a result of a call to callvirt
7188  *
7189  * This method is used to turn ECMA IL into Mono's internal Linear IR
7190  * representation.  It is used both for entire methods, as well as
7191  * inlining existing methods.  In the former case, the @start_bblock,
7192  * @end_bblock, @return_var, @inline_args are all set to NULL, and the
7193  * inline_offset is set to zero.
7194  *
7195  * Returns: the inline cost, or -1 if there was an error processing this method.
7196  */
7197 int
mono_method_to_ir(MonoCompile * cfg,MonoMethod * method,MonoBasicBlock * start_bblock,MonoBasicBlock * end_bblock,MonoInst * return_var,MonoInst ** inline_args,guint inline_offset,gboolean is_virtual_call)7198 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7199 		   MonoInst *return_var, MonoInst **inline_args,
7200 		   guint inline_offset, gboolean is_virtual_call)
7201 {
7202 	MonoError error;
7203 	MonoInst *ins, **sp, **stack_start;
7204 	MonoBasicBlock *tblock = NULL;
7205 	MonoBasicBlock *init_localsbb = NULL, *init_localsbb2 = NULL;
7206 	MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7207 	MonoMethod *cmethod, *method_definition;
7208 	MonoInst **arg_array;
7209 	MonoMethodHeader *header;
7210 	MonoImage *image;
7211 	guint32 token, ins_flag;
7212 	MonoClass *klass;
7213 	MonoClass *constrained_class = NULL;
7214 	unsigned char *ip, *end, *target, *err_pos;
7215 	MonoMethodSignature *sig;
7216 	MonoGenericContext *generic_context = NULL;
7217 	MonoGenericContainer *generic_container = NULL;
7218 	MonoType **param_types;
7219 	int i, n, start_new_bblock, dreg;
7220 	int num_calls = 0, inline_costs = 0;
7221 	int breakpoint_id = 0;
7222 	guint num_args;
7223 	GSList *class_inits = NULL;
7224 	gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7225 	int context_used;
7226 	gboolean init_locals, seq_points, skip_dead_blocks;
7227 	gboolean sym_seq_points = FALSE;
7228 	MonoDebugMethodInfo *minfo;
7229 	MonoBitSet *seq_point_locs = NULL;
7230 	MonoBitSet *seq_point_set_locs = NULL;
7231 
7232 	cfg->disable_inline = is_jit_optimizer_disabled (method);
7233 
7234 	/* serialization and xdomain stuff may need access to private fields and methods */
7235 	dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7236 	dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7237 	dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7238  	dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7239 	dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7240 	dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7241 
7242 	/* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7243 	dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7244 	dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7245 	dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7246 	dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7247 
7248 	image = method->klass->image;
7249 	header = mono_method_get_header_checked (method, &cfg->error);
7250 	if (!header) {
7251 		mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
7252 		goto exception_exit;
7253 	} else {
7254 		cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
7255 	}
7256 
7257 	generic_container = mono_method_get_generic_container (method);
7258 	sig = mono_method_signature (method);
7259 	num_args = sig->hasthis + sig->param_count;
7260 	ip = (unsigned char*)header->code;
7261 	cfg->cil_start = ip;
7262 	end = ip + header->code_size;
7263 	cfg->stat_cil_code_size += header->code_size;
7264 
7265 	seq_points = cfg->gen_seq_points && cfg->method == method;
7266 
7267 	if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7268 		/* We could hit a seq point before attaching to the JIT (#8338) */
7269 		seq_points = FALSE;
7270 	}
7271 
7272 	if (cfg->method == method)
7273 		cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7274 	if (cfg->compile_aot && cfg->coverage_info)
7275 		g_error ("Coverage profiling is not supported with AOT.");
7276 
7277 	if ((cfg->gen_sdb_seq_points && cfg->method == method) || cfg->coverage_info) {
7278 		minfo = mono_debug_lookup_method (method);
7279 		if (minfo) {
7280 			MonoSymSeqPoint *sps;
7281 			int i, n_il_offsets;
7282 
7283 			mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
7284 			seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7285 			seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7286 			sym_seq_points = TRUE;
7287 			for (i = 0; i < n_il_offsets; ++i) {
7288 				if (sps [i].il_offset < header->code_size)
7289 					mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
7290 			}
7291 			g_free (sps);
7292 
7293 			MonoDebugMethodAsyncInfo* asyncMethod = mono_debug_lookup_method_async_debug_info (method);
7294 			if (asyncMethod) {
7295 				for (i = 0; asyncMethod != NULL && i < asyncMethod->num_awaits; i++)
7296 				{
7297 					mono_bitset_set_fast (seq_point_locs, asyncMethod->resume_offsets[i]);
7298 					mono_bitset_set_fast (seq_point_locs, asyncMethod->yield_offsets[i]);
7299 				}
7300 				mono_debug_free_method_async_debug_info (asyncMethod);
7301 			}
7302 		} else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7303 			/* Methods without line number info like auto-generated property accessors */
7304 			seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7305 			seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7306 			sym_seq_points = TRUE;
7307 		}
7308 	}
7309 
7310 	/*
7311 	 * Methods without init_locals set could cause asserts in various passes
7312 	 * (#497220). To work around this, we emit dummy initialization opcodes
7313 	 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7314 	 * on some platforms.
7315 	 */
7316 	if ((cfg->opt & MONO_OPT_UNSAFE) && cfg->backend->have_dummy_init)
7317 		init_locals = header->init_locals;
7318 	else
7319 		init_locals = TRUE;
7320 
7321 	method_definition = method;
7322 	while (method_definition->is_inflated) {
7323 		MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7324 		method_definition = imethod->declaring;
7325 	}
7326 
7327 	/* SkipVerification is not allowed if core-clr is enabled */
7328 	if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7329 		dont_verify = TRUE;
7330 		dont_verify_stloc = TRUE;
7331 	}
7332 
7333 	if (sig->is_inflated)
7334 		generic_context = mono_method_get_context (method);
7335 	else if (generic_container)
7336 		generic_context = &generic_container->context;
7337 	cfg->generic_context = generic_context;
7338 
7339 	if (!cfg->gshared)
7340 		g_assert (!sig->has_type_parameters);
7341 
7342 	if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7343 		g_assert (method->is_inflated);
7344 		g_assert (mono_method_get_context (method)->method_inst);
7345 	}
7346 	if (method->is_inflated && mono_method_get_context (method)->method_inst)
7347 		g_assert (sig->generic_param_count);
7348 
7349 	if (cfg->method == method) {
7350 		cfg->real_offset = 0;
7351 	} else {
7352 		cfg->real_offset = inline_offset;
7353 	}
7354 
7355 	cfg->cil_offset_to_bb = (MonoBasicBlock **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7356 	cfg->cil_offset_to_bb_len = header->code_size;
7357 
7358 	cfg->current_method = method;
7359 
7360 	if (cfg->verbose_level > 2)
7361 		printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7362 
7363 	param_types = (MonoType **)mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7364 	if (sig->hasthis)
7365 		param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7366 	for (n = 0; n < sig->param_count; ++n)
7367 		param_types [n + sig->hasthis] = sig->params [n];
7368 	cfg->arg_types = param_types;
7369 
7370 	cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7371 	if (cfg->method == method) {
7372 		/* ENTRY BLOCK */
7373 		NEW_BBLOCK (cfg, start_bblock);
7374 		cfg->bb_entry = start_bblock;
7375 		start_bblock->cil_code = NULL;
7376 		start_bblock->cil_length = 0;
7377 
7378 		/* EXIT BLOCK */
7379 		NEW_BBLOCK (cfg, end_bblock);
7380 		cfg->bb_exit = end_bblock;
7381 		end_bblock->cil_code = NULL;
7382 		end_bblock->cil_length = 0;
7383 		end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7384 		g_assert (cfg->num_bblocks == 2);
7385 
7386 		arg_array = cfg->args;
7387 
7388 		if (header->num_clauses) {
7389 			cfg->spvars = g_hash_table_new (NULL, NULL);
7390 			cfg->exvars = g_hash_table_new (NULL, NULL);
7391 		}
7392 		/* handle exception clauses */
7393 		for (i = 0; i < header->num_clauses; ++i) {
7394 			MonoBasicBlock *try_bb;
7395 			MonoExceptionClause *clause = &header->clauses [i];
7396 			GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7397 
7398 			try_bb->real_offset = clause->try_offset;
7399 			try_bb->try_start = TRUE;
7400 			try_bb->region = ((i + 1) << 8) | clause->flags;
7401 			GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7402 			tblock->real_offset = clause->handler_offset;
7403 			tblock->flags |= BB_EXCEPTION_HANDLER;
7404 
7405 			if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
7406 				mono_create_exvar_for_offset (cfg, clause->handler_offset);
7407 			/*
7408 			 * Linking the try block with the EH block hinders inlining as we won't be able to
7409 			 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7410 			 */
7411 			if (COMPILE_LLVM (cfg))
7412 				link_bblock (cfg, try_bb, tblock);
7413 
7414 			if (*(ip + clause->handler_offset) == CEE_POP)
7415 				tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7416 
7417 			if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7418 			    clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7419 			    clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7420 				MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7421 				MONO_ADD_INS (tblock, ins);
7422 
7423 				if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
7424 					/* finally clauses already have a seq point */
7425 					/* seq points for filter clauses are emitted below */
7426 					NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7427 					MONO_ADD_INS (tblock, ins);
7428 				}
7429 
7430 				/* todo: is a fault block unsafe to optimize? */
7431 				if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7432 					tblock->flags |= BB_EXCEPTION_UNSAFE;
7433 			}
7434 
7435 			/*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7436 			  while (p < end) {
7437 			  printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7438 			  }*/
7439 			/* catch and filter blocks get the exception object on the stack */
7440 			if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7441 			    clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7442 
7443 				/* mostly like handle_stack_args (), but just sets the input args */
7444 				/* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7445 				tblock->in_scount = 1;
7446 				tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7447 				tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7448 
7449 				cfg->cbb = tblock;
7450 
7451 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
7452 				/* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
7453 				if (!cfg->compile_llvm) {
7454 					MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
7455 					ins->dreg = tblock->in_stack [0]->dreg;
7456 					MONO_ADD_INS (tblock, ins);
7457 				}
7458 #else
7459 				MonoInst *dummy_use;
7460 
7461 				/*
7462 				 * Add a dummy use for the exvar so its liveness info will be
7463 				 * correct.
7464 				 */
7465 				EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7466 #endif
7467 
7468 				if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7469 					NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7470 					MONO_ADD_INS (tblock, ins);
7471 				}
7472 
7473 				if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7474 					GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7475 					tblock->flags |= BB_EXCEPTION_HANDLER;
7476 					tblock->real_offset = clause->data.filter_offset;
7477 					tblock->in_scount = 1;
7478 					tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7479 					/* The filter block shares the exvar with the handler block */
7480 					tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7481 					MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7482 					MONO_ADD_INS (tblock, ins);
7483 				}
7484 			}
7485 
7486 			if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7487 					clause->data.catch_class &&
7488 					cfg->gshared &&
7489 					mono_class_check_context_used (clause->data.catch_class)) {
7490 				/*
7491 				 * In shared generic code with catch
7492 				 * clauses containing type variables
7493 				 * the exception handling code has to
7494 				 * be able to get to the rgctx.
7495 				 * Therefore we have to make sure that
7496 				 * the vtable/mrgctx argument (for
7497 				 * static or generic methods) or the
7498 				 * "this" argument (for non-static
7499 				 * methods) are live.
7500 				 */
7501 				if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7502 						mini_method_get_context (method)->method_inst ||
7503 						method->klass->valuetype) {
7504 					mono_get_vtable_var (cfg);
7505 				} else {
7506 					MonoInst *dummy_use;
7507 
7508 					EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
7509 				}
7510 			}
7511 		}
7512 	} else {
7513 		arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
7514 		cfg->cbb = start_bblock;
7515 		cfg->args = arg_array;
7516 		mono_save_args (cfg, sig, inline_args);
7517 	}
7518 
	/* FIRST CODE BLOCK */
	/* Create the bblock corresponding to IL offset 0 and make it current. */
	NEW_BBLOCK (cfg, tblock);
	tblock->cil_code = ip;
	cfg->cbb = tblock;
	cfg->ip = ip;

	ADD_BBLOCK (cfg, tblock);

	/* Only the outermost method gets a debugger breakpoint; inlined copies don't. */
	if (cfg->method == method) {
		breakpoint_id = mono_debugger_method_has_breakpoint (method);
		if (breakpoint_id) {
			MONO_INST_NEW (cfg, ins, OP_BREAK);
			MONO_ADD_INS (cfg->cbb, ins);
		}
	}

	/* we use a separate basic block for the initialization code */
	/* init_localsbb is spliced between start_bblock and the first code bblock,
	 * so locals/class-init code emitted later lands before any user IL runs. */
	NEW_BBLOCK (cfg, init_localsbb);
	if (cfg->method == method)
		cfg->bb_init = init_localsbb;
	init_localsbb->real_offset = cfg->real_offset;
	start_bblock->next_bb = init_localsbb;
	init_localsbb->next_bb = cfg->cbb;
	link_bblock (cfg, start_bblock, init_localsbb);
	link_bblock (cfg, init_localsbb, cfg->cbb);
	init_localsbb2 = init_localsbb;
	cfg->cbb = init_localsbb;
7546 
	if (cfg->gsharedvt && cfg->method == method) {
		/*
		 * Gsharedvt (generic sharing for valuetypes) setup for the outermost method:
		 * fetch the per-instantiation runtime info through the rgctx and allocate
		 * the variable-size locals area on the stack.
		 */
		MonoGSharedVtMethodInfo *info;
		MonoInst *var, *locals_var;
		int dreg;

		info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
		info->method = cfg->method;
		/* Initial capacity of the rgctx entry table; grown elsewhere if needed. */
		info->count_entries = 16;
		info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
		cfg->gsharedvt_info = info;

		var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
		/* prevent it from being register allocated */
		//var->flags |= MONO_INST_VOLATILE;
		cfg->gsharedvt_info_var = var;

		/* Load the MonoGSharedVtMethodRuntimeInfo* for this instantiation into VAR. */
		ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);

		/* Allocate locals */
		locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
		/* prevent it from being register allocated */
		//locals_var->flags |= MONO_INST_VOLATILE;
		cfg->gsharedvt_locals_var = locals_var;

		/* The total size of the gsharedvt locals is only known at runtime;
		 * read it from the runtime info and alloca that many bytes. */
		dreg = alloc_ireg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));

		MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
		ins->dreg = locals_var->dreg;
		ins->sreg1 = dreg;
		MONO_ADD_INS (cfg->cbb, ins);
		cfg->gsharedvt_locals_var_ins = ins;

		cfg->flags |= MONO_CFG_HAS_ALLOCA;
		/*
		if (init_locals)
			ins->flags |= MONO_INST_INIT;
		*/
	}
7587 
	/*
	 * CoreCLR security model: native calls (icalls/p/invokes) may only be JITted
	 * from platform assemblies; otherwise emit code that throws at invocation time.
	 */
	if (mono_security_core_clr_enabled ()) {
		/* check if this is native code, e.g. an icall or a p/invoke */
		if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
			MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
			if (wrapped) {
				gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
				gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);

				/* if this is a native call then it can only be JITted from platform code */
				if ((icall || pinvk) && method->klass && method->klass->image) {
					if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
						/* icalls raise SecurityException, p/invokes MethodAccessException */
						MonoException *ex = icall ? mono_get_exception_security () :
							mono_get_exception_method_access ();
						emit_throw_exception (cfg, ex);
					}
				}
			}
		}
	}
7607 
7608 	CHECK_CFG_EXCEPTION;
7609 
7610 	if (header->code_size == 0)
7611 		UNVERIFIED;
7612 
7613 	if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
7614 		ip = err_pos;
7615 		UNVERIFIED;
7616 	}
7617 
7618 	if (cfg->method == method)
7619 		mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
7620 
7621 	for (n = 0; n < header->num_locals; ++n) {
7622 		if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
7623 			UNVERIFIED;
7624 	}
7625 	class_inits = NULL;
7626 
	/* We force the vtable variable here for all shared methods
	   for the possibility that they might show up in a stack
	   trace where their exact instantiation is needed. */
	if (cfg->gshared && method == cfg->method) {
		/* Static methods, generic-method instantiations and valuetype methods get
		 * their instantiation from the vtable/mrgctx argument; instance methods on
		 * reference types recover it from "this", which must stay live. */
		if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
				mini_method_get_context (method)->method_inst ||
				method->klass->valuetype) {
			mono_get_vtable_var (cfg);
		} else {
			/* FIXME: Is there a better way to do this?
			   We need the variable live for the duration
			   of the whole method. */
			cfg->args [0]->flags |= MONO_INST_VOLATILE;
		}
	}

	/* add a check for this != NULL to inlined methods */
	/* A virtual call site guarantees a non-null receiver; when the callee is
	 * inlined that implicit check must be materialized explicitly. */
	if (is_virtual_call) {
		MonoInst *arg_ins;

		NEW_ARGLOAD (cfg, arg_ins, 0);
		MONO_ADD_INS (cfg->cbb, arg_ins);
		MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
	}

	/* Pre-compute the basic-block partition so unreachable IL can be skipped
	 * in the main decode loop below. Disabled when verification is off. */
	skip_dead_blocks = !dont_verify;
	if (skip_dead_blocks) {
		original_bb = bb = mono_basic_block_split (method, &cfg->error, header);
		CHECK_CFG_ERROR;
		g_assert (bb);
	}

	/* we use a spare stack slot in SWITCH and NEWOBJ and others */
	stack_start = sp = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
7661 
7662 	ins_flag = 0;
7663 	start_new_bblock = 0;
7664 	while (ip < end) {
7665 		if (cfg->method == method)
7666 			cfg->real_offset = ip - header->code;
7667 		else
7668 			cfg->real_offset = inline_offset;
7669 		cfg->ip = ip;
7670 
7671 		context_used = 0;
7672 
7673 		if (start_new_bblock) {
7674 			cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
7675 			if (start_new_bblock == 2) {
7676 				g_assert (ip == tblock->cil_code);
7677 			} else {
7678 				GET_BBLOCK (cfg, tblock, ip);
7679 			}
7680 			cfg->cbb->next_bb = tblock;
7681 			cfg->cbb = tblock;
7682 			start_new_bblock = 0;
7683 			for (i = 0; i < cfg->cbb->in_scount; ++i) {
7684 				if (cfg->verbose_level > 3)
7685 					printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
7686 				EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
7687 				*sp++ = ins;
7688 			}
7689 			if (class_inits)
7690 				g_slist_free (class_inits);
7691 			class_inits = NULL;
7692 		} else {
7693 			if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
7694 				link_bblock (cfg, cfg->cbb, tblock);
7695 				if (sp != stack_start) {
7696 					handle_stack_args (cfg, stack_start, sp - stack_start);
7697 					sp = stack_start;
7698 					CHECK_UNVERIFIABLE (cfg);
7699 				}
7700 				cfg->cbb->next_bb = tblock;
7701 				cfg->cbb = tblock;
7702 				for (i = 0; i < cfg->cbb->in_scount; ++i) {
7703 					if (cfg->verbose_level > 3)
7704 						printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
7705 					EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
7706 					*sp++ = ins;
7707 				}
7708 				g_slist_free (class_inits);
7709 				class_inits = NULL;
7710 			}
7711 		}
7712 
7713 		if (skip_dead_blocks) {
7714 			int ip_offset = ip - header->code;
7715 
7716 			if (ip_offset == bb->end)
7717 				bb = bb->next;
7718 
7719 			if (bb->dead) {
7720 				int op_size = mono_opcode_size (ip, end);
7721 				g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
7722 
7723 				if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
7724 
7725 				if (ip_offset + op_size == bb->end) {
7726 					MONO_INST_NEW (cfg, ins, OP_NOP);
7727 					MONO_ADD_INS (cfg->cbb, ins);
7728 					start_new_bblock = 1;
7729 				}
7730 
7731 				ip += op_size;
7732 				continue;
7733 			}
7734 		}
7735 		/*
7736 		 * Sequence points are points where the debugger can place a breakpoint.
7737 		 * Currently, we generate these automatically at points where the IL
7738 		 * stack is empty.
7739 		 */
7740 		if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
7741 			/*
7742 			 * Make methods interruptable at the beginning, and at the targets of
7743 			 * backward branches.
7744 			 * Also, do this at the start of every bblock in methods with clauses too,
7745 			 * to be able to handle instructions with inprecise control flow like
7746 			 * throw/endfinally.
7747 			 * Backward branches are handled at the end of method-to-ir ().
7748 			 */
7749 			gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
7750 			gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
7751 
7752 			/* Avoid sequence points on empty IL like .volatile */
7753 			// FIXME: Enable this
7754 			//if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
7755 			NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
7756 			if ((sp != stack_start) && !sym_seq_point)
7757 				ins->flags |= MONO_INST_NONEMPTY_STACK;
7758 			MONO_ADD_INS (cfg->cbb, ins);
7759 
7760 			if (sym_seq_points)
7761 				mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
7762 
7763 			if ((cfg->method == method) && cfg->coverage_info) {
7764 				guint32 cil_offset = ip - header->code;
7765 				gpointer counter = &cfg->coverage_info->data [cil_offset].count;
7766 				cfg->coverage_info->data [cil_offset].cil_code = ip;
7767 
7768 				if (mono_arch_opcode_supported (OP_ATOMIC_ADD_I4)) {
7769 					MonoInst *one_ins, *load_ins;
7770 
7771 					EMIT_NEW_PCONST (cfg, load_ins, counter);
7772 					EMIT_NEW_ICONST (cfg, one_ins, 1);
7773 					MONO_INST_NEW (cfg, ins, OP_ATOMIC_ADD_I4);
7774 					ins->dreg = mono_alloc_ireg (cfg);
7775 					ins->inst_basereg = load_ins->dreg;
7776 					ins->inst_offset = 0;
7777 					ins->sreg2 = one_ins->dreg;
7778 					ins->type = STACK_I4;
7779 					MONO_ADD_INS (cfg->cbb, ins);
7780 				} else {
7781 					EMIT_NEW_PCONST (cfg, ins, counter);
7782 					MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
7783 				}
7784 			}
7785 		}
7786 
7787 		cfg->cbb->real_offset = cfg->real_offset;
7788 
7789 		if (cfg->verbose_level > 3)
7790 			printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
7791 
7792 		switch (*ip) {
7793 		case CEE_NOP:
7794 			if (seq_points && !sym_seq_points && sp != stack_start) {
7795 				/*
7796 				 * The C# compiler uses these nops to notify the JIT that it should
7797 				 * insert seq points.
7798 				 */
7799 				NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
7800 				MONO_ADD_INS (cfg->cbb, ins);
7801 			}
7802 			if (cfg->keep_cil_nops)
7803 				MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
7804 			else
7805 				MONO_INST_NEW (cfg, ins, OP_NOP);
7806 			ip++;
7807 			MONO_ADD_INS (cfg->cbb, ins);
7808 			break;
7809 		case CEE_BREAK:
7810 			if (mini_should_insert_breakpoint (cfg->method)) {
7811 				ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
7812 			} else {
7813 				MONO_INST_NEW (cfg, ins, OP_NOP);
7814 			}
7815 			ip++;
7816 			MONO_ADD_INS (cfg->cbb, ins);
7817 			break;
7818 		case CEE_LDARG_0:
7819 		case CEE_LDARG_1:
7820 		case CEE_LDARG_2:
7821 		case CEE_LDARG_3:
7822 			CHECK_STACK_OVF (1);
7823 			n = (*ip)-CEE_LDARG_0;
7824 			CHECK_ARG (n);
7825 			if (is_adressable_valuetype_load (cfg, ip + 1, cfg->arg_types[n])) {
7826 				EMIT_NEW_ARGLOADA (cfg, ins, n);
7827 			} else {
7828 				EMIT_NEW_ARGLOAD (cfg, ins, n);
7829 			}
7830 			ip++;
7831 			*sp++ = ins;
7832 			break;
7833 		case CEE_LDLOC_0:
7834 		case CEE_LDLOC_1:
7835 		case CEE_LDLOC_2:
7836 		case CEE_LDLOC_3:
7837 			CHECK_STACK_OVF (1);
7838 			n = (*ip)-CEE_LDLOC_0;
7839 			CHECK_LOCAL (n);
7840 			if (is_adressable_valuetype_load (cfg, ip + 1, header->locals[n])) {
7841 				EMIT_NEW_LOCLOADA (cfg, ins, n);
7842 			} else {
7843 				EMIT_NEW_LOCLOAD (cfg, ins, n);
7844 			}
7845 			ip++;
7846 			*sp++ = ins;
7847 			break;
7848 		case CEE_STLOC_0:
7849 		case CEE_STLOC_1:
7850 		case CEE_STLOC_2:
7851 		case CEE_STLOC_3: {
7852 			CHECK_STACK (1);
7853 			n = (*ip)-CEE_STLOC_0;
7854 			CHECK_LOCAL (n);
7855 			--sp;
7856 			if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
7857 				UNVERIFIED;
7858 			emit_stloc_ir (cfg, sp, header, n);
7859 			++ip;
7860 			inline_costs += 1;
7861 			break;
7862 			}
7863 		case CEE_LDARG_S:
7864 			CHECK_OPSIZE (2);
7865 			CHECK_STACK_OVF (1);
7866 			n = ip [1];
7867 			CHECK_ARG (n);
7868 			if (is_adressable_valuetype_load (cfg, ip + 2, cfg->arg_types[n])) {
7869 				EMIT_NEW_ARGLOADA (cfg, ins, n);
7870 			} else {
7871 				EMIT_NEW_ARGLOAD (cfg, ins, n);
7872 			}
7873 			*sp++ = ins;
7874 			ip += 2;
7875 			break;
7876 		case CEE_LDARGA_S:
7877 			CHECK_OPSIZE (2);
7878 			CHECK_STACK_OVF (1);
7879 			n = ip [1];
7880 			CHECK_ARG (n);
7881 			NEW_ARGLOADA (cfg, ins, n);
7882 			MONO_ADD_INS (cfg->cbb, ins);
7883 			*sp++ = ins;
7884 			ip += 2;
7885 			break;
7886 		case CEE_STARG_S:
7887 			CHECK_OPSIZE (2);
7888 			CHECK_STACK (1);
7889 			--sp;
7890 			n = ip [1];
7891 			CHECK_ARG (n);
7892 			if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
7893 				UNVERIFIED;
7894 			emit_starg_ir (cfg, sp, n);
7895 			ip += 2;
7896 			break;
7897 		case CEE_LDLOC_S:
7898 			CHECK_OPSIZE (2);
7899 			CHECK_STACK_OVF (1);
7900 			n = ip [1];
7901 			CHECK_LOCAL (n);
7902 			if (is_adressable_valuetype_load (cfg, ip + 2, header->locals[n])) {
7903 				EMIT_NEW_LOCLOADA (cfg, ins, n);
7904 			} else {
7905 				EMIT_NEW_LOCLOAD (cfg, ins, n);
7906 			}
7907 			*sp++ = ins;
7908 			ip += 2;
7909 			break;
7910 		case CEE_LDLOCA_S: {
7911 			unsigned char *tmp_ip;
7912 			CHECK_OPSIZE (2);
7913 			CHECK_STACK_OVF (1);
7914 			CHECK_LOCAL (ip [1]);
7915 
7916 			if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
7917 				ip = tmp_ip;
7918 				inline_costs += 1;
7919 				break;
7920 			}
7921 
7922 			EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
7923 			*sp++ = ins;
7924 			ip += 2;
7925 			break;
7926 		}
7927 		case CEE_STLOC_S:
7928 			CHECK_OPSIZE (2);
7929 			CHECK_STACK (1);
7930 			--sp;
7931 			CHECK_LOCAL (ip [1]);
7932 			if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
7933 				UNVERIFIED;
7934 			emit_stloc_ir (cfg, sp, header, ip [1]);
7935 			ip += 2;
7936 			inline_costs += 1;
7937 			break;
7938 		case CEE_LDNULL:
7939 			CHECK_STACK_OVF (1);
7940 			EMIT_NEW_PCONST (cfg, ins, NULL);
7941 			ins->type = STACK_OBJ;
7942 			++ip;
7943 			*sp++ = ins;
7944 			break;
7945 		case CEE_LDC_I4_M1:
7946 			CHECK_STACK_OVF (1);
7947 			EMIT_NEW_ICONST (cfg, ins, -1);
7948 			++ip;
7949 			*sp++ = ins;
7950 			break;
7951 		case CEE_LDC_I4_0:
7952 		case CEE_LDC_I4_1:
7953 		case CEE_LDC_I4_2:
7954 		case CEE_LDC_I4_3:
7955 		case CEE_LDC_I4_4:
7956 		case CEE_LDC_I4_5:
7957 		case CEE_LDC_I4_6:
7958 		case CEE_LDC_I4_7:
7959 		case CEE_LDC_I4_8:
7960 			CHECK_STACK_OVF (1);
7961 			EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
7962 			++ip;
7963 			*sp++ = ins;
7964 			break;
7965 		case CEE_LDC_I4_S:
7966 			CHECK_OPSIZE (2);
7967 			CHECK_STACK_OVF (1);
7968 			++ip;
7969 			EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
7970 			++ip;
7971 			*sp++ = ins;
7972 			break;
7973 		case CEE_LDC_I4:
7974 			CHECK_OPSIZE (5);
7975 			CHECK_STACK_OVF (1);
7976 			EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
7977 			ip += 5;
7978 			*sp++ = ins;
7979 			break;
7980 		case CEE_LDC_I8:
7981 			CHECK_OPSIZE (9);
7982 			CHECK_STACK_OVF (1);
7983 			MONO_INST_NEW (cfg, ins, OP_I8CONST);
7984 			ins->type = STACK_I8;
7985 			ins->dreg = alloc_dreg (cfg, STACK_I8);
7986 			++ip;
7987 			ins->inst_l = (gint64)read64 (ip);
7988 			MONO_ADD_INS (cfg->cbb, ins);
7989 			ip += 8;
7990 			*sp++ = ins;
7991 			break;
		case CEE_LDC_R4: {
			/* Load an inline 32-bit float constant onto the eval stack. */
			float *f;
			gboolean use_aotconst = FALSE;

#ifdef TARGET_POWERPC
			/* FIXME: Clean this up */
			if (cfg->compile_aot)
				use_aotconst = TRUE;
#endif

			/* FIXME: we should really allocate this only late in the compilation process */
			/* NOTE(review): the domain allocation happens before CHECK_OPSIZE /
			 * CHECK_STACK_OVF, so on invalid IL the slot stays allocated in the
			 * domain pool (freed only with the domain) — harmless but wasteful. */
			f = (float *)mono_domain_alloc (cfg->domain, sizeof (float));
			CHECK_OPSIZE (5);
			CHECK_STACK_OVF (1);

			if (use_aotconst) {
				/* AOT: reference the constant through a patchable slot and load it. */
				MonoInst *cons;
				int dreg;

				EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);

				dreg = alloc_freg (cfg);
				EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
				ins->type = cfg->r4_stack_type;
			} else {
				MONO_INST_NEW (cfg, ins, OP_R4CONST);
				ins->type = cfg->r4_stack_type;
				ins->dreg = alloc_dreg (cfg, STACK_R8);
				ins->inst_p0 = f;
				MONO_ADD_INS (cfg->cbb, ins);
			}
			++ip;
			/* The constant value is filled in after INS already points at F. */
			readr4 (ip, f);
			ip += 4;
			*sp++ = ins;
			break;
		}
8029 		case CEE_LDC_R8: {
8030 			double *d;
8031 			gboolean use_aotconst = FALSE;
8032 
8033 #ifdef TARGET_POWERPC
8034 			/* FIXME: Clean this up */
8035 			if (cfg->compile_aot)
8036 				use_aotconst = TRUE;
8037 #endif
8038 
8039 			/* FIXME: we should really allocate this only late in the compilation process */
8040 			d = (double *)mono_domain_alloc (cfg->domain, sizeof (double));
8041 			CHECK_OPSIZE (9);
8042 			CHECK_STACK_OVF (1);
8043 
8044 			if (use_aotconst) {
8045 				MonoInst *cons;
8046 				int dreg;
8047 
8048 				EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8049 
8050 				dreg = alloc_freg (cfg);
8051 				EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8052 				ins->type = STACK_R8;
8053 			} else {
8054 				MONO_INST_NEW (cfg, ins, OP_R8CONST);
8055 				ins->type = STACK_R8;
8056 				ins->dreg = alloc_dreg (cfg, STACK_R8);
8057 				ins->inst_p0 = d;
8058 				MONO_ADD_INS (cfg->cbb, ins);
8059 			}
8060 			++ip;
8061 			readr8 (ip, d);
8062 			ip += 8;
8063 			*sp++ = ins;
8064 			break;
8065 		}
		case CEE_DUP: {
			/*
			 * Duplicate the top-of-stack value: spill it to a fresh temp once,
			 * then push two loads of that temp so each stack slot has its own
			 * instruction/vreg.
			 */
			MonoInst *temp, *store;
			CHECK_STACK (1);
			CHECK_STACK_OVF (1);
			sp--;
			ins = *sp;

			temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
			EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);

			EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
			*sp++ = ins;

			EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
			*sp++ = ins;

			++ip;
			inline_costs += 2;
			break;
		}
		case CEE_POP:
			/* Discard the top-of-stack value. */
			CHECK_STACK (1);
			ip++;
			--sp;

#ifdef TARGET_X86
			if (sp [0]->type == STACK_R8)
				/* we need to pop the value from the x86 FP stack */
				MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
#endif
			break;
8097 		case CEE_JMP: {
8098 			MonoCallInst *call;
8099 			MonoMethodSignature *fsig;
8100 			int i, n;
8101 
8102 			INLINE_FAILURE ("jmp");
8103 			GSHAREDVT_FAILURE (*ip);
8104 
8105 			CHECK_OPSIZE (5);
8106 			if (stack_start != sp)
8107 				UNVERIFIED;
8108 			token = read32 (ip + 1);
8109 			/* FIXME: check the signature matches */
8110 			cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8111 			CHECK_CFG_ERROR;
8112 
8113 			if (cfg->gshared && mono_method_check_context_used (cmethod))
8114 				GENERIC_SHARING_FAILURE (CEE_JMP);
8115 
8116 			mini_profiler_emit_tail_call (cfg, cmethod);
8117 
8118 			fsig = mono_method_signature (cmethod);
8119 			n = fsig->param_count + fsig->hasthis;
8120 			if (cfg->llvm_only) {
8121 				MonoInst **args;
8122 
8123 				args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8124 				for (i = 0; i < n; ++i)
8125 					EMIT_NEW_ARGLOAD (cfg, args [i], i);
8126 				ins = mono_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
8127 				/*
8128 				 * The code in mono-basic-block.c treats the rest of the code as dead, but we
8129 				 * have to emit a normal return since llvm expects it.
8130 				 */
8131 				if (cfg->ret)
8132 					emit_setret (cfg, ins);
8133 				MONO_INST_NEW (cfg, ins, OP_BR);
8134 				ins->inst_target_bb = end_bblock;
8135 				MONO_ADD_INS (cfg->cbb, ins);
8136 				link_bblock (cfg, cfg->cbb, end_bblock);
8137 				ip += 5;
8138 				break;
8139 			} else if (cfg->backend->have_op_tail_call) {
8140 				/* Handle tail calls similarly to calls */
8141 				DISABLE_AOT (cfg);
8142 
8143 				MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8144 				call->method = cmethod;
8145 				call->tail_call = TRUE;
8146 				call->signature = mono_method_signature (cmethod);
8147 				call->args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8148 				call->inst.inst_p0 = cmethod;
8149 				for (i = 0; i < n; ++i)
8150 					EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8151 
8152 				if (mini_type_is_vtype (mini_get_underlying_type (call->signature->ret)))
8153 					call->vret_var = cfg->vret_addr;
8154 
8155 				mono_arch_emit_call (cfg, call);
8156 				cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8157 				MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
8158 			} else {
8159 				for (i = 0; i < num_args; ++i)
8160 					/* Prevent arguments from being optimized away */
8161 					arg_array [i]->flags |= MONO_INST_VOLATILE;
8162 
8163 				MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8164 				ins = (MonoInst*)call;
8165 				ins->inst_p0 = cmethod;
8166 				MONO_ADD_INS (cfg->cbb, ins);
8167 			}
8168 
8169 			ip += 5;
8170 			start_new_bblock = 1;
8171 			break;
8172 		}
8173 		case CEE_CALLI: {
8174 			MonoInst *addr;
8175 			MonoMethodSignature *fsig;
8176 
8177 			CHECK_OPSIZE (5);
8178 			token = read32 (ip + 1);
8179 
8180 			ins = NULL;
8181 
8182 			//GSHAREDVT_FAILURE (*ip);
8183 			cmethod = NULL;
8184 			CHECK_STACK (1);
8185 			--sp;
8186 			addr = *sp;
8187 			fsig = mini_get_signature (method, token, generic_context, &cfg->error);
8188 			CHECK_CFG_ERROR;
8189 
8190 			if (method->dynamic && fsig->pinvoke) {
8191 				MonoInst *args [3];
8192 
8193 				/*
8194 				 * This is a call through a function pointer using a pinvoke
8195 				 * signature. Have to create a wrapper and call that instead.
8196 				 * FIXME: This is very slow, need to create a wrapper at JIT time
8197 				 * instead based on the signature.
8198 				 */
8199 				EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8200 				EMIT_NEW_PCONST (cfg, args [1], fsig);
8201 				args [2] = addr;
8202 				addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8203 			}
8204 
8205 			n = fsig->param_count + fsig->hasthis;
8206 
8207 			CHECK_STACK (n);
8208 
8209 			//g_assert (!virtual_ || fsig->hasthis);
8210 
8211 			sp -= n;
8212 
8213 			if (!(cfg->method->wrapper_type && cfg->method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD) && check_call_signature (cfg, fsig, sp))
8214 				UNVERIFIED;
8215 
8216 			inline_costs += 10 * num_calls++;
8217 
8218 			/*
8219 			 * Making generic calls out of gsharedvt methods.
8220 			 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8221 			 * patching gshared method addresses into a gsharedvt method.
8222 			 */
8223 			if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
8224 				/*
8225 				 * We pass the address to the gsharedvt trampoline in the rgctx reg
8226 				 */
8227 				MonoInst *callee = addr;
8228 
8229 				if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8230 					/* Not tested */
8231 					GSHAREDVT_FAILURE (*ip);
8232 
8233 				if (cfg->llvm_only)
8234 					// FIXME:
8235 					GSHAREDVT_FAILURE (*ip);
8236 
8237 				addr = emit_get_rgctx_sig (cfg, context_used,
8238 											  fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8239 				ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8240 				goto calli_end;
8241 			}
8242 
8243 			/* Prevent inlining of methods with indirect calls */
8244 			INLINE_FAILURE ("indirect call");
8245 
8246 			if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8247 				MonoJumpInfoType info_type;
8248 				gpointer info_data;
8249 
8250 				/*
8251 				 * Instead of emitting an indirect call, emit a direct call
8252 				 * with the contents of the aotconst as the patch info.
8253 				 */
8254 				if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8255 					info_type = (MonoJumpInfoType)addr->inst_c1;
8256 					info_data = addr->inst_p0;
8257 				} else {
8258 					info_type = (MonoJumpInfoType)addr->inst_right->inst_c1;
8259 					info_data = addr->inst_right->inst_left;
8260 				}
8261 
8262 				if (info_type == MONO_PATCH_INFO_ICALL_ADDR) {
8263 					ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR_CALL, info_data, fsig, sp);
8264 					NULLIFY_INS (addr);
8265 					goto calli_end;
8266 				} else if (info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8267 					ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8268 					NULLIFY_INS (addr);
8269 					goto calli_end;
8270 				}
8271 			}
8272 			ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8273 
8274 			calli_end:
8275 
8276 			/* End of call, INS should contain the result of the call, if any */
8277 
8278 			if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8279 				g_assert (ins);
8280 				*sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8281 			}
8282 
8283 			CHECK_CFG_EXCEPTION;
8284 
8285 			ip += 5;
8286 			ins_flag = 0;
8287 			constrained_class = NULL;
8288 			break;
8289 		}
8290 		case CEE_CALL:
8291 		case CEE_CALLVIRT: {
8292 			MonoInst *addr = NULL;
8293 			MonoMethodSignature *fsig = NULL;
8294 			int array_rank = 0;
8295 			int virtual_ = *ip == CEE_CALLVIRT;
8296 			gboolean pass_imt_from_rgctx = FALSE;
8297 			MonoInst *imt_arg = NULL;
8298 			MonoInst *keep_this_alive = NULL;
8299 			gboolean pass_vtable = FALSE;
8300 			gboolean pass_mrgctx = FALSE;
8301 			MonoInst *vtable_arg = NULL;
8302 			gboolean check_this = FALSE;
8303 			gboolean supported_tail_call = FALSE;
8304 			gboolean tail_call = FALSE;
8305 			gboolean need_seq_point = FALSE;
8306 			guint32 call_opcode = *ip;
8307 			gboolean emit_widen = TRUE;
8308 			gboolean push_res = TRUE;
8309 			gboolean skip_ret = FALSE;
8310 			gboolean delegate_invoke = FALSE;
8311 			gboolean direct_icall = FALSE;
8312 			gboolean constrained_partial_call = FALSE;
8313 			MonoMethod *cil_method;
8314 
8315 			CHECK_OPSIZE (5);
8316 			token = read32 (ip + 1);
8317 
8318 			ins = NULL;
8319 
8320 			cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8321 			CHECK_CFG_ERROR;
8322 
8323 			cil_method = cmethod;
8324 
8325 			if (constrained_class) {
8326 				if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8327 					if (!mini_is_gsharedvt_klass (constrained_class)) {
8328 						g_assert (!cmethod->klass->valuetype);
8329 						if (!mini_type_is_reference (&constrained_class->byval_arg))
8330 							constrained_partial_call = TRUE;
8331 					}
8332 				}
8333 
8334 				if (method->wrapper_type != MONO_WRAPPER_NONE) {
8335 					if (cfg->verbose_level > 2)
8336 						printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8337 					if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
8338 						   constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
8339 						  cfg->gshared)) {
8340 						cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
8341 						CHECK_CFG_ERROR;
8342 					}
8343 				} else {
8344 					if (cfg->verbose_level > 2)
8345 						printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8346 
8347 					if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8348 						/*
8349 						 * This is needed since get_method_constrained can't find
8350 						 * the method in klass representing a type var.
8351 						 * The type var is guaranteed to be a reference type in this
8352 						 * case.
8353 						 */
8354 						if (!mini_is_gsharedvt_klass (constrained_class))
8355 							g_assert (!cmethod->klass->valuetype);
8356 					} else {
8357 						cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
8358 						CHECK_CFG_ERROR;
8359 					}
8360 				}
8361 
8362 				if (constrained_class->enumtype && !strcmp (cmethod->name, "GetHashCode")) {
8363 					/* Use the corresponding method from the base type to avoid boxing */
8364 					MonoType *base_type = mono_class_enum_basetype (constrained_class);
8365 					g_assert (base_type);
8366 					constrained_class = mono_class_from_mono_type (base_type);
8367 					cmethod = mono_class_get_method_from_name (constrained_class, cmethod->name, 0);
8368 					g_assert (cmethod);
8369 				}
8370 			}
8371 
8372 			if (!dont_verify && !cfg->skip_visibility) {
8373 				MonoMethod *target_method = cil_method;
8374 				if (method->is_inflated) {
8375 					target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
8376 					CHECK_CFG_ERROR;
8377 				}
8378 				if (!mono_method_can_access_method (method_definition, target_method) &&
8379 					!mono_method_can_access_method (method, cil_method))
8380 					emit_method_access_failure (cfg, method, cil_method);
8381 			}
8382 
8383 			if (mono_security_core_clr_enabled ())
8384 				ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
8385 
8386 			if (!virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8387 				/* MS.NET seems to silently convert this to a callvirt */
8388 				virtual_ = 1;
8389 
8390 			{
8391 				/*
8392 				 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8393 				 * converts to a callvirt.
8394 				 *
8395 				 * tests/bug-515884.il is an example of this behavior
8396 				 */
8397 				const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8398 				const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8399 				if (!virtual_ && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8400 					virtual_ = 1;
8401 			}
8402 
8403 			if (!cmethod->klass->inited)
8404 				if (!mono_class_init (cmethod->klass))
8405 					TYPE_LOAD_ERROR (cmethod->klass);
8406 
8407 			fsig = mono_method_signature (cmethod);
8408 			if (!fsig)
8409 				LOAD_ERROR;
8410 			if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8411 				mini_class_is_system_array (cmethod->klass)) {
8412 				array_rank = cmethod->klass->rank;
8413 			} else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && direct_icalls_enabled (cfg)) {
8414 				direct_icall = TRUE;
8415 			} else if (fsig->pinvoke) {
8416 				MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
8417 				fsig = mono_method_signature (wrapper);
8418 			} else if (constrained_class) {
8419 			} else {
8420 				fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8421 				CHECK_CFG_ERROR;
8422 			}
8423 
8424 			if (cfg->llvm_only && !cfg->method->wrapper_type && (!cmethod || cmethod->is_inflated))
8425 				cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
8426 
			/*
			 * Pre-check for the Monitor.Enter () try-block extension workaround
			 * implemented further below (see the longer comment there): if this
			 * call is immediately followed by a finally-protected region, disable
			 * the direct icall, because a direct icall is followed by an exception
			 * check and the try block could then not be extended over the call.
			 */
			if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
				MonoBasicBlock *tbb;

				/* The bblock starting right after this call instruction (opcode + token = 5 bytes) */
				GET_BBLOCK (cfg, tbb, ip + 5);
				if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
					/*
					 * We want to extend the try block to cover the call, but we can't do it if the
					 * call is made directly since it's followed by an exception check.
					 */
					direct_icall = FALSE;
				}
			}
8440 
8441 			mono_save_token_info (cfg, image, token, cil_method);
8442 
8443 			if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8444 				need_seq_point = TRUE;
8445 
8446 			/* Don't support calls made using type arguments for now */
8447 			/*
8448 			  if (cfg->gsharedvt) {
8449 			  if (mini_is_gsharedvt_signature (fsig))
8450 			  GSHAREDVT_FAILURE (*ip);
8451 			  }
8452 			*/
8453 
8454 			if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8455 				g_assert_not_reached ();
8456 
8457 			n = fsig->param_count + fsig->hasthis;
8458 
8459 			if (!cfg->gshared && mono_class_is_gtd (cmethod->klass))
8460 				UNVERIFIED;
8461 
8462 			if (!cfg->gshared)
8463 				g_assert (!mono_method_check_context_used (cmethod));
8464 
8465 			CHECK_STACK (n);
8466 
8467 			//g_assert (!virtual_ || fsig->hasthis);
8468 
8469 			sp -= n;
8470 
8471 			if (cmethod && cmethod->klass->image == mono_defaults.corlib && !strcmp (cmethod->klass->name, "ThrowHelper"))
8472 				cfg->cbb->out_of_line = TRUE;
8473 
8474 			/*
8475 			 * We have the `constrained.' prefix opcode.
8476 			 */
8477 			if (constrained_class) {
8478 				if (mini_is_gsharedvt_klass (constrained_class)) {
8479 					if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
8480 						/* The 'Own method' case below */
8481 					} else if (cmethod->klass->image != mono_defaults.corlib && !mono_class_is_interface (cmethod->klass) && !cmethod->klass->valuetype) {
8482 						/* 'The type parameter is instantiated as a reference type' case below. */
8483 					} else {
8484 						ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
8485 						CHECK_CFG_EXCEPTION;
8486 						g_assert (ins);
8487 						goto call_end;
8488 					}
8489 				}
8490 
8491 				if (constrained_partial_call) {
8492 					gboolean need_box = TRUE;
8493 
8494 					/*
8495 					 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
8496 					 * called method is not known at compile time either. The called method could end up being
8497 					 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
8498 					 * to box the receiver.
8499 					 * A simple solution would be to box always and make a normal virtual call, but that would
8500 					 * be bad performance wise.
8501 					 */
8502 					if (mono_class_is_interface (cmethod->klass) && mono_class_is_ginst (cmethod->klass)) {
8503 						/*
						 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing necessary.
8505 						 */
8506 						need_box = FALSE;
8507 					}
8508 
8509 					if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8510 						/* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
8511 						EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8512 						ins->klass = constrained_class;
8513 						sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8514 						CHECK_CFG_EXCEPTION;
8515 					} else if (need_box) {
8516 						MonoInst *box_type;
8517 						MonoBasicBlock *is_ref_bb, *end_bb;
8518 						MonoInst *nonbox_call;
8519 
8520 						/*
						 * Determine at runtime whether the called method is defined on object/valuetype/enum, and emit a boxing call
8522 						 * if needed.
8523 						 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
8524 						 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
8525 						 */
8526 						addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8527 
8528 						NEW_BBLOCK (cfg, is_ref_bb);
8529 						NEW_BBLOCK (cfg, end_bb);
8530 
8531 						box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
8532 						MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
8533 						MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
8534 
8535 						/* Non-ref case */
8536 						if (cfg->llvm_only)
8537 							/* addr is an ftndesc in this case */
8538 							nonbox_call = emit_llvmonly_calli (cfg, fsig, sp, addr);
8539 						else
8540 							nonbox_call = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8541 
8542 						MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8543 
8544 						/* Ref case */
8545 						MONO_START_BB (cfg, is_ref_bb);
8546 						EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8547 						ins->klass = constrained_class;
8548 						sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8549 						if (cfg->llvm_only)
8550 							ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
8551 						else
8552 							ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8553 
8554 						MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8555 
8556 						MONO_START_BB (cfg, end_bb);
8557 						cfg->cbb = end_bb;
8558 
8559 						nonbox_call->dreg = ins->dreg;
8560 						goto call_end;
8561 					} else {
8562 						g_assert (mono_class_is_interface (cmethod->klass));
8563 						addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8564 						if (cfg->llvm_only)
8565 							ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
8566 						else
8567 							ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8568 						goto call_end;
8569 					}
8570 				} else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8571 					/*
8572 					 * The type parameter is instantiated as a valuetype,
8573 					 * but that type doesn't override the method we're
8574 					 * calling, so we need to box `this'.
8575 					 */
8576 					EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8577 					ins->klass = constrained_class;
8578 					sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8579 					CHECK_CFG_EXCEPTION;
8580 				} else if (!constrained_class->valuetype) {
8581 					int dreg = alloc_ireg_ref (cfg);
8582 
8583 					/*
8584 					 * The type parameter is instantiated as a reference
8585 					 * type.  We have a managed pointer on the stack, so
8586 					 * we need to dereference it here.
8587 					 */
8588 					EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8589 					ins->type = STACK_OBJ;
8590 					sp [0] = ins;
8591 				} else {
8592 					if (cmethod->klass->valuetype) {
8593 						/* Own method */
8594 					} else {
8595 						/* Interface method */
8596 						int ioffset, slot;
8597 
8598 						mono_class_setup_vtable (constrained_class);
8599 						CHECK_TYPELOAD (constrained_class);
8600 						ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
8601 						if (ioffset == -1)
8602 							TYPE_LOAD_ERROR (constrained_class);
8603 						slot = mono_method_get_vtable_slot (cmethod);
8604 						if (slot == -1)
8605 							TYPE_LOAD_ERROR (cmethod->klass);
8606 						cmethod = constrained_class->vtable [ioffset + slot];
8607 
8608 						if (cmethod->klass == mono_defaults.enum_class) {
8609 							/* Enum implements some interfaces, so treat this as the first case */
8610 							EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8611 							ins->klass = constrained_class;
8612 							sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8613 							CHECK_CFG_EXCEPTION;
8614 						}
8615 					}
8616 					virtual_ = 0;
8617 				}
8618 				constrained_class = NULL;
8619 			}
8620 
8621 			if (check_call_signature (cfg, fsig, sp))
8622 				UNVERIFIED;
8623 
8624 			if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8625 				delegate_invoke = TRUE;
8626 
8627 			if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8628 				if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8629 					type_to_eval_stack_type ((cfg), fsig->ret, ins);
8630 					emit_widen = FALSE;
8631 				}
8632 
8633 				goto call_end;
8634 			}
8635 
8636 			/*
8637 			 * If the callee is a shared method, then its static cctor
8638 			 * might not get called after the call was patched.
8639 			 */
8640 			if (cfg->gshared && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8641 				emit_class_init (cfg, cmethod->klass);
8642 				CHECK_TYPELOAD (cmethod->klass);
8643 			}
8644 
8645 			check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8646 
8647 			if (cfg->gshared) {
8648 				MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8649 
8650 				context_used = mini_method_check_context_used (cfg, cmethod);
8651 
8652 				if (context_used && mono_class_is_interface (cmethod->klass)) {
8653 					/* Generic method interface
8654 					   calls are resolved via a
8655 					   helper function and don't
8656 					   need an imt. */
8657 					if (!cmethod_context || !cmethod_context->method_inst)
8658 						pass_imt_from_rgctx = TRUE;
8659 				}
8660 
8661 				/*
8662 				 * If a shared method calls another
8663 				 * shared method then the caller must
8664 				 * have a generic sharing context
8665 				 * because the magic trampoline
8666 				 * requires it.  FIXME: We shouldn't
8667 				 * have to force the vtable/mrgctx
8668 				 * variable here.  Instead there
8669 				 * should be a flag in the cfg to
8670 				 * request a generic sharing context.
8671 				 */
8672 				if (context_used &&
8673 						((cfg->method->flags & METHOD_ATTRIBUTE_STATIC) || cfg->method->klass->valuetype))
8674 					mono_get_vtable_var (cfg);
8675 			}
8676 
8677 			if (pass_vtable) {
8678 				if (context_used) {
8679 					vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8680 				} else {
8681 					MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8682 
8683 					CHECK_TYPELOAD (cmethod->klass);
8684 					EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8685 				}
8686 			}
8687 
8688 			if (pass_mrgctx) {
8689 				g_assert (!vtable_arg);
8690 
8691 				if (!cfg->compile_aot) {
8692 					/*
8693 					 * emit_get_rgctx_method () calls mono_class_vtable () so check
8694 					 * for type load errors before.
8695 					 */
8696 					mono_class_setup_vtable (cmethod->klass);
8697 					CHECK_TYPELOAD (cmethod->klass);
8698 				}
8699 
8700 				vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8701 
8702 				/* !marshalbyref is needed to properly handle generic methods + remoting */
8703 				if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
8704 					 MONO_METHOD_IS_FINAL (cmethod)) &&
8705 					!mono_class_is_marshalbyref (cmethod->klass)) {
8706 					if (virtual_)
8707 						check_this = TRUE;
8708 					virtual_ = 0;
8709 				}
8710 			}
8711 
8712 			if (pass_imt_from_rgctx) {
8713 				g_assert (!pass_vtable);
8714 
8715 				imt_arg = emit_get_rgctx_method (cfg, context_used,
8716 					cmethod, MONO_RGCTX_INFO_METHOD);
8717 			}
8718 
8719 			if (check_this)
8720 				MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8721 
8722 			/* Calling virtual generic methods */
8723 			if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
8724 		 	    !(MONO_METHOD_IS_FINAL (cmethod) &&
8725 			      cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
8726 			    fsig->generic_param_count &&
8727 				!(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
8728 				!cfg->llvm_only) {
8729 				MonoInst *this_temp, *this_arg_temp, *store;
8730 				MonoInst *iargs [4];
8731 
8732 				g_assert (fsig->is_inflated);
8733 
8734 				/* Prevent inlining of methods that contain indirect calls */
8735 				INLINE_FAILURE ("virtual generic call");
8736 
8737 				if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
8738 					GSHAREDVT_FAILURE (*ip);
8739 
8740 				if (cfg->backend->have_generalized_imt_trampoline && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
8741 					g_assert (!imt_arg);
8742 					if (!context_used)
8743 						g_assert (cmethod->is_inflated);
8744 					imt_arg = emit_get_rgctx_method (cfg, context_used,
8745 													 cmethod, MONO_RGCTX_INFO_METHOD);
8746 					ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
8747 				} else {
8748 					this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
8749 					NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
8750 					MONO_ADD_INS (cfg->cbb, store);
8751 
8752 					/* FIXME: This should be a managed pointer */
8753 					this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8754 
8755 					EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
8756 					iargs [1] = emit_get_rgctx_method (cfg, context_used,
8757 													   cmethod, MONO_RGCTX_INFO_METHOD);
8758 					EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
8759 					addr = mono_emit_jit_icall (cfg,
8760 												mono_helper_compile_generic_method, iargs);
8761 
8762 					EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
8763 
8764 					ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8765 				}
8766 
8767 				goto call_end;
8768 			}
8769 
8770 			/*
8771 			 * Implement a workaround for the inherent races involved in locking:
8772 			 * Monitor.Enter ()
8773 			 * try {
8774 			 * } finally {
8775 			 *    Monitor.Exit ()
8776 			 * }
8777 			 * If a thread abort happens between the call to Monitor.Enter () and the start of the
8778 			 * try block, the Exit () won't be executed, see:
8779 			 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
8780 			 * To work around this, we extend such try blocks to include the last x bytes
8781 			 * of the Monitor.Enter () call.
8782 			 */
8783 			if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8784 				MonoBasicBlock *tbb;
8785 
8786 				GET_BBLOCK (cfg, tbb, ip + 5);
8787 				/*
8788 				 * Only extend try blocks with a finally, to avoid catching exceptions thrown
8789 				 * from Monitor.Enter like ArgumentNullException.
8790 				 */
8791 				if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8792 					/* Mark this bblock as needing to be extended */
8793 					tbb->extend_try_block = TRUE;
8794 				}
8795 			}
8796 
8797 			/* Conversion to a JIT intrinsic */
8798 			if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
8799 				if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8800 					type_to_eval_stack_type ((cfg), fsig->ret, ins);
8801 					emit_widen = FALSE;
8802 				}
8803 				goto call_end;
8804 			}
8805 			CHECK_CFG_ERROR;
8806 
8807 			/* Inlining */
8808 			if ((cfg->opt & MONO_OPT_INLINE) &&
8809 				(!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
8810 			    mono_method_check_inlining (cfg, cmethod)) {
8811 				int costs;
8812 				gboolean always = FALSE;
8813 
8814 				if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
8815 					(cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
8816 					/* Prevent inlining of methods that call wrappers */
8817 					INLINE_FAILURE ("wrapper call");
8818 					cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
8819 					always = TRUE;
8820 				}
8821 
8822 				costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
8823 				if (costs) {
8824 					cfg->real_offset += 5;
8825 
8826  					if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8827 						/* *sp is already set by inline_method */
8828  						sp++;
8829 						push_res = FALSE;
8830 					}
8831 
8832 					inline_costs += costs;
8833 
8834 					goto call_end;
8835 				}
8836 			}
8837 
			/*
			 * Tail recursion elimination: a self-call immediately followed by
			 * CEE_RET is turned into argument stores plus a branch back to the
			 * start of the method.
			 */
			if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
				gboolean has_vtargs = FALSE;
				int i;

				/* Prevent inlining of methods with tail calls (the call stack would be altered) */
				INLINE_FAILURE ("tail call");

				/* keep it simple: bail out if any parameter is a value type */
				for (i =  fsig->param_count - 1; i >= 0; i--) {
					if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
						has_vtargs = TRUE;
				}

				if (!has_vtargs) {
					/* Flush the pending sequence point before emitting the branch */
					if (need_seq_point) {
						emit_seq_point (cfg, method, ip, FALSE, TRUE);
						need_seq_point = FALSE;
					}
					/* Store the new argument values into the argument variables... */
					for (i = 0; i < n; ++i)
						EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);

					mini_profiler_emit_tail_call (cfg, cmethod);

					/* ...then branch back to the method's first bblock */
					MONO_INST_NEW (cfg, ins, OP_BR);
					MONO_ADD_INS (cfg->cbb, ins);
					tblock = start_bblock->out_bb [0];
					link_bblock (cfg, cfg->cbb, tblock);
					ins->inst_target_bb = tblock;
					start_new_bblock = 1;

					/* skip the CEE_RET, too */
					if (ip_in_bb (cfg, cfg->cbb, ip + 5))
						skip_ret = TRUE;
					push_res = FALSE;
					goto call_end;
				}
			}
8876 
8877 			inline_costs += 10 * num_calls++;
8878 
8879 			/*
8880 			 * Synchronized wrappers.
			 * It's hard to determine where to replace a method with its synchronized
8882 			 * wrapper without causing an infinite recursion. The current solution is
8883 			 * to add the synchronized wrapper in the trampolines, and to
8884 			 * change the called method to a dummy wrapper, and resolve that wrapper
8885 			 * to the real method in mono_jit_compile_method ().
8886 			 */
8887 			if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8888 				MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
8889 				if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
8890 					cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
8891 			}
8892 
8893 			/*
8894 			 * Making generic calls out of gsharedvt methods.
8895 			 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8896 			 * patching gshared method addresses into a gsharedvt method.
8897 			 */
8898 			if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || mono_class_is_ginst (cmethod->klass)) &&
8899 				!(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY) &&
8900 				(!(cfg->llvm_only && virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)))) {
8901 				MonoRgctxInfoType info_type;
8902 
8903 				if (virtual_) {
8904 					//if (mono_class_is_interface (cmethod->klass))
8905 						//GSHAREDVT_FAILURE (*ip);
8906 					// disable for possible remoting calls
8907 					if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
8908 						GSHAREDVT_FAILURE (*ip);
8909 					if (fsig->generic_param_count) {
8910 						/* virtual generic call */
8911 						g_assert (!imt_arg);
8912 						/* Same as the virtual generic case above */
8913 						imt_arg = emit_get_rgctx_method (cfg, context_used,
8914 														 cmethod, MONO_RGCTX_INFO_METHOD);
8915 						/* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
8916 						vtable_arg = NULL;
8917 					} else if (mono_class_is_interface (cmethod->klass) && !imt_arg) {
8918 						/* This can happen when we call a fully instantiated iface method */
8919 						imt_arg = emit_get_rgctx_method (cfg, context_used,
8920 														 cmethod, MONO_RGCTX_INFO_METHOD);
8921 						vtable_arg = NULL;
8922 					}
8923 				}
8924 
8925 				if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
8926 					keep_this_alive = sp [0];
8927 
8928 				if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
8929 					info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
8930 				else
8931 					info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
8932 				addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
8933 
8934 				if (cfg->llvm_only) {
8935 					// FIXME: Avoid initializing vtable_arg
8936 					ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
8937 				} else {
8938 					ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8939 				}
8940 				goto call_end;
8941 			}
8942 
8943 			/* Generic sharing */
8944 
8945 			/*
8946 			 * Use this if the callee is gsharedvt sharable too, since
8947 			 * at runtime we might find an instantiation so the call cannot
8948 			 * be patched (the 'no_patch' code path in mini-trampolines.c).
8949 			 */
8950 			if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
8951 				(!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
8952 				 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
8953 				(!virtual_ || MONO_METHOD_IS_FINAL (cmethod) ||
8954 				 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
8955 				INLINE_FAILURE ("gshared");
8956 
8957 				g_assert (cfg->gshared && cmethod);
8958 				g_assert (!addr);
8959 
8960 				/*
8961 				 * We are compiling a call to a
8962 				 * generic method from shared code,
8963 				 * which means that we have to look up
8964 				 * the method in the rgctx and do an
8965 				 * indirect call.
8966 				 */
8967 				if (fsig->hasthis)
8968 					MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8969 
8970 				if (cfg->llvm_only) {
8971 					if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig))
8972 						addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER);
8973 					else
8974 						addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8975 					// FIXME: Avoid initializing imt_arg/vtable_arg
8976 					ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
8977 				} else {
8978 					addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8979 					ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8980 				}
8981 				goto call_end;
8982 			}
8983 
8984 			/* Direct calls to icalls */
8985 			if (direct_icall) {
8986 				MonoMethod *wrapper;
8987 				int costs;
8988 
8989 				/* Inline the wrapper */
8990 				wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
8991 
8992 				costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
8993 				g_assert (costs > 0);
8994 				cfg->real_offset += 5;
8995 
8996 				if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8997 					/* *sp is already set by inline_method */
8998 					sp++;
8999 					push_res = FALSE;
9000 				}
9001 
9002 				inline_costs += costs;
9003 
9004 				goto call_end;
9005 			}
9006 
9007 			/* Array methods */
9008 			if (array_rank) {
9009 				MonoInst *addr;
9010 
9011 				if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9012 					MonoInst *val = sp [fsig->param_count];
9013 
9014 					if (val->type == STACK_OBJ) {
9015 						MonoInst *iargs [2];
9016 
9017 						iargs [0] = sp [0];
9018 						iargs [1] = val;
9019 
9020 						mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9021 					}
9022 
9023 					addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9024 					EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9025 					if (cfg->gen_write_barriers && val->type == STACK_OBJ && !MONO_INS_IS_PCONST_NULL (val))
9026 						mini_emit_write_barrier (cfg, addr, val);
9027 					if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
9028 						GSHAREDVT_FAILURE (*ip);
9029 				} else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9030 					addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9031 
9032 					EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9033 				} else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9034 					if (!cmethod->klass->element_class->valuetype && !readonly)
9035 						mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9036 					CHECK_TYPELOAD (cmethod->klass);
9037 
9038 					readonly = FALSE;
9039 					addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9040 					ins = addr;
9041 				} else {
9042 					g_assert_not_reached ();
9043 				}
9044 
9045 				emit_widen = FALSE;
9046 				goto call_end;
9047 			}
9048 
9049 			ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual_ ? sp [0] : NULL);
9050 			if (ins)
9051 				goto call_end;
9052 
9053 			/* Tail prefix / tail call optimization */
9054 
9055 			/* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9056 			/* FIXME: runtime generic context pointer for jumps? */
9057 			/* FIXME: handle this for generic sharing eventually */
9058 			if ((ins_flag & MONO_INST_TAILCALL) &&
9059 				!vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9060 				supported_tail_call = TRUE;
9061 
9062 			if (supported_tail_call) {
9063 				MonoCallInst *call;
9064 
9065 				/* Prevent inlining of methods with tail calls (the call stack would be altered) */
9066 				INLINE_FAILURE ("tail call");
9067 
9068 				//printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9069 
9070 				if (cfg->backend->have_op_tail_call) {
9071 					/* Handle tail calls similarly to normal calls */
9072 					tail_call = TRUE;
9073 				} else {
9074 					mini_profiler_emit_tail_call (cfg, cmethod);
9075 
9076 					MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9077 					call->tail_call = TRUE;
9078 					call->method = cmethod;
9079 					call->signature = mono_method_signature (cmethod);
9080 
9081 					/*
9082 					 * We implement tail calls by storing the actual arguments into the
9083 					 * argument variables, then emitting a CEE_JMP.
9084 					 */
9085 					for (i = 0; i < n; ++i) {
9086 						/* Prevent argument from being register allocated */
9087 						arg_array [i]->flags |= MONO_INST_VOLATILE;
9088 						EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9089 					}
9090 					ins = (MonoInst*)call;
9091 					ins->inst_p0 = cmethod;
9092 					ins->inst_p1 = arg_array [0];
9093 					MONO_ADD_INS (cfg->cbb, ins);
9094 					link_bblock (cfg, cfg->cbb, end_bblock);
9095 					start_new_bblock = 1;
9096 
9097 					// FIXME: Eliminate unreachable epilogs
9098 
9099 					/*
9100 					 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9101 					 * only reachable from this call.
9102 					 */
9103 					GET_BBLOCK (cfg, tblock, ip + 5);
9104 					if (tblock == cfg->cbb || tblock->in_count == 0)
9105 						skip_ret = TRUE;
9106 					push_res = FALSE;
9107 
9108 					goto call_end;
9109 				}
9110 			}
9111 
9112 			/*
9113 			 * Virtual calls in llvm-only mode.
9114 			 */
9115 			if (cfg->llvm_only && virtual_ && cmethod && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
9116 				ins = emit_llvmonly_virtual_call (cfg, cmethod, fsig, context_used, sp);
9117 				goto call_end;
9118 			}
9119 
9120 			/* Common call */
9121 			if (!(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) && !(cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
9122 				INLINE_FAILURE ("call");
9123 			ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual_ ? sp [0] : NULL,
9124 											  imt_arg, vtable_arg);
9125 
9126 			if (tail_call && !cfg->llvm_only) {
9127 				link_bblock (cfg, cfg->cbb, end_bblock);
9128 				start_new_bblock = 1;
9129 
9130 				// FIXME: Eliminate unreachable epilogs
9131 
9132 				/*
9133 				 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9134 				 * only reachable from this call.
9135 				 */
9136 				GET_BBLOCK (cfg, tblock, ip + 5);
9137 				if (tblock == cfg->cbb || tblock->in_count == 0)
9138 					skip_ret = TRUE;
9139 				push_res = FALSE;
9140 			}
9141 
9142 			call_end:
9143 
9144 			/* End of call, INS should contain the result of the call, if any */
9145 
9146 			if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9147 				g_assert (ins);
9148 				if (emit_widen)
9149 					*sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9150 				else
9151 					*sp++ = ins;
9152 			}
9153 
9154 			if (keep_this_alive) {
9155 				MonoInst *dummy_use;
9156 
9157 				/* See mono_emit_method_call_full () */
9158 				EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9159 			}
9160 
9161 			if (cfg->llvm_only && cmethod && method_needs_stack_walk (cfg, cmethod)) {
9162 				/*
9163 				 * Clang can convert these calls to tail calls which screw up the stack
9164 				 * walk. This happens even when the -fno-optimize-sibling-calls
9165 				 * option is passed to clang.
9166 				 * Work around this by emitting a dummy call.
9167 				 */
9168 				mono_emit_jit_icall (cfg, mono_dummy_jit_icall, NULL);
9169 			}
9170 
9171 			CHECK_CFG_EXCEPTION;
9172 
9173 			ip += 5;
9174 			if (skip_ret) {
9175 				g_assert (*ip == CEE_RET);
9176 				ip += 1;
9177 			}
9178 			ins_flag = 0;
9179 			constrained_class = NULL;
9180 			if (need_seq_point)
9181 				emit_seq_point (cfg, method, ip, FALSE, TRUE);
9182 			break;
9183 		}
		case CEE_RET:
			mini_profiler_emit_leave (cfg, sig->ret->type != MONO_TYPE_VOID ? sp [-1] : NULL);

			if (cfg->method != method) {
				/* return from inlined method */
				/*
				 * If in_count == 0, that means the ret is unreachable due to
				 * being preceded by a throw. In that case, inline_method () will
				 * handle setting the return value
				 * (test case: test_0_inline_throw ()).
				 */
				if (return_var && cfg->cbb->in_count) {
					MonoType *ret_type = mono_method_signature (method)->ret;

					MonoInst *store;
					CHECK_STACK (1);
					--sp;

					if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
						UNVERIFIED;

					/* Store the result into the inline caller's return variable */
					//g_assert (returnvar != -1);
					EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
					cfg->ret_var_set = TRUE;
				}
			} else {
				/* return from the method being compiled */
				if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
					emit_pop_lmf (cfg);

				if (cfg->ret) {
					MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);

					if (seq_points && !sym_seq_points) {
						/*
						 * Place a seq point here too even though the IL stack is not
						 * empty, so a step over on
						 * call <FOO>
						 * ret
						 * will work correctly.
						 */
						NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
						MONO_ADD_INS (cfg->cbb, ins);
					}

					g_assert (!return_var);
					CHECK_STACK (1);
					--sp;

					if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
						UNVERIFIED;

					emit_setret (cfg, *sp);
				}
			}
			/* The IL evaluation stack must be empty at a ret */
			if (sp != stack_start)
				UNVERIFIED;
			/* Branch to the common exit bblock rather than emitting a real return here */
			MONO_INST_NEW (cfg, ins, OP_BR);
			ip++;
			ins->inst_target_bb = end_bblock;
			MONO_ADD_INS (cfg->cbb, ins);
			link_bblock (cfg, cfg->cbb, end_bblock);
			start_new_bblock = 1;
			break;
		/*
		 * CEE_BR_S: unconditional short branch (8-bit signed displacement).
		 * Any values left on the IL stack are spilled to variables by
		 * handle_stack_args () so the target bblock can pick them up.
		 */
		case CEE_BR_S:
			CHECK_OPSIZE (2);
			MONO_INST_NEW (cfg, ins, OP_BR);
			ip++;
			target = ip + 1 + (signed char)(*ip);
			++ip;
			GET_BBLOCK (cfg, tblock, target);
			link_bblock (cfg, cfg->cbb, tblock);
			ins->inst_target_bb = tblock;
			if (sp != stack_start) {
				/* Flush the evaluation stack into variables shared with the target. */
				handle_stack_args (cfg, stack_start, sp - stack_start);
				sp = stack_start;
				CHECK_UNVERIFIABLE (cfg);
			}
			MONO_ADD_INS (cfg->cbb, ins);
			start_new_bblock = 1;
			inline_costs += BRANCH_COST;
			break;
		/*
		 * Short-form conditional branches.  BIG_BRANCH_OFFSET maps each short
		 * opcode to its long-form equivalent; ADD_BINCOND emits the compare
		 * and the two-way branch.
		 */
		case CEE_BEQ_S:
		case CEE_BGE_S:
		case CEE_BGT_S:
		case CEE_BLE_S:
		case CEE_BLT_S:
		case CEE_BNE_UN_S:
		case CEE_BGE_UN_S:
		case CEE_BGT_UN_S:
		case CEE_BLE_UN_S:
		case CEE_BLT_UN_S:
			CHECK_OPSIZE (2);
			CHECK_STACK (2);
			MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
			ip++;
			target = ip + 1 + *(signed char*)ip;
			ip++;

			ADD_BINCOND (NULL);

			sp = stack_start;
			inline_costs += BRANCH_COST;
			break;
		/* CEE_BR: unconditional long branch (32-bit displacement). */
		case CEE_BR:
			CHECK_OPSIZE (5);
			MONO_INST_NEW (cfg, ins, OP_BR);
			ip++;

			target = ip + 4 + (gint32)read32(ip);
			ip += 4;
			GET_BBLOCK (cfg, tblock, target);
			link_bblock (cfg, cfg->cbb, tblock);
			ins->inst_target_bb = tblock;
			if (sp != stack_start) {
				handle_stack_args (cfg, stack_start, sp - stack_start);
				sp = stack_start;
				CHECK_UNVERIFIABLE (cfg);
			}

			MONO_ADD_INS (cfg->cbb, ins);

			start_new_bblock = 1;
			inline_costs += BRANCH_COST;
			break;
		/*
		 * CEE_BRFALSE/BRTRUE (and short forms): branch on zero/non-zero.
		 * Lowered to an explicit compare-against-zero followed by BEQ/BNE_UN,
		 * since the backends have no direct "branch if truthy" opcode.
		 */
		case CEE_BRFALSE_S:
		case CEE_BRTRUE_S:
		case CEE_BRFALSE:
		case CEE_BRTRUE: {
			MonoInst *cmp;
			gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
			gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
			guint32 opsize = is_short ? 1 : 4;

			CHECK_OPSIZE (opsize);
			CHECK_STACK (1);
			/* Value types and floats are not valid operands for brtrue/brfalse. */
			if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
				UNVERIFIED;
			ip ++;
			target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
			ip += opsize;

			sp--;

			/* Link both the taken and the fall-through successor bblocks. */
			GET_BBLOCK (cfg, tblock, target);
			link_bblock (cfg, cfg->cbb, tblock);
			GET_BBLOCK (cfg, tblock, ip);
			link_bblock (cfg, cfg->cbb, tblock);

			if (sp != stack_start) {
				handle_stack_args (cfg, stack_start, sp - stack_start);
				CHECK_UNVERIFIABLE (cfg);
			}

			MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
			cmp->sreg1 = sp [0]->dreg;
			type_from_op (cfg, cmp, sp [0], NULL);
			CHECK_TYPE (cmp);

#if SIZEOF_REGISTER == 4
			if (cmp->opcode == OP_LCOMPARE_IMM) {
				/* Convert it to OP_LCOMPARE */
				/* 32-bit targets cannot compare a 64-bit value against an immediate,
				 * so materialize a zero I8 constant and use a register compare. */
				MONO_INST_NEW (cfg, ins, OP_I8CONST);
				ins->type = STACK_I8;
				ins->dreg = alloc_dreg (cfg, STACK_I8);
				ins->inst_l = 0;
				MONO_ADD_INS (cfg->cbb, ins);
				cmp->opcode = OP_LCOMPARE;
				cmp->sreg2 = ins->dreg;
			}
#endif
			MONO_ADD_INS (cfg->cbb, cmp);

			/* brtrue == "!= 0", brfalse == "== 0". */
			MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
			type_from_op (cfg, ins, sp [0], NULL);
			MONO_ADD_INS (cfg->cbb, ins);
			ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
			GET_BBLOCK (cfg, tblock, target);
			ins->inst_true_bb = tblock;
			GET_BBLOCK (cfg, tblock, ip);
			ins->inst_false_bb = tblock;
			start_new_bblock = 2;

			sp = stack_start;
			inline_costs += BRANCH_COST;
			break;
		}
		/* Long-form conditional branches; same lowering as the short forms. */
		case CEE_BEQ:
		case CEE_BGE:
		case CEE_BGT:
		case CEE_BLE:
		case CEE_BLT:
		case CEE_BNE_UN:
		case CEE_BGE_UN:
		case CEE_BGT_UN:
		case CEE_BLE_UN:
		case CEE_BLT_UN:
			CHECK_OPSIZE (5);
			CHECK_STACK (2);
			MONO_INST_NEW (cfg, ins, *ip);
			ip++;
			target = ip + 4 + (gint32)read32(ip);
			ip += 4;

			ADD_BINCOND (NULL);

			sp = stack_start;
			inline_costs += BRANCH_COST;
			break;
		/*
		 * CEE_SWITCH: multi-way branch through a jump table.  The value on the
		 * stack is range-checked against the table size (out of range falls
		 * through to default_bblock), then either a single OP_SWITCH is
		 * emitted or the jump is computed by loading a target address from
		 * an in-memory table.
		 */
		case CEE_SWITCH: {
			MonoInst *src1;
			MonoBasicBlock **targets;
			MonoBasicBlock *default_bblock;
			MonoJumpInfoBBTable *table;
			int offset_reg = alloc_preg (cfg);
			int target_reg = alloc_preg (cfg);
			int table_reg = alloc_preg (cfg);
			int sum_reg = alloc_preg (cfg);
			gboolean use_op_switch;

			CHECK_OPSIZE (5);
			CHECK_STACK (1);
			/* n = number of jump-table entries. */
			n = read32 (ip + 1);
			--sp;
			src1 = sp [0];
			if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
				UNVERIFIED;

			ip += 5;
			CHECK_OPSIZE (n * sizeof (guint32));
			/* 'target' is the first IL offset after the table == the default/fall-through. */
			target = ip + n * sizeof (guint32);

			GET_BBLOCK (cfg, default_bblock, target);
			default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;

			/* Resolve every table entry (relative to the end of the table) to a bblock. */
			targets = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
			for (i = 0; i < n; ++i) {
				GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
				targets [i] = tblock;
				targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
				ip += 4;
			}

			if (sp != stack_start) {
				/*
				 * Link the current bb with the targets as well, so handle_stack_args
				 * will set their in_stack correctly.
				 */
				link_bblock (cfg, cfg->cbb, default_bblock);
				for (i = 0; i < n; ++i)
					link_bblock (cfg, cfg->cbb, targets [i]);

				handle_stack_args (cfg, stack_start, sp - stack_start);
				sp = stack_start;
				CHECK_UNVERIFIABLE (cfg);

				/* Undo the links */
				mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
				for (i = 0; i < n; ++i)
					mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
			}

			/* Unsigned bounds check: value >= n goes to the default target. */
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);

			for (i = 0; i < n; ++i)
				link_bblock (cfg, cfg->cbb, targets [i]);

			table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
			table->table = targets;
			table->table_size = n;

			use_op_switch = FALSE;
#ifdef TARGET_ARM
			/* ARM implements SWITCH statements differently */
			/* FIXME: Make it use the generic implementation */
			if (!cfg->compile_aot)
				use_op_switch = TRUE;
#endif

			if (COMPILE_LLVM (cfg))
				use_op_switch = TRUE;

			cfg->cbb->has_jump_table = 1;

			if (use_op_switch) {
				/* Single IR opcode; the backend expands it. */
				MONO_INST_NEW (cfg, ins, OP_SWITCH);
				ins->sreg1 = src1->dreg;
				ins->inst_p0 = table;
				ins->inst_many_bb = targets;
				ins->klass = (MonoClass *)GUINT_TO_POINTER (n);
				MONO_ADD_INS (cfg->cbb, ins);
			} else {
				/* Generic lowering: target = *(table + value * sizeof (gpointer)); br target. */
				if (sizeof (gpointer) == 8)
					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
				else
					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);

#if SIZEOF_REGISTER == 8
				/* The upper word might not be zero, and we add it to a 64 bit address later */
				MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
#endif

				if (cfg->compile_aot) {
					MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
				} else {
					MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
					ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
					ins->inst_p0 = table;
					ins->dreg = table_reg;
					MONO_ADD_INS (cfg->cbb, ins);
				}

				/* FIXME: Use load_memindex */
				MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
				MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
			}
			start_new_bblock = 1;
			inline_costs += (BRANCH_COST * 2);
			break;
		}
		/*
		 * CEE_LDIND_*: load a value of the given primitive type through the
		 * pointer on top of the stack.  ldind_to_type () maps the opcode to
		 * the corresponding MonoClass; ins_flag carries volatile./unaligned.
		 * prefixes seen earlier.
		 */
		case CEE_LDIND_I1:
		case CEE_LDIND_U1:
		case CEE_LDIND_I2:
		case CEE_LDIND_U2:
		case CEE_LDIND_I4:
		case CEE_LDIND_U4:
		case CEE_LDIND_I8:
		case CEE_LDIND_I:
		case CEE_LDIND_R4:
		case CEE_LDIND_R8:
		case CEE_LDIND_REF:
			CHECK_STACK (1);
			--sp;

			ins = mini_emit_memory_load (cfg, &ldind_to_type (*ip)->byval_arg, sp [0], 0, ins_flag);
			*sp++ = ins;
			ins_flag = 0;
			++ip;
			break;
		/*
		 * CEE_STIND_*: store a value through a pointer.  Stack layout is
		 * [addr, value]; after popping, sp [0] is the address and sp [1]
		 * the value.
		 */
		case CEE_STIND_REF:
		case CEE_STIND_I1:
		case CEE_STIND_I2:
		case CEE_STIND_I4:
		case CEE_STIND_I8:
		case CEE_STIND_R4:
		case CEE_STIND_R8:
		case CEE_STIND_I:
			CHECK_STACK (2);
			sp -= 2;

			if (ins_flag & MONO_INST_VOLATILE) {
				/* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
				mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
			}

			NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
			ins->flags |= ins_flag;
			ins_flag = 0;

			MONO_ADD_INS (cfg->cbb, ins);

			/* Reference stores need a GC write barrier, except inside the write
			 * barrier wrapper itself and when storing a known-null constant. */
			if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1]))
				mini_emit_write_barrier (cfg, sp [0], sp [1]);

			inline_costs += 1;
			++ip;
			break;
9552 
		/*
		 * CEE_MUL: binary multiply.  Handled separately from the other binops
		 * because it does not call add_widen_op () on its operands.
		 */
		case CEE_MUL:
			CHECK_STACK (2);

			MONO_INST_NEW (cfg, ins, (*ip));
			sp -= 2;
			ins->sreg1 = sp [0]->dreg;
			ins->sreg2 = sp [1]->dreg;
			/* Infer the result stack type (and concrete opcode) from the operand types. */
			type_from_op (cfg, ins, sp [0], sp [1]);
			CHECK_TYPE (ins);
			ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);

			/* Use the immediate opcodes if possible */
			if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
				int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
				if (imm_opcode != -1) {
					ins->opcode = imm_opcode;
					ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
					ins->sreg2 = -1;

					/* The constant is now folded into the instruction; kill its def. */
					NULLIFY_INS (sp [1]);
				}
			}

			MONO_ADD_INS ((cfg)->cbb, (ins));

			*sp++ = mono_decompose_opcode (cfg, ins);
			ip++;
			break;
		/*
		 * Remaining two-operand arithmetic/logic/shift opcodes.  Operands may
		 * need widening (add_widen_op) before the IR opcode is valid.
		 */
		case CEE_ADD:
		case CEE_SUB:
		case CEE_DIV:
		case CEE_DIV_UN:
		case CEE_REM:
		case CEE_REM_UN:
		case CEE_AND:
		case CEE_OR:
		case CEE_XOR:
		case CEE_SHL:
		case CEE_SHR:
		case CEE_SHR_UN:
			CHECK_STACK (2);

			MONO_INST_NEW (cfg, ins, (*ip));
			sp -= 2;
			ins->sreg1 = sp [0]->dreg;
			ins->sreg2 = sp [1]->dreg;
			type_from_op (cfg, ins, sp [0], sp [1]);
			CHECK_TYPE (ins);
			add_widen_op (cfg, ins, &sp [0], &sp [1]);
			ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);

			/* FIXME: Pass opcode to is_inst_imm */

			/* Use the immediate opcodes if possible */
			if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
				int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
				if (imm_opcode != -1) {
					ins->opcode = imm_opcode;
					if (sp [1]->opcode == OP_I8CONST) {
#if SIZEOF_REGISTER == 8
						ins->inst_imm = sp [1]->inst_l;
#else
						/* 32-bit targets keep a 64-bit immediate as two words. */
						ins->inst_ls_word = sp [1]->inst_ls_word;
						ins->inst_ms_word = sp [1]->inst_ms_word;
#endif
					}
					else
						ins->inst_imm = (gssize)(sp [1]->inst_c0);
					ins->sreg2 = -1;

					/* Might be followed by an instruction added by add_widen_op */
					if (sp [1]->next == NULL)
						NULLIFY_INS (sp [1]);
				}
			}
			MONO_ADD_INS ((cfg)->cbb, (ins));

			*sp++ = mono_decompose_opcode (cfg, ins);
			ip++;
			break;
		/*
		 * Unary ops and numeric conversions.  conv.i8/conv.u8 on an integer
		 * constant is folded at parse time into an I8 constant so later
		 * passes see long constants directly in the IR.
		 */
		case CEE_NEG:
		case CEE_NOT:
		case CEE_CONV_I1:
		case CEE_CONV_I2:
		case CEE_CONV_I4:
		case CEE_CONV_R4:
		case CEE_CONV_R8:
		case CEE_CONV_U4:
		case CEE_CONV_I8:
		case CEE_CONV_U8:
		case CEE_CONV_OVF_I8:
		case CEE_CONV_OVF_U8:
		case CEE_CONV_R_UN:
			CHECK_STACK (1);

			/* Special case this earlier so we have long constants in the IR */
			if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
				int data = sp [-1]->inst_c0;
				sp [-1]->opcode = OP_I8CONST;
				sp [-1]->type = STACK_I8;
#if SIZEOF_REGISTER == 8
				/* conv.u8 zero-extends, conv.i8 sign-extends. */
				if ((*ip) == CEE_CONV_U8)
					sp [-1]->inst_c0 = (guint32)data;
				else
					sp [-1]->inst_c0 = data;
#else
				sp [-1]->inst_ls_word = data;
				if ((*ip) == CEE_CONV_U8)
					sp [-1]->inst_ms_word = 0;
				else
					sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
#endif
				sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
			}
			else {
				ADD_UNOP (*ip);
			}
			ip++;
			break;
		/*
		 * Overflow-checked conversions to signed types.  Float sources are
		 * first converted (with overflow check) to I8, then narrowed.
		 */
		case CEE_CONV_OVF_I4:
		case CEE_CONV_OVF_I1:
		case CEE_CONV_OVF_I2:
		case CEE_CONV_OVF_I:
		case CEE_CONV_OVF_U:
			CHECK_STACK (1);

			if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
				ADD_UNOP (CEE_CONV_OVF_I8);
				ADD_UNOP (*ip);
			} else {
				ADD_UNOP (*ip);
			}
			ip++;
			break;
		/* Overflow-checked conversions to unsigned types; floats go via U8. */
		case CEE_CONV_OVF_U1:
		case CEE_CONV_OVF_U2:
		case CEE_CONV_OVF_U4:
			CHECK_STACK (1);

			if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
				ADD_UNOP (CEE_CONV_OVF_U8);
				ADD_UNOP (*ip);
			} else {
				ADD_UNOP (*ip);
			}
			ip++;
			break;
		/* Remaining conversions map directly to a single unary IR opcode. */
		case CEE_CONV_OVF_I1_UN:
		case CEE_CONV_OVF_I2_UN:
		case CEE_CONV_OVF_I4_UN:
		case CEE_CONV_OVF_I8_UN:
		case CEE_CONV_OVF_U1_UN:
		case CEE_CONV_OVF_U2_UN:
		case CEE_CONV_OVF_U4_UN:
		case CEE_CONV_OVF_U8_UN:
		case CEE_CONV_OVF_I_UN:
		case CEE_CONV_OVF_U_UN:
		case CEE_CONV_U2:
		case CEE_CONV_U1:
		case CEE_CONV_I:
		case CEE_CONV_U:
			CHECK_STACK (1);
			ADD_UNOP (*ip);
			CHECK_CFG_EXCEPTION;
			ip++;
			break;
		/* Overflow-checked arithmetic; ADD_BINOP emits the checked IR opcode. */
		case CEE_ADD_OVF:
		case CEE_ADD_OVF_UN:
		case CEE_MUL_OVF:
		case CEE_MUL_OVF_UN:
		case CEE_SUB_OVF:
		case CEE_SUB_OVF_UN:
			CHECK_STACK (2);
			ADD_BINOP (*ip);
			ip++;
			break;
		/*
		 * CEE_CPOBJ: copy a value of type 'klass' from the source address
		 * (sp [1]) to the destination address (sp [0]).  Not supported under
		 * gsharedvt, hence the GSHAREDVT_FAILURE up front.
		 */
		case CEE_CPOBJ:
			GSHAREDVT_FAILURE (*ip);
			CHECK_OPSIZE (5);
			CHECK_STACK (2);
			token = read32 (ip + 1);
			klass = mini_get_class (method, token, generic_context);
			CHECK_TYPELOAD (klass);
			sp -= 2;
			mini_emit_memory_copy (cfg, sp [0], sp [1], klass, FALSE, ins_flag);
			ins_flag = 0;
			ip += 5;
			break;
		/*
		 * CEE_LDOBJ: load a value of type 'klass' from the address on the
		 * stack.  Two peephole optimizations are applied when the following
		 * opcode is in the same bblock: ldobj+stloc stores straight into the
		 * local, and ldobj+stobj (same token) becomes a memory copy.
		 */
		case CEE_LDOBJ: {
			int loc_index = -1;
			int stloc_len = 0;

			CHECK_OPSIZE (5);
			CHECK_STACK (1);
			--sp;
			token = read32 (ip + 1);
			klass = mini_get_class (method, token, generic_context);
			CHECK_TYPELOAD (klass);

			/* Optimize the common ldobj+stloc combination */
			switch (ip [5]) {
			case CEE_STLOC_S:
				loc_index = ip [6];
				stloc_len = 2;
				break;
			case CEE_STLOC_0:
			case CEE_STLOC_1:
			case CEE_STLOC_2:
			case CEE_STLOC_3:
				loc_index = ip [5] - CEE_STLOC_0;
				stloc_len = 1;
				break;
			default:
				break;
			}

			if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
				CHECK_LOCAL (loc_index);

				/* Load directly into the local's vreg, skipping the stack slot. */
				EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
				ins->dreg = cfg->locals [loc_index]->dreg;
				ins->flags |= ins_flag;
				/* Consume both the ldobj and the stloc. */
				ip += 5;
				ip += stloc_len;
				if (ins_flag & MONO_INST_VOLATILE) {
					/* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
					mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
				}
				ins_flag = 0;
				break;
			}

			/* Optimize the ldobj+stobj combination */
			if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
				CHECK_STACK (1);

				/* Pop the destination address pushed before the ldobj source. */
				sp --;

				mini_emit_memory_copy (cfg, sp [0], sp [1], klass, FALSE, ins_flag);

				/* Consume both the ldobj and the stobj. */
				ip += 5 + 5;
				ins_flag = 0;
				break;
			}

			/* Generic case: plain typed load. */
			ins = mini_emit_memory_load (cfg, &klass->byval_arg, sp [0], 0, ins_flag);
			*sp++ = ins;

			ip += 5;
			ins_flag = 0;
			inline_costs += 1;
			break;
		}
		/*
		 * CEE_LDSTR: push a string literal.  The strategy depends on the
		 * method kind and compilation mode: dynamic-method wrappers embed the
		 * object pointer directly, other wrappers go through a helper call,
		 * shared/AOT code resolves the token at run time, and plain JIT
		 * compilation interns the string now and embeds the pointer.
		 */
		case CEE_LDSTR:
			CHECK_STACK_OVF (1);
			CHECK_OPSIZE (5);
			n = read32 (ip + 1);

			if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
				/* The wrapper data already holds the string object. */
				EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
				ins->type = STACK_OBJ;
				*sp = ins;
			}
			else if (method->wrapper_type != MONO_WRAPPER_NONE) {
				MonoInst *iargs [1];
				/* Wrapper data holds a C string; create a managed string at run time. */
				char *str = (char *)mono_method_get_wrapper_data (method, n);

				if (cfg->compile_aot)
					EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
				else
					EMIT_NEW_PCONST (cfg, iargs [0], str);
				*sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
			} else {
				if (cfg->opt & MONO_OPT_SHARED) {
					MonoInst *iargs [3];

					if (cfg->compile_aot) {
						cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
					}
					/* Resolve (domain, image, token index) -> string at run time. */
					EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
					EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
					EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
					*sp = mono_emit_jit_icall (cfg, ves_icall_mono_ldstr, iargs);
					/* Intern eagerly as well so errors surface at compile time. */
					mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
					CHECK_CFG_ERROR;
				} else {
					if (cfg->cbb->out_of_line) {
						MonoInst *iargs [2];

						if (image == mono_defaults.corlib) {
							/*
							 * Avoid relocations in AOT and save some space by using a
							 * version of helper_ldstr specialized to mscorlib.
							 */
							EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
							*sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
						} else {
							/* Avoid creating the string object */
							EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
							EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
							*sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
						}
					}
					else
					if (cfg->compile_aot) {
						NEW_LDSTRCONST (cfg, ins, image, n);
						*sp = ins;
						MONO_ADD_INS (cfg->cbb, ins);
					}
					else {
						/* Plain JIT: intern now and embed the object pointer. */
						NEW_PCONST (cfg, ins, NULL);
						ins->type = STACK_OBJ;
						ins->inst_p0 = mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
						CHECK_CFG_ERROR;

						if (!ins->inst_p0)
							OUT_OF_MEMORY_FAILURE;

						*sp = ins;
						MONO_ADD_INS (cfg->cbb, ins);
					}
				}
			}

			sp++;
			ip += 5;
			break;
		/*
		 * CEE_NEWOBJ: allocate an object and invoke its constructor.
		 * Special paths exist for corlib exception ctors (compact icall),
		 * System.Array ctors, string ctors (which allocate themselves), and
		 * value types (constructed in place on the stack frame).
		 */
		case CEE_NEWOBJ: {
			MonoInst *iargs [2];
			MonoMethodSignature *fsig;
			MonoInst this_ins;
			MonoInst *alloc;
			MonoInst *vtable_arg = NULL;

			CHECK_OPSIZE (5);
			token = read32 (ip + 1);
			cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
			CHECK_CFG_ERROR;

			fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
			CHECK_CFG_ERROR;

			mono_save_token_info (cfg, image, token, cmethod);

			if (!mono_class_init (cmethod->klass))
				TYPE_LOAD_ERROR (cmethod->klass);

			context_used = mini_method_check_context_used (cfg, cmethod);

			/* Access checks for the ctor, mirroring the call-opcode handling. */
			if (!dont_verify && !cfg->skip_visibility) {
				MonoMethod *cil_method = cmethod;
				MonoMethod *target_method = cil_method;

				if (method->is_inflated) {
					target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
					CHECK_CFG_ERROR;
				}

				if (!mono_method_can_access_method (method_definition, target_method) &&
					!mono_method_can_access_method (method, cil_method))
					emit_method_access_failure (cfg, method, cil_method);
			}

			if (mono_security_core_clr_enabled ())
				ensure_method_is_allowed_to_call_method (cfg, method, cmethod);

			/* Shared generic code may need to trigger the target class cctor. */
			if (cfg->gshared && cmethod && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
				emit_class_init (cfg, cmethod->klass);
				CHECK_TYPELOAD (cmethod->klass);
			}

			/*
			if (cfg->gsharedvt) {
				if (mini_is_gsharedvt_variable_signature (sig))
					GSHAREDVT_FAILURE (*ip);
			}
			*/

			n = fsig->param_count;
			CHECK_STACK (n);

			/*
			 * Generate smaller code for the common newobj <exception> instruction in
			 * argument checking code.
			 */
			if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
				is_exception_class (cmethod->klass) && n <= 2 &&
				((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
				((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
				MonoInst *iargs [3];

				sp -= n;

				/* One icall per arity creates the exception from the class token. */
				EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
				switch (n) {
				case 0:
					*sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
					break;
				case 1:
					iargs [1] = sp [0];
					*sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
					break;
				case 2:
					iargs [1] = sp [0];
					iargs [2] = sp [1];
					*sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
					break;
				default:
					g_assert_not_reached ();
				}

				ip += 5;
				inline_costs += 5;
				break;
			}

			/* move the args to allow room for 'this' in the first position */
			while (n--) {
				--sp;
				sp [1] = sp [0];
			}

			/* check_call_signature () requires sp[0] to be set */
			this_ins.type = STACK_OBJ;
			sp [0] = &this_ins;
			if (check_call_signature (cfg, fsig, sp))
				UNVERIFIED;

			iargs [0] = NULL;

			if (mini_class_is_system_array (cmethod->klass)) {
				/* Array ctors are implemented by runtime icalls, not managed code. */
				*sp = emit_get_rgctx_method (cfg, context_used,
											 cmethod, MONO_RGCTX_INFO_METHOD);

				/* Avoid varargs in the common case */
				if (fsig->param_count == 1)
					alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
				else if (fsig->param_count == 2)
					alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
				else if (fsig->param_count == 3)
					alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
				else if (fsig->param_count == 4)
					alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
				else
					alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
			} else if (cmethod->string_ctor) {
				g_assert (!context_used);
				g_assert (!vtable_arg);
				/* we simply pass a null pointer */
				EMIT_NEW_PCONST (cfg, *sp, NULL);
				/* now call the string ctor */
				alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
			} else {
				if (cmethod->klass->valuetype) {
					/* Value type: build it in a stack local; 'this' is its address. */
					iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
					emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
					EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);

					alloc = NULL;

					/*
					 * The code generated by mini_emit_virtual_call () expects
					 * iargs [0] to be a boxed instance, but luckily the vcall
					 * will be transformed into a normal call there.
					 */
				} else if (context_used) {
					alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
					*sp = alloc;
				} else {
					MonoVTable *vtable = NULL;

					if (!cfg->compile_aot)
						vtable = mono_class_vtable (cfg->domain, cmethod->klass);
					CHECK_TYPELOAD (cmethod->klass);

					/*
					 * TypeInitializationExceptions thrown from the mono_runtime_class_init
					 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
					 * As a workaround, we call class cctors before allocating objects.
					 */
					if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
						emit_class_init (cfg, cmethod->klass);
						if (cfg->verbose_level > 2)
							printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
						class_inits = g_slist_prepend (class_inits, cmethod->klass);
					}

					alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
					*sp = alloc;
				}
				CHECK_CFG_EXCEPTION; /*for handle_alloc*/

				if (alloc)
					MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);

				/* Now call the actual ctor */
				handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
				CHECK_CFG_EXCEPTION;
			}

			if (alloc == NULL) {
				/* Valuetype */
				EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
				type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
				*sp++= ins;
			} else {
				*sp++ = alloc;
			}

			ip += 5;
			inline_costs += 5;
			if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
				emit_seq_point (cfg, method, ip, FALSE, TRUE);
			break;
		}
		/*
		 * CEE_CASTCLASS / CEE_ISINST: type test on the object on the stack.
		 * Both are lowered to a single IR opcode (OP_CASTCLASS throws on
		 * failure, OP_ISINST yields null) which later passes expand.
		 */
		case CEE_CASTCLASS:
		case CEE_ISINST: {
			CHECK_STACK (1);
			--sp;
			CHECK_OPSIZE (5);
			token = read32 (ip + 1);
			klass = mini_get_class (method, token, generic_context);
			CHECK_TYPELOAD (klass);
			if (sp [0]->type != STACK_OBJ)
				UNVERIFIED;

			MONO_INST_NEW (cfg, ins, *ip == CEE_ISINST ? OP_ISINST : OP_CASTCLASS);
			ins->dreg = alloc_preg (cfg);
			ins->sreg1 = (*sp)->dreg;
			ins->klass = klass;
			ins->type = STACK_OBJ;
			MONO_ADD_INS (cfg->cbb, ins);

			CHECK_CFG_EXCEPTION;
			*sp++ = ins;
			ip += 5;

			cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
			break;
		}
		/*
		 * CEE_UNBOX_ANY: for reference types this is equivalent to castclass;
		 * for nullables and value types it extracts the value from the box.
		 * gsharedvt classes need a runtime-dispatched helper.
		 */
		case CEE_UNBOX_ANY: {
			MonoInst *res, *addr;

			CHECK_STACK (1);
			--sp;
			CHECK_OPSIZE (5);
			token = read32 (ip + 1);
			klass = mini_get_class (method, token, generic_context);
			CHECK_TYPELOAD (klass);

			mono_save_token_info (cfg, image, token, klass);

			context_used = mini_class_check_context_used (cfg, klass);

			if (mini_is_gsharedvt_klass (klass)) {
				res = handle_unbox_gsharedvt (cfg, klass, *sp);
				inline_costs += 2;
			} else if (generic_class_is_reference_type (cfg, klass)) {
				if (MONO_INS_IS_PCONST_NULL (*sp)) {
					/* Unboxing a null constant trivially yields null. */
					EMIT_NEW_PCONST (cfg, res, NULL);
					res->type = STACK_OBJ;
				} else {
					/* Reference type: unbox.any degenerates to castclass. */
					MONO_INST_NEW (cfg, res, OP_CASTCLASS);
					res->dreg = alloc_preg (cfg);
					res->sreg1 = (*sp)->dreg;
					res->klass = klass;
					res->type = STACK_OBJ;
					MONO_ADD_INS (cfg->cbb, res);
					cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
				}
			} else if (mono_class_is_nullable (klass)) {
				res = handle_unbox_nullable (cfg, *sp, klass, context_used);
			} else {
				/* Value type: get the data address, then load the value. */
				addr = handle_unbox (cfg, klass, sp, context_used);
				/* LDOBJ */
				EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
				res = ins;
				inline_costs += 2;
			}

			*sp ++ = res;
			ip += 5;
			break;
		}
		case CEE_BOX: {
			MonoInst *val;
			MonoClass *enum_class;
			MonoMethod *has_flag;

			CHECK_STACK (1);
			--sp;
			val = *sp;
			CHECK_OPSIZE (5);
			token = read32 (ip + 1);
			klass = mini_get_class (method, token, generic_context);
			CHECK_TYPELOAD (klass);

			mono_save_token_info (cfg, image, token, klass);

			context_used = mini_class_check_context_used (cfg, klass);

			/* Boxing a reference type is a no-op: leave the value on the stack. */
			if (generic_class_is_reference_type (cfg, klass)) {
				*sp++ = val;
				ip += 5;
				break;
			}

			if (klass == mono_defaults.void_class)
				UNVERIFIED;
			if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
				UNVERIFIED;
			/* frequent check in generic code: box (struct), brtrue */

			/*
			 * Look for:
			 *
			 *   <push int/long ptr>
			 *   <push int/long>
			 *   box MyFlags
			 *   constrained. MyFlags
			 *   callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
			 *
			 * If we find this sequence and the operand types on box and constrained
			 * are equal, we can emit a specialized instruction sequence instead of
			 * the very slow HasFlag () call.
			 */
			if ((cfg->opt & MONO_OPT_INTRINS) &&
			    /* Cheap checks first. */
			    ip + 5 + 6 + 5 < end &&
			    ip [5] == CEE_PREFIX1 &&
			    ip [6] == CEE_CONSTRAINED_ &&
			    ip [11] == CEE_CALLVIRT &&
			    ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
			    mono_class_is_enum (klass) &&
			    (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
			    (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
			    has_flag->klass == mono_defaults.enum_class &&
			    !strcmp (has_flag->name, "HasFlag") &&
			    has_flag->signature->hasthis &&
			    has_flag->signature->param_count == 1) {
				CHECK_TYPELOAD (enum_class);

				if (enum_class == klass) {
					MonoInst *enum_this, *enum_flag;

					/* Skip the whole box+constrained.+callvirt sequence. */
					ip += 5 + 6 + 5;
					--sp;

					enum_this = sp [0];
					enum_flag = sp [1];

					*sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
					break;
				}
			}

			// FIXME: LLVM can't handle the inconsistent bb linking
			/* Optimize the common 'box valuetype; brtrue/brfalse' pattern into a direct branch. */
			if (!mono_class_is_nullable (klass) &&
				!mini_is_gsharedvt_klass (klass) &&
				ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
				(ip [5] == CEE_BRTRUE ||
				 ip [5] == CEE_BRTRUE_S ||
				 ip [5] == CEE_BRFALSE ||
				 ip [5] == CEE_BRFALSE_S)) {
				gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
				int dreg;
				MonoBasicBlock *true_bb, *false_bb;

				ip += 5;

				if (cfg->verbose_level > 3) {
					printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
					printf ("<box+brtrue opt>\n");
				}

				/* Decode the branch target from the short or long form. */
				switch (*ip) {
				case CEE_BRTRUE_S:
				case CEE_BRFALSE_S:
					CHECK_OPSIZE (2);
					ip++;
					target = ip + 1 + (signed char)(*ip);
					ip++;
					break;
				case CEE_BRTRUE:
				case CEE_BRFALSE:
					CHECK_OPSIZE (5);
					ip++;
					target = ip + 4 + (gint)(read32 (ip));
					ip += 4;
					break;
				default:
					g_assert_not_reached ();
				}

				/*
				 * We need to link both bblocks, since it is needed for handling stack
				 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
				 * Branching to only one of them would lead to inconsistencies, so
				 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
				 */
				GET_BBLOCK (cfg, true_bb, target);
				GET_BBLOCK (cfg, false_bb, ip);

				mono_link_bblock (cfg, cfg->cbb, true_bb);
				mono_link_bblock (cfg, cfg->cbb, false_bb);

				if (sp != stack_start) {
					handle_stack_args (cfg, stack_start, sp - stack_start);
					sp = stack_start;
					CHECK_UNVERIFIABLE (cfg);
				}

				if (COMPILE_LLVM (cfg)) {
					dreg = alloc_ireg (cfg);
					MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);

					MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
				} else {
					/* The JIT can't eliminate the iconst+compare */
					MONO_INST_NEW (cfg, ins, OP_BR);
					ins->inst_target_bb = is_true ? true_bb : false_bb;
					MONO_ADD_INS (cfg->cbb, ins);
				}

				start_new_bblock = 1;
				break;
			}

			/* Generic case: emit the actual box allocation + copy. */
			*sp++ = handle_box (cfg, val, klass, context_used);

			CHECK_CFG_EXCEPTION;
			ip += 5;
			inline_costs += 1;
			break;
		}
		case CEE_UNBOX: {
			/* unbox: pushes a managed pointer to the value inside the boxed object. */
			CHECK_STACK (1);
			--sp;
			CHECK_OPSIZE (5);
			token = read32 (ip + 1);
			klass = mini_get_class (method, token, generic_context);
			CHECK_TYPELOAD (klass);

			mono_save_token_info (cfg, image, token, klass);

			context_used = mini_class_check_context_used (cfg, klass);

			if (mono_class_is_nullable (klass)) {
				MonoInst *val;

				/* For Nullable<T>, unbox produces a new Nullable value; push its address. */
				val = handle_unbox_nullable (cfg, *sp, klass, context_used);
				EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);

				*sp++= ins;
			} else {
				ins = handle_unbox (cfg, klass, sp, context_used);
				*sp++ = ins;
			}
			ip += 5;
			inline_costs += 2;
			break;
		}
10316 		case CEE_LDFLD:
10317 		case CEE_LDFLDA:
10318 		case CEE_STFLD:
10319 		case CEE_LDSFLD:
10320 		case CEE_LDSFLDA:
10321 		case CEE_STSFLD: {
10322 			MonoClassField *field;
10323 #ifndef DISABLE_REMOTING
10324 			int costs;
10325 #endif
10326 			guint foffset;
10327 			gboolean is_instance;
10328 			int op;
10329 			gpointer addr = NULL;
10330 			gboolean is_special_static;
10331 			MonoType *ftype;
10332 			MonoInst *store_val = NULL;
10333 			MonoInst *thread_ins;
10334 
10335 			op = *ip;
10336 			is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10337 			if (is_instance) {
10338 				if (op == CEE_STFLD) {
10339 					CHECK_STACK (2);
10340 					sp -= 2;
10341 					store_val = sp [1];
10342 				} else {
10343 					CHECK_STACK (1);
10344 					--sp;
10345 				}
10346 				if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10347 					UNVERIFIED;
10348 				if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10349 					UNVERIFIED;
10350 			} else {
10351 				if (op == CEE_STSFLD) {
10352 					CHECK_STACK (1);
10353 					sp--;
10354 					store_val = sp [0];
10355 				}
10356 			}
10357 
10358 			CHECK_OPSIZE (5);
10359 			token = read32 (ip + 1);
10360 			if (method->wrapper_type != MONO_WRAPPER_NONE) {
10361 				field = (MonoClassField *)mono_method_get_wrapper_data (method, token);
10362 				klass = field->parent;
10363 			}
10364 			else {
10365 				field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10366 				CHECK_CFG_ERROR;
10367 			}
10368 			if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10369 				FIELD_ACCESS_FAILURE (method, field);
10370 			mono_class_init (klass);
10371 
10372 			/* if the class is Critical then transparent code cannot access it's fields */
10373 			if (!is_instance && mono_security_core_clr_enabled ())
10374 				ensure_method_is_allowed_to_access_field (cfg, method, field);
10375 
10376 			/* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
10377 			   any visible *instance* field  (in fact there's a single case for a static field in Marshal) XXX
10378 			if (mono_security_core_clr_enabled ())
10379 				ensure_method_is_allowed_to_access_field (cfg, method, field);
10380 			*/
10381 
10382 			ftype = mono_field_get_type (field);
10383 
10384 			/*
10385 			 * LDFLD etc. is usable on static fields as well, so convert those cases to
10386 			 * the static case.
10387 			 */
10388 			if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
10389 				switch (op) {
10390 				case CEE_LDFLD:
10391 					op = CEE_LDSFLD;
10392 					break;
10393 				case CEE_STFLD:
10394 					op = CEE_STSFLD;
10395 					break;
10396 				case CEE_LDFLDA:
10397 					op = CEE_LDSFLDA;
10398 					break;
10399 				default:
10400 					g_assert_not_reached ();
10401 				}
10402 				is_instance = FALSE;
10403 			}
10404 
10405 			context_used = mini_class_check_context_used (cfg, klass);
10406 
10407 			/* INSTANCE CASE */
10408 
10409 			foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10410 			if (op == CEE_STFLD) {
10411 				if (target_type_is_incompatible (cfg, field->type, sp [1]))
10412 					UNVERIFIED;
10413 #ifndef DISABLE_REMOTING
10414 				if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10415 					MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10416 					MonoInst *iargs [5];
10417 
10418 					GSHAREDVT_FAILURE (op);
10419 
10420 					iargs [0] = sp [0];
10421 					EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10422 					EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10423 					EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10424 						    field->offset);
10425 					iargs [4] = sp [1];
10426 
10427 					if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10428 						costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10429 											   iargs, ip, cfg->real_offset, TRUE);
10430 						CHECK_CFG_EXCEPTION;
10431 						g_assert (costs > 0);
10432 
10433 						cfg->real_offset += 5;
10434 
10435 						inline_costs += costs;
10436 					} else {
10437 						mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10438 					}
10439 				} else
10440 #endif
10441 				{
10442 					MonoInst *store, *wbarrier_ptr_ins = NULL;
10443 
10444 					MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10445 
10446 					if (ins_flag & MONO_INST_VOLATILE) {
10447 						/* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10448 						mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10449 					}
10450 
10451 					if (mini_is_gsharedvt_klass (klass)) {
10452 						MonoInst *offset_ins;
10453 
10454 						context_used = mini_class_check_context_used (cfg, klass);
10455 
10456 						offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10457 						/* The value is offset by 1 */
10458 						EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10459 						dreg = alloc_ireg_mp (cfg);
10460 						EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10461 						wbarrier_ptr_ins = ins;
10462 						/* The decomposition will call mini_emit_memory_copy () which will emit a wbarrier if needed */
10463 						EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10464 					} else {
10465 						EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10466 					}
10467 					if (sp [0]->opcode != OP_LDADDR)
10468 						store->flags |= MONO_INST_FAULT;
10469 
10470 					if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !MONO_INS_IS_PCONST_NULL (sp [1])) {
10471 						if (mini_is_gsharedvt_klass (klass)) {
10472 							g_assert (wbarrier_ptr_ins);
10473 							mini_emit_write_barrier (cfg, wbarrier_ptr_ins, sp [1]);
10474 						} else {
10475 							/* insert call to write barrier */
10476 							MonoInst *ptr;
10477 							int dreg;
10478 
10479 							dreg = alloc_ireg_mp (cfg);
10480 							EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10481 							mini_emit_write_barrier (cfg, ptr, sp [1]);
10482 						}
10483 					}
10484 
10485 					store->flags |= ins_flag;
10486 				}
10487 				ins_flag = 0;
10488 				ip += 5;
10489 				break;
10490 			}
10491 
10492 #ifndef DISABLE_REMOTING
10493 			if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10494 				MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10495 				MonoInst *iargs [4];
10496 
10497 				GSHAREDVT_FAILURE (op);
10498 
10499 				iargs [0] = sp [0];
10500 				EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10501 				EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10502 				EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10503 				if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10504 					costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10505 										   iargs, ip, cfg->real_offset, TRUE);
10506 					CHECK_CFG_EXCEPTION;
10507 					g_assert (costs > 0);
10508 
10509 					cfg->real_offset += 5;
10510 
10511 					*sp++ = iargs [0];
10512 
10513 					inline_costs += costs;
10514 				} else {
10515 					ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10516 					*sp++ = ins;
10517 				}
10518 			} else
10519 #endif
10520 			if (is_instance) {
10521 				if (sp [0]->type == STACK_VTYPE) {
10522 					MonoInst *var;
10523 
10524 					/* Have to compute the address of the variable */
10525 
10526 					var = get_vreg_to_inst (cfg, sp [0]->dreg);
10527 					if (!var)
10528 						var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10529 					else
10530 						g_assert (var->klass == klass);
10531 
10532 					EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10533 					sp [0] = ins;
10534 				}
10535 
10536 				if (op == CEE_LDFLDA) {
10537 					if (sp [0]->type == STACK_OBJ) {
10538 						MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10539 						MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10540 					}
10541 
10542 					dreg = alloc_ireg_mp (cfg);
10543 
10544 					if (mini_is_gsharedvt_klass (klass)) {
10545 						MonoInst *offset_ins;
10546 
10547 						offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10548 						/* The value is offset by 1 */
10549 						EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10550 						EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10551 					} else {
10552 						EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10553 					}
10554 					ins->klass = mono_class_from_mono_type (field->type);
10555 					ins->type = STACK_MP;
10556 					*sp++ = ins;
10557 				} else {
10558 					MonoInst *load;
10559 
10560 					MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10561 
10562 					if (sp [0]->opcode == OP_LDADDR && klass->simd_type && cfg->opt & MONO_OPT_SIMD) {
10563 						ins = mono_emit_simd_field_load (cfg, field, sp [0]);
10564 						if (ins) {
10565 							*sp++ = ins;
10566 							ins_flag = 0;
10567 							ip += 5;
10568 							break;
10569 						}
10570 					}
10571 
10572 					MonoInst *field_add_inst = sp [0];
10573 					if (mini_is_gsharedvt_klass (klass)) {
10574 						MonoInst *offset_ins;
10575 
10576 						offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10577 						/* The value is offset by 1 */
10578 						EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10579 						EMIT_NEW_BIALU (cfg, field_add_inst, OP_PADD, alloc_ireg_mp (cfg), sp [0]->dreg, offset_ins->dreg);
10580 						foffset = 0;
10581 					}
10582 
10583 					load = mini_emit_memory_load (cfg, field->type, field_add_inst, foffset, ins_flag);
10584 
10585 					if (sp [0]->opcode != OP_LDADDR)
10586 						load->flags |= MONO_INST_FAULT;
10587 					*sp++ = load;
10588 				}
10589 			}
10590 
10591 			if (is_instance) {
10592 				ins_flag = 0;
10593 				ip += 5;
10594 				break;
10595 			}
10596 
10597 			/* STATIC CASE */
10598 			context_used = mini_class_check_context_used (cfg, klass);
10599 
10600 			if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL) {
10601 				mono_error_set_field_load (&cfg->error, field->parent, field->name, "Using static instructions with literal field");
10602 				CHECK_CFG_ERROR;
10603 			}
10604 
10605 			/* The special_static_fields field is init'd in mono_class_vtable, so it needs
10606 			 * to be called here.
10607 			 */
10608 			if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
10609 				mono_class_vtable (cfg->domain, klass);
10610 				CHECK_TYPELOAD (klass);
10611 			}
10612 			mono_domain_lock (cfg->domain);
10613 			if (cfg->domain->special_static_fields)
10614 				addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
10615 			mono_domain_unlock (cfg->domain);
10616 
10617 			is_special_static = mono_class_field_is_special_static (field);
10618 
10619 			if (is_special_static && ((gsize)addr & 0x80000000) == 0)
10620 				thread_ins = mono_create_tls_get (cfg, TLS_KEY_THREAD);
10621 			else
10622 				thread_ins = NULL;
10623 
10624 			/* Generate IR to compute the field address */
10625 			if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
10626 				/*
10627 				 * Fast access to TLS data
10628 				 * Inline version of get_thread_static_data () in
10629 				 * threads.c.
10630 				 */
10631 				guint32 offset;
10632 				int idx, static_data_reg, array_reg, dreg;
10633 
10634 				if (context_used && cfg->gsharedvt && mini_is_gsharedvt_klass (klass))
10635 					GSHAREDVT_FAILURE (op);
10636 
10637 				static_data_reg = alloc_ireg (cfg);
10638 				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
10639 
10640 				if (cfg->compile_aot) {
10641 					int offset_reg, offset2_reg, idx_reg;
10642 
10643 					/* For TLS variables, this will return the TLS offset */
10644 					EMIT_NEW_SFLDACONST (cfg, ins, field);
10645 					offset_reg = ins->dreg;
10646 					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10647 					idx_reg = alloc_ireg (cfg);
10648 					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
10649 					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10650 					MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10651 					array_reg = alloc_ireg (cfg);
10652 					MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10653 					offset2_reg = alloc_ireg (cfg);
10654 					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
10655 					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
10656 					dreg = alloc_ireg (cfg);
10657 					EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
10658 				} else {
10659 					offset = (gsize)addr & 0x7fffffff;
10660 					idx = offset & 0x3f;
10661 
10662 					array_reg = alloc_ireg (cfg);
10663 					MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10664 					dreg = alloc_ireg (cfg);
10665 					EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
10666 				}
10667 			} else if ((cfg->opt & MONO_OPT_SHARED) ||
10668 					(cfg->compile_aot && is_special_static) ||
10669 					(context_used && is_special_static)) {
10670 				MonoInst *iargs [2];
10671 
10672 				g_assert (field->parent);
10673 				EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10674 				if (context_used) {
10675 					iargs [1] = emit_get_rgctx_field (cfg, context_used,
10676 						field, MONO_RGCTX_INFO_CLASS_FIELD);
10677 				} else {
10678 					EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10679 				}
10680 				ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10681 			} else if (context_used) {
10682 				MonoInst *static_data;
10683 
10684 				/*
10685 				g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10686 					method->klass->name_space, method->klass->name, method->name,
10687 					depth, field->offset);
10688 				*/
10689 
10690 				if (mono_class_needs_cctor_run (klass, method))
10691 					emit_class_init (cfg, klass);
10692 
10693 				/*
10694 				 * The pointer we're computing here is
10695 				 *
10696 				 *   super_info.static_data + field->offset
10697 				 */
10698 				static_data = mini_emit_get_rgctx_klass (cfg, context_used,
10699 					klass, MONO_RGCTX_INFO_STATIC_DATA);
10700 
10701 				if (mini_is_gsharedvt_klass (klass)) {
10702 					MonoInst *offset_ins;
10703 
10704 					offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10705 					/* The value is offset by 1 */
10706 					EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10707 					dreg = alloc_ireg_mp (cfg);
10708 					EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10709 				} else if (field->offset == 0) {
10710 					ins = static_data;
10711 				} else {
10712 					int addr_reg = mono_alloc_preg (cfg);
10713 					EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10714 				}
10715 			} else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
10716 				MonoInst *iargs [2];
10717 
10718 				g_assert (field->parent);
10719 				EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10720 				EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10721 				ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10722 			} else {
10723 				MonoVTable *vtable = NULL;
10724 
10725 				if (!cfg->compile_aot)
10726 					vtable = mono_class_vtable (cfg->domain, klass);
10727 				CHECK_TYPELOAD (klass);
10728 
10729 				if (!addr) {
10730 					if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
10731 						if (!(g_slist_find (class_inits, klass))) {
10732 							emit_class_init (cfg, klass);
10733 							if (cfg->verbose_level > 2)
10734 								printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
10735 							class_inits = g_slist_prepend (class_inits, klass);
10736 						}
10737 					} else {
10738 						if (cfg->run_cctors) {
10739 							/* This makes so that inline cannot trigger */
10740 							/* .cctors: too many apps depend on them */
10741 							/* running with a specific order... */
10742 							g_assert (vtable);
10743 							if (! vtable->initialized)
10744 								INLINE_FAILURE ("class init");
10745 							if (!mono_runtime_class_init_full (vtable, &cfg->error)) {
10746 								mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
10747 								goto exception_exit;
10748 							}
10749 						}
10750 					}
10751 					if (cfg->compile_aot)
10752 						EMIT_NEW_SFLDACONST (cfg, ins, field);
10753 					else {
10754 						g_assert (vtable);
10755 						addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10756 						g_assert (addr);
10757 						EMIT_NEW_PCONST (cfg, ins, addr);
10758 					}
10759 				} else {
10760 					MonoInst *iargs [1];
10761 					EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
10762 					ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
10763 				}
10764 			}
10765 
10766 			/* Generate IR to do the actual load/store operation */
10767 
10768 			if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
10769 				/* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10770 				mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10771 			}
10772 
10773 			if (op == CEE_LDSFLDA) {
10774 				ins->klass = mono_class_from_mono_type (ftype);
10775 				ins->type = STACK_PTR;
10776 				*sp++ = ins;
10777 			} else if (op == CEE_STSFLD) {
10778 				MonoInst *store;
10779 
10780 				EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
10781 				store->flags |= ins_flag;
10782 			} else {
10783 				gboolean is_const = FALSE;
10784 				MonoVTable *vtable = NULL;
10785 				gpointer addr = NULL;
10786 
10787 				if (!context_used) {
10788 					vtable = mono_class_vtable (cfg->domain, klass);
10789 					CHECK_TYPELOAD (klass);
10790 				}
10791 				if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
10792 						(!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
10793 					int ro_type = ftype->type;
10794 					if (!addr)
10795 						addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10796 					if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
10797 						ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
10798 					}
10799 
10800 					GSHAREDVT_FAILURE (op);
10801 
10802 					/* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
10803 					is_const = TRUE;
10804 					switch (ro_type) {
10805 					case MONO_TYPE_BOOLEAN:
10806 					case MONO_TYPE_U1:
10807 						EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
10808 						sp++;
10809 						break;
10810 					case MONO_TYPE_I1:
10811 						EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
10812 						sp++;
10813 						break;
10814 					case MONO_TYPE_CHAR:
10815 					case MONO_TYPE_U2:
10816 						EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
10817 						sp++;
10818 						break;
10819 					case MONO_TYPE_I2:
10820 						EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
10821 						sp++;
10822 						break;
10823 						break;
10824 					case MONO_TYPE_I4:
10825 						EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
10826 						sp++;
10827 						break;
10828 					case MONO_TYPE_U4:
10829 						EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
10830 						sp++;
10831 						break;
10832 					case MONO_TYPE_I:
10833 					case MONO_TYPE_U:
10834 					case MONO_TYPE_PTR:
10835 					case MONO_TYPE_FNPTR:
10836 						EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10837 						type_to_eval_stack_type ((cfg), field->type, *sp);
10838 						sp++;
10839 						break;
10840 					case MONO_TYPE_STRING:
10841 					case MONO_TYPE_OBJECT:
10842 					case MONO_TYPE_CLASS:
10843 					case MONO_TYPE_SZARRAY:
10844 					case MONO_TYPE_ARRAY:
10845 						if (!mono_gc_is_moving ()) {
10846 							EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10847 							type_to_eval_stack_type ((cfg), field->type, *sp);
10848 							sp++;
10849 						} else {
10850 							is_const = FALSE;
10851 						}
10852 						break;
10853 					case MONO_TYPE_I8:
10854 					case MONO_TYPE_U8:
10855 						EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
10856 						sp++;
10857 						break;
10858 					case MONO_TYPE_R4:
10859 					case MONO_TYPE_R8:
10860 					case MONO_TYPE_VALUETYPE:
10861 					default:
10862 						is_const = FALSE;
10863 						break;
10864 					}
10865 				}
10866 
10867 				if (!is_const) {
10868 					MonoInst *load;
10869 
10870 					CHECK_STACK_OVF (1);
10871 
10872 					EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
10873 					load->flags |= ins_flag;
10874 					ins_flag = 0;
10875 					*sp++ = load;
10876 				}
10877 			}
10878 
10879 			if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
10880 				/* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10881 				mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10882 			}
10883 
10884 			ins_flag = 0;
10885 			ip += 5;
10886 			break;
10887 		}
		case CEE_STOBJ:
			/* stobj: store a value of the given type through the address at sp [0]. */
			CHECK_STACK (2);
			sp -= 2;
			CHECK_OPSIZE (5);
			token = read32 (ip + 1);
			klass = mini_get_class (method, token, generic_context);
			CHECK_TYPELOAD (klass);

			/* FIXME: should check item at sp [1] is compatible with the type of the store. */
			mini_emit_memory_store (cfg, &klass->byval_arg, sp [0], sp [1], ins_flag);
			ins_flag = 0;
			ip += 5;
			inline_costs += 1;
			break;
10902 
10903 			/*
10904 			 * Array opcodes
10905 			 */
		case CEE_NEWARR: {
			MonoInst *len_ins;
			const char *data_ptr;
			int data_size = 0;
			guint32 field_token;

			CHECK_STACK (1);
			--sp;

			CHECK_OPSIZE (5);
			token = read32 (ip + 1);

			klass = mini_get_class (method, token, generic_context);
			CHECK_TYPELOAD (klass);
			if (klass->byval_arg.type == MONO_TYPE_VOID)
				UNVERIFIED;

			context_used = mini_class_check_context_used (cfg, klass);

			/* A 64 bit length is narrowed to 32 bits with an overflow check. */
			if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
				MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
				ins->sreg1 = sp [0]->dreg;
				ins->type = STACK_I4;
				ins->dreg = alloc_ireg (cfg);
				MONO_ADD_INS (cfg->cbb, ins);
				*sp = mono_decompose_opcode (cfg, ins);
			}

			if (context_used) {
				/* Shared generic code: look up the array vtable through the rgctx. */
				MonoInst *args [3];
				MonoClass *array_class = mono_array_class_get (klass, 1);
				MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);

				/* FIXME: Use OP_NEWARR and decompose later to help abcrem */

				/* vtable */
				args [0] = mini_emit_get_rgctx_klass (cfg, context_used,
					array_class, MONO_RGCTX_INFO_VTABLE);
				/* array len */
				args [1] = sp [0];

				if (managed_alloc)
					ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
				else
					ins = mono_emit_jit_icall (cfg, ves_icall_array_new_specific, args);
			} else {
				if (cfg->opt & MONO_OPT_SHARED) {
					/* Decompose now to avoid problems with references to the domainvar */
					MonoInst *iargs [3];

					EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
					EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
					iargs [2] = sp [0];

					ins = mono_emit_jit_icall (cfg, ves_icall_array_new, iargs);
				} else {
					/* Decompose later since it is needed by abcrem */
					MonoClass *array_type = mono_array_class_get (klass, 1);
					mono_class_vtable (cfg->domain, array_type);
					CHECK_TYPELOAD (array_type);

					MONO_INST_NEW (cfg, ins, OP_NEWARR);
					ins->dreg = alloc_ireg_ref (cfg);
					ins->sreg1 = sp [0]->dreg;
					ins->inst_newa_class = klass;
					ins->type = STACK_OBJ;
					ins->klass = array_type;
					MONO_ADD_INS (cfg->cbb, ins);
					cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
					cfg->cbb->has_array_access = TRUE;

					/* Needed so mono_emit_load_get_addr () gets called */
					mono_get_got_var (cfg);
				}
			}

			len_ins = sp [0];
			ip += 5;
			*sp++ = ins;
			inline_costs += 1;

			/*
			 * we inline/optimize the initialization sequence if possible.
			 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
			 * for small sizes open code the memcpy
			 * ensure the rva field is big enough
			 */
			if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
				MonoMethod *memcpy_method = mini_get_memcpy_method ();
				MonoInst *iargs [3];
				int add_reg = alloc_ireg_mp (cfg);

				/* memcpy (array->vector, data_ptr, data_size) replaces the IL init sequence. */
				EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
				if (cfg->compile_aot) {
					EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
				} else {
					EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
				}
				EMIT_NEW_ICONST (cfg, iargs [2], data_size);
				mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
				/* Skip the IL instructions recognized by initialize_array_data (). */
				ip += 11;
			}

			break;
		}
		case CEE_LDLEN:
			/* ldlen: push the length of the array at sp [0]. */
			CHECK_STACK (1);
			--sp;
			if (sp [0]->type != STACK_OBJ)
				UNVERIFIED;

			MONO_INST_NEW (cfg, ins, OP_LDLEN);
			ins->dreg = alloc_preg (cfg);
			ins->sreg1 = sp [0]->dreg;
			ins->type = STACK_I4;
			/* This flag will be inherited by the decomposition */
			ins->flags |= MONO_INST_FAULT;
			MONO_ADD_INS (cfg->cbb, ins);
			cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
			cfg->cbb->has_array_access = TRUE;
			ip ++;
			*sp++ = ins;
			break;
11029 		case CEE_LDELEMA:
11030 			CHECK_STACK (2);
11031 			sp -= 2;
11032 			CHECK_OPSIZE (5);
11033 			if (sp [0]->type != STACK_OBJ)
11034 				UNVERIFIED;
11035 
11036 			cfg->flags |= MONO_CFG_HAS_LDELEMA;
11037 
11038 			klass = mini_get_class (method, read32 (ip + 1), generic_context);
11039 			CHECK_TYPELOAD (klass);
11040 			/* we need to make sure that this array is exactly the type it needs
11041 			 * to be for correctness. the wrappers are lax with their usage
11042 			 * so we need to ignore them here
11043 			 */
11044 			if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11045 				MonoClass *array_class = mono_array_class_get (klass, 1);
11046 				mini_emit_check_array_type (cfg, sp [0], array_class);
11047 				CHECK_TYPELOAD (array_class);
11048 			}
11049 
11050 			readonly = FALSE;
11051 			ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11052 			*sp++ = ins;
11053 			ip += 5;
11054 			break;
11055 		case CEE_LDELEM:
11056 		case CEE_LDELEM_I1:
11057 		case CEE_LDELEM_U1:
11058 		case CEE_LDELEM_I2:
11059 		case CEE_LDELEM_U2:
11060 		case CEE_LDELEM_I4:
11061 		case CEE_LDELEM_U4:
11062 		case CEE_LDELEM_I8:
11063 		case CEE_LDELEM_I:
11064 		case CEE_LDELEM_R4:
11065 		case CEE_LDELEM_R8:
11066 		case CEE_LDELEM_REF: {
11067 			MonoInst *addr;
11068 
11069 			CHECK_STACK (2);
11070 			sp -= 2;
11071 
11072 			if (*ip == CEE_LDELEM) {
11073 				CHECK_OPSIZE (5);
11074 				token = read32 (ip + 1);
11075 				klass = mini_get_class (method, token, generic_context);
11076 				CHECK_TYPELOAD (klass);
11077 				mono_class_init (klass);
11078 			}
11079 			else
11080 				klass = array_access_to_klass (*ip);
11081 
11082 			if (sp [0]->type != STACK_OBJ)
11083 				UNVERIFIED;
11084 
11085 			cfg->flags |= MONO_CFG_HAS_LDELEMA;
11086 
11087 			if (mini_is_gsharedvt_variable_klass (klass)) {
11088 				// FIXME-VT: OP_ICONST optimization
11089 				addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11090 				EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11091 				ins->opcode = OP_LOADV_MEMBASE;
11092 			} else if (sp [1]->opcode == OP_ICONST) {
11093 				int array_reg = sp [0]->dreg;
11094 				int index_reg = sp [1]->dreg;
11095 				int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11096 
11097 				if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
11098 					MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
11099 
11100 				MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11101 				EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11102 			} else {
11103 				addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11104 				EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11105 			}
11106 			*sp++ = ins;
11107 			if (*ip == CEE_LDELEM)
11108 				ip += 5;
11109 			else
11110 				++ip;
11111 			break;
11112 		}
		case CEE_STELEM_I:
		case CEE_STELEM_I1:
		case CEE_STELEM_I2:
		case CEE_STELEM_I4:
		case CEE_STELEM_I8:
		case CEE_STELEM_R4:
		case CEE_STELEM_R8:
		case CEE_STELEM_REF:
		case CEE_STELEM: {
			/* stelem[.type]: pop array, index and value, store the value into the element. */
			CHECK_STACK (3);
			sp -= 3;

			cfg->flags |= MONO_CFG_HAS_LDELEMA;

			/* Generic stelem carries a type token; the typed variants imply the element class. */
			if (*ip == CEE_STELEM) {
				CHECK_OPSIZE (5);
				token = read32 (ip + 1);
				klass = mini_get_class (method, token, generic_context);
				CHECK_TYPELOAD (klass);
				mono_class_init (klass);
			}
			else
				klass = array_access_to_klass (*ip);

			if (sp [0]->type != STACK_OBJ)
				UNVERIFIED;

			/* Emits the bounds check, any needed covariance check and the store itself. */
			emit_array_store (cfg, klass, sp, TRUE);

			if (*ip == CEE_STELEM)
				ip += 5;
			else
				++ip;
			inline_costs += 1;
			break;
		}
		case CEE_CKFINITE: {
			/* ckfinite: leave the fp value on the stack, throwing if it is not finite. */
			CHECK_STACK (1);
			--sp;

			if (cfg->llvm_only) {
				/* llvm-only mode has no OP_CKFINITE decomposition, so call the icall instead. */
				MonoInst *iargs [1];

				iargs [0] = sp [0];
				*sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
			} else  {
				MONO_INST_NEW (cfg, ins, OP_CKFINITE);
				ins->sreg1 = sp [0]->dreg;
				ins->dreg = alloc_freg (cfg);
				ins->type = STACK_R8;
				MONO_ADD_INS (cfg->cbb, ins);

				*sp++ = mono_decompose_opcode (cfg, ins);
			}

			++ip;
			break;
		}
		case CEE_REFANYVAL: {
			/*
			 * refanyval <token>: extract the address stored in a TypedReference,
			 * throwing InvalidCastException if its klass does not match the token.
			 */
			MonoInst *src_var, *src;

			int klass_reg = alloc_preg (cfg);
			int dreg = alloc_preg (cfg);

			GSHAREDVT_FAILURE (*ip);

			CHECK_STACK (1);
			MONO_INST_NEW (cfg, ins, *ip);
			--sp;
			CHECK_OPSIZE (5);
			klass = mini_get_class (method, read32 (ip + 1), generic_context);
			CHECK_TYPELOAD (klass);

			context_used = mini_class_check_context_used (cfg, klass);

			// FIXME:
			/* Take the address of the TypedReference vtype so its fields can be loaded. */
			src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
			if (!src_var)
				src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
			EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));

			if (context_used) {
				/* Shared generic code: fetch the expected klass from the rgctx at runtime. */
				MonoInst *klass_ins;

				klass_ins = mini_emit_get_rgctx_klass (cfg, context_used,
						klass, MONO_RGCTX_INFO_KLASS);

				// FIXME:
				MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
				MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
			} else {
				mini_emit_class_check (cfg, klass_reg, klass);
			}
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
			ins->type = STACK_MP;
			ins->klass = klass;
			*sp++ = ins;
			ip += 5;
			break;
		}
		case CEE_MKREFANY: {
			/*
			 * mkrefany <token>: build a TypedReference (klass, type, value) in a local
			 * from the managed pointer on the stack and push it as a vtype.
			 */
			MonoInst *loc, *addr;

			GSHAREDVT_FAILURE (*ip);

			CHECK_STACK (1);
			MONO_INST_NEW (cfg, ins, *ip);
			--sp;
			CHECK_OPSIZE (5);
			klass = mini_get_class (method, read32 (ip + 1), generic_context);
			CHECK_TYPELOAD (klass);

			context_used = mini_class_check_context_used (cfg, klass);

			loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
			EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);

			if (context_used) {
				/* Shared generic code: the klass comes from the rgctx at runtime. */
				MonoInst *const_ins;
				int type_reg = alloc_preg (cfg);

				const_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
				MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
				/* The MonoType is embedded in the MonoClass, so derive its address by adding the offset. */
				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
				MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
			} else {
				int const_reg = alloc_preg (cfg);
				int type_reg = alloc_preg (cfg);

				MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
				MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
				MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
			}
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);

			EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
			ins->type = STACK_VTYPE;
			ins->klass = mono_defaults.typed_reference_class;
			*sp++ = ins;
			ip += 5;
			break;
		}
		case CEE_LDTOKEN: {
			/*
			 * ldtoken <token>: push a Runtime{Type,Method,Field}Handle for the token.
			 * Handles wrapper methods (token indexes wrapper data), generic sharing
			 * (handle fetched from the rgctx), AOT, and the common
			 * ldtoken + Type.GetTypeFromHandle pattern which is folded into a single
			 * reflection-type load.
			 */
			gpointer handle;
			MonoClass *handle_class;

			CHECK_STACK_OVF (1);

			CHECK_OPSIZE (5);
			n = read32 (ip + 1);

			if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
					method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
				/* In these wrappers the token indexes the wrapper data, not the metadata tables. */
				handle = mono_method_get_wrapper_data (method, n);
				handle_class = (MonoClass *)mono_method_get_wrapper_data (method, n + 1);
				if (handle_class == mono_defaults.typehandle_class)
					handle = &((MonoClass*)handle)->byval_arg;
			}
			else {
				handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
				CHECK_CFG_ERROR;
			}
			if (!handle)
				LOAD_ERROR;
			mono_class_init (handle_class);
			if (cfg->gshared) {
				/* Determine whether the handle depends on the generic context. */
				if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
						mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
					/* This case handles ldtoken
					   of an open type, like for
					   typeof(Gen<>). */
					context_used = 0;
				} else if (handle_class == mono_defaults.typehandle_class) {
					context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type ((MonoType *)handle));
				} else if (handle_class == mono_defaults.fieldhandle_class)
					context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
				else if (handle_class == mono_defaults.methodhandle_class)
					context_used = mini_method_check_context_used (cfg, (MonoMethod *)handle);
				else
					g_assert_not_reached ();
			}

			if ((cfg->opt & MONO_OPT_SHARED) &&
					method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
					method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
				/* Shared-code mode: resolve the token at runtime through an icall. */
				MonoInst *addr, *vtvar, *iargs [3];
				int method_context_used;

				method_context_used = mini_method_check_context_used (cfg, method);

				vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);

				EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
				EMIT_NEW_ICONST (cfg, iargs [1], n);
				if (method_context_used) {
					iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
						method, MONO_RGCTX_INFO_METHOD);
					ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
				} else {
					EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
					ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
				}
				EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);

				MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);

				EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
			} else {
				/*
				 * Peephole: ldtoken immediately followed by a call to
				 * Type.GetTypeFromHandle is turned into a direct load of the
				 * System.Type object, skipping the handle round trip.
				 */
				if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
					((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
					(cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
					(cmethod->klass == mono_defaults.systemtype_class) &&
					(strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
					MonoClass *tclass = mono_class_from_mono_type ((MonoType *)handle);

					mono_class_init (tclass);
					if (context_used) {
						ins = mini_emit_get_rgctx_klass (cfg, context_used,
							tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
					} else if (cfg->compile_aot) {
						if (method->wrapper_type) {
							error_init (&error); //got to do it since there are multiple conditionals below
							if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
								/* Special case for static synchronized wrappers */
								EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
							} else {
								mono_error_cleanup (&error); /* FIXME don't swallow the error */
								/* FIXME: n is not a normal token */
								DISABLE_AOT (cfg);
								EMIT_NEW_PCONST (cfg, ins, NULL);
							}
						} else {
							EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
						}
					} else {
						MonoReflectionType *rt = mono_type_get_object_checked (cfg->domain, (MonoType *)handle, &cfg->error);
						CHECK_CFG_ERROR;
						EMIT_NEW_PCONST (cfg, ins, rt);
					}
					ins->type = STACK_OBJ;
					ins->klass = cmethod->klass;
					/* Skip the ldtoken here; the call opcode is consumed by the shared ip += 5 below. */
					ip += 5;
				} else {
					/* Plain ldtoken: store the raw handle into a vtype local and push it. */
					MonoInst *addr, *vtvar;

					vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);

					if (context_used) {
						if (handle_class == mono_defaults.typehandle_class) {
							ins = mini_emit_get_rgctx_klass (cfg, context_used,
									mono_class_from_mono_type ((MonoType *)handle),
									MONO_RGCTX_INFO_TYPE);
						} else if (handle_class == mono_defaults.methodhandle_class) {
							ins = emit_get_rgctx_method (cfg, context_used,
									(MonoMethod *)handle, MONO_RGCTX_INFO_METHOD);
						} else if (handle_class == mono_defaults.fieldhandle_class) {
							ins = emit_get_rgctx_field (cfg, context_used,
									(MonoClassField *)handle, MONO_RGCTX_INFO_CLASS_FIELD);
						} else {
							g_assert_not_reached ();
						}
					} else if (cfg->compile_aot) {
						EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
					} else {
						EMIT_NEW_PCONST (cfg, ins, handle);
					}
					EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
					MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
					EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
				}
			}

			*sp++ = ins;
			ip += 5;
			break;
		}
		case CEE_THROW:
			/* throw: pop an object reference and raise it; control never falls through. */
			CHECK_STACK (1);
			if (sp [-1]->type != STACK_OBJ)
				UNVERIFIED;

			MONO_INST_NEW (cfg, ins, OP_THROW);
			--sp;
			ins->sreg1 = sp [0]->dreg;
			ip++;
			cfg->cbb->out_of_line = TRUE;
			MONO_ADD_INS (cfg->cbb, ins);
			/* Mark everything after the throw as unreachable and discard the eval stack. */
			MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
			MONO_ADD_INS (cfg->cbb, ins);
			sp = stack_start;

			link_bblock (cfg, cfg->cbb, end_bblock);
			start_new_bblock = 1;
			/* This can complicate code generation for llvm since the return value might not be defined */
			if (COMPILE_LLVM (cfg))
				INLINE_FAILURE ("throw");
			break;
		case CEE_ENDFINALLY:
			/* endfinally: only valid inside a finally clause; terminates the handler. */
			if (!ip_in_finally_clause (cfg, ip - header->code))
				UNVERIFIED;
			/* mono_save_seq_point_info () depends on this */
			if (sp != stack_start)
				emit_seq_point (cfg, method, ip, FALSE, FALSE);
			MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
			MONO_ADD_INS (cfg->cbb, ins);
			ip++;
			start_new_bblock = 1;

			/*
			 * Control will leave the method so empty the stack, otherwise
			 * the next basic block will start with a nonempty stack.
			 */
			while (sp != stack_start) {
				sp--;
			}
			break;
		case CEE_LEAVE:
		case CEE_LEAVE_S: {
			/*
			 * leave[.s] <target>: exit one or more protected regions, running every
			 * finally clause crossed on the way, then branch to the target.
			 */
			GList *handlers;

			if (*ip == CEE_LEAVE) {
				CHECK_OPSIZE (5);
				target = ip + 5 + (gint32)read32(ip + 1);
			} else {
				CHECK_OPSIZE (2);
				target = ip + 2 + (signed char)(ip [1]);
			}

			/* empty the stack */
			while (sp != stack_start) {
				sp--;
			}

			/*
			 * If this leave statement is in a catch block, check for a
			 * pending exception, and rethrow it if necessary.
			 * We avoid doing this in runtime invoke wrappers, since those are called
			 * by native code which expects the wrapper to catch all exceptions.
			 */
			for (i = 0; i < header->num_clauses; ++i) {
				MonoExceptionClause *clause = &header->clauses [i];

				/*
				 * Use <= in the final comparison to handle clauses with multiple
				 * leave statements, like in bug #78024.
				 * The ordering of the exception clauses guarantees that we find the
				 * innermost clause.
				 */
				if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
					MonoInst *exc_ins;
					MonoBasicBlock *dont_throw;

					/*
					  MonoInst *load;

					  NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
					*/

					exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);

					NEW_BBLOCK (cfg, dont_throw);

					/*
					 * Currently, we always rethrow the abort exception, despite the
					 * fact that this is not correct. See thread6.cs for an example.
					 * But propagating the abort exception is more important than
					 * getting the semantics right.
					 */
					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
					MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
					MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);

					MONO_START_BB (cfg, dont_throw);
				}
			}

#ifdef ENABLE_LLVM
			cfg->cbb->try_end = (intptr_t)(ip - header->code);
#endif

			if ((handlers = mono_find_leave_clauses (cfg, ip, target))) {
				GList *tmp;
				/*
				 * For each finally clause that we exit we need to invoke the finally block.
				 * After each invocation we need to add try holes for all the clauses that
				 * we already exited.
				 */
				for (tmp = handlers; tmp; tmp = tmp->next) {
					MonoExceptionClause *clause = (MonoExceptionClause *)tmp->data;
					if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY)
						continue;
					MonoInst *abort_exc = (MonoInst *)mono_find_exvar_for_offset (cfg, clause->handler_offset);
					MonoBasicBlock *dont_throw;

					tblock = cfg->cil_offset_to_bb [clause->handler_offset];
					g_assert (tblock);
					link_bblock (cfg, cfg->cbb, tblock);

					/* Clear the exception variable before calling the handler. */
					MONO_EMIT_NEW_PCONST (cfg, abort_exc->dreg, 0);

					MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
					ins->inst_target_bb = tblock;
					ins->inst_eh_blocks = tmp;
					MONO_ADD_INS (cfg->cbb, ins);
					cfg->cbb->has_call_handler = 1;

					/* Throw exception if exvar is set */
					/* FIXME Do we need this for calls from catch/filter ? */
					NEW_BBLOCK (cfg, dont_throw);
					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, abort_exc->dreg, 0);
					MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
					mono_emit_jit_icall (cfg, mono_thread_self_abort, NULL);
					cfg->cbb->clause_holes = tmp;

					MONO_START_BB (cfg, dont_throw);
					cfg->cbb->clause_holes = tmp;

					if (COMPILE_LLVM (cfg)) {
						MonoBasicBlock *target_bb;

						/*
						 * Link the finally bblock with the target, since it will
						 * conceptually branch there.
						 */
						GET_BBLOCK (cfg, tblock, cfg->cil_start + clause->handler_offset + clause->handler_len - 1);
						GET_BBLOCK (cfg, target_bb, target);
						link_bblock (cfg, tblock, target_bb);
					}
				}
			}

			/* Finally, the unconditional branch to the leave target. */
			MONO_INST_NEW (cfg, ins, OP_BR);
			MONO_ADD_INS (cfg->cbb, ins);
			GET_BBLOCK (cfg, tblock, target);
			link_bblock (cfg, cfg->cbb, tblock);
			ins->inst_target_bb = tblock;

			start_new_bblock = 1;

			if (*ip == CEE_LEAVE)
				ip += 5;
			else
				ip += 2;

			break;
		}
11561 
11562 			/*
11563 			 * Mono specific opcodes
11564 			 */
11565 		case MONO_CUSTOM_PREFIX: {
11566 
11567 			g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11568 
11569 			CHECK_OPSIZE (2);
11570 			switch (ip [1]) {
			case CEE_MONO_ICALL: {
				/*
				 * mono.icall <token>: call a JIT icall whose address is stored in the
				 * wrapper data. Only valid inside wrapper methods.
				 */
				gpointer func;
				MonoJitICallInfo *info;

				token = read32 (ip + 2);
				func = mono_method_get_wrapper_data (method, token);
				info = mono_find_jit_icall_by_addr (func);
				if (!info)
					g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
				g_assert (info);

				CHECK_STACK (info->sig->param_count);
				sp -= info->sig->param_count;

				if (!strcmp (info->name, "mono_threads_attach_coop")) {
					MonoInst *addr;
					MonoBasicBlock *next_bb;

					if (cfg->compile_aot) {
						/*
						 * This is called on unattached threads, so it cannot go through the trampoline
						 * infrastructure. Use an indirect call through a got slot initialized at load time
						 * instead.
						 */
						EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_ICALL_ADDR_NOCALL, (char*)info->name);
						ins = mini_emit_calli (cfg, info->sig, sp, addr, NULL, NULL);
					} else {
						ins = mono_emit_jit_icall (cfg, info->func, sp);
					}

					/*
					 * Parts of the initlocals code needs to come after this, since it might call methods like memset.
					 */
					init_localsbb2 = cfg->cbb;
					NEW_BBLOCK (cfg, next_bb);
					MONO_START_BB (cfg, next_bb);
				} else {
					ins = mono_emit_jit_icall (cfg, info->func, sp);
				}

				if (!MONO_TYPE_IS_VOID (info->sig->ret))
					*sp++ = ins;

				ip += 6;
				inline_costs += 10 * num_calls++;

				break;
			}
			case CEE_MONO_LDPTR_CARD_TABLE:
			case CEE_MONO_LDPTR_NURSERY_START:
			case CEE_MONO_LDPTR_NURSERY_BITS:
			case CEE_MONO_LDPTR_INT_REQ_FLAG:
			case CEE_MONO_LDPTR_PROFILER_ALLOCATION_COUNT: {
				/* Push a well-known runtime constant resolved via the patch-info machinery (AOT friendly). */
				CHECK_STACK_OVF (1);

				switch (ip [1]) {
				case CEE_MONO_LDPTR_CARD_TABLE:
					ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
					break;
				case CEE_MONO_LDPTR_NURSERY_START:
					ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
					break;
				case CEE_MONO_LDPTR_NURSERY_BITS:
					ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL);
					break;
				case CEE_MONO_LDPTR_INT_REQ_FLAG:
					ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
					break;
				case CEE_MONO_LDPTR_PROFILER_ALLOCATION_COUNT:
					ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_PROFILER_ALLOCATION_COUNT, NULL);
					break;
				default:
					g_assert_not_reached ();
					break;
				}

				*sp++ = ins;
				ip += 2;
				inline_costs += 10 * num_calls++;
				break;
			}
			case CEE_MONO_LDPTR: {
				/* Push a raw pointer taken from the wrapper data. */
				gpointer ptr;

				CHECK_STACK_OVF (1);
				CHECK_OPSIZE (6);
				token = read32 (ip + 2);

				ptr = mono_method_get_wrapper_data (method, token);
				EMIT_NEW_PCONST (cfg, ins, ptr);
				*sp++ = ins;
				ip += 6;
				inline_costs += 10 * num_calls++;
				/* Can't embed random pointers into AOT code */
				DISABLE_AOT (cfg);
				break;
			}
			case CEE_MONO_JIT_ICALL_ADDR: {
				/* Push the address of a JIT icall, emitted as a named patchable constant. */
				MonoJitICallInfo *callinfo;
				gpointer ptr;

				CHECK_STACK_OVF (1);
				CHECK_OPSIZE (6);
				token = read32 (ip + 2);

				ptr = mono_method_get_wrapper_data (method, token);
				callinfo = mono_find_jit_icall_by_addr (ptr);
				g_assert (callinfo);
				EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
				*sp++ = ins;
				ip += 6;
				inline_costs += 10 * num_calls++;
				break;
			}
			case CEE_MONO_ICALL_ADDR: {
				/* Push the native address of an internal call (pinvoke/icall target). */
				MonoMethod *cmethod;
				gpointer ptr;

				CHECK_STACK_OVF (1);
				CHECK_OPSIZE (6);
				token = read32 (ip + 2);

				cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);

				if (cfg->compile_aot) {
					if (cfg->direct_pinvoke && ip + 6 < end && (ip [6] == CEE_POP)) {
						/*
						 * This is generated by emit_native_wrapper () to resolve the pinvoke address
						 * before the call, its not needed when using direct pinvoke.
						 * This is not an optimization, but its used to avoid looking up pinvokes
						 * on platforms which don't support dlopen ().
						 */
						EMIT_NEW_PCONST (cfg, ins, NULL);
					} else {
						EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
					}
				} else {
					ptr = mono_lookup_internal_call (cmethod);
					g_assert (ptr);
					EMIT_NEW_PCONST (cfg, ins, ptr);
				}
				*sp++ = ins;
				ip += 6;
				break;
			}
			case CEE_MONO_VTADDR: {
				/* Replace the vtype on the stack with the address of its backing variable. */
				MonoInst *src_var, *src;

				CHECK_STACK (1);
				--sp;

				// FIXME:
				src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
				EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
				*sp++ = src;
				ip += 2;
				break;
			}
			case CEE_MONO_NEWOBJ: {
				/* Allocate an (uninitialized) object of the given class via the runtime icall. */
				MonoInst *iargs [2];

				CHECK_STACK_OVF (1);
				CHECK_OPSIZE (6);
				token = read32 (ip + 2);
				klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
				mono_class_init (klass);
				NEW_DOMAINCONST (cfg, iargs [0]);
				MONO_ADD_INS (cfg->cbb, iargs [0]);
				NEW_CLASSCONST (cfg, iargs [1], klass);
				MONO_ADD_INS (cfg->cbb, iargs [1]);
				*sp++ = mono_emit_jit_icall (cfg, ves_icall_object_new, iargs);
				ip += 6;
				inline_costs += 10 * num_calls++;
				break;
			}
			case CEE_MONO_OBJADDR:
				/* Reinterpret the object reference on the stack as a managed pointer. */
				CHECK_STACK (1);
				--sp;
				MONO_INST_NEW (cfg, ins, OP_MOVE);
				ins->dreg = alloc_ireg_mp (cfg);
				ins->sreg1 = sp [0]->dreg;
				ins->type = STACK_MP;
				MONO_ADD_INS (cfg->cbb, ins);
				*sp++ = ins;
				ip += 2;
				break;
			case CEE_MONO_LDNATIVEOBJ:
				/*
				 * Similar to LDOBJ, but instead load the unmanaged
				 * representation of the vtype to the stack.
				 */
				CHECK_STACK (1);
				CHECK_OPSIZE (6);
				--sp;
				token = read32 (ip + 2);
				klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
				g_assert (klass->valuetype);
				mono_class_init (klass);

				{
					MonoInst *src, *dest, *temp;

					src = sp [0];
					temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
					/* Mark the local as holding the unmanaged (pinvoke) layout of the vtype. */
					temp->backend.is_pinvoke = 1;
					EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
					mini_emit_memory_copy (cfg, dest, src, klass, TRUE, 0);

					EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
					dest->type = STACK_VTYPE;
					dest->klass = klass;

					*sp ++ = dest;
					ip += 6;
				}
				break;
			case CEE_MONO_RETOBJ: {
				/*
				 * Same as RET, but return the native representation of a vtype
				 * to the caller.
				 */
				g_assert (cfg->ret);
				g_assert (mono_method_signature (method)->pinvoke);
				CHECK_STACK (1);
				--sp;

				CHECK_OPSIZE (6);
				token = read32 (ip + 2);
				klass = (MonoClass *)mono_method_get_wrapper_data (method, token);

				if (!cfg->vret_addr) {
					g_assert (cfg->ret_var_is_local);

					EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
				} else {
					/* Caller passed the address for the return value; copy into it. */
					EMIT_NEW_RETLOADA (cfg, ins);
				}
				mini_emit_memory_copy (cfg, ins, sp [0], klass, TRUE, 0);

				if (sp != stack_start)
					UNVERIFIED;

				mini_profiler_emit_leave (cfg, sp [0]);

				MONO_INST_NEW (cfg, ins, OP_BR);
				ins->inst_target_bb = end_bblock;
				MONO_ADD_INS (cfg->cbb, ins);
				link_bblock (cfg, cfg->cbb, end_bblock);
				start_new_bblock = 1;
				ip += 6;
				break;
			}
			case CEE_MONO_SAVE_LMF:
			case CEE_MONO_RESTORE_LMF:
				/* No IR is emitted for these; the opcodes are simply skipped here. */
				ip += 2;
				break;
			case CEE_MONO_CLASSCONST:
				/* Push a MonoClass* constant taken from the wrapper data. */
				CHECK_STACK_OVF (1);
				CHECK_OPSIZE (6);
				token = read32 (ip + 2);
				EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
				*sp++ = ins;
				ip += 6;
				inline_costs += 10 * num_calls++;
				break;
			case CEE_MONO_NOT_TAKEN:
				/* Hint: mark the current bblock as unlikely/out-of-line. */
				cfg->cbb->out_of_line = TRUE;
				ip += 2;
				break;
			case CEE_MONO_TLS: {
				/* Push the value of a runtime TLS slot identified by the inline key. */
				MonoTlsKey key;

				CHECK_STACK_OVF (1);
				CHECK_OPSIZE (6);
				key = (MonoTlsKey)read32 (ip + 2);
				g_assert (key < TLS_KEY_NUM);

				ins = mono_create_tls_get (cfg, key);
				g_assert (ins);
				ins->type = STACK_PTR;
				*sp++ = ins;
				ip += 6;
				break;
			}
			case CEE_MONO_DYN_CALL: {
				MonoCallInst *call;

				/* It would be easier to call a trampoline, but that would put an
				 * extra frame on the stack, confusing exception handling. So
				 * implement it inline using an opcode for now.
				 */

				if (!cfg->dyn_call_var) {
					cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
					/* prevent it from being register allocated */
					cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
				}

				/* Has to use a call inst since local regalloc expects it */
				MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
				ins = (MonoInst*)call;
				sp -= 2;
				ins->sreg1 = sp [0]->dreg;
				ins->sreg2 = sp [1]->dreg;
				MONO_ADD_INS (cfg->cbb, ins);

				cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
				/* OP_DYN_CALL might need to allocate a dynamically sized param area */
				cfg->flags |= MONO_CFG_HAS_ALLOCA;

				ip += 2;
				inline_costs += 10 * num_calls++;

				break;
			}
			case CEE_MONO_MEMORY_BARRIER: {
				/* Emit a memory barrier; the inline operand selects the barrier kind. */
				CHECK_OPSIZE (6);
				mini_emit_memory_barrier (cfg, (int)read32 (ip + 2));
				ip += 6;
				break;
			}
			case CEE_MONO_ATOMIC_STORE_I4: {
				/* Atomic 32-bit store: sp[0] is the destination, sp[1] the value. */
				g_assert (mono_arch_opcode_supported (OP_ATOMIC_STORE_I4));

				CHECK_OPSIZE (6);
				CHECK_STACK (2);
				sp -= 2;

				MONO_INST_NEW (cfg, ins, OP_ATOMIC_STORE_I4);
				ins->dreg = sp [0]->dreg;
				ins->sreg1 = sp [1]->dreg;
				/* The inline operand selects the memory-barrier semantics of the store. */
				ins->backend.memory_barrier_kind = (int) read32 (ip + 2);
				MONO_ADD_INS (cfg->cbb, ins);

				ip += 6;
				break;
			}
11908 			case CEE_MONO_JIT_ATTACH: {
11909 				MonoInst *args [16], *domain_ins;
11910 				MonoInst *ad_ins, *jit_tls_ins;
11911 				MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
11912 
11913 				g_assert (!mono_threads_is_blocking_transition_enabled ());
11914 
11915 				cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11916 
11917 				EMIT_NEW_PCONST (cfg, ins, NULL);
11918 				MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11919 
11920 				ad_ins = mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
11921 				jit_tls_ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
11922 
11923 				if (ad_ins && jit_tls_ins) {
11924 					NEW_BBLOCK (cfg, next_bb);
11925 					NEW_BBLOCK (cfg, call_bb);
11926 
11927 					if (cfg->compile_aot) {
11928 						/* AOT code is only used in the root domain */
11929 						EMIT_NEW_PCONST (cfg, domain_ins, NULL);
11930 					} else {
11931 						EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
11932 					}
11933 					MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
11934 					MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
11935 
11936 					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
11937 					MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
11938 
11939 					MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
11940 					MONO_START_BB (cfg, call_bb);
11941 				}
11942 
11943 				/* AOT code is only used in the root domain */
11944 				EMIT_NEW_PCONST (cfg, args [0], cfg->compile_aot ? NULL : cfg->domain);
11945 				if (cfg->compile_aot) {
11946 					MonoInst *addr;
11947 
11948 					/*
11949 					 * This is called on unattached threads, so it cannot go through the trampoline
11950 					 * infrastructure. Use an indirect call through a got slot initialized at load time
11951 					 * instead.
11952 					 */
11953 					EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_THREAD_ATTACH, NULL);
11954 					ins = mini_emit_calli (cfg, helper_sig_jit_thread_attach, args, addr, NULL, NULL);
11955 				} else {
11956 					ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
11957 				}
11958 				MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11959 
11960 				if (next_bb)
11961 					MONO_START_BB (cfg, next_bb);
11962 
11963 				/*
				 * Parts of the initlocals code need to come after this, since it might call methods like memset.
11965 				 */
11966 				init_localsbb2 = cfg->cbb;
11967 				NEW_BBLOCK (cfg, next_bb);
11968 				MONO_START_BB (cfg, next_bb);
11969 
11970 				ip += 2;
11971 				break;
11972 			}
11973 			case CEE_MONO_JIT_DETACH: {
11974 				MonoInst *args [16];
11975 
11976 				/* Restore the original domain */
11977 				dreg = alloc_ireg (cfg);
11978 				EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
11979 				mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
11980 				ip += 2;
11981 				break;
11982 			}
11983 			case CEE_MONO_CALLI_EXTRA_ARG: {
11984 				MonoInst *addr;
11985 				MonoMethodSignature *fsig;
11986 				MonoInst *arg;
11987 
11988 				/*
11989 				 * This is the same as CEE_CALLI, but passes an additional argument
11990 				 * to the called method in llvmonly mode.
11991 				 * This is only used by delegate invoke wrappers to call the
11992 				 * actual delegate method.
11993 				 */
11994 				g_assert (method->wrapper_type == MONO_WRAPPER_DELEGATE_INVOKE);
11995 
11996 				CHECK_OPSIZE (6);
11997 				token = read32 (ip + 2);
11998 
11999 				ins = NULL;
12000 
12001 				cmethod = NULL;
12002 				CHECK_STACK (1);
12003 				--sp;
12004 				addr = *sp;
12005 				fsig = mini_get_signature (method, token, generic_context, &cfg->error);
12006 				CHECK_CFG_ERROR;
12007 
12008 				if (cfg->llvm_only)
12009 					cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
12010 
12011 				n = fsig->param_count + fsig->hasthis + 1;
12012 
12013 				CHECK_STACK (n);
12014 
12015 				sp -= n;
12016 				arg = sp [n - 1];
12017 
12018 				if (cfg->llvm_only) {
12019 					/*
					 * The lowest bit of 'arg' determines whether the callee uses the gsharedvt
12021 					 * cconv. This is set by mono_init_delegate ().
12022 					 */
12023 					if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
12024 						MonoInst *callee = addr;
12025 						MonoInst *call, *localloc_ins;
12026 						MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12027 						int low_bit_reg = alloc_preg (cfg);
12028 
12029 						NEW_BBLOCK (cfg, is_gsharedvt_bb);
12030 						NEW_BBLOCK (cfg, end_bb);
12031 
12032 						MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12033 						MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12034 						MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12035 
12036 						/* Normal case: callee uses a normal cconv, have to add an out wrapper */
12037 						addr = emit_get_rgctx_sig (cfg, context_used,
12038 												   fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12039 						/*
12040 						 * ADDR points to a gsharedvt-out wrapper, have to pass <callee, arg> as an extra arg.
12041 						 */
12042 						MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12043 						ins->dreg = alloc_preg (cfg);
12044 						ins->inst_imm = 2 * SIZEOF_VOID_P;
12045 						MONO_ADD_INS (cfg->cbb, ins);
12046 						localloc_ins = ins;
12047 						cfg->flags |= MONO_CFG_HAS_ALLOCA;
12048 						MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12049 						MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12050 
12051 						call = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12052 						MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12053 
12054 						/* Gsharedvt case: callee uses a gsharedvt cconv, no conversion is needed */
12055 						MONO_START_BB (cfg, is_gsharedvt_bb);
12056 						MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12057 						ins = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12058 						ins->dreg = call->dreg;
12059 
12060 						MONO_START_BB (cfg, end_bb);
12061 					} else {
12062 						/* Caller uses a normal calling conv */
12063 
12064 						MonoInst *callee = addr;
12065 						MonoInst *call, *localloc_ins;
12066 						MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12067 						int low_bit_reg = alloc_preg (cfg);
12068 
12069 						NEW_BBLOCK (cfg, is_gsharedvt_bb);
12070 						NEW_BBLOCK (cfg, end_bb);
12071 
12072 						MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12073 						MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12074 						MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12075 
12076 						/* Normal case: callee uses a normal cconv, no conversion is needed */
12077 						call = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12078 						MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12079 						/* Gsharedvt case: callee uses a gsharedvt cconv, have to add an in wrapper */
12080 						MONO_START_BB (cfg, is_gsharedvt_bb);
12081 						MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12082 						NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER, fsig);
12083 						MONO_ADD_INS (cfg->cbb, addr);
12084 						/*
12085 						 * ADDR points to a gsharedvt-in wrapper, have to pass <callee, arg> as an extra arg.
12086 						 */
12087 						MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12088 						ins->dreg = alloc_preg (cfg);
12089 						ins->inst_imm = 2 * SIZEOF_VOID_P;
12090 						MONO_ADD_INS (cfg->cbb, ins);
12091 						localloc_ins = ins;
12092 						cfg->flags |= MONO_CFG_HAS_ALLOCA;
12093 						MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12094 						MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12095 
12096 						ins = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12097 						ins->dreg = call->dreg;
12098 						MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12099 
12100 						MONO_START_BB (cfg, end_bb);
12101 					}
12102 				} else {
12103 					/* Same as CEE_CALLI */
12104 					if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
12105 						/*
12106 						 * We pass the address to the gsharedvt trampoline in the rgctx reg
12107 						 */
12108 						MonoInst *callee = addr;
12109 
12110 						addr = emit_get_rgctx_sig (cfg, context_used,
12111 												   fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12112 						ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, callee);
12113 					} else {
12114 						ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
12115 					}
12116 				}
12117 
12118 				if (!MONO_TYPE_IS_VOID (fsig->ret))
12119 					*sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
12120 
12121 				CHECK_CFG_EXCEPTION;
12122 
12123 				ip += 6;
12124 				ins_flag = 0;
12125 				constrained_class = NULL;
12126 				break;
12127 			}
12128 			case CEE_MONO_LDDOMAIN:
12129 				CHECK_STACK_OVF (1);
12130 				EMIT_NEW_PCONST (cfg, ins, cfg->compile_aot ? NULL : cfg->domain);
12131 				ip += 2;
12132 				*sp++ = ins;
12133 				break;
12134 			case CEE_MONO_GET_LAST_ERROR:
12135 				CHECK_OPSIZE (2);
12136 				CHECK_STACK_OVF (1);
12137 
12138 				MONO_INST_NEW (cfg, ins, OP_GET_LAST_ERROR);
12139 				ins->dreg = alloc_dreg (cfg, STACK_I4);
12140 				ins->type = STACK_I4;
12141 				MONO_ADD_INS (cfg->cbb, ins);
12142 
12143 				ip += 2;
12144 				*sp++ = ins;
12145 				break;
12146 			case CEE_MONO_GET_RGCTX_ARG:
12147 				CHECK_OPSIZE (2);
12148 				CHECK_STACK_OVF (1);
12149 
12150 				mono_create_rgctx_var (cfg);
12151 
12152 				MONO_INST_NEW (cfg, ins, OP_MOVE);
12153 				ins->dreg = alloc_dreg (cfg, STACK_PTR);
12154 				ins->sreg1 = cfg->rgctx_var->dreg;
12155 				ins->type = STACK_PTR;
12156 				MONO_ADD_INS (cfg->cbb, ins);
12157 
12158 				ip += 2;
12159 				*sp++ = ins;
12160 				break;
12161 			default:
12162 				g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12163 				break;
12164 			}
12165 			break;
12166 		}
12167 
12168 		case CEE_PREFIX1: {
12169 			CHECK_OPSIZE (2);
12170 			switch (ip [1]) {
12171 			case CEE_ARGLIST: {
12172 				/* somewhat similar to LDTOKEN */
12173 				MonoInst *addr, *vtvar;
12174 				CHECK_STACK_OVF (1);
12175 				vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12176 
12177 				EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12178 				EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12179 
12180 				EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12181 				ins->type = STACK_VTYPE;
12182 				ins->klass = mono_defaults.argumenthandle_class;
12183 				*sp++ = ins;
12184 				ip += 2;
12185 				break;
12186 			}
12187 			case CEE_CEQ:
12188 			case CEE_CGT:
12189 			case CEE_CGT_UN:
12190 			case CEE_CLT:
12191 			case CEE_CLT_UN: {
12192 				MonoInst *cmp, *arg1, *arg2;
12193 
12194 				CHECK_STACK (2);
12195 				sp -= 2;
12196 				arg1 = sp [0];
12197 				arg2 = sp [1];
12198 
12199 				/*
12200 				 * The following transforms:
12201 				 *    CEE_CEQ    into OP_CEQ
12202 				 *    CEE_CGT    into OP_CGT
12203 				 *    CEE_CGT_UN into OP_CGT_UN
12204 				 *    CEE_CLT    into OP_CLT
12205 				 *    CEE_CLT_UN into OP_CLT_UN
12206 				 */
12207 				MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12208 
12209 				MONO_INST_NEW (cfg, ins, cmp->opcode);
12210 				cmp->sreg1 = arg1->dreg;
12211 				cmp->sreg2 = arg2->dreg;
12212 				type_from_op (cfg, cmp, arg1, arg2);
12213 				CHECK_TYPE (cmp);
12214 				add_widen_op (cfg, cmp, &arg1, &arg2);
12215 				if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12216 					cmp->opcode = OP_LCOMPARE;
12217 				else if (arg1->type == STACK_R4)
12218 					cmp->opcode = OP_RCOMPARE;
12219 				else if (arg1->type == STACK_R8)
12220 					cmp->opcode = OP_FCOMPARE;
12221 				else
12222 					cmp->opcode = OP_ICOMPARE;
12223 				MONO_ADD_INS (cfg->cbb, cmp);
12224 				ins->type = STACK_I4;
12225 				ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type);
12226 				type_from_op (cfg, ins, arg1, arg2);
12227 
12228 				if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12229 					/*
12230 					 * The backends expect the fceq opcodes to do the
12231 					 * comparison too.
12232 					 */
12233 					ins->sreg1 = cmp->sreg1;
12234 					ins->sreg2 = cmp->sreg2;
12235 					NULLIFY_INS (cmp);
12236 				}
12237 				MONO_ADD_INS (cfg->cbb, ins);
12238 				*sp++ = ins;
12239 				ip += 2;
12240 				break;
12241 			}
12242 			case CEE_LDFTN: {
12243 				MonoInst *argconst;
12244 				MonoMethod *cil_method;
12245 
12246 				CHECK_STACK_OVF (1);
12247 				CHECK_OPSIZE (6);
12248 				n = read32 (ip + 2);
12249 				cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12250 				CHECK_CFG_ERROR;
12251 
12252 				mono_class_init (cmethod->klass);
12253 
12254 				mono_save_token_info (cfg, image, n, cmethod);
12255 
12256 				context_used = mini_method_check_context_used (cfg, cmethod);
12257 
12258 				cil_method = cmethod;
12259 				if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12260 					emit_method_access_failure (cfg, method, cil_method);
12261 
12262 				if (mono_security_core_clr_enabled ())
12263 					ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12264 
12265 				/*
12266 				 * Optimize the common case of ldftn+delegate creation
12267 				 */
12268 				if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12269 					MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12270 					if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12271 						MonoInst *target_ins, *handle_ins;
12272 						MonoMethod *invoke;
12273 						int invoke_context_used;
12274 
12275 						invoke = mono_get_delegate_invoke (ctor_method->klass);
12276 						if (!invoke || !mono_method_signature (invoke))
12277 							LOAD_ERROR;
12278 
12279 						invoke_context_used = mini_method_check_context_used (cfg, invoke);
12280 
12281 						target_ins = sp [-1];
12282 
12283 						if (mono_security_core_clr_enabled ())
12284 							ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12285 
12286 						if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12287 							/*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12288 							if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12289 								MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12290 								MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12291 							}
12292 						}
12293 
12294 						/* FIXME: SGEN support */
12295 						if (invoke_context_used == 0 || cfg->llvm_only) {
12296 							ip += 6;
12297 							if (cfg->verbose_level > 3)
12298 								g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12299 							if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12300 								sp --;
12301 								*sp = handle_ins;
12302 								CHECK_CFG_EXCEPTION;
12303 								ip += 5;
12304 								sp ++;
12305 								break;
12306 							}
12307 							ip -= 6;
12308 						}
12309 					}
12310 				}
12311 
12312 				argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12313 				ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12314 				*sp++ = ins;
12315 
12316 				ip += 6;
12317 				inline_costs += 10 * num_calls++;
12318 				break;
12319 			}
12320 			case CEE_LDVIRTFTN: {
12321 				MonoInst *args [2];
12322 
12323 				CHECK_STACK (1);
12324 				CHECK_OPSIZE (6);
12325 				n = read32 (ip + 2);
12326 				cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12327 				CHECK_CFG_ERROR;
12328 
12329 				mono_class_init (cmethod->klass);
12330 
12331 				context_used = mini_method_check_context_used (cfg, cmethod);
12332 
12333 				if (mono_security_core_clr_enabled ())
12334 					ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12335 
12336 				/*
12337 				 * Optimize the common case of ldvirtftn+delegate creation
12338 				 */
12339 				if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12340 					MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12341 					if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12342 						MonoInst *target_ins, *handle_ins;
12343 						MonoMethod *invoke;
12344 						int invoke_context_used;
12345 						gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
12346 
12347 						invoke = mono_get_delegate_invoke (ctor_method->klass);
12348 						if (!invoke || !mono_method_signature (invoke))
12349 							LOAD_ERROR;
12350 
12351 						invoke_context_used = mini_method_check_context_used (cfg, invoke);
12352 
12353 						target_ins = sp [-1];
12354 
12355 						if (mono_security_core_clr_enabled ())
12356 							ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12357 
12358 						/* FIXME: SGEN support */
12359 						if (invoke_context_used == 0 || cfg->llvm_only) {
12360 							ip += 6;
12361 							if (cfg->verbose_level > 3)
12362 								g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12363 							if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
12364 								sp -= 2;
12365 								*sp = handle_ins;
12366 								CHECK_CFG_EXCEPTION;
12367 								ip += 5;
12368 								sp ++;
12369 								break;
12370 							}
12371 							ip -= 6;
12372 						}
12373 					}
12374 				}
12375 
12376 				--sp;
12377 				args [0] = *sp;
12378 
12379 				args [1] = emit_get_rgctx_method (cfg, context_used,
12380 												  cmethod, MONO_RGCTX_INFO_METHOD);
12381 
12382 				if (context_used)
12383 					*sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12384 				else
12385 					*sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12386 
12387 				ip += 6;
12388 				inline_costs += 10 * num_calls++;
12389 				break;
12390 			}
12391 			case CEE_LDARG:
12392 				CHECK_STACK_OVF (1);
12393 				CHECK_OPSIZE (4);
12394 				n = read16 (ip + 2);
12395 				CHECK_ARG (n);
12396 				if (is_adressable_valuetype_load (cfg, ip + 4, cfg->arg_types[n])) {
12397 					EMIT_NEW_ARGLOADA (cfg, ins, n);
12398 				} else {
12399 					EMIT_NEW_ARGLOAD (cfg, ins, n);
12400 				}
12401 				*sp++ = ins;
12402 				ip += 4;
12403 				break;
12404 			case CEE_LDARGA:
12405 				CHECK_STACK_OVF (1);
12406 				CHECK_OPSIZE (4);
12407 				n = read16 (ip + 2);
12408 				CHECK_ARG (n);
12409 				NEW_ARGLOADA (cfg, ins, n);
12410 				MONO_ADD_INS (cfg->cbb, ins);
12411 				*sp++ = ins;
12412 				ip += 4;
12413 				break;
12414 			case CEE_STARG:
12415 				CHECK_STACK (1);
12416 				--sp;
12417 				CHECK_OPSIZE (4);
12418 				n = read16 (ip + 2);
12419 				CHECK_ARG (n);
12420 				if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12421 					UNVERIFIED;
12422 				emit_starg_ir (cfg, sp, n);
12423 				ip += 4;
12424 				break;
12425 			case CEE_LDLOC:
12426 				CHECK_STACK_OVF (1);
12427 				CHECK_OPSIZE (4);
12428 				n = read16 (ip + 2);
12429 				CHECK_LOCAL (n);
12430 				if (is_adressable_valuetype_load (cfg, ip + 4, header->locals[n])) {
12431 					EMIT_NEW_LOCLOADA (cfg, ins, n);
12432 				} else {
12433 					EMIT_NEW_LOCLOAD (cfg, ins, n);
12434 				}
12435 				*sp++ = ins;
12436 				ip += 4;
12437 				break;
12438 			case CEE_LDLOCA: {
12439 				unsigned char *tmp_ip;
12440 				CHECK_STACK_OVF (1);
12441 				CHECK_OPSIZE (4);
12442 				n = read16 (ip + 2);
12443 				CHECK_LOCAL (n);
12444 
12445 				if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12446 					ip = tmp_ip;
12447 					inline_costs += 1;
12448 					break;
12449 				}
12450 
12451 				EMIT_NEW_LOCLOADA (cfg, ins, n);
12452 				*sp++ = ins;
12453 				ip += 4;
12454 				break;
12455 			}
12456 			case CEE_STLOC:
12457 				CHECK_STACK (1);
12458 				--sp;
12459 				CHECK_OPSIZE (4);
12460 				n = read16 (ip + 2);
12461 				CHECK_LOCAL (n);
12462 				if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12463 					UNVERIFIED;
12464 				emit_stloc_ir (cfg, sp, header, n);
12465 				ip += 4;
12466 				inline_costs += 1;
12467 				break;
12468 			case CEE_LOCALLOC: {
12469 				CHECK_STACK (1);
12470 				MonoBasicBlock *non_zero_bb, *end_bb;
12471 				int alloc_ptr = alloc_preg (cfg);
12472 				--sp;
12473 				if (sp != stack_start)
12474 					UNVERIFIED;
12475 				if (cfg->method != method)
12476 					/*
12477 					 * Inlining this into a loop in a parent could lead to
12478 					 * stack overflows which is different behavior than the
12479 					 * non-inlined case, thus disable inlining in this case.
12480 					 */
12481 					INLINE_FAILURE("localloc");
12482 
12483 				NEW_BBLOCK (cfg, non_zero_bb);
12484 				NEW_BBLOCK (cfg, end_bb);
12485 
12486 				/* if size != zero */
12487 				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
12488 				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_zero_bb);
12489 
12490 				//size is zero, so result is NULL
12491 				MONO_EMIT_NEW_PCONST (cfg, alloc_ptr, NULL);
12492 				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12493 
12494 				MONO_START_BB (cfg, non_zero_bb);
12495 				MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12496 				ins->dreg = alloc_ptr;
12497 				ins->sreg1 = sp [0]->dreg;
12498 				ins->type = STACK_PTR;
12499 				MONO_ADD_INS (cfg->cbb, ins);
12500 
12501 				cfg->flags |= MONO_CFG_HAS_ALLOCA;
12502 				if (init_locals)
12503 					ins->flags |= MONO_INST_INIT;
12504 
12505 				MONO_START_BB (cfg, end_bb);
12506 				EMIT_NEW_UNALU (cfg, ins, OP_MOVE, alloc_preg (cfg), alloc_ptr);
12507 				ins->type = STACK_PTR;
12508 
12509 				*sp++ = ins;
12510 				ip += 2;
12511 				break;
12512 			}
12513 			case CEE_ENDFILTER: {
12514 				MonoExceptionClause *clause, *nearest;
12515 				int cc;
12516 
12517 				CHECK_STACK (1);
12518 				--sp;
12519 				if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12520 					UNVERIFIED;
12521 				MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12522 				ins->sreg1 = (*sp)->dreg;
12523 				MONO_ADD_INS (cfg->cbb, ins);
12524 				start_new_bblock = 1;
12525 				ip += 2;
12526 
12527 				nearest = NULL;
12528 				for (cc = 0; cc < header->num_clauses; ++cc) {
12529 					clause = &header->clauses [cc];
12530 					if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12531 						((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12532 					    (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
12533 						nearest = clause;
12534 				}
12535 				g_assert (nearest);
12536 				if ((ip - header->code) != nearest->handler_offset)
12537 					UNVERIFIED;
12538 
12539 				break;
12540 			}
12541 			case CEE_UNALIGNED_:
12542 				ins_flag |= MONO_INST_UNALIGNED;
12543 				/* FIXME: record alignment? we can assume 1 for now */
12544 				CHECK_OPSIZE (3);
12545 				ip += 3;
12546 				break;
12547 			case CEE_VOLATILE_:
12548 				ins_flag |= MONO_INST_VOLATILE;
12549 				ip += 2;
12550 				break;
12551 			case CEE_TAIL_:
12552 				ins_flag   |= MONO_INST_TAILCALL;
12553 				cfg->flags |= MONO_CFG_HAS_TAIL;
12554 				/* Can't inline tail calls at this time */
12555 				inline_costs += 100000;
12556 				ip += 2;
12557 				break;
12558 			case CEE_INITOBJ:
12559 				CHECK_STACK (1);
12560 				--sp;
12561 				CHECK_OPSIZE (6);
12562 				token = read32 (ip + 2);
12563 				klass = mini_get_class (method, token, generic_context);
12564 				CHECK_TYPELOAD (klass);
12565 				if (generic_class_is_reference_type (cfg, klass))
12566 					MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12567 				else
12568 					mini_emit_initobj (cfg, *sp, NULL, klass);
12569 				ip += 6;
12570 				inline_costs += 1;
12571 				break;
12572 			case CEE_CONSTRAINED_:
12573 				CHECK_OPSIZE (6);
12574 				token = read32 (ip + 2);
12575 				constrained_class = mini_get_class (method, token, generic_context);
12576 				CHECK_TYPELOAD (constrained_class);
12577 				ip += 6;
12578 				break;
12579 			case CEE_CPBLK:
12580 				CHECK_STACK (3);
12581 				sp -= 3;
12582 				mini_emit_memory_copy_bytes (cfg, sp [0], sp [1], sp [2], ins_flag);
12583 				ip += 2;
12584 				ins_flag = 0;
12585 				inline_costs += 1;
12586 				break;
12587 			case CEE_INITBLK:
12588 				CHECK_STACK (3);
12589 				sp -= 3;
12590 				mini_emit_memory_init_bytes (cfg, sp [0], sp [1], sp [2], ins_flag);
12591 				ip += 2;
12592 				ins_flag = 0;
12593 				inline_costs += 1;
12594 				break;
12595 			case CEE_NO_:
12596 				CHECK_OPSIZE (3);
12597 				if (ip [2] & 0x1)
12598 					ins_flag |= MONO_INST_NOTYPECHECK;
12599 				if (ip [2] & 0x2)
12600 					ins_flag |= MONO_INST_NORANGECHECK;
12601 				/* we ignore the no-nullcheck for now since we
12602 				 * really do it explicitly only when doing callvirt->call
12603 				 */
12604 				ip += 3;
12605 				break;
12606 			case CEE_RETHROW: {
12607 				MonoInst *load;
12608 				int handler_offset = -1;
12609 
12610 				for (i = 0; i < header->num_clauses; ++i) {
12611 					MonoExceptionClause *clause = &header->clauses [i];
12612 					if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12613 						handler_offset = clause->handler_offset;
12614 						break;
12615 					}
12616 				}
12617 
12618 				cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
12619 
12620 				if (handler_offset == -1)
12621 					UNVERIFIED;
12622 
12623 				EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12624 				MONO_INST_NEW (cfg, ins, OP_RETHROW);
12625 				ins->sreg1 = load->dreg;
12626 				MONO_ADD_INS (cfg->cbb, ins);
12627 
12628 				MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12629 				MONO_ADD_INS (cfg->cbb, ins);
12630 
12631 				sp = stack_start;
12632 				link_bblock (cfg, cfg->cbb, end_bblock);
12633 				start_new_bblock = 1;
12634 				ip += 2;
12635 				break;
12636 			}
12637 			case CEE_SIZEOF: {
12638 				guint32 val;
12639 				int ialign;
12640 
12641 				CHECK_STACK_OVF (1);
12642 				CHECK_OPSIZE (6);
12643 				token = read32 (ip + 2);
12644 				if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12645 					MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
12646 					CHECK_CFG_ERROR;
12647 
12648 					val = mono_type_size (type, &ialign);
12649 				} else {
12650 					MonoClass *klass = mini_get_class (method, token, generic_context);
12651 					CHECK_TYPELOAD (klass);
12652 
12653 					val = mono_type_size (&klass->byval_arg, &ialign);
12654 
12655 					if (mini_is_gsharedvt_klass (klass))
12656 						GSHAREDVT_FAILURE (*ip);
12657 				}
12658 				EMIT_NEW_ICONST (cfg, ins, val);
12659 				*sp++= ins;
12660 				ip += 6;
12661 				break;
12662 			}
12663 			case CEE_REFANYTYPE: {
12664 				MonoInst *src_var, *src;
12665 
12666 				GSHAREDVT_FAILURE (*ip);
12667 
12668 				CHECK_STACK (1);
12669 				--sp;
12670 
12671 				// FIXME:
12672 				src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12673 				if (!src_var)
12674 					src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12675 				EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12676 				EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
12677 				*sp++ = ins;
12678 				ip += 2;
12679 				break;
12680 			}
12681 			case CEE_READONLY_:
12682 				readonly = TRUE;
12683 				ip += 2;
12684 				break;
12685 
12686 			case CEE_UNUSED56:
12687 			case CEE_UNUSED57:
12688 			case CEE_UNUSED70:
12689 			case CEE_UNUSED:
12690 			case CEE_UNUSED99:
12691 				UNVERIFIED;
12692 
12693 			default:
12694 				g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12695 				UNVERIFIED;
12696 			}
12697 			break;
12698 		}
12699 		case CEE_UNUSED58:
12700 		case CEE_UNUSED1:
12701 			UNVERIFIED;
12702 
12703 		default:
12704 			g_warning ("opcode 0x%02x not handled", *ip);
12705 			UNVERIFIED;
12706 		}
12707 	}
12708 	if (start_new_bblock != 1)
12709 		UNVERIFIED;
12710 
12711 	cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
12712 	if (cfg->cbb->next_bb) {
12713 		/* This could already be set because of inlining, #693905 */
12714 		MonoBasicBlock *bb = cfg->cbb;
12715 
12716 		while (bb->next_bb)
12717 			bb = bb->next_bb;
12718 		bb->next_bb = end_bblock;
12719 	} else {
12720 		cfg->cbb->next_bb = end_bblock;
12721 	}
12722 
12723 	if (cfg->method == method && cfg->domainvar) {
12724 		MonoInst *store;
12725 		MonoInst *get_domain;
12726 
12727 		cfg->cbb = init_localsbb;
12728 
12729 		get_domain = mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
12730 		NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12731 		MONO_ADD_INS (cfg->cbb, store);
12732 	}
12733 
12734 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12735 	if (cfg->compile_aot)
12736 		/* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12737 		mono_get_got_var (cfg);
12738 #endif
12739 
12740 	if (cfg->method == method && cfg->got_var)
12741 		mono_emit_load_got_addr (cfg);
12742 
12743 	if (init_localsbb) {
12744 		cfg->cbb = init_localsbb;
12745 		cfg->ip = NULL;
12746 		for (i = 0; i < header->num_locals; ++i) {
12747 			/*
12748 			 * Vtype initialization might need to be done after CEE_JIT_ATTACH, since it can make calls to memset (),
12749 			 * which need the trampoline code to work.
12750 			 */
12751 			if (MONO_TYPE_ISSTRUCT (header->locals [i]))
12752 				cfg->cbb = init_localsbb2;
12753 			else
12754 				cfg->cbb = init_localsbb;
12755 			emit_init_local (cfg, i, header->locals [i], init_locals);
12756 		}
12757 	}
12758 
12759 	if (cfg->init_ref_vars && cfg->method == method) {
12760 		/* Emit initialization for ref vars */
		// FIXME: Avoid duplicate initialization for IL locals.
12762 		for (i = 0; i < cfg->num_varinfo; ++i) {
12763 			MonoInst *ins = cfg->varinfo [i];
12764 
12765 			if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12766 				MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
12767 		}
12768 	}
12769 
12770 	if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
12771 		cfg->cbb = init_localsbb;
12772 		emit_push_lmf (cfg);
12773 	}
12774 
12775 	cfg->cbb = init_localsbb;
12776 	mini_profiler_emit_enter (cfg);
12777 
12778 	if (seq_points) {
12779 		MonoBasicBlock *bb;
12780 
12781 		/*
		 * Make seq points at backward branch targets interruptible.
12783 		 */
12784 		for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
12785 			if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
12786 				bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
12787 	}
12788 
12789 	/* Add a sequence point for method entry/exit events */
12790 	if (seq_points && cfg->gen_sdb_seq_points) {
12791 		NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
12792 		MONO_ADD_INS (init_localsbb, ins);
12793 		NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
12794 		MONO_ADD_INS (cfg->bb_exit, ins);
12795 	}
12796 
12797 	/*
12798 	 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
12799 	 * the code they refer to was dead (#11880).
12800 	 */
12801 	if (sym_seq_points) {
12802 		for (i = 0; i < header->code_size; ++i) {
12803 			if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
12804 				MonoInst *ins;
12805 
12806 				NEW_SEQ_POINT (cfg, ins, i, FALSE);
12807 				mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
12808 			}
12809 		}
12810 	}
12811 
12812 	cfg->ip = NULL;
12813 
12814 	if (cfg->method == method) {
12815 		MonoBasicBlock *bb;
12816 		for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12817 			if (bb == cfg->bb_init)
12818 				bb->region = -1;
12819 			else
12820 				bb->region = mono_find_block_region (cfg, bb->real_offset);
12821 			if (cfg->spvars)
12822 				mono_create_spvar_for_region (cfg, bb->region);
12823 			if (cfg->verbose_level > 2)
12824 				printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
12825 		}
12826 	} else {
12827 		MonoBasicBlock *bb;
12828 		/* get_most_deep_clause () in mini-llvm.c depends on this for inlined bblocks */
12829 		for (bb = start_bblock; bb != end_bblock; bb  = bb->next_bb) {
12830 			bb->real_offset = inline_offset;
12831 		}
12832 	}
12833 
12834 	if (inline_costs < 0) {
12835 		char *mname;
12836 
12837 		/* Method is too large */
12838 		mname = mono_method_full_name (method, TRUE);
12839 		mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s is too complex.", mname));
12840 		g_free (mname);
12841 	}
12842 
12843 	if ((cfg->verbose_level > 2) && (cfg->method == method))
12844 		mono_print_code (cfg, "AFTER METHOD-TO-IR");
12845 
12846 	goto cleanup;
12847 
12848 mono_error_exit:
12849 	g_assert (!mono_error_ok (&cfg->error));
12850 	goto cleanup;
12851 
12852  exception_exit:
12853 	g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
12854 	goto cleanup;
12855 
12856  unverified:
12857 	set_exception_type_from_invalid_il (cfg, method, ip);
12858 	goto cleanup;
12859 
12860  cleanup:
12861 	g_slist_free (class_inits);
12862 	mono_basic_block_free (original_bb);
12863 	cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
12864 	if (cfg->exception_type)
12865 		return -1;
12866 	else
12867 		return inline_costs;
12868 }
12869 
12870 static int
store_membase_reg_to_store_membase_imm(int opcode)12871 store_membase_reg_to_store_membase_imm (int opcode)
12872 {
12873 	switch (opcode) {
12874 	case OP_STORE_MEMBASE_REG:
12875 		return OP_STORE_MEMBASE_IMM;
12876 	case OP_STOREI1_MEMBASE_REG:
12877 		return OP_STOREI1_MEMBASE_IMM;
12878 	case OP_STOREI2_MEMBASE_REG:
12879 		return OP_STOREI2_MEMBASE_IMM;
12880 	case OP_STOREI4_MEMBASE_REG:
12881 		return OP_STOREI4_MEMBASE_IMM;
12882 	case OP_STOREI8_MEMBASE_REG:
12883 		return OP_STOREI8_MEMBASE_IMM;
12884 	default:
12885 		g_assert_not_reached ();
12886 	}
12887 
12888 	return -1;
12889 }
12890 
12891 int
mono_op_to_op_imm(int opcode)12892 mono_op_to_op_imm (int opcode)
12893 {
12894 	switch (opcode) {
12895 	case OP_IADD:
12896 		return OP_IADD_IMM;
12897 	case OP_ISUB:
12898 		return OP_ISUB_IMM;
12899 	case OP_IDIV:
12900 		return OP_IDIV_IMM;
12901 	case OP_IDIV_UN:
12902 		return OP_IDIV_UN_IMM;
12903 	case OP_IREM:
12904 		return OP_IREM_IMM;
12905 	case OP_IREM_UN:
12906 		return OP_IREM_UN_IMM;
12907 	case OP_IMUL:
12908 		return OP_IMUL_IMM;
12909 	case OP_IAND:
12910 		return OP_IAND_IMM;
12911 	case OP_IOR:
12912 		return OP_IOR_IMM;
12913 	case OP_IXOR:
12914 		return OP_IXOR_IMM;
12915 	case OP_ISHL:
12916 		return OP_ISHL_IMM;
12917 	case OP_ISHR:
12918 		return OP_ISHR_IMM;
12919 	case OP_ISHR_UN:
12920 		return OP_ISHR_UN_IMM;
12921 
12922 	case OP_LADD:
12923 		return OP_LADD_IMM;
12924 	case OP_LSUB:
12925 		return OP_LSUB_IMM;
12926 	case OP_LAND:
12927 		return OP_LAND_IMM;
12928 	case OP_LOR:
12929 		return OP_LOR_IMM;
12930 	case OP_LXOR:
12931 		return OP_LXOR_IMM;
12932 	case OP_LSHL:
12933 		return OP_LSHL_IMM;
12934 	case OP_LSHR:
12935 		return OP_LSHR_IMM;
12936 	case OP_LSHR_UN:
12937 		return OP_LSHR_UN_IMM;
12938 #if SIZEOF_REGISTER == 8
12939 	case OP_LREM:
12940 		return OP_LREM_IMM;
12941 #endif
12942 
12943 	case OP_COMPARE:
12944 		return OP_COMPARE_IMM;
12945 	case OP_ICOMPARE:
12946 		return OP_ICOMPARE_IMM;
12947 	case OP_LCOMPARE:
12948 		return OP_LCOMPARE_IMM;
12949 
12950 	case OP_STORE_MEMBASE_REG:
12951 		return OP_STORE_MEMBASE_IMM;
12952 	case OP_STOREI1_MEMBASE_REG:
12953 		return OP_STOREI1_MEMBASE_IMM;
12954 	case OP_STOREI2_MEMBASE_REG:
12955 		return OP_STOREI2_MEMBASE_IMM;
12956 	case OP_STOREI4_MEMBASE_REG:
12957 		return OP_STOREI4_MEMBASE_IMM;
12958 
12959 #if defined(TARGET_X86) || defined (TARGET_AMD64)
12960 	case OP_X86_PUSH:
12961 		return OP_X86_PUSH_IMM;
12962 	case OP_X86_COMPARE_MEMBASE_REG:
12963 		return OP_X86_COMPARE_MEMBASE_IMM;
12964 #endif
12965 #if defined(TARGET_AMD64)
12966 	case OP_AMD64_ICOMPARE_MEMBASE_REG:
12967 		return OP_AMD64_ICOMPARE_MEMBASE_IMM;
12968 #endif
12969 	case OP_VOIDCALL_REG:
12970 		return OP_VOIDCALL;
12971 	case OP_CALL_REG:
12972 		return OP_CALL;
12973 	case OP_LCALL_REG:
12974 		return OP_LCALL;
12975 	case OP_FCALL_REG:
12976 		return OP_FCALL;
12977 	case OP_LOCALLOC:
12978 		return OP_LOCALLOC_IMM;
12979 	}
12980 
12981 	return -1;
12982 }
12983 
12984 static int
ldind_to_load_membase(int opcode)12985 ldind_to_load_membase (int opcode)
12986 {
12987 	switch (opcode) {
12988 	case CEE_LDIND_I1:
12989 		return OP_LOADI1_MEMBASE;
12990 	case CEE_LDIND_U1:
12991 		return OP_LOADU1_MEMBASE;
12992 	case CEE_LDIND_I2:
12993 		return OP_LOADI2_MEMBASE;
12994 	case CEE_LDIND_U2:
12995 		return OP_LOADU2_MEMBASE;
12996 	case CEE_LDIND_I4:
12997 		return OP_LOADI4_MEMBASE;
12998 	case CEE_LDIND_U4:
12999 		return OP_LOADU4_MEMBASE;
13000 	case CEE_LDIND_I:
13001 		return OP_LOAD_MEMBASE;
13002 	case CEE_LDIND_REF:
13003 		return OP_LOAD_MEMBASE;
13004 	case CEE_LDIND_I8:
13005 		return OP_LOADI8_MEMBASE;
13006 	case CEE_LDIND_R4:
13007 		return OP_LOADR4_MEMBASE;
13008 	case CEE_LDIND_R8:
13009 		return OP_LOADR8_MEMBASE;
13010 	default:
13011 		g_assert_not_reached ();
13012 	}
13013 
13014 	return -1;
13015 }
13016 
13017 static int
stind_to_store_membase(int opcode)13018 stind_to_store_membase (int opcode)
13019 {
13020 	switch (opcode) {
13021 	case CEE_STIND_I1:
13022 		return OP_STOREI1_MEMBASE_REG;
13023 	case CEE_STIND_I2:
13024 		return OP_STOREI2_MEMBASE_REG;
13025 	case CEE_STIND_I4:
13026 		return OP_STOREI4_MEMBASE_REG;
13027 	case CEE_STIND_I:
13028 	case CEE_STIND_REF:
13029 		return OP_STORE_MEMBASE_REG;
13030 	case CEE_STIND_I8:
13031 		return OP_STOREI8_MEMBASE_REG;
13032 	case CEE_STIND_R4:
13033 		return OP_STORER4_MEMBASE_REG;
13034 	case CEE_STIND_R8:
13035 		return OP_STORER8_MEMBASE_REG;
13036 	default:
13037 		g_assert_not_reached ();
13038 	}
13039 
13040 	return -1;
13041 }
13042 
int
mono_load_membase_to_load_mem (int opcode)
{
	/*
	 * Map a LOAD*_MEMBASE opcode to the corresponding absolute-address
	 * LOAD*_MEM opcode, or return -1 if the target has none.
	 */
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_LOAD_MEMBASE:   return OP_LOAD_MEM;
	case OP_LOADU1_MEMBASE: return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE: return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE: return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE: return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	case OP_LOADI8_MEMBASE: return OP_LOADI8_MEM;
#endif
	}
#endif

	return -1;
}
13068 
static inline int
op_to_op_dest_membase (int store_opcode, int opcode)
{
	/*
	 * Return an opcode which folds OPCODE and the following STORE_OPCODE
	 * into a single read-modify-write membase instruction, or -1 if no
	 * such fusion is possible on this target.
	 */
#if defined(TARGET_X86)
	if (store_opcode != OP_STORE_MEMBASE_REG && store_opcode != OP_STOREI4_MEMBASE_REG)
		return -1;

	switch (opcode) {
	case OP_IADD:     return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:     return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:     return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:      return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:     return OP_X86_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_IADD_IMM: return OP_X86_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_ISUB_IMM: return OP_X86_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_IAND_IMM: return OP_X86_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_IOR_IMM:  return OP_X86_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_IXOR_IMM: return OP_X86_XOR_MEMBASE_IMM;
	case OP_MOVE:
		/* The store itself performs the move */
		return OP_NOP;
	}
#endif

#if defined(TARGET_AMD64)
	if (store_opcode != OP_STORE_MEMBASE_REG && store_opcode != OP_STOREI4_MEMBASE_REG && store_opcode != OP_STOREI8_MEMBASE_REG)
		return -1;

	switch (opcode) {
	case OP_IADD:     return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:     return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:     return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:      return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:     return OP_X86_XOR_MEMBASE_REG;
	case OP_IADD_IMM: return OP_X86_ADD_MEMBASE_IMM;
	case OP_ISUB_IMM: return OP_X86_SUB_MEMBASE_IMM;
	case OP_IAND_IMM: return OP_X86_AND_MEMBASE_IMM;
	case OP_IOR_IMM:  return OP_X86_OR_MEMBASE_IMM;
	case OP_IXOR_IMM: return OP_X86_XOR_MEMBASE_IMM;
	case OP_LADD:     return OP_AMD64_ADD_MEMBASE_REG;
	case OP_LSUB:     return OP_AMD64_SUB_MEMBASE_REG;
	case OP_LAND:     return OP_AMD64_AND_MEMBASE_REG;
	case OP_LOR:      return OP_AMD64_OR_MEMBASE_REG;
	case OP_LXOR:     return OP_AMD64_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_LADD_IMM: return OP_AMD64_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_LSUB_IMM: return OP_AMD64_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_LAND_IMM: return OP_AMD64_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_LOR_IMM:  return OP_AMD64_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_LXOR_IMM: return OP_AMD64_XOR_MEMBASE_IMM;
	case OP_MOVE:
		/* The store itself performs the move */
		return OP_NOP;
	}
#endif

	return -1;
}
13164 
static inline int
op_to_op_store_membase (int store_opcode, int opcode)
{
	/*
	 * Return an opcode which computes the result of OPCODE directly into
	 * its destination memory location (a setcc-to-memory fusion with an
	 * I1 store), or -1 if no such fusion is possible.
	 *
	 * Fix: the original switch fell through from OP_ICEQ into OP_CNE when
	 * the store was not an I1 store. That was only harmless because both
	 * arms test the same condition; add explicit breaks so the intent is
	 * clear and a future case can't be silently entered.
	 */
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_ICEQ:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETEQ_MEMBASE;
		break;
	case OP_CNE:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETNE_MEMBASE;
		break;
	}
#endif

	return -1;
}
13181 
static inline int
op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
{
	/*
	 * Try to fold a preceding load (LOAD_OPCODE) into the first source
	 * operand of OPCODE, returning the fused x86/amd64 membase opcode,
	 * or -1 if no fusion is possible.
	 */
#ifdef TARGET_X86
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	/* Only fold pointer-sized or 32 bit loads */
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_X86_PUSH:
		return OP_X86_PUSH_MEMBASE;
	case OP_COMPARE_IMM:
	case OP_ICOMPARE_IMM:
		return OP_X86_COMPARE_MEMBASE_IMM;
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_MEMBASE_REG;
	}
#endif

#ifdef TARGET_AMD64
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	switch (opcode) {
	case OP_X86_PUSH:
		/* Push wants a full 64 bit operand (OP_LOAD_MEMBASE is 32 bit under ilp32) */
		if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_X86_PUSH_MEMBASE;
		break;
		/* FIXME: This only works for 32 bit immediates
	case OP_COMPARE_IMM:
	case OP_LCOMPARE_IMM:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_IMM;
		*/
	case OP_ICOMPARE_IMM:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_IMM;
		break;
	case OP_COMPARE:
	case OP_LCOMPARE:
		/* Under ilp32, a pointer-sized compare is a 32 bit compare */
		if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_REG;
		break;
	case OP_ICOMPARE:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		break;
	}
#endif

	return -1;
}
13245 
13246 static inline int
op_to_op_src2_membase(MonoCompile * cfg,int load_opcode,int opcode)13247 op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
13248 {
13249 #ifdef TARGET_X86
13250 	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13251 		return -1;
13252 
13253 	switch (opcode) {
13254 	case OP_COMPARE:
13255 	case OP_ICOMPARE:
13256 		return OP_X86_COMPARE_REG_MEMBASE;
13257 	case OP_IADD:
13258 		return OP_X86_ADD_REG_MEMBASE;
13259 	case OP_ISUB:
13260 		return OP_X86_SUB_REG_MEMBASE;
13261 	case OP_IAND:
13262 		return OP_X86_AND_REG_MEMBASE;
13263 	case OP_IOR:
13264 		return OP_X86_OR_REG_MEMBASE;
13265 	case OP_IXOR:
13266 		return OP_X86_XOR_REG_MEMBASE;
13267 	}
13268 #endif
13269 
13270 #ifdef TARGET_AMD64
13271 	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
13272 		switch (opcode) {
13273 		case OP_ICOMPARE:
13274 			return OP_AMD64_ICOMPARE_REG_MEMBASE;
13275 		case OP_IADD:
13276 			return OP_X86_ADD_REG_MEMBASE;
13277 		case OP_ISUB:
13278 			return OP_X86_SUB_REG_MEMBASE;
13279 		case OP_IAND:
13280 			return OP_X86_AND_REG_MEMBASE;
13281 		case OP_IOR:
13282 			return OP_X86_OR_REG_MEMBASE;
13283 		case OP_IXOR:
13284 			return OP_X86_XOR_REG_MEMBASE;
13285 		}
13286 	} else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
13287 		switch (opcode) {
13288 		case OP_COMPARE:
13289 		case OP_LCOMPARE:
13290 			return OP_AMD64_COMPARE_REG_MEMBASE;
13291 		case OP_LADD:
13292 			return OP_AMD64_ADD_REG_MEMBASE;
13293 		case OP_LSUB:
13294 			return OP_AMD64_SUB_REG_MEMBASE;
13295 		case OP_LAND:
13296 			return OP_AMD64_AND_REG_MEMBASE;
13297 		case OP_LOR:
13298 			return OP_AMD64_OR_REG_MEMBASE;
13299 		case OP_LXOR:
13300 			return OP_AMD64_XOR_REG_MEMBASE;
13301 		}
13302 	}
13303 #endif
13304 
13305 	return -1;
13306 }
13307 
int
mono_op_to_op_imm_noemul (int opcode)
{
	/*
	 * Like mono_op_to_op_imm (), but returns -1 for opcodes whose
	 * immediate forms would be emulated with helper calls on this target.
	 */
	switch (opcode) {
#if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
	/* Long shifts are emulated on 32 bit targets */
	case OP_LSHR: case OP_LSHL: case OP_LSHR_UN:
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
	/* Integer division/remainder is emulated */
	case OP_IDIV: case OP_IDIV_UN: case OP_IREM: case OP_IREM_UN:
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV)
	/* Integer multiplication is emulated */
	case OP_IMUL:
		return -1;
#endif
	default:
		return mono_op_to_op_imm (opcode);
	}
}
13333 
13334 /**
13335  * mono_handle_global_vregs:
13336  *
13337  *   Make vregs used in more than one bblock 'global', i.e. allocate a variable
13338  * for them.
13339  */
13340 void
mono_handle_global_vregs(MonoCompile * cfg)13341 mono_handle_global_vregs (MonoCompile *cfg)
13342 {
13343 	gint32 *vreg_to_bb;
13344 	MonoBasicBlock *bb;
13345 	int i, pos;
13346 
13347 	vreg_to_bb = (gint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
13348 
13349 #ifdef MONO_ARCH_SIMD_INTRINSICS
13350 	if (cfg->uses_simd_intrinsics)
13351 		mono_simd_simplify_indirection (cfg);
13352 #endif
13353 
13354 	/* Find local vregs used in more than one bb */
13355 	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13356 		MonoInst *ins = bb->code;
13357 		int block_num = bb->block_num;
13358 
13359 		if (cfg->verbose_level > 2)
13360 			printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
13361 
13362 		cfg->cbb = bb;
13363 		for (; ins; ins = ins->next) {
13364 			const char *spec = INS_INFO (ins->opcode);
13365 			int regtype = 0, regindex;
13366 			gint32 prev_bb;
13367 
13368 			if (G_UNLIKELY (cfg->verbose_level > 2))
13369 				mono_print_ins (ins);
13370 
13371 			g_assert (ins->opcode >= MONO_CEE_LAST);
13372 
13373 			for (regindex = 0; regindex < 4; regindex ++) {
13374 				int vreg = 0;
13375 
13376 				if (regindex == 0) {
13377 					regtype = spec [MONO_INST_DEST];
13378 					if (regtype == ' ')
13379 						continue;
13380 					vreg = ins->dreg;
13381 				} else if (regindex == 1) {
13382 					regtype = spec [MONO_INST_SRC1];
13383 					if (regtype == ' ')
13384 						continue;
13385 					vreg = ins->sreg1;
13386 				} else if (regindex == 2) {
13387 					regtype = spec [MONO_INST_SRC2];
13388 					if (regtype == ' ')
13389 						continue;
13390 					vreg = ins->sreg2;
13391 				} else if (regindex == 3) {
13392 					regtype = spec [MONO_INST_SRC3];
13393 					if (regtype == ' ')
13394 						continue;
13395 					vreg = ins->sreg3;
13396 				}
13397 
13398 #if SIZEOF_REGISTER == 4
13399 				/* In the LLVM case, the long opcodes are not decomposed */
13400 				if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
13401 					/*
13402 					 * Since some instructions reference the original long vreg,
13403 					 * and some reference the two component vregs, it is quite hard
13404 					 * to determine when it needs to be global. So be conservative.
13405 					 */
13406 					if (!get_vreg_to_inst (cfg, vreg)) {
13407 						mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13408 
13409 						if (cfg->verbose_level > 2)
13410 							printf ("LONG VREG R%d made global.\n", vreg);
13411 					}
13412 
13413 					/*
13414 					 * Make the component vregs volatile since the optimizations can
13415 					 * get confused otherwise.
13416 					 */
13417 					get_vreg_to_inst (cfg, MONO_LVREG_LS (vreg))->flags |= MONO_INST_VOLATILE;
13418 					get_vreg_to_inst (cfg, MONO_LVREG_MS (vreg))->flags |= MONO_INST_VOLATILE;
13419 				}
13420 #endif
13421 
13422 				g_assert (vreg != -1);
13423 
13424 				prev_bb = vreg_to_bb [vreg];
13425 				if (prev_bb == 0) {
13426 					/* 0 is a valid block num */
13427 					vreg_to_bb [vreg] = block_num + 1;
13428 				} else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
13429 					if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
13430 						continue;
13431 
13432 					if (!get_vreg_to_inst (cfg, vreg)) {
13433 						if (G_UNLIKELY (cfg->verbose_level > 2))
13434 							printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
13435 
13436 						switch (regtype) {
13437 						case 'i':
13438 							if (vreg_is_ref (cfg, vreg))
13439 								mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
13440 							else
13441 								mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
13442 							break;
13443 						case 'l':
13444 							mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13445 							break;
13446 						case 'f':
13447 							mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13448 							break;
13449 						case 'v':
13450 						case 'x':
13451 							mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13452 							break;
13453 						default:
13454 							g_assert_not_reached ();
13455 						}
13456 					}
13457 
13458 					/* Flag as having been used in more than one bb */
13459 					vreg_to_bb [vreg] = -1;
13460 				}
13461 			}
13462 		}
13463 	}
13464 
13465 	/* If a variable is used in only one bblock, convert it into a local vreg */
13466 	for (i = 0; i < cfg->num_varinfo; i++) {
13467 		MonoInst *var = cfg->varinfo [i];
13468 		MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13469 
13470 		switch (var->type) {
13471 		case STACK_I4:
13472 		case STACK_OBJ:
13473 		case STACK_PTR:
13474 		case STACK_MP:
13475 		case STACK_VTYPE:
13476 #if SIZEOF_REGISTER == 8
13477 		case STACK_I8:
13478 #endif
13479 #if !defined(TARGET_X86)
13480 		/* Enabling this screws up the fp stack on x86 */
13481 		case STACK_R8:
13482 #endif
13483 			if (mono_arch_is_soft_float ())
13484 				break;
13485 
13486 			/*
13487 			if (var->type == STACK_VTYPE && cfg->gsharedvt && mini_is_gsharedvt_variable_type (var->inst_vtype))
13488 				break;
13489 			*/
13490 
13491 			/* Arguments are implicitly global */
13492 			/* Putting R4 vars into registers doesn't work currently */
13493 			/* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13494 			if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13495 				/*
13496 				 * Make that the variable's liveness interval doesn't contain a call, since
13497 				 * that would cause the lvreg to be spilled, making the whole optimization
13498 				 * useless.
13499 				 */
13500 				/* This is too slow for JIT compilation */
13501 #if 0
13502 				if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13503 					MonoInst *ins;
13504 					int def_index, call_index, ins_index;
13505 					gboolean spilled = FALSE;
13506 
13507 					def_index = -1;
13508 					call_index = -1;
13509 					ins_index = 0;
13510 					for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13511 						const char *spec = INS_INFO (ins->opcode);
13512 
13513 						if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13514 							def_index = ins_index;
13515 
13516 						if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13517 							((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
13518 							if (call_index > def_index) {
13519 								spilled = TRUE;
13520 								break;
13521 							}
13522 						}
13523 
13524 						if (MONO_IS_CALL (ins))
13525 							call_index = ins_index;
13526 
13527 						ins_index ++;
13528 					}
13529 
13530 					if (spilled)
13531 						break;
13532 				}
13533 #endif
13534 
13535 				if (G_UNLIKELY (cfg->verbose_level > 2))
13536 					printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
13537 				var->flags |= MONO_INST_IS_DEAD;
13538 				cfg->vreg_to_inst [var->dreg] = NULL;
13539 			}
13540 			break;
13541 		}
13542 	}
13543 
13544 	/*
13545 	 * Compress the varinfo and vars tables so the liveness computation is faster and
13546 	 * takes up less space.
13547 	 */
13548 	pos = 0;
13549 	for (i = 0; i < cfg->num_varinfo; ++i) {
13550 		MonoInst *var = cfg->varinfo [i];
13551 		if (pos < i && cfg->locals_start == i)
13552 			cfg->locals_start = pos;
13553 		if (!(var->flags & MONO_INST_IS_DEAD)) {
13554 			if (pos < i) {
13555 				cfg->varinfo [pos] = cfg->varinfo [i];
13556 				cfg->varinfo [pos]->inst_c0 = pos;
13557 				memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13558 				cfg->vars [pos].idx = pos;
13559 #if SIZEOF_REGISTER == 4
13560 				if (cfg->varinfo [pos]->type == STACK_I8) {
13561 					/* Modify the two component vars too */
13562 					MonoInst *var1;
13563 
13564 					var1 = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->varinfo [pos]->dreg));
13565 					var1->inst_c0 = pos;
13566 					var1 = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->varinfo [pos]->dreg));
13567 					var1->inst_c0 = pos;
13568 				}
13569 #endif
13570 			}
13571 			pos ++;
13572 		}
13573 	}
13574 	cfg->num_varinfo = pos;
13575 	if (cfg->locals_start > cfg->num_varinfo)
13576 		cfg->locals_start = cfg->num_varinfo;
13577 }
13578 
13579 /*
13580  * mono_allocate_gsharedvt_vars:
13581  *
13582  *   Allocate variables with gsharedvt types to entries in the MonoGSharedVtMethodRuntimeInfo.entries array.
13583  * Initialize cfg->gsharedvt_vreg_to_idx with the mapping between vregs and indexes.
13584  */
13585 void
mono_allocate_gsharedvt_vars(MonoCompile * cfg)13586 mono_allocate_gsharedvt_vars (MonoCompile *cfg)
13587 {
13588 	int i;
13589 
13590 	cfg->gsharedvt_vreg_to_idx = (int *)mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13591 
13592 	for (i = 0; i < cfg->num_varinfo; ++i) {
13593 		MonoInst *ins = cfg->varinfo [i];
13594 		int idx;
13595 
13596 		if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
13597 			if (i >= cfg->locals_start) {
13598 				/* Local */
13599 				idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
13600 				cfg->gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13601 				ins->opcode = OP_GSHAREDVT_LOCAL;
13602 				ins->inst_imm = idx;
13603 			} else {
13604 				/* Arg */
13605 				cfg->gsharedvt_vreg_to_idx [ins->dreg] = -1;
13606 				ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13607 			}
13608 		}
13609 	}
13610 }
13611 
13612 /**
13613  * mono_spill_global_vars:
13614  *
13615  *   Generate spill code for variables which are not allocated to registers,
13616  * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13617  * code is generated which could be optimized by the local optimization passes.
13618  */
13619 void
mono_spill_global_vars(MonoCompile * cfg,gboolean * need_local_opts)13620 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13621 {
13622 	MonoBasicBlock *bb;
13623 	char spec2 [16];
13624 	int orig_next_vreg;
13625 	guint32 *vreg_to_lvreg;
13626 	guint32 *lvregs;
13627 	guint32 i, lvregs_len, lvregs_size;
13628 	gboolean dest_has_lvreg = FALSE;
13629 	MonoStackType stacktypes [128];
13630 	MonoInst **live_range_start, **live_range_end;
13631 	MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13632 
13633 	*need_local_opts = FALSE;
13634 
13635 	memset (spec2, 0, sizeof (spec2));
13636 
13637 	/* FIXME: Move this function to mini.c */
13638 	stacktypes ['i'] = STACK_PTR;
13639 	stacktypes ['l'] = STACK_I8;
13640 	stacktypes ['f'] = STACK_R8;
13641 #ifdef MONO_ARCH_SIMD_INTRINSICS
13642 	stacktypes ['x'] = STACK_VTYPE;
13643 #endif
13644 
13645 #if SIZEOF_REGISTER == 4
13646 	/* Create MonoInsts for longs */
13647 	for (i = 0; i < cfg->num_varinfo; i++) {
13648 		MonoInst *ins = cfg->varinfo [i];
13649 
13650 		if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13651 			switch (ins->type) {
13652 			case STACK_R8:
13653 			case STACK_I8: {
13654 				MonoInst *tree;
13655 
13656 				if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13657 					break;
13658 
13659 				g_assert (ins->opcode == OP_REGOFFSET);
13660 
13661 				tree = get_vreg_to_inst (cfg, MONO_LVREG_LS (ins->dreg));
13662 				g_assert (tree);
13663 				tree->opcode = OP_REGOFFSET;
13664 				tree->inst_basereg = ins->inst_basereg;
13665 				tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13666 
13667 				tree = get_vreg_to_inst (cfg, MONO_LVREG_MS (ins->dreg));
13668 				g_assert (tree);
13669 				tree->opcode = OP_REGOFFSET;
13670 				tree->inst_basereg = ins->inst_basereg;
13671 				tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13672 				break;
13673 			}
13674 			default:
13675 				break;
13676 			}
13677 		}
13678 	}
13679 #endif
13680 
13681 	if (cfg->compute_gc_maps) {
13682 		/* registers need liveness info even for !non refs */
13683 		for (i = 0; i < cfg->num_varinfo; i++) {
13684 			MonoInst *ins = cfg->varinfo [i];
13685 
13686 			if (ins->opcode == OP_REGVAR)
13687 				ins->flags |= MONO_INST_GC_TRACK;
13688 		}
13689 	}
13690 
13691 	/* FIXME: widening and truncation */
13692 
13693 	/*
13694 	 * As an optimization, when a variable allocated to the stack is first loaded into
13695 	 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13696 	 * the variable again.
13697 	 */
13698 	orig_next_vreg = cfg->next_vreg;
13699 	vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
13700 	lvregs_size = 1024;
13701 	lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * lvregs_size);
13702 	lvregs_len = 0;
13703 
13704 	/*
13705 	 * These arrays contain the first and last instructions accessing a given
13706 	 * variable.
13707 	 * Since we emit bblocks in the same order we process them here, and we
13708 	 * don't split live ranges, these will precisely describe the live range of
13709 	 * the variable, i.e. the instruction range where a valid value can be found
13710 	 * in the variables location.
13711 	 * The live range is computed using the liveness info computed by the liveness pass.
13712 	 * We can't use vmv->range, since that is an abstract live range, and we need
13713 	 * one which is instruction precise.
13714 	 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13715 	 */
13716 	/* FIXME: Only do this if debugging info is requested */
13717 	live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13718 	live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13719 	live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13720 	live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13721 
13722 	/* Add spill loads/stores */
13723 	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13724 		MonoInst *ins;
13725 
13726 		if (cfg->verbose_level > 2)
13727 			printf ("\nSPILL BLOCK %d:\n", bb->block_num);
13728 
13729 		/* Clear vreg_to_lvreg array */
13730 		for (i = 0; i < lvregs_len; i++)
13731 			vreg_to_lvreg [lvregs [i]] = 0;
13732 		lvregs_len = 0;
13733 
13734 		cfg->cbb = bb;
13735 		MONO_BB_FOR_EACH_INS (bb, ins) {
13736 			const char *spec = INS_INFO (ins->opcode);
13737 			int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13738 			gboolean store, no_lvreg;
13739 			int sregs [MONO_MAX_SRC_REGS];
13740 
13741 			if (G_UNLIKELY (cfg->verbose_level > 2))
13742 				mono_print_ins (ins);
13743 
13744 			if (ins->opcode == OP_NOP)
13745 				continue;
13746 
13747 			/*
13748 			 * We handle LDADDR here as well, since it can only be decomposed
13749 			 * when variable addresses are known.
13750 			 */
13751 			if (ins->opcode == OP_LDADDR) {
13752 				MonoInst *var = (MonoInst *)ins->inst_p0;
13753 
13754 				if (var->opcode == OP_VTARG_ADDR) {
13755 					/* Happens on SPARC/S390 where vtypes are passed by reference */
13756 					MonoInst *vtaddr = var->inst_left;
13757 					if (vtaddr->opcode == OP_REGVAR) {
13758 						ins->opcode = OP_MOVE;
13759 						ins->sreg1 = vtaddr->dreg;
13760 					}
13761 					else if (var->inst_left->opcode == OP_REGOFFSET) {
13762 						ins->opcode = OP_LOAD_MEMBASE;
13763 						ins->inst_basereg = vtaddr->inst_basereg;
13764 						ins->inst_offset = vtaddr->inst_offset;
13765 					} else
13766 						NOT_IMPLEMENTED;
13767 				} else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg] < 0) {
13768 					/* gsharedvt arg passed by ref */
13769 					g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
13770 
13771 					ins->opcode = OP_LOAD_MEMBASE;
13772 					ins->inst_basereg = var->inst_basereg;
13773 					ins->inst_offset = var->inst_offset;
13774 				} else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg]) {
13775 					MonoInst *load, *load2, *load3;
13776 					int idx = cfg->gsharedvt_vreg_to_idx [var->dreg] - 1;
13777 					int reg1, reg2, reg3;
13778 					MonoInst *info_var = cfg->gsharedvt_info_var;
13779 					MonoInst *locals_var = cfg->gsharedvt_locals_var;
13780 
13781 					/*
13782 					 * gsharedvt local.
13783 					 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
13784 					 */
13785 
13786 					g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
13787 
13788 					g_assert (info_var);
13789 					g_assert (locals_var);
13790 
13791 					/* Mark the instruction used to compute the locals var as used */
13792 					cfg->gsharedvt_locals_var_ins = NULL;
13793 
13794 					/* Load the offset */
13795 					if (info_var->opcode == OP_REGOFFSET) {
13796 						reg1 = alloc_ireg (cfg);
13797 						NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
13798 					} else if (info_var->opcode == OP_REGVAR) {
13799 						load = NULL;
13800 						reg1 = info_var->dreg;
13801 					} else {
13802 						g_assert_not_reached ();
13803 					}
13804 					reg2 = alloc_ireg (cfg);
13805 					NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
13806 					/* Load the locals area address */
13807 					reg3 = alloc_ireg (cfg);
13808 					if (locals_var->opcode == OP_REGOFFSET) {
13809 						NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
13810 					} else if (locals_var->opcode == OP_REGVAR) {
13811 						NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
13812 					} else {
13813 						g_assert_not_reached ();
13814 					}
13815 					/* Compute the address */
13816 					ins->opcode = OP_PADD;
13817 					ins->sreg1 = reg3;
13818 					ins->sreg2 = reg2;
13819 
13820 					mono_bblock_insert_before_ins (bb, ins, load3);
13821 					mono_bblock_insert_before_ins (bb, load3, load2);
13822 					if (load)
13823 						mono_bblock_insert_before_ins (bb, load2, load);
13824 				} else {
13825 					g_assert (var->opcode == OP_REGOFFSET);
13826 
13827 					ins->opcode = OP_ADD_IMM;
13828 					ins->sreg1 = var->inst_basereg;
13829 					ins->inst_imm = var->inst_offset;
13830 				}
13831 
13832 				*need_local_opts = TRUE;
13833 				spec = INS_INFO (ins->opcode);
13834 			}
13835 
13836 			if (ins->opcode < MONO_CEE_LAST) {
13837 				mono_print_ins (ins);
13838 				g_assert_not_reached ();
13839 			}
13840 
13841 			/*
13842 			 * Store opcodes have destbasereg in the dreg, but in reality, it is an
13843 			 * src register.
13844 			 * FIXME:
13845 			 */
13846 			if (MONO_IS_STORE_MEMBASE (ins)) {
13847 				tmp_reg = ins->dreg;
13848 				ins->dreg = ins->sreg2;
13849 				ins->sreg2 = tmp_reg;
13850 				store = TRUE;
13851 
13852 				spec2 [MONO_INST_DEST] = ' ';
13853 				spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13854 				spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13855 				spec2 [MONO_INST_SRC3] = ' ';
13856 				spec = spec2;
13857 			} else if (MONO_IS_STORE_MEMINDEX (ins))
13858 				g_assert_not_reached ();
13859 			else
13860 				store = FALSE;
13861 			no_lvreg = FALSE;
13862 
13863 			if (G_UNLIKELY (cfg->verbose_level > 2)) {
13864 				printf ("\t %.3s %d", spec, ins->dreg);
13865 				num_sregs = mono_inst_get_src_registers (ins, sregs);
13866 				for (srcindex = 0; srcindex < num_sregs; ++srcindex)
13867 					printf (" %d", sregs [srcindex]);
13868 				printf ("\n");
13869 			}
13870 
13871 			/***************/
13872 			/*    DREG     */
13873 			/***************/
13874 			regtype = spec [MONO_INST_DEST];
13875 			g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
13876 			prev_dreg = -1;
13877 			int dreg_using_dest_to_membase_op = -1;
13878 
13879 			if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
13880 				MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
13881 				MonoInst *store_ins;
13882 				int store_opcode;
13883 				MonoInst *def_ins = ins;
13884 				int dreg = ins->dreg; /* The original vreg */
13885 
13886 				store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
13887 
13888 				if (var->opcode == OP_REGVAR) {
13889 					ins->dreg = var->dreg;
13890 				} else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
13891 					/*
13892 					 * Instead of emitting a load+store, use a _membase opcode.
13893 					 */
13894 					g_assert (var->opcode == OP_REGOFFSET);
13895 					if (ins->opcode == OP_MOVE) {
13896 						NULLIFY_INS (ins);
13897 						def_ins = NULL;
13898 					} else {
13899 						dreg_using_dest_to_membase_op = ins->dreg;
13900 						ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
13901 						ins->inst_basereg = var->inst_basereg;
13902 						ins->inst_offset = var->inst_offset;
13903 						ins->dreg = -1;
13904 					}
13905 					spec = INS_INFO (ins->opcode);
13906 				} else {
13907 					guint32 lvreg;
13908 
13909 					g_assert (var->opcode == OP_REGOFFSET);
13910 
13911 					prev_dreg = ins->dreg;
13912 
13913 					/* Invalidate any previous lvreg for this vreg */
13914 					vreg_to_lvreg [ins->dreg] = 0;
13915 
13916 					lvreg = 0;
13917 
13918 					if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
13919 						regtype = 'l';
13920 						store_opcode = OP_STOREI8_MEMBASE_REG;
13921 					}
13922 
13923 					ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
13924 
13925 #if SIZEOF_REGISTER != 8
13926 					if (regtype == 'l') {
13927 						NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, MONO_LVREG_LS (ins->dreg));
13928 						mono_bblock_insert_after_ins (bb, ins, store_ins);
13929 						NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, MONO_LVREG_MS (ins->dreg));
13930 						mono_bblock_insert_after_ins (bb, ins, store_ins);
13931 						def_ins = store_ins;
13932 					}
13933 					else
13934 #endif
13935 					{
13936 						g_assert (store_opcode != OP_STOREV_MEMBASE);
13937 
13938 						/* Try to fuse the store into the instruction itself */
13939 						/* FIXME: Add more instructions */
13940 						if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
13941 							ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
13942 							ins->inst_imm = ins->inst_c0;
13943 							ins->inst_destbasereg = var->inst_basereg;
13944 							ins->inst_offset = var->inst_offset;
13945 							spec = INS_INFO (ins->opcode);
13946 						} else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
13947 							ins->opcode = store_opcode;
13948 							ins->inst_destbasereg = var->inst_basereg;
13949 							ins->inst_offset = var->inst_offset;
13950 
13951 							no_lvreg = TRUE;
13952 
13953 							tmp_reg = ins->dreg;
13954 							ins->dreg = ins->sreg2;
13955 							ins->sreg2 = tmp_reg;
13956 							store = TRUE;
13957 
13958 							spec2 [MONO_INST_DEST] = ' ';
13959 							spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13960 							spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13961 							spec2 [MONO_INST_SRC3] = ' ';
13962 							spec = spec2;
13963 						} else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
13964 							// FIXME: The backends expect the base reg to be in inst_basereg
13965 							ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
13966 							ins->dreg = -1;
13967 							ins->inst_basereg = var->inst_basereg;
13968 							ins->inst_offset = var->inst_offset;
13969 							spec = INS_INFO (ins->opcode);
13970 						} else {
13971 							/* printf ("INS: "); mono_print_ins (ins); */
13972 							/* Create a store instruction */
13973 							NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
13974 
13975 							/* Insert it after the instruction */
13976 							mono_bblock_insert_after_ins (bb, ins, store_ins);
13977 
13978 							def_ins = store_ins;
13979 
13980 							/*
13981 							 * We can't assign ins->dreg to var->dreg here, since the
13982 							 * sregs could use it. So set a flag, and do it after
13983 							 * the sregs.
13984 							 */
13985 							if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
13986 								dest_has_lvreg = TRUE;
13987 						}
13988 					}
13989 				}
13990 
13991 				if (def_ins && !live_range_start [dreg]) {
13992 					live_range_start [dreg] = def_ins;
13993 					live_range_start_bb [dreg] = bb;
13994 				}
13995 
13996 				if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
13997 					MonoInst *tmp;
13998 
13999 					MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14000 					tmp->inst_c1 = dreg;
14001 					mono_bblock_insert_after_ins (bb, def_ins, tmp);
14002 				}
14003 			}
14004 
14005 			/************/
14006 			/*  SREGS   */
14007 			/************/
14008 			num_sregs = mono_inst_get_src_registers (ins, sregs);
14009 			for (srcindex = 0; srcindex < 3; ++srcindex) {
14010 				regtype = spec [MONO_INST_SRC1 + srcindex];
14011 				sreg = sregs [srcindex];
14012 
14013 				g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14014 				if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14015 					MonoInst *var = get_vreg_to_inst (cfg, sreg);
14016 					MonoInst *use_ins = ins;
14017 					MonoInst *load_ins;
14018 					guint32 load_opcode;
14019 
14020 					if (var->opcode == OP_REGVAR) {
14021 						sregs [srcindex] = var->dreg;
14022 						//mono_inst_set_src_registers (ins, sregs);
14023 						live_range_end [sreg] = use_ins;
14024 						live_range_end_bb [sreg] = bb;
14025 
14026 						if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14027 							MonoInst *tmp;
14028 
14029 							MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14030 							/* var->dreg is a hreg */
14031 							tmp->inst_c1 = sreg;
14032 							mono_bblock_insert_after_ins (bb, ins, tmp);
14033 						}
14034 
14035 						continue;
14036 					}
14037 
14038 					g_assert (var->opcode == OP_REGOFFSET);
14039 
14040 					load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14041 
14042 					g_assert (load_opcode != OP_LOADV_MEMBASE);
14043 
14044 					if (vreg_to_lvreg [sreg]) {
14045 						g_assert (vreg_to_lvreg [sreg] != -1);
14046 
14047 						/* The variable is already loaded to an lvreg */
14048 						if (G_UNLIKELY (cfg->verbose_level > 2))
14049 							printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14050 						sregs [srcindex] = vreg_to_lvreg [sreg];
14051 						//mono_inst_set_src_registers (ins, sregs);
14052 						continue;
14053 					}
14054 
14055 					/* Try to fuse the load into the instruction */
14056 					if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
14057 						ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
14058 						sregs [0] = var->inst_basereg;
14059 						//mono_inst_set_src_registers (ins, sregs);
14060 						ins->inst_offset = var->inst_offset;
14061 					} else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
14062 						ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
14063 						sregs [1] = var->inst_basereg;
14064 						//mono_inst_set_src_registers (ins, sregs);
14065 						ins->inst_offset = var->inst_offset;
14066 					} else {
14067 						if (MONO_IS_REAL_MOVE (ins)) {
14068 							ins->opcode = OP_NOP;
14069 							sreg = ins->dreg;
14070 						} else {
14071 							//printf ("%d ", srcindex); mono_print_ins (ins);
14072 
14073 							sreg = alloc_dreg (cfg, stacktypes [regtype]);
14074 
14075 							if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14076 								if (var->dreg == prev_dreg) {
14077 									/*
14078 									 * sreg refers to the value loaded by the load
14079 									 * emitted below, but we need to use ins->dreg
14080 									 * since it refers to the store emitted earlier.
14081 									 */
14082 									sreg = ins->dreg;
14083 								}
14084 								g_assert (sreg != -1);
14085 								if (var->dreg == dreg_using_dest_to_membase_op) {
14086 									if (cfg->verbose_level > 2)
14087 										printf ("\tCan't cache R%d because it's part of a dreg dest_membase optimization\n", var->dreg);
14088 								} else {
14089 									vreg_to_lvreg [var->dreg] = sreg;
14090 								}
14091 								if (lvregs_len >= lvregs_size) {
14092 									guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
14093 									memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
14094 									lvregs = new_lvregs;
14095 									lvregs_size *= 2;
14096 								}
14097 								lvregs [lvregs_len ++] = var->dreg;
14098 							}
14099 						}
14100 
14101 						sregs [srcindex] = sreg;
14102 						//mono_inst_set_src_registers (ins, sregs);
14103 
14104 #if SIZEOF_REGISTER != 8
14105 						if (regtype == 'l') {
14106 							NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_MS (sreg), var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14107 							mono_bblock_insert_before_ins (bb, ins, load_ins);
14108 							NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_LS (sreg), var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14109 							mono_bblock_insert_before_ins (bb, ins, load_ins);
14110 							use_ins = load_ins;
14111 						}
14112 						else
14113 #endif
14114 						{
14115 #if SIZEOF_REGISTER == 4
14116 							g_assert (load_opcode != OP_LOADI8_MEMBASE);
14117 #endif
14118 							NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14119 							mono_bblock_insert_before_ins (bb, ins, load_ins);
14120 							use_ins = load_ins;
14121 						}
14122 					}
14123 
14124 					if (var->dreg < orig_next_vreg) {
14125 						live_range_end [var->dreg] = use_ins;
14126 						live_range_end_bb [var->dreg] = bb;
14127 					}
14128 
14129 					if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14130 						MonoInst *tmp;
14131 
14132 						MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14133 						tmp->inst_c1 = var->dreg;
14134 						mono_bblock_insert_after_ins (bb, ins, tmp);
14135 					}
14136 				}
14137 			}
14138 			mono_inst_set_src_registers (ins, sregs);
14139 
14140 			if (dest_has_lvreg) {
14141 				g_assert (ins->dreg != -1);
14142 				vreg_to_lvreg [prev_dreg] = ins->dreg;
14143 				if (lvregs_len >= lvregs_size) {
14144 					guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
14145 					memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
14146 					lvregs = new_lvregs;
14147 					lvregs_size *= 2;
14148 				}
14149 				lvregs [lvregs_len ++] = prev_dreg;
14150 				dest_has_lvreg = FALSE;
14151 			}
14152 
14153 			if (store) {
14154 				tmp_reg = ins->dreg;
14155 				ins->dreg = ins->sreg2;
14156 				ins->sreg2 = tmp_reg;
14157 			}
14158 
14159 			if (MONO_IS_CALL (ins)) {
14160 				/* Clear vreg_to_lvreg array */
14161 				for (i = 0; i < lvregs_len; i++)
14162 					vreg_to_lvreg [lvregs [i]] = 0;
14163 				lvregs_len = 0;
14164 			} else if (ins->opcode == OP_NOP) {
14165 				ins->dreg = -1;
14166 				MONO_INST_NULLIFY_SREGS (ins);
14167 			}
14168 
14169 			if (cfg->verbose_level > 2)
14170 				mono_print_ins_index (1, ins);
14171 		}
14172 
14173 		/* Extend the live range based on the liveness info */
14174 		if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14175 			for (i = 0; i < cfg->num_varinfo; i ++) {
14176 				MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14177 
14178 				if (vreg_is_volatile (cfg, vi->vreg))
14179 					/* The liveness info is incomplete */
14180 					continue;
14181 
14182 				if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14183 					/* Live from at least the first ins of this bb */
14184 					live_range_start [vi->vreg] = bb->code;
14185 					live_range_start_bb [vi->vreg] = bb;
14186 				}
14187 
14188 				if (mono_bitset_test_fast (bb->live_out_set, i)) {
14189 					/* Live at least until the last ins of this bb */
14190 					live_range_end [vi->vreg] = bb->last_ins;
14191 					live_range_end_bb [vi->vreg] = bb;
14192 				}
14193 			}
14194 		}
14195 	}
14196 
14197 	/*
14198 	 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14199 	 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14200 	 */
14201 	if (cfg->backend->have_liverange_ops && cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14202 		for (i = 0; i < cfg->num_varinfo; ++i) {
14203 			int vreg = MONO_VARINFO (cfg, i)->vreg;
14204 			MonoInst *ins;
14205 
14206 			if (live_range_start [vreg]) {
14207 				MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14208 				ins->inst_c0 = i;
14209 				ins->inst_c1 = vreg;
14210 				mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14211 			}
14212 			if (live_range_end [vreg]) {
14213 				MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14214 				ins->inst_c0 = i;
14215 				ins->inst_c1 = vreg;
14216 				if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14217 					mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14218 				else
14219 					mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14220 			}
14221 		}
14222 	}
14223 
14224 	if (cfg->gsharedvt_locals_var_ins) {
14225 		/* Nullify if unused */
14226 		cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14227 		cfg->gsharedvt_locals_var_ins->inst_imm = 0;
14228 	}
14229 
14230 	g_free (live_range_start);
14231 	g_free (live_range_end);
14232 	g_free (live_range_start_bb);
14233 	g_free (live_range_end_bb);
14234 }
14235 
14236 
14237 /**
14238  * FIXME:
14239  * - use 'iadd' instead of 'int_add'
14240  * - handling ovf opcodes: decompose in method_to_ir.
14241  * - unify iregs/fregs
14242  *   -> partly done, the missing parts are:
14243  *   - a more complete unification would involve unifying the hregs as well, so
14244  *     code wouldn't need if (fp) all over the place. but that would mean the hregs
14245  *     would no longer map to the machine hregs, so the code generators would need to
14246  *     be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14247  *     wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14248  *     fp/non-fp branches speeds it up by about 15%.
14249  * - use sext/zext opcodes instead of shifts
14250  * - add OP_ICALL
14251  * - get rid of TEMPLOADs if possible and use vregs instead
14252  * - clean up usage of OP_P/OP_ opcodes
14253  * - cleanup usage of DUMMY_USE
14254  * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14255  *   stack
14256  * - set the stack type and allocate a dreg in the EMIT_NEW macros
14257  * - get rid of all the <foo>2 stuff when the new JIT is ready.
14258  * - make sure handle_stack_args () is called before the branch is emitted
14259  * - when the new IR is done, get rid of all unused stuff
14260  * - COMPARE/BEQ as separate instructions or unify them ?
14261  *   - keeping them separate allows specialized compare instructions like
14262  *     compare_imm, compare_membase
14263  *   - most back ends unify fp compare+branch, fp compare+ceq
14264  * - integrate mono_save_args into inline_method
 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
14266  * - handle long shift opts on 32 bit platforms somehow: they require
14267  *   3 sregs (2 for arg1 and 1 for arg2)
14268  * - make byref a 'normal' type.
14269  * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14270  *   variable if needed.
14271  * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14272  *   like inline_method.
14273  * - remove inlining restrictions
14274  * - fix LNEG and enable cfold of INEG
14275  * - generalize x86 optimizations like ldelema as a peephole optimization
14276  * - add store_mem_imm for amd64
14277  * - optimize the loading of the interruption flag in the managed->native wrappers
14278  * - avoid special handling of OP_NOP in passes
14279  * - move code inserting instructions into one function/macro.
14280  * - try a coalescing phase after liveness analysis
14281  * - add float -> vreg conversion + local optimizations on !x86
14282  * - figure out how to handle decomposed branches during optimizations, ie.
14283  *   compare+branch, op_jump_table+op_br etc.
14284  * - promote RuntimeXHandles to vregs
14285  * - vtype cleanups:
14286  *   - add a NEW_VARLOADA_VREG macro
14287  * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14288  *   accessing vtype fields.
14289  * - get rid of I8CONST on 64 bit platforms
14290  * - dealing with the increase in code size due to branches created during opcode
14291  *   decomposition:
14292  *   - use extended basic blocks
14293  *     - all parts of the JIT
14294  *     - handle_global_vregs () && local regalloc
14295  *   - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14296  * - sources of increase in code size:
14297  *   - vtypes
14298  *   - long compares
14299  *   - isinst and castclass
14300  *   - lvregs not allocated to global registers even if used multiple times
14301  * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14302  *   meaningful.
14303  * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14304  * - add all micro optimizations from the old JIT
14305  * - put tree optimizations into the deadce pass
14306  * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14307  *   specific function.
14308  * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14309  *   fcompare + branchCC.
14310  * - create a helper function for allocating a stack slot, taking into account
14311  *   MONO_CFG_HAS_SPILLUP.
14312  * - merge r68207.
14313  * - optimize mono_regstate2_alloc_int/float.
14314  * - fix the pessimistic handling of variables accessed in exception handler blocks.
14315  * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14316  *   parts of the tree could be separated by other instructions, killing the tree
14317  *   arguments, or stores killing loads etc. Also, should we fold loads into other
14318  *   instructions if the result of the load is used multiple times ?
14319  * - make the REM_IMM optimization in mini-x86.c arch-independent.
14320  * - LAST MERGE: 108395.
14321  * - when returning vtypes in registers, generate IR and append it to the end of the
14322  *   last bb instead of doing it in the epilog.
14323  * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14324  */
14325 
14326 /*
14327 
14328 NOTES
14329 -----
14330 
14331 - When to decompose opcodes:
  - earlier: this makes some optimizations hard to implement, since the low level IR
  no longer contains the necessary information. But it is easier to do.
14334   - later: harder to implement, enables more optimizations.
14335 - Branches inside bblocks:
14336   - created when decomposing complex opcodes.
14337     - branches to another bblock: harmless, but not tracked by the branch
14338       optimizations, so need to branch to a label at the start of the bblock.
    - branches to inside the same bblock: very problematic, trips up the local
      reg allocator. Can be fixed by splitting the current bblock, but that is a
      complex operation, since some local vregs can become global vregs etc.
14342 - Local/global vregs:
14343   - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14344     local register allocator.
14345   - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14346     structure, created by mono_create_var (). Assigned to hregs or the stack by
14347     the global register allocator.
14348 - When to do optimizations like alu->alu_imm:
14349   - earlier -> saves work later on since the IR will be smaller/simpler
14350   - later -> can work on more instructions
14351 - Handling of valuetypes:
14352   - When a vtype is pushed on the stack, a new temporary is created, an
14353     instruction computing its address (LDADDR) is emitted and pushed on
14354     the stack. Need to optimize cases when the vtype is used immediately as in
14355     argument passing, stloc etc.
14356 - Instead of the to_end stuff in the old JIT, simply call the function handling
14357   the values on the stack before emitting the last instruction of the bb.
14358 */
14359 #else /* !DISABLE_JIT */
14360 
14361 MONO_EMPTY_SOURCE_FILE (method_to_ir);
14362 #endif /* !DISABLE_JIT */
14363