From 0bf441b7c837334b61ed96fd988c5c2a1e68917d Mon Sep 17 00:00:00 2001 From: monojenkins Date: Fri, 5 Jun 2020 16:44:58 -0400 Subject: [PATCH] Start unifying unaligned memory access codepaths. (#37445) The intrinsic expansions for Unsafe.ReadUnaligned and Unsafe.WriteUnaligned now use mini_emit_memory_load and mini_emit_memory_store. stind is now implemented in terms of mini_emit_memory_store; when used during compilation of the sgen nursery check wrapper, mini_emit_memory_store will no longer emit a release fence when storing a reference under the CLR memory model. mini_emit_memory_load and mini_emit_memory_store also no longer decompose unaligned loads/stores to explicit byte-wise memcpy move sequences; the LLVM IR backend already converts MONO_INST_UNALIGNED-flagged OP_LOAD*/OP_STORE* operations to LLVM memory movement operations with alignment 1. Should fix https://github.com/mono/mono/issues/19142. Co-authored-by: imhameed --- src/mono/mono/mini/intrinsics.c | 18 +++----- src/mono/mono/mini/memory-access.c | 9 ++-- src/mono/mono/mini/method-to-ir.c | 68 +++++++++--------------------- 3 files changed, 31 insertions(+), 64 deletions(-) diff --git a/src/mono/mono/mini/intrinsics.c b/src/mono/mono/mini/intrinsics.c index 9f41b3a5530ce..3951ba562c8ee 100644 --- a/src/mono/mono/mini/intrinsics.c +++ b/src/mono/mono/mini/intrinsics.c @@ -543,13 +543,7 @@ emit_unsafe_intrinsics (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignatu t = ctx->method_inst->type_argv [0]; t = mini_get_underlying_type (t); - if (MONO_TYPE_IS_PRIMITIVE (t) && t->type != MONO_TYPE_R4 && t->type != MONO_TYPE_R8) { - dreg = alloc_ireg (cfg); - EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, t, args [0]->dreg, 0); - ins->type = STACK_I4; - ins->flags |= MONO_INST_UNALIGNED; - return ins; - } + return mini_emit_memory_load (cfg, t, args [0], 0, MONO_INST_UNALIGNED); } else if (!strcmp (cmethod->name, "WriteUnaligned")) { g_assert (ctx); g_assert (ctx->method_inst); @@ -558,12 +552,10 @@ 
emit_unsafe_intrinsics (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignatu t = ctx->method_inst->type_argv [0]; t = mini_get_underlying_type (t); - if (MONO_TYPE_IS_PRIMITIVE (t) && t->type != MONO_TYPE_R4 && t->type != MONO_TYPE_R8) { - dreg = alloc_ireg (cfg); - EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, t, args [0]->dreg, 0, args [1]->dreg); - ins->flags |= MONO_INST_UNALIGNED; - return ins; - } + mini_emit_memory_store (cfg, t, args [0], args [1], MONO_INST_UNALIGNED); + MONO_INST_NEW (cfg, ins, OP_NOP); + MONO_ADD_INS (cfg->cbb, ins); + return ins; } else if (!strcmp (cmethod->name, "ByteOffset")) { g_assert (ctx); g_assert (ctx->method_inst); diff --git a/src/mono/mono/mini/memory-access.c b/src/mono/mono/mini/memory-access.c index 02088f15cb51d..52713030c5214 100644 --- a/src/mono/mono/mini/memory-access.c +++ b/src/mono/mono/mini/memory-access.c @@ -459,7 +459,10 @@ mini_emit_memory_load (MonoCompile *cfg, MonoType *type, MonoInst *src, int offs { MonoInst *ins; - if (ins_flag & MONO_INST_UNALIGNED) { + /* LLVM can handle unaligned loads and stores, so there's no reason to + * manually decompose an unaligned load here into a memcpy if we're + * using LLVM. 
*/ + if ((ins_flag & MONO_INST_UNALIGNED) && !COMPILE_LLVM (cfg)) { MonoInst *addr, *tmp_var; int align; int size = mono_type_size (type, &align); @@ -496,10 +499,10 @@ mini_emit_memory_store (MonoCompile *cfg, MonoType *type, MonoInst *dest, MonoIn if (ins_flag & MONO_INST_VOLATILE) { /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */ mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL); - } else if (!mini_debug_options.weak_memory_model && mini_type_is_reference (type)) + } else if (!mini_debug_options.weak_memory_model && mini_type_is_reference (type) && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER) mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL); - if (ins_flag & MONO_INST_UNALIGNED) { + if ((ins_flag & MONO_INST_UNALIGNED) && !COMPILE_LLVM (cfg)) { MonoInst *addr, *mov, *tmp_var; tmp_var = mono_compile_create_var (cfg, type, OP_LOCAL); diff --git a/src/mono/mono/mini/method-to-ir.c b/src/mono/mono/mini/method-to-ir.c index 6ec2d8b3fc2c2..1cce0624046e2 100644 --- a/src/mono/mono/mini/method-to-ir.c +++ b/src/mono/mono/mini/method-to-ir.c @@ -176,8 +176,6 @@ mono_tailcall_print (const char *format, ...) 
} \ } while (0) -static int stind_to_store_membase (int opcode); - int mono_op_to_op_imm (int opcode); int mono_op_to_op_imm_noemul (int opcode); @@ -1267,6 +1265,22 @@ ldind_to_type (int op) } } +static MonoClass* +stind_to_type (int op) +{ + switch (op) { + case MONO_CEE_STIND_I1: return mono_defaults.sbyte_class; + case MONO_CEE_STIND_I2: return mono_defaults.int16_class; + case MONO_CEE_STIND_I4: return mono_defaults.int32_class; + case MONO_CEE_STIND_I8: return mono_defaults.int64_class; + case MONO_CEE_STIND_I: return mono_defaults.int_class; + case MONO_CEE_STIND_R4: return mono_defaults.single_class; + case MONO_CEE_STIND_R8: return mono_defaults.double_class; + case MONO_CEE_STIND_REF: return mono_defaults.object_class; + default: g_error ("Unknown stind type %d", op); + } +} + #if 0 static const char @@ -8292,30 +8306,14 @@ mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_b case MONO_CEE_STIND_R8: case MONO_CEE_STIND_I: { sp -= 2; - - if (ins_flag & MONO_INST_VOLATILE) { - /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */ - mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL); + if (il_op == MONO_CEE_STIND_REF && sp [1]->type != STACK_OBJ) { + /* stind.ref must only be used with object references. 
*/ + UNVERIFIED; } - if (il_op == MONO_CEE_STIND_R4 && sp [1]->type == STACK_R8) sp [1] = convert_value (cfg, m_class_get_byval_arg (mono_defaults.single_class), sp [1]); - if (!mini_debug_options.weak_memory_model && il_op == MONO_CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER) - mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL); - NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (il_op), sp [0]->dreg, 0, sp [1]->dreg); - ins->flags |= ins_flag; + mini_emit_memory_store (cfg, m_class_get_byval_arg (stind_to_type (il_op)), sp [0], sp [1], ins_flag); ins_flag = 0; - - MONO_ADD_INS (cfg->cbb, ins); - - if (il_op == MONO_CEE_STIND_REF) { - /* stind.ref must only be used with object references. */ - if (sp [1]->type != STACK_OBJ) - UNVERIFIED; - if (cfg->gen_write_barriers && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1])) - mini_emit_write_barrier (cfg, sp [0], sp [1]); - } - inline_costs += 1; break; } @@ -11681,32 +11679,6 @@ mono_op_to_op_imm (int opcode) return -1; } -static int -stind_to_store_membase (int opcode) -{ - switch (opcode) { - case MONO_CEE_STIND_I1: - return OP_STOREI1_MEMBASE_REG; - case MONO_CEE_STIND_I2: - return OP_STOREI2_MEMBASE_REG; - case MONO_CEE_STIND_I4: - return OP_STOREI4_MEMBASE_REG; - case MONO_CEE_STIND_I: - case MONO_CEE_STIND_REF: - return OP_STORE_MEMBASE_REG; - case MONO_CEE_STIND_I8: - return OP_STOREI8_MEMBASE_REG; - case MONO_CEE_STIND_R4: - return OP_STORER4_MEMBASE_REG; - case MONO_CEE_STIND_R8: - return OP_STORER8_MEMBASE_REG; - default: - g_assert_not_reached (); - } - - return -1; -} - int mono_load_membase_to_load_mem (int opcode) {