Start unifying unaligned memory access codepaths. (#37445)
The intrinsic expansions for Unsafe.ReadUnaligned and
Unsafe.WriteUnaligned now use mini_emit_memory_load and
mini_emit_memory_store.
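
For background, the portable C idiom for unaligned access, and roughly what the decomposed byte-wise sequences amounted to, is a memcpy through a correctly typed local. A minimal sketch with hypothetical helper names, not code from this patch:

```c
#include <stdint.h>
#include <string.h>

/* Read/write a 32-bit value at an address with no alignment guarantee.
 * Compilers typically fold the memcpy into a single unaligned load or
 * store on targets that support one. */
static inline uint32_t
read_unaligned_u32 (const void *src)
{
	uint32_t val;
	memcpy (&val, src, sizeof (val));
	return val;
}

static inline void
write_unaligned_u32 (void *dest, uint32_t val)
{
	memcpy (dest, &val, sizeof (val));
}
```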

stind is now implemented in terms of mini_emit_memory_store; when used
during compilation of the sgen nursery check wrapper,
mini_emit_memory_store will no longer emit a release fence when
storing a reference under the CLR memory model.
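
For context, the CLR (strong) memory model gives every reference store release semantics. A minimal C11 sketch of the fence-then-store shape that mini_emit_memory_store otherwise arranges; the function name and slot type here are hypothetical, for illustration only:

```c
#include <stdatomic.h>

/* Publish an object reference so that writes made before the store
 * are visible to any thread that observes the new reference. */
static void
store_reference (_Atomic (void *) *slot, void *obj)
{
	atomic_thread_fence (memory_order_release);
	atomic_store_explicit (slot, obj, memory_order_relaxed);
}
```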

mini_emit_memory_load and mini_emit_memory_store also no longer
decompose unaligned loads/stores into explicit byte-wise memcpy move
sequences when compiling with LLVM; the LLVM IR backend already
converts MONO_INST_UNALIGNED-flagged OP_LOAD*/OP_STORE* operations to
LLVM memory movement operations with alignment 1.
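
As a rough analogue, not taken from this patch: a field access through a packed struct in C is what an alignment-1 access looks like at the LLVM level. Clang emits a load with align 1 and the backend then picks a single unaligned instruction or a byte-wise sequence, depending on the target. A hypothetical example (uses the GCC/clang packed attribute):

```c
#include <stdint.h>

/* The "length" field sits at offset 1 and is never 4-byte aligned;
 * reading it corresponds to an LLVM "load i32 ... align 1". */
struct __attribute__((packed)) wire_header {
	uint8_t tag;
	uint32_t length;
};

static uint32_t
get_length (const struct wire_header *h)
{
	return h->length;
}
```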

Should fix mono/mono#19142.

Co-authored-by: imhameed <[email protected]>
monojenkins and imhameed authored Jun 5, 2020
1 parent d6b5d17 commit 0bf441b
Showing 3 changed files with 31 additions and 64 deletions.
18 changes: 5 additions & 13 deletions src/mono/mono/mini/intrinsics.c
@@ -543,13 +543,7 @@ emit_unsafe_intrinsics (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignatu
 
 		t = ctx->method_inst->type_argv [0];
 		t = mini_get_underlying_type (t);
-		if (MONO_TYPE_IS_PRIMITIVE (t) && t->type != MONO_TYPE_R4 && t->type != MONO_TYPE_R8) {
-			dreg = alloc_ireg (cfg);
-			EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, t, args [0]->dreg, 0);
-			ins->type = STACK_I4;
-			ins->flags |= MONO_INST_UNALIGNED;
-			return ins;
-		}
+		return mini_emit_memory_load (cfg, t, args [0], 0, MONO_INST_UNALIGNED);
 	} else if (!strcmp (cmethod->name, "WriteUnaligned")) {
 		g_assert (ctx);
 		g_assert (ctx->method_inst);
@@ -558,12 +552,10 @@ emit_unsafe_intrinsics (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignatu
 
 		t = ctx->method_inst->type_argv [0];
 		t = mini_get_underlying_type (t);
-		if (MONO_TYPE_IS_PRIMITIVE (t) && t->type != MONO_TYPE_R4 && t->type != MONO_TYPE_R8) {
-			dreg = alloc_ireg (cfg);
-			EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, t, args [0]->dreg, 0, args [1]->dreg);
-			ins->flags |= MONO_INST_UNALIGNED;
-			return ins;
-		}
+		mini_emit_memory_store (cfg, t, args [0], args [1], MONO_INST_UNALIGNED);
+		MONO_INST_NEW (cfg, ins, OP_NOP);
+		MONO_ADD_INS (cfg->cbb, ins);
+		return ins;
 	} else if (!strcmp (cmethod->name, "ByteOffset")) {
 		g_assert (ctx);
 		g_assert (ctx->method_inst);
9 changes: 6 additions & 3 deletions src/mono/mono/mini/memory-access.c
@@ -459,7 +459,10 @@ mini_emit_memory_load (MonoCompile *cfg, MonoType *type, MonoInst *src, int offs
 {
 	MonoInst *ins;
 
-	if (ins_flag & MONO_INST_UNALIGNED) {
+	/* LLVM can handle unaligned loads and stores, so there's no reason to
+	 * manually decompose an unaligned load here into a memcpy if we're
+	 * using LLVM. */
+	if ((ins_flag & MONO_INST_UNALIGNED) && !COMPILE_LLVM (cfg)) {
 		MonoInst *addr, *tmp_var;
 		int align;
 		int size = mono_type_size (type, &align);
@@ -496,10 +499,10 @@ mini_emit_memory_store (MonoCompile *cfg, MonoType *type, MonoInst *dest, MonoIn
 	if (ins_flag & MONO_INST_VOLATILE) {
 		/* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
 		mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
-	} else if (!mini_debug_options.weak_memory_model && mini_type_is_reference (type))
+	} else if (!mini_debug_options.weak_memory_model && mini_type_is_reference (type) && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
 		mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
 
-	if (ins_flag & MONO_INST_UNALIGNED) {
+	if ((ins_flag & MONO_INST_UNALIGNED) && !COMPILE_LLVM (cfg)) {
 		MonoInst *addr, *mov, *tmp_var;
 
 		tmp_var = mono_compile_create_var (cfg, type, OP_LOCAL);
68 changes: 20 additions & 48 deletions src/mono/mono/mini/method-to-ir.c
@@ -176,8 +176,6 @@ mono_tailcall_print (const char *format, ...)
 	} \
 } while (0)
 
-static int stind_to_store_membase (int opcode);
-
 int mono_op_to_op_imm (int opcode);
 int mono_op_to_op_imm_noemul (int opcode);

@@ -1267,6 +1265,22 @@ ldind_to_type (int op)
 	}
 }
 
+static MonoClass*
+stind_to_type (int op)
+{
+	switch (op) {
+	case MONO_CEE_STIND_I1: return mono_defaults.sbyte_class;
+	case MONO_CEE_STIND_I2: return mono_defaults.int16_class;
+	case MONO_CEE_STIND_I4: return mono_defaults.int32_class;
+	case MONO_CEE_STIND_I8: return mono_defaults.int64_class;
+	case MONO_CEE_STIND_I: return mono_defaults.int_class;
+	case MONO_CEE_STIND_R4: return mono_defaults.single_class;
+	case MONO_CEE_STIND_R8: return mono_defaults.double_class;
+	case MONO_CEE_STIND_REF: return mono_defaults.object_class;
+	default: g_error ("Unknown stind type %d", op);
+	}
+}
+
 #if 0
 
 static const char
@@ -8292,30 +8306,14 @@ mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_b
 		case MONO_CEE_STIND_R8:
 		case MONO_CEE_STIND_I: {
 			sp -= 2;
 
-			if (ins_flag & MONO_INST_VOLATILE) {
-				/* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
-				mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
+			if (il_op == MONO_CEE_STIND_REF && sp [1]->type != STACK_OBJ) {
+				/* stind.ref must only be used with object references. */
+				UNVERIFIED;
 			}
 
 			if (il_op == MONO_CEE_STIND_R4 && sp [1]->type == STACK_R8)
 				sp [1] = convert_value (cfg, m_class_get_byval_arg (mono_defaults.single_class), sp [1]);
-			if (!mini_debug_options.weak_memory_model && il_op == MONO_CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
-				mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
-			NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (il_op), sp [0]->dreg, 0, sp [1]->dreg);
-			ins->flags |= ins_flag;
+			mini_emit_memory_store (cfg, m_class_get_byval_arg (stind_to_type (il_op)), sp [0], sp [1], ins_flag);
 			ins_flag = 0;
-
-			MONO_ADD_INS (cfg->cbb, ins);
-
-			if (il_op == MONO_CEE_STIND_REF) {
-				/* stind.ref must only be used with object references. */
-				if (sp [1]->type != STACK_OBJ)
-					UNVERIFIED;
-				if (cfg->gen_write_barriers && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1]))
-					mini_emit_write_barrier (cfg, sp [0], sp [1]);
-			}
-
 			inline_costs += 1;
 			break;
 		}
@@ -11681,32 +11679,6 @@ mono_op_to_op_imm (int opcode)
 	return -1;
 }
 
-static int
-stind_to_store_membase (int opcode)
-{
-	switch (opcode) {
-	case MONO_CEE_STIND_I1:
-		return OP_STOREI1_MEMBASE_REG;
-	case MONO_CEE_STIND_I2:
-		return OP_STOREI2_MEMBASE_REG;
-	case MONO_CEE_STIND_I4:
-		return OP_STOREI4_MEMBASE_REG;
-	case MONO_CEE_STIND_I:
-	case MONO_CEE_STIND_REF:
-		return OP_STORE_MEMBASE_REG;
-	case MONO_CEE_STIND_I8:
-		return OP_STOREI8_MEMBASE_REG;
-	case MONO_CEE_STIND_R4:
-		return OP_STORER4_MEMBASE_REG;
-	case MONO_CEE_STIND_R8:
-		return OP_STORER8_MEMBASE_REG;
-	default:
-		g_assert_not_reached ();
-	}
-
-	return -1;
-}
-
 int
 mono_load_membase_to_load_mem (int opcode)
 {
