diff --git a/src/coreclr/src/gc/sample/gcenv.ee.cpp b/src/coreclr/src/gc/sample/gcenv.ee.cpp index 9a4e9c9a74b13..687fd5624ba25 100644 --- a/src/coreclr/src/gc/sample/gcenv.ee.cpp +++ b/src/coreclr/src/gc/sample/gcenv.ee.cpp @@ -100,7 +100,11 @@ uint32_t CLREventStatic::Wait(uint32_t dwMilliseconds, bool bAlertable) return result; } +#ifndef __GNUC__ __declspec(thread) Thread * pCurrentThread; +#else // !__GNUC__ +thread_local Thread * pCurrentThread; +#endif // !__GNUC__ Thread * GetThread() { diff --git a/src/coreclr/src/jit/CMakeLists.txt b/src/coreclr/src/jit/CMakeLists.txt index 5b98b4f507e02..013b8974ed677 100644 --- a/src/coreclr/src/jit/CMakeLists.txt +++ b/src/coreclr/src/jit/CMakeLists.txt @@ -4,6 +4,11 @@ set(CMAKE_EXPORT_COMPILE_COMMANDS ON) include_directories("./jitstd") include_directories("../inc") +if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") + add_compile_options(-fpermissive) + add_compile_options(-Wno-error) +endif() + if (CLR_CMAKE_TARGET_ARCH_AMD64 OR CLR_CMAKE_TARGET_ARCH_ARM64 OR (CLR_CMAKE_TARGET_ARCH_I386 AND NOT CLR_CMAKE_PLATFORM_UNIX)) add_definitions(-DFEATURE_SIMD) add_definitions(-DFEATURE_HW_INTRINSICS) diff --git a/src/coreclr/src/jit/bitset.h b/src/coreclr/src/jit/bitset.h index a0192e62e8de5..bddc2ea532486 100644 --- a/src/coreclr/src/jit/bitset.h +++ b/src/coreclr/src/jit/bitset.h @@ -30,7 +30,7 @@ class BitSetSupport { unsigned res = 0; // We process "u" in 4-bit nibbles, hence the "*2" below. - for (int i = 0; i < sizeof(T) * 2; i++) + for (unsigned int i = 0; i < sizeof(T) * 2; i++) { res += BitCountTable[u & 0xf]; u >>= 4; diff --git a/src/coreclr/src/jit/bitsetasshortlong.h b/src/coreclr/src/jit/bitsetasshortlong.h index 128c0aab3f0d7..17e0e3a69cbaa 100644 --- a/src/coreclr/src/jit/bitsetasshortlong.h +++ b/src/coreclr/src/jit/bitsetasshortlong.h @@ -313,7 +313,10 @@ class BitSetOpsExtractTempReg(); } - genSetRegToIcon(regCnt, amount, ((int)amount == amount) ? TYP_INT : TYP_LONG); + genSetRegToIcon(regCnt, amount, ((unsigned int)amount == amount) ? TYP_INT : TYP_LONG); } if (compiler->info.compInitMem) @@ -2484,7 +2484,7 @@ void CodeGen::genCodeForInitBlkUnroll(GenTreeBlk* initBlkNode) } assert(dstAddr->isUsedFromReg()); - assert(initVal->isUsedFromReg() && !initVal->IsIntegralConst(0) || initVal->IsIntegralConst(0)); + assert((initVal->isUsedFromReg() && !initVal->IsIntegralConst(0)) || initVal->IsIntegralConst(0)); assert(size != 0); assert(size <= INITBLK_UNROLL_LIMIT); diff --git a/src/coreclr/src/jit/codegencommon.cpp b/src/coreclr/src/jit/codegencommon.cpp index a45710330f53f..f1e62925851b3 100644 --- a/src/coreclr/src/jit/codegencommon.cpp +++ b/src/coreclr/src/jit/codegencommon.cpp @@ -10816,7 +10816,7 @@ void CodeGen::genIPmappingAdd(IL_OFFSETX offsx, bool isLabel) default: - if (offsx != ICorDebugInfo::NO_MAPPING) + if (offsx != (IL_OFFSETX)ICorDebugInfo::NO_MAPPING) { noway_assert(jitGetILoffs(offsx) <= compiler->info.compILCodeSize); } diff --git a/src/coreclr/src/jit/earlyprop.cpp b/src/coreclr/src/jit/earlyprop.cpp index 26a50d894b9d7..693294b49c848 100644 --- a/src/coreclr/src/jit/earlyprop.cpp +++ b/src/coreclr/src/jit/earlyprop.cpp @@ -497,11 +497,11 @@ void Compiler::optFoldNullCheck(GenTree* tree) // Check for a pattern like this: // // = - // / \ + // / \. // x comma - // / \ + // / \. // nullcheck + - // | / \ + // | / \. // y y const // // @@ -517,9 +517,9 @@ void Compiler::optFoldNullCheck(GenTree* tree) // and transform it into // // = - // / \ + // / \. // x + - // / \ + // / \. 
// y const // // diff --git a/src/coreclr/src/jit/ee_il_dll.cpp b/src/coreclr/src/jit/ee_il_dll.cpp index 410ef6ca01aee..ca3fce2aad604 100644 --- a/src/coreclr/src/jit/ee_il_dll.cpp +++ b/src/coreclr/src/jit/ee_il_dll.cpp @@ -163,10 +163,11 @@ void jitShutdown(bool processIsTerminating) #ifndef FEATURE_MERGE_JIT_AND_ENGINE +extern "C" #ifdef FEATURE_PAL -DLLEXPORT // For Win32 PAL LoadLibrary emulation + DLLEXPORT // For Win32 PAL LoadLibrary emulation #endif - extern "C" BOOL WINAPI + BOOL WINAPI DllMain(HANDLE hInstance, DWORD dwReason, LPVOID pvReserved) { if (dwReason == DLL_PROCESS_ATTACH) @@ -230,7 +231,11 @@ DLLEXPORT ICorJitCompiler* __stdcall getJit() // If you are using it more broadly in retail code, you would need to understand the // performance implications of accessing TLS. +#ifndef __GNUC__ __declspec(thread) void* gJitTls = nullptr; +#else // !__GNUC__ +thread_local void* gJitTls = nullptr; +#endif // !__GNUC__ static void* GetJitTls() { diff --git a/src/coreclr/src/jit/emit.h b/src/coreclr/src/jit/emit.h index 876dc085fc2cb..c5b50a5d1bb42 100644 --- a/src/coreclr/src/jit/emit.h +++ b/src/coreclr/src/jit/emit.h @@ -153,7 +153,7 @@ class emitLocation // A constructor for code that needs to call it explicitly. void Init() { - this->emitLocation::emitLocation(); + *this = emitLocation(); } void CaptureLocation(emitter* emit); diff --git a/src/coreclr/src/jit/flowgraph.cpp b/src/coreclr/src/jit/flowgraph.cpp index 4888ce27ec78a..3f830f6f4ca44 100644 --- a/src/coreclr/src/jit/flowgraph.cpp +++ b/src/coreclr/src/jit/flowgraph.cpp @@ -15283,11 +15283,11 @@ void Compiler::fgReorderBlocks() // is more than twice the max weight of the bPrev to block edge. // // bPrev --> [BB04, weight 31] - // | \ - // edgeToBlock -------------> O \ - // [min=8,max=10] V \ - // block --> [BB05, weight 10] \ - // \ + // | \. + // edgeToBlock -------------> O \. + // [min=8,max=10] V \. + // block --> [BB05, weight 10] \. + // \. // edgeToDest ----------------------------> O // [min=21,max=23] | // V @@ -15331,10 +15331,10 @@ void Compiler::fgReorderBlocks() // 2. Check that the weight of bPrev is at least three times more than block // // bPrev --> [BB04, weight 31] - // | \ - // V \ - // block --> [BB05, weight 10] \ - // \ + // | \. + // V \. + // block --> [BB05, weight 10] \. + // \. // | // V // bDest ---------------> [BB08, weight 21] @@ -21069,8 +21069,8 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */, bool checkBBRef // written to or address-exposed. assert(compThisArgAddrExposedOK && !lvaTable[info.compThisArg].lvHasILStoreOp && (lvaArg0Var == info.compThisArg || - lvaArg0Var != info.compThisArg && (lvaTable[lvaArg0Var].lvAddrExposed || - lvaTable[lvaArg0Var].lvHasILStoreOp || copiedForGenericsCtxt))); + (lvaArg0Var != info.compThisArg && (lvaTable[lvaArg0Var].lvAddrExposed || + lvaTable[lvaArg0Var].lvHasILStoreOp || copiedForGenericsCtxt)))); } } diff --git a/src/coreclr/src/jit/gcencode.cpp b/src/coreclr/src/jit/gcencode.cpp index 4ec6f4cb8b5b4..331796c33a819 100644 --- a/src/coreclr/src/jit/gcencode.cpp +++ b/src/coreclr/src/jit/gcencode.cpp @@ -3979,7 +3979,7 @@ void GCInfo::gcInfoBlockHdrSave(GcInfoEncoder* gcInfoEncoder, unsigned methodSiz { // The predicate above is true only if there is an extra generic context parameter, not for // the case where the generic context is provided by "this." 
- assert(compiler->info.compTypeCtxtArg != BAD_VAR_NUM); + assert((SIZE_T)compiler->info.compTypeCtxtArg != BAD_VAR_NUM); GENERIC_CONTEXTPARAM_TYPE ctxtParamType = GENERIC_CONTEXTPARAM_NONE; switch (compiler->info.compMethodInfo->options & CORINFO_GENERICS_CTXT_MASK) { diff --git a/src/coreclr/src/jit/gtlist.h b/src/coreclr/src/jit/gtlist.h index dd23db486b584..2421a8f033a54 100644 --- a/src/coreclr/src/jit/gtlist.h +++ b/src/coreclr/src/jit/gtlist.h @@ -20,12 +20,12 @@ GTNODE(NONE , char ,0,GTK_SPECIAL) // Leaf nodes (i.e. these nodes have no sub-operands): //----------------------------------------------------------------------------- -GTNODE(LCL_VAR , GenTreeLclVar ,0,GTK_LEAF|GTK_LOCAL) // local variable -GTNODE(LCL_FLD , GenTreeLclFld ,0,GTK_LEAF|GTK_LOCAL) // field in a non-primitive variable +GTNODE(LCL_VAR , GenTreeLclVar ,0,(GTK_LEAF|GTK_LOCAL)) // local variable +GTNODE(LCL_FLD , GenTreeLclFld ,0,(GTK_LEAF|GTK_LOCAL)) // field in a non-primitive variable GTNODE(LCL_VAR_ADDR , GenTreeLclVar ,0,GTK_LEAF) // address of local variable GTNODE(LCL_FLD_ADDR , GenTreeLclFld ,0,GTK_LEAF) // address of field in a non-primitive variable -GTNODE(STORE_LCL_VAR , GenTreeLclVar ,0,GTK_UNOP|GTK_LOCAL|GTK_NOVALUE) // store to local variable -GTNODE(STORE_LCL_FLD , GenTreeLclFld ,0,GTK_UNOP|GTK_LOCAL|GTK_NOVALUE) // store to field in a non-primitive variable +GTNODE(STORE_LCL_VAR , GenTreeLclVar ,0,(GTK_UNOP|GTK_LOCAL|GTK_NOVALUE)) // store to local variable +GTNODE(STORE_LCL_FLD , GenTreeLclFld ,0,(GTK_UNOP|GTK_LOCAL|GTK_NOVALUE)) // store to field in a non-primitive variable GTNODE(CATCH_ARG , GenTree ,0,GTK_LEAF) // Exception object in a catch block GTNODE(LABEL , GenTree ,0,GTK_LEAF) // Jump-target GTNODE(FTN_ADDR , GenTreeFptrVal ,0,GTK_LEAF) // Address of a function @@ -35,56 +35,56 @@ GTNODE(RET_EXPR , GenTreeRetExpr ,0,GTK_LEAF) // Place // Constant nodes: //----------------------------------------------------------------------------- -GTNODE(CNS_INT , GenTreeIntCon ,0,GTK_LEAF|GTK_CONST) -GTNODE(CNS_LNG , GenTreeLngCon ,0,GTK_LEAF|GTK_CONST) -GTNODE(CNS_DBL , GenTreeDblCon ,0,GTK_LEAF|GTK_CONST) -GTNODE(CNS_STR , GenTreeStrCon ,0,GTK_LEAF|GTK_CONST) +GTNODE(CNS_INT , GenTreeIntCon ,0,(GTK_LEAF|GTK_CONST)) +GTNODE(CNS_LNG , GenTreeLngCon ,0,(GTK_LEAF|GTK_CONST)) +GTNODE(CNS_DBL , GenTreeDblCon ,0,(GTK_LEAF|GTK_CONST)) +GTNODE(CNS_STR , GenTreeStrCon ,0,(GTK_LEAF|GTK_CONST)) //----------------------------------------------------------------------------- // Unary operators (1 operand): //----------------------------------------------------------------------------- GTNODE(NOT , GenTreeOp ,0,GTK_UNOP) -GTNODE(NOP , GenTree ,0,GTK_UNOP|GTK_NOCONTAIN) +GTNODE(NOP , GenTree ,0,(GTK_UNOP|GTK_NOCONTAIN)) GTNODE(NEG , GenTreeOp ,0,GTK_UNOP) GTNODE(COPY , GenTreeCopyOrReload,0,GTK_UNOP) // Copies a variable from its current location to a register that satisfies // code generation constraints. The child is the actual lclVar node. 
GTNODE(RELOAD , GenTreeCopyOrReload,0,GTK_UNOP) -GTNODE(ARR_LENGTH , GenTreeArrLen ,0,GTK_UNOP|GTK_EXOP) // array-length -GTNODE(INTRINSIC , GenTreeIntrinsic ,0,GTK_BINOP|GTK_EXOP) // intrinsics +GTNODE(ARR_LENGTH , GenTreeArrLen ,0,(GTK_UNOP|GTK_EXOP)) // array-length +GTNODE(INTRINSIC , GenTreeIntrinsic ,0,(GTK_BINOP|GTK_EXOP)) // intrinsics -GTNODE(LOCKADD , GenTreeOp ,0,GTK_BINOP|GTK_NOVALUE) +GTNODE(LOCKADD , GenTreeOp ,0,(GTK_BINOP|GTK_NOVALUE)) GTNODE(XADD , GenTreeOp ,0,GTK_BINOP) GTNODE(XCHG , GenTreeOp ,0,GTK_BINOP) GTNODE(CMPXCHG , GenTreeCmpXchg ,0,GTK_SPECIAL) -GTNODE(MEMORYBARRIER , GenTree ,0,GTK_LEAF|GTK_NOVALUE) +GTNODE(MEMORYBARRIER , GenTree ,0,(GTK_LEAF|GTK_NOVALUE)) -GTNODE(CAST , GenTreeCast ,0,GTK_UNOP|GTK_EXOP) // conversion to another type +GTNODE(CAST , GenTreeCast ,0,(GTK_UNOP|GTK_EXOP)) // conversion to another type #if defined(_TARGET_ARM_) GTNODE(BITCAST , GenTreeMultiRegOp ,0,GTK_UNOP) // reinterpretation of bits as another type #else GTNODE(BITCAST , GenTreeOp ,0,GTK_UNOP) // reinterpretation of bits as another type #endif -GTNODE(CKFINITE , GenTreeOp ,0,GTK_UNOP|GTK_NOCONTAIN) // Check for NaN -GTNODE(LCLHEAP , GenTreeOp ,0,GTK_UNOP|GTK_NOCONTAIN) // alloca() -GTNODE(JMP , GenTreeVal ,0,GTK_LEAF|GTK_NOVALUE) // Jump to another function +GTNODE(CKFINITE , GenTreeOp ,0,(GTK_UNOP|GTK_NOCONTAIN)) // Check for NaN +GTNODE(LCLHEAP , GenTreeOp ,0,(GTK_UNOP|GTK_NOCONTAIN)) // alloca() +GTNODE(JMP , GenTreeVal ,0,(GTK_LEAF|GTK_NOVALUE)) // Jump to another function GTNODE(ADDR , GenTreeOp ,0,GTK_UNOP) // address of GTNODE(IND , GenTreeOp ,0,GTK_UNOP) // load indirection -GTNODE(STOREIND , GenTreeStoreInd ,0,GTK_BINOP|GTK_NOVALUE) // store indirection +GTNODE(STOREIND , GenTreeStoreInd ,0,(GTK_BINOP|GTK_NOVALUE)) // store indirection // TODO-Cleanup: GT_ARR_BOUNDS_CHECK should be made a GTK_BINOP now that it has only two child nodes -GTNODE(ARR_BOUNDS_CHECK , GenTreeBoundsChk ,0,GTK_SPECIAL|GTK_NOVALUE)// array bounds check -GTNODE(OBJ , GenTreeObj ,0,GTK_UNOP|GTK_EXOP) // Object that MAY have gc pointers, and thus includes the relevant gc layout info. -GTNODE(STORE_OBJ , GenTreeBlk ,0,GTK_BINOP|GTK_EXOP|GTK_NOVALUE) // Object that MAY have gc pointers, and thus includes the relevant gc layout info. +GTNODE(ARR_BOUNDS_CHECK , GenTreeBoundsChk ,0,(GTK_SPECIAL|GTK_NOVALUE))// array bounds check +GTNODE(OBJ , GenTreeObj ,0,(GTK_UNOP|GTK_EXOP)) // Object that MAY have gc pointers, and thus includes the relevant gc layout info. +GTNODE(STORE_OBJ , GenTreeBlk ,0,(GTK_BINOP|GTK_EXOP|GTK_NOVALUE)) // Object that MAY have gc pointers, and thus includes the relevant gc layout info. GTNODE(BLK , GenTreeBlk ,0,GTK_UNOP) // Block/object with no gc pointers, and with a known size (e.g. a struct with no gc fields) -GTNODE(STORE_BLK , GenTreeBlk ,0,GTK_BINOP|GTK_NOVALUE) // Block/object with no gc pointers, and with a known size (e.g. a struct with no gc fields) +GTNODE(STORE_BLK , GenTreeBlk ,0,(GTK_BINOP|GTK_NOVALUE)) // Block/object with no gc pointers, and with a known size (e.g. 
a struct with no gc fields) GTNODE(DYN_BLK , GenTreeBlk ,0,GTK_SPECIAL) // Dynamically sized block object -GTNODE(STORE_DYN_BLK , GenTreeBlk ,0,GTK_SPECIAL|GTK_NOVALUE)// Dynamically sized block object -GTNODE(BOX , GenTreeBox ,0,GTK_UNOP|GTK_EXOP|GTK_NOTLIR) +GTNODE(STORE_DYN_BLK , GenTreeBlk ,0,(GTK_SPECIAL|GTK_NOVALUE))// Dynamically sized block object +GTNODE(BOX , GenTreeBox ,0,(GTK_UNOP|GTK_EXOP|GTK_NOTLIR)) #ifdef FEATURE_SIMD -GTNODE(SIMD_CHK , GenTreeBoundsChk ,0,GTK_SPECIAL|GTK_NOVALUE)// Compare whether an index is less than the given SIMD vector length, and call CORINFO_HELP_RNGCHKFAIL if not. +GTNODE(SIMD_CHK , GenTreeBoundsChk ,0,(GTK_SPECIAL|GTK_NOVALUE))// Compare whether an index is less than the given SIMD vector length, and call CORINFO_HELP_RNGCHKFAIL if not. // TODO-CQ: In future may want to add a field that specifies different exceptions but we'll // need VM assistance for that. // TODO-CQ: It would actually be very nice to make this an unconditional throw, and expose the control flow that @@ -92,14 +92,14 @@ GTNODE(SIMD_CHK , GenTreeBoundsChk ,0,GTK_SPECIAL|GTK_NOVALUE)// Compa #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS -GTNODE(HW_INTRINSIC_CHK , GenTreeBoundsChk ,0,GTK_SPECIAL|GTK_NOVALUE)// Compare whether an imm8 argument is in the valid range, and throw ArgumentOutOfRangeException if not. +GTNODE(HW_INTRINSIC_CHK , GenTreeBoundsChk ,0,(GTK_SPECIAL|GTK_NOVALUE))// Compare whether an imm8 argument is in the valid range, and throw ArgumentOutOfRangeException if not. #endif -GTNODE(ALLOCOBJ , GenTreeAllocObj ,0,GTK_UNOP|GTK_EXOP) // object allocator +GTNODE(ALLOCOBJ , GenTreeAllocObj ,0,(GTK_UNOP|GTK_EXOP)) // object allocator GTNODE(INIT_VAL , GenTreeOp ,0,GTK_UNOP) // Initialization value for an initBlk -GTNODE(RUNTIMELOOKUP , GenTreeRuntimeLookup, 0,GTK_UNOP|GTK_EXOP) // Runtime handle lookup +GTNODE(RUNTIMELOOKUP , GenTreeRuntimeLookup, 0,(GTK_UNOP|GTK_EXOP)) // Runtime handle lookup GTNODE(BSWAP , GenTreeOp ,0,GTK_UNOP) // Byte swap (32-bit or 64-bit) GTNODE(BSWAP16 , GenTreeOp ,0,GTK_UNOP) // Byte swap (16-bit) @@ -117,9 +117,9 @@ GTNODE(MOD , GenTreeOp ,0,GTK_BINOP) GTNODE(UDIV , GenTreeOp ,0,GTK_BINOP) GTNODE(UMOD , GenTreeOp ,0,GTK_BINOP) -GTNODE(OR , GenTreeOp ,1,GTK_BINOP|GTK_LOGOP) -GTNODE(XOR , GenTreeOp ,1,GTK_BINOP|GTK_LOGOP) -GTNODE(AND , GenTreeOp ,1,GTK_BINOP|GTK_LOGOP) +GTNODE(OR , GenTreeOp ,1,(GTK_BINOP|GTK_LOGOP)) +GTNODE(XOR , GenTreeOp ,1,(GTK_BINOP|GTK_LOGOP)) +GTNODE(AND , GenTreeOp ,1,(GTK_BINOP|GTK_LOGOP)) GTNODE(LSH , GenTreeOp ,0,GTK_BINOP) GTNODE(RSH , GenTreeOp ,0,GTK_BINOP) @@ -131,13 +131,13 @@ GTNODE(MULHI , GenTreeOp ,1,GTK_BINOP) // returns high bits // the div into a MULHI + some adjustments. In codegen, we only use the // results of the high register, and we drop the low results. 
-GTNODE(ASG , GenTreeOp ,0,GTK_BINOP|GTK_NOTLIR) -GTNODE(EQ , GenTreeOp ,0,GTK_BINOP|GTK_RELOP) -GTNODE(NE , GenTreeOp ,0,GTK_BINOP|GTK_RELOP) -GTNODE(LT , GenTreeOp ,0,GTK_BINOP|GTK_RELOP) -GTNODE(LE , GenTreeOp ,0,GTK_BINOP|GTK_RELOP) -GTNODE(GE , GenTreeOp ,0,GTK_BINOP|GTK_RELOP) -GTNODE(GT , GenTreeOp ,0,GTK_BINOP|GTK_RELOP) +GTNODE(ASG , GenTreeOp ,0,(GTK_BINOP|GTK_NOTLIR)) +GTNODE(EQ , GenTreeOp ,0,(GTK_BINOP|GTK_RELOP)) +GTNODE(NE , GenTreeOp ,0,(GTK_BINOP|GTK_RELOP)) +GTNODE(LT , GenTreeOp ,0,(GTK_BINOP|GTK_RELOP)) +GTNODE(LE , GenTreeOp ,0,(GTK_BINOP|GTK_RELOP)) +GTNODE(GE , GenTreeOp ,0,(GTK_BINOP|GTK_RELOP)) +GTNODE(GT , GenTreeOp ,0,(GTK_BINOP|GTK_RELOP)) // These are similar to GT_EQ/GT_NE but they generate "test" instead of "cmp" instructions. // Currently these are generated during lowering for code like ((x & y) eq|ne 0) only on @@ -146,21 +146,21 @@ GTNODE(GT , GenTreeOp ,0,GTK_BINOP|GTK_RELOP) // codegen which emits a "test reg, reg" instruction, that would be more difficult to do // during lowering because the source operand is used twice so it has to be a lclvar. // Because of this there is no need to also add GT_TEST_LT/LE/GE/GT opers. -GTNODE(TEST_EQ , GenTreeOp ,0,GTK_BINOP|GTK_RELOP) -GTNODE(TEST_NE , GenTreeOp ,0,GTK_BINOP|GTK_RELOP) +GTNODE(TEST_EQ , GenTreeOp ,0,(GTK_BINOP|GTK_RELOP)) +GTNODE(TEST_NE , GenTreeOp ,0,(GTK_BINOP|GTK_RELOP)) -GTNODE(COMMA , GenTreeOp ,0,GTK_BINOP|GTK_NOTLIR) +GTNODE(COMMA , GenTreeOp ,0,(GTK_BINOP|GTK_NOTLIR)) -GTNODE(QMARK , GenTreeQmark ,0,GTK_BINOP|GTK_EXOP|GTK_NOTLIR) -GTNODE(COLON , GenTreeColon ,0,GTK_BINOP|GTK_NOTLIR) +GTNODE(QMARK , GenTreeQmark ,0,(GTK_BINOP|GTK_EXOP|GTK_NOTLIR)) +GTNODE(COLON , GenTreeColon ,0,(GTK_BINOP|GTK_NOTLIR)) -GTNODE(INDEX , GenTreeIndex ,0,GTK_BINOP|GTK_EXOP|GTK_NOTLIR) // SZ-array-element -GTNODE(INDEX_ADDR , GenTreeIndex ,0,GTK_BINOP|GTK_EXOP) // addr of SZ-array-element; used when +GTNODE(INDEX , GenTreeIndex ,0,(GTK_BINOP|GTK_EXOP|GTK_NOTLIR)) // SZ-array-element +GTNODE(INDEX_ADDR , GenTreeIndex ,0,(GTK_BINOP|GTK_EXOP)) // addr of SZ-array-element; used when // aiming to minimize compile times. GTNODE(MKREFANY , GenTreeOp ,0,GTK_BINOP) -GTNODE(LEA , GenTreeAddrMode ,0,GTK_BINOP|GTK_EXOP) +GTNODE(LEA , GenTreeAddrMode ,0,(GTK_BINOP|GTK_EXOP)) #if !defined(_TARGET_64BIT_) // A GT_LONG node simply represents the long value produced by the concatenation @@ -200,35 +200,35 @@ GTNODE(RSH_LO , GenTreeOp ,0,GTK_BINOP) #endif // !defined(_TARGET_64BIT_) #ifdef FEATURE_SIMD -GTNODE(SIMD , GenTreeSIMD ,0,GTK_BINOP|GTK_EXOP) // SIMD functions/operators/intrinsics +GTNODE(SIMD , GenTreeSIMD ,0,(GTK_BINOP|GTK_EXOP)) // SIMD functions/operators/intrinsics #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS -GTNODE(HWIntrinsic , GenTreeHWIntrinsic ,0,GTK_BINOP|GTK_EXOP) // hardware intrinsics +GTNODE(HWIntrinsic , GenTreeHWIntrinsic ,0,(GTK_BINOP|GTK_EXOP)) // hardware intrinsics #endif // FEATURE_HW_INTRINSICS //----------------------------------------------------------------------------- // LIR specific compare and conditional branch/set nodes: //----------------------------------------------------------------------------- -GTNODE(CMP , GenTreeOp ,0,GTK_BINOP|GTK_NOVALUE) // Sets the condition flags according to the compare result. +GTNODE(CMP , GenTreeOp ,0,(GTK_BINOP|GTK_NOVALUE)) // Sets the condition flags according to the compare result. // N.B. Not a relop, it does not produce a value and it cannot be reversed. 
-GTNODE(JCMP , GenTreeOp ,0,GTK_BINOP|GTK_NOVALUE) // Makes a comparison and jump if the condition specified. Does not set flags -GTNODE(JCC , GenTreeCC ,0,GTK_LEAF|GTK_NOVALUE) // Checks the condition flags and branch if the condition specified +GTNODE(JCMP , GenTreeOp ,0,(GTK_BINOP|GTK_NOVALUE)) // Makes a comparison and jump if the condition specified. Does not set flags +GTNODE(JCC , GenTreeCC ,0,(GTK_LEAF|GTK_NOVALUE)) // Checks the condition flags and branch if the condition specified // by GenTreeCC::gtCondition is true. GTNODE(SETCC , GenTreeCC ,0,GTK_LEAF) // Checks the condition flags and produces 1 if the condition specified // by GenTreeCC::gtCondition is true and 0 otherwise. #ifdef _TARGET_XARCH_ -GTNODE(BT , GenTreeOp ,0,GTK_BINOP|GTK_NOVALUE) // The XARCH BT instruction. Like CMP, this sets the condition flags (CF +GTNODE(BT , GenTreeOp ,0,(GTK_BINOP|GTK_NOVALUE)) // The XARCH BT instruction. Like CMP, this sets the condition flags (CF // to be precise) and does not produce a value. #endif //----------------------------------------------------------------------------- // Other nodes that look like unary/binary operators: //----------------------------------------------------------------------------- -GTNODE(JTRUE , GenTreeOp ,0,GTK_UNOP|GTK_NOVALUE) +GTNODE(JTRUE , GenTreeOp ,0,(GTK_UNOP|GTK_NOVALUE)) -GTNODE(LIST , GenTreeArgList ,0,GTK_BINOP|GTK_NOVALUE) +GTNODE(LIST , GenTreeArgList ,0,(GTK_BINOP|GTK_NOVALUE)) GTNODE(FIELD_LIST , GenTreeFieldList ,0,GTK_BINOP) // List of fields of a struct, when passed as an argument //----------------------------------------------------------------------------- @@ -237,31 +237,31 @@ GTNODE(FIELD_LIST , GenTreeFieldList ,0,GTK_BINOP) // List of fields of GTNODE(FIELD , GenTreeField ,0,GTK_SPECIAL) // Member-field GTNODE(ARR_ELEM , GenTreeArrElem ,0,GTK_SPECIAL) // Multi-dimensional array-element address -GTNODE(ARR_INDEX , GenTreeArrIndex ,0,GTK_BINOP|GTK_EXOP) // Effective, bounds-checked index for one dimension of a multi-dimensional array element +GTNODE(ARR_INDEX , GenTreeArrIndex ,0,(GTK_BINOP|GTK_EXOP)) // Effective, bounds-checked index for one dimension of a multi-dimensional array element GTNODE(ARR_OFFSET , GenTreeArrOffs ,0,GTK_SPECIAL) // Flattened offset of multi-dimensional array element -GTNODE(CALL , GenTreeCall ,0,GTK_SPECIAL|GTK_NOCONTAIN) +GTNODE(CALL , GenTreeCall ,0,(GTK_SPECIAL|GTK_NOCONTAIN)) //----------------------------------------------------------------------------- // Statement operator nodes: //----------------------------------------------------------------------------- -GTNODE(BEG_STMTS , GenTree ,0,GTK_SPECIAL|GTK_NOVALUE)// used only temporarily in importer by impBegin/EndTreeList() -GTNODE(STMT , GenTreeStmt ,0,GTK_SPECIAL|GTK_NOVALUE)// top-level list nodes in bbTreeList +GTNODE(BEG_STMTS , GenTree ,0,(GTK_SPECIAL|GTK_NOVALUE))// used only temporarily in importer by impBegin/EndTreeList() +GTNODE(STMT , GenTreeStmt ,0,(GTK_SPECIAL|GTK_NOVALUE))// top-level list nodes in bbTreeList -GTNODE(RETURN , GenTreeOp ,0,GTK_UNOP|GTK_NOVALUE) // return from current function -GTNODE(SWITCH , GenTreeOp ,0,GTK_UNOP|GTK_NOVALUE) // switch +GTNODE(RETURN , GenTreeOp ,0,(GTK_UNOP|GTK_NOVALUE)) // return from current function +GTNODE(SWITCH , GenTreeOp ,0,(GTK_UNOP|GTK_NOVALUE)) // switch -GTNODE(NO_OP , GenTree ,0,GTK_LEAF|GTK_NOVALUE) // nop! +GTNODE(NO_OP , GenTree ,0,(GTK_LEAF|GTK_NOVALUE)) // nop! 
-GTNODE(START_NONGC , GenTree ,0,GTK_LEAF|GTK_NOVALUE) // starts a new instruction group that will be non-gc interruptible +GTNODE(START_NONGC , GenTree ,0,(GTK_LEAF|GTK_NOVALUE)) // starts a new instruction group that will be non-gc interruptible -GTNODE(START_PREEMPTGC , GenTree ,0,GTK_LEAF|GTK_NOVALUE) // starts a new instruction group where preemptive GC is enabled +GTNODE(START_PREEMPTGC , GenTree ,0,(GTK_LEAF|GTK_NOVALUE)) // starts a new instruction group where preemptive GC is enabled -GTNODE(PROF_HOOK , GenTree ,0,GTK_LEAF|GTK_NOVALUE) // profiler Enter/Leave/TailCall hook +GTNODE(PROF_HOOK , GenTree ,0,(GTK_LEAF|GTK_NOVALUE)) // profiler Enter/Leave/TailCall hook -GTNODE(RETFILT , GenTreeOp ,0,GTK_UNOP|GTK_NOVALUE) // end filter with TYP_I_IMPL return value +GTNODE(RETFILT , GenTreeOp ,0,(GTK_UNOP|GTK_NOVALUE)) // end filter with TYP_I_IMPL return value #if !FEATURE_EH_FUNCLETS -GTNODE(END_LFIN , GenTreeVal ,0,GTK_LEAF|GTK_NOVALUE) // end locally-invoked finally +GTNODE(END_LFIN , GenTreeVal ,0,(GTK_LEAF|GTK_NOVALUE)) // end locally-invoked finally #endif // !FEATURE_EH_FUNCLETS //----------------------------------------------------------------------------- @@ -269,14 +269,14 @@ GTNODE(END_LFIN , GenTreeVal ,0,GTK_LEAF|GTK_NOVALUE) // end l //----------------------------------------------------------------------------- GTNODE(PHI , GenTreeOp ,0,GTK_UNOP) // phi node for ssa. -GTNODE(PHI_ARG , GenTreePhiArg ,0,GTK_LEAF|GTK_LOCAL) // phi(phiarg, phiarg, phiarg) +GTNODE(PHI_ARG , GenTreePhiArg ,0,(GTK_LEAF|GTK_LOCAL)) // phi(phiarg, phiarg, phiarg) //----------------------------------------------------------------------------- // Nodes used by Lower to generate a closer CPU representation of other nodes //----------------------------------------------------------------------------- -GTNODE(JMPTABLE , GenTreeJumpTable ,0, GTK_LEAF|GTK_NOCONTAIN) // Generates the jump table for switches -GTNODE(SWITCH_TABLE , GenTreeOp ,0, GTK_BINOP|GTK_NOVALUE) // Jump Table based switch construct +GTNODE(JMPTABLE , GenTreeJumpTable ,0, (GTK_LEAF|GTK_NOCONTAIN)) // Generates the jump table for switches +GTNODE(SWITCH_TABLE , GenTreeOp ,0, (GTK_BINOP|GTK_NOVALUE)) // Jump Table based switch construct //----------------------------------------------------------------------------- // Nodes used only within the code generator: diff --git a/src/coreclr/src/jit/importer.cpp b/src/coreclr/src/jit/importer.cpp index 08830deb9ab41..ab99a5c294c37 100644 --- a/src/coreclr/src/jit/importer.cpp +++ b/src/coreclr/src/jit/importer.cpp @@ -108,10 +108,10 @@ void Compiler::impPushOnStack(GenTree* tree, typeInfo ti) // attempts to do that have proved too difficult. Instead, we'll assume that in checks like this, // when there's a mismatch, it's because of this reason -- the typeInfo::AreEquivalentModuloNativeInt // method used in the last disjunct allows exactly this mismatch. 
-    assert(ti.IsDead() || ti.IsByRef() && (tree->TypeGet() == TYP_I_IMPL || tree->TypeGet() == TYP_BYREF) ||
-           ti.IsUnboxedGenericTypeVar() && tree->TypeGet() == TYP_REF ||
-           ti.IsObjRef() && tree->TypeGet() == TYP_REF || ti.IsMethod() && tree->TypeGet() == TYP_I_IMPL ||
-           ti.IsType(TI_STRUCT) && tree->TypeGet() != TYP_REF ||
+    assert(ti.IsDead() || (ti.IsByRef() && (tree->TypeGet() == TYP_I_IMPL || tree->TypeGet() == TYP_BYREF)) ||
+           (ti.IsUnboxedGenericTypeVar() && tree->TypeGet() == TYP_REF) ||
+           (ti.IsObjRef() && tree->TypeGet() == TYP_REF) || (ti.IsMethod() && tree->TypeGet() == TYP_I_IMPL) ||
+           (ti.IsType(TI_STRUCT) && tree->TypeGet() != TYP_REF) ||
            typeInfo::AreEquivalentModuloNativeInt(NormaliseForStack(ti),
                                                   NormaliseForStack(typeInfo(tree->TypeGet()))));
@@ -10139,10 +10139,10 @@ var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTr
             //                              to have a tree like this:
             //
             //                                             -
-            //                                            / \
-            //                                           /   \
-            //                                          /     \
-            //                                         /       \
+            //                                            / \.
+            //                                           /   \.
+            //                                          /     \.
+            //                                         /       \.
             //                             const(h) int       addr byref
             //
             // VSW 318822
@@ -10450,7 +10450,7 @@ GenTree* Compiler::impCastClassOrIsInstToTree(GenTree* op1,
         // expand the methodtable match:
         //
         //  condMT ==> GT_NE
-        //             /    \
+        //             /    \.
         //          GT_IND   op2 (typically CNS_INT)
         //             |
         //          op1Copy
@@ -10479,7 +10479,7 @@ GenTree* Compiler::impCastClassOrIsInstToTree(GenTree* op1,
         // expand the null check:
         //
         //  condNull ==> GT_EQ
-        //               /    \
+        //               /    \.
         //            op1Copy CNS_INT
         //                     null
         //
@@ -10512,9 +10512,9 @@ GenTree* Compiler::impCastClassOrIsInstToTree(GenTree* op1,
         // Generate first QMARK - COLON tree
         //
         //  qmarkMT ==> GT_QMARK
-        //              /      \
+        //              /      \.
         //          condMT   GT_COLON
-        //                   /      \
+        //                   /      \.
         //          condFalse  condTrue
         //
         temp = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse);
@@ -10525,9 +10525,9 @@ GenTree* Compiler::impCastClassOrIsInstToTree(GenTree* op1,
         // Generate second QMARK - COLON tree
         //
         //  qmarkNull ==> GT_QMARK
-        //               /       \
+        //               /       \.
         //          condNull   GT_COLON
-        //                     /     \
+        //                     /     \.
         //               qmarkMT   op1Copy
         //
         temp = new (this, GT_COLON) GenTreeColon(TYP_REF, gtClone(op1), qmarkMT);
@@ -18169,8 +18169,8 @@ BOOL Compiler::impIsAddressInLocal(GenTree* tree, GenTree** lclVarTreeOut)
 void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult)
 {
-    assert(pInlineInfo != nullptr && compIsForInlining() ||   // Perform the actual inlining.
-           pInlineInfo == nullptr && !compIsForInlining()     // Calculate the static inlining hint for ngen.
+    assert((pInlineInfo != nullptr && compIsForInlining()) ||  // Perform the actual inlining.
+           (pInlineInfo == nullptr && !compIsForInlining())    // Calculate the static inlining hint for ngen.
            );
     // If we're really inlining, we should just have one result in play.
diff --git a/src/coreclr/src/jit/jitstd/hashtable.h b/src/coreclr/src/jit/jitstd/hashtable.h
index 05b033a7462a8..27b47107aa627 100644
--- a/src/coreclr/src/jit/jitstd/hashtable.h
+++ b/src/coreclr/src/jit/jitstd/hashtable.h
@@ -777,6 +777,7 @@ typename hashtable::size_type hashtable::bucket_size(size_type size) const
 {
     rehash(size);
+    return bucket_count();
 }
 template
diff --git a/src/coreclr/src/jit/loopcloning.h b/src/coreclr/src/jit/loopcloning.h
index cd9aa9f946bda..d0ec6b6b98992 100644
--- a/src/coreclr/src/jit/loopcloning.h
+++ b/src/coreclr/src/jit/loopcloning.h
@@ -113,7 +113,7 @@ struct ArrIndex
     void Print(unsigned dim = -1)
     {
         printf("V%02d", arrLcl);
-        for (unsigned i = 0; i < ((dim == -1) ? rank : dim); ++i)
+        for (unsigned i = 0; i < ((dim == (unsigned)-1) ?
rank : dim); ++i) { printf("[V%02d]", indLcls.GetRef(i)); } diff --git a/src/coreclr/src/jit/morph.cpp b/src/coreclr/src/jit/morph.cpp index 862b77a3042cb..fb6b3f44175f1 100644 --- a/src/coreclr/src/jit/morph.cpp +++ b/src/coreclr/src/jit/morph.cpp @@ -4322,7 +4322,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call) // call fgMorphMultiregStructArg on each of them. // // Arguments: -// call : a GenTreeCall node that has one or more TYP_STRUCT arguments\ +// call : a GenTreeCall node that has one or more TYP_STRUCT arguments\. // // Notes: // We only call fgMorphMultiregStructArg for struct arguments that are not passed as simple types. @@ -6447,7 +6447,7 @@ GenTree* Compiler::fgMorphField(GenTree* tree, MorphAddrContext* mac) // Build this tree: IND(*) # // | // ADD(I_IMPL) - // / \ + // / \. // / CNS(fldOffset) // / // / @@ -6455,9 +6455,9 @@ GenTree* Compiler::fgMorphField(GenTree* tree, MorphAddrContext* mac) // IND(I_IMPL) == [Base of this DLL's TLS] // | // ADD(I_IMPL) - // / \ + // / \. // / CNS(IdValue*4) or MUL - // / / \ + // / / \. // IND(I_IMPL) / CNS(4) // | / // CNS(TLS_HDL,0x2C) IND @@ -9060,7 +9060,7 @@ GenTree* Compiler::fgMorphOneAsgBlockOp(GenTree* tree) // See if we can do a simple transformation: // // GT_ASG - // / \ + // / \. // GT_IND GT_IND or CNS_INT // | | // [dest] [src] @@ -12420,8 +12420,8 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac) // Here we look for the following tree // // EQ/NE - // / \ - // op1 CNS 0/1 + // / \. + // op1 CNS 0/1 // ival2 = INT_MAX; // The value of INT_MAX for ival2 just means that the constant value is not 0 or 1 @@ -12445,12 +12445,12 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac) // Here we look for the following transformation // // EQ/NE Possible REVERSE(RELOP) - // / \ / \ - // COMMA CNS 0/1 -> COMMA relop_op2 - // / \ / \ - // x RELOP x relop_op1 - // / \ - // relop_op1 relop_op2 + // / \ / \. + // COMMA CNS 0/1 -> COMMA relop_op2 + // / \ / \. + // x RELOP x relop_op1 + // / \. + // relop_op1 relop_op2 // // // @@ -12488,14 +12488,14 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac) // and when the LCL_VAR is a temp we can fold the tree: // // EQ/NE EQ/NE - // / \ / \ - // COMMA CNS 0/1 -> RELOP CNS 0/1 - // / \ / \ - // ASG LCL_VAR - // / \ - // LCL_VAR RELOP - // / \ - // + // / \ / \. + // COMMA CNS 0/1 -> RELOP CNS 0/1 + // / \ / \. + // ASG LCL_VAR + // / \. + // LCL_VAR RELOP + // / \. + // GenTree* asg = op1->gtOp.gtOp1; GenTree* lcl = op1->gtOp.gtOp2; @@ -12562,9 +12562,9 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac) // Here we look for the following tree // // EQ/NE -> RELOP/!RELOP - // / \ / \ + // / \ / \. // RELOP CNS 0/1 - // / \ + // / \. // // Note that we will remove/destroy the EQ/NE node and move // the RELOP up into it's location. @@ -12594,12 +12594,12 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac) // Here we look for the following transformation: // // EQ/NE EQ/NE - // / \ / \ - // AND CNS 0/1 -> AND CNS 0 - // / \ / \ - // RSZ/RSH CNS 1 x CNS (1 << y) - // / \ - // x CNS_INT +y + // / \ / \. + // AND CNS 0/1 -> AND CNS 0 + // / \ / \. + // RSZ/RSH CNS 1 x CNS (1 << y) + // / \. + // x CNS_INT +y if (op1->gtOper == GT_AND) { @@ -14020,12 +14020,12 @@ GenTree* Compiler::fgMorphSmpOpOptional(GenTreeOp* tree) { // This takes // + (tree) - // / \ - // / \ - // / \ + // / \. + // / \. + // / \. // + (op1) op2 - // / \ - // \ + // / \. + // \. // ad2 // // And it swaps ad2 and op2. 
If (op2) is varTypeIsGC, then this implies that (tree) is @@ -14360,13 +14360,13 @@ GenTree* Compiler::fgRecognizeAndMorphBitwiseRotation(GenTree* tree) // Check for a rotation pattern, e.g., // // OR ROL - // / \ / \ + // / \ / \. // LSH RSZ -> x y - // / \ / \ + // / \ / \. // x AND x AND - // / \ / \ + // / \ / \. // y 31 ADD 31 - // / \ + // / \. // NEG 32 // | // y diff --git a/src/coreclr/src/jit/unwindarm.cpp b/src/coreclr/src/jit/unwindarm.cpp index 4a2b8382581e3..936ba1b656527 100644 --- a/src/coreclr/src/jit/unwindarm.cpp +++ b/src/coreclr/src/jit/unwindarm.cpp @@ -1780,7 +1780,7 @@ void UnwindInfo::InitUnwindInfo(Compiler* comp, emitLocation* startLoc, emitLoca // However, its constructor needs to be explicitly called, since the constructor for // UnwindInfo is not called. - uwiFragmentFirst.UnwindFragmentInfo::UnwindFragmentInfo(comp, startLoc, false); + new (&uwiFragmentFirst, jitstd::placement_t()) UnwindFragmentInfo(comp, startLoc, false); uwiFragmentLast = &uwiFragmentFirst; diff --git a/src/coreclr/src/jit/utils.cpp b/src/coreclr/src/jit/utils.cpp index 2010678242feb..888fe605b0516 100644 --- a/src/coreclr/src/jit/utils.cpp +++ b/src/coreclr/src/jit/utils.cpp @@ -2153,9 +2153,9 @@ const SignedMagic* TryGetSignedMagic(int32_t divisor) static const SignedMagic table[]{ {0x55555556, 0}, // 3 {}, - {0x66666667, 1}, // 5 - {0x2aaaaaab, 0}, // 6 - {0x92492493, 2}, // 7 + {0x66666667, 1}, // 5 + {0x2aaaaaab, 0}, // 6 + {(int32_t)0x92492493, 2}, // 7 {}, {0x38e38e39, 1}, // 9 {0x66666667, 2}, // 10 diff --git a/src/coreclr/src/vm/gdbjit.cpp b/src/coreclr/src/vm/gdbjit.cpp index 4ab5336f498d8..d7803f0b3ecaa 100644 --- a/src/coreclr/src/vm/gdbjit.cpp +++ b/src/coreclr/src/vm/gdbjit.cpp @@ -15,7 +15,11 @@ #include "gdbjit.h" #include "gdbjithelpers.h" +#ifndef __GNUC__ __declspec(thread) bool tls_isSymReaderInProgress = false; +#else // !__GNUC__ +thread_local bool tls_isSymReaderInProgress = false; +#endif // !__GNUC__ #ifdef _DEBUG static void DumpElf(const char* methodName, const char *addr, size_t size)
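
Note on the thread-local storage hunks (gcenv.ee.cpp, ee_il_dll.cpp, gdbjit.cpp): each keeps MSVC's __declspec(thread) for non-GNU compilers and uses standard C++11 thread_local when __GNUC__ is defined, because GCC does not accept the __declspec spelling. A minimal standalone sketch of that pattern, with an illustrative variable name that is not part of the patch:

    // Sketch only: choose the TLS storage class per compiler, mirroring the patch.
    #ifndef __GNUC__
    __declspec(thread) int tls_exampleValue = 0; // MSVC-style TLS
    #else  // !__GNUC__
    thread_local int tls_exampleValue = 0;       // standard C++11 TLS on GCC/Clang
    #endif // !__GNUC__

    int BumpExampleValue()
    {
        return ++tls_exampleValue; // each thread increments its own copy
    }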
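
Note on the emit.h and unwindarm.cpp hunks: MSVC accepts re-invoking a constructor through the qualified form obj.Type::Type(args), but GCC rejects it. The patch therefore reinitializes emitLocation by assigning a freshly constructed temporary, and reinitializes uwiFragmentFirst through the JIT's jitstd::placement_t placement-new overload. A small self-contained sketch of the two standard-C++ equivalents, using an illustrative type and ordinary <new> placement new rather than the JIT's overload:

    #include <new>

    struct Counter
    {
        int hits = 0;
    };

    void ResetByAssignment(Counter& c)
    {
        c = Counter(); // emit.h style: assign a freshly constructed temporary
    }

    void ResetByPlacementNew(Counter& c)
    {
        new (&c) Counter(); // unwindarm.cpp style: run the constructor on existing storage
    }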
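
Note on the assert changes in codegenxarch.cpp, flowgraph.cpp, and importer.cpp: GCC's -Wparentheses warning (fatal under -Werror) flags expressions that mix && and || without explicit grouping, so the patch adds parentheses that keep the original logic while making the precedence explicit. A reduced illustration with hypothetical predicates that do not exist in the JIT:

    #include <cassert>

    // Hypothetical stand-ins for the JIT's checks.
    static bool HasInlineInfo() { return true; }
    static bool IsInlining()    { return true; }

    void CheckInlineMode()
    {
        // Without the inner parentheses GCC suggests parentheses around '&&' within '||';
        // the grouped form has the same meaning and compiles cleanly with -Werror.
        assert((HasInlineInfo() && IsInlining()) || (!HasInlineInfo() && !IsInlining()));
    }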
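
Note on the utils.cpp hunk: the literal 0x92492493 does not fit in int32_t, so brace-initializing a SignedMagic entry from it is a narrowing conversion that C++11 list-initialization rejects; the explicit (int32_t) cast keeps the intended bit pattern. A tiny sketch of the same issue, using an assumed two-field struct rather than the JIT's actual SignedMagic definition:

    #include <cstdint>

    struct MagicEntry
    {
        int32_t magic;
        int32_t shift;
    };

    // MagicEntry bad{0x92492493, 2};           // error: narrowing from unsigned to int32_t
    MagicEntry good{(int32_t)0x92492493, 2};     // explicit cast preserves the bit pattern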