diff --git a/src/coreclr/jit/codegen.h b/src/coreclr/jit/codegen.h
index 0ab8a81d89ef93..c5e65b081583df 100644
--- a/src/coreclr/jit/codegen.h
+++ b/src/coreclr/jit/codegen.h
@@ -173,7 +173,6 @@ class CodeGen final : public CodeGenInterface
// the GC info. Requires "codeSize" to be the size of the generated code, "prologSize" and "epilogSize"
// to be the sizes of the prolog and epilog, respectively. In DEBUG, makes a check involving the
// "codePtr", assumed to be a pointer to the start of the generated code.
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef JIT32_GCENCODER
void* genCreateAndStoreGCInfo(unsigned codeSize, unsigned prologSize, unsigned epilogSize DEBUGARG(void* codePtr));
@@ -529,7 +528,6 @@ class CodeGen final : public CodeGenInterface
//
// Epilog functions
//
- CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_ARM)
bool genCanUsePopToReturn(regMaskTP maskPopRegsInt, bool jmpEpilog);
diff --git a/src/coreclr/jit/codegenarmarch.cpp b/src/coreclr/jit/codegenarmarch.cpp
index 12512a958e08e4..a9e2a41f73f945 100644
--- a/src/coreclr/jit/codegenarmarch.cpp
+++ b/src/coreclr/jit/codegenarmarch.cpp
@@ -5708,7 +5708,6 @@ void CodeGen::genFnEpilog(BasicBlock* block)
0, // disp
true); // isJump
// clang-format on
- CLANG_FORMAT_COMMENT_ANCHOR;
#endif // TARGET_ARMARCH
}
#if FEATURE_FASTTAILCALL
diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp
index 0502339718f8fc..eed9a96a981724 100644
--- a/src/coreclr/jit/codegencommon.cpp
+++ b/src/coreclr/jit/codegencommon.cpp
@@ -43,7 +43,6 @@ void CodeGenInterface::setFramePointerRequiredEH(bool value)
// if they are fully-interruptible. So if we have a catch
// or finally that will keep frame-vars alive, we need to
// force fully-interruptible.
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (verbose)
@@ -392,26 +391,25 @@ void CodeGen::genMarkLabelsForCodegen()
case BBJ_CALLFINALLY:
// The finally target itself will get marked by walking the EH table, below, and marking
// all handler begins.
- CLANG_FORMAT_COMMENT_ANCHOR;
#if FEATURE_EH_CALLFINALLY_THUNKS
+ {
+ // For callfinally thunks, we need to mark the block following the callfinally/callfinallyret pair,
+ // as that's needed for identifying the range of the "duplicate finally" region in EH data.
+ BasicBlock* bbToLabel = block->Next();
+ if (block->isBBCallFinallyPair())
{
- // For callfinally thunks, we need to mark the block following the callfinally/callfinallyret pair,
- // as that's needed for identifying the range of the "duplicate finally" region in EH data.
- BasicBlock* bbToLabel = block->Next();
- if (block->isBBCallFinallyPair())
- {
- bbToLabel = bbToLabel->Next(); // skip the BBJ_CALLFINALLYRET
- }
- if (bbToLabel != nullptr)
- {
- JITDUMP(" " FMT_BB " : callfinally thunk region end\n", bbToLabel->bbNum);
- bbToLabel->SetFlags(BBF_HAS_LABEL);
- }
+ bbToLabel = bbToLabel->Next(); // skip the BBJ_CALLFINALLYRET
}
+ if (bbToLabel != nullptr)
+ {
+ JITDUMP(" " FMT_BB " : callfinally thunk region end\n", bbToLabel->bbNum);
+ bbToLabel->SetFlags(BBF_HAS_LABEL);
+ }
+ }
#endif // FEATURE_EH_CALLFINALLY_THUNKS
- break;
+ break;
case BBJ_CALLFINALLYRET:
JITDUMP(" " FMT_BB " : finally continuation\n", block->GetFinallyContinuation()->bbNum);
@@ -932,7 +930,6 @@ void CodeGen::genAdjustStackLevel(BasicBlock* block)
{
#if !FEATURE_FIXED_OUT_ARGS
// Check for inserted throw blocks and adjust genStackLevel.
- CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(UNIX_X86_ABI)
if (isFramePointerUsed() && compiler->fgIsThrowHlpBlk(block))
@@ -1081,7 +1078,6 @@ bool CodeGen::genCreateAddrMode(GenTree* addr,
constant, or we have gone through a GT_NOP or GT_COMMA node. We never come back
here if we find a scaled index.
*/
- CLANG_FORMAT_COMMENT_ANCHOR;
assert(mul == 0);
@@ -3436,7 +3432,6 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere
/* At this point, everything that has the "circular" flag
* set to "true" forms a circular dependency */
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (regArgMaskLive)
@@ -4504,7 +4499,6 @@ void CodeGen::genCheckUseBlockInit()
// find structs that are guaranteed to be block initialized.
// If this logic changes, Compiler::fgVarNeedsExplicitZeroInit needs
// to be modified.
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_64BIT
#if defined(TARGET_AMD64)
@@ -5311,7 +5305,6 @@ void CodeGen::genFinalizeFrame()
genCheckUseBlockInit();
// Set various registers as "modified" for special code generation scenarios: Edit & Continue, P/Invoke calls, etc.
- CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_X86)
@@ -5778,7 +5771,6 @@ void CodeGen::genFnProlog()
// If there is a frame pointer used, due to frame pointer chaining it will point to the stored value of the
// previous frame pointer. Thus, stkOffs can't be zero.
- CLANG_FORMAT_COMMENT_ANCHOR;
#if !defined(TARGET_AMD64)
// However, on amd64 there is no requirement to chain frame pointers.
@@ -6066,7 +6058,6 @@ void CodeGen::genFnProlog()
// Subtract the local frame size from SP.
//
//-------------------------------------------------------------------------
- CLANG_FORMAT_COMMENT_ANCHOR;
#if !defined(TARGET_ARM64) && !defined(TARGET_LOONGARCH64) && !defined(TARGET_RISCV64)
regMaskTP maskStackAlloc = RBM_NONE;
@@ -6256,8 +6247,7 @@ void CodeGen::genFnProlog()
// we've set the live-in regs with values from the Tier0 frame.
//
// Otherwise we'll do some of these fetches twice.
- //
- CLANG_FORMAT_COMMENT_ANCHOR;
+
#if defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
genEnregisterOSRArgsAndLocals(initReg, &initRegZeroed);
#else
@@ -6648,7 +6638,6 @@ void CodeGen::genGeneratePrologsAndEpilogs()
genFnProlog();
// Generate all the prologs and epilogs.
- CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(FEATURE_EH_FUNCLETS)
@@ -8487,7 +8476,6 @@ void CodeGen::genPoisonFrame(regMaskTP regLiveIn)
if ((size / TARGET_POINTER_SIZE) > 16)
{
// This will require more than 16 instructions, switch to rep stosd/memset call.
- CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_XARCH)
GetEmitter()->emitIns_R_S(INS_lea, EA_PTRSIZE, REG_EDI, (int)varNum, 0);
assert(size % 4 == 0);
diff --git a/src/coreclr/jit/codegenlinear.cpp b/src/coreclr/jit/codegenlinear.cpp
index 2d8a2093454f86..038f9fea696bbf 100644
--- a/src/coreclr/jit/codegenlinear.cpp
+++ b/src/coreclr/jit/codegenlinear.cpp
@@ -396,7 +396,6 @@ void CodeGen::genCodeForBBlist()
// Traverse the block in linear order, generating code for each node as we
// as we encounter it.
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
// Set the use-order numbers for each node.
@@ -1780,7 +1779,6 @@ void CodeGen::genConsumePutStructArgStk(GenTreePutArgStk* putArgNode,
// If the op1 is already in the dstReg - nothing to do.
// Otherwise load the op1 (the address) into the dstReg to copy the struct on the stack by value.
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_X86
assert(dstReg != REG_SPBASE);
diff --git a/src/coreclr/jit/codegenloongarch64.cpp b/src/coreclr/jit/codegenloongarch64.cpp
index 0762291af9da01..d6f18005c767c5 100644
--- a/src/coreclr/jit/codegenloongarch64.cpp
+++ b/src/coreclr/jit/codegenloongarch64.cpp
@@ -1307,7 +1307,6 @@ void CodeGen::genFnEpilog(BasicBlock* block)
0, // disp
true); // isJump
// clang-format on
- CLANG_FORMAT_COMMENT_ANCHOR;
}
#if FEATURE_FASTTAILCALL
else
diff --git a/src/coreclr/jit/codegenriscv64.cpp b/src/coreclr/jit/codegenriscv64.cpp
index 0b0199b0646150..17d30e5ada2754 100644
--- a/src/coreclr/jit/codegenriscv64.cpp
+++ b/src/coreclr/jit/codegenriscv64.cpp
@@ -1294,7 +1294,6 @@ void CodeGen::genFnEpilog(BasicBlock* block)
0, // disp
true); // isJump
// clang-format on
- CLANG_FORMAT_COMMENT_ANCHOR;
}
#if FEATURE_FASTTAILCALL
else
diff --git a/src/coreclr/jit/codegenxarch.cpp b/src/coreclr/jit/codegenxarch.cpp
index 132d4b01ccd240..77045cce0875ad 100644
--- a/src/coreclr/jit/codegenxarch.cpp
+++ b/src/coreclr/jit/codegenxarch.cpp
@@ -1719,7 +1719,6 @@ void CodeGen::inst_JMP(emitJumpKind jmp, BasicBlock* tgtBlock, bool isRemovableJ
//
// Thus only on x86 do we need to assert that the stack level at the target block matches the current stack level.
//
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef UNIX_X86_ABI
// bbTgtStkDepth is a (pure) argument count (stack alignment padding should be excluded).
@@ -6638,7 +6637,6 @@ void CodeGen::genJmpMethod(GenTree* jmp)
#endif // !defined(UNIX_AMD64_ABI)
{
// Register argument
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_X86
noway_assert(isRegParamType(genActualType(varDsc->TypeGet())) ||
((varDsc->TypeGet() == TYP_STRUCT) &&
@@ -9125,7 +9123,6 @@ void CodeGen::genAmd64EmitterUnitTestsSse2()
//
// Loads
//
- CLANG_FORMAT_COMMENT_ANCHOR;
genDefineTempLabel(genCreateTempLabel());
@@ -10742,7 +10739,6 @@ void CodeGen::genFuncletProlog(BasicBlock* block)
compiler->unwindEndProlog();
// TODO We may need EBP restore sequence here if we introduce PSPSym
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef UNIX_X86_ABI
// Add a padding for 16-byte alignment
@@ -10890,7 +10886,6 @@ void CodeGen::genZeroInitFrameUsingBlockInit(int untrLclHi, int untrLclLo, regNu
else
{
// Grab a non-argument, non-callee saved XMM reg
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef UNIX_AMD64_ABI
// System V x64 first temp reg is xmm8
regNumber zeroSIMDReg = genRegNumFromMask(RBM_XMM8);
diff --git a/src/coreclr/jit/compiler.cpp b/src/coreclr/jit/compiler.cpp
index 3661e8005cb4cd..697c76527afe55 100644
--- a/src/coreclr/jit/compiler.cpp
+++ b/src/coreclr/jit/compiler.cpp
@@ -758,7 +758,6 @@ var_types Compiler::getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
{
// We have a (large) struct that can't be replaced with a "primitive" type
// and can't be passed in multiple registers
- CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_X86) || defined(TARGET_ARM) || defined(UNIX_AMD64_ABI)
@@ -1908,7 +1907,6 @@ void Compiler::compInit(ArenaAllocator* pAlloc,
//
// Initialize all the per-method statistics gathering data structures.
//
- CLANG_FORMAT_COMMENT_ANCHOR;
#if LOOP_HOIST_STATS
m_loopsConsidered = 0;
m_curLoopHasHoistedExpression = false;
@@ -2279,7 +2277,6 @@ void Compiler::compSetProcessor()
//
// Processor specific optimizations
//
- CLANG_FORMAT_COMMENT_ANCHOR;
CORINFO_InstructionSetFlags instructionSetFlags = jitFlags.GetInstructionSetFlags();
opts.compSupportsISA.Reset();
@@ -2880,7 +2877,6 @@ void Compiler::compInitOptions(JitFlags* jitFlags)
// The rest of the opts fields that we initialize here
// should only be used when we generate code for the method
// They should not be used when importing or inlining
- CLANG_FORMAT_COMMENT_ANCHOR;
#if FEATURE_TAILCALL_OPT
opts.compTailCallLoopOpt = true;
@@ -5825,7 +5821,6 @@ void Compiler::generatePatchpointInfo()
//
// For arm64, if the frame pointer is not at the top of the frame, we need to adjust the
// offset.
- CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_AMD64)
// We add +TARGET_POINTER_SIZE here is to account for the slot that Jit_Patchpoint
@@ -9866,7 +9861,6 @@ JITDBGAPI void __cdecl cTreeFlags(Compiler* comp, GenTree* tree)
chars += printf("flags=");
// Node flags
- CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(DEBUG)
if (tree->gtDebugFlags & GTF_DEBUG_NODE_LARGE)
diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h
index 6ad178b32feedc..7e6b2c57c89dc4 100644
--- a/src/coreclr/jit/compiler.h
+++ b/src/coreclr/jit/compiler.h
@@ -8945,7 +8945,6 @@ class Compiler
// We need to report the ISA dependency to the VM so that scenarios
// such as R2R work correctly for larger vector sizes, so we always
// do `compExactlyDependsOn` for such cases.
- CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_XARCH)
if (compExactlyDependsOn(InstructionSet_VectorT512))
@@ -10364,7 +10363,6 @@ class Compiler
// There are cases where implicit RetBuf argument should be explicitly returned in a register.
// In such cases the return type is changed to TYP_BYREF and appropriate IR is generated.
// These cases are:
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_AMD64
// 1. on x64 Windows and Unix the address of RetBuf needs to be returned by
// methods with hidden RetBufArg in RAX. In such case GT_RETURN is of TYP_BYREF,
@@ -10383,7 +10381,6 @@ class Compiler
#endif
// 3. Windows ARM64 native instance calling convention requires the address of RetBuff
// to be returned in x0.
- CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_ARM64)
if (TargetOS::IsWindows)
{
@@ -10395,7 +10392,6 @@ class Compiler
}
#endif // TARGET_ARM64
// 4. x86 unmanaged calling conventions require the address of RetBuff to be returned in eax.
- CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_X86)
if (info.compCallConv != CorInfoCallConvExtension::Managed)
{
diff --git a/src/coreclr/jit/compiler.hpp b/src/coreclr/jit/compiler.hpp
index b1329e88b0436b..62efd5282a16a1 100644
--- a/src/coreclr/jit/compiler.hpp
+++ b/src/coreclr/jit/compiler.hpp
@@ -2594,7 +2594,6 @@ inline
if (!FPbased)
{
// Worst case stack based offset.
- CLANG_FORMAT_COMMENT_ANCHOR;
#if FEATURE_FIXED_OUT_ARGS
int outGoingArgSpaceSize = lvaOutgoingArgSpaceSize;
#else
@@ -2606,7 +2605,6 @@ inline
else
{
// Worst case FP based offset.
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_ARM
varOffset = codeGen->genCallerSPtoInitialSPdelta() - codeGen->genCallerSPtoFPdelta();
@@ -2694,7 +2692,6 @@ inline bool Compiler::lvaIsOriginalThisArg(unsigned varNum)
{
LclVarDsc* varDsc = lvaGetDesc(varNum);
// Should never write to or take the address of the original 'this' arg
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifndef JIT32_GCENCODER
// With the general encoder/decoder, when the original 'this' arg is needed as a generics context param, we
diff --git a/src/coreclr/jit/decomposelongs.cpp b/src/coreclr/jit/decomposelongs.cpp
index 2f05779f9ff137..ea87a996dbb1aa 100644
--- a/src/coreclr/jit/decomposelongs.cpp
+++ b/src/coreclr/jit/decomposelongs.cpp
@@ -2169,7 +2169,6 @@ void DecomposeLongs::TryPromoteLongVar(unsigned lclNum)
for (unsigned index = 0; index < 2; ++index)
{
// Grab the temp for the field local.
- CLANG_FORMAT_COMMENT_ANCHOR;
// Lifetime of field locals might span multiple BBs, so they are long lifetime temps.
unsigned fieldLclNum = m_compiler->lvaGrabTemp(
diff --git a/src/coreclr/jit/ee_il_dll.cpp b/src/coreclr/jit/ee_il_dll.cpp
index b33e6eed17bbc3..2f86c8b5274536 100644
--- a/src/coreclr/jit/ee_il_dll.cpp
+++ b/src/coreclr/jit/ee_il_dll.cpp
@@ -367,7 +367,6 @@ unsigned Compiler::eeGetArgSize(CorInfoType corInfoType, CORINFO_CLASS_HANDLE ty
// Everything fits into a single 'slot' size
// to accommodate irregular sized structs, they are passed byref
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef UNIX_AMD64_ABI
if (varTypeIsStruct(argType))
@@ -396,7 +395,6 @@ unsigned Compiler::eeGetArgSize(CorInfoType corInfoType, CORINFO_CLASS_HANDLE ty
// For each target that supports passing struct args in multiple registers
// apply the target specific rules for them here:
- CLANG_FORMAT_COMMENT_ANCHOR;
#if FEATURE_MULTIREG_ARGS
#if defined(TARGET_ARM64)
diff --git a/src/coreclr/jit/emit.cpp b/src/coreclr/jit/emit.cpp
index cabad877f83835..85bbda9f3cfbc6 100644
--- a/src/coreclr/jit/emit.cpp
+++ b/src/coreclr/jit/emit.cpp
@@ -4667,7 +4667,6 @@ void emitter::emitRemoveJumpToNextInst()
// the last instruction in the group is the jmp we're looking for
// and it jumps to the next instruction group so we don't need it
- CLANG_FORMAT_COMMENT_ANCHOR
#ifdef DEBUG
unsigned instructionCount = jmpGroup->igInsCnt;
@@ -5079,7 +5078,6 @@ void emitter::emitJumpDistBind()
jmp->idjOffs -= adjLJ;
// If this is a jump via register, the instruction size does not change, so we are done.
- CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_ARM64)
// JIT code and data will be allocated together for arm64 so the relative offset to JIT data is known.
@@ -5145,7 +5143,6 @@ void emitter::emitJumpDistBind()
else
{
/* First time we've seen this label, convert its target */
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (EMITVERBOSE)
@@ -5554,7 +5551,6 @@ void emitter::emitJumpDistBind()
#endif
/* Is there a chance of other jumps becoming short? */
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
#if defined(TARGET_ARM)
if (EMITVERBOSE)
@@ -5881,7 +5877,6 @@ unsigned emitter::getLoopSize(insGroup* igLoopHeader,
// jne IG06
//
//
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if ((igInLoop->igLoopBackEdge != nullptr) && (igInLoop->igLoopBackEdge != igLoopHeader))
@@ -6949,7 +6944,6 @@ unsigned emitter::emitEndCodeGen(Compiler* comp,
*consAddrRW = consBlockRW;
/* Nothing has been pushed on the stack */
- CLANG_FORMAT_COMMENT_ANCHOR;
#if EMIT_TRACK_STACK_DEPTH
emitCurStackLvl = 0;
@@ -7612,7 +7606,6 @@ unsigned emitter::emitEndCodeGen(Compiler* comp,
if (jmp->idjShort)
{
// Patch Forward Short Jump
- CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_XARCH)
*(BYTE*)(adr + writeableOffset) -= (BYTE)adj;
#elif defined(TARGET_ARM)
@@ -7632,7 +7625,6 @@ unsigned emitter::emitEndCodeGen(Compiler* comp,
else
{
// Patch Forward non-Short Jump
- CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_XARCH)
*(int*)(adr + writeableOffset) -= adj;
#elif defined(TARGET_ARMARCH)
@@ -8611,7 +8603,6 @@ void emitter::emitGCvarLiveSet(int offs, GCtype gcType, BYTE* addr, ssize_t disp
desc->vpdNext = nullptr;
/* the lower 2 bits encode props about the stk ptr */
- CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(JIT32_GCENCODER) && !defined(FEATURE_EH_FUNCLETS)
if (offs == emitSyncThisObjOffs)
@@ -10069,7 +10060,6 @@ void emitter::emitStackPopLargeStk(BYTE* addr, bool isCall, unsigned char callIn
Or do we have a partially interruptible EBP-less frame, and any
of EDI,ESI,EBX,EBP are live, or is there an outer/pending call?
*/
- CLANG_FORMAT_COMMENT_ANCHOR;
#if !FPO_INTERRUPTIBLE
if (emitFullyInt || (gcrefRegs == 0 && byrefRegs == 0 && u2.emitGcArgTrackCnt == 0))
diff --git a/src/coreclr/jit/emit.h b/src/coreclr/jit/emit.h
index 094720597ead70..4e37226e2b5816 100644
--- a/src/coreclr/jit/emit.h
+++ b/src/coreclr/jit/emit.h
@@ -328,7 +328,6 @@ struct insGroup
#endif // !FEATURE_EH_FUNCLETS
// Try to do better packing based on how large regMaskSmall is (8, 16, or 64 bits).
- CLANG_FORMAT_COMMENT_ANCHOR;
#if !(REGMASK_BITS <= 32)
regMaskSmall igGCregs; // set of registers with live GC refs
@@ -807,17 +806,16 @@ class emitter
#endif // TARGET_XARCH
#ifdef TARGET_ARM64
-
unsigned _idLclVar : 1; // access a local on stack
- unsigned _idLclVarPair : 1 // carries information for 2 GC lcl vars.
+ unsigned _idLclVarPair : 1; // carries information for 2 GC lcl vars.
#endif
#ifdef TARGET_LOONGARCH64
- // TODO-LoongArch64: maybe delete on future.
- opSize _idOpSize : 3; // operand size: 0=1 , 1=2 , 2=4 , 3=8, 4=16
- insOpts _idInsOpt : 6; // loongarch options for special: placeholders. e.g emitIns_R_C, also identifying the
- // accessing a local on stack.
- unsigned _idLclVar : 1; // access a local on stack.
+ // TODO-LoongArch64: maybe delete in the future.
+ opSize _idOpSize : 3; // operand size: 0=1 , 1=2 , 2=4 , 3=8, 4=16
+ insOpts _idInsOpt : 6; // loongarch options for special: placeholders. e.g emitIns_R_C, also identifying the
+ // accessing a local on stack.
+ unsigned _idLclVar : 1; // access a local on stack.
#endif
#ifdef TARGET_RISCV64
@@ -848,7 +846,6 @@ class emitter
// How many bits have been used beyond the first 32?
// Define ID_EXTRA_BITFIELD_BITS to that number.
//
- CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_ARM)
#define ID_EXTRA_BITFIELD_BITS (16)
@@ -876,7 +873,6 @@ class emitter
// All instrDesc types are <= 56 bytes, but we also need m_debugInfoSize,
// which is pointer sized, so 5 bits are required on 64-bit and 4 bits
// on 32-bit.
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef HOST_64BIT
unsigned _idScaledPrevOffset : 5;
@@ -898,7 +894,6 @@ class emitter
// arm64: 60/55 bits
// loongarch64: 53/48 bits
// risc-v: 53/48 bits
- CLANG_FORMAT_COMMENT_ANCHOR;
#define ID_EXTRA_BITS (ID_EXTRA_RELOC_BITS + ID_EXTRA_BITFIELD_BITS + ID_EXTRA_PREV_OFFSET_BITS)
@@ -915,7 +910,6 @@ class emitter
// arm64: 4/9 bits
// loongarch64: 11/16 bits
// risc-v: 11/16 bits
- CLANG_FORMAT_COMMENT_ANCHOR;
#define ID_ADJ_SMALL_CNS (int)(1 << (ID_BIT_SMALL_CNS - 1))
#define ID_CNT_SMALL_CNS (int)(1 << ID_BIT_SMALL_CNS)
@@ -940,7 +934,6 @@ class emitter
//
// SMALL_IDSC_SIZE is this size, in bytes.
//
- CLANG_FORMAT_COMMENT_ANCHOR;
#define SMALL_IDSC_SIZE 8
@@ -957,7 +950,6 @@ class emitter
}
private:
- CLANG_FORMAT_COMMENT_ANCHOR;
void checkSizes();
@@ -2571,7 +2563,6 @@ class emitter
// instruction group depends on the instruction mix as well as DEBUG/non-DEBUG build type. See the
// EMITTER_STATS output for various statistics related to this.
//
- CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
// ARM32/64, LoongArch and RISC-V can require a bigger prolog instruction group. One scenario
diff --git a/src/coreclr/jit/emitloongarch64.cpp b/src/coreclr/jit/emitloongarch64.cpp
index 539e07a1136541..c69ea7c5a36e6f 100644
--- a/src/coreclr/jit/emitloongarch64.cpp
+++ b/src/coreclr/jit/emitloongarch64.cpp
@@ -2873,7 +2873,6 @@ void emitter::emitJumpDistBind()
jmp->idjOffs += adjSJ;
// If this is a jump via register, the instruction size does not change, so we are done.
- CLANG_FORMAT_COMMENT_ANCHOR;
/* Have we bound this jump's target already? */
@@ -2894,7 +2893,6 @@ void emitter::emitJumpDistBind()
else
{
/* First time we've seen this label, convert its target */
- CLANG_FORMAT_COMMENT_ANCHOR;
tgtIG = (insGroup*)emitCodeGetCookie(jmp->idAddr()->iiaBBlabel);
diff --git a/src/coreclr/jit/emitriscv64.cpp b/src/coreclr/jit/emitriscv64.cpp
index 525d5e5274ba74..533d26ef2307cf 100644
--- a/src/coreclr/jit/emitriscv64.cpp
+++ b/src/coreclr/jit/emitriscv64.cpp
@@ -1847,7 +1847,6 @@ void emitter::emitJumpDistBind()
jmp->idjOffs += adjSJ;
// If this is a jump via register, the instruction size does not change, so we are done.
- CLANG_FORMAT_COMMENT_ANCHOR;
/* Have we bound this jump's target already? */
@@ -1868,7 +1867,6 @@ void emitter::emitJumpDistBind()
else
{
/* First time we've seen this label, convert its target */
- CLANG_FORMAT_COMMENT_ANCHOR;
tgtIG = (insGroup*)emitCodeGetCookie(jmp->idAddr()->iiaBBlabel);
diff --git a/src/coreclr/jit/emitxarch.cpp b/src/coreclr/jit/emitxarch.cpp
index e356ab8b3d1132..848ec0f479edd0 100644
--- a/src/coreclr/jit/emitxarch.cpp
+++ b/src/coreclr/jit/emitxarch.cpp
@@ -3863,7 +3863,6 @@ inline UNATIVE_OFFSET emitter::emitInsSizeSVCalcDisp(instrDesc* id, code_t code,
#endif
{
// Dev10 804810 - failing this assert can lead to bad codegen and runtime crashes
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef UNIX_AMD64_ABI
const LclVarDsc* varDsc = emitComp->lvaGetDesc(var);
@@ -4146,7 +4145,6 @@ UNATIVE_OFFSET emitter::emitInsSizeAM(instrDesc* id, code_t code)
if (reg == REG_NA)
{
/* The address is of the form "[disp]" */
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_X86
// Special case: "mov eax, [disp]" and "mov [disp], eax" can use a smaller 1-byte encoding.
@@ -15770,7 +15768,6 @@ BYTE* emitter::emitOutputLJ(insGroup* ig, BYTE* dst, instrDesc* i)
if (dstOffs <= srcOffs)
{
// This is a backward jump - distance is known at this point
- CLANG_FORMAT_COMMENT_ANCHOR;
#if DEBUG_EMIT
if (id->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM || INTERESTING_JUMP_NUM == 0)
@@ -17205,7 +17202,6 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp)
if (ins == INS_pop)
{
// The offset in "pop [ESP+xxx]" is relative to the new ESP value
- CLANG_FORMAT_COMMENT_ANCHOR;
#if !FEATURE_FIXED_OUT_ARGS
emitCurStackLvl -= sizeof(int);
diff --git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp
index a650ae437fccc6..48d8765857fd0b 100644
--- a/src/coreclr/jit/fgbasic.cpp
+++ b/src/coreclr/jit/fgbasic.cpp
@@ -5635,7 +5635,6 @@ bool Compiler::fgEhAllowsMoveBlock(BasicBlock* bBefore, BasicBlock* bAfter)
void Compiler::fgMoveBlocksAfter(BasicBlock* bStart, BasicBlock* bEnd, BasicBlock* insertAfterBlk)
{
/* We have decided to insert the block(s) after 'insertAfterBlk' */
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (verbose)
@@ -5742,7 +5741,6 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r
// 1. Verify that all the blocks in the range are either all rarely run or not rarely run.
// When creating funclets, we ignore the run rarely flag, as we need to be able to move any blocks
// in the range.
- CLANG_FORMAT_COMMENT_ANCHOR;
#if !defined(FEATURE_EH_FUNCLETS)
bool isRare;
diff --git a/src/coreclr/jit/fginline.cpp b/src/coreclr/jit/fginline.cpp
index ba5ed96610dd32..7450df323aea0c 100644
--- a/src/coreclr/jit/fginline.cpp
+++ b/src/coreclr/jit/fginline.cpp
@@ -1582,7 +1582,6 @@ void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo)
}
// Update optMethodFlags
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
unsigned optMethodFlagsBefore = optMethodFlags;
diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp
index 9a98ea7f619cd2..f5ca834696ce5b 100644
--- a/src/coreclr/jit/fgopt.cpp
+++ b/src/coreclr/jit/fgopt.cpp
@@ -1897,7 +1897,6 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block)
if (block->NumSucc(this) == 1)
{
// Use BBJ_ALWAYS for a switch with only a default clause, or with only one unique successor.
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (verbose)
@@ -2007,7 +2006,6 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block)
// replace it with a COMMA node. In such a case we will end up with GT_JTRUE node pointing to
// a COMMA node which results in noway asserts in fgMorphSmpOp(), optAssertionGen() and rpPredictTreeRegUse().
// For the same reason fgMorphSmpOp() marks GT_JTRUE nodes with RELOP children as GTF_DONT_CSE.
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (verbose)
diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp
index 96e0a3e785f1fc..92b84e31aa72bb 100644
--- a/src/coreclr/jit/flowgraph.cpp
+++ b/src/coreclr/jit/flowgraph.cpp
@@ -310,7 +310,6 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block)
// Create a GT_EQ node that checks against g_TrapReturningThreads. True jumps to Bottom,
// false falls through to poll. Add this to the end of Top. Top is now BBJ_COND. Bottom is
// now a jump target
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef ENABLE_FAST_GCPOLL_HELPER
// Prefer the fast gc poll helepr over the double indirection
diff --git a/src/coreclr/jit/gcencode.cpp b/src/coreclr/jit/gcencode.cpp
index 9d521ebef799cd..a093d8a20e5981 100644
--- a/src/coreclr/jit/gcencode.cpp
+++ b/src/coreclr/jit/gcencode.cpp
@@ -134,7 +134,6 @@ void GCInfo::gcMarkFilterVarsPinned()
// (2) a regular one for after the filter
// and then adjust the original lifetime to end before
// the filter.
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (compiler->verbose)
@@ -177,7 +176,6 @@ void GCInfo::gcMarkFilterVarsPinned()
// somewhere inside it, so we only create 1 new lifetime,
// and then adjust the original lifetime to end before
// the filter.
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (compiler->verbose)
@@ -216,7 +214,6 @@ void GCInfo::gcMarkFilterVarsPinned()
// lifetime for the part inside the filter and adjust
// the start of the original lifetime to be the end
// of the filter
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (compiler->verbose)
{
@@ -259,7 +256,6 @@ void GCInfo::gcMarkFilterVarsPinned()
{
// The variable lifetime is completely within the filter,
// so just add the pinned flag.
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (compiler->verbose)
{
@@ -1463,7 +1459,6 @@ size_t GCInfo::gcInfoBlockHdrSave(
#endif
/* Write the method size first (using between 1 and 5 bytes) */
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (compiler->verbose)
@@ -4112,7 +4107,6 @@ void GCInfo::gcMakeRegPtrTable(
// pointers" section of the GC info even if lvTracked==true
// Has this argument been fully enregistered?
- CLANG_FORMAT_COMMENT_ANCHOR;
if (!varDsc->lvOnFrame)
{
@@ -4141,7 +4135,6 @@ void GCInfo::gcMakeRegPtrTable(
}
// If we haven't continued to the next variable, we should report this as an untracked local.
- CLANG_FORMAT_COMMENT_ANCHOR;
GcSlotFlags flags = GC_SLOT_UNTRACKED;
diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp
index 5125c3ad42fcfe..9cf06c7bb1fcb5 100644
--- a/src/coreclr/jit/gentree.cpp
+++ b/src/coreclr/jit/gentree.cpp
@@ -891,7 +891,6 @@ int GenTree::GetRegisterDstCount(Compiler* compiler) const
// A MultiRegOp is a GT_MUL_LONG, GT_PUTARG_REG, or GT_BITCAST.
// For the latter two (ARM-only), they only have multiple registers if they produce a long value
// (GT_MUL_LONG always produces a long value).
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_ARM
return (TypeGet() == TYP_LONG) ? 2 : 1;
#else
@@ -4882,7 +4881,6 @@ bool Compiler::gtMarkAddrMode(GenTree* addr, int* pCostEx, int* pCostSz, var_typ
// [base + idx * mul + cns] // mul can be 0, 2, 4, or 8
// Note that mul == 0 is semantically equivalent to mul == 1.
// Note that cns can be zero.
- CLANG_FORMAT_COMMENT_ANCHOR;
assert((base != nullptr) || (idx != nullptr && mul >= 2));
@@ -9450,7 +9448,6 @@ GenTree* Compiler::gtCloneExpr(GenTree* tree)
if (kind & GTK_SMPOP)
{
/* If necessary, make sure we allocate a "fat" tree node */
- CLANG_FORMAT_COMMENT_ANCHOR;
switch (oper)
{
diff --git a/src/coreclr/jit/gentree.h b/src/coreclr/jit/gentree.h
index 7fbcf5471103ea..a0e8eb7242e6cf 100644
--- a/src/coreclr/jit/gentree.h
+++ b/src/coreclr/jit/gentree.h
@@ -826,7 +826,6 @@ struct GenTree
//
// Register or register pair number of the node.
//
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
@@ -858,7 +857,6 @@ struct GenTree
public:
// The register number is stored in a small format (8 bits), but the getters return and the setters take
// a full-size (unsigned) format, to localize the casts here.
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
bool canBeContained() const;
@@ -1309,7 +1307,6 @@ struct GenTree
{
// Note that only GT_EQ to GT_GT are HIR nodes, GT_TEST and GT_BITTEST
// nodes are backend nodes only.
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_XARCH
static_assert_no_msg(AreContiguous(GT_EQ, GT_NE, GT_LT, GT_LE, GT_GE, GT_GT, GT_TEST_EQ, GT_TEST_NE,
GT_BITTEST_EQ, GT_BITTEST_NE));
@@ -1999,7 +1996,6 @@ struct GenTree
// These are only used for dumping.
// The GetRegNum() is only valid in LIR, but the dumping methods are not easily
// modified to check this.
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
bool InReg() const
diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp
index 4a1590fb7e0bfb..0d1df79812f03a 100644
--- a/src/coreclr/jit/importer.cpp
+++ b/src/coreclr/jit/importer.cpp
@@ -5238,7 +5238,6 @@ var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTr
// VSW 318822
//
// So here we decide to make the resulting type to be a native int.
- CLANG_FORMAT_COMMENT_ANCHOR;
// Insert an explicit upcast if needed.
op1 = *pOp1 = impImplicitIorI4Cast(op1, TYP_I_IMPL, fUnsigned);
@@ -9167,20 +9166,19 @@ void Compiler::impImportBlockCode(BasicBlock* block)
assert(!"Unexpected fieldAccessor");
}
- /* V4.0 allows assignment of i4 constant values to i8 type vars when IL verifier is bypassed (full
- trust apps). The reason this works is that JIT stores an i4 constant in GenTree union during
- importation and reads from the union as if it were a long during code generation. Though this
- can potentially read garbage, one can get lucky to have this working correctly.
+ /* V4.0 allows assignment of i4 constant values to i8 type vars when IL verifier is bypassed (full
+ trust apps). The reason this works is that JIT stores an i4 constant in GenTree union during
+ importation and reads from the union as if it were a long during code generation. Though this
+ can potentially read garbage, one can get lucky to have this working correctly.
- This code pattern is generated by Dev10 MC++ compiler while storing to fields when compiled with
- /O2 switch (default when compiling retail configs in Dev10) and a customer app has taken a
- dependency on it. To be backward compatible, we will explicitly add an upward cast here so that
- it works correctly always.
+ This code pattern is generated by Dev10 MC++ compiler while storing to fields when compiled with
+ /O2 switch (default when compiling retail configs in Dev10) and a customer app has taken a
+ dependency on it. To be backward compatible, we will explicitly add an upward cast here so that
+ it works correctly always.
- Note that this is limited to x86 alone as there is no back compat to be addressed for Arm JIT
- for V4.0.
- */
- CLANG_FORMAT_COMMENT_ANCHOR;
+ Note that this is limited to x86 alone as there is no back compat to be addressed for Arm JIT
+ for V4.0.
+ */
#ifndef TARGET_64BIT
// In UWP6.0 and beyond (post-.NET Core 2.0), we decided to let this cast from int to long be
diff --git a/src/coreclr/jit/importercalls.cpp b/src/coreclr/jit/importercalls.cpp
index 24d7370b8f985a..0974d654ffb309 100644
--- a/src/coreclr/jit/importercalls.cpp
+++ b/src/coreclr/jit/importercalls.cpp
@@ -587,7 +587,6 @@ var_types Compiler::impImportCall(OPCODE opcode,
tailcall to a function with a different number of arguments, we
are hosed. There are ways around this (caller remembers esp value,
varargs is not caller-pop, etc), but not worth it. */
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_X86
if (canTailCall)
@@ -9781,8 +9780,6 @@ NamedIntrinsic Compiler::lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method)
// be behind a relevant IsSupported check and will never be hit and the
// software fallback will be executed instead.
- CLANG_FORMAT_COMMENT_ANCHOR;
-
#ifdef FEATURE_HW_INTRINSICS
namespaceName += 10;
const char* platformNamespaceName;
diff --git a/src/coreclr/jit/inductionvariableopts.cpp b/src/coreclr/jit/inductionvariableopts.cpp
index 19755c312de350..a1ab0c58ecd976 100644
--- a/src/coreclr/jit/inductionvariableopts.cpp
+++ b/src/coreclr/jit/inductionvariableopts.cpp
@@ -481,7 +481,6 @@ PhaseStatus Compiler::optInductionVariables()
// Currently we only do IV widening which generally is only profitable for
// x64 because arm64 addressing modes can include the zero/sign-extension
// of the index for free.
- CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_XARCH) && defined(TARGET_64BIT)
m_dfsTree = fgComputeDfs();
m_loops = FlowGraphNaturalLoops::Find(m_dfsTree);
diff --git a/src/coreclr/jit/jit.h b/src/coreclr/jit/jit.h
index 1094740a8e25d0..cc8c8cb717d9ab 100644
--- a/src/coreclr/jit/jit.h
+++ b/src/coreclr/jit/jit.h
@@ -17,10 +17,6 @@
#endif
#endif
-// Clang-format messes with the indentation of comments if they directly precede an
-// ifdef. This macro allows us to anchor the comments to the regular flow of code.
-#define CLANG_FORMAT_COMMENT_ANCHOR ;
-
// Clang-tidy replaces 0 with nullptr in some templated functions, causing a build
// break. Replacing those instances with ZERO avoids this change
#define ZERO 0
diff --git a/src/coreclr/jit/jiteh.cpp b/src/coreclr/jit/jiteh.cpp
index 573191fecb38c3..329f1c602cf989 100644
--- a/src/coreclr/jit/jiteh.cpp
+++ b/src/coreclr/jit/jiteh.cpp
@@ -1361,7 +1361,6 @@ void Compiler::fgAllocEHTable()
// twice the number of EH clauses in the IL, which should be good in practice.
// In extreme cases, we might need to abandon this and reallocate. See
// fgAddEHTableEntry() for more details.
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
compHndBBtabAllocCount = info.compXcptnsCount; // force the resizing code to hit more frequently in DEBUG
@@ -1682,7 +1681,6 @@ void Compiler::fgSortEHTable()
// but ARM did. It turns out not sorting the table can cause the EH table to incorrectly
// set the bbHndIndex value in some nested cases, and that can lead to a security exploit
// that allows the execution of arbitrary code.
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (verbose)
@@ -2542,7 +2540,6 @@ bool Compiler::fgNormalizeEHCase3()
if (EHblkDsc::ebdIsSameTry(ehOuter, ehInner))
{
// We can't touch this 'try', since it's mutual protect.
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (verbose)
{
@@ -2729,7 +2726,6 @@ bool Compiler::fgNormalizeEHCase3()
if (innerIsTryRegion && ehOuter->ebdIsSameTry(mutualTryBeg, mutualTryLast))
{
// We can't touch this 'try', since it's mutual protect.
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (verbose)
@@ -3195,7 +3191,6 @@ void Compiler::fgVerifyHandlerTab()
// blocks in the nested EH region. However, if funclets have been created, this is no longer true, since
// this 'try' might be in a handler that is pulled out to the funclet region, while the outer 'try'
// remains in the main function region.
- CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(FEATURE_EH_FUNCLETS)
if (fgFuncletsCreated)
@@ -4203,7 +4198,6 @@ bool Compiler::fgRelocateEHRegions()
// Currently it is not good to move the rarely run handler regions to the end of the method
// because fgDetermineFirstColdBlock() must put the start of any handler region in the hot
// section.
- CLANG_FORMAT_COMMENT_ANCHOR;
#if 0
// Now try to move the entire handler region if it can be moved.
diff --git a/src/coreclr/jit/jitgcinfo.h b/src/coreclr/jit/jitgcinfo.h
index 288042d4c6b1e4..2258903a0603eb 100644
--- a/src/coreclr/jit/jitgcinfo.h
+++ b/src/coreclr/jit/jitgcinfo.h
@@ -340,7 +340,6 @@ class GCInfo
//
// These record the info about the procedure in the info-block
//
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef JIT32_GCENCODER
private:
diff --git a/src/coreclr/jit/lclvars.cpp b/src/coreclr/jit/lclvars.cpp
index 6b3650416574c0..042c411b306d0d 100644
--- a/src/coreclr/jit/lclvars.cpp
+++ b/src/coreclr/jit/lclvars.cpp
@@ -2590,7 +2590,6 @@ bool Compiler::StructPromotionHelper::ShouldPromoteStructVar(unsigned lclNum)
// In that case, we would like to avoid promortion.
// However we haven't yet computed the lvRefCnt values so we can't do that.
//
- CLANG_FORMAT_COMMENT_ANCHOR;
return shouldPromote;
}
@@ -4449,7 +4448,6 @@ void Compiler::lvaMarkLclRefs(GenTree* tree, BasicBlock* block, Statement* stmt,
{
// Variables can be marked as DoNotEngister in earlier stages like LocalAddressVisitor.
// No need to track them for single-def.
- CLANG_FORMAT_COMMENT_ANCHOR;
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
// TODO-CQ: If the varType needs partial callee save, conservatively do not enregister
@@ -6067,7 +6065,6 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum,
{
/* Argument is passed in a register, don't count it
* when updating the current offset on the stack */
- CLANG_FORMAT_COMMENT_ANCHOR;
#if !defined(TARGET_ARMARCH) && !defined(TARGET_LOONGARCH64) && !defined(TARGET_RISCV64)
#if DEBUG
@@ -6254,7 +6251,6 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum,
// r3 int a2 --> pushed (not pre-spilled) for alignment of a0 by lvaInitUserArgs.
// r2 struct { int } a1
// r0-r1 struct { long } a0
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef PROFILING_SUPPORTED
// On Arm under profiler, r0-r3 are always prespilled on stack.
@@ -6318,7 +6314,6 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum,
// For struct promoted parameters we need to set the offsets for both LclVars.
//
// For a dependent promoted struct we also assign the struct fields stack offset
- CLANG_FORMAT_COMMENT_ANCHOR;
if (varDsc->lvPromoted)
{
@@ -7385,7 +7380,6 @@ void Compiler::lvaAlignFrame()
// If this isn't the final frame layout, assume we have to push an extra QWORD
// Just so the offsets are true upper limits.
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef UNIX_AMD64_ABI
// The compNeedToAlignFrame flag is indicating if there is a need to align the frame.
@@ -7507,8 +7501,6 @@ void Compiler::lvaAssignFrameOffsetsToPromotedStructs()
// assign their offsets in lvaAssignVirtualFrameOffsetToArg().
// This is not true for the System V systems since there is no
// outgoing args space. Assign the dependently promoted fields properly.
- //
- CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(UNIX_AMD64_ABI) || defined(TARGET_ARM) || defined(TARGET_X86)
// ARM: lo/hi parts of a promoted long arg need to be updated.
@@ -7584,7 +7576,6 @@ int Compiler::lvaAllocateTemps(int stkOffs, bool mustDoubleAlign)
/* Figure out and record the stack offset of the temp */
/* Need to align the offset? */
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_64BIT
if (varTypeIsGC(tempType) && ((stkOffs % TARGET_POINTER_SIZE) != 0))
diff --git a/src/coreclr/jit/liveness.cpp b/src/coreclr/jit/liveness.cpp
index 7f413b75d6649e..05c65d2de3450c 100644
--- a/src/coreclr/jit/liveness.cpp
+++ b/src/coreclr/jit/liveness.cpp
@@ -498,7 +498,6 @@ void Compiler::fgPerBlockLocalVarLiveness()
// 32-bit targets always pop the frame in the epilog.
// For 64-bit targets, we only do this in the epilog for IL stubs;
// for non-IL stubs the frame is popped after every PInvoke call.
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_64BIT
if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB))
#endif
diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp
index 5d3a504175ee9e..0cd0500a9d1630 100644
--- a/src/coreclr/jit/lower.cpp
+++ b/src/coreclr/jit/lower.cpp
@@ -1633,7 +1633,6 @@ GenTree* Lowering::NewPutArg(GenTreeCall* call, GenTree* arg, CallArg* callArg,
// Mark this one as tail call arg if it is a fast tail call.
// This provides the info to put this argument in in-coming arg area slot
// instead of in out-going arg area slot.
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
// Make sure state is correct. The PUTARG_STK has TYP_VOID, as it doesn't produce
@@ -5667,7 +5666,6 @@ void Lowering::InsertPInvokeMethodProlog()
// On 32-bit targets, CORINFO_HELP_INIT_PINVOKE_FRAME initializes the PInvoke frame and then pushes it onto
// the current thread's Frame stack. On 64-bit targets, it only initializes the PInvoke frame.
// As a result, don't push the frame onto the frame stack here for any 64-bit targets
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_64BIT
#ifdef USE_PER_FRAME_PINVOKE_INIT
@@ -5732,7 +5730,6 @@ void Lowering::InsertPInvokeMethodEpilog(BasicBlock* returnBB DEBUGARG(GenTree*
// Pop the frame if necessary. This always happens in the epilog on 32-bit targets. For 64-bit targets, we only do
// this in the epilog for IL stubs; for non-IL stubs the frame is popped after every PInvoke call.
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef USE_PER_FRAME_PINVOKE_INIT
// For IL stubs, we push the frame once even when we're doing per-pinvoke init
@@ -5882,7 +5879,6 @@ void Lowering::InsertPInvokeCallProlog(GenTreeCall* call)
// Push the PInvoke frame if necessary. On 32-bit targets this only happens in the method prolog if a method
// contains PInvokes; on 64-bit targets this is necessary in non-stubs.
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef USE_PER_FRAME_PINVOKE_INIT
if (!comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB))
@@ -5960,7 +5956,6 @@ void Lowering::InsertPInvokeCallEpilog(GenTreeCall* call)
// Pop the frame if necessary. On 32-bit targets this only happens in the method epilog; on 64-bit targets
// this happens after every PInvoke call in non-stubs. 32-bit targets instead mark the frame as inactive.
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef USE_PER_FRAME_PINVOKE_INIT
if (!comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB))
@@ -6954,7 +6949,6 @@ bool Lowering::LowerUnsignedDivOrMod(GenTreeOp* divMod)
// On ARM64 we will use a 32x32->64 bit multiply instead of a 64x64->64 one.
bool widenToNativeIntForMul = (type != TYP_I_IMPL) && !simpleMul;
#else
- CLANG_FORMAT_COMMENT_ANCHOR;
bool widenToNativeIntForMul = (type != TYP_I_IMPL);
#endif
@@ -8856,7 +8850,6 @@ GenTree* Lowering::LowerIndir(GenTreeIndir* ind)
// TODO-Cleanup: We're passing isContainable = true but ContainCheckIndir rejects
// address containment in some cases so we end up creating trivial (reg + offfset)
// or (reg + reg) LEAs that are not necessary.
- CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_ARM64)
// Verify containment safety before creating an LEA that must be contained.
@@ -9528,7 +9521,6 @@ void Lowering::TryRetypingFloatingPointStoreToIntegerStore(GenTree* store)
// section and it is not a clear win to switch them to inline integers.
// ARM: FP constants are assembled from integral ones, so it is always profitable
// to directly use the integers as it avoids the int -> float conversion.
- CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_XARCH) || defined(TARGET_ARM)
bool shouldSwitchToInteger = true;
diff --git a/src/coreclr/jit/lsra.cpp b/src/coreclr/jit/lsra.cpp
index 50652ca075254d..ebedf7849004df 100644
--- a/src/coreclr/jit/lsra.cpp
+++ b/src/coreclr/jit/lsra.cpp
@@ -815,7 +815,6 @@ LinearScan::LinearScan(Compiler* theCompiler)
#endif
// Initialize the availableRegs to use for each TYP_*
- CLANG_FORMAT_COMMENT_ANCHOR;
#define DEF_TP(tn, nm, jitType, sz, sze, asze, st, al, regTyp, regFld, csr, ctr, tf) \
 availableRegs[static_cast<int>(TYP_##tn)] = &regFld;
@@ -2003,7 +2002,6 @@ void LinearScan::identifyCandidates()
// We maintain two sets of FP vars - those that meet the first threshold of weighted ref Count,
// and those that meet the second (see the definitions of thresholdFPRefCntWtd and maybeFPRefCntWtd
// above).
- CLANG_FORMAT_COMMENT_ANCHOR;
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
// Additionally, when we are generating code for a target with partial SIMD callee-save
@@ -2080,7 +2078,6 @@ void LinearScan::identifyCandidates()
// registers current include the number of fp vars, whether there are loops, and whether there are
// multiple exits. These have been selected somewhat empirically, but there is probably room for
// more tuning.
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (VERBOSE)
@@ -3023,7 +3020,6 @@ regNumber LinearScan::allocateReg(Interval* currentInterval,
if (regSelector->isSpilling())
{
// We're spilling.
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_ARM
if (currentInterval->registerType == TYP_DOUBLE)
@@ -5455,7 +5451,6 @@ void LinearScan::allocateRegistersMinimal()
}
// Free registers to clear associated intervals for resolution phase
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (getLsraExtendLifeTimes())
@@ -6756,7 +6751,6 @@ void LinearScan::allocateRegisters()
#endif // JIT32_GCENCODER
// Free registers to clear associated intervals for resolution phase
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (getLsraExtendLifeTimes())
diff --git a/src/coreclr/jit/lsra.h b/src/coreclr/jit/lsra.h
index e038b4e8243a57..2705a93188dad6 100644
--- a/src/coreclr/jit/lsra.h
+++ b/src/coreclr/jit/lsra.h
@@ -773,7 +773,6 @@ class LinearScan : public LinearScanInterface
// At least for x86 and AMD64, and potentially other architecture that will support SIMD,
// we need a minimum of 5 fp regs in order to support the InitN intrinsic for Vector4.
// Hence the "SmallFPSet" has 5 elements.
- CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_AMD64)
#ifdef UNIX_AMD64_ABI
diff --git a/src/coreclr/jit/lsrabuild.cpp b/src/coreclr/jit/lsrabuild.cpp
index 4f3d39c76d3ad1..86abef939a9d8c 100644
--- a/src/coreclr/jit/lsrabuild.cpp
+++ b/src/coreclr/jit/lsrabuild.cpp
@@ -2533,7 +2533,6 @@ void LinearScan::buildIntervals()
// is at a new location and doesn't interfere with the uses.
// For multi-reg local stores, the 'BuildMultiRegStoreLoc' method will further increment the
// location by 2 for each destination register beyond the first.
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
node->gtSeqNum = currentLoc;
diff --git a/src/coreclr/jit/lsraxarch.cpp b/src/coreclr/jit/lsraxarch.cpp
index ad7d25709ee303..f380daeab59ac2 100644
--- a/src/coreclr/jit/lsraxarch.cpp
+++ b/src/coreclr/jit/lsraxarch.cpp
@@ -710,7 +710,6 @@ bool LinearScan::isRMWRegOper(GenTree* tree)
{
// TODO-XArch-CQ: Make this more accurate.
// For now, We assume that most binary operators are of the RMW form.
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef FEATURE_HW_INTRINSICS
assert(tree->OperIsBinary() || (tree->OperIsMultiOp() && (tree->AsMultiOp()->GetOperandCount() <= 2)));
@@ -1069,7 +1068,6 @@ int LinearScan::BuildShiftRotate(GenTree* tree)
// TODO-CQ-XARCH: We can optimize generating 'test' instruction for GT_EQ/NE(shift, 0)
// if the shift count is known to be non-zero and in the range depending on the
// operand size.
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_X86
// The first operand of a GT_LSH_HI and GT_RSH_LO oper is a GT_LONG so that
@@ -1169,7 +1167,6 @@ int LinearScan::BuildCall(GenTreeCall* call)
RegisterType registerType = regType(call);
// Set destination candidates for return value of the call.
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_X86
if (call->IsHelperCall(compiler, CORINFO_HELP_INIT_PINVOKE_FRAME))
@@ -3019,7 +3016,6 @@ int LinearScan::BuildMul(GenTree* tree)
// three-op form: reg = r/m * imm
// This special widening 32x32->64 MUL is not used on x64
- CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_X86)
if (tree->OperGet() != GT_MUL_LONG)
#endif
diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp
index 2144869b1ce40e..4b301696a1eeb3 100644
--- a/src/coreclr/jit/morph.cpp
+++ b/src/coreclr/jit/morph.cpp
@@ -1681,7 +1681,6 @@ void CallArgs::EvalArgsToTemps(Compiler* comp, GenTreeCall* call)
{
// Create a temp assignment for the argument
// Put the temp in the gtCallLateArgs list
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (comp->verbose)
@@ -1984,7 +1983,6 @@ void CallArgs::AddFinalArgsAndDetermineABIInfo(Compiler* comp, GenTreeCall* call
// The logic here must remain in sync with GetNonStandardAddedArgCount(), which is used to map arguments
// in the implementation of fast tail call.
// *********** END NOTE *********
- CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_ARM)
// A non-standard calling convention using wrapper delegate invoke is used on ARM, only, for wrapper
@@ -2030,8 +2028,6 @@ void CallArgs::AddFinalArgsAndDetermineABIInfo(Compiler* comp, GenTreeCall* call
// We are allowed to have a ret buffer argument combined
// with any of the remaining non-standard arguments
- //
- CLANG_FORMAT_COMMENT_ANCHOR;
if (call->IsVirtualStub() && addStubCellArg)
{
@@ -2158,7 +2154,6 @@ void CallArgs::AddFinalArgsAndDetermineABIInfo(Compiler* comp, GenTreeCall* call
#endif // TARGET_X86
/* Morph the user arguments */
- CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_ARM)
@@ -2729,7 +2724,6 @@ void CallArgs::AddFinalArgsAndDetermineABIInfo(Compiler* comp, GenTreeCall* call
}
// Now we know if the argument goes in registers or not and how big it is.
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_ARM
// If we ever allocate a floating point argument to the stack, then all
@@ -5317,7 +5311,6 @@ bool Compiler::fgCanFastTailCall(GenTreeCall* callee, const char** failReason)
// To reach here means that the return types of the caller and callee are tail call compatible.
// In the case of structs that can be returned in a register, compRetNativeType is set to the actual return type.
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (callee->IsTailPrefixedCall())
@@ -5460,7 +5453,6 @@ bool Compiler::fgCanFastTailCall(GenTreeCall* callee, const char** failReason)
// We will currently decide to not fast tail call on Windows armarch if the caller or callee is a vararg
// method. This is due to the ABI differences for native vararg methods for these platforms. There is
// work required to shuffle arguments to the correct locations.
- CLANG_FORMAT_COMMENT_ANCHOR;
if (TargetOS::IsWindows && TargetArchitecture::IsArmArch && (info.compIsVarArgs || callee->IsVarargs()))
{
@@ -7376,7 +7368,6 @@ void Compiler::fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCa
// Local copy for implicit byref promotion that was undone. Do
// not introduce new references to it, all uses have been
// morphed to access the parameter.
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
LclVarDsc* param = lvaGetDesc(firstField->lvParentLcl);
@@ -7643,7 +7634,6 @@ GenTree* Compiler::fgMorphCall(GenTreeCall* call)
// In the event the call indicates the block isn't a GC safe point
// and the call is unmanaged with a GC transition suppression request
// then insert a GC poll.
- CLANG_FORMAT_COMMENT_ANCHOR;
if (IsGcSafePoint(call))
{
@@ -12390,7 +12380,6 @@ GenTree* Compiler::fgRecognizeAndMorphBitwiseRotation(GenTree* tree)
// (x >>> y) | (x << (-y + N))
// where N == bitsize(x), M is const, and
// M & (N - 1) == N - 1
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifndef TARGET_64BIT
if (!shiftIndexWithoutAdd->IsCnsIntOrI() && (rotatedValueBitSize == 64))
@@ -13591,7 +13580,6 @@ void Compiler::fgMorphStmts(BasicBlock* block)
/* This must be a tailcall that caused a GCPoll to get
injected. We haven't actually morphed the call yet
but the flag still got set, clear it here... */
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
morphedTree->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED;
@@ -14065,7 +14053,6 @@ void Compiler::fgMergeBlockReturn(BasicBlock* block)
else
{
// We'll jump to the genReturnBB.
- CLANG_FORMAT_COMMENT_ANCHOR;
#if !defined(TARGET_X86)
if (info.compFlags & CORINFO_FLG_SYNCH)
diff --git a/src/coreclr/jit/optimizer.cpp b/src/coreclr/jit/optimizer.cpp
index 289e37b16fc4e8..7daf7104271fdc 100644
--- a/src/coreclr/jit/optimizer.cpp
+++ b/src/coreclr/jit/optimizer.cpp
@@ -1693,7 +1693,6 @@ bool Compiler::optTryUnrollLoop(FlowGraphNaturalLoop* loop, bool* changedIR)
// The old loop body is unreachable now, but we will remove those
// blocks after we finish unrolling.
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (verbose)
@@ -3418,7 +3417,6 @@ bool Compiler::optNarrowTree(GenTree* tree, var_types srct, var_types dstt, Valu
switch (oper)
{
/* Constants can usually be narrowed by changing their value */
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifndef TARGET_64BIT
__int64 lval;
diff --git a/src/coreclr/jit/stacklevelsetter.cpp b/src/coreclr/jit/stacklevelsetter.cpp
index db97352d5e6977..d25f2683ca302d 100644
--- a/src/coreclr/jit/stacklevelsetter.cpp
+++ b/src/coreclr/jit/stacklevelsetter.cpp
@@ -287,7 +287,6 @@ void StackLevelSetter::SetThrowHelperBlock(SpecialCodeKind kind, BasicBlock* blo
// or generate all required helpers after all stack alignment
// has been added, and the stack level at each call to fgAddCodeRef()
// is known, or can be recalculated.
- CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(UNIX_X86_ABI)
framePointerRequired = true;
#else // !defined(UNIX_X86_ABI)
diff --git a/src/coreclr/jit/targetx86.h b/src/coreclr/jit/targetx86.h
index 08a4ab996bd1bf..3a861c3d7ef35a 100644
--- a/src/coreclr/jit/targetx86.h
+++ b/src/coreclr/jit/targetx86.h
@@ -232,7 +232,6 @@
// Registers killed by CORINFO_HELP_ASSIGN_REF and CORINFO_HELP_CHECKED_ASSIGN_REF.
// Note that x86 normally emits an optimized (source-register-specific) write barrier, but can emit
// a call to a "general" write barrier.
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef FEATURE_USE_ASM_GC_WRITE_BARRIERS
#define RBM_CALLEE_TRASH_WRITEBARRIER (RBM_EAX | RBM_EDX)
diff --git a/src/coreclr/jit/unwindarmarch.cpp b/src/coreclr/jit/unwindarmarch.cpp
index bdc7663bde7ed1..b292d74968f6ac 100644
--- a/src/coreclr/jit/unwindarmarch.cpp
+++ b/src/coreclr/jit/unwindarmarch.cpp
@@ -847,7 +847,6 @@ void UnwindPrologCodes::SetFinalSize(int headerBytes, int epilogBytes)
&upcMem[upcCodeSlot], prologBytes);
// Note that the three UWC_END padding bytes still exist at the end of the array.
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
// Zero out the epilog codes memory, to ensure we've copied the right bytes. Don't zero the padding bytes.
@@ -1909,7 +1908,6 @@ void UnwindInfo::Split()
// the actual offsets of the splits since we haven't issued the instructions yet, so store
// an emitter location instead of an offset, and "finalize" the offset in the unwindEmit() phase,
// like we do for the function length and epilog offsets.
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (uwiComp->verbose)
diff --git a/src/coreclr/jit/unwindloongarch64.cpp b/src/coreclr/jit/unwindloongarch64.cpp
index 1b561eaaaae669..e46d3ec60e0794 100644
--- a/src/coreclr/jit/unwindloongarch64.cpp
+++ b/src/coreclr/jit/unwindloongarch64.cpp
@@ -1112,7 +1112,6 @@ void UnwindPrologCodes::SetFinalSize(int headerBytes, int epilogBytes)
&upcMem[upcCodeSlot], prologBytes);
// Note that the three UWC_END padding bytes still exist at the end of the array.
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
// Zero out the epilog codes memory, to ensure we've copied the right bytes. Don't zero the padding bytes.
@@ -2139,7 +2138,6 @@ void UnwindInfo::Split()
// the actual offsets of the splits since we haven't issued the instructions yet, so store
// an emitter location instead of an offset, and "finalize" the offset in the unwindEmit() phase,
// like we do for the function length and epilog offsets.
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (uwiComp->verbose)
diff --git a/src/coreclr/jit/unwindriscv64.cpp b/src/coreclr/jit/unwindriscv64.cpp
index f9db0d433c6f13..05648c481744ea 100644
--- a/src/coreclr/jit/unwindriscv64.cpp
+++ b/src/coreclr/jit/unwindriscv64.cpp
@@ -923,7 +923,6 @@ void UnwindPrologCodes::SetFinalSize(int headerBytes, int epilogBytes)
&upcMem[upcCodeSlot], prologBytes);
// Note that the three UWC_END padding bytes still exist at the end of the array.
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
// Zero out the epilog codes memory, to ensure we've copied the right bytes. Don't zero the padding bytes.
@@ -1946,7 +1945,6 @@ void UnwindInfo::Split()
// the actual offsets of the splits since we haven't issued the instructions yet, so store
// an emitter location instead of an offset, and "finalize" the offset in the unwindEmit() phase,
// like we do for the function length and epilog offsets.
- CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (uwiComp->verbose)
diff --git a/src/coreclr/jit/utils.cpp b/src/coreclr/jit/utils.cpp
index c3234e5524deaa..ea33f1d14fb035 100644
--- a/src/coreclr/jit/utils.cpp
+++ b/src/coreclr/jit/utils.cpp
@@ -323,7 +323,6 @@ const char* dspRegRange(regMaskTP regMask, size_t& minSiz, const char* sep, regN
minSiz -= strlen(sep) + strlen(nam);
// What kind of separator should we use for this range (if it is indeed going to be a range)?
- CLANG_FORMAT_COMMENT_ANCHOR;
if (genIsValidIntReg(regNum))
{
@@ -355,7 +354,6 @@ const char* dspRegRange(regMaskTP regMask, size_t& minSiz, const char* sep, regN
}
#elif defined(TARGET_X86)
// No register ranges
- CLANG_FORMAT_COMMENT_ANCHOR;
#elif defined(TARGET_LOONGARCH64)
if (REG_A0 <= regNum && regNum <= REG_T8)
{