diff --git a/Build/NuGet/.pack-version b/Build/NuGet/.pack-version
index 661e7aeadf3..10c088013f8 100644
--- a/Build/NuGet/.pack-version
+++ b/Build/NuGet/.pack-version
@@ -1 +1 @@
-1.7.3
+1.7.4
diff --git a/lib/Backend/Backend.h b/lib/Backend/Backend.h
index 4c74b33ad27..dcf3d58c41a 100644
--- a/lib/Backend/Backend.h
+++ b/lib/Backend/Backend.h
@@ -139,6 +139,7 @@ enum IRDumpFlags
#include "SymTable.h"
#include "IR.h"
#include "Opnd.h"
+#include "IntConstMath.h"
#include "IntOverflowDoesNotMatterRange.h"
#include "IntConstantBounds.h"
#include "ValueRelativeOffset.h"
diff --git a/lib/Backend/BackwardPass.cpp b/lib/Backend/BackwardPass.cpp
index a25b8215d2b..7b83fcbda92 100644
--- a/lib/Backend/BackwardPass.cpp
+++ b/lib/Backend/BackwardPass.cpp
@@ -2047,8 +2047,8 @@ BackwardPass::ProcessBailOutInfo(IR::Instr * instr)
bool
BackwardPass::IsImplicitCallBailOutCurrentlyNeeded(IR::Instr * instr, bool mayNeedImplicitCallBailOut, bool hasLiveFields)
{
- return this->globOpt->IsImplicitCallBailOutCurrentlyNeeded(
- instr, nullptr, nullptr, this->currentBlock, hasLiveFields, mayNeedImplicitCallBailOut, false);
+ return this->globOpt->IsImplicitCallBailOutCurrentlyNeeded(instr, nullptr, nullptr, this->currentBlock, hasLiveFields, mayNeedImplicitCallBailOut, false) ||
+ this->NeedBailOutOnImplicitCallsForTypedArrayStore(instr);
}
void
@@ -2235,6 +2235,30 @@ BackwardPass::DeadStoreImplicitCallBailOut(IR::Instr * instr, bool hasLiveFields
}
}
+bool
+BackwardPass::NeedBailOutOnImplicitCallsForTypedArrayStore(IR::Instr* instr)
+{
+ if ((instr->m_opcode == Js::OpCode::StElemI_A || instr->m_opcode == Js::OpCode::StElemI_A_Strict) &&
+ instr->GetDst()->IsIndirOpnd() &&
+ instr->GetDst()->AsIndirOpnd()->GetBaseOpnd()->GetValueType().IsLikelyTypedArray())
+ {
+ IR::Opnd * opnd = instr->GetSrc1();
+ if (opnd->IsRegOpnd())
+ {
+ return !opnd->AsRegOpnd()->GetValueType().IsPrimitive() &&
+ !opnd->AsRegOpnd()->m_sym->IsInt32() &&
+ !opnd->AsRegOpnd()->m_sym->IsFloat64() &&
+ !opnd->AsRegOpnd()->m_sym->IsFloatConst() &&
+ !opnd->AsRegOpnd()->m_sym->IsIntConst();
+ }
+ else
+ {
+ Assert(opnd->IsIntConstOpnd() || opnd->IsInt64ConstOpnd() || opnd->IsFloat32ConstOpnd() || opnd->IsFloatConstOpnd() || opnd->IsAddrOpnd());
+ }
+ }
+ return false;
+}
+
void
BackwardPass::ProcessPendingPreOpBailOutInfo(IR::Instr *const currentInstr)
{
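
A minimal standalone sketch of the idea behind the new NeedBailOutOnImplicitCallsForTypedArrayStore check (illustrative only, not part of the patch; the struct and helper below are hypothetical stand-ins for the real ValueType/StackSym queries): a store into a typed array can invoke user code while converting the stored value to a number, so the store needs an implicit-call bailout unless the source value is already known to be a primitive or a typed int32/float64 sym.

    #include <cassert>

    struct SourceValueInfo
    {
        bool knownPrimitive;       // stands in for GetValueType().IsPrimitive()
        bool knownInt32OrFloat64;  // stands in for m_sym->IsInt32()/IsFloat64()/IsIntConst()/IsFloatConst()
    };

    static bool NeedsImplicitCallBailOutForTypedArrayStore(const SourceValueInfo& src)
    {
        // Only a value that may still be an object can trigger an implicit call
        // (e.g. a valueOf call) during the store's ToNumber conversion.
        return !src.knownPrimitive && !src.knownInt32OrFloat64;
    }

    int main()
    {
        assert(NeedsImplicitCallBailOutForTypedArrayStore({ false, false }));  // possibly an object
        assert(!NeedsImplicitCallBailOutForTypedArrayStore({ true,  false })); // known primitive
        assert(!NeedsImplicitCallBailOutForTypedArrayStore({ false, true  })); // known int32/float64 sym
        return 0;
    }
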
diff --git a/lib/Backend/BackwardPass.h b/lib/Backend/BackwardPass.h
index b0dd5ef6f31..8a3a1eb11f1 100644
--- a/lib/Backend/BackwardPass.h
+++ b/lib/Backend/BackwardPass.h
@@ -101,6 +101,7 @@ class BackwardPass
void DeadStoreImplicitCallBailOut(IR::Instr * instr, bool hasLiveFields);
void DeadStoreTypeCheckBailOut(IR::Instr * instr);
bool IsImplicitCallBailOutCurrentlyNeeded(IR::Instr * instr, bool mayNeedImplicitCallBailOut, bool hasLiveFields);
+ bool NeedBailOutOnImplicitCallsForTypedArrayStore(IR::Instr* instr);
bool TrackNoImplicitCallInlinees(IR::Instr *instr);
bool ProcessBailOnNoProfile(IR::Instr *instr, BasicBlock *block);
diff --git a/lib/Backend/CMakeLists.txt b/lib/Backend/CMakeLists.txt
index bc6c37117db..aa8460865a5 100644
--- a/lib/Backend/CMakeLists.txt
+++ b/lib/Backend/CMakeLists.txt
@@ -39,6 +39,7 @@ add_library (Chakra.Backend OBJECT
InliningDecider.cpp
InliningHeuristics.cpp
IntBounds.cpp
+ IntConstMath.cpp
InterpreterThunkEmitter.cpp
JITThunkEmitter.cpp
JITOutput.cpp
diff --git a/lib/Backend/Chakra.Backend.vcxproj b/lib/Backend/Chakra.Backend.vcxproj
index 03caa4e7f2e..2cec191daca 100644
--- a/lib/Backend/Chakra.Backend.vcxproj
+++ b/lib/Backend/Chakra.Backend.vcxproj
@@ -218,6 +218,7 @@
+
@@ -259,6 +260,7 @@
+
diff --git a/lib/Backend/Chakra.Backend.vcxproj.filters b/lib/Backend/Chakra.Backend.vcxproj.filters
index 3d97d9a5a99..c59b619bd96 100644
--- a/lib/Backend/Chakra.Backend.vcxproj.filters
+++ b/lib/Backend/Chakra.Backend.vcxproj.filters
@@ -130,6 +130,7 @@
+
@@ -345,6 +346,7 @@
+
diff --git a/lib/Backend/CodeGenAllocators.cpp b/lib/Backend/CodeGenAllocators.cpp
index 7b4027e57a7..a8fdd7509a5 100644
--- a/lib/Backend/CodeGenAllocators.cpp
+++ b/lib/Backend/CodeGenAllocators.cpp
@@ -5,10 +5,10 @@
#include "Backend.h"
template
-CodeGenAllocators::CodeGenAllocators(AllocationPolicyManager * policyManager, Js::ScriptContext * scriptContext, CustomHeap::CodePageAllocators * codePageAllocators, HANDLE processHandle)
+CodeGenAllocators::CodeGenAllocators(AllocationPolicyManager * policyManager, Js::ScriptContext * scriptContext, ThreadContextInfo * threadContext, CustomHeap::CodePageAllocators * codePageAllocators, HANDLE processHandle)
: pageAllocator(policyManager, Js::Configuration::Global.flags, PageAllocatorType_BGJIT, 0)
, allocator(_u("NativeCode"), &pageAllocator, Js::Throw::OutOfMemory)
-, emitBufferManager(&allocator, codePageAllocators, scriptContext, _u("JIT code buffer"), processHandle)
+, emitBufferManager(&allocator, codePageAllocators, scriptContext, threadContext, _u("JIT code buffer"), processHandle)
#if !_M_X64_OR_ARM64 && _CONTROL_FLOW_GUARD
, canCreatePreReservedSegment(false)
#endif
diff --git a/lib/Backend/CodeGenAllocators.h b/lib/Backend/CodeGenAllocators.h
index d73b500c9da..1e19594e1c1 100644
--- a/lib/Backend/CodeGenAllocators.h
+++ b/lib/Backend/CodeGenAllocators.h
@@ -17,7 +17,7 @@ class CodeGenAllocators
bool canCreatePreReservedSegment;
#endif
- CodeGenAllocators(AllocationPolicyManager * policyManager, Js::ScriptContext * scriptContext, CustomHeap::CodePageAllocators * codePageAllocators, HANDLE processHandle);
+ CodeGenAllocators(AllocationPolicyManager * policyManager, Js::ScriptContext * scriptContext, ThreadContextInfo * threadContext, CustomHeap::CodePageAllocators * codePageAllocators, HANDLE processHandle);
~CodeGenAllocators();
#if DBG
diff --git a/lib/Backend/CodeGenWorkItem.cpp b/lib/Backend/CodeGenWorkItem.cpp
index 968537f82dc..ac5092e26ee 100644
--- a/lib/Backend/CodeGenWorkItem.cpp
+++ b/lib/Backend/CodeGenWorkItem.cpp
@@ -205,7 +205,7 @@ void CodeGenWorkItem::OnWorkItemProcessFail(NativeCodeGenerator* codeGen)
#if DBG
this->allocation->allocation->isNotExecutableBecauseOOM = true;
#endif
- codeGen->FreeNativeCodeGenAllocation(this->allocation->allocation->address, nullptr);
+ codeGen->FreeNativeCodeGenAllocation(this->allocation->allocation->address);
}
}
diff --git a/lib/Backend/EmitBuffer.cpp b/lib/Backend/EmitBuffer.cpp
index 2671d0af227..203c09e5654 100644
--- a/lib/Backend/EmitBuffer.cpp
+++ b/lib/Backend/EmitBuffer.cpp
@@ -10,11 +10,12 @@
//----------------------------------------------------------------------------
template
EmitBufferManager::EmitBufferManager(ArenaAllocator * allocator, CustomHeap::CodePageAllocators * codePageAllocators,
- Js::ScriptContext * scriptContext, LPCWSTR name, HANDLE processHandle) :
+ Js::ScriptContext * scriptContext, ThreadContextInfo * threadContext, LPCWSTR name, HANDLE processHandle) :
allocationHeap(allocator, codePageAllocators, processHandle),
allocator(allocator),
allocations(nullptr),
scriptContext(scriptContext),
+ threadContext(threadContext),
processHandle(processHandle)
{
#if DBG_DUMP
@@ -193,12 +194,14 @@ bool
EmitBufferManager::FreeAllocation(void* address)
{
AutoRealOrFakeCriticalSection autoCs(&this->criticalSection);
-
+#if _M_ARM
+ address = (void*)((uintptr_t)address & ~0x1); // clear the thumb bit
+#endif
TEmitBufferAllocation* previous = nullptr;
TEmitBufferAllocation* allocation = allocations;
while(allocation != nullptr)
{
- if (address >= allocation->allocation->address && address < (allocation->allocation->address + allocation->bytesUsed))
+ if (address == allocation->allocation->address)
{
if (previous == nullptr)
{
@@ -214,6 +217,26 @@ EmitBufferManager::FreeAllocation(void* a
this->scriptContext->GetThreadContext()->SubCodeSize(allocation->bytesCommitted);
}
+#if defined(_CONTROL_FLOW_GUARD) && (_M_IX86 || _M_X64)
+ if (allocation->allocation->thunkAddress)
+ {
+ if (JITManager::GetJITManager()->IsJITServer())
+ {
+ ((ServerThreadContext*)this->threadContext)->GetJITThunkEmitter()->FreeThunk(allocation->allocation->thunkAddress);
+ }
+ else
+ {
+ ((ThreadContext*)this->threadContext)->GetJITThunkEmitter()->FreeThunk(allocation->allocation->thunkAddress);
+ }
+ }
+ else
+#endif
+ {
+ if (!JITManager::GetJITManager()->IsJITServer() || CONFIG_FLAG(OOPCFGRegistration))
+ {
+ threadContext->SetValidCallTargetForCFG(address, false);
+ }
+ }
VerboseHeapTrace(_u("Freeing 0x%p, allocation: 0x%p\n"), address, allocation->allocation->address);
this->allocationHeap.Free(allocation->allocation);
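
A standalone sketch of the ARM detail handled above (illustrative only, not part of the patch; StripThumbBit is a hypothetical helper): on Thumb targets the low bit of a code pointer encodes the Thumb state, so the pointer passed to FreeAllocation can differ from the recorded allocation->address by exactly that bit. Clearing it before the lookup is what makes the new exact-address comparison (address == allocation->allocation->address) safe.

    #include <cassert>
    #include <cstdint>

    static void* StripThumbBit(void* address)
    {
        // Clear bit 0, mirroring the masking added to FreeAllocation under _M_ARM.
        return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(address) & ~static_cast<uintptr_t>(0x1));
    }

    int main()
    {
        void* recordedAddress = reinterpret_cast<void*>(0x10000); // address stored at allocation time
        void* callTarget      = reinterpret_cast<void*>(0x10001); // same code with the Thumb bit set
        assert(StripThumbBit(callTarget) == recordedAddress);
        return 0;
    }
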
diff --git a/lib/Backend/EmitBuffer.h b/lib/Backend/EmitBuffer.h
index 382a20d47ef..ac6548186a3 100644
--- a/lib/Backend/EmitBuffer.h
+++ b/lib/Backend/EmitBuffer.h
@@ -34,7 +34,7 @@ class EmitBufferManager
{
typedef EmitBufferAllocation TEmitBufferAllocation;
public:
- EmitBufferManager(ArenaAllocator * allocator, CustomHeap::CodePageAllocators * codePageAllocators, Js::ScriptContext * scriptContext, LPCWSTR name, HANDLE processHandle);
+ EmitBufferManager(ArenaAllocator * allocator, CustomHeap::CodePageAllocators * codePageAllocators, Js::ScriptContext * scriptContext, ThreadContextInfo * threadContext, LPCWSTR name, HANDLE processHandle);
~EmitBufferManager();
// All the following methods are guarded with the SyncObject
@@ -75,6 +75,7 @@ class EmitBufferManager
#endif
ArenaAllocator * allocator;
Js::ScriptContext * scriptContext;
+ ThreadContextInfo * threadContext;
TEmitBufferAllocation * NewAllocation(DECLSPEC_GUARD_OVERFLOW size_t bytes, ushort pdataCount, ushort xdataSize, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode);
TEmitBufferAllocation* GetBuffer(TEmitBufferAllocation *allocation, DECLSPEC_GUARD_OVERFLOW __in size_t bytes, __deref_bcount(bytes) BYTE** ppBuffer);
diff --git a/lib/Backend/GlobOpt.cpp b/lib/Backend/GlobOpt.cpp
index 78b4862ce17..5d3384eaae9 100644
--- a/lib/Backend/GlobOpt.cpp
+++ b/lib/Backend/GlobOpt.cpp
@@ -12351,7 +12351,7 @@ GlobOpt::OptConstFoldBinary(
}
IntConstType tmpValueOut;
- if (!instr->BinaryCalculator(src1IntConstantValue, src2IntConstantValue, &tmpValueOut)
+ if (!instr->BinaryCalculator(src1IntConstantValue, src2IntConstantValue, &tmpValueOut, TyInt32)
|| !Math::FitsInDWord(tmpValueOut))
{
return false;
diff --git a/lib/Backend/IR.cpp b/lib/Backend/IR.cpp
index 990a398539c..7a2dcfc7be6 100644
--- a/lib/Backend/IR.cpp
+++ b/lib/Backend/IR.cpp
@@ -3807,28 +3807,28 @@ bool Instr::BinaryCalculatorT(T src1Const, T src2Const, int64 *pResult, bool che
template bool Instr::BinaryCalculatorT(int src1Const64, int src2Const64, int64 *pResult, bool checkWouldTrap);
template bool Instr::BinaryCalculatorT(int64 src1Const64, int64 src2Const64, int64 *pResult, bool checkWouldTrap);
-bool Instr::BinaryCalculator(IntConstType src1Const, IntConstType src2Const, IntConstType *pResult)
+bool Instr::BinaryCalculator(IntConstType src1Const, IntConstType src2Const, IntConstType *pResult, IRType type)
{
IntConstType value = 0;
switch (this->m_opcode)
{
case Js::OpCode::Add_A:
- if (IntConstMath::Add(src1Const, src2Const, &value))
+ if (IntConstMath::Add(src1Const, src2Const, type, &value))
{
return false;
}
break;
case Js::OpCode::Sub_A:
- if (IntConstMath::Sub(src1Const, src2Const, &value))
+ if (IntConstMath::Sub(src1Const, src2Const, type, &value))
{
return false;
}
break;
case Js::OpCode::Mul_A:
- if (IntConstMath::Mul(src1Const, src2Const, &value))
+ if (IntConstMath::Mul(src1Const, src2Const, type, &value))
{
return false;
}
@@ -3852,7 +3852,7 @@ bool Instr::BinaryCalculator(IntConstType src1Const, IntConstType src2Const, Int
// folds to -0. Bail for now...
return false;
}
- if (IntConstMath::Div(src1Const, src2Const, &value))
+ if (IntConstMath::Div(src1Const, src2Const, type, &value))
{
return false;
}
@@ -3870,7 +3870,7 @@ bool Instr::BinaryCalculator(IntConstType src1Const, IntConstType src2Const, Int
// Bail for now...
return false;
}
- if (IntConstMath::Mod(src1Const, src2Const, &value))
+ if (IntConstMath::Mod(src1Const, src2Const, type, &value))
{
return false;
}
@@ -3884,17 +3884,15 @@ bool Instr::BinaryCalculator(IntConstType src1Const, IntConstType src2Const, Int
case Js::OpCode::Shl_A:
// We don't care about overflow here
- IntConstMath::Shl(src1Const, src2Const & 0x1F, &value);
+ value = src1Const << (src2Const & 0x1F);
break;
case Js::OpCode::Shr_A:
- // We don't care about overflow here, and there shouldn't be any
- IntConstMath::Shr(src1Const, src2Const & 0x1F, &value);
+ value = src1Const >> (src2Const & 0x1F);
break;
case Js::OpCode::ShrU_A:
- // We don't care about overflow here, and there shouldn't be any
- IntConstMath::ShrU(src1Const, src2Const & 0x1F, &value);
+ value = ((UIntConstType)src1Const) >> (src2Const & 0x1F);
if (value < 0)
{
// ShrU produces a UInt32. If it doesn't fit in an Int32, bail as we don't
@@ -3904,18 +3902,15 @@ bool Instr::BinaryCalculator(IntConstType src1Const, IntConstType src2Const, Int
break;
case Js::OpCode::And_A:
- // We don't care about overflow here, and there shouldn't be any
- IntConstMath::And(src1Const, src2Const, &value);
+ value = src1Const & src2Const;
break;
case Js::OpCode::Or_A:
- // We don't care about overflow here, and there shouldn't be any
- IntConstMath::Or(src1Const, src2Const, &value);
+ value = src1Const | src2Const;
break;
case Js::OpCode::Xor_A:
- // We don't care about overflow here, and there shouldn't be any
- IntConstMath::Xor(src1Const, src2Const, &value);
+ value = src1Const ^ src2Const;
break;
case Js::OpCode::InlineMathMin:
@@ -3935,7 +3930,7 @@ bool Instr::BinaryCalculator(IntConstType src1Const, IntConstType src2Const, Int
return true;
}
-bool Instr::UnaryCalculator(IntConstType src1Const, IntConstType *pResult)
+bool Instr::UnaryCalculator(IntConstType src1Const, IntConstType *pResult, IRType type)
{
IntConstType value = 0;
@@ -3948,14 +3943,14 @@ bool Instr::UnaryCalculator(IntConstType src1Const, IntConstType *pResult)
return false;
}
- if (IntConstMath::Neg(src1Const, &value))
+ if (IntConstMath::Neg(src1Const, type, &value))
{
return false;
}
break;
case Js::OpCode::Not_A:
- IntConstMath::Not(src1Const, &value);
+ value = ~src1Const;
break;
case Js::OpCode::Ld_A:
@@ -3973,14 +3968,14 @@ bool Instr::UnaryCalculator(IntConstType src1Const, IntConstType *pResult)
break;
case Js::OpCode::Incr_A:
- if (IntConstMath::Inc(src1Const, &value))
+ if (IntConstMath::Inc(src1Const, type, &value))
{
return false;
}
break;
case Js::OpCode::Decr_A:
- if (IntConstMath::Dec(src1Const, &value))
+ if (IntConstMath::Dec(src1Const, type, &value))
{
return false;
}
diff --git a/lib/Backend/IR.h b/lib/Backend/IR.h
index e82dadf2a2d..502f669bed9 100644
--- a/lib/Backend/IR.h
+++ b/lib/Backend/IR.h
@@ -334,10 +334,10 @@ class Instr
bool IsCmCC_R8();
bool IsCmCC_I4();
bool IsNeq();
- bool BinaryCalculator(IntConstType src1Const, IntConstType src2Const, IntConstType *pResult);
+ bool BinaryCalculator(IntConstType src1Const, IntConstType src2Const, IntConstType *pResult, IRType type);
template <typename T>
bool BinaryCalculatorT(T src1Const, T src2Const, int64 *pResult, bool checkWouldTrap);
- bool UnaryCalculator(IntConstType src1Const, IntConstType *pResult);
+ bool UnaryCalculator(IntConstType src1Const, IntConstType *pResult, IRType type);
IR::Instr* GetNextArg();
// Iterates argument chain
diff --git a/lib/Backend/IRBuilder.cpp b/lib/Backend/IRBuilder.cpp
index 3d33638feb5..a7fe5517a2c 100644
--- a/lib/Backend/IRBuilder.cpp
+++ b/lib/Backend/IRBuilder.cpp
@@ -1094,7 +1094,7 @@ IRBuilder::CreateLabel(IR::BranchInstr * branchInstr, uint& offset)
instrPrev = targetInstr->GetPrevRealInstrOrLabel();
}
- if (instrPrev && instrPrev->IsLabelInstr())
+ if (instrPrev && instrPrev->IsLabelInstr() && instrPrev->GetByteCodeOffset() == offset)
{
// Found an existing label at the right offset. Just reuse it.
labelInstr = instrPrev->AsLabelInstr();
@@ -1103,7 +1103,7 @@ IRBuilder::CreateLabel(IR::BranchInstr * branchInstr, uint& offset)
{
// No label at the desired offset. Create one.
- labelInstr = IR::LabelInstr::New( Js::OpCode::Label, this->m_func);
+ labelInstr = IR::LabelInstr::New(Js::OpCode::Label, this->m_func);
labelInstr->SetByteCodeOffset(offset);
if (instrPrev)
{
@@ -2690,35 +2690,52 @@ IRBuilder::BuildUnsigned1(Js::OpCode newOpcode, uint32 offset, uint32 num)
case Js::OpCode::ProfiledLoopBodyStart:
{
- if (!(m_func->DoSimpleJitDynamicProfile() && m_func->GetJITFunctionBody()->DoJITLoopBody()))
+ // This opcode is removed from the IR when we aren't doing Profiling SimpleJit or aren't jitting loop bodies
+ if (m_func->DoSimpleJitDynamicProfile() && m_func->GetJITFunctionBody()->DoJITLoopBody())
{
- // This opcode is removed from the IR when we aren't doing Profiling SimpleJit or not jitting loop bodies
- break;
- }
+ // Attach a register to the dest of this instruction to communicate whether we should bail out (the decision is made in lowering)
+ IR::Opnd* fullJitExists = IR::RegOpnd::New(TyUint8, m_func);
+ auto start = m_lastInstr;
+
+ if (start->m_opcode == Js::OpCode::InitLoopBodyCount)
+ {
+ Assert(this->IsLoopBody());
+ start = start->m_prev;
+ }
+
+ Assert(start->m_opcode == Js::OpCode::ProfiledLoopStart && start->GetDst());
+ IR::JitProfilingInstr* instr = IR::JitProfilingInstr::New(Js::OpCode::ProfiledLoopBodyStart, fullJitExists, start->GetDst(), m_func);
+ // profileId is used here to represent the loop number
+ instr->loopNumber = num;
+ this->AddInstr(instr, offset);
- // Attach a register to the dest of this instruction to communicate whether we should bail out (the deciding of this is done in lowering)
- IR::Opnd* fullJitExists = IR::RegOpnd::New(TyUint8, m_func);
- auto start = m_lastInstr->m_prev;
+ // If fullJitExists isn't 0, bail out so that we can get the fulljitted version
+ BailOutInfo * bailOutInfo = JitAnew(m_func->m_alloc, BailOutInfo, instr->GetByteCodeOffset(), m_func);
+ IR::BailOutInstr * bailInstr = IR::BailOutInstr::New(Js::OpCode::BailOnNotEqual, IR::BailOnSimpleJitToFullJitLoopBody, bailOutInfo, bailOutInfo->bailOutFunc);
+ bailInstr->SetSrc1(fullJitExists);
+ bailInstr->SetSrc2(IR::IntConstOpnd::New(0, TyUint8, m_func, true));
+ this->AddInstr(bailInstr, offset);
- if (start->m_opcode == Js::OpCode::InitLoopBodyCount)
- {
- Assert(this->IsLoopBody());
- start = start->m_prev;
}
- Assert(start->m_opcode == Js::OpCode::ProfiledLoopStart && start->GetDst());
- IR::JitProfilingInstr* instr = IR::JitProfilingInstr::New(Js::OpCode::ProfiledLoopBodyStart, fullJitExists, start->GetDst(), m_func);
- // profileId is used here to represent the loop number
- instr->loopNumber = num;
- this->AddInstr(instr, offset);
+ Js::ImplicitCallFlags flags = Js::ImplicitCall_HasNoInfo;
+ Js::LoopFlags loopFlags;
+ if (this->m_func->HasProfileInfo())
+ {
+ flags = m_func->GetReadOnlyProfileInfo()->GetLoopImplicitCallFlags(num);
+ loopFlags = m_func->GetReadOnlyProfileInfo()->GetLoopFlags(num);
+ }
- // If fullJitExists isn't 0, bail out so that we can get the fulljitted version
- BailOutInfo * bailOutInfo = JitAnew(m_func->m_alloc, BailOutInfo, instr->GetByteCodeOffset(), m_func);
- IR::BailOutInstr * bailInstr = IR::BailOutInstr::New(Js::OpCode::BailOnNotEqual, IR::BailOnSimpleJitToFullJitLoopBody, bailOutInfo, bailOutInfo->bailOutFunc);
- bailInstr->SetSrc1(fullJitExists);
- bailInstr->SetSrc2(IR::IntConstOpnd::New(0, TyUint8, m_func, true));
- this->AddInstr(bailInstr, offset);
+ // Put a label in the instruction stream to carry the profile info
+ IR::ProfiledLabelInstr * labelInstr = IR::ProfiledLabelInstr::New(Js::OpCode::Label, this->m_func, flags, loopFlags);
+#if DBG
+ labelInstr->loopNum = num;
+#endif
+ m_lastInstr->InsertAfter(labelInstr);
+ m_lastInstr = labelInstr;
+ // Set it to the offset to the start of the loop
+ labelInstr->SetByteCodeOffset(m_jnReader.GetCurrentOffset());
break;
}
@@ -2750,29 +2767,10 @@ IRBuilder::BuildUnsigned1(Js::OpCode newOpcode, uint32 offset, uint32 num)
this->AddInstr(instr, offset);
}
- Js::ImplicitCallFlags flags = Js::ImplicitCall_HasNoInfo;
- Js::LoopFlags loopFlags;
- if (this->m_func->HasProfileInfo())
- {
- flags = m_func->GetReadOnlyProfileInfo()->GetLoopImplicitCallFlags(num);
- loopFlags = m_func->GetReadOnlyProfileInfo()->GetLoopFlags(num);
- }
-
if (this->IsLoopBody() && !m_loopCounterSym)
{
InsertInitLoopBodyLoopCounter(num);
}
-
- // Put a label the instruction stream to carry the profile info
- IR::ProfiledLabelInstr * labelInstr = IR::ProfiledLabelInstr::New(Js::OpCode::Label, this->m_func, flags, loopFlags);
-#if DBG
- labelInstr->loopNum = num;
-#endif
- m_lastInstr->InsertAfter(labelInstr);
- m_lastInstr = labelInstr;
-
- // Set it to the offset to the start of the loop
- labelInstr->SetByteCodeOffset(m_jnReader.GetCurrentOffset());
break;
}
diff --git a/lib/Backend/IRBuilderAsmJs.cpp b/lib/Backend/IRBuilderAsmJs.cpp
index fe1973adef0..5ac08230eff 100644
--- a/lib/Backend/IRBuilderAsmJs.cpp
+++ b/lib/Backend/IRBuilderAsmJs.cpp
@@ -995,7 +995,7 @@ IRBuilderAsmJs::CreateLabel(IR::BranchInstr * branchInstr, uint & offset)
}
IR::LabelInstr * labelInstr;
- if (instrPrev && instrPrev->IsLabelInstr())
+ if (instrPrev && instrPrev->IsLabelInstr() && instrPrev->GetByteCodeOffset() == offset)
{
// Found an existing label at the right offset. Just reuse it.
labelInstr = instrPrev->AsLabelInstr();
diff --git a/lib/Backend/Inline.cpp b/lib/Backend/Inline.cpp
index a89f5acd5ab..c52f3eff0ba 100644
--- a/lib/Backend/Inline.cpp
+++ b/lib/Backend/Inline.cpp
@@ -2845,7 +2845,7 @@ Inline::InlineCallApplyTarget_Shared(IR::Instr *callInstr, bool originalCallTarg
// instrNext
IR::Instr* instrNext = callInstr->m_next;
- return InlineFunctionCommon(callInstr, originalCallTargetOpndIsJITOpt, originalCallTargetStackSym, inlineeData, inlinee, instrNext, returnValueOpnd, callInstr, nullptr, recursiveInlineDepth, safeThis, isApplyTarget);
+ return InlineFunctionCommon(callInstr, originalCallTargetOpndIsJITOpt, originalCallTargetStackSym, inlineeData, inlinee, instrNext, returnValueOpnd, callInstr, nullptr, recursiveInlineDepth, safeThis, isApplyTarget)->m_prev;
}
IR::Opnd *
@@ -4419,7 +4419,7 @@ bool Inline::InlConstFold(IR::Instr *instr, IntConstType *pValue, __in_ecount_op
{
IntConstType src2Constant = *pValue;
- if (!instr->BinaryCalculator(src1Constant, src2Constant, pValue)
+ if (!instr->BinaryCalculator(src1Constant, src2Constant, pValue, TyInt32)
|| !Math::FitsInDWord(*pValue))
{
return false;
@@ -4457,7 +4457,7 @@ bool Inline::InlConstFold(IR::Instr *instr, IntConstType *pValue, __in_ecount_op
}
else
{
- if (!instr->UnaryCalculator(src1Constant, pValue)
+ if (!instr->UnaryCalculator(src1Constant, pValue, TyInt32)
|| !Math::FitsInDWord(*pValue))
{
// Skip over BytecodeArgOutCapture
diff --git a/lib/Backend/IntBounds.cpp b/lib/Backend/IntBounds.cpp
index 5906a65dfae..c0c471e302d 100644
--- a/lib/Backend/IntBounds.cpp
+++ b/lib/Backend/IntBounds.cpp
@@ -743,22 +743,25 @@ bool IntBoundCheck::SetBoundOffset(const int offset, const bool isLoopCountBased
// Determine the previous offset from the instruction (src1 <= src2 + dst)
IR::IntConstOpnd *dstOpnd = nullptr;
IntConstType previousOffset = 0;
+ IRType offsetType = TyMachReg;
if (instr->GetDst())
{
dstOpnd = instr->GetDst()->AsIntConstOpnd();
previousOffset = dstOpnd->GetValue();
+ offsetType = dstOpnd->GetType();
}
IR::IntConstOpnd *src1Opnd = nullptr;
if (instr->GetSrc1()->IsIntConstOpnd())
{
src1Opnd = instr->GetSrc1()->AsIntConstOpnd();
- if (IntConstMath::Sub(previousOffset, src1Opnd->GetValue(), &previousOffset))
+ if (IntConstMath::Sub(previousOffset, src1Opnd->GetValue(), src1Opnd->GetType(), &previousOffset))
return false;
+ offsetType = src1Opnd->GetType();
}
IR::IntConstOpnd *src2Opnd = (instr->GetSrc2()->IsIntConstOpnd() ? instr->GetSrc2()->AsIntConstOpnd() : nullptr);
- if(src2Opnd && IntConstMath::Add(previousOffset, src2Opnd->GetValue(), &previousOffset))
+ if(src2Opnd && IntConstMath::Add(previousOffset, src2Opnd->GetValue(), src2Opnd->GetType(), &previousOffset))
return false;
// Given a bounds check (a <= b + offset), the offset may only be decreased such that it does not invalidate the invariant
@@ -768,7 +771,7 @@ bool IntBoundCheck::SetBoundOffset(const int offset, const bool isLoopCountBased
return true;
IntConstType offsetDecrease;
- if(IntConstMath::Sub(previousOffset, offset, &offsetDecrease))
+ if(IntConstMath::Sub(previousOffset, offset, offsetType, &offsetDecrease))
return false;
Assert(offsetDecrease > 0);
@@ -776,14 +779,14 @@ bool IntBoundCheck::SetBoundOffset(const int offset, const bool isLoopCountBased
{
// Prefer to increase src1, as this is an upper bound check and src1 corresponds to the index
IntConstType newSrc1Value;
- if(IntConstMath::Add(src1Opnd->GetValue(), offsetDecrease, &newSrc1Value))
+ if(IntConstMath::Add(src1Opnd->GetValue(), offsetDecrease, src1Opnd->GetType(), &newSrc1Value))
return false;
src1Opnd->SetValue(newSrc1Value);
}
else if(dstOpnd)
{
IntConstType newDstValue;
- if(IntConstMath::Sub(dstOpnd->GetValue(), offsetDecrease, &newDstValue))
+ if(IntConstMath::Sub(dstOpnd->GetValue(), offsetDecrease, dstOpnd->GetType(), &newDstValue))
return false;
if (newDstValue == 0)
instr->FreeDst();
diff --git a/lib/Backend/IntConstMath.cpp b/lib/Backend/IntConstMath.cpp
new file mode 100644
index 00000000000..d426e178669
--- /dev/null
+++ b/lib/Backend/IntConstMath.cpp
@@ -0,0 +1,84 @@
+//-------------------------------------------------------------------------------------------------------
+// Copyright (C) Microsoft Corporation and contributors. All rights reserved.
+// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
+//-------------------------------------------------------------------------------------------------------
+
+#include "Backend.h"
+
+bool IntConstMath::IsValid(IntConstType val, IRType type)
+{
+ switch (type)
+ {
+#if TARGET_32
+ case TyInt32:
+ case TyUint32:
+ CompileAssert(sizeof(IntConstType) == sizeof(int32));
+ return true;
+#elif TARGET_64
+ case TyInt32:
+ case TyUint32:
+ return Math::FitsInDWord(val);
+ case TyInt64:
+ case TyUint64:
+ CompileAssert(sizeof(IntConstType) == sizeof(int64));
+ return true;
+#endif
+ default:
+ Assert(UNREACHED);
+ return false;
+ }
+}
+
+bool IntConstMath::Add(IntConstType left, IntConstType right, IRType type, IntConstType * result)
+{
+ bool overflowed = IntMathCommon::Add(left, right, result);
+ return overflowed || !IsValid(*result, type);
+}
+
+bool IntConstMath::Sub(IntConstType left, IntConstType right, IRType type, IntConstType * result)
+{
+ bool overflowed = IntMathCommon::Sub(left, right, result);
+ return overflowed || !IsValid(*result, type);
+}
+
+bool IntConstMath::Mul(IntConstType left, IntConstType right, IRType type, IntConstType * result)
+{
+#if TARGET_32
+ bool overflowed = Int32Math::Mul(left, right, result);
+ CompileAssert(sizeof(IntConstType) == sizeof(int32));
+#elif TARGET_64
+ bool overflowed = Int64Math::Mul(left, right, result);
+ CompileAssert(sizeof(IntConstType) == sizeof(int64));
+#endif
+ return overflowed || !IsValid(*result, type);
+}
+
+bool IntConstMath::Div(IntConstType left, IntConstType right, IRType type, IntConstType * result)
+{
+ bool overflowed = IntMathCommon::Div(left, right, result);
+ return overflowed || !IsValid(*result, type);
+}
+
+bool IntConstMath::Mod(IntConstType left, IntConstType right, IRType type, IntConstType * result)
+{
+ bool overflowed = IntMathCommon::Mod(left, right, result);
+ return overflowed || !IsValid(*result, type);
+}
+
+bool IntConstMath::Dec(IntConstType val, IRType type, IntConstType * result)
+{
+ bool overflowed = IntMathCommon::Dec(val, result);
+ return overflowed || !IsValid(*result, type);
+}
+
+bool IntConstMath::Inc(IntConstType val, IRType type, IntConstType * result)
+{
+ bool overflowed = IntMathCommon::Inc(val, result);
+ return overflowed || !IsValid(*result, type);
+}
+
+bool IntConstMath::Neg(IntConstType val, IRType type, IntConstType * result)
+{
+ bool overflowed = IntMathCommon::Neg(val, result);
+ return overflowed || !IsValid(*result, type);
+}
diff --git a/lib/Backend/IntConstMath.h b/lib/Backend/IntConstMath.h
new file mode 100644
index 00000000000..4b6efcdfe6b
--- /dev/null
+++ b/lib/Backend/IntConstMath.h
@@ -0,0 +1,23 @@
+//-------------------------------------------------------------------------------------------------------
+// Copyright (C) Microsoft Corporation and contributors. All rights reserved.
+// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
+//-------------------------------------------------------------------------------------------------------
+
+#pragma once
+
+class IntConstMath
+{
+public:
+ static bool Add(IntConstType left, IntConstType right, IRType type, IntConstType * result);
+ static bool Sub(IntConstType left, IntConstType right, IRType type, IntConstType * result);
+ static bool Mul(IntConstType left, IntConstType right, IRType type, IntConstType * result);
+ static bool Div(IntConstType left, IntConstType right, IRType type, IntConstType * result);
+ static bool Mod(IntConstType left, IntConstType right, IRType type, IntConstType * result);
+
+ static bool Dec(IntConstType val, IRType type, IntConstType * result);
+ static bool Inc(IntConstType val, IRType type, IntConstType * result);
+ static bool Neg(IntConstType val, IRType type, IntConstType * result);
+
+private:
+ static bool IsValid(IntConstType val, IRType type);
+};
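
A hypothetical use of the new interface (not part of the patch; it assumes the Backend headers above, so IntConstType, IRType, INT32_MAX and Assert are in scope). The added IRType parameter is what lets 64-bit builds, where IntConstType is int64, reject folded constants that no longer fit the operand's 32-bit type instead of silently keeping them:

    IntConstType folded;
    // On a 64-bit build INT32_MAX + 1 does not overflow the raw 64-bit addition,
    // but the result is not a valid TyInt32 constant, so Add reports failure...
    bool rejected32 = IntConstMath::Add(INT32_MAX, 1, TyInt32, &folded); // true: caller skips the fold
    // ...while the same result is perfectly representable for a TyInt64 operand.
    bool rejected64 = IntConstMath::Add(INT32_MAX, 1, TyInt64, &folded); // false: fold succeeds
    Assert(rejected32 && !rejected64);
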
diff --git a/lib/Backend/InterpreterThunkEmitter.cpp b/lib/Backend/InterpreterThunkEmitter.cpp
index e8de8cfe4b8..991dc8e9ad5 100644
--- a/lib/Backend/InterpreterThunkEmitter.cpp
+++ b/lib/Backend/InterpreterThunkEmitter.cpp
@@ -242,7 +242,7 @@ const BYTE InterpreterThunkEmitter::HeaderSize()
}
InterpreterThunkEmitter::InterpreterThunkEmitter(Js::ScriptContext* context, ArenaAllocator* allocator, CustomHeap::InProcCodePageAllocators * codePageAllocators, bool isAsmInterpreterThunk) :
- emitBufferManager(allocator, codePageAllocators, /*scriptContext*/ nullptr, _u("Interpreter thunk buffer"), GetCurrentProcess()),
+ emitBufferManager(allocator, codePageAllocators, /*scriptContext*/ nullptr, nullptr, _u("Interpreter thunk buffer"), GetCurrentProcess()),
scriptContext(context),
allocator(allocator),
thunkCount(0),
diff --git a/lib/Backend/JITOutput.cpp b/lib/Backend/JITOutput.cpp
index d3e1b119c46..b1b7c414874 100644
--- a/lib/Backend/JITOutput.cpp
+++ b/lib/Backend/JITOutput.cpp
@@ -250,6 +250,7 @@ JITOutput::RecordXData(BYTE * xdata)
void
JITOutput::FinalizeNativeCode()
{
+ CustomHeap::Allocation * allocation = GetAllocation();
#if ENABLE_OOP_NATIVE_CODEGEN
if (JITManager::GetJITManager()->IsJITServer())
{
@@ -258,7 +259,7 @@ JITOutput::FinalizeNativeCode()
#if _M_IX86 || _M_X64
if (!m_func->IsLoopBody() && CONFIG_FLAG(UseJITTrampoline))
{
- m_outputData->thunkAddress = m_func->GetOOPThreadContext()->GetJITThunkEmitter()->CreateThunk(m_outputData->codeAddress);
+ allocation->thunkAddress = m_func->GetOOPThreadContext()->GetJITThunkEmitter()->CreateThunk(m_outputData->codeAddress);
}
#endif
#endif
@@ -278,18 +279,30 @@ JITOutput::FinalizeNativeCode()
#if _M_IX86 || _M_X64
if (!m_func->IsLoopBody() && CONFIG_FLAG(UseJITTrampoline))
{
- m_outputData->thunkAddress = m_func->GetInProcThreadContext()->GetJITThunkEmitter()->CreateThunk(m_outputData->codeAddress);
+ allocation->thunkAddress = m_func->GetInProcThreadContext()->GetJITThunkEmitter()->CreateThunk(m_outputData->codeAddress);
}
#endif
#endif
}
-
- if (!m_outputData->thunkAddress && CONFIG_FLAG(OOPCFGRegistration))
+ m_outputData->thunkAddress = allocation->thunkAddress;
+ if (!allocation->thunkAddress && CONFIG_FLAG(OOPCFGRegistration))
{
m_func->GetThreadContextInfo()->SetValidCallTargetForCFG((PVOID)m_outputData->codeAddress);
}
}
+CustomHeap::Allocation *
+JITOutput::GetAllocation() const
+{
+#if ENABLE_OOP_NATIVE_CODEGEN
+ if (JITManager::GetJITManager()->IsJITServer())
+ {
+ return m_oopAlloc->allocation;
+ }
+#endif
+ return m_inProcAlloc->allocation;
+}
+
JITOutputIDL *
JITOutput::GetOutputData()
{
diff --git a/lib/Backend/JITOutput.h b/lib/Backend/JITOutput.h
index bcf7b897c7b..d61804d144c 100644
--- a/lib/Backend/JITOutput.h
+++ b/lib/Backend/JITOutput.h
@@ -54,6 +54,7 @@ class JITOutput
private:
template <typename TEmitBufferAllocation, typename TCodeGenAllocators>
void RecordNativeCode(const BYTE* sourceBuffer, BYTE* localCodeAddress, TEmitBufferAllocation allocation, TCodeGenAllocators codeGenAllocators);
+ CustomHeap::Allocation * GetAllocation() const;
union
{
EmitBufferAllocation * m_inProcAlloc;
diff --git a/lib/Backend/Lower.cpp b/lib/Backend/Lower.cpp
index fe2037d77b0..309ae5e2d8b 100644
--- a/lib/Backend/Lower.cpp
+++ b/lib/Backend/Lower.cpp
@@ -12751,7 +12751,7 @@ void Lowerer::LowerBoundCheck(IR::Instr *const instr)
{
// Try to aggregate right + offset into a constant offset
IntConstType newOffset;
- if(!IntConstMath::Add(offset, rightOpnd->AsIntConstOpnd()->GetValue(), &newOffset))
+ if(!IntConstMath::Add(offset, rightOpnd->AsIntConstOpnd()->GetValue(), TyInt32, &newOffset))
{
offset = newOffset;
rightOpnd = nullptr;
diff --git a/lib/Backend/NativeCodeGenerator.cpp b/lib/Backend/NativeCodeGenerator.cpp
index b3a570fa028..3d939f854fc 100644
--- a/lib/Backend/NativeCodeGenerator.cpp
+++ b/lib/Backend/NativeCodeGenerator.cpp
@@ -2331,130 +2331,137 @@ NativeCodeGenerator::GatherCodeGenData(
bool isPolymorphic = (cacheType & Js::FldInfo_Polymorphic) != 0;
if (!isPolymorphic)
{
- Js::InlineCache *inlineCache;
+ Js::InlineCache *inlineCache = nullptr;
+
if(function && Js::ScriptFunctionWithInlineCache::Is(function))
{
- inlineCache = Js::ScriptFunctionWithInlineCache::FromVar(function)->GetInlineCache(i);
+ if (Js::ScriptFunctionWithInlineCache::FromVar(function)->GetInlineCaches() != nullptr)
+ {
+ inlineCache = Js::ScriptFunctionWithInlineCache::FromVar(function)->GetInlineCache(i);
+ }
}
else
{
inlineCache = functionBody->GetInlineCache(i);
}
- ObjTypeSpecFldInfo* objTypeSpecFldInfo = nullptr;
+ if (inlineCache != nullptr)
+ {
+ ObjTypeSpecFldInfo* objTypeSpecFldInfo = nullptr;
#if ENABLE_DEBUG_CONFIG_OPTIONS
- if (PHASE_VERBOSE_TRACE(Js::ObjTypeSpecPhase, topFunctionBody) || PHASE_VERBOSE_TRACE(Js::EquivObjTypeSpecPhase, topFunctionBody))
- {
- char16 debugStringBuffer2[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE];
- Js::PropertyId propertyId = functionBody->GetPropertyIdFromCacheId(i);
- Js::PropertyRecord const * const propertyRecord = functionBody->GetScriptContext()->GetPropertyName(propertyId);
- Output::Print(_u("ObTypeSpec: top function %s (%s), function %s (%s): cloning mono cache for %s (#%d) cache %d \n"),
- topFunctionBody->GetDisplayName(), topFunctionBody->GetDebugNumberSet(debugStringBuffer),
- functionBody->GetDisplayName(), functionBody->GetDebugNumberSet(debugStringBuffer2), propertyRecord->GetBuffer(), propertyId, i);
- Output::Flush();
- }
+ if (PHASE_VERBOSE_TRACE(Js::ObjTypeSpecPhase, topFunctionBody) || PHASE_VERBOSE_TRACE(Js::EquivObjTypeSpecPhase, topFunctionBody))
+ {
+ char16 debugStringBuffer2[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE];
+ Js::PropertyId propertyId = functionBody->GetPropertyIdFromCacheId(i);
+ Js::PropertyRecord const * const propertyRecord = functionBody->GetScriptContext()->GetPropertyName(propertyId);
+ Output::Print(_u("ObTypeSpec: top function %s (%s), function %s (%s): cloning mono cache for %s (#%d) cache %d \n"),
+ topFunctionBody->GetDisplayName(), topFunctionBody->GetDebugNumberSet(debugStringBuffer),
+ functionBody->GetDisplayName(), functionBody->GetDebugNumberSet(debugStringBuffer2), propertyRecord->GetBuffer(), propertyId, i);
+ Output::Flush();
+ }
#endif
- IncInlineCacheCount(monoInlineCacheCount);
+ IncInlineCacheCount(monoInlineCacheCount);
- if (inlineCache->IsEmpty())
- {
- IncInlineCacheCount(emptyMonoInlineCacheCount);
- }
+ if (inlineCache->IsEmpty())
+ {
+ IncInlineCacheCount(emptyMonoInlineCacheCount);
+ }
- if(!PHASE_OFF(Js::ObjTypeSpecPhase, functionBody) || !PHASE_OFF(Js::FixedMethodsPhase, functionBody) || !PHASE_OFF(Js::UseFixedDataPropsPhase, functionBody))
- {
- if(cacheType & (Js::FldInfo_FromLocal | Js::FldInfo_FromLocalWithoutProperty | Js::FldInfo_FromProto))
+ if(!PHASE_OFF(Js::ObjTypeSpecPhase, functionBody) || !PHASE_OFF(Js::FixedMethodsPhase, functionBody) || !PHASE_OFF(Js::UseFixedDataPropsPhase, functionBody))
{
- // WinBlue 170722: Disable ObjTypeSpec optimization for activation object in debug mode,
- // as it can result in BailOutFailedTypeCheck before locals are set to undefined,
- // which can result in using garbage object during bailout/restore values.
- if (!(functionBody->IsInDebugMode() && inlineCache->GetType() &&
- inlineCache->GetType()->GetTypeId() == Js::TypeIds_ActivationObject))
+ if(cacheType & (Js::FldInfo_FromLocal | Js::FldInfo_FromLocalWithoutProperty | Js::FldInfo_FromProto))
{
- objTypeSpecFldInfo = ObjTypeSpecFldInfo::CreateFrom(objTypeSpecFldInfoList->Count(), inlineCache, i, entryPoint, topFunctionBody, functionBody, InlineCacheStatsArg(jitTimeData));
- if (objTypeSpecFldInfo)
+ // WinBlue 170722: Disable ObjTypeSpec optimization for activation object in debug mode,
+ // as it can result in BailOutFailedTypeCheck before locals are set to undefined,
+ // which can result in using garbage object during bailout/restore values.
+ if (!(functionBody->IsInDebugMode() && inlineCache->GetType() &&
+ inlineCache->GetType()->GetTypeId() == Js::TypeIds_ActivationObject))
{
- IncInlineCacheCount(clonedMonoInlineCacheCount);
-
- if (!PHASE_OFF(Js::InlineApplyTargetPhase, functionBody) && (cacheType & Js::FldInfo_InlineCandidate))
+ objTypeSpecFldInfo = ObjTypeSpecFldInfo::CreateFrom(objTypeSpecFldInfoList->Count(), inlineCache, i, entryPoint, topFunctionBody, functionBody, InlineCacheStatsArg(jitTimeData));
+ if (objTypeSpecFldInfo)
{
- if (IsInlinee || objTypeSpecFldInfo->IsBuiltin())
+ IncInlineCacheCount(clonedMonoInlineCacheCount);
+
+ if (!PHASE_OFF(Js::InlineApplyTargetPhase, functionBody) && (cacheType & Js::FldInfo_InlineCandidate))
{
- inlineApplyTarget = true;
+ if (IsInlinee || objTypeSpecFldInfo->IsBuiltin())
+ {
+ inlineApplyTarget = true;
+ }
}
- }
- if (!PHASE_OFF(Js::InlineCallTargetPhase, functionBody) && (cacheType & Js::FldInfo_InlineCandidate))
- {
- inlineCallTarget = true;
- }
- if (!isJitTimeDataComputed)
- {
- jitTimeData->GetObjTypeSpecFldInfoArray()->SetInfo(recycler, functionBody, i, objTypeSpecFldInfo);
- objTypeSpecFldInfoList->Prepend(objTypeSpecFldInfo);
+ if (!PHASE_OFF(Js::InlineCallTargetPhase, functionBody) && (cacheType & Js::FldInfo_InlineCandidate))
+ {
+ inlineCallTarget = true;
+ }
+ if (!isJitTimeDataComputed)
+ {
+ jitTimeData->GetObjTypeSpecFldInfoArray()->SetInfo(recycler, functionBody, i, objTypeSpecFldInfo);
+ objTypeSpecFldInfoList->Prepend(objTypeSpecFldInfo);
+ }
}
}
}
}
- }
- if(!PHASE_OFF(Js::FixAccessorPropsPhase, functionBody))
- {
- if (!objTypeSpecFldInfo && (cacheType & Js::FldInfo_FromAccessor) && (cacheType & Js::FldInfo_InlineCandidate))
+ if(!PHASE_OFF(Js::FixAccessorPropsPhase, functionBody))
{
- objTypeSpecFldInfo = ObjTypeSpecFldInfo::CreateFrom(objTypeSpecFldInfoList->Count(), inlineCache, i, entryPoint, topFunctionBody, functionBody, InlineCacheStatsArg(jitTimeData));
- if (objTypeSpecFldInfo)
+ if (!objTypeSpecFldInfo && (cacheType & Js::FldInfo_FromAccessor) && (cacheType & Js::FldInfo_InlineCandidate))
{
- inlineGetterSetter = true;
- if (!isJitTimeDataComputed)
+ objTypeSpecFldInfo = ObjTypeSpecFldInfo::CreateFrom(objTypeSpecFldInfoList->Count(), inlineCache, i, entryPoint, topFunctionBody, functionBody, InlineCacheStatsArg(jitTimeData));
+ if (objTypeSpecFldInfo)
{
- IncInlineCacheCount(clonedMonoInlineCacheCount);
- jitTimeData->GetObjTypeSpecFldInfoArray()->SetInfo(recycler, functionBody, i, objTypeSpecFldInfo);
- objTypeSpecFldInfoList->Prepend(objTypeSpecFldInfo);
+ inlineGetterSetter = true;
+ if (!isJitTimeDataComputed)
+ {
+ IncInlineCacheCount(clonedMonoInlineCacheCount);
+ jitTimeData->GetObjTypeSpecFldInfoArray()->SetInfo(recycler, functionBody, i, objTypeSpecFldInfo);
+ objTypeSpecFldInfoList->Prepend(objTypeSpecFldInfo);
+ }
}
- }
+ }
}
- }
- if (!PHASE_OFF(Js::RootObjectFldFastPathPhase, functionBody))
- {
- if (i >= functionBody->GetRootObjectLoadInlineCacheStart() && inlineCache->IsLocal())
+ if (!PHASE_OFF(Js::RootObjectFldFastPathPhase, functionBody))
{
- void * rawType = inlineCache->u.local.type;
- Js::Type * type = TypeWithoutAuxSlotTag(rawType);
- Js::RootObjectBase * rootObject = functionBody->GetRootObject();
- if (rootObject->GetType() == type)
+ if (i >= functionBody->GetRootObjectLoadInlineCacheStart() && inlineCache->IsLocal())
{
- Js::BigPropertyIndex propertyIndex = inlineCache->u.local.slotIndex;
- if (rawType == type)
+ void * rawType = inlineCache->u.local.type;
+ Js::Type * type = TypeWithoutAuxSlotTag(rawType);
+ Js::RootObjectBase * rootObject = functionBody->GetRootObject();
+ if (rootObject->GetType() == type)
{
- // type is not tagged, inline slot
- propertyIndex = rootObject->GetPropertyIndexFromInlineSlotIndex(inlineCache->u.local.slotIndex);
- }
- else
- {
- propertyIndex = rootObject->GetPropertyIndexFromAuxSlotIndex(inlineCache->u.local.slotIndex);
- }
- Js::PropertyAttributes attributes;
- if (rootObject->GetAttributesWithPropertyIndex(functionBody->GetPropertyIdFromCacheId(i), propertyIndex, &attributes)
- && (attributes & PropertyConfigurable) == 0
- && !isJitTimeDataComputed)
- {
- // non configurable
- if (objTypeSpecFldInfo == nullptr)
+ Js::BigPropertyIndex propertyIndex = inlineCache->u.local.slotIndex;
+ if (rawType == type)
{
- objTypeSpecFldInfo = ObjTypeSpecFldInfo::CreateFrom(objTypeSpecFldInfoList->Count(), inlineCache, i, entryPoint, topFunctionBody, functionBody, InlineCacheStatsArg(jitTimeData));
- if (objTypeSpecFldInfo)
- {
- IncInlineCacheCount(clonedMonoInlineCacheCount);
- jitTimeData->GetObjTypeSpecFldInfoArray()->SetInfo(recycler, functionBody, i, objTypeSpecFldInfo);
- objTypeSpecFldInfoList->Prepend(objTypeSpecFldInfo);
- }
+ // type is not tagged, inline slot
+ propertyIndex = rootObject->GetPropertyIndexFromInlineSlotIndex(inlineCache->u.local.slotIndex);
}
- if (objTypeSpecFldInfo != nullptr)
+ else
{
- objTypeSpecFldInfo->SetRootObjectNonConfigurableField(i < functionBody->GetRootObjectStoreInlineCacheStart());
+ propertyIndex = rootObject->GetPropertyIndexFromAuxSlotIndex(inlineCache->u.local.slotIndex);
+ }
+ Js::PropertyAttributes attributes;
+ if (rootObject->GetAttributesWithPropertyIndex(functionBody->GetPropertyIdFromCacheId(i), propertyIndex, &attributes)
+ && (attributes & PropertyConfigurable) == 0
+ && !isJitTimeDataComputed)
+ {
+ // non configurable
+ if (objTypeSpecFldInfo == nullptr)
+ {
+ objTypeSpecFldInfo = ObjTypeSpecFldInfo::CreateFrom(objTypeSpecFldInfoList->Count(), inlineCache, i, entryPoint, topFunctionBody, functionBody, InlineCacheStatsArg(jitTimeData));
+ if (objTypeSpecFldInfo)
+ {
+ IncInlineCacheCount(clonedMonoInlineCacheCount);
+ jitTimeData->GetObjTypeSpecFldInfoArray()->SetInfo(recycler, functionBody, i, objTypeSpecFldInfo);
+ objTypeSpecFldInfoList->Prepend(objTypeSpecFldInfo);
+ }
+ }
+ if (objTypeSpecFldInfo != nullptr)
+ {
+ objTypeSpecFldInfo->SetRootObjectNonConfigurableField(i < functionBody->GetRootObjectStoreInlineCacheStart());
+ }
}
}
}
@@ -2464,33 +2471,36 @@ NativeCodeGenerator::GatherCodeGenData(
// Even if the FldInfo says that the field access may be polymorphic, be optimistic that if the function object has inline caches, they'll be monomorphic
else if(function && Js::ScriptFunctionWithInlineCache::Is(function) && (cacheType & Js::FldInfo_InlineCandidate || !polymorphicCacheOnFunctionBody))
{
- Js::InlineCache *inlineCache = Js::ScriptFunctionWithInlineCache::FromVar(function)->GetInlineCache(i);
- ObjTypeSpecFldInfo* objTypeSpecFldInfo = nullptr;
-
- if(!PHASE_OFF(Js::ObjTypeSpecPhase, functionBody) || !PHASE_OFF(Js::FixedMethodsPhase, functionBody))
+ if (Js::ScriptFunctionWithInlineCache::FromVar(function)->GetInlineCaches() != nullptr)
{
- if(cacheType & (Js::FldInfo_FromLocal | Js::FldInfo_FromProto)) // Remove FldInfo_FromLocal?
- {
+ Js::InlineCache *inlineCache = Js::ScriptFunctionWithInlineCache::FromVar(function)->GetInlineCache(i);
+ ObjTypeSpecFldInfo* objTypeSpecFldInfo = nullptr;
- // WinBlue 170722: Disable ObjTypeSpec optimization for activation object in debug mode,
- // as it can result in BailOutFailedTypeCheck before locals are set to undefined,
- // which can result in using garbage object during bailout/restore values.
- if (!(functionBody->IsInDebugMode() && inlineCache->GetType() &&
- inlineCache->GetType()->GetTypeId() == Js::TypeIds_ActivationObject))
+ if(!PHASE_OFF(Js::ObjTypeSpecPhase, functionBody) || !PHASE_OFF(Js::FixedMethodsPhase, functionBody))
+ {
+ if(cacheType & (Js::FldInfo_FromLocal | Js::FldInfo_FromProto)) // Remove FldInfo_FromLocal?
{
- objTypeSpecFldInfo = ObjTypeSpecFldInfo::CreateFrom(objTypeSpecFldInfoList->Count(), inlineCache, i, entryPoint, topFunctionBody, functionBody, InlineCacheStatsArg(jitTimeData));
- if (objTypeSpecFldInfo)
- {
- IncInlineCacheCount(clonedMonoInlineCacheCount);
- if (!PHASE_OFF(Js::InlineApplyTargetPhase, functionBody) && IsInlinee && (cacheType & Js::FldInfo_InlineCandidate))
- {
- inlineApplyTarget = true;
- }
- if (!isJitTimeDataComputed)
+ // WinBlue 170722: Disable ObjTypeSpec optimization for activation object in debug mode,
+ // as it can result in BailOutFailedTypeCheck before locals are set to undefined,
+ // which can result in using garbage object during bailout/restore values.
+ if (!(functionBody->IsInDebugMode() && inlineCache->GetType() &&
+ inlineCache->GetType()->GetTypeId() == Js::TypeIds_ActivationObject))
+ {
+ objTypeSpecFldInfo = ObjTypeSpecFldInfo::CreateFrom(objTypeSpecFldInfoList->Count(), inlineCache, i, entryPoint, topFunctionBody, functionBody, InlineCacheStatsArg(jitTimeData));
+ if (objTypeSpecFldInfo)
{
- jitTimeData->GetObjTypeSpecFldInfoArray()->SetInfo(recycler, functionBody, i, objTypeSpecFldInfo);
- objTypeSpecFldInfoList->Prepend(objTypeSpecFldInfo);
+ IncInlineCacheCount(clonedMonoInlineCacheCount);
+
+ if (!PHASE_OFF(Js::InlineApplyTargetPhase, functionBody) && IsInlinee && (cacheType & Js::FldInfo_InlineCandidate))
+ {
+ inlineApplyTarget = true;
+ }
+ if (!isJitTimeDataComputed)
+ {
+ jitTimeData->GetObjTypeSpecFldInfoArray()->SetInfo(recycler, functionBody, i, objTypeSpecFldInfo);
+ objTypeSpecFldInfoList->Prepend(objTypeSpecFldInfo);
+ }
}
}
}
@@ -2597,21 +2607,25 @@ NativeCodeGenerator::GatherCodeGenData(
// the inline caches, as their cached data is not guaranteed to be stable while jitting.
Js::InlineCache *const inlineCache =
function && Js::ScriptFunctionWithInlineCache::Is(function)
- ? Js::ScriptFunctionWithInlineCache::FromVar(function)->GetInlineCache(i)
+ ? (Js::ScriptFunctionWithInlineCache::FromVar(function)->GetInlineCaches() != nullptr ? Js::ScriptFunctionWithInlineCache::FromVar(function)->GetInlineCache(i) : nullptr)
: functionBody->GetInlineCache(i);
- Js::PropertyId propertyId = functionBody->GetPropertyIdFromCacheId(i);
- const auto clone = runtimeData->ClonedInlineCaches()->GetInlineCache(functionBody, i);
- if (clone)
- {
- inlineCache->CopyTo(propertyId, functionBody->GetScriptContext(), clone);
- }
- else
+
+ if (inlineCache != nullptr)
{
- runtimeData->ClonedInlineCaches()->SetInlineCache(
- recycler,
- functionBody,
- i,
- inlineCache->Clone(propertyId, functionBody->GetScriptContext()));
+ Js::PropertyId propertyId = functionBody->GetPropertyIdFromCacheId(i);
+ const auto clone = runtimeData->ClonedInlineCaches()->GetInlineCache(functionBody, i);
+ if (clone)
+ {
+ inlineCache->CopyTo(propertyId, functionBody->GetScriptContext(), clone);
+ }
+ else
+ {
+ runtimeData->ClonedInlineCaches()->SetInlineCache(
+ recycler,
+ functionBody,
+ i,
+ inlineCache->Clone(propertyId, functionBody->GetScriptContext()));
+ }
}
}
}
@@ -2724,7 +2738,10 @@ NativeCodeGenerator::GatherCodeGenData(
{
if(function && Js::ScriptFunctionWithInlineCache::Is(function))
{
- inlineCache = Js::ScriptFunctionWithInlineCache::FromVar(function)->GetInlineCache(ldFldInlineCacheIndex);
+ if (Js::ScriptFunctionWithInlineCache::FromVar(function)->GetInlineCaches() != nullptr)
+ {
+ inlineCache = Js::ScriptFunctionWithInlineCache::FromVar(function)->GetInlineCache(ldFldInlineCacheIndex);
+ }
}
else
{
@@ -3174,24 +3191,17 @@ bool NativeCodeGenerator::TryReleaseNonHiPriWorkItem(CodeGenWorkItem* workItem)
}
void
-NativeCodeGenerator::FreeNativeCodeGenAllocation(void* codeAddress, void* thunkAddress)
+NativeCodeGenerator::FreeNativeCodeGenAllocation(void* codeAddress)
{
if (JITManager::GetJITManager()->IsOOPJITEnabled())
{
ThreadContext * context = this->scriptContext->GetThreadContext();
- HRESULT hr = JITManager::GetJITManager()->FreeAllocation(context->GetRemoteThreadContextAddr(), (intptr_t)codeAddress, (intptr_t)thunkAddress);
+ HRESULT hr = JITManager::GetJITManager()->FreeAllocation(context->GetRemoteThreadContextAddr(), (intptr_t)codeAddress);
JITManager::HandleServerCallResult(hr, RemoteCallType::MemFree);
}
else if(this->backgroundAllocators)
{
this->backgroundAllocators->emitBufferManager.FreeAllocation(codeAddress);
-
-#if defined(_CONTROL_FLOW_GUARD) && (_M_IX86 || _M_X64)
- if (thunkAddress)
- {
- this->scriptContext->GetThreadContext()->GetJITThunkEmitter()->FreeThunk((uintptr_t)thunkAddress);
- }
-#endif
}
}
@@ -3205,7 +3215,7 @@ NativeCodeGenerator::QueueFreeNativeCodeGenAllocation(void* codeAddress, void *
return;
}
- if (!JITManager::GetJITManager()->IsOOPJITEnabled() || !CONFIG_FLAG(OOPCFGRegistration))
+ if (JITManager::GetJITManager()->IsOOPJITEnabled() && !CONFIG_FLAG(OOPCFGRegistration))
{
//DeRegister Entry Point for CFG
if (thunkAddress)
@@ -3228,12 +3238,6 @@ NativeCodeGenerator::QueueFreeNativeCodeGenAllocation(void* codeAddress, void *
// The foreground allocators may have been used
if(this->foregroundAllocators && this->foregroundAllocators->emitBufferManager.FreeAllocation(codeAddress))
{
-#if defined(_CONTROL_FLOW_GUARD) && (_M_IX86 || _M_X64)
- if (thunkAddress)
- {
- this->scriptContext->GetThreadContext()->GetJITThunkEmitter()->FreeThunk((uintptr_t)thunkAddress);
- }
-#endif
return;
}
@@ -3695,6 +3699,10 @@ JITManager::HandleServerCallResult(HRESULT hr, RemoteCallType callType)
break;
}
+ if (CONFIG_FLAG(CrashOnOOPJITFailure))
+ {
+ RpcFailure_fatal_error(hr);
+ }
// we only expect to see these hresults in case server has been closed. failfast otherwise
if (hr != HRESULT_FROM_WIN32(RPC_S_CALL_FAILED) &&
hr != HRESULT_FROM_WIN32(RPC_S_CALL_FAILED_DNE))
diff --git a/lib/Backend/NativeCodeGenerator.h b/lib/Backend/NativeCodeGenerator.h
index dc87262b272..0992e11f91e 100644
--- a/lib/Backend/NativeCodeGenerator.h
+++ b/lib/Backend/NativeCodeGenerator.h
@@ -101,7 +101,7 @@ void SetProfileMode(BOOL fSet);
void UpdateQueueForDebugMode();
bool IsBackgroundJIT() const;
void EnterScriptStart();
- void FreeNativeCodeGenAllocation(void* codeAddress, void* thunkAddress);
+ void FreeNativeCodeGenAllocation(void* codeAddress);
bool TryReleaseNonHiPriWorkItem(CodeGenWorkItem* workItem);
void QueueFreeNativeCodeGenAllocation(void* codeAddress, void* thunkAddress);
@@ -129,7 +129,7 @@ void SetProfileMode(BOOL fSet);
InProcCodeGenAllocators *CreateAllocators(PageAllocator *const pageAllocator)
{
- return HeapNew(InProcCodeGenAllocators, pageAllocator->GetAllocationPolicyManager(), scriptContext, scriptContext->GetThreadContext()->GetCodePageAllocators(), GetCurrentProcess());
+ return HeapNew(InProcCodeGenAllocators, pageAllocator->GetAllocationPolicyManager(), scriptContext, scriptContext->GetThreadContext(), scriptContext->GetThreadContext()->GetCodePageAllocators(), GetCurrentProcess());
}
InProcCodeGenAllocators *EnsureForegroundAllocators(PageAllocator * pageAllocator)
@@ -276,7 +276,7 @@ void SetProfileMode(BOOL fSet);
FreeLoopBodyJob* freeLoopBodyJob = static_cast<FreeLoopBodyJob*>(job);
// Free Loop Body
- nativeCodeGen->FreeNativeCodeGenAllocation(freeLoopBodyJob->codeAddress, freeLoopBodyJob->thunkAddress);
+ nativeCodeGen->FreeNativeCodeGenAllocation(freeLoopBodyJob->codeAddress);
return true;
}
diff --git a/lib/Backend/ServerScriptContext.cpp b/lib/Backend/ServerScriptContext.cpp
index 9b4ed6e6cf9..674df524dad 100644
--- a/lib/Backend/ServerScriptContext.cpp
+++ b/lib/Backend/ServerScriptContext.cpp
@@ -22,8 +22,8 @@ ServerScriptContext::ServerScriptContext(ScriptContextDataIDL * contextData, Ser
threadContextHolder(threadContextInfo),
m_isPRNGSeeded(false),
m_sourceCodeArena(_u("JITSourceCodeArena"), threadContextInfo->GetForegroundPageAllocator(), Js::Throw::OutOfMemory, nullptr),
- m_interpreterThunkBufferManager(&m_sourceCodeArena, threadContextInfo->GetThunkPageAllocators(), nullptr, _u("Interpreter thunk buffer"), GetThreadContext()->GetProcessHandle()),
- m_asmJsInterpreterThunkBufferManager(&m_sourceCodeArena, threadContextInfo->GetThunkPageAllocators(), nullptr, _u("Asm.js interpreter thunk buffer"), GetThreadContext()->GetProcessHandle()),
+ m_interpreterThunkBufferManager(&m_sourceCodeArena, threadContextInfo->GetThunkPageAllocators(), nullptr, threadContextInfo, _u("Interpreter thunk buffer"), GetThreadContext()->GetProcessHandle()),
+ m_asmJsInterpreterThunkBufferManager(&m_sourceCodeArena, threadContextInfo->GetThunkPageAllocators(), nullptr, threadContextInfo, _u("Asm.js interpreter thunk buffer"), GetThreadContext()->GetProcessHandle()),
m_domFastPathHelperMap(nullptr),
m_moduleRecords(&HeapAllocator::Instance),
m_globalThisAddr(0),
diff --git a/lib/Backend/ServerThreadContext.cpp b/lib/Backend/ServerThreadContext.cpp
index 2fa5119117d..cd67bf2a1da 100644
--- a/lib/Backend/ServerThreadContext.cpp
+++ b/lib/Backend/ServerThreadContext.cpp
@@ -18,10 +18,10 @@ ServerThreadContext::ServerThreadContext(ThreadContextDataIDL * data, HANDLE pro
m_sectionAllocator(processHandle),
m_thunkPageAllocators(nullptr, /* allocXData */ false, &m_sectionAllocator, nullptr, processHandle),
m_codePageAllocators(nullptr, ALLOC_XDATA, &m_sectionAllocator, &m_preReservedSectionAllocator, processHandle),
- m_codeGenAlloc(nullptr, nullptr, &m_codePageAllocators, processHandle),
#if defined(_CONTROL_FLOW_GUARD) && (_M_IX86 || _M_X64)
m_jitThunkEmitter(this, &m_sectionAllocator, processHandle),
#endif
+ m_codeGenAlloc(nullptr, nullptr, this, &m_codePageAllocators, processHandle),
m_pageAlloc(nullptr, Js::Configuration::Global.flags, PageAllocatorType_BGJIT,
AutoSystemInfo::Data.IsLowMemoryProcess() ?
PageAllocator::DefaultLowMaxFreePageCount :
diff --git a/lib/Backend/ServerThreadContext.h b/lib/Backend/ServerThreadContext.h
index f6b3dcc7c9c..520b55a6821 100644
--- a/lib/Backend/ServerThreadContext.h
+++ b/lib/Backend/ServerThreadContext.h
@@ -68,10 +68,10 @@ class ServerThreadContext : public ThreadContextInfo
SectionAllocWrapper m_sectionAllocator;
CustomHeap::OOPCodePageAllocators m_thunkPageAllocators;
CustomHeap::OOPCodePageAllocators m_codePageAllocators;
+ OOPCodeGenAllocators m_codeGenAlloc;
#if defined(_CONTROL_FLOW_GUARD) && (_M_IX86 || _M_X64)
OOPJITThunkEmitter m_jitThunkEmitter;
#endif
- OOPCodeGenAllocators m_codeGenAlloc;
// only allocate with this from foreground calls (never from CodeGen calls)
PageAllocator m_pageAlloc;
HANDLE m_processHandle;
diff --git a/lib/Common/BackendApi.h b/lib/Common/BackendApi.h
index e5ed56b8a3a..6b939d99fa6 100644
--- a/lib/Common/BackendApi.h
+++ b/lib/Common/BackendApi.h
@@ -37,7 +37,6 @@ struct InlinedFrameLayout;
typedef intptr_t IntConstType;
typedef uintptr_t UIntConstType;
-typedef IntMath::Type IntConstMath;
typedef double FloatConstType;
#include "EmitBuffer.h"
diff --git a/lib/Common/ChakraCoreVersion.h b/lib/Common/ChakraCoreVersion.h
index 6d1250b8a3e..c53910414ce 100644
--- a/lib/Common/ChakraCoreVersion.h
+++ b/lib/Common/ChakraCoreVersion.h
@@ -17,7 +17,7 @@
// ChakraCore version number definitions (used in ChakraCore binary metadata)
#define CHAKRA_CORE_MAJOR_VERSION 1
#define CHAKRA_CORE_MINOR_VERSION 7
-#define CHAKRA_CORE_PATCH_VERSION 3
+#define CHAKRA_CORE_PATCH_VERSION 4
#define CHAKRA_CORE_VERSION_RELEASE_QFE 0 // Redundant with PATCH_VERSION. Keep this value set to 0.
// -------------
diff --git a/lib/Common/ConfigFlagsList.h b/lib/Common/ConfigFlagsList.h
index d6950aec70f..eff25065052 100644
--- a/lib/Common/ConfigFlagsList.h
+++ b/lib/Common/ConfigFlagsList.h
@@ -685,6 +685,7 @@ PHASE(All)
#define DEFAULT_CONFIG_PerfHintLevel (1)
#define DEFAULT_CONFIG_OOPJITMissingOpts (true)
#define DEFAULT_CONFIG_OOPCFGRegistration (true)
+#define DEFAULT_CONFIG_CrashOnOOPJITFailure (false)
#define DEFAULT_CONFIG_ForceJITCFGCheck (false)
#define DEFAULT_CONFIG_UseJITTrampoline (true)
@@ -1260,6 +1261,7 @@ FLAGNR(Number, FuncObjectInlineCacheThreshold , "Maximum number of inline cach
FLAGNR(Boolean, NoDeferParse , "Disable deferred parsing", false)
FLAGNR(Boolean, NoLogo , "No logo, which we don't display anyways", false)
FLAGNR(Boolean, OOPJITMissingOpts , "Use optimizations that are missing from OOP JIT", DEFAULT_CONFIG_OOPJITMissingOpts)
+FLAGNR(Boolean, CrashOnOOPJITFailure , "Crash runtime process if JIT process crashes", DEFAULT_CONFIG_CrashOnOOPJITFailure)
FLAGNR(Boolean, OOPCFGRegistration , "Do CFG registration OOP (under OOP JIT)", DEFAULT_CONFIG_OOPCFGRegistration)
FLAGNR(Boolean, ForceJITCFGCheck , "Have JIT code always do CFG check even if range check succeeded", DEFAULT_CONFIG_ForceJITCFGCheck)
FLAGNR(Boolean, UseJITTrampoline , "Use trampoline for JIT entry points and emit range checks for it", DEFAULT_CONFIG_UseJITTrampoline)
diff --git a/lib/Common/Memory/CustomHeap.cpp b/lib/Common/Memory/CustomHeap.cpp
index 1dd3f2f3dbe..b775ed690de 100644
--- a/lib/Common/Memory/CustomHeap.cpp
+++ b/lib/Common/Memory/CustomHeap.cpp
@@ -467,6 +467,7 @@ Allocation* Heap::AllocLargeObject(size_t bytes, usho
allocation->largeObjectAllocation.segment = segment;
allocation->largeObjectAllocation.isDecommitted = false;
allocation->size = pages * AutoSystemInfo::PageSize;
+ allocation->thunkAddress = 0;
#if PDATA_ENABLED
allocation->xdata = xdata;
@@ -607,6 +608,7 @@ bool Heap::AllocInPage(Page* page, size_t bytes, usho
allocation->page = page;
allocation->size = bytes;
allocation->address = address;
+ allocation->thunkAddress = 0;
#if DBG_DUMP
this->allocationsSinceLastCompact += bytes;
diff --git a/lib/Common/Memory/CustomHeap.h b/lib/Common/Memory/CustomHeap.h
index e19a7eb971c..0260f352592 100644
--- a/lib/Common/Memory/CustomHeap.h
+++ b/lib/Common/Memory/CustomHeap.h
@@ -86,6 +86,7 @@ struct Allocation
} largeObjectAllocation;
};
+ uintptr_t thunkAddress;
__field_bcount(size) char* address;
size_t size;
diff --git a/lib/JITClient/JITManager.cpp b/lib/JITClient/JITManager.cpp
index 2389e420f31..517ff168d2e 100644
--- a/lib/JITClient/JITManager.cpp
+++ b/lib/JITClient/JITManager.cpp
@@ -547,15 +547,14 @@ JITManager::CloseScriptContext(
HRESULT
JITManager::FreeAllocation(
__in PTHREADCONTEXT_HANDLE threadContextInfoAddress,
- __in intptr_t codeAddress,
- __in intptr_t thunkAddress)
+ __in intptr_t codeAddress)
{
Assert(IsOOPJITEnabled());
HRESULT hr = E_FAIL;
RpcTryExcept
{
- hr = ClientFreeAllocation(m_rpcBindingHandle, threadContextInfoAddress, codeAddress, thunkAddress);
+ hr = ClientFreeAllocation(m_rpcBindingHandle, threadContextInfoAddress, codeAddress);
}
RpcExcept(RpcExceptionFilter(RpcExceptionCode()))
{
diff --git a/lib/JITClient/JITManager.h b/lib/JITClient/JITManager.h
index 0de35b11203..129932e6323 100644
--- a/lib/JITClient/JITManager.h
+++ b/lib/JITClient/JITManager.h
@@ -83,8 +83,7 @@ class JITManager
HRESULT FreeAllocation(
__in PTHREADCONTEXT_HANDLE threadContextInfoAddress,
- __in intptr_t codeAddress,
- __in intptr_t thunkAddress);
+ __in intptr_t codeAddress);
HRESULT SetIsPRNGSeeded(
__in PSCRIPTCONTEXT_HANDLE scriptContextInfoAddress,
@@ -204,8 +203,7 @@ class JITManager
HRESULT FreeAllocation(
__in PTHREADCONTEXT_HANDLE threadContextInfoAddress,
- __in intptr_t codeAddress,
- __in intptr_t thunkAddress)
+ __in intptr_t codeAddress)
{ Assert(false); return E_FAIL; }
HRESULT SetIsPRNGSeeded(
diff --git a/lib/JITIDL/ChakraJIT.acf b/lib/JITIDL/ChakraJIT.acf
index 3eec504caea..2326e62a04c 100644
--- a/lib/JITIDL/ChakraJIT.acf
+++ b/lib/JITIDL/ChakraJIT.acf
@@ -4,4 +4,11 @@
//-------------------------------------------------------------------------------------------------------
typedef [context_handle_noserialize] PTHREADCONTEXT_HANDLE;
-typedef [context_handle_noserialize] PSCRIPTCONTEXT_HANDLE;
\ No newline at end of file
+typedef [context_handle_noserialize] PSCRIPTCONTEXT_HANDLE;
+
+[
+ type_strict_context_handle
+]
+interface IChakraJIT
+{
+}
diff --git a/lib/JITIDL/ChakraJIT.idl b/lib/JITIDL/ChakraJIT.idl
index 473a768be55..0f014ba09e1 100644
--- a/lib/JITIDL/ChakraJIT.idl
+++ b/lib/JITIDL/ChakraJIT.idl
@@ -78,8 +78,7 @@ interface IChakraJIT
HRESULT FreeAllocation(
[in] handle_t binding,
[in] PTHREADCONTEXT_HANDLE threadContextInfoAddress,
- [in] CHAKRA_PTR codeAddress,
- [in] CHAKRA_PTR thunkAddress);
+ [in] CHAKRA_PTR codeAddress);
HRESULT NewInterpreterThunkBlock(
[in] handle_t binding,
diff --git a/lib/JITServer/JITServer.cpp b/lib/JITServer/JITServer.cpp
index d9c9a23b346..b96a0def39a 100644
--- a/lib/JITServer/JITServer.cpp
+++ b/lib/JITServer/JITServer.cpp
@@ -629,8 +629,7 @@ HRESULT
ServerFreeAllocation(
/* [in] */ handle_t binding,
/* [in] */ __RPC__in PTHREADCONTEXT_HANDLE threadContextInfo,
- /* [in] */ intptr_t codeAddress,
- /* [in] */ intptr_t thunkAddress)
+ /* [in] */ intptr_t codeAddress)
{
ServerThreadContext * context = (ServerThreadContext*)DecodePointer(threadContextInfo);
@@ -642,17 +641,7 @@ ServerFreeAllocation(
return ServerCallWrapper(context, [&]()->HRESULT
{
- if (CONFIG_FLAG(OOPCFGRegistration) && !thunkAddress)
- {
- context->SetValidCallTargetForCFG((PVOID)codeAddress, false);
- }
context->GetCodeGenAllocators()->emitBufferManager.FreeAllocation((void*)codeAddress);
-#if defined(_CONTROL_FLOW_GUARD) && (_M_IX86 || _M_X64)
- if (thunkAddress)
- {
- context->GetJITThunkEmitter()->FreeThunk(thunkAddress);
- }
-#endif
return S_OK;
});
}
diff --git a/lib/Parser/Parse.cpp b/lib/Parser/Parse.cpp
index ae968ac8e08..4df6b8b2f37 100644
--- a/lib/Parser/Parse.cpp
+++ b/lib/Parser/Parse.cpp
@@ -2505,7 +2505,7 @@ ModuleImportOrExportEntry* Parser::AddModuleImportOrExportEntry(ModuleImportOrEx
void Parser::AddModuleLocalExportEntry(ParseNodePtr varDeclNode)
{
- Assert(varDeclNode->nop == knopVarDecl || varDeclNode->nop == knopLetDecl || varDeclNode->nop == knopConstDecl);
+ AssertOrFailFast(varDeclNode->nop == knopVarDecl || varDeclNode->nop == knopLetDecl || varDeclNode->nop == knopConstDecl);
IdentPtr localName = varDeclNode->sxVar.pid;
varDeclNode->sxVar.sym->SetIsModuleExportStorage(true);
@@ -3014,15 +3014,19 @@ ParseNodePtr Parser::ParseExportDeclaration(bool *needTerminator)
if (buildAST)
{
- ParseNodePtr temp = pnode;
- while (temp->nop == knopList)
- {
- ParseNodePtr varDeclNode = temp->sxBin.pnode1;
- temp = temp->sxBin.pnode2;
-
- AddModuleLocalExportEntry(varDeclNode);
- }
- AddModuleLocalExportEntry(temp);
+ ForEachItemInList(pnode, [&](ParseNodePtr item) {
+ if (item->nop == knopAsg)
+ {
+ Parser::MapBindIdentifier(item, [&](ParseNodePtr subItem)
+ {
+ AddModuleLocalExportEntry(subItem);
+ });
+ }
+ else
+ {
+ AddModuleLocalExportEntry(item);
+ }
+ });
}
}
break;
@@ -5194,6 +5198,10 @@ ParseNodePtr Parser::ParseFncDecl(ushort flags, LPCOLESTR pNameHint, const bool
bool isRedecl = false;
ParseNodePtr vardecl = CreateVarDeclNode(pnodeFnc->sxFnc.pnodeName->sxVar.pid, STVariable, false, nullptr, false, &isRedecl);
vardecl->sxVar.isBlockScopeFncDeclVar = true;
+ if (vardecl->sxVar.sym->GetIsFormal())
+ {
+ GetCurrentFunctionNode()->sxFnc.SetHasAnyWriteToFormals(true);
+ }
if (isRedecl)
{
vardecl->sxVar.sym->SetHasBlockFncVarRedecl();
@@ -12962,6 +12970,7 @@ ParseNodePtr Parser::ParseDestructuredInitializer(ParseNodePtr lhsNode,
pnodeDestructAsg = CreateNodeWithScanner();
pnodeDestructAsg->sxBin.pnode1 = lhsNode;
pnodeDestructAsg->sxBin.pnode2 = pnodeDefault;
+ pnodeDestructAsg->sxBin.pnodeNext = nullptr;
pnodeDestructAsg->ichMin = lhsNode->ichMin;
pnodeDestructAsg->ichLim = pnodeDefault->ichLim;
}
diff --git a/lib/Parser/RegexCompileTime.cpp b/lib/Parser/RegexCompileTime.cpp
index fcac6735a0b..7d6ad5d34cc 100644
--- a/lib/Parser/RegexCompileTime.cpp
+++ b/lib/Parser/RegexCompileTime.cpp
@@ -794,7 +794,7 @@ namespace UnifiedRegex
{
// We'll need to expand each character of literal into its equivalence class
isEquivClass = true;
- return length * CaseInsensitive::EquivClassSize;
+ return UInt32Math::MulAdd<CaseInsensitive::EquivClassSize, 0>(length);
}
else
return length;
@@ -1756,7 +1756,7 @@ namespace UnifiedRegex
{
Assert(curr->head->tag != Concat);
Assert(prev == 0 || !(prev->head->LiteralLength() > 0 && curr->head->LiteralLength() > 0));
- n += curr->head->TransferPass0(compiler, litbuf);
+ n = UInt32Math::Add(n, curr->head->TransferPass0(compiler, litbuf));
#if DBG
prev = curr;
#endif
@@ -2087,7 +2087,7 @@ namespace UnifiedRegex
{
Assert(curr->head->tag != Alt);
Assert(prev == 0 || !(prev->head->IsCharOrPositiveSet() && curr->head->IsCharOrPositiveSet()));
- n += curr->head->TransferPass0(compiler, litbuf);
+ n = UInt32Math::Add(n, curr->head->TransferPass0(compiler, litbuf));
#if DBG
prev = curr;
#endif
@@ -4381,6 +4381,11 @@ namespace UnifiedRegex
{
// Program will own literal buffer. Prepare buffer and nodes for case-invariant matching if necessary.
CharCount finalLen = root->TransferPass0(*this, litbuf);
+ if (finalLen < root->LiteralLength()) // overflowed
+ {
+ Js::Throw::OutOfMemory();
+ }
+
program->rep.insts.litbuf = finalLen == 0 ? 0 : RecyclerNewArrayLeaf(scriptContext->GetRecycler(), Char, finalLen);
program->rep.insts.litbufLen = 0;
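
The three regex changes above replace silently wrapping uint32 arithmetic (the literal-length accumulation and the per-character equivalence-class expansion) with checked helpers, and the new finalLen test rejects any remaining wrap before the literal buffer is allocated. A rough, self-contained sketch of what such checked helpers do (illustration only, not the actual UInt32Math implementation, which raises OOM on overflow):

    #include <cstdint>
    #include <limits>
    #include <stdexcept>

    // Checked uint32 addition: fail loudly instead of wrapping around.
    static uint32_t CheckedAdd(uint32_t lhs, uint32_t rhs)
    {
        if (lhs > std::numeric_limits<uint32_t>::max() - rhs)
            throw std::overflow_error("uint32 add overflow");
        return lhs + rhs;
    }

    // Checked uint32 multiplication, e.g. length * EquivClassSize.
    static uint32_t CheckedMul(uint32_t lhs, uint32_t rhs)
    {
        if (rhs != 0 && lhs > std::numeric_limits<uint32_t>::max() / rhs)
            throw std::overflow_error("uint32 mul overflow");
        return lhs * rhs;
    }
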
diff --git a/lib/Parser/Scan.cpp b/lib/Parser/Scan.cpp
index bfba818b4dc..7c7f97c064a 100644
--- a/lib/Parser/Scan.cpp
+++ b/lib/Parser/Scan.cpp
@@ -1110,6 +1110,10 @@ tokens Scanner::ScanStringConstant(OLECHAR delim, EncodedCharPtr
{
// Notify the scanner to update current line, number of lines etc
NotifyScannedNewLine();
+
+ // We haven't updated m_currentCharacter yet, so make sure the MinLine info is correct in case we error out.
+ m_pchMinLine = p;
+
break;
}
@@ -1400,6 +1404,10 @@ tokens Scanner::ScanStringConstant(OLECHAR delim, EncodedCharPtr
// Template literal strings ignore all escaped line continuation tokens
NotifyScannedNewLine();
+
+ // We haven't updated m_currentCharacter yet, so make sure the MinLine info is correct in case we error out.
+ m_pchMinLine = p;
+
continue;
}
diff --git a/lib/Parser/Scan.h b/lib/Parser/Scan.h
index 30e3ca7ddf4..09d9c1e3255 100644
--- a/lib/Parser/Scan.h
+++ b/lib/Parser/Scan.h
@@ -480,17 +480,31 @@ class Scanner : public IScanner, public EncodingPolicy
// have if the entire file was converted to Unicode (UTF16-LE).
charcount_t IchMinTok(void) const
{
- Assert(m_pchMinTok - m_pchBase >= 0);
- Assert(m_pchMinTok - m_pchBase <= LONG_MAX);
- return static_cast< charcount_t >(m_pchMinTok - m_pchBase - m_cMinTokMultiUnits);
+
+ AssertOrFailFast(m_pchMinTok - m_pchBase >= 0);
+ AssertOrFailFast(m_pchMinTok - m_pchBase <= LONG_MAX);
+ if (static_cast<charcount_t>(m_pchMinTok - m_pchBase) < m_cMinTokMultiUnits)
+ {
+ AssertMsg(false, "IchMinTok subtraction overflow");
+ return 0;
+ }
+
+ return static_cast<charcount_t>(m_pchMinTok - m_pchBase - m_cMinTokMultiUnits);
}
// Returns the character offset of the character immediately following the token. The character offset is the offset the first
// character of the token would have if the entire file was converted to Unicode (UTF16-LE).
charcount_t IchLimTok(void) const
{
- Assert(m_currentCharacter - m_pchBase >= 0);
- Assert(m_currentCharacter - m_pchBase <= LONG_MAX);
+
+ AssertOrFailFast(m_currentCharacter - m_pchBase >= 0);
+ AssertOrFailFast(m_currentCharacter - m_pchBase <= LONG_MAX);
+ if (static_cast<charcount_t>(m_currentCharacter - m_pchBase) < this->m_cMultiUnits)
+ {
+ AssertMsg(false, "IchLimTok subtraction overflow");
+ return 0;
+ }
+
return static_cast< charcount_t >(m_currentCharacter - m_pchBase - this->m_cMultiUnits);
}
@@ -542,8 +556,15 @@ class Scanner : public IScanner, public EncodingPolicy
// Returns the character offset within the stream of the first character on the current line.
charcount_t IchMinLine(void) const
{
- Assert(m_pchMinLine - m_pchBase >= 0);
- Assert(m_pchMinLine - m_pchBase <= LONG_MAX);
+
+ AssertOrFailFast(m_pchMinLine - m_pchBase >= 0);
+ AssertOrFailFast(m_pchMinLine - m_pchBase <= LONG_MAX);
+ if (static_cast<charcount_t>(m_pchMinLine - m_pchBase) < m_cMinLineMultiUnits)
+ {
+ AssertMsg(false, "IchMinLine subtraction overflow");
+ return 0;
+ }
+
return static_cast<charcount_t>(m_pchMinLine - m_pchBase - m_cMinLineMultiUnits);
}
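
All three scanner accessors follow the same shape: range-check the pointer difference, then guard the unsigned subtraction, since charcount_t is unsigned and `a - b` with `b > a` would wrap to a huge offset. A generic sketch of the guard (hypothetical helper, not part of the Scanner):

    #include <cassert>
    #include <cstdint>

    typedef uint32_t charcount_t;

    // Returns rawOffset - correction, clamping to 0 instead of letting the
    // unsigned subtraction wrap when correction > rawOffset.
    static charcount_t SafeOffset(charcount_t rawOffset, charcount_t correction)
    {
        if (rawOffset < correction)
        {
            assert(false && "offset subtraction underflow");
            return 0;
        }
        return rawOffset - correction;
    }
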
diff --git a/lib/Runtime/Language/InterpreterStackFrame.cpp b/lib/Runtime/Language/InterpreterStackFrame.cpp
index a60b38b4334..2ddc0b49a45 100644
--- a/lib/Runtime/Language/InterpreterStackFrame.cpp
+++ b/lib/Runtime/Language/InterpreterStackFrame.cpp
@@ -1065,6 +1065,7 @@ namespace Js
if (this->function->GetHasInlineCaches() && Js::ScriptFunctionWithInlineCache::Is(this->function))
{
this->inlineCaches = (void**)Js::ScriptFunctionWithInlineCache::FromVar(this->function)->GetInlineCaches();
+ Assert(this->inlineCaches != nullptr);
}
else
{
diff --git a/lib/Runtime/Language/JavascriptOperators.cpp b/lib/Runtime/Language/JavascriptOperators.cpp
index 103eb8b0f67..4ed5cecaefa 100644
--- a/lib/Runtime/Language/JavascriptOperators.cpp
+++ b/lib/Runtime/Language/JavascriptOperators.cpp
@@ -4772,64 +4772,45 @@ namespace Js
return returnValue;
}
+ template <typename T, T(*func)(Var, ScriptContext*)> bool MemsetConversion(Var value, ScriptContext* scriptContext, T* result)
+ {
+ ImplicitCallFlags flags = scriptContext->GetThreadContext()->TryWithDisabledImplicitCall([&]
+ {
+ *result = func(value, scriptContext);
+ });
+ return (flags & (~ImplicitCall_None)) == 0;
+ }
+
BOOL JavascriptOperators::OP_Memset(Var instance, int32 start, Var value, int32 length, ScriptContext* scriptContext)
{
if (length <= 0)
{
return false;
}
+
TypeId instanceType = JavascriptOperators::GetTypeId(instance);
BOOL returnValue = false;
// The typed array will deal with all possible values for the index
-#define MEMSET_TYPED_ARRAY(type, conversion) type ## ::FromVar(instance)->DirectSetItemAtRange(start, length, value, JavascriptConversion:: ## conversion)
- switch (instanceType)
- {
- case TypeIds_Int8Array:
- {
- returnValue = MEMSET_TYPED_ARRAY(Int8Array, ToInt8);
- break;
+#define MEMSET_TYPED_ARRAY_CASE(type, conversion) \
+ case TypeIds_##type: \
+ { \
+ type## ::TypedArrayType typedValue = 0; \
+ if (!MemsetConversion<type## ::TypedArrayType, JavascriptConversion:: ## conversion>(value, scriptContext, &typedValue)) return false; \
+ returnValue = type## ::FromVar(instance)->DirectSetItemAtRange(start, length, typedValue); \
+ break; \
}
- case TypeIds_Uint8Array:
- {
- returnValue = MEMSET_TYPED_ARRAY(Uint8Array, ToUInt8);
- break;
- }
- case TypeIds_Uint8ClampedArray:
- {
- returnValue = MEMSET_TYPED_ARRAY(Uint8ClampedArray, ToUInt8Clamped);
- break;
- }
- case TypeIds_Int16Array:
- {
- returnValue = MEMSET_TYPED_ARRAY(Int16Array, ToInt16);
- break;
- }
- case TypeIds_Uint16Array:
- {
- returnValue = MEMSET_TYPED_ARRAY(Uint16Array, ToUInt16);
- break;
- }
- case TypeIds_Int32Array:
- {
- returnValue = MEMSET_TYPED_ARRAY(Int32Array, ToInt32);
- break;
- }
- case TypeIds_Uint32Array:
- {
- returnValue = MEMSET_TYPED_ARRAY(Uint32Array, ToUInt32);
- break;
- }
- case TypeIds_Float32Array:
- {
- returnValue = MEMSET_TYPED_ARRAY(Float32Array, ToFloat);
- break;
- }
- case TypeIds_Float64Array:
+ switch (instanceType)
{
- returnValue = MEMSET_TYPED_ARRAY(Float64Array, ToNumber);
- break;
- }
+ MEMSET_TYPED_ARRAY_CASE(Int8Array, ToInt8)
+ MEMSET_TYPED_ARRAY_CASE(Uint8Array, ToUInt8)
+ MEMSET_TYPED_ARRAY_CASE(Uint8ClampedArray, ToUInt8Clamped)
+ MEMSET_TYPED_ARRAY_CASE(Int16Array, ToInt16)
+ MEMSET_TYPED_ARRAY_CASE(Uint16Array, ToUInt16)
+ MEMSET_TYPED_ARRAY_CASE(Int32Array, ToInt32)
+ MEMSET_TYPED_ARRAY_CASE(Uint32Array, ToUInt32)
+ MEMSET_TYPED_ARRAY_CASE(Float32Array, ToFloat)
+ MEMSET_TYPED_ARRAY_CASE(Float64Array, ToNumber)
case TypeIds_NativeFloatArray:
case TypeIds_NativeIntArray:
case TypeIds_Array:
@@ -4858,7 +4839,11 @@ namespace Js
{
return false;
}
- int32 intValue = JavascriptConversion::ToInt32(value, scriptContext);
+ int32 intValue = 0;
+ if (!MemsetConversion<int32, JavascriptConversion::ToInt32>(value, scriptContext, &intValue))
+ {
+ return false;
+ }
returnValue = JavascriptArray::FromVar(instance)->DirectSetItemAtRange(start, length, intValue);
}
else
@@ -4869,7 +4854,11 @@ namespace Js
return false;
}
- double doubleValue = JavascriptConversion::ToNumber(value, scriptContext);
+ double doubleValue = 0;
+ if (!MemsetConversion<double, JavascriptConversion::ToNumber>(value, scriptContext, &doubleValue))
+ {
+ return false;
+ }
// Special case for missing item
if (SparseArraySegment<double>::IsMissingItem(&doubleValue))
{
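
The OP_Memset rewrite converts the value to the element type once, up front, under a guard that watches for implicit calls (a valueOf/toString on the value can call back into script and, for example, detach the target buffer); if any implicit call fired, the memset fast path is abandoned. A minimal, self-contained sketch of that pattern (FakeContext and implicitCallFlag are stand-ins for the thread context's implicit-call flags):

    // Convert once; report whether any script-visible callback ran during conversion.
    struct FakeContext { bool implicitCallFlag = false; };

    template <typename T, typename ConvertFn>
    bool TryConvertWithoutSideEffects(FakeContext* ctx, ConvertFn convert, T* result)
    {
        ctx->implicitCallFlag = false;   // cleared, as TryWithDisabledImplicitCall does
        *result = convert(ctx);          // a real conversion may run valueOf/toString here
        return !ctx->implicitCallFlag;   // take the memset fast path only if nothing ran
    }
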
diff --git a/lib/Runtime/Language/ValueType.cpp b/lib/Runtime/Language/ValueType.cpp
index e6f5cf92a3f..22ef773b63d 100644
--- a/lib/Runtime/Language/ValueType.cpp
+++ b/lib/Runtime/Language/ValueType.cpp
@@ -1058,7 +1058,7 @@ ValueType ValueType::ToDefiniteNumber() const
ValueType ValueType::ToDefiniteAnyNumber() const
{
// Not asserting on expected value type because Conv_Num allows converting values of arbitrary types to number
- if(OneOn(Bits::Object))
+ if(OneOn(Bits::Object | Bits::PrimitiveOrObject))
return Verify(Bits::Number | Bits::CanBeTaggedValue);
Bits numberBits =
bits &
diff --git a/lib/Runtime/Library/GlobalObject.cpp b/lib/Runtime/Library/GlobalObject.cpp
index a8661ca82e3..8381a0648b0 100644
--- a/lib/Runtime/Library/GlobalObject.cpp
+++ b/lib/Runtime/Library/GlobalObject.cpp
@@ -597,7 +597,13 @@ namespace Js
char16 const * sourceString = argString->GetSz();
charcount_t sourceLen = argString->GetLength();
FastEvalMapString key(sourceString, sourceLen, moduleID, strictMode, isLibraryCode);
- bool found = scriptContext->IsInEvalMap(key, isIndirect, &pfuncScript);
+
+
+
+ // A PropertyString's buffer points into its PropertyRecord's inline buffer; if both the PropertyString and the PropertyRecord are collected,
+ // we'd leave the PropertyRecord's interior buffer pointer dangling in the EvalMap. So do not use the eval map when we are evaluating a PropertyString.
+ bool useEvalMap = !VirtualTableInfo<Js::PropertyString>::HasVirtualTable(argString);
+ bool found = useEvalMap && scriptContext->IsInEvalMap(key, isIndirect, &pfuncScript);
if (!found || (!isIndirect && pfuncScript->GetEnvironment() != &NullFrameDisplay))
{
uint32 grfscr = additionalGrfscr | fscrReturnExpression | fscrEval | fscrEvalCode | fscrGlobalCode;
@@ -610,7 +616,7 @@ namespace Js
pfuncScript = library->GetGlobalObject()->EvalHelper(scriptContext, argString->GetSz(), argString->GetLength(), moduleID,
grfscr, Constants::EvalCode, doRegisterDocument, isIndirect, strictMode);
- if (!found)
+ if (useEvalMap && !found)
{
scriptContext->AddToEvalMap(key, isIndirect, pfuncScript);
}
@@ -1448,7 +1454,6 @@ namespace Js
bs->AppendChars(chw);
}
}
-
LEAVE_PINNED_SCOPE(); // src
return bs;
diff --git a/lib/Runtime/Library/JavascriptArray.cpp b/lib/Runtime/Library/JavascriptArray.cpp
index 3aaf9afa1cf..f922d696549 100644
--- a/lib/Runtime/Library/JavascriptArray.cpp
+++ b/lib/Runtime/Library/JavascriptArray.cpp
@@ -5825,6 +5825,8 @@ namespace Js
{
isFloatArray = true;
}
+ // The code below can throw due to OOM or SO. Just fail fast in those cases.
+ AutoDisableInterrupt failFastOnError(scriptContext->GetThreadContext());
if (pArr->head->length != 0)
{
@@ -5884,6 +5886,8 @@ namespace Js
{
ShiftHelper(pArr, scriptContext);
}
+
+ failFastOnError.Completed();
}
else
{
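
AutoDisableInterrupt is used above as a scope guard: if the work between its construction and Completed() throws (OOM or stack overflow), the process fail-fasts rather than continuing with a half-mutated array. A self-contained sketch of that guard shape (not ChakraCore's actual class):

    #include <cstdlib>

    // Terminate the process unless the protected region ran to completion.
    class FailFastUnlessCompleted
    {
        bool completed = false;
    public:
        void Completed() { completed = true; }
        ~FailFastUnlessCompleted()
        {
            if (!completed)
            {
                abort();   // the real guard raises a fail-fast instead
            }
        }
    };

    void MutateInPlace()
    {
        FailFastUnlessCompleted guard;
        // ... in-place mutations that may throw OOM / stack overflow ...
        guard.Completed();   // reached only if no exception escaped
    }
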
diff --git a/lib/Runtime/Library/JavascriptString.cpp b/lib/Runtime/Library/JavascriptString.cpp
index 926d4d4394c..524bfa6f26e 100644
--- a/lib/Runtime/Library/JavascriptString.cpp
+++ b/lib/Runtime/Library/JavascriptString.cpp
@@ -2342,9 +2342,7 @@ namespace Js
Assert(converted == countToCase);
}
-
resultVar = builder.ToString();
-
LeavePinnedScope(); // pThis
return resultVar;
@@ -2606,7 +2604,6 @@ namespace Js
return scriptContext->GetLibrary()->GetTrue();
}
}
-
LEAVE_PINNED_SCOPE(); // pSearch
LEAVE_PINNED_SCOPE(); // pThis
@@ -3625,8 +3622,8 @@ namespace Js
uint string1Len = string1->GetLength();
uint string2Len = string2->GetLength();
- // We want to pin the strings string1 and string2 because flattening of any of these strings could cause a GC and result in the other string getting collected if it was optimized
- // away by the compiler. We would normally have called the EnterPinnedScope/LeavePinnedScope methods here but it adds extra call instructions to the assembly code. As Equals
+ // We want to pin the strings string1 and string2 because flattening of any of these strings could cause a GC and result in the other string getting collected if it was optimized
+ // away by the compiler. We would normally have called the EnterPinnedScope/LeavePinnedScope methods here but it adds extra call instructions to the assembly code. As Equals
// methods could get called a lot of times this can show up as regressions in benchmarks.
volatile Js::JavascriptString** keepAliveString1 = (volatile Js::JavascriptString**)& string1;
volatile Js::JavascriptString** keepAliveString2 = (volatile Js::JavascriptString**)& string2;
@@ -3634,7 +3631,6 @@ namespace Js
UNREFERENCED_PARAMETER(keepAliveString1);
UNREFERENCED_PARAMETER(keepAliveString2);
};
-
int result = wmemcmp(string1->GetString(), string2->GetString(), min(string1Len, string2Len));
return (result == 0) ? (int)(string1Len - string2Len) : result;
@@ -3980,8 +3976,6 @@ namespace Js
T *leftString = T::FromVar(aLeft);
T *rightString = T::FromVar(aRight);
- // We want to pin the strings leftString and rightString because flattening of any of these strings could cause a GC and result in the other string getting collected if it was optimized
- // away by the compiler. We would normally have called the EnterPinnedScope/LeavePinnedScope methods here but it adds extra call instructions to the assembly code. As Equals
// methods could get called a lot of times this can show up as regressions in benchmarks.
volatile T** keepAliveLeftString = (volatile T**)& leftString;
volatile T** keepAliveRightString = (volatile T**)& rightString;
diff --git a/lib/Runtime/Library/ScriptFunction.cpp b/lib/Runtime/Library/ScriptFunction.cpp
index afd697dc706..3092e74250b 100644
--- a/lib/Runtime/Library/ScriptFunction.cpp
+++ b/lib/Runtime/Library/ScriptFunction.cpp
@@ -430,7 +430,7 @@ namespace Js
returnStr = LiteralString::NewCopyBuffer(funcBodyStr, (charcount_t)totalLength, scriptContext);
- LEAVE_PINNED_SCOPE();
+ LEAVE_PINNED_SCOPE(); // computedName
LeavePinnedScope(); // inputString
return returnStr;
@@ -717,7 +717,7 @@ namespace Js
InlineCache * ScriptFunctionWithInlineCache::GetInlineCache(uint index)
{
void** inlineCaches = this->GetInlineCaches();
- Assert(inlineCaches != nullptr);
+ AssertOrFailFast(inlineCaches != nullptr);
AssertOrFailFast(index < this->GetInlineCacheCount());
#if DBG
Assert(this->m_inlineCacheTypes[index] == InlineCacheTypeNone ||
@@ -730,12 +730,22 @@ namespace Js
Field(void**) ScriptFunctionWithInlineCache::GetInlineCaches()
{
// If script function have inline caches pointing to function body and function body got reparsed we need to reset cache
- if (this->GetHasInlineCaches() &&
- !this->GetHasOwnInlineCaches() &&
- this->m_inlineCaches != this->GetFunctionBody()->GetInlineCaches())
+ if (this->GetHasInlineCaches() && !this->GetHasOwnInlineCaches())
{
- Assert(this->GetFunctionBody()->GetCompileCount() > 1);
- this->SetInlineCachesFromFunctionBody();
+ // The script function has inline caches pointing to the function body
+ if (!this->HasFunctionBody())
+ {
+ // The function body got re-deferred and has not been re-parsed yet. Reset the caches to null.
+ this->m_inlineCaches = nullptr;
+ this->inlineCacheCount = 0;
+ this->SetHasInlineCaches(false);
+ }
+ else if (this->m_inlineCaches != this->GetFunctionBody()->GetInlineCaches())
+ {
+ // The function body got reparsed, so we need to reset the caches
+ Assert(this->GetFunctionBody()->GetCompileCount() > 1);
+ this->SetInlineCachesFromFunctionBody();
+ }
}
return this->m_inlineCaches;
diff --git a/lib/Runtime/Library/TypedArray.h b/lib/Runtime/Library/TypedArray.h
index 992c3592fe7..16ddc641870 100644
--- a/lib/Runtime/Library/TypedArray.h
+++ b/lib/Runtime/Library/TypedArray.h
@@ -244,6 +244,7 @@ namespace Js
TypedArray(DynamicType *type): TypedArrayBase(nullptr, 0, 0, sizeof(TypeName), type) { buffer = nullptr; }
public:
+ typedef TypeName TypedArrayType;
class EntryInfo
{
public:
@@ -363,13 +364,12 @@ namespace Js
return true;
}
- inline BOOL DirectSetItemAtRange(__in int32 start, __in uint32 length, __in Js::Var value, TypeName(*convFunc)(Var value, ScriptContext* scriptContext))
+ inline BOOL DirectSetItemAtRange(__in int32 start, __in uint32 length, __in TypeName typedValue)
{
if (CrossSite::IsCrossSiteObjectTyped(this))
{
return false;
}
- TypeName typedValue = convFunc(value, GetScriptContext());
if (this->IsDetachedBuffer()) // 9.4.5.9 IntegerIndexedElementSet
{
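
With the conversion hoisted out of DirectSetItemAtRange, the typed array receives an already-converted scalar, so no script-visible conversion (and thus no buffer detach) can occur in the middle of the ranged store; the new TypedArrayType typedef is what lets OP_Memset name the element type when converting. A simplified sketch of the convert-then-fill ordering (generic code, not the TypedArray implementation):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // The caller converts the value to int8_t before calling; nothing inside the
    // fill can run user code or invalidate 'storage'.
    static void FillRange(std::vector<int8_t>& storage, size_t start, size_t length, int8_t convertedValue)
    {
        if (start >= storage.size() || length > storage.size() - start)
        {
            return;   // range-check before touching memory
        }
        std::fill_n(storage.begin() + start, length, convertedValue);
    }
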
diff --git a/test/Function/FuncBody.bug227901.js b/test/Function/FuncBody.bug227901.js
index 18217e50346..6a26a19d70d 100644
--- a/test/Function/FuncBody.bug227901.js
+++ b/test/Function/FuncBody.bug227901.js
@@ -7,7 +7,7 @@ function test0(){
var obj0 = {};
var arrObj0 = {};
var func0 = function(){
- eval("");
+ eval(" ");
}
var func1 = function(){
var obj4 = {nd0: {nd0: {lf0: {prop0: -46, prop1: 3, prop2: -2147483648, length: -6.02625054824609E+18 , method0: func0}}}};
diff --git a/test/Function/FuncBody.bug232281.js b/test/Function/FuncBody.bug232281.js
index 82fcd35743c..034f505ee1d 100644
--- a/test/Function/FuncBody.bug232281.js
+++ b/test/Function/FuncBody.bug232281.js
@@ -10,7 +10,7 @@ function test0(){
var func0 = function(){
}
var func1 = function(argObj0,argArr1,argMath2){
- eval("");
+ eval(" ");
func0();
}
Object.prototype.method0 = func1;