summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--src/inc/jithelpers.h2
-rw-r--r--src/jit/assertionprop.cpp12
-rw-r--r--src/jit/codegenarm.cpp97
-rw-r--r--src/jit/codegenarm64.cpp6
-rw-r--r--src/jit/codegenlinear.h9
-rw-r--r--src/jit/codegenxarch.cpp180
-rw-r--r--src/jit/compiler.h6
-rw-r--r--src/jit/emit.cpp24
-rw-r--r--src/jit/emit.h31
-rw-r--r--src/jit/emitxarch.cpp2
-rw-r--r--src/jit/flowgraph.cpp2
-rw-r--r--src/jit/gentree.cpp18
-rw-r--r--src/jit/gentree.h14
-rw-r--r--src/jit/importer.cpp8
-rw-r--r--src/jit/jit.h8
-rw-r--r--src/jit/lclvars.cpp20
-rw-r--r--src/jit/liveness.cpp87
-rw-r--r--src/jit/lowerxarch.cpp27
-rw-r--r--src/jit/lsra.cpp75
-rw-r--r--src/jit/morph.cpp120
-rw-r--r--src/jit/optimizer.cpp19
-rw-r--r--src/jit/rangecheck.cpp7
-rw-r--r--src/jit/rationalize.cpp20
-rw-r--r--src/jit/regalloc.cpp3
-rw-r--r--src/jit/regset.cpp47
-rw-r--r--src/jit/stackfp.cpp3
-rw-r--r--src/jit/valuenum.cpp20
-rw-r--r--src/jit/valuenumfuncs.h4
-rw-r--r--src/mscorlib/src/System/AppContext/AppContextDefaultValues.Defaults.cs1
-rw-r--r--src/mscorlib/src/System/AppContext/AppContextSwitches.cs12
-rw-r--r--src/mscorlib/src/System/Diagnostics/Eventing/ActivityTracker.cs4
-rw-r--r--src/mscorlib/src/System/Diagnostics/Eventing/EventProvider.cs25
-rw-r--r--src/mscorlib/src/System/Diagnostics/Eventing/EventSource.cs1069
-rw-r--r--src/mscorlib/src/System/Diagnostics/Eventing/FrameworkEventSource.cs2
-rw-r--r--src/mscorlib/src/System/Diagnostics/Eventing/StubEnvironment.cs1
-rw-r--r--src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/ArrayTypeInfo.cs1
-rw-r--r--src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/ConcurrentSet.cs1
-rw-r--r--src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/ConcurrentSetItem.cs1
-rw-r--r--src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/DataCollector.cs1
-rw-r--r--src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EmptyStruct.cs3
-rw-r--r--src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EnumHelper.cs3
-rw-r--r--src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EnumerableTypeInfo.cs1
-rw-r--r--src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EventDataAttribute.cs3
-rw-r--r--src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EventFieldAttribute.cs3
-rw-r--r--src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EventFieldFormat.cs3
-rw-r--r--src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EventIgnoreAttribute.cs3
-rw-r--r--src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EventPayload.cs5
-rw-r--r--src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EventSourceActivity.cs3
-rw-r--r--src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EventSourceOptions.cs1
-rw-r--r--src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/FieldMetadata.cs1
-rw-r--r--src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/InvokeTypeInfo.cs1
-rw-r--r--src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/NameInfo.cs17
-rw-r--r--src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/PropertyAccessor.cs3
-rw-r--r--src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/PropertyAnalysis.cs3
-rw-r--r--src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/SimpleEventTypes.cs3
-rw-r--r--src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/SimpleTypeInfos.cs1
-rw-r--r--src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/Statics.cs1
-rw-r--r--src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TraceLoggingDataCollector.cs3
-rw-r--r--src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TraceLoggingDataType.cs1
-rw-r--r--src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TraceLoggingEventSource.cs15
-rw-r--r--src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TraceLoggingEventTraits.cs1
-rw-r--r--src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TraceLoggingEventTypes.cs1
-rw-r--r--src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TraceLoggingMetadataCollector.cs1
-rw-r--r--src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TraceLoggingTypeInfo.cs1
-rw-r--r--src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TraceLoggingTypeInfo_T.cs1
-rw-r--r--src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TypeAnalysis.cs1
-rw-r--r--src/vm/codeman.cpp9
-rw-r--r--src/vm/codeman.h10
-rw-r--r--src/vm/jitinterface.cpp77
-rw-r--r--src/vm/rejit.cpp1
-rw-r--r--src/zap/zapinfo.cpp15
71 files changed, 1439 insertions, 745 deletions
diff --git a/src/inc/jithelpers.h b/src/inc/jithelpers.h
index 686004499c..6542582a17 100644
--- a/src/inc/jithelpers.h
+++ b/src/inc/jithelpers.h
@@ -175,7 +175,7 @@
JITHELPER1(CORINFO_HELP_VERIFICATION_RUNTIME_CHECK, JIT_VerificationRuntimeCheck,CORINFO_HELP_SIG_REG_ONLY, MDIL_HELP_VERIFICATION_RUNTIME_CHECK)
// GC support
- JITHELPER1(CORINFO_HELP_STOP_FOR_GC, JIT_RareDisableHelper,CORINFO_HELP_SIG_REG_ONLY, MDIL_HELP_STOP_FOR_GC)
+ DYNAMICJITHELPER1(CORINFO_HELP_STOP_FOR_GC, JIT_RareDisableHelper,CORINFO_HELP_SIG_REG_ONLY, MDIL_HELP_STOP_FOR_GC)
#ifdef ENABLE_FAST_GCPOLL_HELPER
DYNAMICJITHELPER1(CORINFO_HELP_POLL_GC, JIT_PollGC, CORINFO_HELP_SIG_REG_ONLY, MDIL_HELP_POLL_GC)
#else
diff --git a/src/jit/assertionprop.cpp b/src/jit/assertionprop.cpp
index 3a7f8437dd..b3c1da6944 100644
--- a/src/jit/assertionprop.cpp
+++ b/src/jit/assertionprop.cpp
@@ -2708,8 +2708,7 @@ GenTreePtr Compiler::optAssertionPropGlobal_RelOp(EXPSET_TP assertions, const Ge
printf("Assertion index=#%02u: ", index);
printTreeID(op1);
printf(" %s ", (curAssertion->assertionKind == OAK_EQUAL) ? "==" : "!=");
- if (op1->TypeGet() == TYP_INT || op1->TypeGet() == TYP_BYTE || op1->TypeGet() == TYP_SHORT ||
- op1->TypeGet() == TYP_UINT || op1->TypeGet() == TYP_UBYTE || op1->TypeGet() == TYP_USHORT)
+ if (genActualType(op1->TypeGet()) == TYP_INT)
{
printf("%d\n", vnStore->ConstantValue<int>(vnCns));
}
@@ -2742,8 +2741,7 @@ GenTreePtr Compiler::optAssertionPropGlobal_RelOp(EXPSET_TP assertions, const Ge
lvaTable[op1->gtLclVar.gtLclNum].decRefCnts(compCurBB->getBBWeight(this), this);
// Change the oper to const.
- if (op1->TypeGet() == TYP_INT || op1->TypeGet() == TYP_BYTE || op1->TypeGet() == TYP_SHORT ||
- op1->TypeGet() == TYP_UINT || op1->TypeGet() == TYP_UBYTE || op1->TypeGet() == TYP_USHORT)
+ if (genActualType(op1->TypeGet()) == TYP_INT)
{
op1->ChangeOperConst(GT_CNS_INT);
op1->gtIntCon.gtIconVal = vnStore->ConstantValue<int>(vnCns);
@@ -3383,12 +3381,12 @@ GenTreePtr Compiler::optAssertionProp_BndsChk(EXPSET_TP assertions, const GenTre
assert(index1 != index2);
// It can always be considered as redundant with any previous higher constant value
- // a[K1] followed by a[K2], with K1 > K2
- if (index1 >= index2)
+ // a[K1] followed by a[K2], with K2 >= 0 and K1 >= K2
+ if (index2 >= 0 && index1 >= index2)
{
isRedundant = true;
#ifdef DEBUG
- dbgMsg = "a[K1] followed by a[K2], with K1 > K2";
+ dbgMsg = "a[K1] followed by a[K2], with K2 >= 0 and K1 >= K2";
#endif
}
}
diff --git a/src/jit/codegenarm.cpp b/src/jit/codegenarm.cpp
index 347bf36fec..3f8e52c115 100644
--- a/src/jit/codegenarm.cpp
+++ b/src/jit/codegenarm.cpp
@@ -821,64 +821,6 @@ void CodeGen::genCodeForBBlist()
case BBJ_CALLFINALLY:
-#if defined(_TARGET_X86_)
-
- /* If we are about to invoke a finally locally from a try block,
- we have to set the hidden slot corresponding to the finally's
- nesting level. When invoked in response to an exception, the
- EE usually does it.
-
- We must have : BBJ_CALLFINALLY followed by a BBJ_ALWAYS.
-
- This code depends on this order not being messed up.
- We will emit :
- mov [ebp-(n+1)],0
- mov [ebp- n ],0xFC
- push &step
- jmp finallyBlock
-
- step: mov [ebp- n ],0
- jmp leaveTarget
- leaveTarget:
- */
-
- noway_assert(isFramePointerUsed());
-
- // Get the nesting level which contains the finally
- compiler->fgGetNestingLevel(block, &finallyNesting);
-
- // The last slot is reserved for ICodeManager::FixContext(ppEndRegion)
- unsigned filterEndOffsetSlotOffs;
- filterEndOffsetSlotOffs = (unsigned)(lvaLclSize(lvaShadowSPslotsVar) - (sizeof(void*)));
-
- unsigned curNestingSlotOffs;
- curNestingSlotOffs = (unsigned)(filterEndOffsetSlotOffs - ((finallyNesting + 1) * sizeof(void*)));
-
- // Zero out the slot for the next nesting level
- instGen_Store_Imm_Into_Lcl(TYP_I_IMPL, EA_PTRSIZE, 0,
- lvaShadowSPslotsVar, curNestingSlotOffs - sizeof(void*));
-
- instGen_Store_Imm_Into_Lcl(TYP_I_IMPL, EA_PTRSIZE, LCL_FINALLY_MARK,
- lvaShadowSPslotsVar, curNestingSlotOffs);
-
- // Now push the address of where the finally funclet should
- // return to directly.
- if ( !(block->bbFlags & BBF_RETLESS_CALL) )
- {
- assert(block->isBBCallAlwaysPair());
- getEmitter()->emitIns_J(INS_push_hide, block->bbNext->bbJumpDest);
- }
- else
- {
- // EE expects a DWORD, so we give him 0
- inst_IV(INS_push_hide, 0);
- }
-
- // Jump to the finally BB
- inst_JMP(EJ_jmp, block->bbJumpDest);
-
-#elif defined(_TARGET_ARM_)
-
// Now set REG_LR to the address of where the finally funclet should
// return to directly.
@@ -922,45 +864,6 @@ void CodeGen::genCodeForBBlist()
// Jump to the finally BB
inst_JMP(EJ_jmp, block->bbJumpDest);
-#elif defined(_TARGET_AMD64_)
-
- // Generate a call to the finally, like this:
- // mov rcx,qword ptr [rbp + 20H] // Load rcx with PSPSym
- // call finally-funclet
- // jmp finally-return // Only for non-retless finally calls
-
- getEmitter()->emitIns_R_S(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_RCX, compiler->lvaPSPSym, 0);
- getEmitter()->emitIns_J(INS_call, block->bbJumpDest);
-
- if (block->bbFlags & BBF_RETLESS_CALL)
- {
- // We have a retless call, and the last instruction generated was a call.
- // If the next block is in a different EH region (or is the end of the code
- // block), then we need to generate a breakpoint here (since it will never
- // get executed) to get proper unwind behavior.
-
- if ((block->bbNext == nullptr) ||
- !BasicBlock::sameEHRegion(block, block->bbNext))
- {
- instGen(INS_BREAKPOINT); // This should never get executed
- }
- }
- else
- {
- // because of the way the flowgraph is connected, the liveness info for this one instruction
- // after the call is not (can not be) correct in cases where a variable has a last use in the
- // handler. So turn off GC reporting for this single jmp instruction.
- getEmitter()->emitMakeRemainderNonInterruptible();
-
- // Now push the address of where the finally funclet should
- // return to directly.
- inst_JMP(EJ_jmp, block->bbNext->bbJumpDest);
- }
-
-#else
- NYI("TARGET");
-#endif
-
// The BBJ_ALWAYS is used because the BBJ_CALLFINALLY can't point to the
// jump target using bbJumpDest - that is already used to point
// to the finally block. So just skip past the BBJ_ALWAYS unless the
diff --git a/src/jit/codegenarm64.cpp b/src/jit/codegenarm64.cpp
index 71b238308d..a2cd2db472 100644
--- a/src/jit/codegenarm64.cpp
+++ b/src/jit/codegenarm64.cpp
@@ -1897,7 +1897,7 @@ void CodeGen::genCodeForBBlist()
// Because of the way the flowgraph is connected, the liveness info for this one instruction
// after the call is not (can not be) correct in cases where a variable has a last use in the
// handler. So turn off GC reporting for this single instruction.
- getEmitter()->emitMakeRemainderNonInterruptible();
+ getEmitter()->emitDisableGC();
// Now go to where the finally funclet needs to return to.
if (block->bbNext->bbJumpDest == block->bbNext->bbNext)
@@ -1912,6 +1912,8 @@ void CodeGen::genCodeForBBlist()
{
inst_JMP(EJ_jmp, block->bbNext->bbJumpDest);
}
+
+ getEmitter()->emitEnableGC();
}
// The BBJ_ALWAYS is used because the BBJ_CALLFINALLY can't point to the
@@ -2256,7 +2258,7 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
switch (treeNode->gtOper)
{
case GT_START_NONGC:
- getEmitter()->emitMakeRemainderNonInterruptible();
+ getEmitter()->emitDisableGC();
break;
case GT_PROF_HOOK:
diff --git a/src/jit/codegenlinear.h b/src/jit/codegenlinear.h
index 515e7d0741..57eac7ced4 100644
--- a/src/jit/codegenlinear.h
+++ b/src/jit/codegenlinear.h
@@ -101,6 +101,8 @@
void genConsumeAddrMode(GenTreeAddrMode *mode);
+ void genConsumeBlockOp(GenTreeBlkOp* blkNode, regNumber dstReg, regNumber srcReg, regNumber sizeReg);
+
void genConsumeRegs(GenTree* tree);
void genConsumeOperands(GenTreeOp* tree);
@@ -157,4 +159,11 @@
return(varDsc->lvIsRegCandidate());
}
+#ifdef DEBUG
+ GenTree* lastConsumedNode;
+ void genCheckConsumeNode(GenTree* treeNode);
+#else // !DEBUG
+ inline void genCheckConsumeNode(GenTree* treeNode) {}
+#endif // DEBUG
+
#endif // !LEGACY_BACKEND
diff --git a/src/jit/codegenxarch.cpp b/src/jit/codegenxarch.cpp
index 82677a3b6a..e6252daaf4 100644
--- a/src/jit/codegenxarch.cpp
+++ b/src/jit/codegenxarch.cpp
@@ -913,7 +913,7 @@ void CodeGen::genCodeForBBlist()
// Because of the way the flowgraph is connected, the liveness info for this one instruction
// after the call is not (can not be) correct in cases where a variable has a last use in the
// handler. So turn off GC reporting for this single instruction.
- getEmitter()->emitMakeRemainderNonInterruptible();
+ getEmitter()->emitDisableGC();
// Now go to where the finally funclet needs to return to.
if (block->bbNext->bbJumpDest == block->bbNext->bbNext)
@@ -928,6 +928,8 @@ void CodeGen::genCodeForBBlist()
{
inst_JMP(EJ_jmp, block->bbNext->bbJumpDest);
}
+
+ getEmitter()->emitEnableGC();
}
// The BBJ_ALWAYS is used because the BBJ_CALLFINALLY can't point to the
@@ -1419,6 +1421,10 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
emitter *emit = getEmitter();
#ifdef DEBUG
+ // Validate that all the operands for the current node are consumed in order.
+ // This is important because LSRA ensures that any necessary copies will be
+ // handled correctly.
+ lastConsumedNode = nullptr;
if (compiler->verbose)
{
unsigned seqNum = treeNode->gtSeqNum; // Useful for setting a conditional break in Visual Studio
@@ -1447,7 +1453,7 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
switch (treeNode->gtOper)
{
case GT_START_NONGC:
- getEmitter()->emitMakeRemainderNonInterruptible();
+ getEmitter()->emitDisableGC();
break;
case GT_PROF_HOOK:
@@ -3209,9 +3215,7 @@ void CodeGen::genCodeForInitBlkRepStos(GenTreeInitBlk* initBlkNode)
#endif // DEBUG
- genConsumeRegAndCopy(blockSize, REG_RCX);
- genConsumeRegAndCopy(initVal, REG_RAX);
- genConsumeRegAndCopy(dstAddr, REG_RDI);
+ genConsumeBlockOp(initBlkNode, REG_RDI, REG_RAX, REG_RCX);
instGen(INS_r_stosb);
}
@@ -3242,8 +3246,7 @@ void CodeGen::genCodeForInitBlkUnroll(GenTreeInitBlk* initBlkNode)
emitter *emit = getEmitter();
- genConsumeReg(initVal);
- genConsumeReg(dstAddr);
+ genConsumeOperands(initBlkNode->gtGetOp1()->AsOp());
// If the initVal was moved, or spilled and reloaded to a different register,
// get the original initVal from below the GT_RELOAD, but only after capturing the valReg,
@@ -3337,9 +3340,7 @@ void CodeGen::genCodeForInitBlk(GenTreeInitBlk* initBlkNode)
}
#endif // DEBUG
- genConsumeRegAndCopy(blockSize, REG_ARG_2);
- genConsumeRegAndCopy(initVal, REG_ARG_1);
- genConsumeRegAndCopy(dstAddr, REG_ARG_0);
+ genConsumeBlockOp(initBlkNode, REG_ARG_0, REG_ARG_1, REG_ARG_2);
genEmitHelperCall(CORINFO_HELP_MEMSET, 0, EA_UNKNOWN);
#else // !_TARGET_AMD64_
@@ -3502,9 +3503,7 @@ void CodeGen::genCodeForCpBlkRepMovs(GenTreeCpBlk* cpBlkNode)
#endif // DEBUG
- genConsumeRegAndCopy(blockSize, REG_RCX);
- genConsumeRegAndCopy(srcAddr, REG_RSI);
- genConsumeRegAndCopy(dstAddr, REG_RDI);
+ genConsumeBlockOp(cpBlkNode, REG_RDI, REG_RSI, REG_RCX);
instGen(INS_r_movsb);
}
@@ -3547,10 +3546,8 @@ void CodeGen::genCodeForCpObj(GenTreeCpObj* cpObjNode)
// Consume these registers.
// They may now contain gc pointers (depending on their type; gcMarkRegPtrVal will "do the right thing").
- genConsumeRegAndCopy(srcAddr, REG_RSI);
+ genConsumeBlockOp(cpObjNode, REG_RDI, REG_RSI, REG_NA);
gcInfo.gcMarkRegPtrVal(REG_RSI, srcAddr->TypeGet());
-
- genConsumeRegAndCopy(dstAddr, REG_RDI);
gcInfo.gcMarkRegPtrVal(REG_RDI, dstAddr->TypeGet());
unsigned slots = cpObjNode->gtSlots;
@@ -3674,9 +3671,7 @@ void CodeGen::genCodeForCpBlk(GenTreeCpBlk* cpBlkNode)
}
#endif // DEBUG
- genConsumeRegAndCopy(blockSize, REG_ARG_2);
- genConsumeRegAndCopy(srcAddr, REG_ARG_1);
- genConsumeRegAndCopy(dstAddr, REG_ARG_0);
+ genConsumeBlockOp(cpBlkNode, REG_ARG_0, REG_ARG_1, REG_ARG_2);
genEmitHelperCall(CORINFO_HELP_MEMCPY, 0, EA_UNKNOWN);
#else // !_TARGET_AMD64_
@@ -3980,14 +3975,34 @@ CodeGen::genCodeForArrOffset(GenTreeArrOffs* arrOffset)
regMaskTP tmpRegMask = arrOffset->gtRsvdRegs;
regNumber tmpReg = genRegNumFromMask(tmpRegMask);
+ // First, consume the operands in the correct order.
+ regNumber offsetReg = REG_NA;
+ if (!offsetNode->IsZero())
+ {
+ offsetReg = genConsumeReg(offsetNode);
+ }
+ else
+ {
+ assert(offsetNode->isContained());
+ }
+ regNumber indexReg = genConsumeReg(indexNode);
+ // Although arrReg may not be used in the constant-index case, if we have generated
+ // the value into a register, we must consume it, otherwise we will fail to end the
+ // live range of the gc ptr.
+ // TODO-CQ: Currently arrObj will always have a register allocated to it.
+ // We could avoid allocating a register for it, which would be of value if the arrObj
+ // is an on-stack lclVar.
+ regNumber arrReg = REG_NA;
+ if (arrObj->gtHasReg())
+ {
+ arrReg = genConsumeReg(arrObj);
+ }
+
if (!offsetNode->IsZero())
{
// Evaluate tgtReg = offsetReg*dim_size + indexReg.
// tmpReg is used to load dim_size and the result of the multiplication.
// Note that dim_size will never be negative.
- regNumber offsetReg = genConsumeReg(offsetNode);
- regNumber indexReg = genConsumeReg(indexNode);
- regNumber arrReg = genConsumeReg(arrObj);
getEmitter()->emitIns_R_AR(INS_mov,
emitActualTypeSize(TYP_INT),
@@ -4011,7 +4026,6 @@ CodeGen::genCodeForArrOffset(GenTreeArrOffs* arrOffset)
}
else
{
- regNumber indexReg = genConsumeReg(indexNode);
if (indexReg != tgtReg)
{
inst_RV_RV(INS_mov, tgtReg, indexReg, TYP_INT);
@@ -4108,7 +4122,8 @@ void CodeGen::genCodeForShift(GenTreePtr operand, GenTreePtr shiftBy,
if (!isRMW)
{
- operandReg = genConsumeReg(operand);
+ genConsumeOperands(parent->AsOp());
+ operandReg = operand->gtRegNum;
}
else
{
@@ -4137,7 +4152,7 @@ void CodeGen::genCodeForShift(GenTreePtr operand, GenTreePtr shiftBy,
assert(actualOperand->OperGet() == GT_CLS_VAR_ADDR);
// We don't expect to see GT_COPY or GT_RELOAD for GT_CLS_VAR_ADDR
- // so 'actualOperand' should be the same as 'operand'
+ // so 'actualOperand' should be the same as 'operand'
assert(operand == actualOperand);
}
@@ -4211,7 +4226,7 @@ void CodeGen::genCodeForShift(GenTreePtr operand, GenTreePtr shiftBy,
// sit in ECX, in case this didn't happen, LSRA expects
// the code generator to move it since it's a single
// register destination requirement.
- regNumber shiftReg = genConsumeReg(shiftBy);
+ regNumber shiftReg = shiftBy->gtRegNum;
if (shiftReg != REG_RCX)
{
// Issue the mov to RCX:
@@ -4265,7 +4280,32 @@ void CodeGen::genUnspillRegIfNeeded(GenTree *tree)
LclVarDsc* varDsc = &compiler->lvaTable[lcl->gtLclNum];
// Load local variable from its home location.
- inst_RV_TT(ins_Load(unspillTree->gtType, compiler->isSIMDTypeLocalAligned(lcl->gtLclNum)), dstReg, unspillTree);
+ // In most cases the tree type will indicate the correct type to use for the load.
+ // However, if it is NOT a normalizeOnLoad lclVar (i.e. NOT a small int that always gets
+ // widened when loaded into a register), and its size is not the same as genActualType of
+ // the type of the lclVar, then we need to change the type of the tree node when loading.
+ // This situation happens due to "optimizations" that avoid a cast and
+ // simply retype the node when using long type lclVar as an int.
+ // While loading the int in that case would work for this use of the lclVar, if it is
+ // later used as a long, we will have incorrectly truncated the long.
+ // In the normalizeOnLoad case ins_Load will return an appropriate sign- or zero-
+ // extending load.
+
+ var_types treeType = unspillTree->TypeGet();
+ if (treeType != genActualType(varDsc->lvType) &&
+ !varTypeIsGC(treeType) &&
+ !varDsc->lvNormalizeOnLoad())
+ {
+ assert(!varTypeIsGC(varDsc));
+ var_types spillType = genActualType(varDsc->lvType);
+ unspillTree->gtType = spillType;
+ inst_RV_TT(ins_Load(spillType, compiler->isSIMDTypeLocalAligned(lcl->gtLclNum)), dstReg, unspillTree);
+ unspillTree->gtType = treeType;
+ }
+ else
+ {
+ inst_RV_TT(ins_Load(treeType, compiler->isSIMDTypeLocalAligned(lcl->gtLclNum)), dstReg, unspillTree);
+ }
unspillTree->SetInReg();
@@ -4331,6 +4371,10 @@ void CodeGen::genUnspillRegIfNeeded(GenTree *tree)
// of locating the value on the desired register.
void CodeGen::genConsumeRegAndCopy(GenTree *tree, regNumber needReg)
{
+ if (needReg == REG_NA)
+ {
+ return;
+ }
regNumber treeReg = genConsumeReg(tree);
if (treeReg != needReg)
{
@@ -4414,6 +4458,39 @@ void CodeGen::genRegCopy(GenTree* treeNode)
genProduceReg(treeNode);
}
+// Check that registers are consumed in the right order for the current node being generated.
+#ifdef DEBUG
+void CodeGen::genCheckConsumeNode(GenTree* treeNode)
+{
+ // GT_PUTARG_REG is consumed out of order.
+ if (treeNode->gtSeqNum != 0 && treeNode->OperGet() != GT_PUTARG_REG)
+ {
+ if (lastConsumedNode != nullptr)
+ {
+ if (treeNode == lastConsumedNode)
+ {
+ if (verbose)
+ {
+ printf("Node was consumed twice:\n ");
+ compiler->gtDispTree(treeNode, nullptr, nullptr, true);
+ }
+ }
+ else
+ {
+ if (verbose && (lastConsumedNode->gtSeqNum > treeNode->gtSeqNum))
+ {
+ printf("Nodes were consumed out-of-order:\n");
+ compiler->gtDispTree(lastConsumedNode, nullptr, nullptr, true);
+ compiler->gtDispTree(treeNode, nullptr, nullptr, true);
+ }
+ // assert(lastConsumedNode->gtSeqNum < treeNode->gtSeqNum);
+ }
+ }
+ lastConsumedNode = treeNode;
+ }
+}
+#endif // DEBUG
+
// Do liveness update for a subnode that is being consumed by codegen.
regNumber CodeGen::genConsumeReg(GenTree *tree)
{
@@ -4473,6 +4550,7 @@ regNumber CodeGen::genConsumeReg(GenTree *tree)
gcInfo.gcMarkRegSetNpt(genRegMask(tree->gtRegNum));
}
+ genCheckConsumeNode(tree);
return tree->gtRegNum;
}
@@ -4492,10 +4570,7 @@ void CodeGen::genConsumeAddress(GenTree* addr)
// do liveness update for a subnode that is being consumed by codegen
void CodeGen::genConsumeAddrMode(GenTreeAddrMode *addr)
{
- if (addr->Base())
- genConsumeReg(addr->Base());
- if (addr->Index())
- genConsumeReg(addr->Index());
+ genConsumeOperands(addr);
}
void CodeGen::genConsumeRegs(GenTree* tree)
@@ -4567,6 +4642,44 @@ void CodeGen::genConsumeOperands(GenTreeOp* tree)
}
}
+void CodeGen::genConsumeBlockOp(GenTreeBlkOp* blkNode, regNumber dstReg, regNumber srcReg, regNumber sizeReg)
+{
+ // We have to consume the registers, and perform any copies, in the actual execution order.
+ // The nominal order is: dst, src, size. However this may have been changed
+ // with reverse flags on either the GT_LIST or the GT_INITVAL itself.
+
+ GenTree* dst = blkNode->Dest();
+ GenTree* src = blkNode->gtOp.gtOp1->gtOp.gtOp2;
+ GenTree* size = blkNode->gtOp.gtOp2;
+ if (!blkNode->IsReverseOp() && !blkNode->gtOp1->IsReverseOp())
+ {
+ genConsumeRegAndCopy(dst, dstReg);
+ genConsumeRegAndCopy(src, srcReg);
+ genConsumeRegAndCopy(size, sizeReg);
+ }
+ else if (!blkNode->IsReverseOp())
+ {
+ // We know that the GT_LIST must be reversed.
+ genConsumeRegAndCopy(src, srcReg);
+ genConsumeRegAndCopy(dst, dstReg);
+ genConsumeRegAndCopy(size, sizeReg);
+ }
+ else if (!blkNode->gtOp1->IsReverseOp())
+ {
+ // We know from above that the initBlkNode must be reversed.
+ genConsumeRegAndCopy(size, sizeReg);
+ genConsumeRegAndCopy(dst, dstReg);
+ genConsumeRegAndCopy(src, srcReg);
+ }
+ else
+ {
+ // They are BOTH reversed.
+ genConsumeRegAndCopy(size, sizeReg);
+ genConsumeRegAndCopy(src, srcReg);
+ genConsumeRegAndCopy(dst, dstReg);
+ }
+}
+
// do liveness update for register produced by the current node in codegen
void CodeGen::genProduceReg(GenTree *tree)
{
@@ -4576,7 +4689,10 @@ void CodeGen::genProduceReg(GenTree *tree)
{
// Store local variable to its home location.
tree->gtFlags &= ~GTF_REG_VAL;
- inst_TT_RV(ins_Store(tree->gtType, compiler->isSIMDTypeLocalAligned(tree->gtLclVarCommon.gtLclNum)), tree, tree->gtRegNum);
+ // Ensure that lclVar stores are typed correctly.
+ unsigned varNum = tree->gtLclVarCommon.gtLclNum;
+ assert(!compiler->lvaTable[varNum].lvNormalizeOnStore() || (tree->TypeGet() == genActualType(compiler->lvaTable[varNum].TypeGet())));
+ inst_TT_RV(ins_Store(tree->gtType, compiler->isSIMDTypeLocalAligned(varNum)), tree, tree->gtRegNum);
}
else
{
diff --git a/src/jit/compiler.h b/src/jit/compiler.h
index ba849109ff..9e79a98b00 100644
--- a/src/jit/compiler.h
+++ b/src/jit/compiler.h
@@ -2392,6 +2392,7 @@ public :
static fgWalkPreFn lvaDecRefCntsCB;
void lvaDecRefCnts (GenTreePtr tree);
void lvaRecursiveDecRefCounts(GenTreePtr tree);
+ void lvaRecursiveIncRefCounts(GenTreePtr tree);
void lvaAdjustRefCnts ();
@@ -3447,10 +3448,11 @@ public :
VARSET_VALRET_TP fgComputeLife (VARSET_VALARG_TP life,
GenTreePtr startNode,
GenTreePtr endNode,
- VARSET_VALARG_TP volatileVars
+ VARSET_VALARG_TP volatileVars,
+ bool* pStmtInfoDirty
DEBUGARG( bool * treeModf));
- bool fgRemoveDeadStore(GenTree** pTree, LclVarDsc* varDsc, VARSET_TP life, bool* doAgain DEBUGARG(bool* treeModf));
+ bool fgRemoveDeadStore(GenTree** pTree, LclVarDsc* varDsc, VARSET_TP life, bool* doAgain, bool* pStmtInfoDirty DEBUGARG(bool* treeModf));
// For updating liveset during traversal AFTER fgComputeLife has completed
VARSET_VALRET_TP fgGetVarBits (GenTreePtr tree);
diff --git a/src/jit/emit.cpp b/src/jit/emit.cpp
index 6cce83addf..20f8af3fa2 100644
--- a/src/jit/emit.cpp
+++ b/src/jit/emit.cpp
@@ -787,7 +787,6 @@ insGroup* emitter::emitSavIG(bool emitAdd)
if (!(ig->igFlags & IGF_EMIT_ADD))
{
- assert((ig->igFlags & IGF_PLACEHOLDER) == 0);
ig->igGCregs = (regMaskSmall)emitInitGCrefRegs;
}
@@ -1029,6 +1028,7 @@ void emitter::emitBegFN(bool hasFramePtr
emitFwdJumps = false;
emitNoGCIG = false;
+ emitForceNewIG = false;
/* We have not recorded any live sets */
@@ -1321,8 +1321,11 @@ void * emitter::emitAllocInstr(size_t sz, emitAttr opsz)
/* Make sure we have enough space for the new instruction */
- if (emitCurIGfreeNext + sz >= emitCurIGfreeEndp)
+ if ((emitCurIGfreeNext + sz >= emitCurIGfreeEndp) ||
+ emitForceNewIG)
+ {
emitNxtIG(true);
+ }
/* Grab the space for the instruction */
@@ -1720,6 +1723,20 @@ void emitter::emitCreatePlaceholderIG(insGroupPlaceholderType igT
}
else
{
+ if (igType == IGPT_EPILOG
+#if FEATURE_EH_FUNCLETS
+ || igType == IGPT_FUNCLET_EPILOG
+#endif // FEATURE_EH_FUNCLETS
+ )
+ {
+ // If this was an epilog, then assume this is the end of any currently in progress
+ // no-GC region. If a block after the epilog needs to be no-GC, it needs to call
+ // emitter::emitDisableGC() directly. This behavior is depended upon by the fast
+ // tailcall implementation, which disables GC at the beginning of argument setup,
+ // but assumes that after the epilog it will be re-enabled.
+ emitNoGCIG = false;
+ }
+
emitNewIG();
// We don't know what the GC ref state will be at the end of the placeholder
@@ -6413,6 +6430,9 @@ void emitter::emitNxtIG(bool emitAdd)
if (emitAdd)
emitCurIG->igFlags |= IGF_EMIT_ADD;
+
+ // We've created a new IG; no need to force another one.
+ emitForceNewIG = false;
}
diff --git a/src/jit/emit.h b/src/jit/emit.h
index 0b1c26ce0a..913fdcb692 100644
--- a/src/jit/emit.h
+++ b/src/jit/emit.h
@@ -1526,6 +1526,7 @@ private:
bool emitFwdJumps; // forward jumps present?
bool emitNoGCIG; // Are we generating IGF_NOGCINTERRUPT insGroups (for prologs, epilogs, etc.)
+ bool emitForceNewIG; // If we generate an instruction, and not another instruction group, force create a new emitAdd instruction group.
BYTE * emitCurIGfreeNext; // next available byte in buffer
BYTE * emitCurIGfreeEndp; // one byte past the last available byte in buffer
@@ -1605,7 +1606,6 @@ private:
void emitInsertIGAfter(insGroup* insertAfterIG, insGroup* ig);
void emitNewIG();
- void emitMakeRemainderNonInterruptible();
void emitDisableGC();
void emitEnableGC();
void emitGenIG(insGroup *ig);
@@ -2521,22 +2521,12 @@ void emitter::emitNewIG()
emitGenIG(ig);
}
-// start a new instruction group that will be non-interruptible
-// this is used for the very last part of the callfinally block on
-// x64
-inline void emitter::emitMakeRemainderNonInterruptible()
-{
- emitNxtIG();
-
- emitCurIG->igFlags |= IGF_NOGCINTERRUPT;
-}
-
// Start a new instruction group that is not interruptable
inline void emitter::emitDisableGC()
{
emitNoGCIG = true;
- if (emitCurIGinsCnt > 0)
+ if (emitCurIGnonEmpty())
{
emitNxtIG(true);
}
@@ -2550,14 +2540,15 @@ inline void emitter::emitDisableGC()
inline void emitter::emitEnableGC()
{
emitNoGCIG = false;
- if (emitCurIGinsCnt > 0)
- {
- emitNxtIG(true);
- }
- else
- {
- emitCurIG->igFlags &= ~IGF_NOGCINTERRUPT;
- }
+
+ // The next time an instruction needs to be generated, force a new instruction group.
+ // It will be an emitAdd group in that case. Note that the next thing we see might be
+ // a label, which will force a non-emitAdd group.
+ //
+ // Note that we can't just create a new instruction group here, because we don't know
+ // if there are going to be any instructions added to it, and we don't support empty
+ // instruction groups.
+ emitForceNewIG = true;
}
diff --git a/src/jit/emitxarch.cpp b/src/jit/emitxarch.cpp
index 3996c1395b..2d106e531e 100644
--- a/src/jit/emitxarch.cpp
+++ b/src/jit/emitxarch.cpp
@@ -8982,7 +8982,7 @@ BYTE* emitter::emitOutputR(BYTE* dst, instrDesc* id)
// The reg must currently be holding either a gcref or a byref
// and the instruction must be inc or dec
assert(((emitThisGCrefRegs | emitThisByrefRegs) & regMask) &&
- (ins == INS_inc || ins == INS_dec || ins == INS_inc_l));
+ (ins == INS_inc || ins == INS_dec || ins == INS_inc_l || ins == INS_dec_l));
assert(id->idGCref() == GCT_BYREF);
// Mark it as holding a GCT_BYREF
emitGCregLiveUpd(GCT_BYREF, id->idReg1(), dst);
diff --git a/src/jit/flowgraph.cpp b/src/jit/flowgraph.cpp
index e86900d475..f6118ab136 100644
--- a/src/jit/flowgraph.cpp
+++ b/src/jit/flowgraph.cpp
@@ -13965,6 +13965,8 @@ bool Compiler::fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, Basi
// Extracting side-effects won't work in rationalized form.
// Instead just transform the JTRUE into a NEG which has the effect of
// evaluating the side-effecting tree and perform a benign operation on it.
+ // TODO-CQ: [TFS:1121057] We should be able to simply remove the jump node,
+ // and change gtStmtExpr to its op1.
cond->gtStmtExpr->SetOper(GT_NEG);
cond->gtStmtExpr->gtType = TYP_I_IMPL;
}
diff --git a/src/jit/gentree.cpp b/src/jit/gentree.cpp
index dac1ad0190..8a6417aeb8 100644
--- a/src/jit/gentree.cpp
+++ b/src/jit/gentree.cpp
@@ -4228,7 +4228,7 @@ DONE:
unsigned GenTree::GetScaleIndexMul()
{
- if (IsCnsIntOrI() && jitIsScaleIndexMul(gtIntConCommon.IconValue()))
+ if (IsCnsIntOrI() && jitIsScaleIndexMul(gtIntConCommon.IconValue()) && gtIntConCommon.IconValue()!=1)
return (unsigned)gtIntConCommon.IconValue();
return 0;
@@ -10710,7 +10710,7 @@ GenTreePtr Compiler::gtBuildCommaList(GenTreePtr list, GenTreePtr expr)
result->gtFlags |= (expr->gtFlags & GTF_ALL_EFFECT);
// 'list' and 'expr' should have valuenumbers defined for both or for neither one
- assert(list->gtVNPair.BothDefined() == expr->gtVNPair.BothDefined());
+ noway_assert(list->gtVNPair.BothDefined() == expr->gtVNPair.BothDefined());
// Set the ValueNumber 'gtVNPair' for the new GT_COMMA node
//
@@ -12378,13 +12378,15 @@ void GenTree::ParseArrayAddress(Compiler* comp, ArrayInfo* arrayInfo, GenTreePtr
assert(*pFldSeq == nullptr);
while (fldSeqIter != nullptr)
{
- assert(fldSeqIter != FieldSeqStore::NotAField());
-
- if (!FieldSeqStore::IsPseudoField(fldSeqIter->m_fieldHnd) &&
- // TODO-Review: A NotAField here indicates a failure to properly maintain the field seuqence
+ if (fldSeqIter == FieldSeqStore::NotAField())
+ {
+ // TODO-Review: A NotAField here indicates a failure to properly maintain the field sequence
// See test case self_host_tests_x86\jit\regression\CLR-x86-JIT\v1-m12-beta2\ b70992\ b70992.exe
- fldSeqIter != FieldSeqStore::NotAField()
- )
+ // Safest thing to do here is to drop back to MinOpts
+ noway_assert(!"fldSeqIter is NotAField() in ParseArrayAddress");
+ }
+
+ if (!FieldSeqStore::IsPseudoField(fldSeqIter->m_fieldHnd))
{
if (*pFldSeq == nullptr)
*pFldSeq = fldSeqIter;
diff --git a/src/jit/gentree.h b/src/jit/gentree.h
index 1c631179c9..29c670f48d 100644
--- a/src/jit/gentree.h
+++ b/src/jit/gentree.h
@@ -1843,6 +1843,20 @@ struct GenTreeIntCon: public GenTreeIntConCommon
assert(fields != NULL);
}
+#ifdef _TARGET_64BIT_
+ void TruncateOrSignExtend32()
+ {
+ if (gtFlags & GTF_UNSIGNED)
+ {
+ gtIconVal = UINT32(gtIconVal);
+ }
+ else
+ {
+ gtIconVal = INT32(gtIconVal);
+ }
+ }
+#endif // _TARGET_64BIT_
+
#if DEBUGGABLE_GENTREE
GenTreeIntCon() : GenTreeIntConCommon() {}
#endif
diff --git a/src/jit/importer.cpp b/src/jit/importer.cpp
index 2be6ec5099..d438d369e2 100644
--- a/src/jit/importer.cpp
+++ b/src/jit/importer.cpp
@@ -5273,7 +5273,7 @@ bool Compiler::impIsTailCallILPattern(bool tailPrefixed,
return false;
}
-#if FEATURE_TAILCALL_OPT
+#if FEATURE_TAILCALL_OPT_SHARED_RETURN
// we can actually handle if the ret is in a fallthrough block, as long as that is the only part of the sequence.
// Make sure we don't go past the end of the IL however.
codeEnd = min(codeEnd + 1, info.compCode+info.compILCodeSize);
@@ -5357,6 +5357,12 @@ bool Compiler::impIsImplicitTailCallCandidate(OPCODE opcode,
if (prefixFlags & PREFIX_TAILCALL_EXPLICIT)
return false;
+#if !FEATURE_TAILCALL_OPT_SHARED_RETURN
+ // the block containing call is marked as BBJ_RETURN
+ if (compCurBB->bbJumpKind != BBJ_RETURN)
+ return false;
+#endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN
+
// must be call+ret or call+pop+ret
if (!impIsTailCallILPattern(false, opcode, codeAddrOfNextOpcode,codeEnd))
return false;
diff --git a/src/jit/jit.h b/src/jit/jit.h
index c9fbc7b74b..2c393f4098 100644
--- a/src/jit/jit.h
+++ b/src/jit/jit.h
@@ -667,6 +667,14 @@ private:
#include "alloc.h"
#include "target.h"
+#if FEATURE_TAILCALL_OPT
+// Switch to 1, or remove, once no GetCallingAssembly artifacts are encountered.
+// Refer to TF: Bug: 824625 and its associated regression TF Bug: 1113265
+#define FEATURE_TAILCALL_OPT_SHARED_RETURN 0
+#else // !FEATURE_TAILCALL_OPT
+#define FEATURE_TAILCALL_OPT_SHARED_RETURN 0
+#endif // !FEATURE_TAILCALL_OPT
+
/*****************************************************************************/
#ifndef INLINE_MATH
diff --git a/src/jit/lclvars.cpp b/src/jit/lclvars.cpp
index 5ce6f8ceea..44b0dfe6b7 100644
--- a/src/jit/lclvars.cpp
+++ b/src/jit/lclvars.cpp
@@ -1930,6 +1930,26 @@ void Compiler::lvaRecursiveDecRefCounts(GenTreePtr tree)
}
}
+// Increment the ref counts for all locals contained in the tree and its children.
+void Compiler::lvaRecursiveIncRefCounts(GenTreePtr tree)
+{
+ assert(lvaLocalVarRefCounted);
+
+ // We could just use the recursive walker for all cases but that is a
+ // fairly heavyweight thing to spin up when we're usually just handling a leaf.
+ if (tree->OperIsLeaf())
+ {
+ if (tree->OperIsLocal())
+ {
+ lvaIncRefCnts(tree);
+ }
+ }
+ else
+ {
+ fgWalkTreePre(&tree, Compiler::lvaIncRefCntsCB, (void *)this, true);
+ }
+}
+
/*****************************************************************************
*
* Helper passed to the tree walker to decrement the refCnts for
diff --git a/src/jit/liveness.cpp b/src/jit/liveness.cpp
index 2ac59245a1..dddb45fa28 100644
--- a/src/jit/liveness.cpp
+++ b/src/jit/liveness.cpp
@@ -700,14 +700,14 @@ void Compiler::fgPerBlockLocalVarLiveness()
}
+ // If this is an assignment to local var with no SIDE EFFECTS,
+ // set lhsNode so that genMarkUseDef will flag potential
+ // x=f(x) expressions as GTF_VAR_USEDEF.
+ // Reset the flag before recomputing it - it may have been set before,
+ // but subsequent optimizations could have removed the rhs reference.
+ lhsNode->gtFlags &= ~GTF_VAR_USEDEF;
if ((rhsNode->gtFlags & GTF_SIDE_EFFECT) == 0)
{
- // Assignment to local var with no SIDE EFFECTS.
- // Set lhsNode so that genMarkUseDef will flag potential
- // x=f(x) expressions as GTF_VAR_USEDEF.
- // Reset the flag before recomputing it - it may have been set before,
- // but subsequent optimizations could have removed the rhs reference.
- lhsNode->gtFlags &= ~GTF_VAR_USEDEF;
noway_assert(lhsNode->gtFlags & GTF_VAR_DEF);
}
else
@@ -1631,7 +1631,8 @@ VARSET_VALRET_TP Compiler::fgUpdateLiveSet(VARSET_VALARG_TP liveSet,
VARSET_VALRET_TP Compiler::fgComputeLife(VARSET_VALARG_TP lifeArg,
GenTreePtr startNode,
GenTreePtr endNode,
- VARSET_VALARG_TP volatileVars
+ VARSET_VALARG_TP volatileVars,
+ bool* pStmtInfoDirty
DEBUGARG(bool* treeModf))
{
GenTreePtr tree;
@@ -2029,7 +2030,7 @@ SKIP_QMARK:
that will be used (e.g. while (i++) or a GT_COMMA) */
bool doAgain = false;
- if (fgRemoveDeadStore(&tree, varDsc, life, &doAgain DEBUGARG(treeModf)))
+ if (fgRemoveDeadStore(&tree, varDsc, life, &doAgain, pStmtInfoDirty DEBUGARG(treeModf)))
break;
if (doAgain)
@@ -2188,7 +2189,7 @@ SKIP_QMARK:
(nextColonExit == gtQMark->gtOp.gtOp1 ||
nextColonExit == gtQMark->gtOp.gtOp2));
- VarSetOps::AssignNoCopy(this, life, fgComputeLife(life, tree, nextColonExit, volatileVars DEBUGARG(treeModf)));
+ VarSetOps::AssignNoCopy(this, life, fgComputeLife(life, tree, nextColonExit, volatileVars, pStmtInfoDirty DEBUGARG(treeModf)));
/* Continue with exit node (the last node in the enclosing colon branch) */
@@ -2228,14 +2229,15 @@ SKIP_QMARK:
// fgRemoveDeadStore - remove a store to a local which has no exposed uses.
//
-// pTree - GenTree** to an assign node (pre-rationalize) or store-form local (post-rationalize)
-// varDsc - var that is being stored to
-// life - current live tracked vars (maintained as we walk backwards)
-// doAgain - out parameter, true if we should restart the statement
+// pTree - GenTree** to an assign node (pre-rationalize) or store-form local (post-rationalize)
+// varDsc - var that is being stored to
+// life - current live tracked vars (maintained as we walk backwards)
+// doAgain - out parameter, true if we should restart the statement
+// pStmtInfoDirty - out parameter, set to true when the statement's cost recomputation must be deferred until after the reverse walk completes
//
// Returns: true if we should skip the rest of the statement, false if we should continue
-bool Compiler::fgRemoveDeadStore(GenTree** pTree, LclVarDsc* varDsc, VARSET_TP life, bool *doAgain DEBUGARG(bool* treeModf))
+bool Compiler::fgRemoveDeadStore(GenTree** pTree, LclVarDsc* varDsc, VARSET_TP life, bool *doAgain, bool* pStmtInfoDirty DEBUGARG(bool* treeModf))
{
GenTree* asgNode;
GenTree* rhsNode;
@@ -2355,23 +2357,28 @@ bool Compiler::fgRemoveDeadStore(GenTree** pTree, LclVarDsc* varDsc, VARSET_TP l
*treeModf = true;
#endif // DEBUG
- /* Update ordering, costs, FP levels, etc. */
- gtSetStmtInfo(compCurStmt);
+ // Make sure no previous cousin subtree rooted at a common ancestor has
+ // asked to defer the recomputation of costs.
+ if (!*pStmtInfoDirty)
+ {
+ /* Update ordering, costs, FP levels, etc. */
+ gtSetStmtInfo(compCurStmt);
- /* Re-link the nodes for this statement */
- fgSetStmtSeq(compCurStmt);
+ /* Re-link the nodes for this statement */
+ fgSetStmtSeq(compCurStmt);
- // Start from the old assign node, as we have changed the order of its operands.
- // No need to update liveness, as nothing has changed (the target of the asgNode
- // either goes dead here, in which case the whole expression is now dead, or it
- // was already live).
+ // Start from the old assign node, as we have changed the order of its operands.
+ // No need to update liveness, as nothing has changed (the target of the asgNode
+ // either goes dead here, in which case the whole expression is now dead, or it
+ // was already live).
- // TODO-Throughput: Redo this so that the graph is modified BEFORE traversing it!
- // We can determine this case when we first see the asgNode
+ // TODO-Throughput: Redo this so that the graph is modified BEFORE traversing it!
+ // We can determine this case when we first see the asgNode
- *pTree = asgNode;
+ *pTree = asgNode;
- *doAgain = true;
+ *doAgain = true;
+ }
return false;
}
@@ -2443,6 +2450,10 @@ bool Compiler::fgRemoveDeadStore(GenTree** pTree, LclVarDsc* varDsc, VARSET_TP l
/* Re-link the nodes for this statement */
fgSetStmtSeq(compCurStmt);
+ // Since the whole statement gets replaced it is safe to
+ // re-thread and update order. No need to compute costs again.
+ *pStmtInfoDirty = false;
+
/* Compute the live set for the new statement */
*doAgain = true;
return false;
@@ -2467,6 +2478,13 @@ bool Compiler::fgRemoveDeadStore(GenTree** pTree, LclVarDsc* varDsc, VARSET_TP l
goto EXTRACT_SIDE_EFFECTS;
}
+ // If there is an embedded statement this could be tricky because we need to
+ // walk them next, and we have already skipped over them because they were
+ // not top level (but will be if we delete the top level statement)
+ if (compCurStmt->gtStmt.gtNextStmt &&
+ !compCurStmt->gtStmt.gtNextStmt->gtStmtIsTopLevel())
+ return false;
+
/* No side effects - remove the whole statement from the block->bbTreeList */
fgRemoveStmt(compCurBB, compCurStmt);
@@ -2589,7 +2607,13 @@ bool Compiler::fgRemoveDeadStore(GenTree** pTree, LclVarDsc* varDsc, VARSET_TP l
if (!compRationalIRForm)
{
- gtSetStmtInfo(compCurStmt);
+ // Do not update costs by calling gtSetStmtInfo. fgSetStmtSeq modifies
+ // the tree threading based on the new costs. Removing nodes could
+ // cause a subtree to get evaluated first (earlier second) during the
+ // liveness walk. Instead just set a flag that costs are dirty and
+ // caller has to call gtSetStmtInfo.
+ *pStmtInfoDirty = true;
+
fgSetStmtSeq(compCurStmt);
}
@@ -2817,8 +2841,15 @@ void Compiler::fgInterBlockLocalVarLiveness()
continue;
/* Compute the liveness for each tree node in the statement */
+ bool stmtInfoDirty = false;
- VarSetOps::AssignNoCopy(this, life, fgComputeLife(life, compCurStmt->gtStmt.gtStmtExpr, NULL, volatileVars DEBUGARG(&treeModf)));
+ VarSetOps::AssignNoCopy(this, life, fgComputeLife(life, compCurStmt->gtStmt.gtStmtExpr, NULL, volatileVars, &stmtInfoDirty DEBUGARG(&treeModf)));
+
+ if (stmtInfoDirty)
+ {
+ gtSetStmtInfo(compCurStmt);
+ fgSetStmtSeq(compCurStmt);
+ }
#ifdef DEBUG
if (verbose && treeModf)
diff --git a/src/jit/lowerxarch.cpp b/src/jit/lowerxarch.cpp
index 121a3c8921..c78d5789e8 100644
--- a/src/jit/lowerxarch.cpp
+++ b/src/jit/lowerxarch.cpp
@@ -704,6 +704,7 @@ void Lowering::TreeNodeInfoInit(GenTree* stmt)
{
source->gtLsraInfo.setSrcCandidates(l, l->allRegs(TYP_INT) & ~RBM_RCX);
shiftBy->gtLsraInfo.setSrcCandidates(l, RBM_RCX);
+ info->setDstCandidates(l, l->allRegs(TYP_INT) & ~RBM_RCX);
}
else
{
@@ -787,6 +788,15 @@ void Lowering::TreeNodeInfoInit(GenTree* stmt)
}
}
+ // If this is a varargs call, we will clear the internal candidates in case we need
+ // to reserve some integer registers for copying float args.
+ // We have to do this because otherwise the default candidates are allRegs, and adding
+ // the individual specific registers will have no effect.
+ if (tree->gtCall.IsVarargs())
+ {
+ tree->gtLsraInfo.setInternalCandidates(l, RBM_NONE);
+ }
+
// Set destination candidates for return value of the call.
if (varTypeIsFloating(registerType))
{
@@ -919,6 +929,17 @@ void Lowering::TreeNodeInfoInit(GenTree* stmt)
{
argNode->gtOp.gtOp1->gtLsraInfo.setSrcCandidates(l, l->getUseCandidates(argNode));
}
+ // In the case of a varargs call, the ABI dictates that if we have floating point args,
+ // we must pass the enregistered arguments in both the integer and floating point registers.
+ // Since the integer register is not associated with this arg node, we will reserve it as
+ // an internal register so that it is not used during the evaluation of the call node
+ // (e.g. for the target).
+ if (tree->gtCall.IsVarargs() && varTypeIsFloating(argNode))
+ {
+ regNumber targetReg = compiler->getCallArgIntRegister(argReg);
+ tree->gtLsraInfo.setInternalIntCount(tree->gtLsraInfo.internalIntCount + 1);
+ tree->gtLsraInfo.addInternalCandidates(l, genRegMask(targetReg));
+ }
}
// Now, count stack args
@@ -2396,6 +2417,12 @@ void Lowering::LowerCmp(GenTreePtr tree)
tree->gtOp.gtOp1 = castOp1;
castOp1->gtType = TYP_UBYTE;
+ // trim down the value if castOp1 is an int constant since its type changed to UBYTE.
+ if (castOp1Oper == GT_CNS_INT)
+ {
+ castOp1->gtIntCon.gtIconVal = (UINT8)castOp1->gtIntCon.gtIconVal;
+ }
+
if (op2->isContainedIntOrIImmed())
{
ssize_t val = (ssize_t)op2->AsIntConCommon()->IconValue();
diff --git a/src/jit/lsra.cpp b/src/jit/lsra.cpp
index 0db1ab0659..ebac7cd4ac 100644
--- a/src/jit/lsra.cpp
+++ b/src/jit/lsra.cpp
@@ -950,6 +950,41 @@ LinearScan::LinearScan(Compiler * theCompiler)
// Get the value of the environment variable that controls stress for register allocation
static ConfigDWORD fJitStressRegs;
lsraStressMask = fJitStressRegs.val(CLRConfig::INTERNAL_JitStressRegs);
+#if 0
+#ifdef DEBUG
+ if (lsraStressMask != 0)
+ {
+ // The code in this #if can be used to debug JitStressRegs issues according to
+ // method hash. To use, simply set environment variables JitStressRegsHashLo and JitStressRegsHashHi
+ unsigned methHash = compiler->info.compMethodHash();
+ char* lostr = getenv("JitStressRegsHashLo");
+ unsigned methHashLo = 0;
+ bool dump = false;
+ if (lostr != nullptr)
+ {
+ sscanf_s(lostr, "%x", &methHashLo);
+ dump = true;
+ }
+ char* histr = getenv("JitStressRegsHashHi");
+ unsigned methHashHi = UINT32_MAX;
+ if (histr != nullptr)
+ {
+ sscanf_s(histr, "%x", &methHashHi);
+ dump = true;
+ }
+ if (methHash < methHashLo || methHash > methHashHi)
+ {
+ lsraStressMask = 0;
+ }
+ else if (dump == true)
+ {
+ printf("JitStressRegs = %x for method %s, hash = 0x%x.\n",
+ lsraStressMask, compiler->info.compFullName, compiler->info.compMethodHash());
+ printf(""); // in our logic this causes a flush
+ }
+ }
+#endif // DEBUG
+#endif
static ConfigDWORD fJitDumpTerseLsra;
dumpTerse = (fJitDumpTerseLsra.val(CLRConfig::INTERNAL_JitDumpTerseLsra) != 0);
@@ -2638,17 +2673,34 @@ LinearScan::buildInternalRegisterDefsForNode(GenTree *tree,
{
int count;
int internalIntCount = tree->gtLsraInfo.internalIntCount;
+ regMaskTP internalCands = tree->gtLsraInfo.getInternalCandidates(this);
+
+ // If this is a varArgs call, the internal candidates represent the integer registers that
+ // floating point arguments must be copied into. These must be handled as fixed regs.
+ bool fixedRegs = false;
+ if ((internalIntCount != 0) && (tree->OperGet() == GT_CALL))
+ {
+ assert(tree->gtCall.IsVarargs());
+ fixedRegs = true;
+ assert((int)genCountBits(internalCands) == internalIntCount);
+ }
+
for (count = 0; count < internalIntCount; count++)
{
- regMaskTP internalCands = (tree->gtLsraInfo.getInternalCandidates(this) & allRegs(TYP_INT));
- temps[count] = defineNewInternalTemp(tree, IntRegisterType, currentLoc, internalCands);
+ regMaskTP internalIntCands = (internalCands & allRegs(TYP_INT));
+ if (fixedRegs)
+ {
+ internalIntCands = genFindLowestBit(internalIntCands);
+ internalCands &= ~internalIntCands;
+ }
+ temps[count] = defineNewInternalTemp(tree, IntRegisterType, currentLoc, internalIntCands);
}
int internalFloatCount = tree->gtLsraInfo.internalFloatCount;
for (int i = 0; i < internalFloatCount; i++)
{
- regMaskTP internalCands = (tree->gtLsraInfo.getInternalCandidates(this) & internalFloatRegCandidates());
- temps[count++] = defineNewInternalTemp(tree, FloatRegisterType, currentLoc, internalCands);
+ regMaskTP internalFPCands = (internalCands & internalFloatRegCandidates());
+ temps[count++] = defineNewInternalTemp(tree, FloatRegisterType, currentLoc, internalFPCands);
}
noway_assert(count < MaxInternalRegisters);
@@ -3185,7 +3237,11 @@ LinearScan::buildRefPositionsForNode(GenTree *tree,
LsraLocation defLocation = (i == produce-1) ? lastDefLocation : currentLoc;
RefPosition *pos = newRefPosition(interval, defLocation, defRefType, defNode, currCandidates);
- pos->isLocalDefUse = info.isLocalDefUse;
+ if (info.isLocalDefUse)
+ {
+ pos->isLocalDefUse = true;
+ pos->lastUse = true;
+ }
DBEXEC(VERBOSE, pos->dump());
interval->updateRegisterPreferences(currCandidates);
interval->updateRegisterPreferences(useCandidates);
@@ -4384,8 +4440,9 @@ LinearScan::tryAllocateFreeReg(Interval *currentInterval, RefPosition *refPositi
continue;
}
- // If this is a constant interval, check to see if its value is already in this register.
+ // If this is a definition of a constant interval, check to see if its value is already in this register.
if (currentInterval->isConstant &&
+ RefTypeIsDef(refPosition->refType) &&
(physRegRecord->assignedInterval != nullptr) &&
physRegRecord->assignedInterval->isConstant)
{
@@ -4398,7 +4455,8 @@ LinearScan::tryAllocateFreeReg(Interval *currentInterval, RefPosition *refPositi
switch (otherTreeNode->OperGet())
{
case GT_CNS_INT:
- if (refPosition->treeNode->AsIntCon()->IconValue() == otherTreeNode->AsIntCon()->IconValue())
+ if ((refPosition->treeNode->AsIntCon()->IconValue() == otherTreeNode->AsIntCon()->IconValue()) &&
+ (varTypeGCtype(refPosition->treeNode) == varTypeGCtype(otherTreeNode)))
{
score |= VALUE_AVAILABLE;
}
@@ -6812,6 +6870,7 @@ LinearScan::resolveRegisters()
lclNum++)
{
localVarIntervals[lclNum]->recentRefPosition = nullptr;
+ localVarIntervals[lclNum]->isActive = false;
}
// handle incoming arguments and special temps
@@ -10010,7 +10069,7 @@ LinearScan::verifyFinalAllocation()
assert(interval->assignedReg != nullptr);
regRecord = interval->assignedReg;
}
- if (currentRefPosition->spillAfter || currentRefPosition->lastUse || currentRefPosition->isLocalDefUse)
+ if (currentRefPosition->spillAfter || currentRefPosition->lastUse)
{
interval->physReg = REG_NA;
interval->assignedReg = nullptr;
diff --git a/src/jit/morph.cpp b/src/jit/morph.cpp
index 3cd314d675..0da9d1e0ab 100644
--- a/src/jit/morph.cpp
+++ b/src/jit/morph.cpp
@@ -574,14 +574,13 @@ GenTreePtr Compiler::fgMorphCast(GenTreePtr tree)
}
}
- // Only apply this transformation when neither the cast node
- // nor the oper node may throw an exception based on the upper 32 bits
- // and neither node is currently a CSE candidate.
+ // Only apply this transformation during global morph,
+ // when neither the cast node nor the oper node may throw an exception
+ // based on the upper 32 bits.
//
- if (!tree->gtOverflow() &&
- !oper->gtOverflowEx() &&
- !gtIsActiveCSE_Candidate(tree) &&
- !gtIsActiveCSE_Candidate(oper))
+ if (fgGlobalMorph &&
+ !tree->gtOverflow() &&
+ !oper->gtOverflowEx())
{
// For these operations the lower 32 bits of the result only depends
// upon the lower 32 bits of the operands
@@ -5605,7 +5604,7 @@ GenTreePtr Compiler::fgMorphCall(GenTreeCall* call)
goto NO_TAIL_CALL;
}
-#if FEATURE_TAILCALL_OPT
+#if FEATURE_TAILCALL_OPT_SHARED_RETURN
// Many tailcalls will have call and ret in the same block, and thus be BBJ_RETURN,
// but if the call falls through to a ret, and we are doing a tailcall, change it here.
if (compCurBB->bbJumpKind != BBJ_RETURN)
@@ -5613,7 +5612,7 @@ GenTreePtr Compiler::fgMorphCall(GenTreeCall* call)
#endif
// Set this flag before calling fgMorphCall() to prevent inlining this call.
- call->gtCallMoreFlags |= GTF_CALL_M_TAILCALL;
+ call->gtCallMoreFlags |= GTF_CALL_M_TAILCALL;
// Do some target-specific transformations (before we process the args, etc.)
// This is needed only for tail prefixed calls that cannot be dispatched as
@@ -8690,7 +8689,7 @@ DONE_MORPHING_CHILDREN:
/* If we are storing a small type, we might be able to omit a cast */
if ((op1->gtOper == GT_IND) && varTypeIsSmall(op1->TypeGet()))
{
- if ((op2->gtOper == GT_CAST) && !op2->gtOverflow())
+ if (!gtIsActiveCSE_Candidate(op2) && (op2->gtOper == GT_CAST) && !op2->gtOverflow())
{
var_types castType = op2->CastToType();
@@ -8781,19 +8780,19 @@ DONE_MORPHING_CHILDREN:
ival2 = cns2->gtIntCon.gtIconVal;
if (op1->gtOper == GT_ADD)
+ {
ival2 -= ival1;
+ }
else
+ {
ival2 += ival1;
+ }
+ cns2->gtIntCon.gtIconVal = ival2;
#ifdef _TARGET_64BIT_
// we need to properly re-sign-extend or truncate as needed.
- if (cns2->gtFlags & GTF_UNSIGNED)
- ival2 = UINT32(ival2);
- else
- ival2 = INT32(ival2);
-#endif // _TARGET_64BIT_
-
- cns2->gtIntCon.gtIconVal = ival2;
+ cns2->AsIntCon()->TruncateOrSignExtend32();
+#endif // _TARGET_64BIT_
op1 = tree->gtOp.gtOp1 = op1->gtOp.gtOp1;
}
@@ -9134,7 +9133,7 @@ SKIP:
case GT_LE:
case GT_GE:
case GT_GT:
-
+
if ((tree->gtFlags & GTF_UNSIGNED) == 0)
{
if (op2->gtOper == GT_CNS_INT)
@@ -9173,9 +9172,27 @@ SKIP:
{
/* Change to "expr >= 0" */
oper = GT_GE;
+
SET_OPER:
+ // IF we get here we should be changing 'oper'
+ assert(tree->OperGet() != oper);
+
+ ValueNumPair vnp;
+ vnp = tree->gtVNPair; // Save the existing ValueNumber for 'tree'
+
tree->SetOper(oper);
cns2->gtIntCon.gtIconVal = 0;
+
+ // vnStore is null before the ValueNumber phase has run
+ if (vnStore != nullptr)
+ {
+ // Update the ValueNumber for 'cns2', as we just changed it to 0
+ fgValueNumberTreeConst(cns2);
+ // Restore the old ValueNumber for 'tree' as the new expr
+ // will still compute the same value as before
+ tree->gtVNPair = vnp;
+ }
+
op2 = tree->gtOp.gtOp2 = gtFoldExpr(op2);
}
}
@@ -9369,7 +9386,6 @@ COMPARE:
/* Negate the constant and change the node to be "+" */
op2->gtIntConCommon.SetIconValue(-op2->gtIntConCommon.IconValue());
- noway_assert((op2->gtIntConCommon.IconValue() != 0) || !opts.OptEnabled(CLFLG_CONSTANTFOLD)); // This should get folded in gtFoldExprSpecial
oper = GT_ADD;
tree->ChangeOper(oper);
goto CM_ADD_OP;
@@ -9413,7 +9429,10 @@ COMPARE:
case GT_MOD:
case GT_UMOD:
// For "val % 1", return 0 if op1 doesn't have any side effects
- if ((op1->gtFlags & GTF_SIDE_EFFECT) == 0)
+ // and we are not in the CSE phase; in the CSE phase we cannot discard 'tree'
+ // because it may contain CSE expressions that we haven't yet examined.
+ //
+ if (((op1->gtFlags & GTF_SIDE_EFFECT) == 0) && !optValnumCSE_phase)
{
if (((op2->gtOper == GT_CNS_INT) && (op2->gtIntConCommon.IconValue() == 1))
|| ((op2->gtOper == GT_CNS_LNG) && (op2->gtIntConCommon.LngValue() == 1)))
@@ -9475,6 +9494,7 @@ CM_ADD_OP:
if (op1->gtOper == GT_ADD &&
op2->gtOper == GT_ADD &&
+ !gtIsActiveCSE_Candidate(op2) &&
op1->gtOp.gtOp2->gtOper == GT_CNS_INT &&
op2->gtOp.gtOp2->gtOper == GT_CNS_INT &&
!op1->gtOverflow() &&
@@ -9483,6 +9503,14 @@ CM_ADD_OP:
cns1 = op1->gtOp.gtOp2;
cns2 = op2->gtOp.gtOp2;
cns1->gtIntCon.gtIconVal += cns2->gtIntCon.gtIconVal;
+#ifdef _TARGET_64BIT_
+ if (cns1->TypeGet() == TYP_INT)
+ {
+ // we need to properly re-sign-extend or truncate after adding two int constants above
+ cns1->AsIntCon()->TruncateOrSignExtend32();
+ }
+#endif //_TARGET_64BIT_
+
tree->gtOp.gtOp2 = cns1;
DEBUG_DESTROY_NODE(cns2);
@@ -9497,12 +9525,21 @@ CM_ADD_OP:
/* Fold "((x+icon1)+icon2) to (x+(icon1+icon2))" */
if (op1->gtOper == GT_ADD &&
+ !gtIsActiveCSE_Candidate(op1) &&
op1->gtOp.gtOp2->IsCnsIntOrI() &&
!op1->gtOverflow() &&
op1->gtOp.gtOp2->OperGet() == op2->OperGet())
{
cns1 = op1->gtOp.gtOp2;
- op2->gtIntConCommon.SetIconValue(cns1->gtIntConCommon.IconValue() + op2->gtIntConCommon.IconValue());
+ op2->gtIntConCommon.SetIconValue(cns1->gtIntConCommon.IconValue() + op2->gtIntConCommon.IconValue());
+#ifdef _TARGET_64BIT_
+ if (op2->TypeGet() == TYP_INT)
+ {
+ // we need to properly re-sign-extend or truncate after adding two int constants above
+ op2->AsIntCon()->TruncateOrSignExtend32();
+ }
+#endif //_TARGET_64BIT_
+
if (cns1->OperGet() == GT_CNS_INT)
{
op2->gtIntCon.gtFieldSeq =
@@ -10268,9 +10305,11 @@ CM_ADD_OP:
op2->gtType = commaOp2->gtType = TYP_LONG;
}
- if ((typ == TYP_INT) && (genActualType(op2->gtType) == TYP_LONG ||
- varTypeIsFloating(op2->TypeGet())))
+ if ((genActualType(typ) == TYP_INT) && (genActualType(op2->gtType) == TYP_LONG ||
+ varTypeIsFloating(op2->TypeGet())))
{
+ // An example case is comparison (say GT_GT) of two longs or floating point values.
+
GenTreePtr commaOp2 = op2->gtOp.gtOp2;
commaOp2->ChangeOperConst(GT_CNS_INT);
@@ -10787,6 +10826,13 @@ ASG_OP:
op1->ChangeOper(GT_MUL);
add->gtIntCon.gtIconVal = imul;
+#ifdef _TARGET_64BIT_
+ if (add->gtType == TYP_INT)
+ {
+ // we need to properly re-sign-extend or truncate after multiplying two int constants above
+ add->AsIntCon()->TruncateOrSignExtend32();
+ }
+#endif //_TARGET_64BIT_
}
}
@@ -10830,11 +10876,14 @@ ASG_OP:
tree->ChangeOper(GT_ADD);
ssize_t result = iadd << ishf;
+ op2->gtIntConCommon.SetIconValue(result);
#ifdef _TARGET_64BIT_
if (op1->gtType == TYP_INT)
- result = (int) result;
+ {
+ op2->AsIntCon()->TruncateOrSignExtend32();
+ }
#endif // _TARGET_64BIT_
- op2->gtIntConCommon.SetIconValue(result);
+
// we are reusing the shift amount node here, but the type we want is that of the shift result
op2->gtType = op1->gtType;
@@ -10974,17 +11023,20 @@ bool Compiler::fgShouldUseMagicNumberDivide(GenTreeOp* tree)
return false;
#else
- // Fix for 1106790, during the optOptimizeValnumCSEs phase we can call fgMorph
- // and when we do, if this method returns true we will introduce a new LclVar and
+ // During the optOptimizeValnumCSEs phase we can call fgMorph and when we do,
+ // if this method returns true we will introduce a new LclVar and
// a couple of new GenTree nodes, including an assignment to the new LclVar.
// None of these new GenTree nodes will have valid ValueNumbers.
// That is an invalid state for a GenTree node during the optOptimizeValnumCSEs phase.
+ //
+ // Also during optAssertionProp when extracting side effects we can assert
+ // during gtBuildCommaList if we have one tree that has Value Numbers
+ // and another one that does not.
//
- if (optValnumCSE_phase)
+ if (!fgGlobalMorph)
{
- // It is not safe to perform this optimization while we are optimizing CSE's
- // as this optimization will introduce new local and an assignment
- // and these new nodes will not have valid value numbers
+ // We only perform the Magic Number Divide optimization during
+ // the initial global morph phase
return false;
}
@@ -12609,6 +12661,12 @@ void Compiler::fgMorphBlocks()
/* Fold the two increments/decrements into one */
src1->gtIntCon.gtIconVal = itemp;
+#ifdef _TARGET_64BIT_
+ if (src1->gtType == TYP_INT)
+ {
+ src1->AsIntCon()->TruncateOrSignExtend32();
+ }
+#endif //_TARGET_64BIT_
/* Remove the second statement completely */
diff --git a/src/jit/optimizer.cpp b/src/jit/optimizer.cpp
index ab3016595c..eda3488189 100644
--- a/src/jit/optimizer.cpp
+++ b/src/jit/optimizer.cpp
@@ -4699,6 +4699,10 @@ bool Compiler::optNarrowTree(GenTreePtr tree,
{
tree->gtType = TYP_INT;
tree->gtIntCon.gtIconVal = (int) ival;
+ if (vnStore != nullptr)
+ {
+ fgValueNumberTreeConst(tree);
+ }
}
#endif // _TARGET_64BIT_
@@ -4853,6 +4857,7 @@ NARROW_IND:
tree->ChangeOper (GT_NOP);
tree->gtType = dstt;
tree->gtOp.gtOp2 = nullptr;
+ tree->gtVNPair = op1->gtVNPair; // Set to op1's ValueNumber
}
else
{
@@ -5178,8 +5183,15 @@ void Compiler::optPerformHoistExpr(GenTreePtr origExpr, unsig
BasicBlock * preHead = optLoopTable[lnum].lpHead;
assert (preHead->bbJumpKind == BBJ_NONE);
- compCurBB = preHead; // fgMorphTree requires that compCurBB be the block that contains
- // (or in this case, will contain) the expression.
+ // fgMorphTree and lvaRecursiveIncRefCounts requires that compCurBB be the block that contains
+ // (or in this case, will contain) the expression.
+ compCurBB = preHead;
+
+ // Increment the ref counts of any local vars appearing in "hoist".
+ // Note that we need to do this before fgMorphTree() as fgMorph() could constant
+ // fold away some of the lcl vars referenced by "hoist".
+ lvaRecursiveIncRefCounts(hoist);
+
hoist = fgMorphTree(hoist);
GenTreePtr hoistStmt = gtNewStmt(hoist);
@@ -5218,9 +5230,6 @@ void Compiler::optPerformHoistExpr(GenTreePtr origExpr, unsig
}
#endif
- // Update the ref counts of any local vars appearing in "hoist".
- fgUpdateRefCntForClone(preHead, hoist);
-
if (fgStmtListThreaded)
{
fgSetStmtSeq(hoistStmt);
diff --git a/src/jit/rangecheck.cpp b/src/jit/rangecheck.cpp
index 3caad508ff..99a3fa1a32 100644
--- a/src/jit/rangecheck.cpp
+++ b/src/jit/rangecheck.cpp
@@ -250,7 +250,12 @@ void RangeCheck::OptimizeRangeCheck(BasicBlock* block, GenTreePtr stmt, GenTreeP
JITDUMP("ArrSize for lengthVN:%03X = %d\n", arrLenVn, arrSize);
if (m_pCompiler->vnStore->IsVNConstant(idxVn) && arrSize > 0)
{
- int idxVal = m_pCompiler->vnStore->ConstantValue<int>(idxVn);
+ ssize_t idxVal = -1;
+ unsigned iconFlags = 0;
+ if (!m_pCompiler->optIsTreeKnownIntValue(true, treeIndex, &idxVal, &iconFlags))
+ {
+ return;
+ }
JITDUMP("[RangeCheck::OptimizeRangeCheck] Is index %d in <0, arrLenVn VN%X sz:%d>.\n", idxVal, arrLenVn, arrSize);
if (arrSize > 0 && idxVal < arrSize && idxVal >= 0)
diff --git a/src/jit/rationalize.cpp b/src/jit/rationalize.cpp
index 504a137bfc..aaa50550b8 100644
--- a/src/jit/rationalize.cpp
+++ b/src/jit/rationalize.cpp
@@ -1087,12 +1087,6 @@ Location Rationalizer::RewriteSimpleTransforms(Location loc)
return loc1;
}
- if (tree->OperIsLocalRead())
- {
- comp->lvaTable[tree->AsLclVarCommon()->gtLclNum].decRefCnts(comp->compCurBB->getBBWeight(comp), comp);
- tree->gtBashToNOP();
- }
-
SplitData tmpState = {0};
tmpState.root = statement;
tmpState.continueSubtrees = true;
@@ -1104,6 +1098,13 @@ Location Rationalizer::RewriteSimpleTransforms(Location loc)
NULL,
&tmpState);
+ tree = statement->gtStmt.gtStmtExpr;
+ if (tree->OperIsLocalRead())
+ {
+ comp->lvaTable[tree->AsLclVarCommon()->gtLclNum].decRefCnts(comp->compCurBB->getBBWeight(comp), comp);
+ tree->gtBashToNOP();
+ }
+
JITDUMP("After simple transforms:\n");
DISPTREE(statement);
JITDUMP("\n");
@@ -1726,7 +1727,6 @@ Compiler::fgWalkResult Rationalizer::SimpleTransformHelper(GenTree **ppTree, Com
Compiler::fgSnipNode(tmpState->root->AsStmt(), tree);
Compiler::fgSnipNode(tmpState->root->AsStmt(), child);
*ppTree = child->gtOp.gtOp1;
-
JITDUMP("Rewriting GT_ADDR(GT_IND(X)) to X:\n");
}
comp->fgFixupIfCallArg(data->parentStack, tree, *ppTree);
@@ -1741,6 +1741,12 @@ Compiler::fgWalkResult Rationalizer::SimpleTransformHelper(GenTree **ppTree, Com
Compiler::fgSnipNode(tmpState->root->AsStmt(), tree);
*ppTree = tree->gtOp.gtOp1;
comp->fgFixupIfCallArg(data->parentStack, tree, *ppTree);
+
+ // Since GT_NOP(op1) is replaced with op1, pop GT_NOP node (i.e the current node)
+ // and replace it with op1 on parent stack.
+ (void)data->parentStack->Pop();
+ data->parentStack->Push(tree->gtOp.gtOp1);
+
JITDUMP("Rewriting GT_NOP(X) to X:\n");
DISPTREE(*ppTree);
JITDUMP("\n");
diff --git a/src/jit/regalloc.cpp b/src/jit/regalloc.cpp
index cb712c6e77..faa03d2bef 100644
--- a/src/jit/regalloc.cpp
+++ b/src/jit/regalloc.cpp
@@ -3763,6 +3763,9 @@ HANDLE_SHIFT_COUNT:
//
rpRecordRegIntf(RBM_SHIFT, liveSet
DEBUGARG("Variable Shift Register"));
+ // In case op2Mask doesn't contain the required shift register,
+ // we will or it in now.
+ op2Mask |= RBM_SHIFT;
}
}
diff --git a/src/jit/regset.cpp b/src/jit/regset.cpp
index 5fad4029fd..0f4425f960 100644
--- a/src/jit/regset.cpp
+++ b/src/jit/regset.cpp
@@ -2508,29 +2508,32 @@ void RegSet::rsUnspillRegPair(GenTreePtr tree,
rsMarkRegFree(genRegMask(regLo));
}
- /* Has the register holding the upper half been spilled? */
-
- if (!rsIsTreeInReg(regHi, tree))
+ if (regHi != REG_STK)
{
- regMaskTP regLoUsed;
-
- /* Temporarily lock the low part so it doesnt get spilled */
-
- rsLockReg(genRegMask(regLo), &regLoUsed);
-
- /* Pick a new home for the upper half */
-
- regHi = rsUnspillOneReg(tree, regHi, keepReg, needReg);
-
- /* We can unlock the low register now */
-
- rsUnlockReg(genRegMask(regLo), regLoUsed);
- }
- else
- {
- /* Free the register holding the upper half */
-
- rsMarkRegFree(genRegMask(regHi));
+ /* Has the register holding the upper half been spilled? */
+
+ if (!rsIsTreeInReg(regHi, tree))
+ {
+ regMaskTP regLoUsed;
+
+ /* Temporarily lock the low part so it doesnt get spilled */
+
+ rsLockReg(genRegMask(regLo), &regLoUsed);
+
+ /* Pick a new home for the upper half */
+
+ regHi = rsUnspillOneReg(tree, regHi, keepReg, needReg);
+
+ /* We can unlock the low register now */
+
+ rsUnlockReg(genRegMask(regLo), regLoUsed);
+ }
+ else
+ {
+ /* Free the register holding the upper half */
+
+ rsMarkRegFree(genRegMask(regHi));
+ }
}
/* The value is now residing in the new register */
diff --git a/src/jit/stackfp.cpp b/src/jit/stackfp.cpp
index a70d9860d2..a939da1236 100644
--- a/src/jit/stackfp.cpp
+++ b/src/jit/stackfp.cpp
@@ -3002,8 +3002,7 @@ BasicBlock* CodeGen::genTransitionBlockStackFP(FlatFPStateX87* pState, BasicBloc
//
// Insert pBlock between lastHotBlock and fgFirstColdBlock
//
- lastHotBlock->bbNext = pBlock;
- pBlock->bbNext = compiler->fgFirstColdBlock;
+ compiler->fgInsertBBafter(lastHotBlock, pBlock);
}
return pBlock;
diff --git a/src/jit/valuenum.cpp b/src/jit/valuenum.cpp
index 156cdf94a1..dc1e96363b 100644
--- a/src/jit/valuenum.cpp
+++ b/src/jit/valuenum.cpp
@@ -1610,7 +1610,7 @@ ValueNum ValueNumStore::EvalFuncForConstantFPArgs(var_types typ, VNFunc func, Va
if (VNFuncIsComparison(func))
{
- assert(typ == TYP_INT);
+ assert(genActualType(typ) == TYP_INT);
result = VNForIntCon(EvalComparison(func, arg0Val, arg1Val));
}
else
@@ -2886,7 +2886,22 @@ ValueNum ValueNumStore::EvalMathFunc(var_types typ, CorInfoIntrinsics gtMathFN,
vnf = VNF_Abs;
break;
case CORINFO_INTRINSIC_Round:
- vnf = VNF_Round;
+ if (typ == TYP_DOUBLE)
+ {
+ vnf = VNF_RoundDouble;
+ }
+ else if (typ == TYP_FLOAT)
+ {
+ vnf = VNF_RoundFloat;
+ }
+ else if (typ == TYP_INT)
+ {
+ vnf = VNF_RoundInt;
+ }
+ else
+ {
+ noway_assert(!"Invalid INTRINSIC_Round");
+ }
break;
default:
unreached(); // the above are the only math intrinsics at the time of this writing.
@@ -2894,6 +2909,7 @@ ValueNum ValueNumStore::EvalMathFunc(var_types typ, CorInfoIntrinsics gtMathFN,
assert(typ == TYP_DOUBLE
|| typ == TYP_FLOAT
|| (typ == TYP_INT && gtMathFN == CORINFO_INTRINSIC_Round));
+
return VNForFunc(typ, vnf, arg0VN);
}
}
diff --git a/src/jit/valuenumfuncs.h b/src/jit/valuenumfuncs.h
index 4f9d952b2a..aa352fe2b1 100644
--- a/src/jit/valuenumfuncs.h
+++ b/src/jit/valuenumfuncs.h
@@ -71,7 +71,9 @@ ValueNumFuncDef(Sin, 1, false, false, false)
ValueNumFuncDef(Cos, 1, false, false, false)
ValueNumFuncDef(Sqrt, 1, false, false, false)
ValueNumFuncDef(Abs, 1, false, false, false)
-ValueNumFuncDef(Round, 1, false, false, false)
+ValueNumFuncDef(RoundDouble, 1, false, false, false)
+ValueNumFuncDef(RoundFloat, 1, false, false, false)
+ValueNumFuncDef(RoundInt, 1, false, false, false)
ValueNumFuncDef(ManagedThreadId, 0, false, false, false)
diff --git a/src/mscorlib/src/System/AppContext/AppContextDefaultValues.Defaults.cs b/src/mscorlib/src/System/AppContext/AppContextDefaultValues.Defaults.cs
index 11c3cdfbcf..66f865e7bc 100644
--- a/src/mscorlib/src/System/AppContext/AppContextDefaultValues.Defaults.cs
+++ b/src/mscorlib/src/System/AppContext/AppContextDefaultValues.Defaults.cs
@@ -10,6 +10,7 @@ namespace System
internal static readonly string SwitchNoAsyncCurrentCulture = "Switch.System.Globalization.NoAsyncCurrentCulture";
internal static readonly string SwitchThrowExceptionIfDisposedCancellationTokenSource = "Switch.System.Threading.ThrowExceptionIfDisposedCancellationTokenSource";
+ internal static readonly string SwitchPreserveEventListnerObjectIdentity = "Switch.System.Diagnostics.EventSource.PreserveEventListnerObjectIdentity";
// This is a partial method. Platforms can provide an implementation of it that will set override values
diff --git a/src/mscorlib/src/System/AppContext/AppContextSwitches.cs b/src/mscorlib/src/System/AppContext/AppContextSwitches.cs
index 3a1d7e00f5..c0181c883a 100644
--- a/src/mscorlib/src/System/AppContext/AppContextSwitches.cs
+++ b/src/mscorlib/src/System/AppContext/AppContextSwitches.cs
@@ -28,6 +28,16 @@ namespace System
}
}
+ private static int _preserveEventListnerObjectIdentity;
+ public static bool PreserveEventListnerObjectIdentity
+ {
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ get
+ {
+ return GetCachedSwitchValue(AppContextDefaultValues.SwitchPreserveEventListnerObjectIdentity, ref _preserveEventListnerObjectIdentity);
+ }
+ }
+
//
// Implementation details
//
@@ -66,4 +76,4 @@ namespace System
return isSwitchEnabled;
}
}
-} \ No newline at end of file
+}
diff --git a/src/mscorlib/src/System/Diagnostics/Eventing/ActivityTracker.cs b/src/mscorlib/src/System/Diagnostics/Eventing/ActivityTracker.cs
index 38d2c4cf78..637a607026 100644
--- a/src/mscorlib/src/System/Diagnostics/Eventing/ActivityTracker.cs
+++ b/src/mscorlib/src/System/Diagnostics/Eventing/ActivityTracker.cs
@@ -1,5 +1,6 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
using System;
using System.Diagnostics;
using System.Threading;
@@ -87,7 +88,7 @@ namespace System.Diagnostics.Tracing
{
ActivityInfo existingActivity = FindActiveActivity(fullActivityName, currentActivity);
if (existingActivity != null)
- {
+ {
OnStop(providerName, activityName, task, ref activityId);
currentActivity = m_current.Value;
}
@@ -213,7 +214,6 @@ namespace System.Diagnostics.Tracing
{
if (m_current == null)
{
- EventSource.OutputDebugString("Enabling Activity Tracking");
m_current = new AsyncLocal<ActivityInfo>(ActivityChanging);
}
}
diff --git a/src/mscorlib/src/System/Diagnostics/Eventing/EventProvider.cs b/src/mscorlib/src/System/Diagnostics/Eventing/EventProvider.cs
index 13f1d70199..c2c29063c8 100644
--- a/src/mscorlib/src/System/Diagnostics/Eventing/EventProvider.cs
+++ b/src/mscorlib/src/System/Diagnostics/Eventing/EventProvider.cs
@@ -155,11 +155,6 @@ namespace System.Diagnostics.Tracing
{
throw new ArgumentException(Win32Native.GetMessage(unchecked((int)status)));
}
- else
- {
- // if we registered successfully ensure we unregister on ProcessExit
- DisposeOnProcessExit(new WeakReference(this));
- }
}
[System.Security.SecurityCritical]
@@ -189,19 +184,6 @@ namespace System.Diagnostics.Tracing
return status;
}
- private static void DisposeOnProcessExit(WeakReference wrThis)
- {
-#if !ES_BUILD_PCL && !FEATURE_CORECLR
- EventHandler doDispose = (sender, e) => {
- EventProvider ep = wrThis.Target as EventProvider;
- if (ep != null)
- ep.Dispose(true);
- };
- AppDomain.CurrentDomain.ProcessExit += doDispose;
- AppDomain.CurrentDomain.DomainUnload += doDispose;
-#endif
- }
-
//
// implement Dispose Pattern to early deregister from ETW insted of waiting for
// the finalizer to call deregistration.
@@ -235,7 +217,7 @@ namespace System.Diagnostics.Tracing
// Disable the provider.
m_enabled = false;
- // Do most of the work under a lock to avoid shutdown race condition.
+ // Do most of the work under a lock to avoid shutdown race.
lock (EventListener.EventListenersLock)
{
// Double check
@@ -307,8 +289,6 @@ namespace System.Diagnostics.Tracing
byte[] data;
int keyIndex;
bool skipFinalOnControllerCommand = false;
- EventSource.OutputDebugString(string.Format("EtwEnableCallBack(ctrl {0}, lvl {1}, any {2:x}, all {3:x})",
- controlCode, setLevel, anyKeyword, allKeyword));
if (controlCode == UnsafeNativeMethods.ManifestEtw.EVENT_CONTROL_CODE_ENABLE_PROVIDER)
{
m_enabled = true;
@@ -323,9 +303,6 @@ namespace System.Diagnostics.Tracing
int etwSessionId = session.Item1.etwSessionId;
bool bEnabling = session.Item2;
- EventSource.OutputDebugString(string.Format(CultureInfo.InvariantCulture, "EtwEnableCallBack: session changed {0}:{1}:{2}",
- sessionChanged, etwSessionId, bEnabling));
-
skipFinalOnControllerCommand = true;
args = null; // reinitialize args for every session...
diff --git a/src/mscorlib/src/System/Diagnostics/Eventing/EventSource.cs b/src/mscorlib/src/System/Diagnostics/Eventing/EventSource.cs
index c6c3de88c5..d141690dee 100644
--- a/src/mscorlib/src/System/Diagnostics/Eventing/EventSource.cs
+++ b/src/mscorlib/src/System/Diagnostics/Eventing/EventSource.cs
@@ -1,5 +1,6 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
// This program uses code hyperlinks available as part of the HyperAddin Visual Studio plug-in.
// It is available from http://www.codeplex.com/hyperAddin
#if !PLATFORM_UNIX
@@ -17,6 +18,161 @@
// #define FEATURE_ADVANCED_MANAGED_ETW_CHANNELS
#endif
+/* DESIGN NOTES DESIGN NOTES DESIGN NOTES DESIGN NOTES */
+// DESIGN NOTES
+// Over the years EventSource has become more complex and so it is important to understand
+// the basic structure of the code to ensure that it does not grow more complex.
+//
+// Basic Model
+//
+// PRINCIPLE: EventSource - ETW decoupling
+//
+// Conceptually an EventSource is something that takes event logging data from the source methods
+// to the EventListeners that can subscribe to it. Note that CONCEPTUALLY EVENTSOURCES DON'T
+// KNOW ABOUT ETW! The MODEL of the system is that there is a special EventListener, which
+// we will call the EtwEventListener, that forwards commands from ETW to the EventSources and
+// forwards the events from the EventSources on to ETW. Thus the model should
+// be that you DON'T NEED ETW.
+//
+// Now in actual practice, EventSources have rather intimate knowledge of ETW and send events
+// to it directly, but this can be VIEWED AS AN OPTIMIZATION.
+//
+// Basic Event Data Flow:
+//
+// There are two ways for event Data to enter the system
+// 1) WriteEvent* and friends. This is called the 'contract' based approach because
+//        you write a method per event which forms a contract that is known at compile time.
+//        In this scheme each event is given an EVENTID (a small integer), which is its identity.
+// 2) Write<T> methods. This is called the 'dynamic' approach because new events
+// can be created on the fly. Event identity is determined by the event NAME, and these
+// are not quite as efficient at runtime since you have at least a hash table lookup
+// on every event write.
+//
+// EventSource-EventListener transfer fully supports both ways of writing events (either contract
+// based (WriteEvent*) or dynamic (Write<T>)). Both ways fully support the same set of data
+// types. It is suggested, however, that you use the contract based approach when the event scheme
+// is known at compile time (that is whenever possible). It is more efficient, but more importantly
+// it makes the contract very explicit, and centralizes all policy about logging. These are good
+// things. The Write<T> API is really meant for more ad-hoc scenarios.
+//
+// Allowed Data.
+//
+// Note that EventSource-EventListeners have a conceptual serialization-deserialization that happens
+// during the transfer. In particular object identity is not preserved, some objects are morphed,
+// and not all data types are supported. In particular you can pass
+//
+// A Valid type to log to an EventSource include
+// * Primitive data types
+// * IEnumerable<T> of valid types T (this include arrays) (* New for V4.6)
+// * Explicitly Opted in class or struct with public property Getters over Valid types. (* New for V4.6)
+//
+// This set of types is roughly a generalization of JSON support (Basically primitives, bags, and arrays).
+//
+// Explicitly allowed structs include (* New for V4.6)
+// * Marked with the EventData attribute
+//     * implicitly defined (e.g. the C# new {x = 3, y = 5} syntax)
+// * KeyValuePair<K,V> (thus dictionaries can be passed since they are an IEnumerable of KeyValuePair)
+//
+// When classes are returned in an EventListener, what is returned is something that implements
+// IDictionary<string, T>. Thus when objects are passed to an EventSource they are transformed
+// into a key-value bag (the IDictionary<string, T>) for consumption in the listener. These
+// are obviously NOT the original objects.
+//
+// ETWserialization formats:
+//
+// As mentioned conceptually EventSource's send data to EventListeners and there is a conceptual
+// copy/morph of that data as described above. In addition the .NET framework supports a conceptual
+// ETWListener that will send the data to the ETW stream. If you use this feature, the data needs
+// to be serialized in a way that ETW supports. ETW supports the following serialization formats
+//
+// 1) Manifest Based serialization.
+// 2) SelfDescribing serialization (TraceLogging style in the TraceLogging directory)
+//
+// A key factor is that the Write<T> method, which support on the fly definition of events, can't
+// support the manifest based serialization because the manifest needs the schema of all events
+// to be known before any events are emitted. This implies the following
+//
+// If you use Write<T> and the output goes to ETW it will use the SelfDescribing format.
+// If you use the EventSource(string) constructor for an eventSource (in which you don't
+// create a subclass), the default is also to use Self-Describing serialization. In addition
+// you can use the EventSource(EventSourceSettings) constructor to also explicitly specify the
+// Self-Describing serialization format. These affect the WriteEvent* APIs going to ETW.
+//
+// Note that none of this ETW serialization logic affects EventListeners. Only the ETW listener.
+//
+// *************************************************************************************
+// *** INTERNALS: Event Propagation
+//
+// Data enters the system either though
+//
+// 1) A user defined method in the user defined subclass of EventSource which calls
+// A) A typesafe type specific overload of WriteEvent(ID, ...) e.g. WriteEvent(ID, string, string)
+// * which calls into the unsafe WriteEventCore(ID COUNT EventData*) WriteEventWithRelatedActivityIdCore()
+// B) The typesafe overload WriteEvent(ID, object[]) which calls the private helper WriteEventVarargs(ID, Guid* object[])
+// C) Directly into the unsafe WriteEventCore(ID, COUNT EventData*) or WriteEventWithRelatedActivityIdCore()
+//
+// All event data eventually flows to one of
+// * WriteEventWithRelatedActivityIdCore(ID, Guid*, COUNT, EventData*)
+// * WriteEventVarargs(ID, Guid*, object[])
+//
+// 2) A call to one of the overloads of Write<T>. All these overloads end up in
+// * WriteImpl<T>(EventName, Options, Data, Guid*, Guid*)
+//
+// On output there are the following routines
+// Writing to all listeners that are NOT ETW, we have the following routines
+// * WriteToAllListeners(ID, Guid*, COUNT, EventData*)
+// * WriteToAllListeners(ID, Guid*, object[])
+// * WriteToAllListeners(NAME, Guid*, EventPayload)
+//
+// EventPayload is the internal type that implements the IDictionary<string, object> interface
+// The EventListeners will pass back for serialized classes for nested object, but
+// WriteToAllListeners(NAME, Guid*, EventPayload) unpacks this uses the fields as if they
+// were parameters to a method.
+//
+// The first two are used for the WriteEvent* case, and the later is used for the Write<T> case.
+//
+// Writing to ETW, Manifest Based
+// EventProvider.WriteEvent(EventDescriptor, Guid*, COUNT, EventData*)
+// EventProvider.WriteEvent(EventDescriptor, Guid*, object[])
+// Writing to ETW, Self-Describing format
+// WriteMultiMerge(NAME, Options, Types, EventData*)
+// WriteMultiMerge(NAME, Options, Types, object[])
+// WriteImpl<T> has logic that knows how to serialize (like WriteMultiMerge) but also knows
+// where it will write it to.
+//
+// All ETW writes eventually call
+// EventWriteTransfer (native PINVOKE wrapper)
+// EventWriteTransferWrapper (fixes compat problem if you pass null as the related activityID)
+// EventProvider.WriteEventRaw - sets last error
+// EventSource.WriteEventRaw - Does EventSource exception handling logic
+// WriteMultiMerge
+// WriteImpl<T>
+// EventProvider.WriteEvent(EventDescriptor, Guid*, COUNT, EventData*)
+// EventProvider.WriteEvent(EventDescriptor, Guid*, object[])
+//
+// Serialization: We have a bit of a hodge-podge of serializers right now. Only the one for ETW knows
+// how to deal with nested classes or arrays. I will call this serializer the 'TypeInfo' serializer
+// since it is the TraceLoggingTypeInfo structure that knows how to do this. Effectively for a type you
+// can call one of these
+// WriteMetadata - transforms the type T into serialization meta data blob for that type
+// WriteObjectData - transforms an object of T into serialization meta data blob for that type
+// GetData - transforms an object of T into its deserialized form suitable for passing to EventListener.
+// The first two are used to serialize something for ETW. The second one is used to transform the object
+// for use by the EventListener. We also have a 'DecodeObject' method that will take a EventData* and
+// deserialize to pass to an EventListener, but it only works on primitive types (types supported in version V4.5).
+//
+// It is an important observation that while EventSource does support users directly calling with EventData*
+// blobs, we ONLY support that for the primitive types (V4.5 level support). Thus while there is a EventData*
+// path through the system it is only for some types. The object[] path is the more general (but less efficient) path.
+//
+// TODO There is cleanup needed There should be no divergence until WriteEventRaw.
+//
+// TODO: We should have a single choke point (right now we always have this parallel EventData* and object[] path. This
+// was historical (at one point we tried to pass objects directly from EventSource to EventListener). That was always
+// fragile and a compatibility headache, but we have finally been forced into the idea that there is always a transformation.
+// This allows us to use the EventData* form to be the canonical data format in the low level APIs. This also gives us the
+// opportunity to expose this format to EventListeners in the future.
+//
using System;
#if FEATURE_ACTIVITYSAMPLING
using System.Collections.Concurrent;
@@ -57,7 +213,7 @@ namespace System.Diagnostics.Tracing
{
/// <summary>
/// This class is meant to be inherited by a user-defined event source in order to define a managed
- /// ETW provider.
+ /// ETW provider. Please See DESIGN NOTES above for the internal architecture.
/// The minimal definition of an EventSource simply specifies a number of ETW event methods that
/// call one of the EventSource.WriteEvent overloads, <see cref="EventSource.WriteEventCore"/>,
/// or <see cref="EventSource.WriteEventWithRelatedActivityIdCore"/> to log them. This functionality
@@ -402,7 +558,7 @@ namespace System.Diagnostics.Tracing
}
#endif // FEATURE_MANAGED_ETW
if (System.Threading.Tasks.TplEtwProvider.Log != null)
- System.Threading.Tasks.TplEtwProvider.Log.SetActivityId(activityId);
+ System.Threading.Tasks.TplEtwProvider.Log.SetActivityId(activityId);
}
/// <summary>
@@ -439,7 +595,7 @@ namespace System.Diagnostics.Tracing
// We don't call the activityDying callback here because the caller has declared that
// it is not dying.
if (System.Threading.Tasks.TplEtwProvider.Log != null)
- System.Threading.Tasks.TplEtwProvider.Log.SetActivityId(activityId);
+ System.Threading.Tasks.TplEtwProvider.Log.SetActivityId(activityId);
}
/// <summary>
@@ -581,7 +737,6 @@ namespace System.Diagnostics.Tracing
protected EventSource(EventSourceSettings settings, params string[] traits)
{
m_config = ValidateSettings(settings);
- Contract.Assert(m_lastCommandException == null);
var myType = this.GetType();
Initialize(GetGuid(myType), GetName(myType), traits);
}
@@ -937,7 +1092,7 @@ namespace System.Diagnostics.Tracing
internal long m_Ptr;
internal int m_Size;
#pragma warning disable 0649
- internal int m_Reserved; // Used to pad the size to match the Win32 API
+ internal int m_Reserved; // Used to pad the size to match the Win32 API
#pragma warning restore 0649
#endregion
}
@@ -1087,13 +1242,12 @@ namespace System.Diagnostics.Tracing
if (tlet == null)
{
tlet = new TraceLoggingEventTypes(m_eventData[eventId].Name,
- EventTags.None,
+ EventTags.None,
m_eventData[eventId].Parameters);
Interlocked.CompareExchange(ref m_eventData[eventId].TraceLoggingEventTypes, tlet, null);
}
long origKwd = unchecked((long)((ulong)m_eventData[eventId].Descriptor.Keywords & ~(SessionMask.All.ToEventKeywords())));
- // TODO: activity ID support
EventSourceOptions opt = new EventSourceOptions
{
Keywords = (EventKeywords)unchecked((long)etwSessions.ToEventKeywords() | origKwd),
@@ -1115,11 +1269,11 @@ namespace System.Diagnostics.Tracing
TraceLoggingEventTypes tlet = m_eventData[eventId].TraceLoggingEventTypes;
if (tlet == null)
{
- tlet = new TraceLoggingEventTypes(m_eventData[eventId].Name,
- m_eventData[eventId].Tags,
+ tlet = new TraceLoggingEventTypes(m_eventData[eventId].Name,
+ m_eventData[eventId].Tags,
m_eventData[eventId].Parameters);
Interlocked.CompareExchange(ref m_eventData[eventId].TraceLoggingEventTypes, tlet, null);
-
+
}
EventSourceOptions opt = new EventSourceOptions
{
@@ -1242,8 +1396,7 @@ namespace System.Diagnostics.Tracing
{
if (listener == null)
{
- string eventName = "EventActivityInfo";
- WriteEventString(0, unchecked((long)m.ToEventKeywords()), eventName, msg);
+ WriteEventString(0, unchecked((long)m.ToEventKeywords()), msg);
}
else
{
@@ -1278,33 +1431,6 @@ namespace System.Diagnostics.Tracing
#endif // FEATURE_MANAGED_ETW
}
-
- /// <summary>
- /// Best effort method to notify all listeners (ETW or otherwise) of a "message" of interest.
- /// Since this is a means of reporting errors (see ReportoutOfBandMessage) any failure encountered
- /// while writing the message to any one of the listeners will be silently ignored.
- /// </summary>
- internal void WriteString(string msg, SessionMask m, bool isError)
- {
-#if FEATURE_MANAGED_ETW
- if (m_provider != null)
- {
- var eventName = isError ? "EventSourceErrorMessage" : "EventSourceMessage";
- WriteEventString(0, unchecked((long)m.ToEventKeywords()), eventName, msg);
- WriteStringToAllListeners(eventName, msg);
- }
-#endif // FEATURE_MANAGED_ETW
- }
- /// <summary>
- /// Best effort method to notify all listeners (ETW or otherwise) of a "message" of interest.
- /// Since this is a means of reporting errors (see ReportoutOfBandMessage) any failure encountered
- /// while writing the message to any one of the listeners will be silently ignored.
- /// </summary>
- internal void WriteString(string msg, bool isError = true)
- {
- WriteString(msg, SessionMask.All, isError);
- }
-
// FrameworkEventSource is on the startup path for the framework, so we have this internal overload that it can use
// to prevent the working set hit from looking at the custom attributes on the type to get the Guid.
internal EventSource(Guid eventSourceGuid, string eventSourceName)
@@ -1328,91 +1454,54 @@ namespace System.Diagnostics.Tracing
[SecuritySafeCritical]
private unsafe void Initialize(Guid eventSourceGuid, string eventSourceName, string[] traits)
{
- m_traits = traits;
- if (m_traits != null && m_traits.Length % 2 != 0)
- throw new ArgumentException(Environment.GetResourceString("TraitEven"), "traits");
-
- if (eventSourceGuid == Guid.Empty)
+ try
{
- // this will go to the debugger only, as m_provider is still null
- ReportOutOfBandMessage(Environment.GetResourceString("EventSource_NeedGuid"), true);
- return;
- }
+ m_traits = traits;
+ if (m_traits != null && m_traits.Length % 2 != 0)
+ throw new ArgumentException(Environment.GetResourceString("TraitEven"), "traits");
- if (eventSourceName == null)
- {
- // this will go to the debugger only, as m_provider is still null
- ReportOutOfBandMessage(Environment.GetResourceString("EventSource_NeedName"), true);
- return;
- }
+ if (eventSourceGuid == Guid.Empty)
+ throw new ArgumentException(Environment.GetResourceString("EventSource_NeedGuid"));
+
+ if (eventSourceName == null)
+ throw new ArgumentException(Environment.GetResourceString("EventSource_NeedName"));
- m_name = eventSourceName;
- m_guid = eventSourceGuid;
+ m_name = eventSourceName;
+ m_guid = eventSourceGuid;
#if FEATURE_ACTIVITYSAMPLING
- m_curLiveSessions = new SessionMask(0);
- m_etwSessionIdMap = new EtwSession[SessionMask.MAX];
+ m_curLiveSessions = new SessionMask(0);
+ m_etwSessionIdMap = new EtwSession[SessionMask.MAX];
#endif // FEATURE_ACTIVITYSAMPLING
-
- //Enable Implicit Activity tracker
- m_activityTracker = ActivityTracker.Instance;
+ //Enable Implicit Activity tracker
+ m_activityTracker = ActivityTracker.Instance;
#if FEATURE_MANAGED_ETW
- m_provider = new OverideEventProvider(this);
+ // Create and register our provider traits. We do this early because it is needed to log errors
+ // In the self-describing event case.
+ this.InitializeProviderMetadata();
- try
- {
- m_provider.Register(eventSourceGuid);
- }
- catch (Exception e)
- {
- // Failed to register. Don't crash the app, just don't write events to ETW.
- m_provider = null;
- if (m_constructionException == null)
- m_constructionException = e;
- }
+ // Register the provider with ETW
+ var provider = new OverideEventProvider(this);
+ provider.Register(eventSourceGuid);
#endif
- // AddEventSource may end up calling in user code (e.g. the event listener may call
- // EnableEvents, or other code, that may throw). We swallow the exception and report
- // it below in ReportOutOfBandMessage()
- try
- {
- // Add the eventSource to the global (weak) list. This also sets m_id, which is the
- // index in the list.
+ // Add the eventSource to the global (weak) list.
+ // This also sets m_id, which is the index in the list.
EventListener.AddEventSource(this);
- }
- catch (Exception e)
- {
- if (m_constructionException == null)
- m_constructionException = e;
- }
-
- // lastCommandExceptions are usually more interesting than the other exceptions
- // we may have caught along the way
- if (m_lastCommandException != null)
- m_constructionException = m_lastCommandException;
-
- if (m_constructionException != null)
- {
- Contract.Assert(m_eventSourceEnabled == false);
- ReportOutOfBandMessage("ERROR: Exception during construction of EventSource " + Name + ": "
- + m_constructionException.Message, false);
- m_eventSourceEnabled = false; // This is insurance, it should still be off.
- }
#if FEATURE_MANAGED_ETW
- if (m_provider != null)
- {
+ // OK if we get this far without an exception, then we can at least write out error messages.
+ // Set m_provider, which allows this.
+ m_provider = provider;
+#endif
+
#if !ES_BUILD_STANDALONE
// API available on OS >= Win 8 and patched Win 7.
- // Disable only for FrameworkEventSource to avoid recursion inside
- // exception handling.
+ // Disable only for FrameworkEventSource to avoid recursion inside exception handling.
var osVer = Environment.OSVersion.Version.Major * 10 + Environment.OSVersion.Version.Minor;
if (this.Name != "System.Diagnostics.Eventing.FrameworkEventSource" || osVer >= 62)
#endif
{
- // Create and register our provider traits:
- this.InitializeProviderMetadata();
int setInformationResult;
fixed (void* providerMetadata = this.providerMetadata)
{
@@ -1422,36 +1511,29 @@ namespace System.Diagnostics.Tracing
this.providerMetadata.Length);
}
}
- }
-#endif // FEATURE_MANAGED_ETW
-
- // report any possible errors
- ReportOutOfBandMessage(null, true);
-
- // We are logically completely initialized at this point.
- m_completelyInited = true;
-#if FEATURE_ACTIVITYSAMPLING
- // we cue sending sampling info here based on whether we had to defer sending
- // the manifest
- // note: we do *not* send sampling info to any EventListeners because
- // the following common code pattern would cause an AV:
- // class MyEventSource: EventSource
- // {
- // public static EventSource Log;
- // }
- // class MyEventListener: EventListener
- // {
- // protected override void OnEventWritten(...)
- // { MyEventSource.Log.anything; } <-- AV, as the static Log was not set yet
- // }
- if (m_eventSourceEnabled && m_deferedSendManifest)
- ReportActivitySamplingInfo(null, m_curLiveSessions);
-#endif // FEATURE_ACTIVITYSAMPLING
+ Contract.Assert(!m_eventSourceEnabled); // We can't be enabled until we are completely initted.
+ // We are logically completely initialized at this point.
+ m_completelyInited = true;
+ }
+ catch (Exception e)
+ {
+ if (m_constructionException == null)
+ m_constructionException = e;
+ ReportOutOfBandMessage("ERROR: Exception during construction of EventSource " + Name + ": " + e.Message, true);
+ }
- // If we are active and we have not sent our manifest, do so now.
- if (m_eventSourceEnabled && !SelfDescribingEvents && m_deferedSendManifest)
- SendManifest(m_rawManifest);
+ // Once m_completelyInited is set, you can have concurrency, so all work is under the lock.
+ lock (EventListener.EventListenersLock)
+ {
+ // If there are any deferred commands, we can do them now.
+ // This is the most likely place for exceptions to happen.
+ while (m_deferredCommands != null)
+ {
+ DoCommand(m_deferredCommands); // This can never throw, it catches them and reports the errors.
+ m_deferredCommands = m_deferredCommands.nextCommand;
+ }
+ }
}
private static string GetName(Type eventSourceType, EventManifestOptions flags)
@@ -1897,6 +1979,7 @@ namespace System.Diagnostics.Tracing
Interlocked.CompareExchange(ref m_eventData[eventId].TraceLoggingEventTypes, tlet, null);
}
+ // TODO: activity ID support
EventSourceOptions opt = new EventSourceOptions
{
Keywords = (EventKeywords)m_eventData[eventId].Descriptor.Keywords,
@@ -1912,8 +1995,8 @@ namespace System.Diagnostics.Tracing
if (m_Dispatchers != null && m_eventData[eventId].EnabledForAnyListener)
{
#if !ES_BUILD_STANDALONE
- // Mentain old behavoir - object identity is preserved
- if (!System.Runtime.Versioning.BinaryCompatibility.TargetsAtLeast_Desktop_V4_5_3)
+ // Maintain old behavior - object identity is preserved
+ if (AppContextSwitches.PreserveEventListnerObjectIdentity)
{
WriteToAllListeners(eventId, childActivityID, args);
}
@@ -1986,7 +2069,7 @@ namespace System.Diagnostics.Tracing
DisptachToAllListeners(eventId, childActivityID, eventCallbackArgs);
}
-
+
[SecurityCritical]
private unsafe void DisptachToAllListeners(int eventId, Guid* childActivityID, EventWrittenEventArgs eventCallbackArgs)
{
@@ -2031,19 +2114,47 @@ namespace System.Diagnostics.Tracing
[SecuritySafeCritical]
[SuppressMessage("Microsoft.Concurrency", "CA8001", Justification = "This does not need to be correct when racing with other threads")]
- private unsafe void WriteEventString(EventLevel level, long keywords, string eventName, string msgString)
+ private unsafe void WriteEventString(EventLevel level, long keywords, string msgString)
{
#if FEATURE_MANAGED_ETW
if (m_provider != null)
{
- EventSourceOptions opt = new EventSourceOptions
+ string eventName = "EventSourceMessage";
+ if (SelfDescribingEvents)
+ {
+ EventSourceOptions opt = new EventSourceOptions
+ {
+ Keywords = (EventKeywords)unchecked(keywords),
+ Level = level
+ };
+ var msg = new { message = msgString };
+ var tlet = new TraceLoggingEventTypes(eventName, EventTags.None, new Type[] { msg.GetType() });
+ WriteMultiMergeInner(eventName, ref opt, tlet, null, null, msg);
+ }
+ else
{
- Keywords = (EventKeywords)unchecked(keywords),
- Level = level
- };
- var msg = new { message = msgString };
- var tlet = new TraceLoggingEventTypes(eventName, EventTags.None, new Type[] { msg.GetType() });
- WriteMultiMergeInner(eventName, ref opt, tlet, null, null, msg);
+                    // We want the name of the provider to show up, so if we don't have a manifest we create
+                    // one that at least has the provider name (it doesn't define any events).
+ if (m_rawManifest == null && m_outOfBandMessageCount == 1)
+ {
+ ManifestBuilder manifestBuilder = new ManifestBuilder(Name, Guid, Name, null, EventManifestOptions.None);
+ manifestBuilder.StartEvent(eventName, new EventAttribute(0) { Level = EventLevel.LogAlways, Task = (EventTask)0xFFFE });
+ manifestBuilder.AddEventParameter(typeof(string), "message");
+ manifestBuilder.EndEvent();
+ SendManifest(manifestBuilder.CreateManifest());
+ }
+
+                    // We use this low level routine to bypass the enabled checking, since the eventSource itself is only partially initialized.
+ fixed (char* msgStringPtr = msgString)
+ {
+ EventDescriptor descr = new EventDescriptor(0, 0, 0, (byte)level, 0, 0, keywords);
+ EventProvider.EventData data = new EventProvider.EventData();
+ data.Ptr = (ulong)msgStringPtr;
+ data.Size = (uint)(2 * (msgString.Length + 1));
+ data.Reserved = 0;
+ m_provider.WriteEvent(ref descr, null, null, 1, (IntPtr)((void*)&data));
+ }
+ }
}
#endif // FEATURE_MANAGED_ETW
}
@@ -2057,24 +2168,30 @@ namespace System.Diagnostics.Tracing
EventWrittenEventArgs eventCallbackArgs = new EventWrittenEventArgs(this);
eventCallbackArgs.EventId = 0;
eventCallbackArgs.Message = msg;
+ eventCallbackArgs.Payload = new ReadOnlyCollection<object>(new List<object>() { msg });
+ eventCallbackArgs.PayloadNames = new ReadOnlyCollection<string>(new List<string> { "message" });
eventCallbackArgs.EventName = eventName;
for (EventDispatcher dispatcher = m_Dispatchers; dispatcher != null; dispatcher = dispatcher.m_Next)
{
- // skip listeners that weren't correctly initialized
+ bool dispatcherEnabled = false;
if (dispatcher.m_EventEnabled == null)
{
- continue;
+ // if the listener wasn't correctly initialized, we will still send to it,
+ // since this is an error message and we want to see it go out.
+ dispatcherEnabled = true;
}
- // if there's *any* enabled event on the dispatcher we'll write out the string
- // otherwise we'll treat the listener as disabled and skip it
- bool dispatcherEnabled = false;
- for (int evtId = 0; evtId < dispatcher.m_EventEnabled.Length; ++evtId)
+ else
{
- if (dispatcher.m_EventEnabled[evtId])
+ // if there's *any* enabled event on the dispatcher we'll write out the string
+ // otherwise we'll treat the listener as disabled and skip it
+ for (int evtId = 0; evtId < dispatcher.m_EventEnabled.Length; ++evtId)
{
- dispatcherEnabled = true;
- break;
+ if (dispatcher.m_EventEnabled[evtId])
+ {
+ dispatcherEnabled = true;
+ break;
+ }
}
}
try
@@ -2241,7 +2358,6 @@ namespace System.Diagnostics.Tracing
{
m_EventSourceExceptionRecurenceCount--;
}
-
}
private void ValidateEventOpcodeForTransfer(ref EventMetadata eventData)
@@ -2263,7 +2379,7 @@ namespace System.Diagnostics.Tracing
}
else if (eventName.EndsWith(s_ActivityStopSuffix))
{
- return EventOpcode.Stop;
+ return EventOpcode.Stop;
}
}
@@ -2349,253 +2465,257 @@ namespace System.Diagnostics.Tracing
EventLevel level, EventKeywords matchAnyKeyword,
IDictionary<string, string> commandArguments)
{
- m_lastCommandException = null;
- bool shouldReport = (perEventSourceSessionId > 0) && (perEventSourceSessionId <= SessionMask.MAX);
+ var commandArgs = new EventCommandEventArgs(command, commandArguments, this, listener, perEventSourceSessionId, etwSessionId, enable, level, matchAnyKeyword);
+ lock (EventListener.EventListenersLock)
+ {
+ if (m_completelyInited) // We are fully initialized, do the command
+ DoCommand(commandArgs);
+ else
+ {
+ // We can't do the command yet, so simply remember it and do it when we are fully constructed.
+ commandArgs.nextCommand = m_deferredCommands;
+ m_deferredCommands = commandArgs;
+ }
+ }
+ }
+
+ /// <summary>
+ /// We want the eventSource to be fully initialized when we do commands because that way we can send
+ /// error messages and other logging directly to the event stream. Unfortunately we can get callbacks
+ /// when we are not fully initialized. In that case we store them in 'commandArgs' and do them later.
+ /// This helper does the actual command logic.
+ /// </summary>
+ internal void DoCommand(EventCommandEventArgs commandArgs)
+ {
+ // PRECONDITION: We should be holding the EventListener.EventListenersLock
+ // We defer commands until we are completely inited. This allows error messages to be sent.
+ Contract.Assert(m_completelyInited);
+ if (m_provider == null) // If we failed to construct
+ return;
+
+ m_outOfBandMessageCount = 0;
+ bool shouldReport = (commandArgs.perEventSourceSessionId > 0) && (commandArgs.perEventSourceSessionId <= SessionMask.MAX);
try
{
- lock (EventListener.EventListenersLock)
- {
- EnsureInitialized();
- Contract.Assert(m_eventData != null);
+ EnsureDescriptorsInitialized();
+ Contract.Assert(m_eventData != null);
- // Find the per-EventSource dispatcher corresponding to registered dispatcher
- EventDispatcher eventSourceDispatcher = GetDispatcher(listener);
- if (eventSourceDispatcher == null && listener != null) // dispatcher == null means ETW dispatcher
- throw new ArgumentException(Environment.GetResourceString("EventSource_ListenerNotFound"));
+ // Find the per-EventSource dispatcher corresponding to registered dispatcher
+ commandArgs.dispatcher = GetDispatcher(commandArgs.listener);
+ if (commandArgs.dispatcher == null && commandArgs.listener != null) // dispatcher == null means ETW dispatcher
+ throw new ArgumentException(Environment.GetResourceString("EventSource_ListenerNotFound"));
- if (commandArguments == null)
- commandArguments = new Dictionary<string, string>();
+ if (commandArgs.Arguments == null)
+ commandArgs.Arguments = new Dictionary<string, string>();
- if (command == EventCommand.Update)
- {
- // Set it up using the 'standard' filtering bitfields (use the "global" enable, not session specific one)
- for (int i = 0; i < m_eventData.Length; i++)
- EnableEventForDispatcher(eventSourceDispatcher, i, IsEnabledByDefault(i, enable, level, matchAnyKeyword));
+ if (commandArgs.Command == EventCommand.Update)
+ {
+ // Set it up using the 'standard' filtering bitfields (use the "global" enable, not session specific one)
+ for (int i = 0; i < m_eventData.Length; i++)
+ EnableEventForDispatcher(commandArgs.dispatcher, i, IsEnabledByDefault(i, commandArgs.enable, commandArgs.level, commandArgs.matchAnyKeyword));
- if (enable)
+ if (commandArgs.enable)
+ {
+ if (!m_eventSourceEnabled)
{
- if (!m_eventSourceEnabled)
- {
- // EventSource turned on for the first time, simply copy the bits.
- m_level = level;
- m_matchAnyKeyword = matchAnyKeyword;
- }
- else
- {
- // Already enabled, make it the most verbose of the existing and new filter
- if (level > m_level)
- m_level = level;
- if (matchAnyKeyword == 0)
- m_matchAnyKeyword = 0;
- else if (m_matchAnyKeyword != 0)
- m_matchAnyKeyword = unchecked(m_matchAnyKeyword | matchAnyKeyword);
- }
+ // EventSource turned on for the first time, simply copy the bits.
+ m_level = commandArgs.level;
+ m_matchAnyKeyword = commandArgs.matchAnyKeyword;
}
-
- // interpret perEventSourceSessionId's sign, and adjust perEventSourceSessionId to
- // represent 0-based positive values
- bool bSessionEnable = (perEventSourceSessionId >= 0);
- if (perEventSourceSessionId == 0 && enable == false)
- bSessionEnable = false;
-
- if (listener == null)
+ else
{
- if (!bSessionEnable)
- perEventSourceSessionId = -perEventSourceSessionId;
- // for "global" enable/disable (passed in with listener == null and
- // perEventSourceSessionId == 0) perEventSourceSessionId becomes -1
- --perEventSourceSessionId;
+ // Already enabled, make it the most verbose of the existing and new filter
+ if (commandArgs.level > m_level)
+ m_level = commandArgs.level;
+ if (commandArgs.matchAnyKeyword == 0)
+ m_matchAnyKeyword = 0;
+ else if (m_matchAnyKeyword != 0)
+ m_matchAnyKeyword = unchecked(m_matchAnyKeyword | commandArgs.matchAnyKeyword);
}
+ }
- command = bSessionEnable ? EventCommand.Enable : EventCommand.Disable;
+ // interpret perEventSourceSessionId's sign, and adjust perEventSourceSessionId to
+ // represent 0-based positive values
+ bool bSessionEnable = (commandArgs.perEventSourceSessionId >= 0);
+ if (commandArgs.perEventSourceSessionId == 0 && commandArgs.enable == false)
+ bSessionEnable = false;
- // perEventSourceSessionId = -1 when ETW sent a notification, but the set of active sessions
- // hasn't changed.
- // sesisonId = SessionMask.MAX when one of the legacy ETW sessions changed
- // 0 <= perEventSourceSessionId < SessionMask.MAX for activity-tracing aware sessions
- Contract.Assert(perEventSourceSessionId >= -1 && perEventSourceSessionId <= SessionMask.MAX);
+ if (commandArgs.listener == null)
+ {
+ if (!bSessionEnable)
+ commandArgs.perEventSourceSessionId = -commandArgs.perEventSourceSessionId;
+ // for "global" enable/disable (passed in with listener == null and
+ // perEventSourceSessionId == 0) perEventSourceSessionId becomes -1
+ --commandArgs.perEventSourceSessionId;
+ }
- // Send the manifest if we are enabling an ETW session
- if (bSessionEnable && eventSourceDispatcher == null)
- {
- // eventSourceDispatcher == null means this is the ETW manifest
-
- // SendCommand can be called from the EventSource constructor as a side effect of
- // ETW registration. Unfortunately when this callback is active the provider is
- // not actually enabled (WriteEvents will fail). Thus if we detect this condition
- // (that we are still being constructed), we simply skip sending the manifest.
- // When the constructor completes we will try again and send the manifest at that time.
- //
- // Note that we unconditionally send the manifest whenever we are enabled, even if
- // we were already enabled. This is because there may be multiple sessions active
- // and we can't know that all the sessions have seen the manifest.
- if (m_completelyInited && !SelfDescribingEvents)
- SendManifest(m_rawManifest);
- else
- m_deferedSendManifest = true;
- }
+ commandArgs.Command = bSessionEnable ? EventCommand.Enable : EventCommand.Disable;
+
+ // perEventSourceSessionId = -1 when ETW sent a notification, but the set of active sessions
+ // hasn't changed.
+ // sessionId = SessionMask.MAX when one of the legacy ETW sessions changed
+ // 0 <= perEventSourceSessionId < SessionMask.MAX for activity-tracing aware sessions
+ Contract.Assert(commandArgs.perEventSourceSessionId >= -1 && commandArgs.perEventSourceSessionId <= SessionMask.MAX);
+
+ // Send the manifest if we are enabling an ETW session
+ if (bSessionEnable && commandArgs.dispatcher == null)
+ {
+ // eventSourceDispatcher == null means this is the ETW manifest
+
+ // Note that we unconditionally send the manifest whenever we are enabled, even if
+ // we were already enabled. This is because there may be multiple sessions active
+ // and we can't know that all the sessions have seen the manifest.
+ if (!SelfDescribingEvents)
+ SendManifest(m_rawManifest);
+ }
#if FEATURE_ACTIVITYSAMPLING
- if (bSessionEnable && perEventSourceSessionId != -1)
- {
- bool participateInSampling = false;
- string activityFilters;
- int sessionIdBit;
+ if (bSessionEnable && commandArgs.perEventSourceSessionId != -1)
+ {
+ bool participateInSampling = false;
+ string activityFilters;
+ int sessionIdBit;
- ParseCommandArgs(commandArguments, out participateInSampling,
+ ParseCommandArgs(commandArgs.Arguments, out participateInSampling,
out activityFilters, out sessionIdBit);
- if (listener == null && commandArguments.Count > 0 && perEventSourceSessionId != sessionIdBit)
- {
- throw new ArgumentException(Environment.GetResourceString("EventSource_SessionIdError",
- perEventSourceSessionId + SessionMask.SHIFT_SESSION_TO_KEYWORD,
- sessionIdBit + SessionMask.SHIFT_SESSION_TO_KEYWORD));
- }
+ if (commandArgs.listener == null && commandArgs.Arguments.Count > 0 && commandArgs.perEventSourceSessionId != sessionIdBit)
+ {
+ throw new ArgumentException(Environment.GetResourceString("EventSource_SessionIdError",
+ commandArgs.perEventSourceSessionId + SessionMask.SHIFT_SESSION_TO_KEYWORD,
+ sessionIdBit + SessionMask.SHIFT_SESSION_TO_KEYWORD));
+ }
- if (listener == null)
- {
- UpdateEtwSession(perEventSourceSessionId, etwSessionId, true, activityFilters, participateInSampling);
- }
- else
- {
- ActivityFilter.UpdateFilter(ref listener.m_activityFilter, this, 0, activityFilters);
- eventSourceDispatcher.m_activityFilteringEnabled = participateInSampling;
- }
+ if (commandArgs.listener == null)
+ {
+ UpdateEtwSession(commandArgs.perEventSourceSessionId, commandArgs.etwSessionId, true, activityFilters, participateInSampling);
}
- else if (!bSessionEnable && listener == null)
+ else
{
- // if we disable an ETW session, indicate that in a synthesized command argument
- if (perEventSourceSessionId >= 0 && perEventSourceSessionId < SessionMask.MAX)
- {
- commandArguments["EtwSessionKeyword"] = (perEventSourceSessionId + SessionMask.SHIFT_SESSION_TO_KEYWORD).ToString(CultureInfo.InvariantCulture);
- }
+ ActivityFilter.UpdateFilter(ref commandArgs.listener.m_activityFilter, this, 0, activityFilters);
+ commandArgs.dispatcher.m_activityFilteringEnabled = participateInSampling;
}
-#endif // FEATURE_ACTIVITYSAMPLING
-
- if (enable)
+ }
+ else if (!bSessionEnable && commandArgs.listener == null)
+ {
+ // if we disable an ETW session, indicate that in a synthesized command argument
+ if (commandArgs.perEventSourceSessionId >= 0 && commandArgs.perEventSourceSessionId < SessionMask.MAX)
{
- Contract.Assert(m_eventData != null);
- m_eventSourceEnabled = true;
+ commandArgs.Arguments["EtwSessionKeyword"] = (commandArgs.perEventSourceSessionId + SessionMask.SHIFT_SESSION_TO_KEYWORD).ToString(CultureInfo.InvariantCulture);
}
+ }
+#endif // FEATURE_ACTIVITYSAMPLING
+
+ // Turn on the enable bit before making the OnEventCommand callback This allows you to do useful
+ // things like log messages, or test if keywords are enabled in the callback.
+ if (commandArgs.enable)
+ {
+ Contract.Assert(m_eventData != null);
+ m_eventSourceEnabled = true;
+ }
- this.OnEventCommand(new EventCommandEventArgs(command, commandArguments, this, eventSourceDispatcher));
+ this.OnEventCommand(commandArgs);
#if FEATURE_ACTIVITYSAMPLING
- if (listener == null && !bSessionEnable && perEventSourceSessionId != -1)
- {
- // if we disable an ETW session, complete disabling it
- UpdateEtwSession(perEventSourceSessionId, etwSessionId, false, null, false);
- }
+ if (commandArgs.listener == null && !bSessionEnable && commandArgs.perEventSourceSessionId != -1)
+ {
+ // if we disable an ETW session, complete disabling it
+ UpdateEtwSession(commandArgs.perEventSourceSessionId, commandArgs.etwSessionId, false, null, false);
+ }
#endif // FEATURE_ACTIVITYSAMPLING
- if (!enable)
- {
- // If we are disabling, maybe we can turn on 'quick checks' to filter
- // quickly. These are all just optimizations (since later checks will still filter)
+ if (!commandArgs.enable)
+ {
+ // If we are disabling, maybe we can turn on 'quick checks' to filter
+ // quickly. These are all just optimizations (since later checks will still filter)
#if FEATURE_ACTIVITYSAMPLING
- // Turn off (and forget) any information about Activity Tracing.
- if (listener == null)
- {
- // reset all filtering information for activity-tracing-aware sessions
- for (int i = 0; i < SessionMask.MAX; ++i)
- {
- EtwSession etwSession = m_etwSessionIdMap[i];
- if (etwSession != null)
- ActivityFilter.DisableFilter(ref etwSession.m_activityFilter, this);
- }
- m_activityFilteringForETWEnabled = new SessionMask(0);
- m_curLiveSessions = new SessionMask(0);
- // reset activity-tracing-aware sessions
- if (m_etwSessionIdMap != null)
- for (int i = 0; i < SessionMask.MAX; ++i)
- m_etwSessionIdMap[i] = null;
- // reset legacy sessions
- if (m_legacySessions != null)
- m_legacySessions.Clear();
- }
- else
+ // Turn off (and forget) any information about Activity Tracing.
+ if (commandArgs.listener == null)
+ {
+ // reset all filtering information for activity-tracing-aware sessions
+ for (int i = 0; i < SessionMask.MAX; ++i)
{
- ActivityFilter.DisableFilter(ref listener.m_activityFilter, this);
- eventSourceDispatcher.m_activityFilteringEnabled = false;
+ EtwSession etwSession = m_etwSessionIdMap[i];
+ if (etwSession != null)
+ ActivityFilter.DisableFilter(ref etwSession.m_activityFilter, this);
}
+ m_activityFilteringForETWEnabled = new SessionMask(0);
+ m_curLiveSessions = new SessionMask(0);
+ // reset activity-tracing-aware sessions
+ if (m_etwSessionIdMap != null)
+ for (int i = 0; i < SessionMask.MAX; ++i)
+ m_etwSessionIdMap[i] = null;
+ // reset legacy sessions
+ if (m_legacySessions != null)
+ m_legacySessions.Clear();
+ }
+ else
+ {
+ ActivityFilter.DisableFilter(ref commandArgs.listener.m_activityFilter, this);
+ commandArgs.dispatcher.m_activityFilteringEnabled = false;
+ }
#endif // FEATURE_ACTIVITYSAMPLING
- // There is a good chance EnabledForAnyListener are not as accurate as
- // they could be, go ahead and get a better estimate.
- for (int i = 0; i < m_eventData.Length; i++)
+ // There is a good chance EnabledForAnyListener are not as accurate as
+ // they could be, go ahead and get a better estimate.
+ for (int i = 0; i < m_eventData.Length; i++)
+ {
+ bool isEnabledForAnyListener = false;
+ for (EventDispatcher dispatcher = m_Dispatchers; dispatcher != null; dispatcher = dispatcher.m_Next)
{
- bool isEnabledForAnyListener = false;
- for (EventDispatcher dispatcher = m_Dispatchers; dispatcher != null; dispatcher = dispatcher.m_Next)
+ if (dispatcher.m_EventEnabled[i])
{
- if (dispatcher.m_EventEnabled[i])
- {
- isEnabledForAnyListener = true;
- break;
- }
+ isEnabledForAnyListener = true;
+ break;
}
- m_eventData[i].EnabledForAnyListener = isEnabledForAnyListener;
}
+ m_eventData[i].EnabledForAnyListener = isEnabledForAnyListener;
+ }
- // If no events are enabled, disable the global enabled bit.
- if (!AnyEventEnabled())
- {
- m_level = 0;
- m_matchAnyKeyword = 0;
- m_eventSourceEnabled = false;
- }
+ // If no events are enabled, disable the global enabled bit.
+ if (!AnyEventEnabled())
+ {
+ m_level = 0;
+ m_matchAnyKeyword = 0;
+ m_eventSourceEnabled = false;
}
+ }
#if FEATURE_ACTIVITYSAMPLING
- UpdateKwdTriggers(enable);
+ UpdateKwdTriggers(commandArgs.enable);
#endif // FEATURE_ACTIVITYSAMPLING
- }
- else
+ }
+ else
+ {
+ if (commandArgs.Command == EventCommand.SendManifest)
{
- if (command == EventCommand.SendManifest)
- {
- if (m_rawManifest != null)
- SendManifest(m_rawManifest);
- }
+ if (m_rawManifest != null)
+ SendManifest(m_rawManifest);
+ }
- // These are not used for non-update commands and thus should always be 'default' values
- // Contract.Assert(enable == true);
- // Contract.Assert(level == EventLevel.LogAlways);
- // Contract.Assert(matchAnyKeyword == EventKeywords.None);
+ // These are not used for non-update commands and thus should always be 'default' values
+ // Contract.Assert(enable == true);
+ // Contract.Assert(level == EventLevel.LogAlways);
+ // Contract.Assert(matchAnyKeyword == EventKeywords.None);
- this.OnEventCommand(new EventCommandEventArgs(command, commandArguments, null, null));
- }
+ this.OnEventCommand(commandArgs);
+ }
#if FEATURE_ACTIVITYSAMPLING
- if (m_completelyInited && (listener != null || shouldReport))
- {
- SessionMask m = SessionMask.FromId(perEventSourceSessionId);
- ReportActivitySamplingInfo(listener, m);
- }
- OutputDebugString(string.Format(CultureInfo.InvariantCulture, "{0}.SendCommand(session {1}, cmd {2}, enable {3}, level {4}): live sessions {5:x}, sampling {6:x}",
- m_name, perEventSourceSessionId, command, enable, level,
- (ulong)m_curLiveSessions, (ulong)m_activityFilteringForETWEnabled));
-#endif // FEATURE_ACTIVITYSAMPLING
+ if (m_completelyInited && (commandArgs.listener != null || shouldReport))
+ {
+ SessionMask m = SessionMask.FromId(commandArgs.perEventSourceSessionId);
+ ReportActivitySamplingInfo(commandArgs.listener, m);
}
+#endif // FEATURE_ACTIVITYSAMPLING
}
catch (Exception e)
{
-#if FEATURE_MANAGED_ETW
- if (m_provider != null && m_provider.IsValid())
-#endif // FEATURE_MANAGED_ETW
- {
- // When the ETW session is created after the EventSource has registered with the ETW system
- // we can send any error messages here.
- ReportOutOfBandMessage("ERROR: Exception during SendCommand for EventSource " +
- Name + ": " + e.Message, true);
- }
-
- // When the EventSource is created while an ETW session already exists, the registration has not
- // completed yet, and m_provider does not have a valid registration handle, so we'll remember
- // the exception and rethrow. We'll call ReportOutOfBandMessage() in Initialize() at a point
- // where the ETW registration completed.
- m_lastCommandException = e;
- throw;
+ // When the ETW session is created after the EventSource has registered with the ETW system
+ // we can send any error messages here.
+ ReportOutOfBandMessage("ERROR: Exception in Command Processing for EventSource " + Name + ": " + e.Message, true);
+ // We never throw when doing a command.
}
}
@@ -2774,7 +2894,7 @@ namespace System.Diagnostics.Tracing
}
[SecuritySafeCritical]
- private void EnsureInitialized()
+ private void EnsureDescriptorsInitialized()
{
#if !ES_BUILD_STANDALONE
Contract.Assert(Monitor.IsEntered(EventListener.EventListenersLock));
@@ -3062,6 +3182,11 @@ namespace System.Diagnostics.Tracing
manifest = new ManifestBuilder(GetName(eventSourceType, flags), GetGuid(eventSourceType), eventSourceDllName,
resources, flags);
+ // Add an entry unconditionally for event ID 0 which will be for a string message.
+ manifest.StartEvent("EventSourceMessage", new EventAttribute(0) { Level = EventLevel.LogAlways, Task = (EventTask)0xFFFE });
+ manifest.AddEventParameter(typeof(string), "message");
+ manifest.EndEvent();
+
// eventSourceType must be sealed and must derive from this EventSource
if ((flags & EventManifestOptions.Strict) != 0)
{
@@ -3146,7 +3271,7 @@ namespace System.Diagnostics.Tracing
continue;
}
- // If we explictly mark the method as not being an event, then honor that.
+ // If we explicitly mark the method as not being an event, then honor that.
if (GetCustomAttributeHelper(method, typeof(NonEventAttribute), flags) != null)
continue;
@@ -3175,43 +3300,43 @@ namespace System.Diagnostics.Tracing
eventAttribute.Opcode = GetOpcodeWithDefault(EventOpcode.Info, eventName);
// Make the stop opcode have the same task as the start opcode.
- if (noTask)
+ if (noTask)
{
if (eventAttribute.Opcode == EventOpcode.Start)
{
- string taskName = eventName.Substring(0, eventName.Length-s_ActivityStartSuffix.Length); // Remove the Stop suffix to get the task name
- if (string.Compare(eventName, 0, taskName, 0, taskName.Length) == 0 &&
- string.Compare(eventName, taskName.Length, s_ActivityStartSuffix, 0, Math.Max(eventName.Length-taskName.Length, s_ActivityStartSuffix.Length)) == 0)
+ string taskName = eventName.Substring(0, eventName.Length - s_ActivityStartSuffix.Length); // Remove the Stop suffix to get the task name
+ if (string.Compare(eventName, 0, taskName, 0, taskName.Length) == 0 &&
+ string.Compare(eventName, taskName.Length, s_ActivityStartSuffix, 0, Math.Max(eventName.Length - taskName.Length, s_ActivityStartSuffix.Length)) == 0)
{
// Add a task that is just the task name for the start event. This supress the auto-task generation
// That would otherwise happen (and create 'TaskName'Start as task name rather than just 'TaskName'
- manifest.AddTask(taskName, (int) eventAttribute.Task);
+ manifest.AddTask(taskName, (int)eventAttribute.Task);
}
}
else if (eventAttribute.Opcode == EventOpcode.Stop)
{
// Find the start associated with this stop event. We requre start to be immediately before the stop
- int startEventId = eventAttribute.EventId-1;
+ int startEventId = eventAttribute.EventId - 1;
Contract.Assert(0 <= startEventId); // Since we reserve id 0, we know that id-1 is <= 0
EventMetadata startEventMetadata = eventData[startEventId];
-
+
// If you remove the Stop and add a Start does that name match the Start Event's Name?
// Ideally we would throw an error
- string taskName = eventName.Substring(0, eventName.Length-s_ActivityStopSuffix.Length); // Remove the Stop suffix to get the task name
- if (startEventMetadata.Descriptor.Opcode == (byte) EventOpcode.Start &&
- string.Compare(startEventMetadata.Name, 0, taskName, 0, taskName.Length) == 0 &&
- string.Compare(startEventMetadata.Name, taskName.Length, s_ActivityStartSuffix, 0, Math.Max(startEventMetadata.Name.Length-taskName.Length, s_ActivityStartSuffix.Length)) == 0)
+ string taskName = eventName.Substring(0, eventName.Length - s_ActivityStopSuffix.Length); // Remove the Stop suffix to get the task name
+ if (startEventMetadata.Descriptor.Opcode == (byte)EventOpcode.Start &&
+ string.Compare(startEventMetadata.Name, 0, taskName, 0, taskName.Length) == 0 &&
+ string.Compare(startEventMetadata.Name, taskName.Length, s_ActivityStartSuffix, 0, Math.Max(startEventMetadata.Name.Length - taskName.Length, s_ActivityStartSuffix.Length)) == 0)
{
// Make the stop event match the start event
- eventAttribute.Task = (EventTask) startEventMetadata.Descriptor.Task;
- }
+ eventAttribute.Task = (EventTask)startEventMetadata.Descriptor.Task;
+ }
else if ((flags & EventManifestOptions.Strict) != 0) // Throw an error if we can compatibly.
throw new ArgumentException(Environment.GetResourceString("EventSource_StopsFollowStarts"));
}
}
}
-
+
RemoveFirstArgIfRelatedActivityId(ref args);
if (!(source != null && source.SelfDescribingEvents))
{
@@ -3249,10 +3374,13 @@ namespace System.Diagnostics.Tracing
}
}
+ // Tell the TraceLogging stuff where to start allocating its own IDs.
+ NameInfo.ReserveEventIDsBelow(eventId);
+
if (source != null)
{
TrimEventDescriptors(ref eventData);
- source.m_eventData = eventData; // officialy initialize it. We do this at most once (it is racy otherwise).
+ source.m_eventData = eventData; // officially initialize it. We do this at most once (it is racy otherwise).
#if FEATURE_MANAGED_ETW_CHANNELS
source.m_channelData = manifest.GetChannelData();
#endif
@@ -3609,8 +3737,9 @@ namespace System.Diagnostics.Tracing
return -1;
}
+#if false // This routine is not needed at all, it was used for unit test debugging.
[Conditional("DEBUG")]
- internal static void OutputDebugString(string msg)
+ private static void OutputDebugString(string msg)
{
#if !ES_BUILD_PCL
msg = msg.TrimEnd('\r', '\n') +
@@ -3618,36 +3747,38 @@ namespace System.Diagnostics.Tracing
System.Diagnostics.Debugger.Log(0, null, msg);
#endif
}
+#endif
+ /// <summary>
+ /// Sends an error message to the debugger (outputDebugString), as well as the EventListeners
+ /// It will do this even if the EventSource is not enabled.
+ /// TODO: remove the flush parameter; it is not used.
+ /// </summary>
[SuppressMessage("Microsoft.Concurrency", "CA8001", Justification = "This does not need to be correct when racing with other threads")]
internal void ReportOutOfBandMessage(string msg, bool flush)
{
- // msg == null is a signal to flush what's accumulated in the buffer
- if (msg == null && flush)
+ try
{
- if (!string.IsNullOrEmpty(m_deferredErrorInfo))
- {
- WriteString(m_deferredErrorInfo);
- m_deferredErrorInfo = String.Empty;
- }
- return;
- }
-
- if (!msg.EndsWith(Environment.NewLine, StringComparison.Ordinal))
- msg = msg + Environment.NewLine;
-
#if !ES_BUILD_PCL
- // send message to debugger without delay
- System.Diagnostics.Debugger.Log(0, null, msg);
+ // send message to debugger without delay
+ System.Diagnostics.Debugger.Log(0, null, msg + "\r\n");
#endif
- m_deferredErrorInfo = m_deferredErrorInfo + msg;
- if (flush)
- {
- // send message to the ETW listener if available
- WriteString(m_deferredErrorInfo);
- m_deferredErrorInfo = String.Empty;
+ // Send it to all listeners.
+ if (m_outOfBandMessageCount < 254) // Note this counter is only a byte in size
+ m_outOfBandMessageCount++;
+ else
+ {
+ if (m_outOfBandMessageCount == 255)
+ return;
+ m_outOfBandMessageCount = 255; // Mark that we hit the limit. Notify them that this is the case.
+ msg = "Reached message limit. End of EventSource error messages.";
+ }
+
+ WriteEventString(EventLevel.LogAlways, -1, msg);
+ WriteStringToAllListeners("EventSourceMessage", msg);
}
+ catch (Exception) { } // If we fail during last chance logging, well, we have to give up....
}
private EventSourceSettings ValidateSettings(EventSourceSettings settings)
@@ -3749,7 +3880,7 @@ namespace System.Diagnostics.Tracing
// Enabling bits
private bool m_eventSourceEnabled; // am I enabled (any of my events are enabled for any dispatcher)
- internal EventLevel m_level; // higest level enabled by any output dispatcher
+ internal EventLevel m_level; // highest level enabled by any output dispatcher
internal EventKeywords m_matchAnyKeyword; // the logical OR of all levels enabled by any output dispatcher (zero is a special case) meaning 'all keywords'
// Dispatching state
@@ -3758,16 +3889,15 @@ namespace System.Diagnostics.Tracing
private volatile OverideEventProvider m_provider; // This hooks up ETW commands to our 'OnEventCommand' callback
#endif
private bool m_completelyInited; // The EventSource constructor has returned without exception.
- private bool m_deferedSendManifest; // We did not send the manifest in the startup path
- private Exception m_lastCommandException; // If there was an exception during a command, this is it.
private Exception m_constructionException; // If there was an exception construction, this is it
- private string m_deferredErrorInfo; // non-fatal error info accumulated during construction
+ private byte m_outOfBandMessageCount; // The number of out of band messages sent (we throttle them)
+ private EventCommandEventArgs m_deferredCommands;// If we get commands before we are fully initialized, we store them here and run them when we are fully initialized.
private string[] m_traits; // Used to implement GetTraits
internal static uint s_currentPid; // current process id, used in synthesizing quasi-GUIDs
[ThreadStatic]
- private static int m_EventSourceExceptionRecurenceCount = 0; // current recursion count inside ThrowEventSourceException
+ private static byte m_EventSourceExceptionRecurenceCount = 0; // current recursion count inside ThrowEventSourceException
#if FEATURE_MANAGED_ETW_CHANNELS
internal volatile ulong[] m_channelData;
@@ -4075,6 +4205,16 @@ namespace System.Diagnostics.Tracing
if (s_EventSources == null)
s_EventSources = new List<WeakReference>(2);
+ if (!s_EventSourceShutdownRegistered)
+ {
+ s_EventSourceShutdownRegistered = true;
+#if !ES_BUILD_PCL && !FEATURE_CORECLR
+ AppDomain.CurrentDomain.ProcessExit += DisposeOnShutdown;
+ AppDomain.CurrentDomain.DomainUnload += DisposeOnShutdown;
+#endif
+ }
+
+
// Periodically search the list for existing entries to reuse, this avoids
// unbounded memory use if we keep recycling eventSources (an unlikely thing).
int newIndex = -1;
@@ -4108,6 +4248,23 @@ namespace System.Diagnostics.Tracing
}
}
+ // Whenever we have async callbacks from native code, there is an ugly issue where
+ // during .NET shutdown native code could be calling the callback, but the CLR
+ // has already prohibited callbacks to managed code in the appdomain, causing the CLR
+ // to throw a COMPLUS_BOOT_EXCEPTION. The guideline we give is that you must unregister
+ // such callbacks on process shutdown or appdomain unload so that unmanaged code will never
+ // do this. This is what this callback is for.
+ // See bug 724140 for more
+ private static void DisposeOnShutdown(object sender, EventArgs e)
+ {
+ foreach (var esRef in s_EventSources)
+ {
+ EventSource es = esRef.Target as EventSource;
+ if (es != null)
+ es.Dispose();
+ }
+ }
+
/// <summary>
/// Helper used in code:Dispose that removes any references to 'listenerToRemove' in any of the
/// eventSources in the appdomain.
@@ -4240,6 +4397,11 @@ namespace System.Diagnostics.Tracing
/// Used to disallow reentrancy.
/// </summary>
private static bool s_CreatingListener = false;
+
+ /// <summary>
+ /// Used to register AD/Process shutdown callbacks.
+ /// </summary>
+ private static bool s_EventSourceShutdownRegistered = false;
#endregion
}
@@ -4251,12 +4413,12 @@ namespace System.Diagnostics.Tracing
/// <summary>
/// Gets the command for the callback.
/// </summary>
- public EventCommand Command { get; private set; }
+ public EventCommand Command { get; internal set; }
/// <summary>
/// Gets the arguments for the callback.
/// </summary>
- public IDictionary<String, String> Arguments { get; private set; }
+ public IDictionary<String, String> Arguments { get; internal set; }
/// <summary>
/// Enables the event that has the specified identifier.
@@ -4284,17 +4446,32 @@ namespace System.Diagnostics.Tracing
#region private
- internal EventCommandEventArgs(EventCommand command, IDictionary<string, string> arguments, EventSource eventSource, EventDispatcher dispatcher)
+ internal EventCommandEventArgs(EventCommand command, IDictionary<string, string> arguments, EventSource eventSource,
+ EventListener listener, int perEventSourceSessionId, int etwSessionId, bool enable, EventLevel level, EventKeywords matchAnyKeyword)
{
this.Command = command;
this.Arguments = arguments;
this.eventSource = eventSource;
- this.dispatcher = dispatcher;
+ this.listener = listener;
+ this.perEventSourceSessionId = perEventSourceSessionId;
+ this.etwSessionId = etwSessionId;
+ this.enable = enable;
+ this.level = level;
+ this.matchAnyKeyword = matchAnyKeyword;
}
internal EventSource eventSource;
internal EventDispatcher dispatcher;
+ // These are the arguments of sendCommand and are only used for deferring commands until after we are fully initialized.
+ internal EventListener listener;
+ internal int perEventSourceSessionId;
+ internal int etwSessionId;
+ internal bool enable;
+ internal EventLevel level;
+ internal EventKeywords matchAnyKeyword;
+ internal EventCommandEventArgs nextCommand; // We form a linked list of these deferred commands.
+
#endregion
}
@@ -4311,7 +4488,7 @@ namespace System.Diagnostics.Tracing
{
get
{
- if (m_eventName != null)
+ if (m_eventName != null || EventId < 0) // TraceLogging convention EventID == -1
{
return m_eventName;
}
@@ -4356,8 +4533,8 @@ namespace System.Diagnostics.Tracing
/// <summary>
/// Gets the payload argument names.
/// </summary>
- public ReadOnlyCollection<string> PayloadNames
- {
+ public ReadOnlyCollection<string> PayloadNames
+ {
get
{
// For contract based events we create the list lazily.
@@ -4367,7 +4544,7 @@ namespace System.Diagnostics.Tracing
Contract.Assert(EventId != -1);
var names = new List<string>();
- foreach(var parameter in m_eventSource.m_eventData[EventId].Parameters)
+ foreach (var parameter in m_eventSource.m_eventData[EventId].Parameters)
{
names.Add(parameter.Name);
}
@@ -4391,23 +4568,56 @@ namespace System.Diagnostics.Tracing
/// <summary>
/// Gets the keywords for the event.
/// </summary>
- public EventKeywords Keywords { get { return (EventKeywords)m_eventSource.m_eventData[EventId].Descriptor.Keywords; } }
+ public EventKeywords Keywords
+ {
+ get
+ {
+ if (EventId < 0) // TraceLogging convention EventID == -1
+ return m_keywords;
+
+ return (EventKeywords)m_eventSource.m_eventData[EventId].Descriptor.Keywords;
+ }
+ }
/// <summary>
/// Gets the operation code for the event.
/// </summary>
- public EventOpcode Opcode { get { return (EventOpcode)m_eventSource.m_eventData[EventId].Descriptor.Opcode; } }
+ public EventOpcode Opcode
+ {
+ get
+ {
+ if (EventId < 0) // TraceLogging convention EventID == -1
+ return m_opcode;
+ return (EventOpcode)m_eventSource.m_eventData[EventId].Descriptor.Opcode;
+ }
+ }
/// <summary>
/// Gets the task for the event.
/// </summary>
- public EventTask Task { get { return (EventTask)m_eventSource.m_eventData[EventId].Descriptor.Task; } }
+ public EventTask Task
+ {
+ get
+ {
+ if (EventId < 0) // TraceLogging convention EventID == -1
+ return EventTask.None;
+
+ return (EventTask)m_eventSource.m_eventData[EventId].Descriptor.Task;
+ }
+ }
/// <summary>
/// Any provider/user defined options associated with the event.
/// </summary>
- ///
- public EventTags Tags { get { return m_eventSource.m_eventData[EventId].Tags; } }
+ public EventTags Tags
+ {
+ get
+ {
+ if (EventId < 0) // TraceLogging convention EventID == -1
+ return m_tags;
+ return m_eventSource.m_eventData[EventId].Tags;
+ }
+ }
/// <summary>
/// Gets the message for the event.
@@ -4416,7 +4626,7 @@ namespace System.Diagnostics.Tracing
{
get
{
- if (m_message != null)
+ if (EventId < 0) // TraceLogging convention EventID == -1
return m_message;
else
return m_eventSource.m_eventData[EventId].Message;
@@ -4432,13 +4642,29 @@ namespace System.Diagnostics.Tracing
/// <summary>
/// Gets the channel for the event.
/// </summary>
- public EventChannel Channel { get { return (EventChannel)m_eventSource.m_eventData[EventId].Descriptor.Channel; } }
+ public EventChannel Channel
+ {
+ get
+ {
+ if (EventId < 0) // TraceLogging convention EventID == -1
+ return EventChannel.None;
+ return (EventChannel)m_eventSource.m_eventData[EventId].Descriptor.Channel;
+ }
+ }
#endif
/// <summary>
/// Gets the version of the event.
/// </summary>
- public byte Version { get { return m_eventSource.m_eventData[EventId].Descriptor.Version; } }
+ public byte Version
+ {
+ get
+ {
+ if (EventId < 0) // TraceLogging convention EventID == -1
+ return 0;
+ return m_eventSource.m_eventData[EventId].Descriptor.Version;
+ }
+ }
/// <summary>
/// Gets the level for the event.
@@ -4462,6 +4688,9 @@ namespace System.Diagnostics.Tracing
private string m_eventName;
private EventSource m_eventSource;
private ReadOnlyCollection<string> m_payloadNames;
+ internal EventTags m_tags;
+ internal EventOpcode m_opcode;
+ internal EventKeywords m_keywords;
#endregion
}
@@ -4642,7 +4871,6 @@ namespace System.Diagnostics.Tracing
/// </summary>
public string ImportChannel { get; set; }
#endif
-
}
/// <summary>
@@ -5887,7 +6115,8 @@ namespace System.Diagnostics.Tracing
sb.Append(" <maps>").AppendLine();
foreach (Type enumType in mapsTab.Values)
{
- string mapKind = EventSource.GetCustomAttributeHelper(enumType, typeof(FlagsAttribute), flags) != null ? "bitMap" : "valueMap";
+ bool isbitmap = EventSource.GetCustomAttributeHelper(enumType, typeof(FlagsAttribute), flags) != null;
+ string mapKind = isbitmap ? "bitMap" : "valueMap";
sb.Append(" <").Append(mapKind).Append(" name=\"").Append(enumType.Name).Append("\">").AppendLine();
// write out each enum value
@@ -5897,12 +6126,20 @@ namespace System.Diagnostics.Tracing
object constantValObj = staticField.GetRawConstantValue();
if (constantValObj != null)
{
- string hexStr = null;
+ long hexValue;
if (constantValObj is int)
- hexStr = ((int)constantValObj).ToString("x", CultureInfo.InvariantCulture);
+ hexValue = ((int)constantValObj);
else if (constantValObj is long)
- hexStr = ((long)constantValObj).ToString("x", CultureInfo.InvariantCulture);
- sb.Append(" <map value=\"0x").Append(hexStr).Append("\"");
+ hexValue = ((long)constantValObj);
+ else
+ continue;
+
+ // ETW requires all bitmap values to be powers of 2. Skip the ones that are not.
+ // TODO: Warn people about the dropping of values.
+ if (isbitmap && ((hexValue & (hexValue - 1)) != 0 || hexValue == 0))
+ continue;
+
+ sb.Append(" <map value=\"0x").Append(hexValue.ToString("x", CultureInfo.InvariantCulture)).Append("\"");
WriteMessageAttrib(sb, "map", enumType.Name + "." + staticField.Name, staticField.Name);
sb.Append("/>").AppendLine();
}
@@ -6118,7 +6355,7 @@ namespace System.Diagnostics.Tracing
string ret;
if (taskTab == null)
taskTab = new Dictionary<int, string>();
- if (!taskTab.TryGetValue((int)task, out ret))
+ if (!taskTab.TryGetValue((int)task, out ret))
ret = taskTab[(int)task] = eventName;
return ret;
}
diff --git a/src/mscorlib/src/System/Diagnostics/Eventing/FrameworkEventSource.cs b/src/mscorlib/src/System/Diagnostics/Eventing/FrameworkEventSource.cs
index 38437c5b30..a3e1348c79 100644
--- a/src/mscorlib/src/System/Diagnostics/Eventing/FrameworkEventSource.cs
+++ b/src/mscorlib/src/System/Diagnostics/Eventing/FrameworkEventSource.cs
@@ -1,4 +1,4 @@
-// Copyright (c) Microsoft. All rights reserved.
+// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
//
diff --git a/src/mscorlib/src/System/Diagnostics/Eventing/StubEnvironment.cs b/src/mscorlib/src/System/Diagnostics/Eventing/StubEnvironment.cs
index e697a341bd..a6d10506e7 100644
--- a/src/mscorlib/src/System/Diagnostics/Eventing/StubEnvironment.cs
+++ b/src/mscorlib/src/System/Diagnostics/Eventing/StubEnvironment.cs
@@ -1,5 +1,6 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
using System;
using System.Collections.Generic;
diff --git a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/ArrayTypeInfo.cs b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/ArrayTypeInfo.cs
index 1b7772246c..ce98e38bf2 100644
--- a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/ArrayTypeInfo.cs
+++ b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/ArrayTypeInfo.cs
@@ -1,5 +1,6 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
using System;
using System.Collections.Generic;
diff --git a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/ConcurrentSet.cs b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/ConcurrentSet.cs
index b07d671f5a..e411b14ee7 100644
--- a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/ConcurrentSet.cs
+++ b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/ConcurrentSet.cs
@@ -1,5 +1,6 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
using System;
using Interlocked = System.Threading.Interlocked;
diff --git a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/ConcurrentSetItem.cs b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/ConcurrentSetItem.cs
index 322f664303..fd24ad6018 100644
--- a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/ConcurrentSetItem.cs
+++ b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/ConcurrentSetItem.cs
@@ -1,5 +1,6 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
using System;
#if ES_BUILD_STANDALONE
diff --git a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/DataCollector.cs b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/DataCollector.cs
index 7166297e8a..1543ee56e4 100644
--- a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/DataCollector.cs
+++ b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/DataCollector.cs
@@ -1,5 +1,6 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
using System;
using System.Runtime.InteropServices;
using System.Security;
diff --git a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EmptyStruct.cs b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EmptyStruct.cs
index 829020ac23..9ab5f1f6bb 100644
--- a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EmptyStruct.cs
+++ b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EmptyStruct.cs
@@ -1,5 +1,6 @@
-// Copyright (c) Microsoft. All rights reserved.
+// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
#if ES_BUILD_STANDALONE
namespace Microsoft.Diagnostics.Tracing
#else
diff --git a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EnumHelper.cs b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EnumHelper.cs
index db6317ee51..12e05845e0 100644
--- a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EnumHelper.cs
+++ b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EnumHelper.cs
@@ -1,5 +1,6 @@
-// Copyright (c) Microsoft. All rights reserved.
+// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
using System;
using System.Reflection;
diff --git a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EnumerableTypeInfo.cs b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EnumerableTypeInfo.cs
index 5ff6c07889..af5e60baaf 100644
--- a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EnumerableTypeInfo.cs
+++ b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EnumerableTypeInfo.cs
@@ -1,5 +1,6 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
using System;
using System.Collections.Generic;
diff --git a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EventDataAttribute.cs b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EventDataAttribute.cs
index 905bf5ac1c..93817a8459 100644
--- a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EventDataAttribute.cs
+++ b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EventDataAttribute.cs
@@ -1,5 +1,6 @@
-// Copyright (c) Microsoft. All rights reserved.
+// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
using System;
#if ES_BUILD_STANDALONE
diff --git a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EventFieldAttribute.cs b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EventFieldAttribute.cs
index 0b896b7fcf..4dc4767207 100644
--- a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EventFieldAttribute.cs
+++ b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EventFieldAttribute.cs
@@ -1,5 +1,6 @@
-// Copyright (c) Microsoft. All rights reserved.
+// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
using System;
#if ES_BUILD_STANDALONE
diff --git a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EventFieldFormat.cs b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EventFieldFormat.cs
index 0505d6af63..409535287e 100644
--- a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EventFieldFormat.cs
+++ b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EventFieldFormat.cs
@@ -1,5 +1,6 @@
-// Copyright (c) Microsoft. All rights reserved.
+// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
#if ES_BUILD_STANDALONE
namespace Microsoft.Diagnostics.Tracing
#else
diff --git a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EventIgnoreAttribute.cs b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EventIgnoreAttribute.cs
index 367693f0cd..7697b49418 100644
--- a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EventIgnoreAttribute.cs
+++ b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EventIgnoreAttribute.cs
@@ -1,5 +1,6 @@
-// Copyright (c) Microsoft. All rights reserved.
+// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
using System;
#if ES_BUILD_STANDALONE
diff --git a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EventPayload.cs b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EventPayload.cs
index 3240a8d738..86010f6d04 100644
--- a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EventPayload.cs
+++ b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EventPayload.cs
@@ -1,4 +1,7 @@
-using System.Collections.Generic;
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
+using System.Collections.Generic;
using System.Collections;
#if !ES_BUILD_AGAINST_DOTNET_V35
diff --git a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EventSourceActivity.cs b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EventSourceActivity.cs
index 2114707d69..3279a76642 100644
--- a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EventSourceActivity.cs
+++ b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EventSourceActivity.cs
@@ -1,5 +1,6 @@
-// Copyright (c) Microsoft. All rights reserved.
+// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
using System;
#if !ES_BUILD_AGAINST_DOTNET_V35
diff --git a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EventSourceOptions.cs b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EventSourceOptions.cs
index 5b12ea9098..603679b5ff 100644
--- a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EventSourceOptions.cs
+++ b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/EventSourceOptions.cs
@@ -1,5 +1,6 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
using System;
#if ES_BUILD_STANDALONE
diff --git a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/FieldMetadata.cs b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/FieldMetadata.cs
index 8dbe604767..b64940ac7a 100644
--- a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/FieldMetadata.cs
+++ b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/FieldMetadata.cs
@@ -1,5 +1,6 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
using System;
using Encoding = System.Text.Encoding;
diff --git a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/InvokeTypeInfo.cs b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/InvokeTypeInfo.cs
index ec4196efa5..903114f10d 100644
--- a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/InvokeTypeInfo.cs
+++ b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/InvokeTypeInfo.cs
@@ -1,5 +1,6 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
using System;
using System.Collections.Generic;
diff --git a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/NameInfo.cs b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/NameInfo.cs
index 1820443092..acc76078ff 100644
--- a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/NameInfo.cs
+++ b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/NameInfo.cs
@@ -1,5 +1,6 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
using System;
using System.Collections.Generic;
using Interlocked = System.Threading.Interlocked;
@@ -17,6 +18,22 @@ namespace System.Diagnostics.Tracing
internal sealed class NameInfo
: ConcurrentSetItem<KeyValuePair<string, EventTags>, NameInfo>
{
+ /// <summary>
+ /// Insure that eventIds strictly less than 'eventId' will not be
+ /// used by the SelfDescribing events.
+ /// </summary>
+ internal static void ReserveEventIDsBelow(int eventId)
+ {
+ for(;;)
+ {
+ int snapshot =lastIdentity;
+ int newIdentity = (lastIdentity & ~0xFFFFFF) + eventId;
+ newIdentity = Math.Max(newIdentity, snapshot); // Should be redundant. as we only create descriptors once.
+ if (Interlocked.CompareExchange(ref lastIdentity, newIdentity, snapshot) == snapshot)
+ break;
+ }
+ }
+
private static int lastIdentity = Statics.TraceLoggingChannel << 24;
internal readonly string name;
internal readonly EventTags tags;
diff --git a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/PropertyAccessor.cs b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/PropertyAccessor.cs
index 7fb7802435..388e0b61c0 100644
--- a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/PropertyAccessor.cs
+++ b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/PropertyAccessor.cs
@@ -1,5 +1,6 @@
-// Copyright (c) Microsoft. All rights reserved.
+// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
using System;
using System.Reflection;
diff --git a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/PropertyAnalysis.cs b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/PropertyAnalysis.cs
index 523d6830ef..64972d1bfb 100644
--- a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/PropertyAnalysis.cs
+++ b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/PropertyAnalysis.cs
@@ -1,5 +1,6 @@
-// Copyright (c) Microsoft. All rights reserved.
+// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
using System;
using System.Reflection;
diff --git a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/SimpleEventTypes.cs b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/SimpleEventTypes.cs
index 9685885879..7a613f4293 100644
--- a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/SimpleEventTypes.cs
+++ b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/SimpleEventTypes.cs
@@ -1,5 +1,6 @@
-// Copyright (c) Microsoft. All rights reserved.
+// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
using System;
using Interlocked = System.Threading.Interlocked;
diff --git a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/SimpleTypeInfos.cs b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/SimpleTypeInfos.cs
index 87798d8878..6490a3a2dd 100644
--- a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/SimpleTypeInfos.cs
+++ b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/SimpleTypeInfos.cs
@@ -1,5 +1,6 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
using System;
using System.Collections.Generic;
diff --git a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/Statics.cs b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/Statics.cs
index 5239c31402..8897ae2219 100644
--- a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/Statics.cs
+++ b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/Statics.cs
@@ -1,5 +1,6 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
using System;
using System.Collections.Generic;
using System.Reflection;
diff --git a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TraceLoggingDataCollector.cs b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TraceLoggingDataCollector.cs
index 4b8158c0d7..2ac1df17fd 100644
--- a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TraceLoggingDataCollector.cs
+++ b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TraceLoggingDataCollector.cs
@@ -1,5 +1,6 @@
-// Copyright (c) Microsoft. All rights reserved.
+// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
using System;
using System.Security;
diff --git a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TraceLoggingDataType.cs b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TraceLoggingDataType.cs
index 8e218ba9bf..009632a578 100644
--- a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TraceLoggingDataType.cs
+++ b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TraceLoggingDataType.cs
@@ -1,5 +1,6 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
using System;
#if ES_BUILD_STANDALONE
diff --git a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TraceLoggingEventSource.cs b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TraceLoggingEventSource.cs
index 7eea8dfaca..c85fdf8c21 100644
--- a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TraceLoggingEventSource.cs
+++ b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TraceLoggingEventSource.cs
@@ -1,7 +1,9 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
// This program uses code hyperlinks available as part of the HyperAddin Visual Studio plug-in.
// It is available from http://www.codeplex.com/hyperAddin
+
#define FEATURE_MANAGED_ETW
#if !ES_BUILD_STANDALONE
@@ -645,7 +647,7 @@ namespace System.Diagnostics.Tracing
pinCount);
eventTypes.typeInfo.WriteData(TraceLoggingDataCollector.Instance, ref data);
-
+
this.WriteEventRaw(
ref descriptor,
pActivityId,
@@ -653,11 +655,11 @@ namespace System.Diagnostics.Tracing
(int)(DataCollector.ThreadInstance.Finish() - descriptors),
(IntPtr)descriptors);
- // TODO enable filtering for listners.
+ // TODO enable filtering for listeners.
if (m_Dispatchers != null)
{
var eventData = (EventPayload)(eventTypes.typeInfo.GetData(data));
- WriteToAllListeners(eventName, pActivityId, eventData);
+ WriteToAllListeners(eventName, ref descriptor, nameInfo.tags, pActivityId, eventData);
}
}
@@ -683,12 +685,15 @@ namespace System.Diagnostics.Tracing
ThrowEventSourceException(ex);
}
}
-
+
[SecurityCritical]
- private unsafe void WriteToAllListeners(string eventName, Guid* pActivityId, EventPayload payload)
+ private unsafe void WriteToAllListeners(string eventName, ref EventDescriptor eventDescriptor, EventTags tags, Guid* pActivityId, EventPayload payload)
{
EventWrittenEventArgs eventCallbackArgs = new EventWrittenEventArgs(this);
eventCallbackArgs.EventName = eventName;
+ eventCallbackArgs.m_keywords = (EventKeywords) eventDescriptor.Keywords;
+ eventCallbackArgs.m_opcode = (EventOpcode) eventDescriptor.Opcode;
+ eventCallbackArgs.m_tags = tags;
// Self described events do not have an id attached. We mark it internally with -1.
eventCallbackArgs.EventId = -1;
diff --git a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TraceLoggingEventTraits.cs b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TraceLoggingEventTraits.cs
index 6c5a5793bb..5a40808573 100644
--- a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TraceLoggingEventTraits.cs
+++ b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TraceLoggingEventTraits.cs
@@ -1,5 +1,6 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
using System;
#if ES_BUILD_STANDALONE
diff --git a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TraceLoggingEventTypes.cs b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TraceLoggingEventTypes.cs
index 8e2732b0ff..06b840f7b7 100644
--- a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TraceLoggingEventTypes.cs
+++ b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TraceLoggingEventTypes.cs
@@ -1,5 +1,6 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
using System;
using System.Collections.Generic;
using Interlocked = System.Threading.Interlocked;
diff --git a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TraceLoggingMetadataCollector.cs b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TraceLoggingMetadataCollector.cs
index 1101439d66..ff97db5aa2 100644
--- a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TraceLoggingMetadataCollector.cs
+++ b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TraceLoggingMetadataCollector.cs
@@ -1,5 +1,6 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
using System;
using System.Collections.Generic;
diff --git a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TraceLoggingTypeInfo.cs b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TraceLoggingTypeInfo.cs
index 326af51c10..21a4390e42 100644
--- a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TraceLoggingTypeInfo.cs
+++ b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TraceLoggingTypeInfo.cs
@@ -1,5 +1,6 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
using System;
#if !ES_BUILD_AGAINST_DOTNET_V35
diff --git a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TraceLoggingTypeInfo_T.cs b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TraceLoggingTypeInfo_T.cs
index b93aab9d65..58945987ee 100644
--- a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TraceLoggingTypeInfo_T.cs
+++ b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TraceLoggingTypeInfo_T.cs
@@ -1,5 +1,6 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
using System;
using System.Collections.Generic;
using Interlocked = System.Threading.Interlocked;
diff --git a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TypeAnalysis.cs b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TypeAnalysis.cs
index 404fdadc31..8b44ddec15 100644
--- a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TypeAnalysis.cs
+++ b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/TypeAnalysis.cs
@@ -1,5 +1,6 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
using System;
using System.Collections.Generic;
using System.Reflection;
diff --git a/src/vm/codeman.cpp b/src/vm/codeman.cpp
index fed3f09ccf..f8f9776640 100644
--- a/src/vm/codeman.cpp
+++ b/src/vm/codeman.cpp
@@ -1162,6 +1162,7 @@ EEJitManager::EEJitManager()
m_JITCompiler = NULL;
#ifdef _TARGET_AMD64_
m_JITCompilerOther = NULL;
+ m_fUsingCompatJit = false;
#endif
#ifdef ALLOW_SXS_JIT
m_alternateJit = NULL;
@@ -1407,6 +1408,7 @@ BOOL EEJitManager::LoadJIT()
m_JITCompiler = NULL;
#ifdef _TARGET_AMD64_
m_JITCompilerOther = NULL;
+ m_fUsingCompatJit = false;
#endif
LoadAndInitializeJIT(ExecutionManager::GetJitName(), &m_JITCompiler, &newJitCompiler);
@@ -1495,6 +1497,13 @@ BOOL EEJitManager::LoadJIT()
// Tell the main JIT to fall back to the "fallback" JIT compiler, in case some
// obfuscator tries to directly call the main JIT's getJit() function.
newJitCompiler->setRealJit(fallbackICorJitCompiler);
+ // Record the fact that we are using the compat jit so that if the VM
+ // needs to behave differently for the compat jit it can query this value
+ //
+ // Currently we do behave differently when deciding how to call the
+ // CORINFO_HELP_STOP_FOR_GC jithelper.
+ //
+ m_fUsingCompatJit = true;
}
}
}
diff --git a/src/vm/codeman.h b/src/vm/codeman.h
index 8dc1d0992a..e7ba84b862 100644
--- a/src/vm/codeman.h
+++ b/src/vm/codeman.h
@@ -1121,6 +1121,7 @@ public:
HINSTANCE m_JITCompiler;
#ifdef _TARGET_AMD64_
HINSTANCE m_JITCompilerOther; // Stores the handle of the legacy JIT, if one is loaded.
+ bool m_fUsingCompatJit; // true if we are using the JIT64 compat jit, false otherwise
#endif
#ifdef ALLOW_SXS_JIT
@@ -1406,6 +1407,15 @@ private:
JumpStubBlockHeader * m_pBlocks;
JumpStubTable m_Table;
};
+
+#if defined(_TARGET_AMD64_)
+public :
+ static bool UsingCompatJit()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (m_pEEJitManager != nullptr) && m_pEEJitManager->m_fUsingCompatJit;
+ }
+#endif
};
inline CodeHeader * EEJitManager::GetCodeHeader(const METHODTOKEN& MethodToken)
diff --git a/src/vm/jitinterface.cpp b/src/vm/jitinterface.cpp
index 1a92bfd639..5e52d99040 100644
--- a/src/vm/jitinterface.cpp
+++ b/src/vm/jitinterface.cpp
@@ -7902,6 +7902,61 @@ CorInfoInline CEEInfo::canInline (CORINFO_METHOD_HANDLE hCaller,
}
#endif // PROFILING_SUPPORTED
+
+#ifdef PROFILING_SUPPORTED
+ if (CORProfilerPresent())
+ {
+ // #rejit
+ //
+ // See if rejit-specific flags for the caller disable inlining
+ if ((ReJitManager::GetCurrentReJitFlags(pCaller) &
+ COR_PRF_CODEGEN_DISABLE_INLINING) != 0)
+ {
+ result = INLINE_FAIL;
+ szFailReason = "ReJIT request disabled inlining from caller";
+ goto exit;
+ }
+
+ // If the profiler has set a mask preventing inlining, always return
+ // false to the jit.
+ if (CORProfilerDisableInlining())
+ {
+ result = INLINE_FAIL;
+ szFailReason = "Profiler disabled inlining globally";
+ goto exit;
+ }
+
+ // If the profiler wishes to be notified of JIT events and the result from
+ // the above tests will cause a function to be inlined, we need to tell the
+ // profiler that this inlining is going to take place, and give them a
+ // chance to prevent it.
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackJITInfo());
+ if (pCaller->IsILStub() || pCallee->IsILStub())
+ {
+ // do nothing
+ }
+ else
+ {
+ BOOL fShouldInline;
+
+ HRESULT hr = g_profControlBlock.pProfInterface->JITInlining(
+ (FunctionID)pCaller,
+ (FunctionID)pCallee,
+ &fShouldInline);
+
+ if (SUCCEEDED(hr) && !fShouldInline)
+ {
+ result = INLINE_FAIL;
+ szFailReason = "Profiler disabled inlining locally";
+ goto exit;
+ }
+ }
+ END_PIN_PROFILER();
+ }
+ }
+#endif // PROFILING_SUPPORTED
+
exit: ;
EE_TO_JIT_TRANSITION();
@@ -10544,10 +10599,24 @@ void* CEEJitInfo::getHelperFtn(CorInfoHelpFunc ftnNum, /* IN */
#endif /*_PREFAST_ */
#if defined(_TARGET_AMD64_)
- // Always call profiler helpers indirectly to avoid going through jump stubs.
- // Jumps stubs corrupt RAX that has to be preserved for profiler probes.
- if (dynamicFtnNum == DYNAMIC_CORINFO_HELP_PROF_FCN_ENTER ||
- dynamicFtnNum == DYNAMIC_CORINFO_HELP_PROF_FCN_LEAVE ||
+ // To avoid using a jump stub we always call certain helpers using an indirect call.
+ // Because when using a direct call and the target is farther away than 2^31 bytes,
+ // the direct call instead goes to a jump stub which jumps to the jit helper.
+ // However in this process the jump stub will corrupt RAX.
+ //
+ // The set of helpers for which RAX must be preserved are the profiler probes
+ // and the STOP_FOR_GC helper which maps to JIT_RareDisableHelper.
+ // In the case of the STOP_FOR_GC helper RAX can be holding a function return value.
+ //
+ // Note that JIT64 (the compat jit) has an issue where it fails when trying
+ // to make an indirect call using OPCONDCALL so we always use a direct call
+ // for JIT64. UsingCompatJit() == true means that we are using JIT64.
+ // The JIT64 also does not depend upon having RAX preserved across the call.
+ //
+ if (((dynamicFtnNum == DYNAMIC_CORINFO_HELP_STOP_FOR_GC) &&
+ (ExecutionManager::UsingCompatJit() == false) ) ||
+ dynamicFtnNum == DYNAMIC_CORINFO_HELP_PROF_FCN_ENTER ||
+ dynamicFtnNum == DYNAMIC_CORINFO_HELP_PROF_FCN_LEAVE ||
dynamicFtnNum == DYNAMIC_CORINFO_HELP_PROF_FCN_TAILCALL)
{
_ASSERTE(ppIndirection != NULL);
diff --git a/src/vm/rejit.cpp b/src/vm/rejit.cpp
index 9da9118df2..33ec16b83f 100644
--- a/src/vm/rejit.cpp
+++ b/src/vm/rejit.cpp
@@ -1796,6 +1796,7 @@ HRESULT ReJitManager::RequestRevertByToken(PTR_Module pModule, mdMethodDef metho
_ASSERTE(hr == E_OUTOFMEMORY);
return hr;
}
+
// If there were any errors, return the first one. This matches previous error handling
// behavior that only returned the first error encountered within Revert().
for (int i = 0; i < errorRecords.Count(); i++)
diff --git a/src/zap/zapinfo.cpp b/src/zap/zapinfo.cpp
index 9825103674..93311414f1 100644
--- a/src/zap/zapinfo.cpp
+++ b/src/zap/zapinfo.cpp
@@ -2513,6 +2513,21 @@ void * ZapInfo::getHelperFtn (CorInfoHelpFunc ftnNum, void **ppIndirection)
case CORINFO_HELP_PROF_FCN_TAILCALL:
*ppIndirection = m_pImage->GetInnerPtr(GetProfilingHandleImport(), kZapProfilingHandleImportValueIndexTailcallAddr * sizeof(TADDR));
return NULL;
+#ifdef _TARGET_AMD64_
+ case CORINFO_HELP_STOP_FOR_GC:
+ // Note that JIT64 (the compat jit) has an issue where it fails when trying
+ // to make an indirect call using OPCONDCALL so we always use a direct call
+ // for JIT64. m_hJitLegacy == NULL means that we are using RyuJIT.
+ // The JIT64 also does not depend upon having RAX preserved across the call.
+ if (m_zapper->m_hJitLegacy == NULL)
+ {
+ // Force all calls in ngen images for this helper to use an indirect call.
+ // We cannot use a jump stub to reach this helper because
+ // the RAX register can contain a return value.
+ dwHelper |= CORCOMPILE_HELPER_PTR;
+ }
+ break;
+#endif
default:
break;
}