(pTargetIP));
}
// bool DebuggerStepper::TrapStep() TrapStep attempts to set a
// patch at the next IL instruction to be executed. If we're stepping in &
// the next IL instruction is a call, then this'll set a breakpoint inside
// the code that will be called.
// How: There are a number of cases, depending on where the IP
// currently is:
// Unmanaged code: EnableTraceCall() & return false - try and get
// it when it returns.
// In a frame: if the param is true, then do an
// EnableTraceCall(). If the frame isn't the top frame, also do
// g_pEEInterface->TraceFrame(), g_pEEInterface->FollowTrace, and
// PatchTrace.
// Normal managed frame: create a Walker and walk the instructions until we either
// leave the provided range (AddPatch there, return true), or we don't know what the
// next instruction is (say, after a call, or return, or branch - return false).
// Returns a boolean indicating if we were able to set a patch successfully
// in either this method, or (if in == true & the next instruction is a call)
// inside a callee method.
// true: Patch successfully placed either in this method or a callee,
// so the stepping is taken care of.
// false: Unable to place patch in either this method or any
// applicable callee methods, so the only option the caller has to put
// patch to control flow is to call TrapStepOut & try and place a patch
// on the method that called the current frame's method.
bool DebuggerStepper::TrapStep(ControllerStackInfo *info, bool in)
{
    LOG((LF_CORDB,LL_INFO10000,"DS::TS: this:0x%x\n", this));

    if (!info->m_activeFrame.managed)
    {
        //
        // We're not in managed code. Patch up all paths back in.
        //
        LOG((LF_CORDB,LL_INFO10000, "DS::TS: not in managed code\n"));

        if (in)
        {
            EnablePolyTraceCall();
        }

        // No patch was placed; the caller falls back to TrapStepOut (and/or the
        // trace-call notification enabled above) to regain control.
        return false;
    }

    if (info->m_activeFrame.frame != NULL)
    {
        //
        // We're in some kind of weird frame. Patch further entry to the frame,
        // or if we can't, patch return from the frame.
        //
        LOG((LF_CORDB,LL_INFO10000, "DS::TS: in a weird frame\n"));

        if (in)
        {
            EnablePolyTraceCall();

            // Only traditional steppers should patch a frame. JMC steppers will
            // just rely on TriggerMethodEnter.
            if (DEBUGGER_CONTROLLER_STEPPER == this->GetDCType())
            {
                if (info->m_activeFrame.frame != FRAME_TOP)
                {
                    TraceDestination trace;

                    CONTRACT_VIOLATION(GCViolation); // TraceFrame GC-triggers

                    // This could be anywhere, especially b/c step could be on non-leaf frame.
                    if (g_pEEInterface->TraceFrame(this->GetThread(),
                                                   info->m_activeFrame.frame,
                                                   FALSE, &trace,
                                                   &(info->m_activeFrame.registers))
                        && g_pEEInterface->FollowTrace(&trace)
                        && PatchTrace(&trace, info->m_activeFrame.fp,
                                      (m_rgfMappingStop&STOP_UNMANAGED)?
                                          (true):(false)))
                    {
                        return true;
                    }
                }
            }
        }

        return false;
    }

#ifdef _TARGET_X86_
    LOG((LF_CORDB,LL_INFO1000, "GetJitInfo for pc = 0x%x (addr of "
        "that value:0x%x)\n", (const BYTE*)(GetControlPC(&info->m_activeFrame.registers)),
        info->m_activeFrame.registers.PCTAddr));
#endif

    // Note: we used to pass in the IP from the active frame to GetJitInfo, but there seems to be no value in that, and
    // it was causing problems creating a stepper while sitting in ndirect stubs after we'd returned from the unmanaged
    // function that had been called.
    DebuggerJitInfo *ji = info->m_activeFrame.GetJitInfoFromFrame();
    if( ji != NULL )
    {
        LOG((LF_CORDB,LL_INFO10000,"DS::TS: For code 0x%p, got DJI 0x%p, "
            "from 0x%p to 0x%p\n",
            (const BYTE*)(GetControlPC(&info->m_activeFrame.registers)),
            ji, ji->m_addrOfCode, ji->m_addrOfCode+ji->m_sizeOfCode));
    }
    else
    {
        LOG((LF_CORDB,LL_INFO10000,"DS::TS: For code 0x%p, "
            "didn't get a DJI \n",
            (const BYTE*)(GetControlPC(&info->m_activeFrame.registers))));
    }

    //
    // We're in a normal managed frame - walk the code
    //
    NativeWalker walker;

    LOG((LF_CORDB,LL_INFO1000, "DS::TS: &info->m_activeFrame.registers 0x%p\n", &info->m_activeFrame.registers));

    // !!! Eventually when using the fjit, we'll want
    // to walk the IL to get the next location, & then map
    // it back to native.
    walker.Init((BYTE*)GetControlPC(&(info->m_activeFrame.registers)), &info->m_activeFrame.registers);

    // Is the active frame really the active frame?
    // What if the thread is stopped at a managed debug event outside of a filter ctx? Eg, stopped
    // somewhere directly in mscorwks (like sending a LogMsg or ClsLoad event) or even at WaitForSingleObject.
    // ActiveFrame is either the stepper's initial frame or the frame of a filterctx.
    bool fIsActivFrameLive = (info->m_activeFrame.fp == info->m_bottomFP);

    // If this thread isn't stopped in managed code, it can't be at the active frame.
    if (GetManagedStoppedCtx(this->GetThread()) == NULL)
    {
        fIsActivFrameLive = false;
    }

    bool fIsJump             = false;
    bool fCallingIntoFunclet = false;

    // If m_activeFrame is not the actual active frame,
    // we should skip this first switch - never single step, and
    // assume our context is bogus.
    if (fIsActivFrameLive)
    {
        LOG((LF_CORDB,LL_INFO10000, "DC::TS: immediate?\n"));

        // Note that by definition our walker must always be able to step
        // through a single instruction, so any return
        // of NULL IP's from those cases on the first step
        // means that an exception is going to be generated.
        //
        // (On future steps, it can also mean that the destination
        // simply can't be computed.)
        WALK_TYPE wt = walker.GetOpcodeWalkType();
        {
            switch (wt)
            {
            case WALK_RETURN:
                {
                    LOG((LF_CORDB,LL_INFO10000, "DC::TS:Imm:WALK_RETURN\n"));

                    // Normally a 'ret' opcode means we're at the end of a function and doing a step-out.
                    // But the jit is free to use a 'ret' opcode to implement various goofy constructs like
                    // managed filters, in which case we may ret to the same function or we may ret to some
                    // internal CLR stub code.
                    // So we'll just ignore this and tell the Stepper to enable every notification it has
                    // and let the thread run free. This will include TrapStepOut() and EnableUnwind()
                    // to catch any potential filters.

                    // Go ahead and enable the single-step flag too. We know it's safe.
                    // If this lands in random code, then TriggerSingleStep will just ignore it.
                    EnableSingleStep();

                    // Don't set step-reason yet. If another trigger gets hit, it will set the reason.
                    return false;
                }

            case WALK_BRANCH:
                LOG((LF_CORDB,LL_INFO10000, "DC::TS:Imm:WALK_BRANCH\n"));
                // A branch can be handled just like a call. If the branch is within the current method, then we just
                // fall down to WALK_UNKNOWN, otherwise we handle it just like a call. Note: we need to force in=true
                // because for a jmp, in or over is the same thing, we're still going there, and the in==true case is
                // the case we want to use...
                fIsJump = true;

                // fall through...

            case WALK_CALL:
                LOG((LF_CORDB,LL_INFO10000, "DC::TS:Imm:WALK_CALL ip=%p nextip=%p\n", walker.GetIP(), walker.GetNextIP()));

                // If we're doing some sort of intra-method jump (usually, to get EIP in a clever way, via the CALL
                // instruction), then put the bp where we're going, NOT at the instruction following the call
                if (IsAddrWithinFrame(ji, info->m_activeFrame.md, walker.GetIP(), walker.GetNextIP()))
                {
                    LOG((LF_CORDB, LL_INFO1000, "Walk call within method!" ));
                    goto LWALK_UNKNOWN;
                }

                if (walker.GetNextIP() != NULL)
                {
#ifdef WIN64EXCEPTIONS
                    // There are 4 places we could be jumping:
                    // 1) to the beginning of the same method (recursive call)
                    // 2) somewhere in the same funclet, that isn't the method start
                    // 3) somewhere in the same method but different funclet
                    // 4) somewhere in a different method
                    //
                    // IsAddrWithinFrame ruled out option 2, IsAddrWithinMethodIncludingFunclet rules out option 4,
                    // and checking the IP against the start address rules out option 1. That leaves only what we
                    // wanted, option #3.
                    fCallingIntoFunclet = IsAddrWithinMethodIncludingFunclet(ji, info->m_activeFrame.md, walker.GetNextIP()) &&
                        ((CORDB_ADDRESS)(SIZE_T)walker.GetNextIP() != ji->m_addrOfCode);
#endif
                    // At this point, we know that the call/branch target is not in the current method.
                    // So if the current instruction is a jump, this must be a tail call or possibly a jump to the finally.
                    // So, check if the call/branch target is the JIT helper for handling tail calls if we are not calling
                    // into the funclet.
                    if ((fIsJump && !fCallingIntoFunclet) || IsTailCall(walker.GetNextIP()))
                    {
                        // A step-over becomes a step-out for a tail call.
                        if (!in)
                        {
                            TrapStepOut(info);
                            return true;
                        }
                    }

                    // To preserve the old behaviour, if this is not a tail call, then we assume we want to
                    // follow the call/jump.
                    if (fIsJump)
                    {
                        in = true;
                    }

                    // There are two cases where we need to perform a step-in. One, if the step operation is
                    // a step-in. Two, if the target address of the call is in a funclet of the current method.
                    // In this case, we want to step into the funclet even if the step operation is a step-over.
                    if (in || fCallingIntoFunclet)
                    {
                        if (TrapStepInHelper(info, walker.GetNextIP(), walker.GetSkipIP(), fCallingIntoFunclet))
                        {
                            return true;
                        }
                    }
                }

                // No known "skip" address (the instruction after the call):
                // report this step as a call and let the step-in machinery finish it.
                if (walker.GetSkipIP() == NULL)
                {
                    LOG((LF_CORDB,LL_INFO10000,"DS::TS 0x%x m_reason = STEP_CALL (skip)\n",
                         this));
                    m_reason = STEP_CALL;
                    return true;
                }

                LOG((LF_CORDB,LL_INFO100000, "DC::TS:Imm:WALK_CALL Skip instruction\n"));
                walker.Skip();
                break;

            case WALK_UNKNOWN:
    LWALK_UNKNOWN:
                LOG((LF_CORDB,LL_INFO10000,"DS::TS:WALK_UNKNOWN - curIP:0x%x "
                    "nextIP:0x%x skipIP:0x%x 1st byte of opcode:0x%x\n", (BYTE*)GetControlPC(&(info->m_activeFrame.registers)),
                    walker.GetNextIP(),walker.GetSkipIP(),
                    *(BYTE*)GetControlPC(&(info->m_activeFrame.registers))));

                // Can't predict the destination; fall back to hardware single-step.
                EnableSingleStep();

                return true;

            default:
                if (walker.GetNextIP() == NULL)
                {
                    return true;
                }

                walker.Next();
            }
        }
    } // if (fIsActivFrameLive)

    //
    // Use our range, if we're in the original
    // frame.
    //
    COR_DEBUG_STEP_RANGE *range;
    SIZE_T rangeCount;

    if (info->m_activeFrame.fp == m_fp)
    {
        range = m_range;
        rangeCount = m_rangeCount;
    }
    else
    {
        range = NULL;
        rangeCount = 0;
    }

    //
    // Keep walking until either we're out of range, or
    // else we can't predict ahead any more.
    //
    while (TRUE)
    {
        const BYTE *ip = walker.GetIP();

        SIZE_T offset = CodeRegionInfo::GetCodeRegionInfo(ji, info->m_activeFrame.md).AddressToOffset(ip);

        LOG((LF_CORDB, LL_INFO1000, "Walking to ip 0x%p (natOff:0x%x)\n",ip,offset));

        // Out of the step range and not somewhere the stepper wants to keep
        // going: plant the patch here and we're done.
        if (!IsInRange(offset, range, rangeCount)
            && !ShouldContinueStep( info, offset ))
        {
            AddBindAndActivateNativeManagedPatch(info->m_activeFrame.md,
                                                 ji,
                                                 offset,
                                                 info->m_returnFrame.fp,
                                                 NULL);
            return true;
        }

        switch (walker.GetOpcodeWalkType())
        {
        case WALK_RETURN:

            LOG((LF_CORDB, LL_INFO10000, "DS::TS: WALK_RETURN Adding Patch.\n"));

            // In the loop above, if we're at the return address, we'll check & see
            // if we're returning to elsewhere within the same method, and if so,
            // we'll single step rather than TrapStepOut. If we see a return in the
            // code stream, then we'll set a breakpoint there, so that we can
            // examine the return address, and decide whether to SS or TSO then
            AddBindAndActivateNativeManagedPatch(info->m_activeFrame.md,
                                                 ji,
                                                 offset,
                                                 info->m_returnFrame.fp,
                                                 NULL);
            return true;

        case WALK_CALL:

            LOG((LF_CORDB, LL_INFO10000, "DS::TS: WALK_CALL.\n"));

            // If we're doing some sort of intra-method jump (usually, to get EIP in a clever way, via the CALL
            // instruction), then put the bp where we're going, NOT at the instruction following the call
            if (IsAddrWithinFrame(ji, info->m_activeFrame.md, walker.GetIP(), walker.GetNextIP()))
            {
                LOG((LF_CORDB, LL_INFO10000, "DS::TS: WALK_CALL IsAddrWithinFrame, Adding Patch.\n"));

                // How else to detect this?
                AddBindAndActivateNativeManagedPatch(info->m_activeFrame.md,
                                                     ji,
                                                     CodeRegionInfo::GetCodeRegionInfo(ji, info->m_activeFrame.md).AddressToOffset(walker.GetNextIP()),
                                                     info->m_returnFrame.fp,
                                                     NULL);
                return true;
            }

            // For a step-over of a tail call, patch the current spot rather
            // than following the call (the callee never returns here).
            if (IsTailCall(walker.GetNextIP()))
            {
                if (!in)
                {
                    AddBindAndActivateNativeManagedPatch(info->m_activeFrame.md,
                                                         ji,
                                                         offset,
                                                         info->m_returnFrame.fp,
                                                         NULL);
                    return true;
                }
            }

#ifdef WIN64EXCEPTIONS
            fCallingIntoFunclet = IsAddrWithinMethodIncludingFunclet(ji, info->m_activeFrame.md, walker.GetNextIP());
#endif
            if (in || fCallingIntoFunclet)
            {
                LOG((LF_CORDB, LL_INFO10000, "DS::TS: WALK_CALL step in is true\n"));
                if (walker.GetNextIP() == NULL)
                {
                    LOG((LF_CORDB, LL_INFO10000, "DS::TS: WALK_CALL NextIP == NULL\n"));
                    AddBindAndActivateNativeManagedPatch(info->m_activeFrame.md,
                                                         ji,
                                                         offset,
                                                         info->m_returnFrame.fp,
                                                         NULL);

                    LOG((LF_CORDB,LL_INFO10000,"DS0x%x m_reason=STEP_CALL 2\n",
                         this));
                    m_reason = STEP_CALL;

                    return true;
                }

                if (TrapStepInHelper(info, walker.GetNextIP(), walker.GetSkipIP(), fCallingIntoFunclet))
                {
                    return true;
                }
            }

            LOG((LF_CORDB, LL_INFO10000, "DS::TS: WALK_CALL Calling GetSkipIP\n"));
            if (walker.GetSkipIP() == NULL)
            {
                AddBindAndActivateNativeManagedPatch(info->m_activeFrame.md,
                                                     ji,
                                                     offset,
                                                     info->m_returnFrame.fp,
                                                     NULL);

                LOG((LF_CORDB,LL_INFO10000,"DS 0x%x m_reason=STEP_CALL4\n",this));
                m_reason = STEP_CALL;

                return true;
            }

            walker.Skip();
            LOG((LF_CORDB, LL_INFO10000, "DS::TS: skipping over call.\n"));
            break;

        default:
            if (walker.GetNextIP() == NULL)
            {
                AddBindAndActivateNativeManagedPatch(info->m_activeFrame.md,
                                                     ji,
                                                     offset,
                                                     info->m_returnFrame.fp,
                                                     NULL);
                return true;
            }

            walker.Next();
            break;
        }
    }

    // Unreachable: every path out of the while(TRUE) loop above returns.
    LOG((LF_CORDB,LL_INFO1000,"Ending TrapStep\n"));
}
// Decide whether a control transfer from currentAddr to targetAddr stays
// within the current frame (i.e. is an intra-frame jump rather than a call
// into another frame).
// dji         - jit info for the current method; must not be NULL.
// pMD         - the current method.
// currentAddr - address the transfer originates from (only consulted on WIN64).
// targetAddr  - destination of the transfer.
// Returns true only if the target lies inside this method's code, is not the
// method's first instruction (that would be a recursive call), and - on
// WIN64 - lives in the same funclet as the source.
bool DebuggerStepper::IsAddrWithinFrame(DebuggerJitInfo *dji,
                                        MethodDesc* pMD,
                                        const BYTE* currentAddr,
                                        const BYTE* targetAddr)
{
    _ASSERTE(dji != NULL);

    bool fWithinFrame = IsAddrWithinMethodIncludingFunclet(dji, pMD, targetAddr);

    // A jump to the method's entry point is a recursive call, not an
    // intra-frame transfer. In RTM we should see if this method is really
    // necessary, since it looks like the X86 JIT doesn't emit intra-method
    // jumps anymore.
    if (fWithinFrame && ((CORDB_ADDRESS)(SIZE_T)targetAddr == dji->m_addrOfCode))
    {
        fWithinFrame = false;
    }

#if defined(WIN64EXCEPTIONS)
    // On WIN64, additionally require source and target to be in the same funclet.
    _ASSERTE(currentAddr != NULL);
    if (fWithinFrame)
    {
        int idxCurrent = dji->GetFuncletIndex((CORDB_ADDRESS)currentAddr, DebuggerJitInfo::GFIM_BYADDRESS);
        int idxTarget  = dji->GetFuncletIndex((CORDB_ADDRESS)targetAddr,  DebuggerJitInfo::GFIM_BYADDRESS);
        fWithinFrame = (idxCurrent == idxTarget);
    }
#endif // WIN64EXCEPTIONS

    return fWithinFrame;
}
// Report whether targetAddr falls anywhere inside the method's jitted code,
// funclets included.
// x86 shouldn't need to call this method directly; call IsAddrWithinFrame()
// on x86 instead. The word "funclet" in the name is there to scare people off.
bool DebuggerStepper::IsAddrWithinMethodIncludingFunclet(DebuggerJitInfo *dji,
                                                         MethodDesc* pMD,
                                                         const BYTE* targetAddr)
{
    _ASSERTE(dji != NULL);

    // The code-region info knows every region (hot/cold, funclets) the method
    // occupies; delegate the containment test to it.
    CodeRegionInfo codeRegion = CodeRegionInfo::GetCodeRegionInfo(dji, pMD);
    return codeRegion.IsMethodAddress(targetAddr);
}
// Set up a "step next" (stop at the next line in this or a calling frame).
// For a traditional stepper this is implemented purely as a step-out:
// TrapStepOut places the patch up the stack.
void DebuggerStepper::TrapStepNext(ControllerStackInfo *info)
{
    LOG((LF_CORDB, LL_INFO10000, "DS::TrapStepNext, this=%p\n", this));
    // StepNext for a Normal stepper is just a step-out
    TrapStepOut(info);

    // @todo -should we also EnableTraceCall??
}
// Is this frame interesting?
// For a traditional stepper, all frames are interesting.
// pFrame - the frame in question; unused here because the answer never varies.
// Returns true unconditionally.
bool DebuggerStepper::IsInterestingFrame(FrameInfo * pFrame)
{
    LIMITED_METHOD_CONTRACT;

    return true;
}
// Place a single patch somewhere up the stack to do a step-out.
// info              - stack info for the thread; the walk starts at its active frame.
// fForceTraditional - when true, skip the IsInterestingFrame() filtering so
//                     even "uninteresting" frames get patched.
void DebuggerStepper::TrapStepOut(ControllerStackInfo *info, bool fForceTraditional)
{
    ControllerStackInfo returnInfo;
    DebuggerJitInfo *dji;

    LOG((LF_CORDB, LL_INFO10000, "DS::TSO this:0x%p\n", this));

    bool fReturningFromFinallyFunclet = false;

#if defined(WIN64EXCEPTIONS)
    // When we step out of a funclet, we should do one of two things, depending
    // on the original stepping intention:
    // 1) If we originally want to step out, then we should skip the parent method.
    // 2) If we originally want to step in/over but we step off the end of the funclet,
    //    then we should resume in the parent, if possible.
    if (info->m_activeFrame.IsNonFilterFuncletFrame())
    {
        // There should always be a frame for the parent method.
        _ASSERTE(info->HasReturnFrame());

#ifdef _TARGET_ARM_
        // On ARM, walk up until the return frame belongs to the same method as
        // the funclet (i.e. the funclet's parent).
        while (info->HasReturnFrame() && info->m_activeFrame.md != info->m_returnFrame.md)
        {
            StackTraceTicket ticket(info);
            returnInfo.GetStackInfo(ticket, GetThread(), info->m_returnFrame.fp, NULL);
            info = &returnInfo;
        }

        _ASSERTE(info->HasReturnFrame());
#endif

        _ASSERTE(info->m_activeFrame.md == info->m_returnFrame.md);

        if (m_eMode == cStepOut)
        {
            // Case 1 above: move the walk past the parent method so the loop
            // below starts above it.
            StackTraceTicket ticket(info);
            returnInfo.GetStackInfo(ticket, GetThread(), info->m_returnFrame.fp, NULL);
            info = &returnInfo;
        }
        else
        {
            _ASSERTE(info->m_returnFrame.managed);
            _ASSERTE(info->m_returnFrame.frame == NULL);

            MethodDesc *md = info->m_returnFrame.md;
            dji = info->m_returnFrame.GetJitInfoFromFrame();

            // The return value of a catch funclet is the control PC to resume to.
            // The return value of a finally funclet has no meaning, so we need to check
            // if the return value is in the main method.
            LPVOID resumePC = GetRegdisplayReturnValue(&(info->m_activeFrame.registers));

            // For finally funclet, there are two possible situations. Either the finally is
            // called normally (i.e. no exception), in which case we simply fall through and
            // let the normal loop do its work below, or the finally is called by the EH
            // routines, in which case we need the unwind notification.
            if (IsAddrWithinMethodIncludingFunclet(dji, md, (const BYTE *)resumePC))
            {
                // Case 2 above: patch the resume address inside the parent method.
                SIZE_T reloffset = dji->m_codeRegionInfo.AddressToOffset((BYTE*)resumePC);

                AddBindAndActivateNativeManagedPatch(info->m_returnFrame.md,
                                                     dji,
                                                     reloffset,
                                                     info->m_returnFrame.fp,
                                                     NULL);

                LOG((LF_CORDB, LL_INFO10000,
                     "DS::TSO:normally managed code AddPatch"
                     " in %s::%s, offset 0x%x, m_reason=%d\n",
                     info->m_returnFrame.md->m_pszDebugClassName,
                     info->m_returnFrame.md->m_pszDebugMethodName,
                     reloffset, m_reason));

                // Do not set m_reason to STEP_RETURN here. Logically, the funclet and the parent method are the
                // same method, so we should not "return" to the parent method.
                LOG((LF_CORDB, LL_INFO10000,"DS::TSO: done\n"));

                return;
            }
            else
            {
                // This is the case where we step off the end of a finally funclet.
                fReturningFromFinallyFunclet = true;
            }
        }
    }
#endif // WIN64EXCEPTIONS

#ifdef _DEBUG
    FramePointer dbgLastFP; // for debug, make sure we're making progress through the stack.
#endif

    while (info->HasReturnFrame())
    {

#ifdef _DEBUG
        dbgLastFP = info->m_activeFrame.fp;
#endif

        // Continue walking up the stack & set a patch upon the next
        // frame up. We will eventually either hit managed code
        // (which we can set a definite patch in), or the top of the
        // stack.
        StackTraceTicket ticket(info);

        // The last parameter here is part of a really targeted (*cough* dirty) fix to
        // disable getting an unwanted UMChain to fix issue 650903 (See
        // code:ControllerStackInfo::WalkStack and code:TrackUMChain for the other
        // parts.) In the case of managed step out we know that we aren't interested in
        // unmanaged frames, and generating that unmanaged frame causes the stackwalker
        // not to report the managed frame that was at the same SP. However the unmanaged
        // frame might be used in the mixed-mode step out case so I don't suppress it
        // there.
        returnInfo.GetStackInfo(ticket, GetThread(), info->m_returnFrame.fp, NULL, !(m_rgfMappingStop & STOP_UNMANAGED));
        info = &returnInfo;

#ifdef _DEBUG
        // If this assert fires, then it means that we're not making progress while
        // tracing up towards the root of the stack. Likely an issue in the Left-Side's
        // stackwalker.
        _ASSERTE(IsCloserToLeaf(dbgLastFP, info->m_activeFrame.fp));
#endif

#ifdef FEATURE_STUBS_AS_IL
        if (info->m_activeFrame.md->IsILStub() && info->m_activeFrame.md->AsDynamicMethodDesc()->IsMulticastStub())
        {
            LOG((LF_CORDB, LL_INFO10000,
                 "DS::TSO: multicast frame.\n"));

            // User break should always be called from managed code, so it should never actually hit this codepath.
            _ASSERTE(GetDCType() != DEBUGGER_CONTROLLER_USER_BREAKPOINT);

            // JMC steppers shouldn't be patching stubs.
            if (DEBUGGER_CONTROLLER_JMC_STEPPER == this->GetDCType())
            {
                LOG((LF_CORDB, LL_INFO10000, "DS::TSO: JMC stepper skipping frame.\n"));
                continue;
            }

            TraceDestination trace;
            EnableTraceCall(info->m_activeFrame.fp);

            PCODE ip = GetControlPC(&(info->m_activeFrame.registers));
            if (g_pEEInterface->TraceStub((BYTE*)ip, &trace)
                && g_pEEInterface->FollowTrace(&trace)
                && PatchTrace(&trace, info->m_activeFrame.fp,
                              true))
                break;
        }
        else
#endif // FEATURE_STUBS_AS_IL
        if (info->m_activeFrame.managed)
        {
            LOG((LF_CORDB, LL_INFO10000,
                 "DS::TSO: return frame is managed.\n"));

            if (info->m_activeFrame.frame == NULL)
            {
                // Returning normally to managed code.
                _ASSERTE(info->m_activeFrame.md != NULL);

                // Polymorphic check to skip over non-interesting frames.
                if (!fForceTraditional && !this->IsInterestingFrame(&info->m_activeFrame))
                    continue;

                dji = info->m_activeFrame.GetJitInfoFromFrame();
                _ASSERTE(dji != NULL);

                // Note: we used to pass in the IP from the active frame to GetJitInfo, but there seems to be no value
                // in that, and it was causing problems creating a stepper while sitting in ndirect stubs after we'd
                // returned from the unmanaged function that had been called.
                ULONG reloffset = info->m_activeFrame.relOffset;

                AddBindAndActivateNativeManagedPatch(info->m_activeFrame.md,
                                                     dji,
                                                     reloffset,
                                                     info->m_returnFrame.fp,
                                                     NULL);

                LOG((LF_CORDB, LL_INFO10000,
                     "DS::TSO:normally managed code AddPatch"
                     " in %s::%s, offset 0x%x, m_reason=%d\n",
                     info->m_activeFrame.md->m_pszDebugClassName,
                     info->m_activeFrame.md->m_pszDebugMethodName,
                     reloffset, m_reason));

                // Don't mark STEP_RETURN when we stepped off the end of a finally
                // funclet: logically the funclet and its parent are the same
                // method, so this isn't a "return" to the parent.
                if (!fReturningFromFinallyFunclet)
                {
                    m_reason = STEP_RETURN;
                }
                break;
            }
            else if (info->m_activeFrame.frame == FRAME_TOP)
            {

                // Trad-stepper's step-out is actually like a step-next when we go off the top.
                // JMC-steppers do a true-step out. So for JMC-steppers, don't enable trace-call.
                if (DEBUGGER_CONTROLLER_JMC_STEPPER == this->GetDCType())
                {
                    LOG((LF_CORDB, LL_EVERYTHING, "DS::TSO: JMC stepper skipping exit-frame case.\n"));
                    break;
                }

                // User break should always be called from managed code, so it should never actually hit this codepath.
                _ASSERTE(GetDCType() != DEBUGGER_CONTROLLER_USER_BREAKPOINT);

                // We're walking off the top of the stack. Note that if we call managed code again,
                // this trace-call will cause our stepper to fire. So we'll actually do a
                // step-next; not a true-step out.
                EnableTraceCall(info->m_activeFrame.fp);

                LOG((LF_CORDB, LL_INFO1000, "DS::TSO: Off top of frame!\n"));

                m_reason = STEP_EXIT; //we're on the way out..

                // @todo not that it matters since we don't send a
                // stepComplete message to the right side.
                break;
            }
            else if (info->m_activeFrame.frame->GetFrameType() == Frame::TYPE_FUNC_EVAL)
            {
                // Note: we treat walking off the top of the stack and
                // walking off the top of a func eval the same way,
                // except that we don't enable trace call since we
                // know exactly where were going.

                LOG((LF_CORDB, LL_INFO1000,
                     "DS::TSO: Off top of func eval!\n"));

                m_reason = STEP_EXIT;
                break;
            }
            else if (info->m_activeFrame.frame->GetFrameType() == Frame::TYPE_SECURITY &&
                     info->m_activeFrame.frame->GetInterception() == Frame::INTERCEPTION_NONE)
            {
                // If we're stepping out of something that was protected by (declarative) security,
                // the security subsystem may leave a frame on the stack to cache it's computation.
                // HOWEVER, this isn't a real frame, and so we don't want to stop here. On the other
                // hand, if we're in the security goop (sec. executes managed code to do stuff), then
                // we'll want to use the "returning to stub case", below. GetInterception()==NONE
                // indicates that the frame is just a cache frame:
                // Skip it and keep on going
                LOG((LF_CORDB, LL_INFO10000,
                     "DS::TSO: returning to a non-intercepting frame. Keep unwinding\n"));
                continue;
            }
            else
            {
                LOG((LF_CORDB, LL_INFO10000,
                     "DS::TSO: returning to a stub frame.\n"));

                // User break should always be called from managed code, so it should never actually hit this codepath.
                _ASSERTE(GetDCType() != DEBUGGER_CONTROLLER_USER_BREAKPOINT);

                // JMC steppers shouldn't be patching stubs.
                if (DEBUGGER_CONTROLLER_JMC_STEPPER == this->GetDCType())
                {
                    LOG((LF_CORDB, LL_INFO10000, "DS::TSO: JMC stepper skipping frame.\n"));
                    continue;
                }

                // We're returning to some funky frame.
                // (E.g. a security frame has called a native method.)

                // Patch the frame from entering other methods. This effectively gives the Step-out
                // a step-next behavior. For eg, this can be useful for step-out going between multicast delegates.
                // This step-next could actually land us leaf-more on the callstack than we currently are!
                // If we were a true-step out, we'd skip this and keep crawling
                // up the callstack.
                //
                // !!! For now, we assume that the TraceFrame entry
                // point is smart enough to tell where it is in the
                // calling sequence. We'll see how this holds up.
                TraceDestination trace;

                // We don't want notifications of trace-calls leaf-more than our current frame.
                // For eg, if our current frame calls out to unmanaged code and then back in,
                // we'll get a TraceCall notification. But since it's leaf-more than our current frame,
                // we don't care because we just want to step out of our current frame (and everything
                // our current frame may call).
                EnableTraceCall(info->m_activeFrame.fp);

                CONTRACT_VIOLATION(GCViolation); // TraceFrame GC-triggers

                if (g_pEEInterface->TraceFrame(GetThread(),
                                               info->m_activeFrame.frame, FALSE,
                                               &trace, &(info->m_activeFrame.registers))
                    && g_pEEInterface->FollowTrace(&trace)
                    && PatchTrace(&trace, info->m_activeFrame.fp,
                                  true))
                    break;

                // !!! Problem: we don't know which return frame to use -
                // the TraceFrame patch may be in a frame below the return
                // frame, or in a frame parallel with it
                // (e.g. prestub popping itself & then calling.)
                //
                // For now, I've tweaked the FP comparison in the
                // patch dispatching code to allow either case.
            }
        }
        else
        {
            LOG((LF_CORDB, LL_INFO10000,
                 "DS::TSO: return frame is not managed.\n"));

            // Only step out to unmanaged code if we're actually
            // marked to stop in unmanaged code. Otherwise, just loop
            // to get us past the unmanaged frames.
            if (m_rgfMappingStop & STOP_UNMANAGED)
            {
                LOG((LF_CORDB, LL_INFO10000,
                     "DS::TSO: return to unmanaged code "
                     "m_reason=STEP_RETURN\n"));

                // Don't mark STEP_RETURN when we stepped off the end of a finally
                // funclet: logically the funclet and its parent are the same
                // method, so this isn't a "return" to the parent.
                if (!fReturningFromFinallyFunclet)
                {
                    m_reason = STEP_RETURN;
                }

                // We're stepping out into unmanaged code
                LOG((LF_CORDB, LL_INFO10000,
                     "DS::TSO: Setting unmanaged trace patch at 0x%x(%x)\n",
                     GetControlPC(&(info->m_activeFrame.registers)),
                     info->m_returnFrame.fp.GetSPValue()));

                AddAndActivateNativePatchForAddress((CORDB_ADDRESS_TYPE *)GetControlPC(&(info->m_activeFrame.registers)),
                                                    info->m_returnFrame.fp,
                                                    FALSE,
                                                    TRACE_UNMANAGED);
                break;
            }
        }
    }

    // If we get here, we may be stepping out of the last frame. Our thread
    // exit logic should catch this case. (@todo)
    LOG((LF_CORDB, LL_INFO10000,"DS::TSO: done\n"));
}
// void DebuggerStepper::StepOut()
// Called by Debugger::HandleIPCEvent to set up
// everything so that the process will step out of the current
// frame correctly.
// NOTE(review): the "How" description below mirrors DebuggerStepper::Step's
// range-conversion logic rather than StepOut's body; it appears to have been
// copied from Step's header - verify before relying on it.
// How: Converts the provided array of ranges from IL ranges to
// native ranges (if they're not native already), and then calls
// TrapStep or TrapStepOut, like so:
// Get the appropriate MethodDesc & JitInfo
// Iterate through array of IL ranges, use
// JitInfo::MapILRangeToMapEntryRange to translate IL to native
// ranges.
// Set member variables to remember that the DebuggerStepper now uses
// the ranges: m_range, m_rangeCount, m_stepIn, m_fp
// If (!TrapStep()) then {m_stepIn = true; TrapStepOut()}
// EnableUnwind( m_fp );
void DebuggerStepper::StepOut(FramePointer fp, StackTraceTicket ticket)
{
    LOG((LF_CORDB, LL_INFO10000, "Attempting to step out, fp:0x%x this:0x%x"
        "\n", fp.GetSPValue(), this ));

    Thread  *pThread        = GetThread();
    CONTEXT *pFilterContext = g_pEEInterface->GetThreadFilterContext(pThread);

    // We pass in the ticket b/c this is called both when we're live (via
    // DebuggerUserBreakpoint) and when we're stopped (via normal StepOut)
    ControllerStackInfo csi;
    csi.GetStackInfo(ticket, pThread, fp, pFilterContext);

    // A step-out carries no step ranges and is never a step-in.
    ResetRange();
    m_stepIn = FALSE;

    m_fp = csi.m_activeFrame.fp;

#if defined(WIN64EXCEPTIONS)
    // We need to remember the parent method frame pointer here so that we will recognize
    // the range of the stepper as being valid when we return to the parent method.
    if (csi.m_activeFrame.IsNonFilterFuncletFrame())
    {
        m_fpParentMethod = csi.m_returnFrame.fp;
    }
#endif // WIN64EXCEPTIONS

    m_eMode = cStepOut;

    _ASSERTE((fp == LEAF_MOST_FRAME) || (csi.m_activeFrame.md != NULL) || (csi.m_returnFrame.md != NULL));

    // Plant the step-out patch and arm the unwind notification for EH exits.
    TrapStepOut(&csi);
    EnableUnwind(m_fp);
}
// GROW_RANGES_IF_NECESSARY: used inside DebuggerStepper::SetRangesFromIL while
// copying ranges into m_range. When the write cursor (rTo) has reached the end
// of the allocated array (rToEnd), double the allocation on the interop-safe
// heap. The ClrSafeInt multiplications guard the byte-size arithmetic against
// overflow; on overflow or allocation failure the macro frees m_range and
// makes the enclosing function return false. On success it updates rTo,
// rToEnd, and realRangeCount to describe the doubled buffer.
// (Comments live above the #define because // comments inside a
// backslash-continued macro would swallow the continuation.)
#define GROW_RANGES_IF_NECESSARY()                                            \
    if (rTo == rToEnd)                                                        \
    {                                                                         \
        ULONG NewSize, OldSize;                                               \
        if (!ClrSafeInt::multiply(sizeof(COR_DEBUG_STEP_RANGE), (ULONG)(realRangeCount*2), NewSize) || \
            !ClrSafeInt::multiply(sizeof(COR_DEBUG_STEP_RANGE), (ULONG)realRangeCount, OldSize) || \
            NewSize < OldSize)                                                \
        {                                                                     \
            DeleteInteropSafe(m_range);                                       \
            m_range = NULL;                                                   \
            return false;                                                     \
        }                                                                     \
        COR_DEBUG_STEP_RANGE *_pTmp = (COR_DEBUG_STEP_RANGE*)                 \
            g_pDebugger->GetInteropSafeHeap()->Realloc(m_range,               \
                                                       NewSize,              \
                                                       OldSize);             \
                                                                              \
        if (_pTmp == NULL)                                                    \
        {                                                                     \
            DeleteInteropSafe(m_range);                                       \
            m_range = NULL;                                                   \
            return false;                                                     \
        }                                                                     \
                                                                              \
        m_range = _pTmp;                                                      \
        rTo     = m_range + realRangeCount;                                   \
        rToEnd  = m_range + (realRangeCount*2);                               \
        realRangeCount *= 2;                                                  \
    }
//-----------------------------------------------------------------------------
// Given a set of IL ranges, convert them to native and cache them in m_range /
// m_rangeCount / m_realRangeCount.
// Return true on success, false on error (allocation failure or size overflow).
//-----------------------------------------------------------------------------
bool DebuggerStepper::SetRangesFromIL(DebuggerJitInfo *dji, COR_DEBUG_STEP_RANGE *ranges, SIZE_T rangeCount)
{
    CONTRACTL
    {
        SO_NOT_MAINLINE;
        WRAPPER(THROWS);
        GC_NOTRIGGER;
        PRECONDITION(ThisIsHelperThreadWorker()); // Only the helper thread initializes a stepper.
        PRECONDITION(m_range == NULL); // shouldn't be set already.
        PRECONDITION(CheckPointer(ranges));
        PRECONDITION(CheckPointer(dji));
    }
    CONTRACTL_END;

    // Note: we used to pass in the IP from the active frame to GetJitInfo, but there seems to be no value in that, and
    // it was causing problems creating a stepper while sitting in ndirect stubs after we'd returned from the unmanaged
    // function that had been called.
    MethodDesc *fd = dji->m_fd;

    // The "+1" is for internal use, when we need to
    // set an intermediate patch in pitched code. Isn't
    // used unless the method is pitched & a patch is set
    // inside it. Thus we still pass cRanges as the
    // range count.
    m_range = new (interopsafe) COR_DEBUG_STEP_RANGE[rangeCount+1];

    if (m_range == NULL)
        return false;

    TRACE_ALLOC(m_range);

    SIZE_T realRangeCount = rangeCount;

    if (dji != NULL)
    {
        LOG((LF_CORDB,LL_INFO10000,"DeSt::St: For code md=0x%x, got DJI 0x%x, from 0x%x to 0x%x\n",
            fd,
            dji, dji->m_addrOfCode, (ULONG)dji->m_addrOfCode
            + (ULONG)dji->m_sizeOfCode));

        //
        // Map ranges to native offsets for jitted code
        //
        COR_DEBUG_STEP_RANGE *r, *rEnd, *rTo, *rToEnd;

        r = ranges;
        rEnd = r + rangeCount;

        rTo = m_range;
        rToEnd  = rTo + realRangeCount;

        //
        // rTo may also be incremented in the middle of the loop on WIN64 platforms.
        //
        for (/**/; r < rEnd; r++, rTo++)
        {
            // If we are already at the end of our allocated array, but there are still
            // more ranges to copy over, then grow the array.
            GROW_RANGES_IF_NECESSARY();

            if (r->startOffset == 0 && r->endOffset == (ULONG) ~0)
            {
                // {0...-1} means use the entire method as the range
                // Code dup'd from below case.
                LOG((LF_CORDB, LL_INFO10000, "DS:Step: Have DJI, special (0,-1) entry\n"));
                rTo->startOffset = 0;
                rTo->endOffset   = (ULONG32)g_pEEInterface->GetFunctionSize(fd);
            }
            else
            {
                //
                // One IL range may consist of multiple
                // native ranges.
                //

                DebuggerILToNativeMap *mStart, *mEnd;

                dji->MapILRangeToMapEntryRange(r->startOffset,
                                               r->endOffset,
                                               &mStart,
                                               &mEnd);

                // Either mStart and mEnd are both NULL (we don't have any sequence point),
                // or they are both non-NULL.
                _ASSERTE( ((mStart == NULL) && (mEnd == NULL)) ||
                          ((mStart != NULL) && (mEnd != NULL)) );

                if (mStart == NULL)
                {
                    // @todo Won't this result in us stepping across
                    // the entire method?
                    rTo->startOffset = 0;
                    rTo->endOffset   = 0;
                }
                else if (mStart == mEnd)
                {
                    // Single native range: copy it straight across.
                    rTo->startOffset = mStart->nativeStartOffset;
                    rTo->endOffset   = mStart->nativeEndOffset;
                }
                else
                {
                    // Account for more than one continuous range here.

                    // Move the pointer back to work with the loop increment below.
                    // Don't dereference this pointer now!
                    rTo--;

                    for (DebuggerILToNativeMap* pMap = mStart;
                         pMap <= mEnd;
                         pMap = pMap + 1)
                    {
                        if ((pMap == mStart) ||
                            (pMap->nativeStartOffset != (pMap-1)->nativeEndOffset))
                        {
                            // Discontinuity: start a new output range (growing
                            // the buffer if needed).
                            rTo++;
                            GROW_RANGES_IF_NECESSARY();

                            rTo->startOffset = pMap->nativeStartOffset;
                            rTo->endOffset   = pMap->nativeEndOffset;
                        }
                        else
                        {
                            // If we have continuous ranges, then lump them together.
                            _ASSERTE(rTo->endOffset == pMap->nativeStartOffset);
                            rTo->endOffset = pMap->nativeEndOffset;
                        }
                    }

                    LOG((LF_CORDB, LL_INFO10000, "DS:Step: nat off:0x%x to 0x%x\n", rTo->startOffset, rTo->endOffset));
                }
            }
        }

        // rTo now points one past the last entry written; recompute the count
        // of entries actually produced (may differ from the IL count on WIN64).
        rangeCount = (int)((BYTE*)rTo - (BYTE*)m_range) / sizeof(COR_DEBUG_STEP_RANGE);
    }
    else
    {
        // Even if we don't have debug info, we'll be able to
        // step through the method
        SIZE_T functionSize = g_pEEInterface->GetFunctionSize(fd);

        COR_DEBUG_STEP_RANGE *r    = ranges;
        COR_DEBUG_STEP_RANGE *rEnd = r + rangeCount;

        COR_DEBUG_STEP_RANGE *rTo  = m_range;

        for(/**/; r < rEnd; r++, rTo++)
        {
            if (r->startOffset == 0 && r->endOffset == (ULONG) ~0)
            {
                LOG((LF_CORDB, LL_INFO10000, "DS:Step:No DJI, (0,-1) special entry\n"));
                // Code dup'd from above case.
                // {0...-1} means use the entire method as the range
                rTo->startOffset = 0;
                rTo->endOffset   = (ULONG32)functionSize;
            }
            else
            {
                LOG((LF_CORDB, LL_INFO10000, "DS:Step:No DJI, regular entry\n"));
                // We can't just leave this IL entry - we have to
                // get rid of it.
                // This will just be ignored
                rTo->startOffset = rTo->endOffset = (ULONG32)functionSize;
            }
        }
    }

    m_rangeCount = rangeCount;
    m_realRangeCount = rangeCount;

    return true;
}
// void DebuggerStepper::Step() Tells the stepper to step over
// the provided ranges.
// void *fp: frame pointer.
// bool in: true if we want to step into a function within the range,
// false if we want to step over functions within the range.
// COR_DEBUG_STEP_RANGE *ranges: Assumed to be non-NULL; it will
// always hold at least one element.
// SIZE_T rangeCount: One less than the true number of elements in
// the ranges argument.
// bool rangeIL: true if the ranges are provided in IL (they'll be
// converted to native before the DebuggerStepper uses them),
// false if they already are native.
// Tells the stepper to step (in or over) the provided ranges, rooted at
// frame 'fp'. See the header comment above for parameter details.
// Returns true if the step was armed (patches/triggers placed), false on
// allocation/conversion failure.
bool DebuggerStepper::Step(FramePointer fp, bool in,
                           COR_DEBUG_STEP_RANGE *ranges, SIZE_T rangeCount,
                           bool rangeIL)
{
    LOG((LF_CORDB, LL_INFO1000, "DeSt:Step this:0x%x ", this));
    if (rangeCount>0)
        LOG((LF_CORDB,LL_INFO10000," start,end[0]:(0x%x,0x%x)\n",
             ranges[0].startOffset, ranges[0].endOffset));
    else
        LOG((LF_CORDB,LL_INFO10000," single step\n"));

    Thread *thread = GetThread();
    CONTEXT *context = g_pEEInterface->GetThreadFilterContext(thread);

    // ControllerStackInfo doesn't report IL stubs, so if we are in an IL stub, we need
    // to handle the single-step specially. There are probably other problems when we stop
    // in an IL stub. We need to revisit this later.
    bool fIsILStub = false;
    // Fix: the reinterpret_cast below had lost its template argument
    // (IsManagedNativeCode takes a const BYTE *).
    if ((context != NULL) &&
        g_pEEInterface->IsManagedNativeCode(reinterpret_cast<const BYTE *>(GetIP(context))))
    {
        MethodDesc * pMD = g_pEEInterface->GetNativeCodeMethodDesc(GetIP(context));
        if (pMD != NULL)
        {
            fIsILStub = pMD->IsILStub();
        }
    }
    LOG((LF_CORDB, LL_INFO10000, "DS::S - fIsILStub = %d\n", fIsILStub));

    ControllerStackInfo info;
    StackTraceTicket ticket(thread);
    info.GetStackInfo(ticket, thread, fp, context);

    _ASSERTE((fp == LEAF_MOST_FRAME) || (info.m_activeFrame.md != NULL) ||
             (info.m_returnFrame.md != NULL));

    m_stepIn = in;

    DebuggerJitInfo *dji = info.m_activeFrame.GetJitInfoFromFrame();
    if (dji == NULL)
    {
        // !!! ERROR range step in frame with no code
        ranges = NULL;
        rangeCount = 0;
    }

    // Throw away any ranges left over from a previous step on this stepper.
    if (m_range != NULL)
    {
        TRACE_FREE(m_range);
        DeleteInteropSafe(m_range);
        m_range = NULL;
        m_rangeCount = 0;
        m_realRangeCount = 0;
    }

    if (rangeCount > 0)
    {
        if (rangeIL)
        {
            // IL ranges supplied, we need to convert them to native ranges.
            bool fOk = SetRangesFromIL(dji, ranges, rangeCount);
            if (!fOk)
            {
                return false;
            }
        }
        else
        {
            // Native ranges, already supplied. Just copy them over.
            m_range = new (interopsafe) COR_DEBUG_STEP_RANGE[rangeCount];
            if (m_range == NULL)
            {
                return false;
            }
            memcpy(m_range, ranges, sizeof(COR_DEBUG_STEP_RANGE) * rangeCount);
            m_realRangeCount = m_rangeCount = rangeCount;
        }
        _ASSERTE(m_range != NULL);
        _ASSERTE(m_rangeCount > 0);
        _ASSERTE(m_realRangeCount > 0);
    }
    else
    {
        // !!! ERROR cannot map IL ranges
        ranges = NULL;
        rangeCount = 0;
    }

    if (fIsILStub)
    {
        // Don't use the ControllerStackInfo if we are in an IL stub.
        m_fp = fp;
    }
    else
    {
        m_fp = info.m_activeFrame.fp;
#if defined(WIN64EXCEPTIONS)
        // We need to remember the parent method frame pointer here so that we will recognize
        // the range of the stepper as being valid when we return to the parent method.
        if (info.m_activeFrame.IsNonFilterFuncletFrame())
        {
            m_fpParentMethod = info.m_returnFrame.fp;
        }
#endif // WIN64EXCEPTIONS
    }
    m_eMode = m_stepIn ? cStepIn : cStepOver;

    LOG((LF_CORDB,LL_INFO10000,"DS 0x%x STep: STEP_NORMAL\n",this));
    m_reason = STEP_NORMAL; //assume it'll be a normal step & set it to
    //something else if we walk over it
    if (fIsILStub)
    {
        LOG((LF_CORDB, LL_INFO10000, "DS:Step: stepping in an IL stub\n"));

        // Enable the right triggers if the user wants to step in.
        if (in)
        {
            if (this->GetDCType() == DEBUGGER_CONTROLLER_STEPPER)
            {
                EnableTraceCall(info.m_activeFrame.fp);
            }
            else if (this->GetDCType() == DEBUGGER_CONTROLLER_JMC_STEPPER)
            {
                EnableMethodEnter();
            }
        }

        // Also perform a step-out in case this IL stub is returning to managed code.
        // However, we must fix up the ControllerStackInfo first, since it doesn't
        // report IL stubs. The active frame reported by the ControllerStackInfo is
        // actually the return frame in this case.
        info.SetReturnFrameWithActiveFrame();
        TrapStepOut(&info);
    }
    else if (!TrapStep(&info, in))
    {
        // TrapStep couldn't place a patch; fall back to a step-next
        // (which behaves like a step-out plus catching any calls).
        LOG((LF_CORDB,LL_INFO10000,"DS:Step: Did TS\n"));
        m_stepIn = true;
        TrapStepNext(&info);
    }

    LOG((LF_CORDB,LL_INFO10000,"DS:Step: Did TS,TSO\n"));
    EnableUnwind(m_fp);
    return true;
}
// TP_RESULT DebuggerStepper::TriggerPatch()
// What: Triggers patch if we're not in a stub, and we're
// outside of the stepping range. Otherwise sets another patch so as to
// step out of the stub, or in the next instruction within the range.
// How: If module==NULL & managed==> we're in a stub:
// TrapStepOut() and return false. Module==NULL&!managed==> return
// true. If m_range != NULL & execution is currently in the range,
// attempt a TrapStep (TrapStepOut otherwise) & return false. Otherwise,
// return true.
TP_RESULT DebuggerStepper::TriggerPatch(DebuggerControllerPatch *patch,
                                        Thread *thread,
                                        TRIGGER_WHY tyWhy)
{
    LOG((LF_CORDB, LL_INFO10000, "DeSt::TP\n"));

    // If we're frozen, we may hit a patch but we just ignore it
    if (IsFrozen())
    {
        LOG((LF_CORDB, LL_INFO1000000, "DS::TP, ignoring patch at %p during frozen state\n", patch->address));
        return TPR_IGNORE;
    }

    Module *module = patch->key.module;
    BOOL managed = patch->IsManagedPatch();
    mdMethodDef md = patch->key.md;
    SIZE_T offset = patch->offset;

    _ASSERTE((this->GetThread() == thread) || !"Stepper should only get patches on its thread");

    // Note we can only run a stack trace if:
    // - the context is in managed code (eg, not a stub)
    // - OR we have a frame in place to prime the stackwalk.
    ControllerStackInfo info;
    CONTEXT *context = g_pEEInterface->GetThreadFilterContext(thread);

    _ASSERTE(!ISREDIRECTEDTHREAD(thread));

    // Context should always be from patch.
    _ASSERTE(context != NULL);

    bool fSafeToDoStackTrace = true;

    // If we're in a stub (module == NULL and still in managed code), then our context is off in lala-land
    // Then, it's only safe to do a stackwalk if the top frame is protecting us. That's only true for a
    // frame_push. If we're here on a manager_push, then we don't have any such protection, so don't do the
    // stackwalk.
    fSafeToDoStackTrace = patch->IsSafeForStackTrace();

    if (fSafeToDoStackTrace)
    {
        StackTraceTicket ticket(patch);
        info.GetStackInfo(ticket, thread, LEAF_MOST_FRAME, context);

        LOG((LF_CORDB, LL_INFO10000, "DS::TP: this:0x%p in %s::%s (fp:0x%p, "
            "off:0x%p md:0x%p), \n\texception source:%s::%s (fp:0x%p)\n",
            this,
            info.m_activeFrame.md!=NULL?info.m_activeFrame.md->m_pszDebugClassName:"Unknown",
            info.m_activeFrame.md!=NULL?info.m_activeFrame.md->m_pszDebugMethodName:"Unknown",
            info.m_activeFrame.fp.GetSPValue(), patch->offset, patch->key.md,
            m_fdException!=NULL?m_fdException->m_pszDebugClassName:"None",
            m_fdException!=NULL?m_fdException->m_pszDebugMethodName:"None",
            m_fpException.GetSPValue()));
    }

    DisableAll();

    // Fix: the dac_cast below had lost its template argument; DetectHandleLCGMethods
    // takes a PCODE.
    if (DetectHandleLCGMethods(dac_cast<PCODE>(patch->address), NULL, &info))
    {
        return TPR_IGNORE;
    }

    if (module == NULL)
    {
        // JMC steppers should not be patching here...
        _ASSERTE(DEBUGGER_CONTROLLER_JMC_STEPPER != this->GetDCType());

        if (managed)
        {
            LOG((LF_CORDB, LL_INFO10000,
                 "Frame (stub) patch hit at offset 0x%x\n", offset));

            // This is a stub patch. If it was a TRACE_FRAME_PUSH that
            // got us here, then the stub's frame is pushed now, so we
            // tell the frame to apply the real patch. If we got here
            // via a TRACE_MGR_PUSH, however, then there is no frame
            // and we tell the stub manager that generated the
            // TRACE_MGR_PUSH to apply the real patch.
            TraceDestination trace;
            bool traceOk;
            FramePointer frameFP;
            PTR_BYTE traceManagerRetAddr = NULL;

            if (patch->trace.GetTraceType() == TRACE_MGR_PUSH)
            {
                _ASSERTE(context != NULL);
                CONTRACT_VIOLATION(GCViolation);
                traceOk = g_pEEInterface->TraceManager(
                                                 thread,
                                                 patch->trace.GetStubManager(),
                                                 &trace,
                                                 context,
                                                 &traceManagerRetAddr);

                // We don't have an active frame here, so patch with a
                // FP of NULL so anything will match.
                //
                // @todo: should we take Esp out of the context?
                frameFP = LEAF_MOST_FRAME;
            }
            else
            {
                _ASSERTE(fSafeToDoStackTrace);
                CONTRACT_VIOLATION(GCViolation); // TraceFrame GC-triggers
                traceOk = g_pEEInterface->TraceFrame(thread,
                                                     thread->GetFrame(),
                                                     TRUE,
                                                     &trace,
                                                     &(info.m_activeFrame.registers));

                frameFP = info.m_activeFrame.fp;
            }

            // Enable the JMC backstop for traditional steppers to catch us in case
            // we didn't predict the call target properly.
            EnableJMCBackStop(NULL);

            if (!traceOk
                || !g_pEEInterface->FollowTrace(&trace)
                || !PatchTrace(&trace, frameFP,
                               (m_rgfMappingStop&STOP_UNMANAGED)?
                                    (true):(false)))
            {
                //
                // We can't set a patch in the frame -- we need
                // to trap returning from this frame instead.
                //
                // Note: if we're in the TRACE_MGR_PUSH case from
                // above, then we must place a patch where the
                // TraceManager function told us to, since we can't
                // actually unwind from here.
                //
                if (patch->trace.GetTraceType() != TRACE_MGR_PUSH)
                {
                    _ASSERTE(fSafeToDoStackTrace);
                    LOG((LF_CORDB,LL_INFO10000,"TSO for non TRACE_MGR_PUSH case\n"));
                    TrapStepOut(&info);
                }
                else
                {
                    LOG((LF_CORDB, LL_INFO10000,
                         "TSO for TRACE_MGR_PUSH case."));

                    // We'd better have a valid return address.
                    _ASSERTE(traceManagerRetAddr != NULL);

                    if (g_pEEInterface->IsManagedNativeCode(traceManagerRetAddr))
                    {
                        // Grab the jit info for the method.
                        DebuggerJitInfo *dji;
                        dji = g_pDebugger->GetJitInfoFromAddr((TADDR) traceManagerRetAddr);

                        // Fix: both dac_casts below had lost their template arguments.
                        MethodDesc * mdNative = (dji == NULL) ?
                            g_pEEInterface->GetNativeCodeMethodDesc(dac_cast<PCODE>(traceManagerRetAddr)) : dji->m_fd;
                        _ASSERTE(mdNative != NULL);

                        // Find the method that the return is to.
                        _ASSERTE(g_pEEInterface->GetFunctionAddress(mdNative) != NULL);
                        SIZE_T offsetRet = dac_cast<TADDR>(traceManagerRetAddr -
                            g_pEEInterface->GetFunctionAddress(mdNative));

                        // Place the patch.
                        AddBindAndActivateNativeManagedPatch(mdNative,
                                 dji,
                                 offsetRet,
                                 LEAF_MOST_FRAME,
                                 NULL);

                        LOG((LF_CORDB, LL_INFO10000,
                             "DS::TP: normally managed code AddPatch"
                             " in %s::%s, offset 0x%x\n",
                             mdNative->m_pszDebugClassName,
                             mdNative->m_pszDebugMethodName,
                             offsetRet));
                    }
                    else
                    {
                        // We're hitting this code path with MC++ assemblies
                        // that have an unmanaged entry point so the stub returns to CallDescrWorker.
                        _ASSERTE(g_pEEInterface->GetNativeCodeMethodDesc(dac_cast<PCODE>(patch->address))->IsILStub());
                    }
                }

                m_reason = STEP_NORMAL; //we tried to do a STEP_CALL, but since it didn't
                //work, we're doing what amounts to a normal step.
                LOG((LF_CORDB,LL_INFO10000,"DS 0x%x m_reason = STEP_NORMAL"
                     "(attempted call thru stub manager, SM didn't know where"
                     " we're going, so did a step out to original call\n",this));
            }
            else
            {
                m_reason = STEP_CALL;
            }

            EnableTraceCall(LEAF_MOST_FRAME);
            EnableUnwind(m_fp);

            return TPR_IGNORE;
        }
        else
        {
            // @todo - when would we hit this codepath?
            // If we're not in managed, then we should have pushed a frame onto the Thread's frame chain,
            // and thus we should still safely be able to do a stackwalk here.
            _ASSERTE(fSafeToDoStackTrace);
            if (DetectHandleInterceptors(&info) )
            {
                return TPR_IGNORE; //don't actually want to stop
            }

            LOG((LF_CORDB, LL_INFO10000,
                 "Unmanaged step patch hit at 0x%x\n", offset));

            StackTraceTicket ticket(patch);
            PrepareForSendEvent(ticket);
            return TPR_TRIGGER;
        }
    } // end (module == NULL)

    // If we're inside an interceptor but don't want to be, then we'll set a
    // patch outside the current function.
    _ASSERTE(fSafeToDoStackTrace);
    if (DetectHandleInterceptors(&info) )
    {
        return TPR_IGNORE; //don't actually want to stop
    }

    LOG((LF_CORDB,LL_INFO10000, "DS: m_fp:0x%p, activeFP:0x%p fpExc:0x%p\n",
         m_fp.GetSPValue(), info.m_activeFrame.fp.GetSPValue(), m_fpException.GetSPValue()));

    if (IsInRange(offset, m_range, m_rangeCount, &info) ||
        ShouldContinueStep( &info, offset))
    {
        LOG((LF_CORDB, LL_INFO10000,
             "Intermediate step patch hit at 0x%x\n", offset));

        // Still inside the step range - keep stepping.
        if (!TrapStep(&info, m_stepIn))
            TrapStepNext(&info);

        EnableUnwind(m_fp);
        return TPR_IGNORE;
    }
    else
    {
        LOG((LF_CORDB, LL_INFO10000, "Step patch hit at 0x%x\n", offset));

        // For a JMC stepper, we have an additional constraint:
        // skip non-user code. So if we're still in non-user code, then
        // we've got to keep going
        DebuggerMethodInfo * dmi = g_pDebugger->GetOrCreateMethodInfo(module, md);

        if ((dmi != NULL) && DetectHandleNonUserCode(&info, dmi))
        {
            return TPR_IGNORE;
        }

        StackTraceTicket ticket(patch);
        PrepareForSendEvent(ticket);
        return TPR_TRIGGER;
    }
}
// Return true if the current stop location should be skipped as non-user code.
// A traditional (non-JMC) stepper treats all code as user code, so it never
// skips anything; DebuggerJMCStepper overrides this with the real filtering.
bool DebuggerStepper::DetectHandleNonUserCode(ControllerStackInfo *info, DebuggerMethodInfo * pInfo)
{
    LIMITED_METHOD_CONTRACT;

    return false;
}
// For regular steppers, trace-call is just a trace-call.
void DebuggerStepper::EnablePolyTraceCall()
{
this->EnableTraceCall(LEAF_MOST_FRAME);
}
// Traditional steppers enable MethodEnter as a back-stop for step-in.
// We hope that the stub-managers will predict the step-in for us,
// but in case they don't the Method-Enter should catch us.
// MethodEnter is not fully correct for traditional steppers for a few reasons:
// - doesn't handle step-in to native
// - stops us *after* the prolog (a traditional stepper can stop us before the prolog).
// - only works for methods that have the JMC probe. That can exclude all optimized code.
void DebuggerStepper::TriggerMethodEnter(Thread * thread,
                                         DebuggerJitInfo *dji,
                                         const BYTE * ip,
                                         FramePointer fp)
{
    _ASSERTE(dji != NULL);
    _ASSERTE(thread != NULL);
    _ASSERTE(ip != NULL);
    _ASSERTE(this->GetDCType() == DEBUGGER_CONTROLLER_STEPPER);

    _ASSERTE(!IsFrozen());

    MethodDesc * pDesc = dji->m_fd;
    LOG((LF_CORDB, LL_INFO10000, "DJMCStepper::TME, desc=%p, addr=%p\n",
        pDesc, ip));

    // Steppers won't stop in lightweight-codegen (no-metadata) methods.
    // Just return & keep executing.
    if (pDesc->IsNoMetadata())
    {
        LOG((LF_CORDB, LL_INFO100000, "DJMCStepper::TME, skipping b/c it's lw-codegen\n"));
        return;
    }

    // This is really just a heuristic. We don't want to trigger a JMC probe when we are
    // executing in an IL stub, or in one of the marshaling methods called by the IL stub.
    // The problem is that the IL stub can call into arbitrary code, including custom marshalers.
    // In that case the user has to put a breakpoint to stop in the code.
    if (g_pEEInterface->DetectHandleILStubs(thread))
    {
        return;
    }

#ifdef _DEBUG
    // To help trace down if a problem is related to a stubmanager,
    // we add a knob that lets us skip the MethodEnter checks. This lets tests directly
    // go against the Stub-managers w/o the MethodEnter check backstops.
    int fSkip = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DbgSkipMEOnStep);
    if (fSkip)
    {
        return;
    }

    // See EnableJMCBackStop() for details here. This check just makes sure that we don't fire
    // the assert if we end up in the method we started in (which could happen if we trace call
    // instructions before the JMC probe).
    // m_StepInStartMethod may be null (if this step-in didn't start from managed code), so
    // it must be null-checked before the IsLCGMethod() dereference (the consistency-check
    // message below already prints "unknown" for the null case).
    if ((m_StepInStartMethod != pDesc) &&
        ((m_StepInStartMethod == NULL) || !m_StepInStartMethod->IsLCGMethod()))
    {
        // Since normal step-in should stop us at the prolog, and TME is after the prolog,
        // if a stub-manager did successfully find the address, we should get a TriggerPatch first
        // at native offset 0 (before the prolog) and before we get the TME. That means if
        // we do get the TME, then there was no stub-manager to find us.

        SString sLog;
        StubManager::DbgGetLog(&sLog);

        // Assert b/c the Stub-manager should have caught us first.
        // We don't want people relying on TriggerMethodEnter as the real implementation for Traditional Step-in
        // (see above for reasons why). However, using TME will provide a bandage for the final retail product
        // in cases where we are missing a stub-manager.
        CONSISTENCY_CHECK_MSGF(false, (
            "\nThe Stubmanagers failed to identify and trace a stub on step-in. The stub-managers for this code-path path need to be fixed.\n"
            "See http://team/sites/clrdev/Devdocs/StubManagers.rtf for more information on StubManagers.\n"
            "Stepper this=0x%p, startMethod='%s::%s'\n"
            "---------------------------------\n"
            "Stub manager log:\n%S"
            "\n"
            "The thread is now in managed method '%s::%s'.\n"
            "---------------------------------\n",
            this,
            ((m_StepInStartMethod == NULL) ? "unknown" : m_StepInStartMethod->m_pszDebugClassName),
            ((m_StepInStartMethod == NULL) ? "unknown" : m_StepInStartMethod->m_pszDebugMethodName),
            sLog.GetUnicode(),
            pDesc->m_pszDebugClassName, pDesc->m_pszDebugMethodName
            ));
    }
#endif

    // Place a patch to stop us.
    // Don't bind to a particular AppDomain so that we can do a Cross-Appdomain step.
    AddBindAndActivateNativeManagedPatch(pDesc,
                  dji,
                  CodeRegionInfo::GetCodeRegionInfo(dji, pDesc).AddressToOffset(ip),
                  fp,
                  NULL // AppDomain
    );

    LOG((LF_CORDB, LL_INFO10000, "DJMCStepper::TME, after setting patch to stop\n"));

    // Once we resume, we'll go hit that patch (duh, we patched our return address)
    // Furthermore, we know the step will complete with reason = call, so set that now.
    m_reason = STEP_CALL;
}
// We may have single-stepped over a return statement to land us up a frame.
// Or we may have single-stepped through a method.
// We never single-step into calls (we place a patch at the call destination).
// Returns true if the step is complete and an event should be sent.
bool DebuggerStepper::TriggerSingleStep(Thread *thread, const BYTE *ip)
{
    LOG((LF_CORDB,LL_INFO10000,"DS:TSS this:0x%x, @ ip:0x%x\n", this, ip));

    _ASSERTE(!IsFrozen());

    // User break should only do a step-out and never actually need a singlestep flag.
    _ASSERTE(GetDCType() != DEBUGGER_CONTROLLER_USER_BREAKPOINT);

    //
    // there's one weird case here - if the last instruction generated
    // a hardware exception, we may be in lala land. If so, rely on the unwind
    // handler to figure out what happened.
    //
    // @todo this could be wrong when we have the incremental collector going
    //
    if (!g_pEEInterface->IsManagedNativeCode(ip))
    {
        LOG((LF_CORDB,LL_INFO10000, "DS::TSS: not in managed code, Returning false (case 0)!\n"));
        DisableSingleStep();
        return false;
    }

    // If we EnC the method, we'll blast the function address,
    // and so have to get it from the DJI that we'll have. If
    // we haven't gotten debugger info about a regular function, then
    // we'll have to get the info from the EE, which will be valid
    // since we're standing in the function at this point, and
    // EnC couldn't have happened yet.
    MethodDesc *fd = g_pEEInterface->GetNativeCodeMethodDesc((PCODE)ip);

    SIZE_T offset;
    DebuggerJitInfo *dji = g_pDebugger->GetJitInfoFromAddr((TADDR) ip);
    offset = CodeRegionInfo::GetCodeRegionInfo(dji, fd).AddressToOffset(ip);

    ControllerStackInfo info;

    // Safe to stackwalk b/c we've already checked that our IP is in crawlable code.
    StackTraceTicket ticket(ip);
    info.GetStackInfo(ticket, GetThread(), LEAF_MOST_FRAME, NULL);

    // This is a special case where we return from a managed method back to an IL stub. This can
    // only happen if there's no more managed method frames closer to the root and we want to perform
    // a step out, or if we step-next off the end of a method called by an IL stub. In either case,
    // we'll get a single step in an IL stub, which we want to ignore. We also want to enable trace
    // call here, just in case this IL stub is about to call the managed target (in the reverse interop case).
    if (fd->IsILStub())
    {
        // (Log text fixed: this used to say "not in managed code", copy-pasted
        // from the unmanaged case above, but we are in an IL stub here.)
        LOG((LF_CORDB,LL_INFO10000, "DS::TSS: in an IL stub, Returning false (case 0)!\n"));

        if (this->GetDCType() == DEBUGGER_CONTROLLER_STEPPER)
        {
            EnableTraceCall(info.m_activeFrame.fp);
        }
        else if (this->GetDCType() == DEBUGGER_CONTROLLER_JMC_STEPPER)
        {
            EnableMethodEnter();
        }
        DisableSingleStep();
        return false;
    }

    DisableAll();

    LOG((LF_CORDB,LL_INFO10000, "DS::TSS m_fp:0x%x, activeFP:0x%x fpExc:0x%x\n",
        m_fp.GetSPValue(), info.m_activeFrame.fp.GetSPValue(), m_fpException.GetSPValue()));

    if (DetectHandleLCGMethods((PCODE)ip, fd, &info))
    {
        return false;
    }

    if (IsInRange(offset, m_range, m_rangeCount, &info) ||
        ShouldContinueStep( &info, offset))
    {
        // Still inside the step range - keep stepping.
        if (!TrapStep(&info, m_stepIn))
            TrapStepNext(&info);

        EnableUnwind(m_fp);

        LOG((LF_CORDB,LL_INFO10000, "DS::TSS: Returning false Case 1!\n"));
        return false;
    }
    else
    {
        LOG((LF_CORDB,LL_INFO10000, "DS::TSS: Returning true Case 2 for reason STEP_%02x!\n", m_reason));

        // @todo - when would a single-step (not a patch) land us in user-code?
        // For a JMC stepper, we have an additional constraint:
        // skip non-user code. So if we're still in non-user code, then
        // we've got to keep going
        DebuggerMethodInfo * dmi = g_pDebugger->GetOrCreateMethodInfo(fd->GetModule(), fd->GetMemberDef());

        if ((dmi != NULL) && DetectHandleNonUserCode(&info, dmi))
            return false;

        PrepareForSendEvent(ticket);
        return true;
    }
}
// DebuggerStepper::TriggerTraceCall - fires when the thread enters a stub at
// 'ip' while this stepper has trace-call armed. Follows the stub's trace to
// its real target and places a patch there so the step completes in the callee.
void DebuggerStepper::TriggerTraceCall(Thread *thread, const BYTE *ip)
{
LOG((LF_CORDB,LL_INFO10000,"DS:TTC this:0x%x, @ ip:0x%x\n",this,ip));
TraceDestination trace;
// A frozen stepper (nested func-eval in progress) ignores all triggers.
if (IsFrozen())
{
LOG((LF_CORDB,LL_INFO10000,"DS:TTC exit b/c of Frozen\n"));
return;
}
// This is really just a heuristic. We don't want to trigger a JMC probe when we are
// executing in an IL stub, or in one of the marshaling methods called by the IL stub.
// The problem is that the IL stub can call into arbitrary code, including custom marshalers.
// In that case the user has to put a breakpoint to stop in the code.
if (g_pEEInterface->DetectHandleILStubs(thread))
{
return;
}
// First try the full trace/follow/patch sequence to find out whether a patch
// can actually be placed at the trace destination.
if (g_pEEInterface->TraceStub(ip, &trace)
&& g_pEEInterface->FollowTrace(&trace)
&& PatchTrace(&trace, LEAF_MOST_FRAME,
(m_rgfMappingStop&STOP_UNMANAGED)?(true):(false)))
{
// !!! We really want to know ahead of time if PatchTrace will succeed.
// NOTE(review): DisableAll() presumably tears down the patch placed by the
// condition above, hence the second PatchTrace call below - confirm.
DisableAll();
PatchTrace(&trace, LEAF_MOST_FRAME, (m_rgfMappingStop&STOP_UNMANAGED)?
(true):(false));
// If we're triggering a trace call, and we're following a trace into either managed code or unjitted managed
// code, then we need to update our stepper's reason to STEP_CALL to reflect the fact that we're going to land
// into a new function because of a call.
if ((trace.GetTraceType() == TRACE_UNJITTED_METHOD) || (trace.GetTraceType() == TRACE_MANAGED))
{
m_reason = STEP_CALL;
}
EnableUnwind(m_fp);
LOG((LF_CORDB, LL_INFO10000, "DS::TTC potentially a step call!\n"));
}
}
// DebuggerStepper::TriggerUnwind - an exception is unwinding past this
// stepper's region; place a patch on the filter/handler at (fd, offset) so
// the step completes there.
// unwindReason must be STEP_EXCEPTION_FILTER or STEP_EXCEPTION_HANDLER.
void DebuggerStepper::TriggerUnwind(Thread *thread,
                                    MethodDesc *fd, DebuggerJitInfo * pDJI, SIZE_T offset,
                                    FramePointer fp,
                                    CorDebugStepReason unwindReason)
{
    CONTRACTL
    {
        SO_NOT_MAINLINE;
        THROWS; // from GetJitInfo
        GC_NOTRIGGER; // don't send IPC events
        MODE_COOPERATIVE; // TriggerUnwind always is coop

        PRECONDITION(!IsDbgHelperSpecialThread());
        PRECONDITION(fd->IsDynamicMethod() || (pDJI != NULL));
    }
    CONTRACTL_END;

    LOG((LF_CORDB,LL_INFO10000,"DS::TU this:0x%p, in %s::%s, offset 0x%p "
        "frame:0x%p unwindReason:0x%x\n", this, fd->m_pszDebugClassName,
        fd->m_pszDebugMethodName, offset, fp.GetSPValue(), unwindReason));

    _ASSERTE(unwindReason == STEP_EXCEPTION_FILTER || unwindReason == STEP_EXCEPTION_HANDLER);

    if (IsFrozen())
    {
        // (Log tag fixed: previously said "DS:TTC", copy-pasted from TriggerTraceCall.)
        LOG((LF_CORDB,LL_INFO10000,"DS::TU exit b/c of Frozen\n"));
        return;
    }

    if (IsCloserToRoot(fp, GetUnwind()))
    {
        // Handler is in a parent frame. For all steps (in,out,over)
        // we want to stop in the handler.
        // This will be like a Step Out, so we don't need any range.
        ResetRange();
    }
    else
    {
        // Handler/Filter is in the same frame as the stepper
        // For a step-in/over, we want to patch the handler/filter.
        // But for a step-out, we want to just continue executing (and don't change
        // the step-reason either).
        if (m_eMode == cStepOut)
        {
            LOG((LF_CORDB, LL_INFO10000, "DS::TU Step-out, returning for same-frame case.\n"));
            return;
        }
    }

    // Remember the origin of the exception, so that if the step looks like
    // it's going to complete in a different frame, but the code comes from the
    // same frame as the one we're in, we won't stop twice in the "same" range
    m_fpException = fp;
    m_fdException = fd;

    //
    // An exception is exiting the step region. Set a patch on
    // the filter/handler.
    //
    DisableAll();

    BOOL fOk;
    fOk = AddBindAndActivateNativeManagedPatch(fd, pDJI, offset, LEAF_MOST_FRAME, NULL);

    // Since we're unwinding to an already executed method, the method should already
    // be jitted and placing the patch should work.
    CONSISTENCY_CHECK_MSGF(fOk, ("Failed to place patch at TriggerUnwind.\npThis=0x%p md=0x%p, native offset=0x%x\n", this, fd, offset));

    LOG((LF_CORDB,LL_INFO100000,"Step reason:%s\n", unwindReason==STEP_EXCEPTION_FILTER
        ? "STEP_EXCEPTION_FILTER":"STEP_EXCEPTION_HANDLER"));
    m_reason = unwindReason;
}
// Prepare for sending an event.
// This is called 1:1 w/ SendEvent, but this guy can be called in a GC_TRIGGERABLE context
// whereas SendEvent is pretty strict.
// Caller ensures that it's safe to run a stack trace.
void DebuggerStepper::PrepareForSendEvent(StackTraceTicket ticket)
{
#ifdef _DEBUG
    _ASSERTE(!m_fReadyToSend);
    m_fReadyToSend = true;
#endif

    LOG((LF_CORDB, LL_INFO10000, "DS::SE m_fpStepInto:0x%x\n", m_fpStepInto.GetSPValue()));

    // If we stepped into something, see whether the step-into target frame is
    // closer to the root than where we are now; if so, the step completed via
    // a call and the reason becomes STEP_CALL.
    if (m_fpStepInto != LEAF_MOST_FRAME)
    {
        ControllerStackInfo csi;
        csi.GetStackInfo(ticket, GetThread(), LEAF_MOST_FRAME, NULL);

        if (csi.m_targetFrameFound &&
#if !defined(WIN64EXCEPTIONS)
            IsCloserToRoot(m_fpStepInto, csi.m_activeFrame.fp)
#else
            IsCloserToRoot(m_fpStepInto, (csi.m_activeFrame.IsNonFilterFuncletFrame() ? csi.m_returnFrame.fp : csi.m_activeFrame.fp))
#endif // WIN64EXCEPTIONS
           )
        {
            m_reason = STEP_CALL;
            LOG((LF_CORDB, LL_INFO10000, "DS::SE this:0x%x STEP_CALL!\n", this));
        }
#ifdef _DEBUG
        else
        {
            LOG((LF_CORDB, LL_INFO10000, "DS::SE this:0x%x not a step call!\n", this));
        }
#endif
    }

#ifdef _DEBUG
    // Steppers should only stop in interesting code.
    if (this->GetDCType() == DEBUGGER_CONTROLLER_JMC_STEPPER)
    {
        // If we're at either a patch or SS, we'll have a context.
        CONTEXT *context = g_pEEInterface->GetThreadFilterContext(GetThread());

        // Fix: this used to test (context == NULL) and then dereference
        // context inside the block, which would AV whenever the filter
        // context was actually missing. The consistency check below can
        // only run when we do have a context.
        if (context != NULL)
        {
            void * pIP = CORDbgGetIP(reinterpret_cast<DT_CONTEXT *>(context));

            DebuggerJitInfo * dji = g_pDebugger->GetJitInfoFromAddr((TADDR) pIP);
            DebuggerMethodInfo * dmi = NULL;
            if (dji != NULL)
            {
                dmi = dji->m_methodInfo;

                CONSISTENCY_CHECK_MSGF(dmi->IsJMCFunction(), ("JMC stepper %p stopping in non-jmc method, MD=%p, '%s::%s'",
                    this, dji->m_fd, dji->m_fd->m_pszDebugClassName, dji->m_fd->m_pszDebugMethodName));
            }
        }
    }
#endif
}
// DebuggerStepper::SendEvent - sends the step-complete event to the right side
// and deletes this controller (it has no remaining triggers to fire again).
// Returns true (the event was sent).
bool DebuggerStepper::SendEvent(Thread *thread, bool fIpChanged)
{
CONTRACTL
{
SO_NOT_MAINLINE;
NOTHROW;
SENDEVENT_CONTRACT_ITEMS;
}
CONTRACTL_END;
// We practically should never have a step interrupted by SetIp.
// We'll still go ahead and send the Step-complete event because we've already
// deactivated our triggers by now and we haven't placed any new patches to catch us.
// We assert here because we don't believe we'll ever be able to hit this scenario.
// This is technically an issue, but we consider it benign enough to leave in.
_ASSERTE(!fIpChanged || !"Stepper interupted by SetIp");
LOG((LF_CORDB, LL_INFO10000, "DS::SE m_fpStepInto:0x%x\n", m_fpStepInto.GetSPValue()));
// PrepareForSendEvent must have run first (it sets m_fReadyToSend in _DEBUG).
_ASSERTE(m_fReadyToSend);
_ASSERTE(GetThread() == thread);
CONTEXT *context = g_pEEInterface->GetThreadFilterContext(thread);
_ASSERTE(!ISREDIRECTEDTHREAD(thread));
// We need to send the stepper and delete the controller because our stepper
// no longer has any patches or other triggers that will let it send the step-complete event.
g_pDebugger->SendStep(thread, context, this, m_reason);
// Do not touch any members after this - the controller is gone.
this->Delete();
#ifdef _DEBUG
// Now that we've sent the event, we can stop recording information.
StubManager::DbgFinishLog();
#endif
return true;
}
// Throw away any step-range array this stepper holds. A NULL m_range means
// "no range restriction" to the rest of the stepper code.
void DebuggerStepper::ResetRange()
{
    if (m_range != NULL)
    {
        TRACE_FREE(m_range);
        DeleteInteropSafe(m_range);
        m_range = NULL;
    }
}
//-----------------------------------------------------------------------------
// A stepper is alive-but-frozen while a nested func-eval is in progress;
// a frozen stepper ignores its triggers until the func-eval completes.
//-----------------------------------------------------------------------------
bool DebuggerStepper::IsFrozen()
{
    return m_cFuncEvalNesting > 0;
}
//-----------------------------------------------------------------------------
// A stepper is 'dead' once a func-eval exit is seen on a stepper that was not
// frozen - the nesting count goes negative.
//-----------------------------------------------------------------------------
bool DebuggerStepper::IsDead()
{
    return m_cFuncEvalNesting < 0;
}
// * ------------------------------------------------------------------------
// * DebuggerJMCStepper routines
// * ------------------------------------------------------------------------
// DebuggerJMCStepper ctor - a stepper honoring Just-My-Code: it only completes
// steps in methods marked as user (JMC) code. All setup is delegated to the
// DebuggerStepper base.
DebuggerJMCStepper::DebuggerJMCStepper(Thread *thread,
CorDebugUnmappedStop rgfMappingStop,
CorDebugIntercept interceptStop,
AppDomain *appDomain) :
DebuggerStepper(thread, rgfMappingStop, interceptStop, appDomain)
{
LOG((LF_CORDB, LL_INFO10000, "DJMCStepper ctor, this=%p\n", this));
}
// DebuggerJMCStepper dtor - nothing JMC-specific to tear down; base class
// handles trigger/patch cleanup.
DebuggerJMCStepper::~DebuggerJMCStepper()
{
LOG((LF_CORDB, LL_INFO10000, "DJMCStepper dtor, this=%p\n", this));
}
// JMC steppers only consider frames belonging to user (JMC) code interesting;
// steps never complete in non-user frames.
bool DebuggerJMCStepper::IsInterestingFrame(FrameInfo * pFrame)
{
    CONTRACTL
    {
        THROWS;
        MODE_ANY;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    // Throws on failure, so the result is never NULL here.
    DebuggerMethodInfo * pMethodInfo = pFrame->GetMethodInfoFromFrameOrThrow();
    _ASSERTE(pMethodInfo != NULL);

    bool fIsUserCode = pMethodInfo->IsJMCFunction();

    LOG((LF_CORDB, LL_INFO1000000, "DS::TSO, frame '%s::%s' is '%s' code\n",
         pFrame->DbgGetClassName(), pFrame->DbgGetMethodName(),
         fIsUserCode ? "user" : "non-user"));

    return fIsUserCode;
}
// A JMC stepper's step-next stops at the next bit of user code run.
// This may be a Step-Out, or any user code called before that.
// A1 -> B1 -> { A2, B2 -> B3 -> A3}
// So TrapStepNext at end of A2 should land us in A3.
void DebuggerJMCStepper::TrapStepNext(ControllerStackInfo *info)
{
LOG((LF_CORDB, LL_INFO10000, "DJMCStepper::TrapStepNext, this=%p\n", this));
// Arm the JMC probe so that any user code entered before the step-out patch
// fires will complete the step.
EnableMethodEnter();
// This will place a patch up the stack and set m_reason = STEP_RETURN.
// If we end up hitting JMC before that patch, we'll hit TriggerMethodEnter
// and that will set our reason to STEP_CALL.
TrapStepOut(info);
}
// Helper for a JMC step-in at a call site.
// pInfo - stack info for the stepper's current frame.
// ipCallTarget - target address of the call instruction.
// ipNext - address of the instruction after the call; the fall-back patch goes here.
// fCallingIntoFunclet - true if the call target is an EH funclet (WIN64EXCEPTIONS
//     platforms only); funclets have no JMC probes, so a traditional patch is
//     placed at the target instead.
// Returns true: the stepper runs free and completes either at the patch after
// the call or via TriggerMethodEnter.
bool DebuggerJMCStepper::TrapStepInHelper(
ControllerStackInfo * pInfo,
const BYTE * ipCallTarget,
const BYTE * ipNext,
bool fCallingIntoFunclet)
{
#ifndef WIN64EXCEPTIONS
// There are no funclets on x86.
_ASSERTE(!fCallingIntoFunclet);
#endif
// If we are calling into a funclet, then we can't rely on the JMC probe to stop us because there are no
// JMC probes in funclets. Instead, we have to perform a traditional step-in here.
if (fCallingIntoFunclet)
{
TraceDestination td;
td.InitForManaged(reinterpret_cast(ipCallTarget));
PatchTrace(&td, LEAF_MOST_FRAME, false);
// If this succeeds, then we still need to put a patch at the return address. This is done below.
// If this fails, then we definitely need to put a patch at the return address to trap the thread.
// So in either case, we have to execute the rest of this function.
}
MethodDesc * pDesc = pInfo->m_activeFrame.md;
DebuggerJitInfo *dji = NULL;
// We may not have a DJI if we're in an attach case. We should still be able to do a JMC-step in though.
// So NULL is ok here.
dji = g_pDebugger->GetJitInfo(pDesc, (const BYTE*) ipNext);
// Place patch after call, which is at ipNext. Note we don't need an IL->Native map here
// since we disassembled native code to find the ip after the call.
SIZE_T offset = CodeRegionInfo::GetCodeRegionInfo(dji, pDesc).AddressToOffset(ipNext);
LOG((LF_CORDB, LL_INFO100000, "DJMCStepper::TSIH, at '%s::%s', calling=0x%p, next=0x%p, offset=%d\n",
pDesc->m_pszDebugClassName,
pDesc->m_pszDebugMethodName,
ipCallTarget, ipNext,
offset));
// Place a patch at the native address (inside the managed method).
AddBindAndActivateNativeManagedPatch(pInfo->m_activeFrame.md,
dji,
offset,
pInfo->m_returnFrame.fp,
NULL);
// Arm the JMC probe as well, in case user code runs before the return patch hits.
EnableMethodEnter();
// Return true means that we want to let the stepper run free. It will either
// hit the patch after the call instruction or it will hit a TriggerMethodEnter.
return true;
}
// For JMC-steppers, we don't enable trace-call; we enable Method-Enter.
void DebuggerJMCStepper::EnablePolyTraceCall()
{
_ASSERTE(!IsFrozen());
this->EnableMethodEnter();
}
// Return true if this is non-user code. This means we've setup the proper patches &
// triggers, etc and so we expect the controller to just run free.
// This is called when all other stepping criteria are met and we're about to
// send a step-complete. For JMC, this is when we see if we're in non-user code
// and if so, continue stepping instead of sending the step complete.
// Return false if this is user-code.
//
// Arguments:
//    pInfo - stack info for the frame we stopped in
//    dmi   - method info for that frame; must be non-NULL
bool DebuggerJMCStepper::DetectHandleNonUserCode(ControllerStackInfo *pInfo, DebuggerMethodInfo * dmi)
{
    _ASSERTE(dmi != NULL);
    bool fIsUserCode = dmi->IsJMCFunction();

    if (!fIsUserCode)
    {
        LOG((LF_CORDB, LL_INFO10000, "JMC stepper stopped in non-user code, continuing.\n"));
        // Not-user code, we want to skip through this.

        // We may be here while trying to step-out.
        // Step-out just means stop at the first interesting frame above us.
        // So JMC TrapStepOut won't patch a non-user frame.
        // But if we're skipping over other stuff (prolog, epilog, interceptors,
        // trace calls), then we may still be in the middle of non-user
        //_ASSERTE(m_eMode != cStepOut);

        if (m_eMode == cStepOut)
        {
            // Step-out: keep walking up until TrapStepOut finds a user frame.
            TrapStepOut(pInfo);
        }
        else if (m_stepIn)
        {
            // Step-in: run until we hit the next bit of user (JMC) code, either
            // via the method-enter probe or the step-out patch.
            EnableMethodEnter();
            TrapStepOut(pInfo);
            // Run until we hit the next thing of managed code.
        }
        else
        {
            // Do a traditional step-out since we just want to go up 1 frame.
            TrapStepOut(pInfo, true); // force trad step out.

            // If we're not in the original frame anymore, then
            // If we did a Step-over at the end of a method, and that did a single-step over the return
            // then we may already be in our parent frame. In that case, we also want to behave
            // like a step-in and TriggerMethodEnter.
            if (this->m_fp != pInfo->m_activeFrame.fp)
            {
                // If we're a step-over, then we should only be stopped in a parent frame.
                _ASSERTE(m_stepIn || IsCloserToLeaf(this->m_fp, pInfo->m_activeFrame.fp));
                EnableMethodEnter();
            }

            // Step-over shouldn't stop in a frame below us in the same callstack.
            // So we do a traditional step-out of our current frame, which guarantees
            // that. After that, we act just like a step-in.
            m_stepIn = true;
        }
        // Catch the case where the stack unwinds past our target frame (e.g. an
        // exception) so the stepper can re-evaluate where it is.
        EnableUnwind(m_fp);

        // Must keep going...
        return true;
    }

    return false;
}
// Dispatched right after the prolog of a JMC function.
// We may be blocking the GC here, so let's be fast!
//
// Arguments:
//    thread - thread entering the method
//    dji    - jit info for the method being entered; must be non-NULL
//    ip     - native instruction pointer just after the prolog
//    fp     - frame pointer for the new frame (we can't stackwalk here, so the
//             caller must supply it)
void DebuggerJMCStepper::TriggerMethodEnter(Thread * thread,
                                            DebuggerJitInfo *dji,
                                            const BYTE * ip,
                                            FramePointer fp)
{
    _ASSERTE(dji != NULL);
    _ASSERTE(thread != NULL);
    _ASSERTE(ip != NULL);
    _ASSERTE(!IsFrozen());

    MethodDesc * pDesc = dji->m_fd;
    LOG((LF_CORDB, LL_INFO10000, "DJMCStepper::TME, desc=%p, addr=%p\n",
         pDesc, ip));

    // JMC steppers won't stop in Lightweight delegates. Just return & keep executing.
    if (pDesc->IsNoMetadata())
    {
        LOG((LF_CORDB, LL_INFO100000, "DJMCStepper::TME, skipping b/c it's lw-codegen\n"));
        return;
    }

    // Is this user code?
    DebuggerMethodInfo * dmi = dji->m_methodInfo;
    bool fIsUserCode = dmi->IsJMCFunction();

    LOG((LF_CORDB, LL_INFO100000, "DJMCStepper::TME, '%s::%s' is '%s' code\n",
         pDesc->m_pszDebugClassName,
         pDesc->m_pszDebugMethodName,
         fIsUserCode ? "user" : "non-user"
        ));

    // If this isn't user code, then just return and continue executing.
    if (!fIsUserCode)
        return;

    // MethodEnter is only enabled when we want to stop in a JMC function.
    // And that's where we are now. So patch the ip and resume.
    // The stepper will hit the patch, and stop.
    // It's a good thing we have the fp passed in, because we have no other
    // way of getting it. We can't do a stack trace here (the stack trace
    // would start at the last pushed Frame, which would miss a lot of managed
    // frames).

    // Don't bind to a particular AppDomain so that we can do a Cross-Appdomain step.
    AddBindAndActivateNativeManagedPatch(pDesc,
                                         dji,
                                         CodeRegionInfo::GetCodeRegionInfo(dji, pDesc).AddressToOffset(ip),
                                         fp,
                                         NULL // AppDomain
                                        );

    LOG((LF_CORDB, LL_INFO10000, "DJMCStepper::TME, after setting patch to stop\n"));

    // Once we resume, we'll go hit that patch (duh, we patched our return address)
    // Furthermore, we know the step will complete with reason = call, so set that now.
    m_reason = STEP_CALL;
}
//-----------------------------------------------------------------------------
// Helper to convert from an EE Frame's interception enum to a CorDebugIntercept
// bitfield.
// The intercept value in EE Frames is a 0-based enumeration (not a bitfield);
// the intercept value for ICorDebug is a bitfield, so a dense lookup table maps
// one to the other.
//-----------------------------------------------------------------------------
CorDebugIntercept ConvertFrameBitsToDbg(Frame::Interception i)
{
    _ASSERTE(i >= 0 && i < Frame::INTERCEPTION_COUNT);

    // The EE enum is 0-based and dense, so index straight into a table.
    const CorDebugIntercept rgMap[Frame::INTERCEPTION_COUNT] =
    {
        // ICorDebug                     EE Frame
        INTERCEPT_NONE,               // INTERCEPTION_NONE,
        INTERCEPT_CLASS_INIT,         // INTERCEPTION_CLASS_INIT
        INTERCEPT_EXCEPTION_FILTER,   // INTERCEPTION_EXCEPTION
        INTERCEPT_CONTEXT_POLICY,     // INTERCEPTION_CONTEXT
        INTERCEPT_SECURITY,           // INTERCEPTION_SECURITY
        INTERCEPT_INTERCEPTION,       // INTERCEPTION_OTHER
    };

    return rgMap[i];
}
//-----------------------------------------------------------------------------
// This is a helper class to do a stack walk over a certain range and find all the interceptors.
// This allows a JMC stepper to see if there are any interceptors it wants to skip over (though
// there's nothing JMC-specific about this).
// Note that we only want to walk the stack range that the stepper is operating in.
// That's because we don't care about interceptors that happened _before_ the
// stepper was created.
//-----------------------------------------------------------------------------
class InterceptorStackInfo
{
public:
#ifdef _DEBUG
    InterceptorStackInfo()
    {
        // since this ctor just nulls out fpTop (which is already done in Init), we
        // only need it in debug.
        m_fpTop = LEAF_MOST_FRAME;
    }
#endif

    // Get a CorDebugIntercept bitfield that contains a bit for each type of interceptor
    // if that interceptor is present within our stack-range.
    // Stack range is from leaf-most up to and including fp
    CorDebugIntercept GetInterceptorsInRange()
    {
        _ASSERTE(m_fpTop != LEAF_MOST_FRAME || !"Must call Init first");
        return (CorDebugIntercept) m_bits;
    }

    // Prime the stackwalk.
    //
    // Arguments:
    //    fpTop        - top (inclusive) of the stack range to examine
    //    thread       - thread whose stack is walked
    //    pContext     - scratch or filter context for the walk
    //    contextValid - whether pContext already holds a valid context
    void Init(FramePointer fpTop, Thread *thread, CONTEXT *pContext, BOOL contextValid)
    {
        _ASSERTE(fpTop != LEAF_MOST_FRAME);
        _ASSERTE(thread != NULL);

        m_bits = 0;
        m_fpTop = fpTop;

        LOG((LF_CORDB,LL_EVERYTHING, "ISI::Init - fpTop=%p, thread=%p, pContext=%p, contextValid=%d\n",
             fpTop.GetSPValue(), thread, pContext, contextValid));

        // The walk's result is accumulated into m_bits by the WalkStack callback;
        // the stackwalk's own return value carries no extra information here.
        // (The original code stored it in an unused local.)
        DebuggerWalkStack(
            thread,
            LEAF_MOST_FRAME,
            pContext,
            contextValid,
            WalkStack,
            (void *) this,
            FALSE
        );
    }

protected:
    // This is a bitfield of all the interceptors we encounter in our stack-range
    int m_bits;

    // This is the top of our stack range.
    FramePointer m_fpTop;

    // Per-frame callback for DebuggerWalkStack: record any interceptor present
    // in this frame, stopping once we pass m_fpTop.
    static StackWalkAction WalkStack(FrameInfo *pInfo, void *data)
    {
        _ASSERTE(pInfo != NULL);
        _ASSERTE(data != NULL);
        InterceptorStackInfo * pThis = (InterceptorStackInfo*) data;

        // If there's an interceptor frame here, then set those
        // bits in our bitfield.
        Frame::Interception i = Frame::INTERCEPTION_NONE;
        Frame * pFrame = pInfo->frame;
        if ((pFrame != NULL) && (pFrame != FRAME_TOP))
        {
            i = pFrame->GetInterception();
            if (i != Frame::INTERCEPTION_NONE)
            {
                pThis->m_bits |= (int) ConvertFrameBitsToDbg(i);
            }
        }
        else if (pInfo->HasMethodFrame())
        {
            // Check whether we are executing in a class constructor.
            _ASSERTE(pInfo->md != NULL);

            // Need to be careful about an off-by-one error here! Imagine your stack looks like:
            // Foo.DoSomething()
            // Foo..cctor <--- step starts/ends in here
            // Bar.Bar();
            //
            // and your code looks like this:
            //   Foo..cctor()
            //   {
            //       Foo.DoSomething(); <-- JMC step started here
            //       int x = 1; <-- step ends here
            //   }
            // This stackwalk covers the inclusive range [Foo..cctor, Foo.DoSomething()] so we will see
            // the static cctor in this walk. However executing inside a static class constructor does not
            // count as an interceptor. You must start the step outside the static constructor and then call
            // into it to have an interceptor. Therefore only static constructors that aren't the outermost
            // frame should be treated as interceptors.
            if (pInfo->md->IsClassConstructor() && (pInfo->fp != pThis->m_fpTop))
            {
                // We called a class constructor, add the appropriate flag
                pThis->m_bits |= (int) INTERCEPT_CLASS_INIT;
            }
        }
        LOG((LF_CORDB,LL_EVERYTHING,"ISI::WS- Frame=%p, fp=%p, Frame bits=%x, Cor bits=0x%x\n", pInfo->frame, pInfo->fp.GetSPValue(), i, pThis->m_bits));

        // We can stop once we hit the top frame.
        if (pInfo->fp == pThis->m_fpTop)
        {
            return SWA_ABORT;
        }
        else
        {
            return SWA_CONTINUE;
        }
    }
};
// Skip interceptors for JMC steppers.
// Return true if we patch something (and thus should keep stepping)
// Return false if we're done.
bool DebuggerJMCStepper::DetectHandleInterceptors(ControllerStackInfo * info)
{
    LOG((LF_CORDB,LL_INFO10000,"DJMCStepper::DHI: Start DetectHandleInterceptors\n"));

    // For JMC, we could stop very far away from an interceptor.
    // So we have to do a stack walk to search for interceptors...
    // If we find any in our stack range (from m_fp ... current fp), then we just do a trap-step-next.

    // Note that this logic should also work for regular steppers, but we've left that in
    // as to keep that code-path unchanged.

    // ControllerStackInfo only gives us the bottom 2 frames on the stack, so we ignore it and
    // have to do our own stack walk.
    // @todo - for us to properly skip filters, we need to make sure that filters show up in our chains.
    InterceptorStackInfo info2;
    CONTEXT *context = g_pEEInterface->GetThreadFilterContext(this->GetThread());
    CONTEXT tempContext;

    _ASSERTE(!ISREDIRECTEDTHREAD(this->GetThread()));
    if (context == NULL)
    {
        // No filter context: let the walk prime its own context into tempContext.
        info2.Init(this->m_fp, this->GetThread(), &tempContext, FALSE);
    }
    else
    {
        info2.Init(this->m_fp, this->GetThread(), context, TRUE);
    }

    // The following casts are safe on WIN64 platforms.
    int iOnStack = (int) info2.GetInterceptorsInRange();
    // m_rgfInterceptStop has a bit set for each interceptor the user wants to STOP at;
    // its complement is the set we should skip.
    int iSkip = ~((int) m_rgfInterceptStop);

    LOG((LF_CORDB,LL_INFO10000,"DJMCStepper::DHI: iOnStack=%x, iSkip=%x\n", iOnStack, iSkip));

    // If the bits on the stack contain any interceptors we want to skip, then we need to keep going.
    if ((iOnStack & iSkip) != 0)
    {
        LOG((LF_CORDB,LL_INFO10000,"DJMCStepper::DHI: keep going!\n"));
        TrapStepNext(info);
        EnableUnwind(m_fp);
        return true;
    }

    LOG((LF_CORDB,LL_INFO10000,"DJMCStepper::DHI: Done!!\n"));
    return false;
}
// * ------------------------------------------------------------------------
// * DebuggerThreadStarter routines
// * ------------------------------------------------------------------------

// Constructor: a controller that traps the first user code run on a newly
// started thread so the debugger can send a thread-start event.
DebuggerThreadStarter::DebuggerThreadStarter(Thread *thread)
  : DebuggerController(thread, NULL)
{
    LOG((LF_CORDB, LL_INFO1000, "DTS::DTS: this:0x%x Thread:0x%x\n",
         this, thread));

    // Check to make sure we only have 1 ThreadStarter on a given thread. (Inspired by NDPWhidbey issue 16888)
#if defined(_DEBUG)
    EnsureUniqueThreadStarter(this);
#endif
}
// TP_RESULT DebuggerThreadStarter::TriggerPatch()   If we're in a
// stub (module==NULL && managed) then do a PatchTrace up the stack and
// return TPR_IGNORE. Otherwise DisableAll and return TPR_TRIGGER so the
// thread-start event gets sent.
TP_RESULT DebuggerThreadStarter::TriggerPatch(DebuggerControllerPatch *patch,
                                              Thread *thread,
                                              TRIGGER_WHY tyWhy)
{
    Module *module = patch->key.module;
    BOOL managed = patch->IsManagedPatch();

    LOG((LF_CORDB,LL_INFO1000, "DebuggerThreadStarter::TriggerPatch for thread 0x%x\n", Debugger::GetThreadIdHelper(thread)));

    if (module == NULL && managed)
    {
        // This is a stub patch. If it was a TRACE_FRAME_PUSH that got us here, then the stub's frame is pushed now, so
        // we tell the frame to apply the real patch. If we got here via a TRACE_MGR_PUSH, however, then there is no
        // frame and we go back to the stub manager that generated the stub for where to patch next.
        TraceDestination trace;
        bool traceOk;
        if (patch->trace.GetTraceType() == TRACE_MGR_PUSH)
        {
            BYTE *dummy = NULL;
            CONTEXT *context = GetManagedLiveCtx(thread);

            CONTRACT_VIOLATION(GCViolation);
            traceOk = g_pEEInterface->TraceManager(thread, patch->trace.GetStubManager(), &trace, context, &dummy);
        }
        else if ((patch->trace.GetTraceType() == TRACE_FRAME_PUSH) && (thread->GetFrame()->IsTransitionToNativeFrame()))
        {
            // If we've got a frame that is transitioning to native, there's no reason to try to keep tracing. So we
            // bail early and save ourselves some effort. This also works around a problem where we deadlock trying to
            // do too much work to determine the destination of a ComPlusMethodFrame. (See issue 87103.)
            //
            // Note: trace call is still enabled, so we can just ignore this patch and wait for trace call to fire
            // again...
            return TPR_IGNORE;
        }
        else
        {
            // It's questionable whether Trace_Frame_Push is actually safe or not.
            ControllerStackInfo csi;
            StackTraceTicket ticket(patch);
            csi.GetStackInfo(ticket, thread, LEAF_MOST_FRAME, NULL);

            CONTRACT_VIOLATION(GCViolation); // TraceFrame GC-triggers
            traceOk = g_pEEInterface->TraceFrame(thread, thread->GetFrame(), TRUE, &trace, &(csi.m_activeFrame.registers));
        }

        // If the trace resolved, chase it further and lay the next patch.
        if (traceOk && g_pEEInterface->FollowTrace(&trace))
        {
            PatchTrace(&trace, LEAF_MOST_FRAME, TRUE);
        }

        return TPR_IGNORE;
    }
    else
    {
        // We've hit user code; trigger our event.
        DisableAll();

        {
            // Give the helper thread a chance to get ready. The temporary helper can't handle
            // execution control well, and the RS won't do any execution control until it gets a
            // create Thread event, which it won't get until here.
            // So now's our best time to wait for the real helper thread.
            g_pDebugger->PollWaitingForHelper();
        }

        return TPR_TRIGGER;
    }
}
// Trace-call hook for the thread-starter: when the new thread calls through
// a stub, follow the stub to its destination and patch there.
void DebuggerThreadStarter::TriggerTraceCall(Thread *thread, const BYTE *ip)
{
    LOG((LF_CORDB, LL_EVERYTHING, "DTS::TTC called\n"));
#ifdef DEBUGGING_SUPPORTED
    if (!thread->GetDomain()->IsDebuggerAttached())
    {
        return;
    }

    TraceDestination trace;
    if (g_pEEInterface->TraceStub(ip, &trace) && g_pEEInterface->FollowTrace(&trace))
    {
        PatchTrace(&trace, LEAF_MOST_FRAME, true);
    }
#endif //DEBUGGING_SUPPORTED
}
// Send the thread-started event to the right side, then delete this controller.
// Returns true (the event was sent).
bool DebuggerThreadStarter::SendEvent(Thread *thread, bool fIpChanged)
{
    CONTRACTL
    {
        SO_NOT_MAINLINE;
        NOTHROW;
        SENDEVENT_CONTRACT_ITEMS;
    }
    CONTRACTL_END;

    // This SendEvent can't be interrupted by a SetIp because until the client
    // gets a ThreadStarter event, it doesn't even know the thread exists, so
    // it certainly can't change its ip.
    _ASSERTE(!fIpChanged);

    LOG((LF_CORDB, LL_INFO10000, "DTS::SE: in DebuggerThreadStarter's SendEvent\n"));

    // Send the thread started event.
    g_pDebugger->ThreadStarted(thread);

    // We delete this now because it's no longer needed. We can call
    // delete here because the queued count is above 0. This object
    // will really be deleted when it's dequeued shortly after this
    // call returns.
    Delete();

    return true;
}
// * ------------------------------------------------------------------------
// * DebuggerUserBreakpoint routines
// * ------------------------------------------------------------------------

// Returns true when the frame's method lives on the System.Diagnostics.Debugger
// class (i.e. the frame is part of the Debug.Break() machinery itself) and so
// should be stepped through rather than stopped in.
bool DebuggerUserBreakpoint::IsFrameInDebuggerNamespace(FrameInfo * pFrame)
{
    CONTRACTL
    {
        THROWS;
        MODE_ANY;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    // Steppers ignore internal frames, so should only be called on real frames.
    _ASSERTE(pFrame->HasMethodFrame());

    // Now get the namespace of the active frame
    MethodDesc *pMD = pFrame->md;

    if (pMD  != NULL)
    {
        MethodTable * pMT = pMD->GetMethodTable();

        LPCUTF8 szNamespace = NULL;
        LPCUTF8 szClassName = pMT->GetFullyQualifiedNameInfo(&szNamespace);

        if (szClassName != NULL && szNamespace != NULL)
        {
            // Conversion may throw; both macros allocate wide copies on the stack.
            MAKE_WIDEPTR_FROMUTF8(wszNamespace, szNamespace); // throw
            MAKE_WIDEPTR_FROMUTF8(wszClassName, szClassName);
            if (wcscmp(wszClassName, W("Debugger")) == 0 &&
                wcscmp(wszNamespace, W("System.Diagnostics")) == 0)
            {
                // This will continue stepping
                return true;
            }
        }
    }
    return false;
}
// Helper: check if we're directly in a dynamic (lightweight-codegen) method,
// ignoring any chain goo or stuff in the Debugger namespace.
class IsLeafFrameDynamic
{
protected:
    // Static thunk passed to DebuggerWalkStack; forwards to the member walker.
    static StackWalkAction WalkStackWrapper(FrameInfo *pInfo, void *data)
    {
        // NOTE: the original text lost this cast's template argument in transit;
        // restore it so the callback cookie round-trips correctly.
        IsLeafFrameDynamic * pThis = reinterpret_cast<IsLeafFrameDynamic *>(data);
        return pThis->WalkStack(pInfo);
    }

    // Per-frame callback: find the leafmost non-chain, non-Debugger-namespace
    // frame and record whether it is a lightweight method.
    StackWalkAction WalkStack(FrameInfo *pInfo)
    {
        _ASSERTE(pInfo != NULL);

        // A FrameInfo may have both Method + Chain rolled into one.
        if (!pInfo->HasMethodFrame() && !pInfo->HasStubFrame())
        {
            // We're a chain. Ignore it and keep looking.
            return SWA_CONTINUE;
        }

        // So now this is the first non-chain, non-Debugger namespace frame.
        // LW frames don't have a name, so we check if it's LW first.
        if (pInfo->eStubFrameType == STUBFRAME_LIGHTWEIGHT_FUNCTION)
        {
            m_fInLightWeightMethod = true;
            return SWA_ABORT;
        }

        // Ignore Debugger.Break() frames.
        // All Debugger.Break calls will have this on the stack.
        if (DebuggerUserBreakpoint::IsFrameInDebuggerNamespace(pInfo))
        {
            return SWA_CONTINUE;
        }

        // We've now determined leafmost thing, so stop stackwalking.
        _ASSERTE(m_fInLightWeightMethod == false);
        return SWA_ABORT;
    }

    // Result: true iff the leafmost interesting frame is a lightweight method.
    bool m_fInLightWeightMethod;

    // Need this context to do stack trace.
    CONTEXT m_tempContext;

public:
    // Walks the current thread's stack and returns true iff the leafmost
    // non-chain, non-Debugger-namespace frame is a lightweight (dynamic) method.
    bool DoCheck(IN Thread * pThread)
    {
        CONTRACTL
        {
            GC_TRIGGERS;
            THROWS;
            MODE_ANY;
            PRECONDITION(CheckPointer(pThread));
        }
        CONTRACTL_END;

        m_fInLightWeightMethod = false;

        DebuggerWalkStack(
            pThread,
            LEAF_MOST_FRAME,
            &m_tempContext, false,
            WalkStackWrapper,
            (void *) this,
            TRUE // includes everything
        );

        // We don't care whether the stackwalk succeeds or not because the
        // callback sets our status via this field either way, so just return it.
        return m_fInLightWeightMethod;
    };
};
// Handle a Debug.Break() notification.
// This may create a controller to step out of the Debug.Break() call (so that
// we appear stopped at the callsite).
// If we can't step-out (eg, we're directly in a dynamic method), then send
// the debug event immediately.
void DebuggerUserBreakpoint::HandleDebugBreak(Thread * pThread)
{
    IsLeafFrameDynamic checker;

    if (checker.DoCheck(pThread))
    {
        // Leaf frame is a lightweight (dynamic) method; we can't step out of it,
        // so send the debug event immediately.
        g_pDebugger->SendUserBreakpointAndSynchronize(pThread);
    }
    else
    {
        // Create a controller that will step out for us.
        new (interopsafe) DebuggerUserBreakpoint(pThread);
    }
}
// Constructor: a stepper that steps out of the Debug.Break() call so the user
// appears stopped at the callsite. Stops everywhere except unmanaged code, and
// intercepts everything.
DebuggerUserBreakpoint::DebuggerUserBreakpoint(Thread *thread)
  : DebuggerStepper(thread, (CorDebugUnmappedStop) (STOP_ALL & ~STOP_UNMANAGED), INTERCEPT_ALL,  NULL)
{
    // Setup a step out from the current frame (which we know is
    // unmanaged, actually...)

    // This happens to be safe, but it's a very special case (so we have a special case ticket)
    // This is called while we're live (so no filter context) and from the fcall,
    // and we pushed a HelperMethodFrame to protect us. We also happen to know that we
    // haven't done anything illegal or dangerous since then.
    StackTraceTicket ticket(this);
    StepOut(LEAF_MOST_FRAME, ticket);
}
// Is this frame interesting to the user-breakpoint stepper?
// Frames belonging to the debugger's own class (System.Diagnostics.Debugger)
// are skipped; everything else is interesting.
bool DebuggerUserBreakpoint::IsInterestingFrame(FrameInfo * pFrame)
{
    CONTRACTL
    {
        THROWS;
        MODE_ANY;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    const bool fDebuggerFrame = IsFrameInDebuggerNamespace(pFrame);
    return !fDebuggerFrame;
}
// Send the user-breakpoint event to the right side, then delete this controller.
// Returns true (the event was sent).
bool DebuggerUserBreakpoint::SendEvent(Thread *thread, bool fIpChanged)
{
    CONTRACTL
    {
        SO_NOT_MAINLINE;
        NOTHROW;
        SENDEVENT_CONTRACT_ITEMS;
    }
    CONTRACTL_END;

    // See DebuggerStepper::SendEvent for why we assert here.
    // This is technically an issue, but it's too benign to fix.
    _ASSERTE(!fIpChanged);

    LOG((LF_CORDB, LL_INFO10000,
         "DUB::SE: in DebuggerUserBreakpoint's SendEvent\n"));

    // Send the user breakpoint event.
    g_pDebugger->SendRawUserBreakpoint(thread);

    // We delete this now because it's no longer needed. We can call
    // delete here because the queued count is above 0. This object
    // will really be deleted when it's dequeued shortly after this
    // call returns.
    Delete();

    return true;
}
// * ------------------------------------------------------------------------
// * DebuggerFuncEvalComplete routines
// * ------------------------------------------------------------------------

// Constructor: a controller that traps the completion of a func-eval.
//
// Arguments:
//    thread - the thread the func-eval ran on
//    dest   - address of the breakpoint-info segment the func-eval returns to
DebuggerFuncEvalComplete::DebuggerFuncEvalComplete(Thread *thread,
                                                   void *dest)
  : DebuggerController(thread, NULL)
{
#ifdef _TARGET_ARM_
    // On ARM the Thumb bit may be set in the code address; mask it off before
    // treating the destination as a DebuggerEvalBreakpointInfoSegment.
    // NOTE: the original text lost the casts' template arguments in transit;
    // restored here so the member lookup is well-formed.
    m_pDE = reinterpret_cast<DebuggerEvalBreakpointInfoSegment*>(((DWORD)dest) & ~THUMB_CODE)->m_associatedDebuggerEval;
#else
    m_pDE = reinterpret_cast<DebuggerEvalBreakpointInfoSegment*>(dest)->m_associatedDebuggerEval;
#endif

    // Add an unmanaged patch at the destination.
    AddAndActivateNativePatchForAddress((CORDB_ADDRESS_TYPE*)dest, LEAF_MOST_FRAME, FALSE, TRACE_UNMANAGED);
}
// Called when the func-eval-complete patch is hit: restore the thread's
// pre-hijack context, disable the patch, and trigger the event.
TP_RESULT DebuggerFuncEvalComplete::TriggerPatch(DebuggerControllerPatch *patch,
                                                 Thread *thread,
                                                 TRIGGER_WHY tyWhy)
{
    // It had better be an unmanaged patch...
    _ASSERTE((patch->key.module == NULL) && !patch->IsManagedPatch());

    // set ThreadFilterContext back here because we need to make the stack crawlable,
    // in case GC got triggered.
    // Restore the thread's context to what it was before we hijacked it for this func eval.
    CONTEXT *pCtx = GetManagedLiveCtx(thread);
    // NOTE: the original text lost the casts' template arguments in transit;
    // restored here (CORDbgCopyThreadContext operates on DT_CONTEXT).
    CORDbgCopyThreadContext(reinterpret_cast<DT_CONTEXT *>(pCtx),
                            reinterpret_cast<DT_CONTEXT *>(&(m_pDE->m_context)));

    // We've hit our patch, so simply disable all (which removes the
    // patch) and trigger the event.
    DisableAll();
    return TPR_TRIGGER;
}
// Send the func-eval-complete event to the right side, then delete this
// controller. Returns true (the event was sent).
bool DebuggerFuncEvalComplete::SendEvent(Thread *thread, bool fIpChanged)
{
    CONTRACTL
    {
        SO_NOT_MAINLINE;
        THROWS;
        SENDEVENT_CONTRACT_ITEMS;
    }
    CONTRACTL_END;

    // This should not ever be interrupted by a SetIp.
    // The BP will be off in random native code for which SetIp would be illegal.
    // However, the func-eval controller will restore the context from when we're at the patch,
    // so that will look like the IP changed on us.
    _ASSERTE(fIpChanged);

    LOG((LF_CORDB, LL_INFO10000, "DFEC::SE: in DebuggerFuncEval's SendEvent\n"));

    _ASSERTE(!ISREDIRECTEDTHREAD(thread));

    // The DebuggerEval is at our faulting address.
    DebuggerEval *pDE = m_pDE;

    // Send the func eval complete (or exception) event.
    g_pDebugger->FuncEvalComplete(thread, pDE);

    // We delete this now because it's no longer needed. We can call
    // delete here because the queued count is above 0. This object
    // will really be deleted when it's dequeued shortly after this
    // call returns.
    Delete();

    return true;
}
#ifdef EnC_SUPPORTED
// * ------------------------------------------------------------------------ *
// * DebuggerEnCBreakpoint routines
// * ------------------------------------------------------------------------ *

//---------------------------------------------------------------------------------------
//
// DebuggerEnCBreakpoint constructor - creates and activates a new EnC breakpoint
//
// Arguments:
//    offset        - native offset in the function to place the patch
//    jitInfo       - identifies the function in which the breakpoint is being placed
//    fTriggerType  - breakpoint type: either REMAP_PENDING or REMAP_COMPLETE
//    pAppDomain    - the breakpoint applies to the specified AppDomain only
//
DebuggerEnCBreakpoint::DebuggerEnCBreakpoint(SIZE_T offset,
                                             DebuggerJitInfo *jitInfo,
                                             DebuggerEnCBreakpoint::TriggerType fTriggerType,
                                             AppDomain *pAppDomain)
  : DebuggerController(NULL, pAppDomain),
    m_fTriggerType(fTriggerType),
    m_jitInfo(jitInfo)
{
    _ASSERTE( jitInfo != NULL );
    // Add and activate the specified patch
    AddBindAndActivateNativeManagedPatch(jitInfo->m_fd, jitInfo, offset, LEAF_MOST_FRAME, pAppDomain);
    LOG((LF_ENC,LL_INFO1000, "DEnCBPDEnCBP::adding %S patch!\n",
         fTriggerType == REMAP_PENDING ? W("remap pending") : W("remap complete")));
}
//---------------------------------------------------------------------------------------
//
// DebuggerEnCBreakpoint::TriggerPatch
//   called by the debugging infrastructure when the patch is hit.
//
// Arguments:
//    patch         - specifies the patch that was hit
//    thread        - identifies the thread on which the patch was hit
//    tyWhy         - TY_SHORT_CIRCUIT for normal REMAP_PENDING EnC patches
//
// Return value:
//   TPR_IGNORE if the debugger chooses not to take a remap opportunity
//   TPR_IGNORE_AND_STOP when a remap-complete event is sent
//   Doesn't return at all if the debugger remaps execution to the new version of the method
//
TP_RESULT DebuggerEnCBreakpoint::TriggerPatch(DebuggerControllerPatch *patch,
                                              Thread *thread,
                                              TRIGGER_WHY tyWhy)
{
    _ASSERTE(HasLock());

    Module *module = patch->key.module;
    mdMethodDef md = patch->key.md;
    SIZE_T offset = patch->offset;

    // Map the current native offset back to the IL offset in the old
    // function.  This will be mapped to the new native offset within
    // ResumeInUpdatedFunction
    CorDebugMappingResult map;
    DWORD which;
    SIZE_T currentIP = (SIZE_T)m_jitInfo->MapNativeOffsetToIL(offset,
                                                              &map, &which);

    // We only lay DebuggerEnCBreakpoints at sequence points
    _ASSERTE(map == MAPPING_EXACT);

    LOG((LF_ENC, LL_ALWAYS,
         "DEnCBP::TP: triggered E&C %S breakpoint: tid=0x%x, module=0x%08x, "
         "method def=0x%08x, version=%d, native offset=0x%x, IL offset=0x%x\n this=0x%x\n",
         m_fTriggerType == REMAP_PENDING ? W("ResumePending") : W("ResumeComplete"),
         thread, module, md, m_jitInfo->m_encVersion, offset, currentIP, this));

    // If this is a REMAP_COMPLETE patch, then dispatch the RemapComplete callback
    if (m_fTriggerType == REMAP_COMPLETE)
    {
        return HandleRemapComplete(patch, thread, tyWhy);
    }

    // This must be a REMAP_PENDING patch
    // unless we got here on an explicit short-circuit, don't do any work
    if (tyWhy != TY_SHORT_CIRCUIT)
    {
        LOG((LF_ENC, LL_ALWAYS, "DEnCBP::TP: not short-circuit ... bailing\n"));
        return TPR_IGNORE;
    }

    _ASSERTE(patch->IsManagedPatch());

    // Grab the MethodDesc for this function.
    _ASSERTE(module != NULL);

    // GENERICS: @todo generics. This should be replaced by a similar loop
    // over the DJIs for the DMI as in BindPatch up above.
    MethodDesc *pFD = g_pEEInterface->FindLoadedMethodRefOrDef(module, md);

    _ASSERTE(pFD != NULL);

    LOG((LF_ENC, LL_ALWAYS,
         "DEnCBP::TP: in %s::%s\n", pFD->m_pszDebugClassName,pFD->m_pszDebugMethodName));

    // Grab the jit info for the original copy of the method, which is
    // what we are executing right now.
    DebuggerJitInfo *pJitInfo = m_jitInfo;
    _ASSERTE(pJitInfo);
    _ASSERTE(pJitInfo->m_fd == pFD);

    // Grab the context for this thread. This is the context that was
    // passed to COMPlusFrameHandler.
    CONTEXT *pContext = GetManagedLiveCtx(thread);

    // We use the module the current function is in.
    _ASSERTE(module->IsEditAndContinueEnabled());
    EditAndContinueModule *pModule = (EditAndContinueModule*)module;

    // Release the controller lock for the rest of this method
    CrstBase::UnsafeCrstInverseHolder inverseLock(&g_criticalSection);

    // resumeIP is the native offset in the new version of the method the debugger wants
    // to resume to.  We'll pass the address of this variable over to the right-side
    // and if it modifies the contents while we're stopped dispatching the RemapOpportunity,
    // then we know it wants a remap.
    // This form of side-channel communication seems like an error-prone workaround.  Ideally the
    // remap IP (if any) would just be returned in a response event.
    SIZE_T resumeIP = (SIZE_T) -1;

    // Debugging code to enable a break after N RemapOpportunities
#ifdef _DEBUG
    static int breakOnRemapOpportunity = -1;
    if (breakOnRemapOpportunity == -1)
        breakOnRemapOpportunity = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_EnCBreakOnRemapOpportunity);

    static int remapOpportunityCount = 0;

    ++remapOpportunityCount;
    if (breakOnRemapOpportunity == 1 || breakOnRemapOpportunity == remapOpportunityCount)
    {
        _ASSERTE(!"BreakOnRemapOpportunity");
    }
#endif

    // Send an event to the RS to call the RemapOpportunity callback, passing the address of resumeIP.
    // If the debugger responds with a call to RemapFunction, the supplied IP will be copied into resumeIP
    // and we will know to update the context and resume the function at the new IP. Otherwise we just do
    // nothing and try again on next RemapFunction breakpoint
    g_pDebugger->LockAndSendEnCRemapEvent(pJitInfo, currentIP, &resumeIP);

    LOG((LF_ENC, LL_ALWAYS,
         "DEnCBP::TP: resume IL offset is 0x%x\n", resumeIP));

    // Has the debugger requested a remap?
    if (resumeIP != (SIZE_T) -1)
    {
        // This will jit the function, update the context, and resume execution at the new location.
        g_pEEInterface->ResumeInUpdatedFunction(pModule,
                                                pFD,
                                                (void*)pJitInfo,
                                                resumeIP,
                                                pContext);
        _ASSERTE(!"Returned from ResumeInUpdatedFunction!");
    }

    LOG((LF_CORDB, LL_ALWAYS, "DEnCB::TP: We've returned from ResumeInUpd"
        "atedFunction, we're going to skip the EnC patch ####\n"));

    // We're returning then we'll have to re-get this lock. Be careful that we haven't kept any controller/patches
    // in the caller. They can move when we unlock, so when we release the lock and reget it here, things might have
    // changed underneath us.
    // inverseLock holder will reaquire lock.

    return TPR_IGNORE;
}
//
// HandleRemapComplete is called for an EnC patch in the newly updated function
// so that we can notify the debugger that the remap has completed and they can
// now remap their steppers or anything else that depends on the new code actually
// being on the stack. We return TPR_IGNORE_AND_STOP because it's possible that the
// function was edited after we handled remap complete and want to make sure we
// start a fresh call to TriggerPatch
//
TP_RESULT DebuggerEnCBreakpoint::HandleRemapComplete(DebuggerControllerPatch *patch,
                                                     Thread *thread,
                                                     TRIGGER_WHY tyWhy)
{
    LOG((LF_ENC, LL_ALWAYS, "DEnCBP::HRC: HandleRemapComplete\n"));

    // Debugging code to enable a break after N RemapCompletes
#ifdef _DEBUG
    static int breakOnRemapComplete = -1;
    if (breakOnRemapComplete == -1)
        breakOnRemapComplete = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_EnCBreakOnRemapComplete);

    static int remapCompleteCount = 0;
    ++remapCompleteCount;
    if (breakOnRemapComplete == 1 || breakOnRemapComplete == remapCompleteCount)
    {
        _ASSERTE(!"BreakOnRemapComplete");
    }
#endif
    _ASSERTE(HasLock());

    // Snapshot this flag before Delete() — we can't touch members afterwards.
    bool fApplied = m_jitInfo->m_encBreakpointsApplied;

    // Need to delete this before unlock below so if any other thread comes in after the unlock
    // they won't handle this patch.
    Delete();

    // We just deleted ourselves. Can't access any instance members after this point.

    // if the function has somehow been updated before we resumed into it then just bail
    if (fApplied)
    {
        LOG((LF_ENC, LL_ALWAYS, "DEnCBP::HRC: function already updated, ignoring\n"));
        return TPR_IGNORE_AND_STOP;
    }

    // GENERICS: @todo generics. This should be replaced by a similar loop
    // over the DJIs for the DMI as in BindPatch up above.
    MethodDesc *pFD = g_pEEInterface->FindLoadedMethodRefOrDef(patch->key.module, patch->key.md);

    LOG((LF_ENC, LL_ALWAYS, "DEnCBP::HRC: unlocking controller\n"));

    // Unlock the controller lock and dispatch the remap complete event
    CrstBase::UnsafeCrstInverseHolder inverseLock(&g_criticalSection);

    LOG((LF_ENC, LL_ALWAYS, "DEnCBP::HRC: sending RemapCompleteEvent\n"));

    g_pDebugger->LockAndSendEnCRemapCompleteEvent(pFD);

    // We're returning then we'll have to re-get this lock. Be careful that we haven't kept any controller/patches
    // in the caller. They can move when we unlock, so when we release the lock and reget it here, things might have
    // changed underneath us.
    // inverseLock holder will reacquire.

    return TPR_IGNORE_AND_STOP;
}
#endif //EnC_SUPPORTED
// continuable-exceptions
// * ------------------------------------------------------------------------ *
// * DebuggerContinuableExceptionBreakpoint routines
// * ------------------------------------------------------------------------ *
//---------------------------------------------------------------------------------------
//
// constructor
//
// Arguments:
// pThread - the thread on which we are intercepting an exception
// nativeOffset - This is the target native offset. It is where we are going to resume execution.
// jitInfo - the DebuggerJitInfo of the method at which we are intercepting
// pAppDomain - the AppDomain in which the thread is executing
//
DebuggerContinuableExceptionBreakpoint::DebuggerContinuableExceptionBreakpoint(Thread *pThread,
                                                                               SIZE_T nativeOffset,
                                                                               DebuggerJitInfo *jitInfo,
                                                                               AppDomain *pAppDomain)
  : DebuggerController(pThread, pAppDomain)
{
    _ASSERTE( jitInfo != NULL );

    // Add a native patch at the specified native offset, which is where we are going
    // to resume execution once the intercepted exception is no longer in flight.
    AddBindAndActivateNativeManagedPatch(jitInfo->m_fd, jitInfo, nativeOffset, LEAF_MOST_FRAME, pAppDomain);
}
//---------------------------------------------------------------------------------------
//
// This function is called when the patch added in the constructor is hit. At this point,
// we have already resumed execution, and the exception is no longer in flight.
//
// Arguments:
// patch - the patch added in the constructor; unused
// thread - the thread in question; unused
// tyWhy - a flag which is only useful for EnC; unused
//
// Return Value:
// This function always returns TPR_TRIGGER, meaning that it wants to send an event to notify the RS.
//
TP_RESULT DebuggerContinuableExceptionBreakpoint::TriggerPatch(DebuggerControllerPatch *patch,
                                                               Thread *thread,
                                                               TRIGGER_WHY tyWhy)
{
    LOG((LF_CORDB, LL_INFO10000, "DCEBP::TP\n"));

    //
    // Disable the patch — this breakpoint is one-shot, so it must not fire again.
    //
    DisableAll();

    // We will send a notification to the RS when the patch is triggered.
    return TPR_TRIGGER;
}
//---------------------------------------------------------------------------------------
//
// This function is called when we want to notify the RS that an interception is complete.
// At this point, we have already resumed execution, and the exception is no longer in flight.
//
// Arguments:
// thread - the thread in question
// fIpChanged - whether the IP has been changed by SetIP after the patch was hit but
// before this function was called
//
bool DebuggerContinuableExceptionBreakpoint::SendEvent(Thread *thread, bool fIpChanged)
{
    CONTRACTL
    {
        SO_NOT_MAINLINE;
        NOTHROW;
        SENDEVENT_CONTRACT_ITEMS;
    }
    CONTRACTL_END;

    LOG((LF_CORDB, LL_INFO10000,
         "DCEBP::SE: in DebuggerContinuableExceptionBreakpoint's SendEvent\n"));

    // Only tell the RS the interception is complete if the IP wasn't moved by a
    // SetIP between the patch firing and this call; if it was moved, the resume
    // point we patched is no longer where execution actually continued.
    if (!fIpChanged)
    {
        g_pDebugger->SendInterceptExceptionComplete(thread);
    }

    // On WIN64, by the time we get here the DebuggerExState is gone already.
    // ExceptionTrackers are cleaned up before we resume execution for a handled exception.
#if !defined(WIN64EXCEPTIONS)
    thread->GetExceptionState()->GetDebuggerState()->SetDebuggerInterceptContext(NULL);
#endif // !WIN64EXCEPTIONS

    //
    // We delete this now because it's no longer needed. We can call
    // delete here because the queued count is above 0. This object
    // will really be deleted when it's dequeued shortly after this
    // call returns.
    //
    Delete();

    return true;
}
#endif // !DACCESS_COMPILE