// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
// ============================================================================
// File: stackwalktypes.h
//

// ============================================================================
// Contains types used by stackwalk.h.


#ifndef __STACKWALKTYPES_H__
#define __STACKWALKTYPES_H__

class CrawlFrame;
struct RangeSection;
struct StackwalkCacheEntry;

//
// This type should be used internally inside the code manager only. EECodeInfo should 
// be used in general code instead. Ideally, we would replace all uses of METHODTOKEN 
// with EECodeInfo.
//
struct METHODTOKEN
{
    METHODTOKEN(RangeSection * pRangeSection, TADDR pCodeHeader)
        : m_pRangeSection(pRangeSection), m_pCodeHeader(pCodeHeader)
    {
    }

    METHODTOKEN()
    {
    }

    // Cache of RangeSection containing the code to avoid redundant lookups.
    RangeSection * m_pRangeSection;

    // CodeHeader* for EEJitManager
    // PTR_RUNTIME_FUNCTION for managed native code
    TADDR m_pCodeHeader;

    BOOL IsNull() const
    {
        return m_pCodeHeader == NULL;
    }
};

//************************************************************************
// Stack walking
//************************************************************************
enum StackCrawlMark
{
    LookForMe = 0,
    LookForMyCaller = 1,
    LookForMyCallersCaller = 2,
    LookForThread = 3
};
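
// Illustrative sketch (not part of this header): a caller typically places a
// StackCrawlMark local on its own frame and passes its address down, so the
// stack walker can locate that frame and then skip frames relative to it.
// GetCallerAssembly and DoSomethingWithMark are hypothetical names used only
// for this example.
//
//     void GetCallerAssembly()
//     {
//         StackCrawlMark stackMark = LookForMyCaller;
//         DoSomethingWithMark(&stackMark);
//     }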

enum StackWalkAction 
{
    SWA_CONTINUE    = 0,    // continue walking
    SWA_ABORT       = 1,    // stop walking, early out in "failure case"
    SWA_FAILED      = 2     // couldn't walk stack
};

#define SWA_DONE SWA_CONTINUE


// Pointer to the StackWalk callback function.
typedef StackWalkAction (*PSTACKWALKFRAMESCALLBACK)(
    CrawlFrame       *pCF,      // Current frame being examined
    VOID*             pData     // Caller's private data
);
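
// Illustrative callback sketch (not part of this header): counts the crawled
// frames and aborts the walk once a caller-chosen limit is reached. The
// callback name and the limit of 100 are assumptions for the example.
//
//     StackWalkAction CountFramesCallback(CrawlFrame *pCF, VOID *pData)
//     {
//         int *pCount = (int *)pData;           // caller's private data
//         ++(*pCount);
//         return (*pCount < 100) ? SWA_CONTINUE : SWA_ABORT;
//     }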

/******************************************************************************
   StackwalkCache: implements the stackwalk performance-optimization cache.
   StackwalkCacheEntry array: a very simple per-thread hash table holding the cached data.
   StackwalkCacheUnwindInfo: used by EECodeManager::UnwindStackFrame to return
   stackwalk cache flags.
   Contact Ilyakoz with any questions.
*/

struct StackwalkCacheUnwindInfo
{
#if defined(_TARGET_AMD64_)
    ULONG RBPOffset;
    ULONG RSPOffsetFromUnwindInfo;
#else  // !_TARGET_AMD64_
    size_t securityObjectOffset;    // offset of SecurityObject. 0 if there is no security object
    BOOL fUseEbp;                   // Is EBP modified by the method - either for a frame-pointer or for a scratch-register?
    BOOL fUseEbpAsFrameReg;         // use EBP as the frame pointer?
#endif // !_TARGET_AMD64_

    inline StackwalkCacheUnwindInfo() { SUPPORTS_DAC; ZeroMemory(this, sizeof(StackwalkCacheUnwindInfo)); }
    StackwalkCacheUnwindInfo(StackwalkCacheEntry * pCacheEntry);
};

//************************************************************************

#if defined(_WIN64)
    #define STACKWALK_CACHE_ENTRY_ALIGN_BOUNDARY 0x10
#else  // !_WIN64
    #define STACKWALK_CACHE_ENTRY_ALIGN_BOUNDARY 0x8
#endif // !_WIN64

struct 
DECLSPEC_ALIGN(STACKWALK_CACHE_ENTRY_ALIGN_BOUNDARY)
StackwalkCacheEntry
{
    //
    //  Do not rearrange the fields: the invalid value 0x8000000000000000 must never
    //  appear as a valid StackwalkCacheEntry. This is required for the atomic MOVQ
    //  emulation that uses the FILD/FISTP instructions.
    //
    UINT_PTR IP;
#if !defined(_TARGET_AMD64_)
    WORD ESPOffset:15;          // stack offset (frame size + pending arguments, etc.)
    WORD securityObjectOffset:3;// offset of SecurityObject. 0 if there is no security object
    WORD fUseEbp:1;             // For ESP methods, is EBP touched at all?
    WORD fUseEbpAsFrameReg:1;   // use EBP as the frame register?
    WORD argSize:11;            // size of args pushed on stack
#else  // _TARGET_AMD64_
    DWORD RSPOffset;
    DWORD RBPOffset;
#endif // _TARGET_AMD64_

    inline BOOL Init(UINT_PTR   IP,
                     UINT_PTR   SPOffset,
                     StackwalkCacheUnwindInfo *pUnwindInfo,
                     UINT_PTR   argSize)
    {
        LIMITED_METHOD_CONTRACT;

        this->IP              = IP;

#if defined(_TARGET_X86_)
        this->ESPOffset         = SPOffset;
        this->argSize           = argSize;
        
        this->securityObjectOffset = (WORD)pUnwindInfo->securityObjectOffset;
        _ASSERTE(this->securityObjectOffset == pUnwindInfo->securityObjectOffset);
        
        this->fUseEbp           = pUnwindInfo->fUseEbp;
        this->fUseEbpAsFrameReg = pUnwindInfo->fUseEbpAsFrameReg;
        _ASSERTE(!fUseEbpAsFrameReg || fUseEbp);

        // Return success only if SPOffset and argSize fit into the bitfields
        // without truncation.
        return ((this->ESPOffset == SPOffset) && 
                (this->argSize == argSize));
#elif defined(_TARGET_AMD64_)
        // The size of a stack frame is guaranteed to fit in a DWORD, so unlike the
        // x86 case we do not need to report failure for RSPOffset and RBPOffset.

        // The actual SP offset may be bigger than the offset we get from the unwind info because of stack allocations.
        _ASSERTE(SPOffset >= pUnwindInfo->RSPOffsetFromUnwindInfo);

        _ASSERTE(FitsIn<DWORD>(SPOffset));
        this->RSPOffset  = static_cast<DWORD>(SPOffset);
        _ASSERTE(FitsIn<DWORD>(pUnwindInfo->RBPOffset + (SPOffset - pUnwindInfo->RSPOffsetFromUnwindInfo)));
        this->RBPOffset  = static_cast<DWORD>(pUnwindInfo->RBPOffset + (SPOffset - pUnwindInfo->RSPOffsetFromUnwindInfo));
        return TRUE;
#else  // !_TARGET_X86_ && !_TARGET_AMD64_
        return FALSE;
#endif // !_TARGET_X86_ && !_TARGET_AMD64_
    }

    inline BOOL HasSecurityObject()
    {
        LIMITED_METHOD_CONTRACT;

#if defined(_TARGET_X86_)
        return securityObjectOffset != 0;
#else  // !_TARGET_X86_
        // On AMD64 we don't save anything by grabbing the security object before it is needed.  This is because
        // we need to crack the GC info in order to find the security object, and to unwind we only need to
        // crack the unwind info.
        return FALSE;
#endif // !_TARGET_X86_
    }

    inline BOOL IsSafeToUseCache()
    {
        LIMITED_METHOD_CONTRACT;

#if defined(_TARGET_X86_)
        return (!fUseEbp || fUseEbpAsFrameReg);
#elif defined(_TARGET_AMD64_)
        return TRUE;
#else  // !_TARGET_X86_ && !_TARGET_AMD64_
        return FALSE;
#endif // !_TARGET_X86_ && !_TARGET_AMD64_
    }
};

#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
static_assert_no_msg(sizeof(StackwalkCacheEntry) == 2 * sizeof(UINT_PTR));
#endif // _TARGET_X86_ || _TARGET_AMD64_
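
// Illustrative sketch (hypothetical values; x86 shape shown): an entry is
// populated via Init(), which reports failure if the offsets do not fit into
// the bitfields, in which case the caller should skip caching. The variables
// ip, espOffset, and argSize are assumptions for the example.
//
//     StackwalkCacheUnwindInfo unwindInfo;          // zero-initialized by its ctor
//     unwindInfo.fUseEbp = TRUE;
//     unwindInfo.fUseEbpAsFrameReg = TRUE;
//     StackwalkCacheEntry entry;
//     if (entry.Init(ip, espOffset, &unwindInfo, argSize) &&
//         entry.IsSafeToUseCache())
//     {
//         // Safe to publish 'entry' to the cache.
//     }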

//************************************************************************

class StackwalkCache 
{
    friend struct _DacGlobals;

    public:
        BOOL Lookup(UINT_PTR IP);
        void Insert(StackwalkCacheEntry *pCacheEntry);
        inline void ClearEntry () { LIMITED_METHOD_DAC_CONTRACT; m_CacheEntry.IP = 0; }
        inline BOOL Enabled() { LIMITED_METHOD_DAC_CONTRACT;  return s_Enabled; }
        inline BOOL IsEmpty () { LIMITED_METHOD_CONTRACT;  return m_CacheEntry.IP == 0; }

#ifndef DACCESS_COMPILE
        StackwalkCache();
#endif
        static void Init();

        StackwalkCacheEntry m_CacheEntry; // local copy of the global cache entry for the current IP
        
        static void Invalidate(LoaderAllocator * pLoaderAllocator);
        
    private:
        unsigned GetKey(UINT_PTR IP);
        
#ifdef DACCESS_COMPILE
        // DAC can't rely on the cache here
        const static BOOL s_Enabled;
#else
        static BOOL s_Enabled;
#endif
};
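
// Illustrative flow sketch (the real logic lives in the stack walker): probe
// the cache by instruction pointer before unwinding, and insert a freshly
// built entry on a miss. 'controlPC' and 'entry' are assumptions here, and
// 'entry' would be built as shown after StackwalkCacheEntry above.
//
//     StackwalkCache cache;
//     if (cache.Enabled() && cache.Lookup(controlPC))
//     {
//         // Hit: m_CacheEntry holds the cached unwind data for controlPC.
//         StackwalkCacheUnwindInfo unwindInfo(&cache.m_CacheEntry);
//         // ... unwind using unwindInfo ...
//     }
//     else
//     {
//         // Miss: build an entry via StackwalkCacheEntry::Init and publish it
//         // for subsequent walks.
//         cache.Insert(&entry);
//     }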

//************************************************************************

inline StackwalkCacheUnwindInfo::StackwalkCacheUnwindInfo(StackwalkCacheEntry * pCacheEntry) 
{
    LIMITED_METHOD_CONTRACT;
    
#if defined(_TARGET_AMD64_)
    RBPOffset = pCacheEntry->RBPOffset;
#else  // !_TARGET_AMD64_
    securityObjectOffset = pCacheEntry->securityObjectOffset;
    fUseEbp = pCacheEntry->fUseEbp;
    fUseEbpAsFrameReg = pCacheEntry->fUseEbpAsFrameReg;
#endif // !_TARGET_AMD64_
}

#endif  // __STACKWALKTYPES_H__