summaryrefslogtreecommitdiff
path: root/src/vm/callcounter.cpp
blob: 6b94f7303eb11182338d75ac84c4df53e56fa1ab (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
// ===========================================================================
// File: CallCounter.CPP
//
// ===========================================================================



#include "common.h"
#include "excep.h"
#include "log.h"
#include "tieredcompilation.h"
#include "callcounter.h"

#ifdef FEATURE_TIERED_COMPILATION
#ifndef DACCESS_COMPILE

// Builds an entry whose call counting is permanently disabled for the given method.
// An INT_MAX call count limit is the sentinel that marks counting as disabled.
CallCounterEntry CallCounterEntry::CreateWithCallCountingDisabled(MethodDesc *m)
{
    WRAPPER_NO_CONTRACT;
    _ASSERTE(m != nullptr);

    CallCounterEntry disabledEntry(m, INT_MAX);
    _ASSERTE(!disabledEntry.IsCallCountingEnabled());
    return disabledEntry;
}

// Constructs the call counter. The spin lock guards all access to the
// method-to-call-count hash table (m_methodToCallCount).
CallCounter::CallCounter()
{
    LIMITED_METHOD_CONTRACT;

    m_lock.Init(LOCK_TYPE_DEFAULT);
}

#endif // !DACCESS_COMPILE

// Returns true when the method may participate in call counting: the tiered
// compilation call-counting config switch must be on, and the method must not
// have requested aggressive optimization.
bool CallCounter::IsEligibleForCallCounting(PTR_MethodDesc pMethodDesc)
{
    WRAPPER_NO_CONTRACT;
    _ASSERTE(pMethodDesc != NULL);
    _ASSERTE(pMethodDesc->IsEligibleForTieredCompilation());

    // Same short-circuit order as the original && expression: check the global
    // config switch first, then the per-method opt-out.
    if (!g_pConfig->TieredCompilation_CallCounting())
    {
        return false;
    }
    return !pMethodDesc->RequestedAggressiveOptimization();
}

// Reports whether call counting is currently enabled for the method. A method
// with no entry in the hash table has not been recorded as disabled, so it is
// treated as enabled.
bool CallCounter::IsCallCountingEnabled(PTR_MethodDesc pMethodDesc)
{
    WRAPPER_NO_CONTRACT;
    _ASSERTE(pMethodDesc != PTR_NULL);
    _ASSERTE(pMethodDesc->IsEligibleForTieredCompilation());
    _ASSERTE(IsEligibleForCallCounting(pMethodDesc));

// The lock is only taken in a live process; DAC reads run without it.
#ifndef DACCESS_COMPILE
    SpinLockHolder holder(&m_lock);
#endif

    PTR_CallCounterEntry pEntry =
        (PTR_CallCounterEntry)const_cast<CallCounterEntry *>(m_methodToCallCount.LookupPtr(pMethodDesc));
    if (pEntry == PTR_NULL)
    {
        return true;
    }
    return pEntry->IsCallCountingEnabled();
}

#ifndef DACCESS_COMPILE

// Permanently turns off call counting for the method, either by flagging its
// existing hash table entry or by inserting a pre-disabled entry.
void CallCounter::DisableCallCounting(MethodDesc* pMethodDesc)
{
    WRAPPER_NO_CONTRACT;
    _ASSERTE(pMethodDesc != NULL);
    _ASSERTE(pMethodDesc->IsEligibleForTieredCompilation());
    _ASSERTE(IsEligibleForCallCounting(pMethodDesc));

    // Disabling call counting affects the tier of the MethodDesc's first native code version.
    // Callers must make this change deterministically, prior to or while jitting that first
    // version, so the tier cannot change after it is already jitted — by then the call count
    // threshold would be initialized and the entry would exist. To disable counting at other
    // points in time it would be ok to do so if the method has not been called yet (no entry
    // in the hash table yet); if needed, that could be a separate TryDisable...() that fails
    // when the method has already been called.

    SpinLockHolder holder(&m_lock);

    CallCounterEntry *pEntry = const_cast<CallCounterEntry *>(m_methodToCallCount.LookupPtr(pMethodDesc));
    if (pEntry == nullptr)
    {
        // Usually OnMethodCalled() has already created the entry on this thread. With
        // multi-core JIT a method may be jitted before it is ever called, in which case
        // the entry does not exist yet — insert one that is born disabled.
        m_methodToCallCount.Add(CallCounterEntry::CreateWithCallCountingDisabled(pMethodDesc));
    }
    else
    {
        pEntry->DisableCallCounting();
    }
}

// This is called by the prestub each time the method is invoked in a particular
// AppDomain (the AppDomain for which AppDomain.GetCallCounter() == this). These
// calls continue until we backpatch the prestub to avoid future calls. This allows
// us to track the number of calls to each method and use it as a trigger for tiered
// compilation.
//
// Out-parameters (both required, set via TieredCompilationManager::OnMethodCalled
// unless counting is already disabled for the method):
//   shouldStopCountingCallsRef - set when the prestub should stop routing calls here
//   wasPromotedToNextTierRef   - set when the method has been promoted past tier 0
void CallCounter::OnMethodCalled(
    MethodDesc* pMethodDesc,
    TieredCompilationManager *pTieredCompilationManager,
    BOOL* shouldStopCountingCallsRef,
    BOOL* wasPromotedToNextTierRef)
{
    STANDARD_VM_CONTRACT;

    _ASSERTE(pMethodDesc->IsEligibleForTieredCompilation());
    _ASSERTE(pTieredCompilationManager != nullptr);
    _ASSERTE(shouldStopCountingCallsRef != nullptr);
    _ASSERTE(wasPromotedToNextTierRef != nullptr);

    // At the moment, call counting is only done for tier 0 code
    _ASSERTE(IsEligibleForCallCounting(pMethodDesc));

    // PERF: This is a simple to implement, but not so performant, call counter.
    // Currently this is only called until we reach a fixed call count and is then
    // disabled. It's likely we'll want to improve this at some point but
    // it's not as bad as you might expect. Allocating a counter inline in the
    // MethodDesc or at some location computable from the MethodDesc should
    // eliminate 1 pointer per-method (the MethodDesc* key) and the CPU
    // overhead to acquire the lock/search the dictionary. Depending on where it
    // is we may also be able to reduce it to a 1 byte counter without wasting the
    // following bytes for alignment. Further work to inline the OnMethodCalled
    // callback directly into the jitted code would eliminate CPU overhead of
    // leaving the prestub unpatched, but may not be good overall as it increases
    // the size of the jitted code.

    bool isFirstCall = false;
    int callCountLimit;
    {
        // Be careful if you convert to something fully lock/interlocked-free that
        // you correctly handle what happens when some N simultaneous calls don't
        // all increment the counter. The slight drift is probably negligible for tuning,
        // but TieredCompilationManager::OnMethodCalled() doesn't expect multiple calls
        // each claiming to be exactly the threshold call count needed to trigger
        // optimization.
        SpinLockHolder holder(&m_lock);
        CallCounterEntry* pEntry = const_cast<CallCounterEntry*>(m_methodToCallCount.LookupPtr(pMethodDesc));
        if (pEntry == NULL)
        {
            // First observed call: create the entry with the remaining-call budget.
            isFirstCall = true;
            callCountLimit = (int)g_pConfig->TieredCompilation_CallCountThreshold() - 1;
            _ASSERTE(callCountLimit >= 0);
            m_methodToCallCount.Add(CallCounterEntry(pMethodDesc, callCountLimit));
        }
        else if (pEntry->IsCallCountingEnabled())
        {
            // Consume one call from the remaining budget; the manager below decides
            // whether the new value triggers promotion.
            callCountLimit = --pEntry->callCountLimit;
        }
        else
        {
            // Counting was disabled for this method (see DisableCallCounting());
            // report it as already promoted so the prestub stops calling in.
            *shouldStopCountingCallsRef = true;
            *wasPromotedToNextTierRef = true;
            return;
        }
    }

    // Deliberately outside the lock: the manager may do heavy work (scheduling jit
    // of the next tier) and sets both out-parameters.
    pTieredCompilationManager->OnMethodCalled(
        pMethodDesc,
        isFirstCall,
        callCountLimit,
        shouldStopCountingCallsRef,
        wasPromotedToNextTierRef);
}

#endif // !DACCESS_COMPILE
#endif // FEATURE_TIERED_COMPILATION