path: root/src/vm/callcounter.cpp
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
// ===========================================================================
// File: CallCounter.CPP
//
// ===========================================================================



#include "common.h"
#include "excep.h"
#include "log.h"
#include "tieredcompilation.h"
#include "callcounter.h"

#ifdef FEATURE_TIERED_COMPILATION

CallCounter::CallCounter()
{
    LIMITED_METHOD_CONTRACT;

    m_lock.Init(LOCK_TYPE_DEFAULT);
}

// This is called by the prestub each time the method is invoked in a particular
// AppDomain (the AppDomain for which AppDomain.GetCallCounter() == this). These
// calls continue until we backpatch the prestub to avoid future calls. This allows
// us to track the number of calls to each method and use the count as a trigger for
// tiered compilation.
//
// On return, *shouldStopCountingCallsRef is TRUE if no future invocations need to be
// counted (we reached the count we cared about) and FALSE otherwise, and
// *wasPromotedToTier1Ref reports whether the method has been promoted to tier 1. It is
// permissible for calls to keep arriving even after counting was reported as no longer
// needed; multi-threaded race conditions will surely cause this to occur.
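//
// A rough caller-side sketch (hypothetical pseudocode; the real prestub logic lives
// elsewhere in the VM):
//     BOOL stop = FALSE, promoted = FALSE;
//     GetAppDomain()->GetCallCounter()->OnMethodCalled(pMD, pManager, &stop, &promoted);
//     if (stop) { /* backpatch the entry point so future calls skip the prestub */ }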
void CallCounter::OnMethodCalled(
    MethodDesc* pMethodDesc,
    TieredCompilationManager *pTieredCompilationManager,
    BOOL* shouldStopCountingCallsRef,
    BOOL* wasPromotedToTier1Ref)
{
    STANDARD_VM_CONTRACT;

    _ASSERTE(pMethodDesc->IsEligibleForTieredCompilation());
    _ASSERTE(pTieredCompilationManager != nullptr);
    _ASSERTE(shouldStopCountingCallsRef != nullptr);
    _ASSERTE(wasPromotedToTier1Ref != nullptr);

    // PERF: This is a simple-to-implement, but not particularly performant, call counter.
    // Currently it is only called until we reach a fixed call count and is then
    // disabled. It's likely we'll want to improve this at some point, but
    // it's not as bad as you might expect. Allocating a counter inline in the
    // MethodDesc, or at some location computable from the MethodDesc, should
    // eliminate 1 pointer per method (the MethodDesc* key) and the CPU
    // overhead of acquiring the lock/searching the dictionary. Depending on where it
    // lives we may also be able to reduce it to a 1-byte counter without wasting the
    // following bytes for alignment. Further work to inline the OnMethodCalled
    // callback directly into the jitted code would eliminate the CPU overhead of
    // leaving the prestub unpatched, but may not be good overall as it increases
    // the size of the jitted code.
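    //
    // For illustration only (hypothetical layout, not what this file implements), the
    // inline-counter idea above might look roughly like:
    //     struct MethodDesc { /* ... */ BYTE m_tier0CallCount; /* ... */ };
    //     if (++pMethodDesc->m_tier0CallCount >= threshold) { /* queue tier 1 jitting */ }
    // which would avoid both the MethodDesc* hash key and the lock taken below.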


    int callCount;
    {
        // Be careful, if you convert this to something fully lock/interlocked-free, that
        // you correctly handle what happens when some N simultaneous calls don't
        // all increment the counter. The slight drift is probably negligible for tuning,
        // but TieredCompilationManager::OnMethodCalled() doesn't expect multiple calls
        // each claiming to be exactly the threshold call count needed to trigger
        // optimization.
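        //
        // For illustration (hypothetical, not used here): a lock-free variant built on
        //     LONG newCount = InterlockedIncrement((LONG volatile*)&pEntry->callCount);
        // would still hand out each count value exactly once, whereas a plain
        // unsynchronized pEntry->callCount++ could lose increments or report the same
        // count to multiple callers, which is the situation the caveat above warns about.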
        SpinLockHolder holder(&m_lock);
        CallCounterEntry* pEntry = const_cast<CallCounterEntry*>(m_methodToCallCount.LookupPtr(pMethodDesc));
        if (pEntry == NULL)
        {
            callCount = 1;
            m_methodToCallCount.Add(CallCounterEntry(pMethodDesc, callCount));
        }
        else
        {
            pEntry->callCount++;
            callCount = pEntry->callCount;
        }
    }

    pTieredCompilationManager->OnMethodCalled(pMethodDesc, callCount, shouldStopCountingCallsRef, wasPromotedToTier1Ref);
}

#endif // FEATURE_TIERED_COMPILATION