path: root/src/vm/gchelpers.inl
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

/*
* GCHELPERS.INL
*
* GC Allocation and Write Barrier Helpers
*
*
*/

#ifndef _GCHELPERS_INL_
#define _GCHELPERS_INL_

//========================================================================
//
//      WRITE BARRIER HELPERS
//
//========================================================================

#if defined(_WIN64)
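    // Each card byte maps 1 << card_byte_shift bytes of GC heap (2 KB here, 1 KB on 32-bit),
    // and each card bundle byte maps 1 << card_bundle_byte_shift bytes (2 MB), i.e. 1024 card bytes.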
    static const int card_byte_shift        = 11;
    static const int card_bundle_byte_shift = 21;
#else
    static const int card_byte_shift        = 10;

    #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
        #error Manually managed card bundles are currently only implemented for AMD64.
    #endif
#endif

FORCEINLINE void InlinedSetCardsAfterBulkCopyHelper(Object **start, size_t len)
{
    // Check whether the writes were even into the heap. If not, there's no card update required.
    // Also if the size is smaller than a pointer, no write barrier is required.
    _ASSERTE(len >= sizeof(uintptr_t));
    if ((BYTE*)start < g_lowest_address || (BYTE*)start >= g_highest_address)
    {
        return;
    }

    // Don't optimize the Generation 0 case if we are checking for write barrier violations
    // since we need to update the shadow heap even in the generation 0 case.
#if defined (WRITE_BARRIER_CHECK) && !defined (SERVER_GC)
    if (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_BARRIERCHECK)
    {
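        // Mirror every pointer-sized slot that was just written into the GC shadow heap so
        // heap verification can later catch any write that bypassed its barrier.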
        for (unsigned i = 0; i < len / sizeof(Object*); i++)
        {
            updateGCShadow(&start[i], start[i]);
        }
    }
#endif //WRITE_BARRIER_CHECK && !SERVER_GC

#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
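    // When write watch for the GC heap is implemented in software rather than by the OS,
    // record the copied range as dirty so a background/concurrent GC will revisit it.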
    if (GCHeapUtilities::SoftwareWriteWatchIsEnabled())
    {
        GCHeapUtilities::SoftwareWriteWatchSetDirtyRegion(start, len);
    }
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP

    size_t startAddress = (size_t)start;
    size_t endAddress = startAddress + len;
    size_t startingClump = startAddress >> card_byte_shift;
    size_t endingClump = (endAddress + (1 << card_byte_shift) - 1) >> card_byte_shift;

    // calculate the number of clumps to mark (round_up(end) - start)
    size_t clumpCount = endingClump - startingClump;
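    // For example, with card_byte_shift == 11 (64-bit), a 24-byte copy starting at 0x7FF0
    // yields startingClump == 0xF and endingClump == 0x11: the range straddles the 2 KB
    // card boundary at 0x8000, so two card bytes get marked.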
    // VolatileLoadWithoutBarrier() is used here to prevent fetch of g_card_table from being reordered
    // with g_lowest/highest_address check at the beginning of this function.
    uint8_t* card = ((uint8_t*)VolatileLoadWithoutBarrier(&g_card_table)) + startingClump;

    // Fill the cards. To avoid cache line thrashing we check whether the cards have already been set before
    // writing.
    do
    {
        if (*card != 0xff)
        {
            *card = 0xff;
        }

        card++;
        clumpCount--;
    }
    while (clumpCount != 0);

#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
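    // Card bundles are a second-level summary of the card table: each bundle byte covers a
    // block of card bytes, letting the GC skip large clean stretches of the card table.
    // Mark the bundle bytes covering the card bytes that were just set above.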
    size_t startBundleByte = startAddress >> card_bundle_byte_shift;
    size_t endBundleByte = (endAddress + (1 << card_bundle_byte_shift) - 1) >> card_bundle_byte_shift;
    size_t bundleByteCount = endBundleByte - startBundleByte;

    uint8_t* pBundleByte = ((uint8_t*)VolatileLoadWithoutBarrier(&g_card_bundle_table)) + startBundleByte;

    do
    {
        if (*pBundleByte != 0xFF)
        {
            *pBundleByte = 0xFF;
        }

        pBundleByte++;
        bundleByteCount--;
    }
    while (bundleByteCount != 0);
#endif
}
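
// Illustrative sketch only: one plausible shape for a bulk-copy caller of the helper above.
// The wrapper name is hypothetical, and the plain memmove stands in for whatever GC-safe,
// pointer-aligned copy the runtime actually performs; it is fenced out with #if 0 so it does
// not affect compilation.
#if 0
FORCEINLINE void InlinedCopyGCRefsAndMarkCards(Object **dst, Object **src, size_t len)
{
    // Copy the object references first (a real caller must do this in cooperative mode with
    // GC-safe, pointer-sized moves), then mark the destination's card (and card bundle)
    // bytes so the next ephemeral GC scans the range for cross-generation references.
    memmove(dst, src, len);
    InlinedSetCardsAfterBulkCopyHelper(dst, len);
}
#endif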

#endif // !_GCHELPERS_INL_