path: root/src/jit/alloc.cpp
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
/*****************************************************************************/


#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
/*****************************************************************************/

/*****************************************************************************/
void                allocatorCodeSizeBeg(){}
/*****************************************************************************/
#ifdef  DEBUG
/*****************************************************************************/

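// Print a notification message (or a default one) and break into the debugger
// if one is attached. Used as a hook for conditions such as 'blockStop' below.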
void    __cdecl     debugStop(const char *why, ...)
{
    va_list     args;

    va_start(args, why);

    printf("NOTIFICATION: ");
    if  (why)
        vprintf(why, args);
    else
        printf("debugStop(0)");

    printf("\n");

    va_end(args);

    BreakIfDebuggerPresent();
}

/*****************************************************************************/

/*
 * Address of an allocation to catch: nraAlloc() calls debugStop() when it hands
 * out a block at exactly this address. The default is simply a value that should
 * never match a real block address.
 */
static  size_t    blockStop    = 99999999;

/*****************************************************************************/
#endif // DEBUG
/*****************************************************************************/

size_t THE_ALLOCATOR_BASE_SIZE  = 0;

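// One-time initialization of this norls_allocator instance. A 'pageSize' of zero selects
// the default page size; a non-zero 'preAlloc' grabs the initial page(s) up front.
// Note that the return value is 'true' on failure (out of memory while pre-allocating)
// and 'false' on success.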
bool   norls_allocator::nraInit(IEEMemoryManager* pMemoryManager, size_t pageSize, int preAlloc)
{
    bool    result = false;

    nraMemoryManager = pMemoryManager;

    nraPageList  =
    nraPageLast  = 0;

    nraFreeNext  =
    nraFreeLast  = 0;

    assert(THE_ALLOCATOR_BASE_SIZE != 0);

    nraPageSize  = pageSize ? pageSize : THE_ALLOCATOR_BASE_SIZE;

#ifdef DEBUG
    nraShouldInjectFault = JitConfig.ShouldInjectFault() != 0;
#endif    

    if  (preAlloc)
    {
        /* Grab the initial page(s) */

        setErrorTrap(NULL, norls_allocator *, pThis, this)  // ERROR TRAP: Start normal block
        {
            pThis->nraAllocNewPage(0);
        }
        impJitErrorTrap()  // ERROR TRAP: The following block handles errors
        {
            result = true;
        }
        endErrorTrap()  // ERROR TRAP: End
    }

    return  result;
}

/*---------------------------------------------------------------------------*/

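// Grow the allocator: allocate a fresh page (large enough to hold 'sz' bytes plus the
// page descriptor), append it to the page list, and return a pointer to 'sz' usable
// bytes at the start of its contents area.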
void    *   norls_allocator::nraAllocNewPage(size_t sz)
{
    norls_pagdesc * newPage;
    size_t          sizPage;

    size_t          realSize = sz + sizeof(norls_pagdesc);
    if (realSize < sz) 
        NOMEM();   // Integer overflow

    /* Do we have a page that's now full? */

    if  (nraPageLast)
    {
        /* Undo the "+=" done in nraAlloc() */

        nraFreeNext -= sz;

        /* Save the actual used size of the page */

        nraPageLast->nrpUsedSize = nraFreeNext - nraPageLast->nrpContents;
    }

    /* Make sure we grab enough to satisfy the allocation request */

    sizPage = nraPageSize;

    if  (sizPage < realSize)
    {
        /* The allocation doesn't fit in a default-sized page */

#ifdef  DEBUG
//      if  (nraPageLast) printf("NOTE: wasted %u bytes in last page\n", nraPageLast->nrpPageSize - nraPageLast->nrpUsedSize);
#endif

        sizPage = realSize;
    }

    /* Round up to a multiple of the OS page size */

    if (!nraDirectAlloc())
    {
        sizPage +=  (DEFAULT_PAGE_SIZE - 1);
        sizPage &= ~(DEFAULT_PAGE_SIZE - 1);
    }
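    // For example, assuming DEFAULT_PAGE_SIZE is 0x1000, a 0x1234-byte request
    // (descriptor included) would be rounded up to a 0x2000-byte page here.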

    /* Allocate the new page */

    newPage = (norls_pagdesc *)nraVirtualAlloc(0, sizPage, MEM_COMMIT, PAGE_READWRITE);
    if  (!newPage)
        NOMEM();

#ifdef DEBUG
    newPage->nrpSelfPtr = newPage;
#endif

    /* Append the new page to the end of the list */

    newPage->nrpNextPage = 0;
    newPage->nrpPageSize = sizPage;
    newPage->nrpPrevPage = nraPageLast;
    newPage->nrpUsedSize = 0;  // nrpUsedSize is not meaningful until the next page is
                               // allocated; zero it here rather than leaving garbage
                               // that could confuse us later.

    if  (nraPageLast)
        nraPageLast->nrpNextPage = newPage;
    else
        nraPageList              = newPage;
    nraPageLast = newPage;

    /* Set up the 'next' and 'last' pointers */

    nraFreeNext = newPage->nrpContents + sz;
    nraFreeLast = newPage->nrpPageSize + (BYTE *)newPage;

    assert(nraFreeNext <= nraFreeLast);

    return  newPage->nrpContents;
}

// This method walks nraPageList forward and releases every page.
// Be careful that no other thread is running nraToss() at the same time;
// otherwise the page referenced by 'temp' could be double-freed (VSW 600919).

void        norls_allocator::nraFree(void)
{
    /* Free all of the allocated pages */

    while   (nraPageList)
    {
        norls_pagdesc * temp;

        temp = nraPageList;
               nraPageList = temp->nrpNextPage;

        nraVirtualFree(temp, 0, MEM_RELEASE);
    }
}

// This method walks the page list backward from nraPageLast, releasing every page
// allocated after the given mark, and then restores the free pointers from the mark.
// Be careful that no other thread is running nraFree() at the same time;
// otherwise the page referenced by 'temp' could be double-freed (VSW 600919).
void        norls_allocator::nraToss(nraMarkDsc &mark)
{
    void    *   last = mark.nmPage;

    if  (!last)
    {
        if  (!nraPageList)
            return;

        nraFreeNext  = nraPageList->nrpContents;
        nraFreeLast  = nraPageList->nrpPageSize + (BYTE *)nraPageList;

        return;
    }

    /* Free up all the new pages we've added at the end of the list */

    while (nraPageLast != last)
    {
        norls_pagdesc * temp;

        /* Remove the last page from the end of the list */

        temp = nraPageLast;
               nraPageLast = temp->nrpPrevPage;

        /* The new last page has no 'next' page */

        nraPageLast->nrpNextPage = 0;

        nraVirtualFree(temp, 0, MEM_RELEASE);
    }

    nraFreeNext = mark.nmNext;
    nraFreeLast = mark.nmLast;
}

/*****************************************************************************/
#ifdef DEBUG
/*****************************************************************************/
void    *           norls_allocator::nraAlloc(size_t sz)
{
    void    *   block;

    assert(sz != 0 && (sz & (sizeof(int) - 1)) == 0);
#ifdef _WIN64
    // Ensure that we always allocate in pointer-sized increments.
    /* TODO-Cleanup:
     * This is wasteful.  We should add alignment requirements to the allocations so we don't waste space in
     * the heap.
     */
    sz = (unsigned)roundUp(sz, sizeof(size_t));
#endif

#ifdef DEBUG
    if (nraShouldInjectFault)
    {
        // Force the underlying memory allocator (either the OS or the CLR host)
        // to allocate the memory, so that any fault injection kicks in.
        void * p = DbgNew(1); 
        if (p) 
        {
            DbgDelete(p);
        }
        else 
        {
            NOMEM();  // Throw!
        }
    }
#endif    

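    // Bump-allocate from the current page; if the bump runs past the end of the
    // page, nraAllocNewPage() undoes it and carves the block out of a fresh page.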
    block = nraFreeNext;
            nraFreeNext += sz;

    if  ((size_t)block == blockStop) debugStop("Block at %08X allocated", block);

    if  (nraFreeNext > nraFreeLast)
        block = nraAllocNewPage(sz);

#ifdef DEBUG
    memset(block, UninitializedWord<char>(), sz);
#endif

    return  block;
}

/*****************************************************************************/
#endif
/*****************************************************************************/

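// Returns the total number of bytes allocated from the underlying memory source
// across all pages, including page descriptors and unused tail space.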
size_t              norls_allocator::nraTotalSizeAlloc()
{
    norls_pagdesc * page;
    size_t          size = 0;

    for (page = nraPageList; page; page = page->nrpNextPage)
        size += page->nrpPageSize;

    return  size;
}

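// Returns the total number of bytes actually handed out to callers. The used size
// of the last (current) page is refreshed from nraFreeNext before summing.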
size_t              norls_allocator::nraTotalSizeUsed()
{
    norls_pagdesc * page;
    size_t          size = 0;

    if  (nraPageLast)
        nraPageLast->nrpUsedSize = nraFreeNext - nraPageLast->nrpContents;

    for (page = nraPageList; page; page = page->nrpNextPage)
        size += page->nrpUsedSize;

    return  size;
}

/*****************************************************************************
 * We try to reuse this single allocator instance as much as possible. It
 * always keeps a page handy, so compiling small methods won't have to call
 * VirtualAlloc(). However, we may not be able to use it if another thread or
 * a reentrant call is already using it. (See the usage sketch after
 * nraFreeTheAllocator() below.)
 */

static norls_allocator *nraTheAllocator;
static nraMarkDsc       nraTheAllocatorMark;
static LONG             nraTheAllocatorIsInUse = 0;

// The static instance which we try to reuse for all non-simultaneous requests

static norls_allocator  theAllocator;

/*****************************************************************************/

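// Selects the base page size for the shared allocator: MIN_PAGE_SIZE when pages are
// allocated directly from the OS, DEFAULT_PAGE_SIZE otherwise.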
void                nraInitTheAllocator()
{
    THE_ALLOCATOR_BASE_SIZE = norls_allocator::nraDirectAlloc() ? 
        (size_t)norls_allocator::MIN_PAGE_SIZE : (size_t)norls_allocator::DEFAULT_PAGE_SIZE;
}

void                nraTheAllocatorDone()
{
    // We deliberately do not call nraTheAllocator->nraFree() here and instead let the
    // memory leak. Here is the reason (VSW 600919).

    // The following race condition exists during ExitProcess:
    // Thread A calls ExitProcess, which causes thread B to be terminated.
    // Thread B is terminated in the middle of nraToss()
    // (through the call chain nraFreeTheAllocator() ==> nraRlsm() ==> nraToss()).
    // Thread A then comes along and calls nraTheAllocator->nraFree(), which double-frees
    // the page referenced by "temp".

    // These are possible fixes:
    // 1. Thread A could acquire the nraTheAllocatorIsInUse lock before calling
    //    theAllocator.nraFree(). However, this could deadlock because thread B is
    //    already gone and therefore can never release nraTheAllocatorIsInUse.
    // 2. Fix the logic in nraToss() and nraFree() to update nraPageList and nraPageLast
    //    in a thread-safe way. But it would take careful work to keep that performant
    //    (e.g. without holding a lock).
    // 3. The scenario of dynamically unloading clrjit.dll cleanly is unimportant at this
    //    time, and we leak the memory associated with other instances of norls_allocator
    //    anyway.

    // Therefore we decided not to run this cleanup code when unloading the jit.
}

/*****************************************************************************/

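// Tries to claim the shared allocator instance, initializing it on first use.
// Returns NULL if it is already claimed by another Compiler instance, if initialization
// fails, or if it was initialized with a different memory manager.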
norls_allocator *   nraGetTheAllocator(IEEMemoryManager* pMemoryManager)
{
    if (InterlockedExchange(&nraTheAllocatorIsInUse, 1))
    {
        // It's being used by another Compiler instance.
        return NULL;
    }

    if (nraTheAllocator == NULL)
    {
        // Not initialized yet

        bool res = theAllocator.nraInit(pMemoryManager, 0, 1);

        if (res)
        {
            // nraInit() returns true on failure to pre-allocate the initial page
            InterlockedExchange(&nraTheAllocatorIsInUse, 0);            
            return NULL;
        }

        nraTheAllocator = &theAllocator;
        
        assert(nraTheAllocator->nraTotalSizeAlloc() == THE_ALLOCATOR_BASE_SIZE);
        nraTheAllocator->nraMark(nraTheAllocatorMark);    
    }
    else
    {
        if (nraTheAllocator->nraGetMemoryManager() != pMemoryManager)
        {
            // already initialized with a different memory manager
            InterlockedExchange(&nraTheAllocatorIsInUse, 0);            
            return NULL;
        }
    }

    assert(nraTheAllocator->nraTotalSizeAlloc() == THE_ALLOCATOR_BASE_SIZE);
    return nraTheAllocator;
}


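// Releases the shared allocator: rolls it back to the mark taken right after
// initialization and clears the in-use flag.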
void                nraFreeTheAllocator()
{
    assert(nraTheAllocator != NULL);
    assert(nraTheAllocatorIsInUse == 1);

    nraTheAllocator->nraRlsm(nraTheAllocatorMark);
    assert(nraTheAllocator->nraTotalSizeAlloc() == THE_ALLOCATOR_BASE_SIZE);

    InterlockedExchange(&nraTheAllocatorIsInUse, 0);
}
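
// A minimal usage sketch of the shared allocator (illustrative only; 'pMemoryManager'
// and 'len' are placeholders supplied by the caller, which would normally fall back to
// a private norls_allocator when the shared instance is busy):
//
//     norls_allocator * alloc = nraGetTheAllocator(pMemoryManager);
//     if (alloc != NULL)
//     {
//         // Request sizes must be non-zero multiples of sizeof(int) -- see nraAlloc().
//         void * mem = alloc->nraAlloc(roundUp(len, sizeof(int)));
//
//         /* ... use 'mem'; it remains valid until the allocator is released ... */
//
//         nraFreeTheAllocator();   // rolls back to the initial mark and clears the in-use flag
//     }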

/*****************************************************************************/