summaryrefslogtreecommitdiff
path: root/src/vm/arm64/crthelpers.S
blob: c8b108ca8fdc5c3acb5ff569a395c4b30bf89534 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

// ==++==
//

#include "unixasmmacros.inc"

//
// ==--==

// Calls to JIT_MemSet are emitted by the JIT for initialization of large structs.
// We need to provide our own implementation of memset instead of using the one in the CRT because the CRT implementation does not guarantee
// that aligned 8/4/2-byte memory will be written atomically. This is required because members in a struct can be read atomically
// and their values should be written atomically.
//
//
//void JIT_MemSet(void *dst, int val, SIZE_T count)
//
//    uint64_t valEx = (unsigned char)val;
//    valEx = valEx | valEx << 8;
//    valEx = valEx | valEx << 16;
//    valEx = valEx | valEx << 32;
//
//    size_t dc_zva_size = 4ULL << DCZID_EL0.BS;
//
//    uint64_t use_dc_zva = (val == 0) && !DCZID_EL0.p ? count / (2 * dc_zva_size) : 0; // ~Minimum size (assumes worst case alignment)
//
//    // If not aligned then make it 8-byte aligned   
//    if(((uint64_t)dst&0xf) != 0)
//    {
//        // Calculate alignment we can do without exceeding count
//        // Use math to avoid introducing more unpredictable branches
//        // Due to inherent mod in lsr, ~7 is used instead of ~0 to handle count == 0
//        // Note: the logic will fail if count >= (1 << 61).  But this exceeds max physical memory for arm64
//        uint8_t align = (dst & 0x7) & (~uint64_t(7) >> (countLeadingZeros(count) mod 64))
//
//        if(align&0x1)
//        {
//            *(uint8_t*)dst = (uint8_t)valEx;
//            dst = (uint8_t*)dst + 1;
//            count-=1;
//        }
//
//        if(align&0x2)
//        {
//            *(uint16_t*)dst = (uint16_t)valEx;
//            dst = (uint16_t*)dst + 1;
//            count-=2;
//        }
//
//        if(align&0x4)
//        {
//            *(uint32_t*)dst = (uint32_t)valEx;
//            dst = (uint32_t*)dst + 1;
//            count-=4;
//        }
//    }
//
//    if(use_dc_zva)
//    {
//        // If not aligned then make it aligned to dc_zva_size
//        if(dst&0x8)
//        {
//            *(uint64_t*)dst = (uint64_t)valEx;
//            dst = (uint64_t*)dst + 1;
//            count-=8;
//        }
//
//        while(dst & (dc_zva_size - 1))
//        {
//            *(uint64_t*)dst = valEx;
//            dst = (uint64_t*)dst + 1;
//            *(uint64_t*)dst = valEx;
//            dst = (uint64_t*)dst + 1;
//            count-=16;
//        }
//
//        count -= dc_zva_size;
//
//        while(count >= 0)
//        {
//            dc_zva(dst);
//            dst = (uint8_t*)dst + dc_zva_size;
//            count-=dc_zva_size;
//        }
//
//        count += dc_zva_size;
//    }
//
//    count-=16;
//
//    while(count >= 0)
//    {
//        *(uint64_t*)dst = valEx;
//        dst = (uint64_t*)dst + 1;
//        *(uint64_t*)dst = valEx;
//        dst = (uint64_t*)dst + 1;
//        count-=16;
//    }
//
//    if(count & 8)
//    {
//        *(uint64_t*)dst = valEx;
//        dst = (uint64_t*)dst + 1;
//    }
//
//    if(count & 4)
//    {
//        *(uint32_t*)dst = (uint32_t)valEx;
//        dst = (uint32_t*)dst + 1;
//    }
//
//    if(count & 2)
//    {
//        *(uint16_t*)dst = (uint16_t)valEx;
//        dst = (uint16_t*)dst + 1;
//    }
//
//    if(count & 1)
//    {
//        *(uint8_t*)dst = (uint8_t)valEx;
//    }
//
//

// Assembly code corresponding to the above C++ method. JIT_MemSet can AV, and the CLR exception personality routine needs to
// determine whether the exception took place inside JIT_MemSet in order to throw the corresponding managed exception.
// Determining this would be slow if the method were implemented as a C++ method (using unwind info). By adding the JIT_MemSet_End
// marker in the assembly file, it can easily be determined whether the exception happened in JIT_MemSet. Therefore, JIT_MemSet has
// been written in assembly instead of as a C++ method.

LEAF_ENTRY JIT_MemSet, _TEXT
    ands        w8, w1, #0xff
    mrs         x3, DCZID_EL0                      // x3 = DCZID_EL0
    mov         x6, #4
    lsr         x11, x2, #3                        // x11 = count >> 3

    orr         w8, w8, w8, lsl #8
    and         x5, x3, #0xf                       // x5 = dczid_el0.bs
    csel        x11, x11, xzr, eq                  // x11 = (val == 0) ? count >> 3 : 0
    tst         x3, (1 << 4)

    orr         w8, w8, w8, lsl #0x10
    csel        x11, x11, xzr, eq                  // x11 = (val == 0) && !DCZID_EL0.p ? count >> 3 : 0
    ands        x3, x0, #7                         // x3 = dst & 7
    lsl         x9, x6, x5                         // x9 = size

    orr         x8, x8, x8, lsl #0x20
    lsr         x11, x11, x5                       // x11 = (val == 0) && !DCZID_EL0.p ? count >> (3 + DCZID_EL0.bs) : 0
    sub         x10, x9, #1                        // x10 = mask

    b.eq        LOCAL_LABEL(JIT_MemSet_0x80)

    movn        x4, #7
    clz         x5, x2
    lsr         x4, x4, x5
    and         x3, x3, x4

    tbz         x3, #0, LOCAL_LABEL(JIT_MemSet_0x2c)
    strb        w8, [x0], #1
    sub         x2, x2, #1
LOCAL_LABEL(JIT_MemSet_0x2c):
    tbz         x3, #1, LOCAL_LABEL(JIT_MemSet_0x5c)
    strh        w8, [x0], #2
    sub         x2, x2, #2
LOCAL_LABEL(JIT_MemSet_0x5c):
    tbz         x3, #2, LOCAL_LABEL(JIT_MemSet_0x80)
    str         w8, [x0], #4
    sub         x2, x2, #4
LOCAL_LABEL(JIT_MemSet_0x80):
    cbz         x11, LOCAL_LABEL(JIT_MemSet_0x9c)
    tbz         x0, #3, LOCAL_LABEL(JIT_MemSet_0x84)
    str         x8, [x0], #8
    sub         x2, x2, #8

    b           LOCAL_LABEL(JIT_MemSet_0x85)
LOCAL_LABEL(JIT_MemSet_0x84):
    stp         x8, x8, [x0], #16
    sub         x2, x2, #16
LOCAL_LABEL(JIT_MemSet_0x85):
    tst         x0, x10
    b.ne        LOCAL_LABEL(JIT_MemSet_0x84)

    b           LOCAL_LABEL(JIT_MemSet_0x8a)
LOCAL_LABEL(JIT_MemSet_0x88):
    dc          zva, x0
    add         x0, x0, x9
LOCAL_LABEL(JIT_MemSet_0x8a):
    subs        x2, x2, x9
    b.ge        LOCAL_LABEL(JIT_MemSet_0x88)

LOCAL_LABEL(JIT_MemSet_0x8c):
    add         x2, x2, x9

LOCAL_LABEL(JIT_MemSet_0x9c):
    b           LOCAL_LABEL(JIT_MemSet_0xa8)
LOCAL_LABEL(JIT_MemSet_0xa0):
    stp         x8, x8, [x0], #16
LOCAL_LABEL(JIT_MemSet_0xa8):
    subs        x2, x2, #16
    b.ge        LOCAL_LABEL(JIT_MemSet_0xa0)

LOCAL_LABEL(JIT_MemSet_0xb0):
    tbz         x2, #3, LOCAL_LABEL(JIT_MemSet_0xb4)
    str         x8, [x0], #8
LOCAL_LABEL(JIT_MemSet_0xb4):
    tbz         x2, #2, LOCAL_LABEL(JIT_MemSet_0xc8)
    str         w8, [x0], #4
LOCAL_LABEL(JIT_MemSet_0xc8):
    tbz         x2, #1, LOCAL_LABEL(JIT_MemSet_0xdc)
    strh        w8, [x0], #2
LOCAL_LABEL(JIT_MemSet_0xdc):
    tbz         x2, #0, LOCAL_LABEL(JIT_MemSet_0xe8)
    strb        w8, [x0]
LOCAL_LABEL(JIT_MemSet_0xe8):
    ret         lr
LEAF_END_MARKED JIT_MemSet, _TEXT

// See comments above for JIT_MemSet

//void JIT_MemCpy(void *dst, const void *src, SIZE_T count)
//
//    // If not aligned then make it 8-byte aligned   
//    if(((uintptr_t)dst&0x7) != 0)
//    {
//        // Calculate alignment we can do without exceeding count
//        // Use math to avoid introducing more unpredictable branches
//        // Due to inherent mod in lsr, ~7 is used instead of ~0 to handle count == 0
//        // Note: the logic will fail if count >= (1 << 61).  But this exceeds max physical memory for arm64
//        uint8_t align = (dst & 0x7) & (~uint64_t(7) >> (countLeadingZeros(count) mod 64))
//
//        if(align&0x1)
//        {
//            *(uint8_t*)dst = *(uint8_t*)src;
//            dst = (uint8_t*)dst + 1;
//            src = (uint8_t*)src + 1;
//            count-=1;
//        }
//
//        if(align&0x2)
//        {
//            *(uint16_t*)dst = *(uint16_t*)src;
//            dst = (uint16_t*)dst + 1;
//            src = (uint16_t*)src + 1;
//            count-=2;
//        }
//
//        if(align&0x4)
//        {
//            *(uint32_t*)dst = *(uint32_t*)src;
//            dst = (uint32_t*)dst + 1;
//            src = (uint32_t*)src + 1;
//            count-=4;
//        }
//    }
//
//    count-=16;
//
//    while(count >= 0)
//    {
//        *(uint64_t*)dst = *(uint64_t*)src;
//        dst = (uint64_t*)dst + 1;
//        src = (uint64_t*)src + 1;
//        *(uint64_t*)dst = *(uint64_t*)src;
//        dst = (uint64_t*)dst + 1;
//        src = (uint64_t*)src + 1;
//        count-=16;
//    }
//
//    if(count & 8)
//    {
//        *(uint64_t*)dst = *(uint64_t*)src;
//        dst = (uint64_t*)dst + 1;
//        src = (uint64_t*)src + 1;
//    }
//
//    if(count & 4)
//    {
//        *(uint32_t*)dst = *(uint32_t*)src;
//        dst = (uint32_t*)dst + 1;
//        src = (uint32_t*)src + 1;
//    }
//
//    if(count & 2)
//    {
//        *(uint16_t*)dst = *(uint16_t*)src;
//        dst = (uint16_t*)dst + 1;
//        src = (uint16_t*)src + 1;
//    }
//
//    if(count & 1)
//    {
//        *(uint8_t*)dst = *(uint8_t*)src;
//    }
//
//

// Assembly code corresponding to above C++ method.
// See comments above for JIT_MemSet method
LEAF_ENTRY JIT_MemCpy, _TEXT
    ands        x3, x0, #7
    movn        x4, #7
    clz         x5, x2
    b.eq        LOCAL_LABEL(JIT_MemCpy_0xa8)
    lsr         x4, x4, x5
    and         x3, x3, x4
    tbz         x3, #0, LOCAL_LABEL(JIT_MemCpy_0x2c)
    ldrsb       w8, [x1], #1
    strb        w8, [x0], #1
    sub         x2, x2, #1
LOCAL_LABEL(JIT_MemCpy_0x2c):
    tbz         x3, #1, LOCAL_LABEL(JIT_MemCpy_0x5c)
    ldrsh       w8, [x1], #2
    strh        w8, [x0], #2
    sub         x2, x2, #2
LOCAL_LABEL(JIT_MemCpy_0x5c):
    tbz         x3, #2, LOCAL_LABEL(JIT_MemCpy_0xa8)
    ldr         w8, [x1], #4
    str         w8, [x0], #4
    sub         x2, x2, #4
    b           LOCAL_LABEL(JIT_MemCpy_0xa8)
LOCAL_LABEL(JIT_MemCpy_0xa0):
    ldp         x8, x9, [x1], #16
    stp         x8, x9, [x0], #16
LOCAL_LABEL(JIT_MemCpy_0xa8):
    subs        x2, x2, #16
    b.ge        LOCAL_LABEL(JIT_MemCpy_0xa0)
LOCAL_LABEL(JIT_MemCpy_0xb0):
    tbz         x2, #3, LOCAL_LABEL(JIT_MemCpy_0xb4)
    ldr         x8, [x1], #8
    str         x8, [x0], #8
LOCAL_LABEL(JIT_MemCpy_0xb4):
    tbz         x2, #2, LOCAL_LABEL(JIT_MemCpy_0xc8)
    ldr         w8, [x1], #4
    str         w8, [x0], #4
LOCAL_LABEL(JIT_MemCpy_0xc8):
    tbz         x2, #1, LOCAL_LABEL(JIT_MemCpy_0xdc)
    ldrsh       w8, [x1], #2
    strh        w8, [x0], #2
LOCAL_LABEL(JIT_MemCpy_0xdc):
    tbz         x2, #0, LOCAL_LABEL(JIT_MemCpy_0xe8)
    ldrsb       w8, [x1]
    strb        w8, [x0]
LOCAL_LABEL(JIT_MemCpy_0xe8):
    ret         lr
LEAF_END_MARKED JIT_MemCpy, _TEXT