// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.


#ifndef INTERPRETER_H_DEFINED
#define INTERPRETER_H_DEFINED 1

#include "corjit.h"
#include "corinfo.h"
#include "codeman.h"
#include "jitinterface.h"
#include "stack.h"
#include "crst.h"
#include "callhelpers.h"

typedef SSIZE_T NativeInt;
typedef SIZE_T NativeUInt;
typedef SIZE_T NativePtr;

// Determines whether we interpret IL stubs.  (We might disable this selectively for
// some architectures, perhaps.)
#define INTERP_ILSTUBS 1

// If this is set, we keep track of extra information about IL instructions executed per-method.
#define INTERP_PROFILE 0

// If this is set, we track the distribution of IL instructions.
#define INTERP_ILINSTR_PROFILE 0

#define INTERP_ILCYCLE_PROFILE 0
#if INTERP_ILCYCLE_PROFILE
#if !INTERP_ILINSTR_PROFILE
#error INTERP_ILCYCLE_PROFILE may only be set if INTERP_ILINSTR_PROFILE is also set.
#endif
#endif

#if defined(_DEBUG) || INTERP_ILINSTR_PROFILE
// I define "INTERP_TRACING", rather than just using _DEBUG, so that I can easily make a build
// in which tracing is enabled in retail.
#define INTERP_TRACING 1
#else
#define INTERP_TRACING 0
#endif // defined(_DEBUG) || INTERP_ILINSTR_PROFILE

#if INTERP_TRACING
#define INTERPLOG(...) if (s_TraceInterpreterVerboseFlag.val(CLRConfig::INTERNAL_TraceInterpreterVerbose)) { fprintf(GetLogFile(), __VA_ARGS__); }
#else
#define INTERPLOG(...)
#endif

#if INTERP_TRACING
#define InterpTracingArg(x) ,x
#else
#define InterpTracingArg(x)
#endif

#define FEATURE_INTERPRETER_DEADSIMPLE_OPT 0

#define NYI_INTERP(msg) _ASSERTE_MSG(false, msg)
// I wanted to define NYI_INTERP as the following in retail:
//   #define NYI_INTERP(msg) _ASSERTE_ALL_BUILDS(__FILE__, false)
// but doing so gave a very odd unreachable code error.


// Allows keeping a pointer (index) to the vararg cookie argument, to implement arglist.
// NO_VA_ARGNUM is the sentinel value meaning "none".
#define NO_VA_ARGNUM UINT_MAX

// First, a set of utility routines on CorInfoTypes.

// Returns "true" iff "cit" is "stack-normal": all integer types with byte size less than 4
// are folded to CORINFO_TYPE_INT; all remaining unsigned types are folded to their signed counterparts.
bool IsStackNormalType(CorInfoType cit);

// Returns the stack-normal CorInfoType that contains "cit".
CorInfoType CorInfoTypeStackNormalize(CorInfoType cit);

// Returns the (byte) size of "cit".  Requires that "cit" is not a CORINFO_TYPE_VALUECLASS.
size_t CorInfoTypeSize(CorInfoType cit);

// Returns true iff "cit" is an unsigned integral type.
bool CorInfoTypeIsUnsigned(CorInfoType cit);

// Returns true iff "cit" is an integral type.
bool CorInfoTypeIsIntegral(CorInfoType cit);

// Returns true iff "cet" is an unsigned integral type.
bool CorElemTypeIsUnsigned(CorElementType cet);

// Returns true iff "cit" is an integral type.
bool CorInfoTypeIsFloatingPoint(CorInfoType cit);

// Returns true iff "cit" is a pointer type (mgd/unmgd pointer, or native int).
bool CorInfoTypeIsPointer(CorInfoType cit);

// Requires that "cit" is stack-normal; returns its (byte) size.
inline size_t CorInfoTypeStackNormalSize(CorInfoType cit)
{
    assert(IsStackNormalType(cit));
    return CorInfoTypeSize(cit);
}
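
// Illustrative examples (added for clarity; not part of the original source): per the
// definition of "stack-normal" above, small and unsigned integral types fold to their
// 4-byte (or signed) representations, e.g.:
//
//   CorInfoTypeStackNormalize(CORINFO_TYPE_SHORT) == CORINFO_TYPE_INT
//   CorInfoTypeStackNormalize(CORINFO_TYPE_UINT)  == CORINFO_TYPE_INT
//   CorInfoTypeStackNormalize(CORINFO_TYPE_ULONG) == CORINFO_TYPE_LONG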

inline unsigned getClassSize(CORINFO_CLASS_HANDLE clsHnd)
{
    TypeHandle VMClsHnd(clsHnd);
    return VMClsHnd.GetSize();
}

// The values of this enumeration are in one-to-one correspondence with CorInfoType --
// just shifted so that they're the value stored in an interpreter type for non-value-class
// CorInfoTypes.
enum CorInfoTypeShifted
{
    CORINFO_TYPE_SHIFTED_UNDEF      = unsigned(CORINFO_TYPE_UNDEF)      << 2,    //0x0 << 2 = 0x0
    CORINFO_TYPE_SHIFTED_VOID       = unsigned(CORINFO_TYPE_VOID)       << 2,    //0x1 << 2 = 0x4
    CORINFO_TYPE_SHIFTED_BOOL       = unsigned(CORINFO_TYPE_BOOL)       << 2,    //0x2 << 2 = 0x8
    CORINFO_TYPE_SHIFTED_CHAR       = unsigned(CORINFO_TYPE_CHAR)       << 2,    //0x3 << 2 = 0xC
    CORINFO_TYPE_SHIFTED_BYTE       = unsigned(CORINFO_TYPE_BYTE)       << 2,    //0x4 << 2 = 0x10
    CORINFO_TYPE_SHIFTED_UBYTE      = unsigned(CORINFO_TYPE_UBYTE)      << 2,    //0x5 << 2 = 0x14
    CORINFO_TYPE_SHIFTED_SHORT      = unsigned(CORINFO_TYPE_SHORT)      << 2,    //0x6 << 2 = 0x18
    CORINFO_TYPE_SHIFTED_USHORT     = unsigned(CORINFO_TYPE_USHORT)     << 2,    //0x7 << 2 = 0x1C
    CORINFO_TYPE_SHIFTED_INT        = unsigned(CORINFO_TYPE_INT)        << 2,    //0x8 << 2 = 0x20
    CORINFO_TYPE_SHIFTED_UINT       = unsigned(CORINFO_TYPE_UINT)       << 2,    //0x9 << 2 = 0x24
    CORINFO_TYPE_SHIFTED_LONG       = unsigned(CORINFO_TYPE_LONG)       << 2,    //0xa << 2 = 0x28
    CORINFO_TYPE_SHIFTED_ULONG      = unsigned(CORINFO_TYPE_ULONG)      << 2,    //0xb << 2 = 0x2C
    CORINFO_TYPE_SHIFTED_NATIVEINT  = unsigned(CORINFO_TYPE_NATIVEINT)  << 2,    //0xc << 2 = 0x30
    CORINFO_TYPE_SHIFTED_NATIVEUINT = unsigned(CORINFO_TYPE_NATIVEUINT) << 2,    //0xd << 2 = 0x34
    CORINFO_TYPE_SHIFTED_FLOAT      = unsigned(CORINFO_TYPE_FLOAT)      << 2,    //0xe << 2 = 0x38
    CORINFO_TYPE_SHIFTED_DOUBLE     = unsigned(CORINFO_TYPE_DOUBLE)     << 2,    //0xf << 2 = 0x3C
    CORINFO_TYPE_SHIFTED_STRING     = unsigned(CORINFO_TYPE_STRING)     << 2,    //0x10 << 2 = 0x40
    CORINFO_TYPE_SHIFTED_PTR        = unsigned(CORINFO_TYPE_PTR)        << 2,    //0x11 << 2 = 0x44
    CORINFO_TYPE_SHIFTED_BYREF      = unsigned(CORINFO_TYPE_BYREF)      << 2,    //0x12 << 2 = 0x48
    CORINFO_TYPE_SHIFTED_VALUECLASS = unsigned(CORINFO_TYPE_VALUECLASS) << 2,    //0x13 << 2 = 0x4C
    CORINFO_TYPE_SHIFTED_CLASS      = unsigned(CORINFO_TYPE_CLASS)      << 2,    //0x14 << 2 = 0x50
    CORINFO_TYPE_SHIFTED_REFANY     = unsigned(CORINFO_TYPE_REFANY)     << 2,    //0x15 << 2 = 0x54
    CORINFO_TYPE_SHIFTED_VAR        = unsigned(CORINFO_TYPE_VAR)        << 2,    //0x16 << 2 = 0x58
};
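
// Illustrative consistency check (added for clarity): each shifted value is just the
// corresponding CorInfoType moved past the two low-order tag bits that InterpreterType
// (below) reserves for its encoding.
static_assert(CORINFO_TYPE_SHIFTED_INT == (unsigned(CORINFO_TYPE_INT) << 2),
              "shifted encoding should leave the two tag bits clear");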

class InterpreterType
{
    // We declare m_tp as a CORINFO_CLASS_HANDLE, but the InterpreterType is actually encoded.  We assume that the two
    // low-order bits of a "real" CORINFO_CLASS_HANDLE are zero, then use them as follows:
    //    0x0 ==> if "ci" is a non-struct CORINFO_TYPE_* value, m_tp contents are (ci << 2).
    //    0x1, 0x3 ==> is a CORINFO_CLASS_HANDLE "sh" for a struct type, or'd with 0x1 and possibly 0x2.
    //       0x2 is added to indicate that an instance does not fit in an INT64 stack slot on the platform, and
    //         should be referenced via a level of indirection.
    //    0x2 (exactly) indicates that it is a "native struct type".
    //
    CORINFO_CLASS_HANDLE m_tp;

public:
    // Default ==> undefined.
    InterpreterType()
        : m_tp(reinterpret_cast<CORINFO_CLASS_HANDLE>((static_cast<intptr_t>(CORINFO_TYPE_UNDEF) << 2)))
    {}

    // Requires that "cit" is not CORINFO_TYPE_VALUECLASS.
    InterpreterType(CorInfoType cit)
        : m_tp(reinterpret_cast<CORINFO_CLASS_HANDLE>((static_cast<intptr_t>(cit) << 2)))
    {
        assert(cit != CORINFO_TYPE_VALUECLASS);
    }

    // Requires that "cet" is not ELEMENT_TYPE_VALUETYPE.
    InterpreterType(CorElementType cet)
        : m_tp(reinterpret_cast<CORINFO_CLASS_HANDLE>((static_cast<intptr_t>(CEEInfo::asCorInfoType(cet)) << 2)))
    {
        assert(cet != ELEMENT_TYPE_VALUETYPE);
    }

    InterpreterType(CEEInfo* comp, CORINFO_CLASS_HANDLE sh)
    {
        GCX_PREEMP();

        // TODO: might wish to make a different constructor, for the cases where this is possible...
        TypeHandle typHnd(sh);
        if (typHnd.IsNativeValueType())
        {
            intptr_t shAsInt = reinterpret_cast<intptr_t>(sh);
            assert((shAsInt & 0x1) == 0); // The 0x2 bit might already be set by the VM! This is ok, because it's only set for native value types. This is a bit slimey...
            m_tp = reinterpret_cast<CORINFO_CLASS_HANDLE>(shAsInt | 0x2);
        }
        else
        {
            CorInfoType cit = comp->getTypeForPrimitiveValueClass(sh);
            if (cit != CORINFO_TYPE_UNDEF)
            {
                m_tp = reinterpret_cast<CORINFO_CLASS_HANDLE>(static_cast<intptr_t>(cit) << 2);
            }
            else
            {
                assert((comp->getClassAttribs(sh) & CORINFO_FLG_VALUECLASS) != 0);
                intptr_t shAsInt = reinterpret_cast<intptr_t>(sh);
                assert((shAsInt & 0x3) == 0);
                intptr_t bits = 0x1;                            // All value classes (structs) get 0x1 set.
                if (getClassSize(sh) > sizeof(INT64))
                {
                    bits |= 0x2;                                // "Large" structs get 0x2 set, also.
                }
                m_tp = reinterpret_cast<CORINFO_CLASS_HANDLE>(shAsInt | bits);
            }
        }
    }
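
    // Worked example of the encoding above (illustrative; the handle value is hypothetical):
    // for a 24-byte struct whose CORINFO_CLASS_HANDLE is 0x7f001230 (low two bits zero),
    // the constructor stores 0x7f001233 -- 0x1 because it is a value class, plus 0x2
    // because 24 > sizeof(INT64), so instances are referenced through an indirection.
    // A primitive such as CORINFO_TYPE_INT is stored as 0x8 << 2 == 0x20, with tag bits 0x0.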

    bool operator==(const InterpreterType& it2) const { return m_tp == it2.m_tp; }
    bool operator!=(const InterpreterType& it2) const { return m_tp != it2.m_tp; }

    CorInfoType ToCorInfoType() const
    {
        LIMITED_METHOD_CONTRACT;

        intptr_t iTypeAsInt = reinterpret_cast<intptr_t>(m_tp);
        if ((iTypeAsInt & 0x3) == 0x0)
        {
            return static_cast<CorInfoType>(iTypeAsInt >> 2);
        }
        // Is a class or struct (or refany?).  
        else
        {
            return CORINFO_TYPE_VALUECLASS;
        }
    }

    CorInfoType ToCorInfoTypeNotStruct() const
    {
        LIMITED_METHOD_CONTRACT;

        _ASSERTE_MSG((reinterpret_cast<intptr_t>(m_tp) & 0x3) == 0x0, "precondition: not a struct type.");

        intptr_t iTypeAsInt = reinterpret_cast<intptr_t>(m_tp);
        return static_cast<CorInfoType>(iTypeAsInt >> 2);
    }

    CorInfoTypeShifted ToCorInfoTypeShifted() const
    {
        LIMITED_METHOD_CONTRACT;

        _ASSERTE_MSG((reinterpret_cast<intptr_t>(m_tp) & 0x3) == 0x0, "precondition: not a struct type.");

        return static_cast<CorInfoTypeShifted>(reinterpret_cast<size_t>(m_tp));
    }

    CORINFO_CLASS_HANDLE ToClassHandle() const
    {
        LIMITED_METHOD_CONTRACT;

        intptr_t asInt = reinterpret_cast<intptr_t>(m_tp);
        assert((asInt & 0x3) != 0);
        return reinterpret_cast<CORINFO_CLASS_HANDLE>(asInt & (~0x3));
    }

    size_t AsRaw() const    // Just hand out the raw bits. Be careful using this! Use something else if you can!
    {
        LIMITED_METHOD_CONTRACT;

        return reinterpret_cast<size_t>(m_tp);
    }

    // Returns the stack-normalized type for "this".
    InterpreterType StackNormalize() const;

    // Returns the (byte) size of "this".  Requires "ceeInfo" for the struct case.
    __forceinline size_t Size(CEEInfo* ceeInfo) const
    {
        LIMITED_METHOD_CONTRACT;

        intptr_t asInt = reinterpret_cast<intptr_t>(m_tp);
        intptr_t asIntBits = (asInt & 0x3);
        if (asIntBits == 0)
        {
            return CorInfoTypeSize(ToCorInfoType());
        }
        else if (asIntBits == 0x2)
        {
            // Here we're breaking abstraction, and taking advantage of the fact that 0x2
            // is the low-bit encoding of "native struct type" both for InterpreterType and for
            // TypeHandle.
            TypeHandle typHnd(m_tp);
            assert(typHnd.IsNativeValueType());
            return typHnd.AsNativeValueType()->GetNativeSize();
        }
        else
        {
            return getClassSize(ToClassHandle());
        }
    }

    __forceinline size_t SizeNotStruct() const
    {
        LIMITED_METHOD_CONTRACT;

        _ASSERTE_MSG((reinterpret_cast<intptr_t>(m_tp) & 0x3) == 0, "Precondition: is not a struct type!");
        return CorInfoTypeSize(ToCorInfoTypeNotStruct());
    }

    // Requires that "it" is stack-normal; returns its (byte) size.
    size_t StackNormalSize() const
    {
        CorInfoType cit = ToCorInfoType();
        assert(IsStackNormalType(cit)); // Precondition.
        return CorInfoTypeStackNormalSize(cit);
    }

    // Is it a struct? (But don't include "native struct type").
    bool IsStruct() const
    {
        intptr_t asInt = reinterpret_cast<intptr_t>(m_tp);
        return (asInt & 0x1) == 0x1 || (asInt == CORINFO_TYPE_SHIFTED_REFANY);
    }

    // Returns "true" iff represents a large (> INT64 size) struct.
    bool IsLargeStruct(CEEInfo* ceeInfo) const
    {
        intptr_t asInt = reinterpret_cast<intptr_t>(m_tp);
#ifdef _TARGET_AMD64_
        if (asInt == CORINFO_TYPE_SHIFTED_REFANY)
        {
            return true;
        }
#endif
        return (asInt & 0x3) == 0x3
            || ((asInt & 0x3) == 0x2 && Size(ceeInfo) > sizeof(INT64));
    }

#ifdef _DEBUG
    bool MatchesWork(const InterpreterType it2, CEEInfo* info) const;

    bool Matches(const InterpreterType it2, CEEInfo* info) const
    {
        CONTRACTL {
            THROWS;
            GC_TRIGGERS;
            MODE_COOPERATIVE;
        } CONTRACTL_END;

        return MatchesWork(it2, info) || it2.MatchesWork(*this, info);
    }
#endif // _DEBUG
};

#ifndef DACCESS_COMPILE
// This class does whatever "global" caching we do (caching applicable to all executions after
// the first, as opposed to caching within a single execution).  It is parameterized over the
// "Key" type (which is required to be an integral type, to allow binary search), and the "Val"
// type of things cached.
template<typename Key, typename Val>
class InterpreterCache
{
public:
    InterpreterCache();

    // Returns "false" if "k" is already present, otherwise "true".  Requires that "v" == current mapping
    // if "k" is already present.
    bool AddItem(Key k, Val v);
    bool GetItem(Key k, Val& v);

private:
    struct KeyValPair
    {
        Key m_key;
        Val m_val;
    };

    // This is kept ordered by m_key, to enable binary search.
    KeyValPair* m_pairs;
    unsigned short m_allocSize;
    unsigned short m_count;

    static const unsigned InitSize = 8;

    void EnsureCanInsert();

#ifdef _DEBUG
    static void AddAllocBytes(unsigned bytes);
#endif // _DEBUG
};
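
// Illustrative usage sketch (hypothetical call site; "ilOffset" and "ResolveCallSiteItem"
// are assumptions, not declarations in this header).  The interpreter keys these caches
// by IL offset, roughly:
//
//   CachedItem item;
//   if (!cache->GetItem(ilOffset, item))
//   {
//       item = ResolveCallSiteItem(ilOffset);  // expensive EE lookup, done once
//       cache->AddItem(ilOffset, item);
//   }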

#ifdef _DEBUG
enum CachedItemKind
{
    CIK_Undefined,
    CIK_CallSite,
    CIK_StaticField,
    CIK_InstanceField,
    CIK_ClassHandle,
};
#endif // _DEBUG

struct StaticFieldCacheEntry
{
    void* m_srcPtr;
    UINT m_sz;
    InterpreterType m_it;

    StaticFieldCacheEntry(void* srcPtr, UINT sz, InterpreterType it) : m_srcPtr(srcPtr), m_sz(sz), m_it(it) {}

#ifdef _DEBUG
    bool operator==(const StaticFieldCacheEntry& entry) const
    {
        return m_srcPtr == entry.m_srcPtr && m_sz == entry.m_sz && m_it == entry.m_it;
    }
#endif // _DEBUG
};

// "small" part of CORINFO_SIG_INFO, sufficient for the interpreter to call the method so decribed
struct CORINFO_SIG_INFO_SMALL
{
    CORINFO_CLASS_HANDLE    retTypeClass;   // if the return type is a value class, this is its handle (enums are normalized)
    unsigned                numArgs : 16;
    CorInfoCallConv         callConv: 8;
    CorInfoType             retType : 8;

    CorInfoCallConv     getCallConv()       { return CorInfoCallConv((callConv & CORINFO_CALLCONV_MASK)); }
    bool                hasThis()           { return ((callConv & CORINFO_CALLCONV_HASTHIS) != 0); }
    bool                hasExplicitThis()   { return ((callConv & CORINFO_CALLCONV_EXPLICITTHIS) != 0); }
    unsigned            totalILArgs()       { return (numArgs + hasThis()); }
    bool                isVarArg()          { return ((getCallConv() == CORINFO_CALLCONV_VARARG) || (getCallConv() == CORINFO_CALLCONV_NATIVEVARARG)); }
    bool                hasTypeArg()        { return ((callConv & CORINFO_CALLCONV_PARAMTYPE) != 0); }

#ifdef _DEBUG
    bool operator==(const CORINFO_SIG_INFO_SMALL& csis) const
    {
        return retTypeClass == csis.retTypeClass 
            && numArgs == csis.numArgs 
            && callConv == csis.callConv
            && retType == csis.retType;
    }
#endif // _DEBUG
};
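
// Worked example (illustrative): for an instance method "int Foo(int, long)",
// numArgs == 2 and callConv has CORINFO_CALLCONV_HASTHIS set, so hasThis()
// returns true and totalILArgs() == 3 -- the two declared arguments plus "this"
// (IL argument 0).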

struct CallSiteCacheData
{
    MethodDesc* m_pMD;

    CORINFO_SIG_INFO_SMALL             m_sigInfo;

    CallSiteCacheData(MethodDesc* pMD, const CORINFO_SIG_INFO_SMALL& sigInfo)
        : m_pMD(pMD), m_sigInfo(sigInfo)
    {}

#ifdef _DEBUG
    bool operator==(const CallSiteCacheData& cscd) const
    {
        return m_pMD == cscd.m_pMD
            && m_sigInfo == cscd.m_sigInfo;
    }
#endif // _DEBUG
};

struct CachedItem
{
#ifdef _DEBUG
    CachedItemKind m_tag;
#endif // _DEBUG
    union
    {
        // m_tag == CIK_CallSite
        CallSiteCacheData*                  m_callSiteInfo;
        // m_tag == CIK_StaticField
        StaticFieldCacheEntry*              m_staticFieldAddr;
        // m_tag == CIK_InstanceField
        FieldDesc*                          m_instanceField;
        // m_tag == CIK_ClassHandle
        CORINFO_CLASS_HANDLE                m_clsHnd;
    } m_value;

    CachedItem() 
#ifdef _DEBUG
        : m_tag(CIK_Undefined)
#endif
    {}

#ifdef _DEBUG
    bool operator==(const CachedItem& ci)
    {
        if (m_tag != ci.m_tag) return false;
        switch (m_tag)
        {
        case CIK_CallSite:
            return *m_value.m_callSiteInfo == *ci.m_value.m_callSiteInfo;
        case CIK_StaticField: 
            return *m_value.m_staticFieldAddr == *ci.m_value.m_staticFieldAddr;
        case CIK_InstanceField: 
            return m_value.m_instanceField == ci.m_value.m_instanceField;
        case CIK_ClassHandle:
            return m_value.m_clsHnd == ci.m_value.m_clsHnd;
        default:
            return true;
        }
    }
#endif

    CachedItem(CallSiteCacheData* callSiteInfo)
#ifdef _DEBUG
        : m_tag(CIK_CallSite)
#endif
    {
        m_value.m_callSiteInfo = callSiteInfo;
    }

    CachedItem(StaticFieldCacheEntry* staticFieldAddr)
#ifdef _DEBUG
        : m_tag(CIK_StaticField)
#endif
    {
        m_value.m_staticFieldAddr = staticFieldAddr;
    }

    CachedItem(FieldDesc* instanceField)
#ifdef _DEBUG
        : m_tag(CIK_InstanceField)
#endif
    {
        m_value.m_instanceField = instanceField;
    }

    CachedItem(CORINFO_CLASS_HANDLE clsHnd)
#ifdef _DEBUG
        : m_tag(CIK_ClassHandle)
#endif
    {
        m_value.m_clsHnd = clsHnd;
    }
};


const char* eeGetMethodFullName(CEEInfo* info, CORINFO_METHOD_HANDLE hnd, const char** clsName = NULL);

// The per-InterpreterMethodInfo cache may map generic instantiation information to the
// cache for the current instantiation; when we find the right one the first time, we copy it
// here, so we only have to do the instantiation->cache lookup once.
typedef InterpreterCache<unsigned, CachedItem> ILOffsetToItemCache;
typedef InterpreterCache<size_t, ILOffsetToItemCache*> GenericContextToInnerCache;

#endif // DACCESS_COMPILE

// This is the information that the interpreter stub provides to the
// interpreter about the method being interpreted.
struct InterpreterMethodInfo
{
#if INTERP_PROFILE || defined(_DEBUG)
    const char*                 m_clsName;
    const char*                 m_methName;
#endif

    // Stub num for the current method under interpretation.
    int                         m_stubNum;

    // The method this info is relevant to.
    CORINFO_METHOD_HANDLE       m_method;

    // The module containing the method.
    CORINFO_MODULE_HANDLE       m_module;

    // Code pointer, size, and max stack usage.
    BYTE*                       m_ILCode;
    BYTE*                       m_ILCodeEnd;        // One byte past the last byte of IL. IL Code Size = m_ILCodeEnd - m_ILCode.

    // The CLR transforms delegate constructors, and may add up to this many
    // extra arguments.  This amount will be added to the IL's reported MaxStack to
    // get the "maxStack" value below, so we can use a uniform calling convention for
    // "DoCall".
    unsigned                    m_maxStack;

    unsigned                    m_ehClauseCount;

    // Used to implement arglist: an index into the ilArgs array where the argument pointed to is the VA sig cookie.
    unsigned                    m_varArgHandleArgNum;

    // The number of arguments.
    unsigned short              m_numArgs;

    // The number of local variables.
    unsigned short              m_numLocals;

    enum Flags
    {
        // Is the first argument a "this" pointer?
        Flag_hasThisArg,
        // If "m_hasThisArg" is true, indicates whether the type of this is an object pointer
        // or a byref.
        Flag_thisArgIsObjPtr,
        // Is there a return buffer argument?
        Flag_hasRetBuffArg,
        // Is the method a var arg method
        Flag_isVarArg,
        // Is the last argument a generic type context?
        Flag_hasGenericsContextArg,
        // Does the type have generic args?
        Flag_typeHasGenericArgs,
        // Does the method have generic args?
        Flag_methHasGenericArgs,
        // Is the method a "dead simple" getter (one that just reads a field?)
        Flag_methIsDeadSimpleGetter,
        // We recognize two forms of dead simple getters, one for "opt" and one for "dbg".  If it is
        // dead simple, is it dbg or opt?
        Flag_methIsDeadSimpleGetterIsDbgForm,
        Flag_Count,
    };

    typedef UINT16 FlagGroup;

    // The bitmask for a set of InterpreterMethodInfo::Flags.
    FlagGroup                       m_flags;

    template<int Flg>
    FlagGroup GetFlagBit() {
        // The static_assert below checks that FlagGroup is wide enough to hold all the flags.
        static_assert(sizeof(FlagGroup) * 8 >= Flag_Count, "error: bitset not large enough");
        return (1 << Flg);
    }

    // Get and set the value of a flag.
    template<int Flg>
    bool GetFlag() { return (m_flags & GetFlagBit<Flg>()) != 0; }
    template<int Flg>
    void SetFlag(bool b)
    {
        if (b) m_flags |= GetFlagBit<Flg>();
        else   m_flags &= (~GetFlagBit<Flg>());
    }
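
    // Example of the flag accessors (illustrative): initialization code might record a
    // "this" argument with
    //     SetFlag<Flag_hasThisArg>(true);
    // and the interpreter later tests it with
    //     GetFlag<Flag_hasThisArg>().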

    // This structure describes a local: its type and its offset.
    struct LocalDesc
    {
        InterpreterType m_typeStackNormal;
        InterpreterType m_type;
        unsigned m_offset;
    };

    // This structure describes an argument.  Much like a LocalDesc, but
    // "m_nativeOffset" contains the offset if the argument was passed using the system's native calling convention
    // (e.g., the calling convention for a JIT -> Interpreter call) whereas "m_directOffset" describes arguments passed
    // via a direct Interpreter -> Interpreter call.
    struct ArgDesc
    {
        InterpreterType m_typeStackNormal;
        InterpreterType m_type;
        short           m_nativeOffset;
        short           m_directOffset;
    };
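
    // Illustrative example: for a JIT -> Interpreter call, the stub lays arguments out
    // per the native calling convention, so argument "i" lives at
    // m_ilArgs[m_argDescs[i].m_nativeOffset]; a direct Interpreter -> Interpreter call
    // instead uses m_directOffset.  The two offsets can differ because (for example)
    // the native convention may pass some arguments in registers that the stub then
    // spills to the frame.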


    // This is an array of size at least "m_numArgs", such that entry "i" describes the "i'th"
    // arg in the "m_ilArgs" array passed to the intepreter: that is, the ArgDesc contains the type, stack-normal type,
    // and offset in the "m_ilArgs" array of that argument.  In addition, has extra entries if "m_hasGenericsContextArg"
    // and/or "m_hasRetBuffArg" are true, giving the offset of those arguments -- the offsets of those arguments
    // are in that order in the array.  (The corresponding types should be NativeInt.)
    ArgDesc*                    m_argDescs;

    // This is an array of size "m_numLocals", such that entry "i" describes the "i'th"
    // local : that is, the LocalDesc contains the type, stack-normal type, and, if the type 
    // is a large struct type, the offset in the local variable large-struct memory array.  
    LocalDesc*                  m_localDescs;

    // A bit map, with 1 bit per local, indicating whether it contains a pinning reference.
    char*                       m_localIsPinningRefBits;

    unsigned                    m_largeStructLocalSize;
    unsigned                    LocalMemSize()
    {
        return m_largeStructLocalSize + m_numLocals * sizeof(INT64);
    }
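
    // Worked example (illustrative): a method with m_numLocals == 3, one of which is a
    // 24-byte struct, has m_largeStructLocalSize == 24, so LocalMemSize() == 24 + 3 *
    // sizeof(INT64) == 48 bytes: three fixed-size slots plus the out-of-line storage
    // that the large struct's fixed-size slot points to.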

    // I will probably need more information about the return value, but for now...
    CorInfoType                 m_returnType;

    // The number of times this method has been interpreted.
    unsigned int               m_invocations;

#if INTERP_PROFILE
    UINT64                      m_totIlInstructionsExeced;
    unsigned                    m_maxIlInstructionsExeced;

    void RecordExecInstrs(unsigned instrs)
    {  
        m_totIlInstructionsExeced += instrs;
        if (instrs > m_maxIlInstructionsExeced)
        {
            m_maxIlInstructionsExeced = instrs;
        }
    }
#endif

// #ifndef DACCESS_COMPILE
    // Caching information.  Currently the only thing we cache is saved formats of MethodDescCallSites
    // at call instructions.
    // We use a "void*", because the actual type depends on the whether the method has
    // a dynamic generics context.  If so, this is a cache from the generic parameter to an
    // ILoffset->item cache; if not, it's a the ILoffset->item cache directly.
    void* m_methodCache;
// #endif // DACCESS_COMPILE

    InterpreterMethodInfo(CEEInfo* comp, CORINFO_METHOD_INFO* methInfo);

    void InitArgInfo(CEEInfo* comp, CORINFO_METHOD_INFO* methInfo, short* argOffsets_);

    void AllocPinningBitsIfNeeded();

    void SetPinningBit(unsigned locNum);
    bool GetPinningBit(unsigned locNum);

    CORINFO_METHOD_HANDLE GetPreciseGenericsContext(Object* thisArg, void* genericsCtxtArg);

#ifndef DACCESS_COMPILE
    // Gets the proper cache for a call to a method with the current InterpreterMethodInfo, with the given
    // "thisArg" and "genericsCtxtArg".  If "alloc" is true, will allocate the cache if necessary.
    ILOffsetToItemCache* GetCacheForCall(Object* thisArg, void* genericsCtxtArg, bool alloc = false);
#endif // DACCESS_COMPILE

    ~InterpreterMethodInfo();
};


// Expose some protected methods of CEEInfo.
class InterpreterCEEInfo: public CEEInfo
{
    CEEJitInfo m_jitInfo;
public:
    InterpreterCEEInfo(CORINFO_METHOD_HANDLE meth): CEEInfo((MethodDesc*)meth), m_jitInfo((MethodDesc*)meth, NULL, NULL, CORJIT_FLAGS::CORJIT_FLAG_SPEED_OPT) { m_pOverride = this; }
    
    // Certain methods are unimplemented by CEEInfo (they hit an assert).  They are implemented by CEEJitInfo, yet
    // don't seem to require any of the CEEJitInfo state we can't provide.  For those cases, delegate to the "partial"
    // CEEJitInfo m_jitInfo.
    void addActiveDependency(CORINFO_MODULE_HANDLE moduleFrom,CORINFO_MODULE_HANDLE moduleTo)
    {
        m_jitInfo.addActiveDependency(moduleFrom, moduleTo);
    }
};

extern INT64 F_CALL_CONV InterpretMethod(InterpreterMethodInfo* methInfo, BYTE* ilArgs, void* stubContext);
extern float F_CALL_CONV InterpretMethodFloat(InterpreterMethodInfo* methInfo, BYTE* ilArgs, void* stubContext);
extern double F_CALL_CONV InterpretMethodDouble(InterpreterMethodInfo* methInfo, BYTE* ilArgs, void* stubContext);

class Interpreter
{
    friend INT64 F_CALL_CONV InterpretMethod(InterpreterMethodInfo* methInfo, BYTE* ilArgs, void* stubContext);
    friend float F_CALL_CONV InterpretMethodFloat(InterpreterMethodInfo* methInfo, BYTE* ilArgs, void* stubContext);
    friend double F_CALL_CONV InterpretMethodDouble(InterpreterMethodInfo* methInfo, BYTE* ilArgs, void* stubContext);

    // This will be inlined into the bodies of the methods above
    static inline ARG_SLOT InterpretMethodBody(InterpreterMethodInfo* interpMethInfo, bool directCall, BYTE* ilArgs, void* stubContext);

    // The local frame size of the method being interpreted.
    static size_t GetFrameSize(InterpreterMethodInfo* interpMethInfo);

    // JIT the method if we've passed the threshold, or if "force" is true.
    static void JitMethodIfAppropriate(InterpreterMethodInfo* interpMethInfo, bool force = false);

    friend class InterpreterFrame;

public:
    // Return an interpreter stub for the given method.  That is, a stub that transforms the arguments from the native
    // calling convention to the interpreter convention, and provides the method descriptor, then calls the interpreter.
    // If "jmpCall" setting is true, then "ppInterpreterMethodInfo" must be provided and the GenerateInterpreterStub
    // will NOT generate a stub. Instead it will provide a MethodInfo that is initialized correctly after computing
    // arg descs.
    static CorJitResult GenerateInterpreterStub(CEEInfo* comp,
                                                CORINFO_METHOD_INFO* info, 
                                                /*OUT*/ BYTE **nativeEntry,
                                                /*OUT*/ ULONG *nativeSizeOfCode,
                                                InterpreterMethodInfo** ppInterpMethodInfo = NULL,
                                                bool jmpCall = false);

    // If "addr" is the start address of an interpreter stub, return the corresponding MethodDesc*,
    // else "NULL".
    static class MethodDesc* InterpretationStubToMethodInfo(PCODE addr);

    // A value to indicate that the cache has not been initialized (to distinguish it from NULL --
    // we've looked and it doesn't yet have a cache.)
#define UninitExecCache reinterpret_cast<ILOffsetToItemCache*>(0x1)

    // The "frameMemory" should be a pointer to a locally-allocated memory block
    // whose size is sufficient to hold the m_localVarMemory, the operand stack, and the
    // operand type stack.
    Interpreter(InterpreterMethodInfo* methInfo_, bool directCall_, BYTE* ilArgs_, void* stubContext_, BYTE* frameMemory) 
        : m_methInfo(methInfo_),
          m_interpCeeInfo(methInfo_->m_method),
          m_ILCodePtr(methInfo_->m_ILCode),
          m_directCall(directCall_),
          m_ilArgs(ilArgs_),
          m_stubContext(stubContext_),
          m_orOfPushedInterpreterTypes(0),
          m_largeStructOperandStack(NULL),
          m_largeStructOperandStackHt(0),
          m_largeStructOperandStackAllocSize(0),
          m_curStackHt(0),
          m_leaveInfoStack(),
          m_filterNextScan(0),
          m_filterHandlerOffset(0),
          m_filterExcILOffset(0),
          m_inFlightException(NULL),
          m_thisArg(NULL),
#ifdef USE_CHECKED_OBJECTREFS
          m_retBufArg(NULL),  // Initialize to NULL so we can safely declare protected.
#endif // USE_CHECKED_OBJECTREFS
          m_genericsCtxtArg(NULL),
          m_securityObject((Object*)NULL),
          m_args(NULL),
          m_argsSize(0),
          m_callThisArg(NULL), 
          m_structRetValITPtr(NULL),
#ifndef DACCESS_COMPILE
          // Means "uninitialized"
          m_thisExecCache(UninitExecCache), 
#endif
          m_constrainedFlag(false),
          m_readonlyFlag(false),
          m_locAllocData(NULL),
          m_preciseGenericsContext(NULL),
          m_functionPointerStack(NULL)
    {
        // We must zero the locals.
        memset(frameMemory, 0, methInfo_->LocalMemSize() + sizeof(GSCookie));

        // m_localVarMemory is below the fixed size slots, above the large struct slots.
        m_localVarMemory = frameMemory + methInfo_->m_largeStructLocalSize + sizeof(GSCookie);
        m_gsCookieAddr = (GSCookie*) (m_localVarMemory - sizeof(GSCookie));

        // Having zeroed the locals, we must now initialize, for each large struct local, the
        // fixed-size local slot to point to the corresponding large-struct local slot.
        for (unsigned i = 0; i < methInfo_->m_numLocals; i++)
        {
            if (methInfo_->m_localDescs[i].m_type.IsLargeStruct(&m_interpCeeInfo))
            {
                void* structPtr = ArgSlotEndianessFixup(reinterpret_cast<ARG_SLOT*>(FixedSizeLocalSlot(i)), sizeof(void**));
                *reinterpret_cast<void**>(structPtr) = LargeStructLocalSlot(i);
            }
        }
        frameMemory += methInfo_->LocalMemSize();
        frameMemory += sizeof(GSCookie);

#define COMBINE_OPSTACK_VAL_TYPE 0

#if COMBINE_OPSTACK_VAL_TYPE
        m_operandStackX = reinterpret_cast<OpStackValAndType*>(frameMemory);
        frameMemory += (methInfo_->m_maxStack * sizeof(OpStackValAndType));
#else
        m_operandStack = reinterpret_cast<INT64*>(frameMemory);
        frameMemory += (methInfo_->m_maxStack * sizeof(INT64));
        m_operandStackTypes = reinterpret_cast<InterpreterType*>(frameMemory);
#endif

        // If we have a "this" arg, save it in case we need it later.  (So we can
        // reliably get it even if the IL updates arg 0...)
        if (m_methInfo->GetFlag<InterpreterMethodInfo::Flag_hasThisArg>())
        {
            m_thisArg = *reinterpret_cast<Object**>(GetArgAddr(0));
        }

        unsigned extraArgInd = methInfo_->m_numArgs - 1;
        // We do these in the *reverse* of the order they appear in the array, so that we can conditionally process
        // the ones that are used.
        if (m_methInfo->GetFlag<InterpreterMethodInfo::Flag_hasGenericsContextArg>())
        {
            m_genericsCtxtArg = *reinterpret_cast<Object**>(GetArgAddr(extraArgInd));
            extraArgInd--;
        }
        if (m_methInfo->GetFlag<InterpreterMethodInfo::Flag_isVarArg>())
        {
            extraArgInd--;
        }
        if (m_methInfo->GetFlag<InterpreterMethodInfo::Flag_hasRetBuffArg>())
        {
            m_retBufArg = *reinterpret_cast<void**>(GetArgAddr(extraArgInd));
            extraArgInd--;
        }
    }
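
    // Sketch of the frame layout the constructor above carves out of "frameMemory"
    // (derived from the code; lowest addresses first):
    //
    //   [ large-struct local area : m_largeStructLocalSize bytes  ]  <- GetFrameBase()
    //   [ GSCookie                                                ]  <- m_gsCookieAddr
    //   [ fixed-size local slots  : m_numLocals * sizeof(INT64)   ]  <- m_localVarMemory
    //   [ operand stack           : m_maxStack value slots        ]
    //   [ operand type stack (when !COMBINE_OPSTACK_VAL_TYPE)     ]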

    ~Interpreter()
    {
        if (m_largeStructOperandStack != NULL)
        {
            delete[] m_largeStructOperandStack;
        }

        if (m_locAllocData != NULL)
        {
            delete m_locAllocData;
        }

        if (m_functionPointerStack != NULL)
        {
            delete[] m_functionPointerStack;
        }
    }

    // Called during EE startup to initialize locks and other generic resources.
    static void Initialize();

    // Called during stub generation to initialize compiler-specific resources.
    static void InitializeCompilerStatics(CEEInfo* info);

    // Called during EE shutdown to destroy locks and release generic resources.
    static void Terminate();

    // Returns true iff "stackPtr" can only be in younger frames than "this".  (On a downwards-
    // growing stack, it is less than the smallest local address of "this".)
    bool IsInCalleesFrames(void* stackPtr);

    MethodDesc* GetMethodDesc() { return reinterpret_cast<MethodDesc*>(m_methInfo->m_method); }

#if INTERP_ILSTUBS
    void*      GetStubContext() { return m_stubContext; }
    void*      GetStubContextAddr() { return &m_stubContext; }
#endif

    OBJECTREF* GetAddressOfSecurityObject() { return &m_securityObject; }
 
    void*      GetParamTypeArg() { return m_genericsCtxtArg; }

private:
    // Architecture-dependent helpers.
    inline static unsigned short NumberOfIntegerRegArgs();

    // Wrapper for ExecuteMethod that does an O(1) alloca for both jmpCall and normal calls.  If *pDoJmpCall is true, this method also resolves the call token into pResolvedCallToken.
    static
    ARG_SLOT ExecuteMethodWrapper(struct InterpreterMethodInfo* interpMethInfo, bool directCall, BYTE* ilArgs, void* stubContext, bool* pDoJmpCall, CORINFO_RESOLVED_TOKEN* pResolvedCallToken);

    // Execute the current method, and set *retVal to the return value, if any.
    void ExecuteMethod(ARG_SLOT* retVal, bool* pDoJmpCall, unsigned* pJumpCallToken);

    // Fetches the monitor for static methods by asking cee info. Returns the monitor
    // object.
    AwareLock* GetMonitorForStaticMethod();

    // Synchronized methods have to call monitor enter and exit at the entry and exits of the
    // method.
    void DoMonitorEnterWork();
    void DoMonitorExitWork();

    // Determines if the current exception is handled by the current method.  If so,
    // returns true and sets the interpreter state to start executing in the appropriate handler.
    bool MethodHandlesException(OBJECTREF orThrowable);

    // Assumes that "ilCode" is the first instruction in a method, whose code is of size "codeSize".
    // Returns "false" if this method has no loops; if it returns "true", it might have a loop.
    static bool MethodMayHaveLoop(BYTE* ilCode, unsigned codeSize);

    // Do anything that needs to be done on a backwards branch (e.g., GC poll).
    // Assumes that "offset" is the delta between the current code pointer and the post-branch pointer;
    // obviously, it will be negative.
    void BackwardsBranchActions(int offset);

    // Expects "interp0" to be the address of the interpreter object being scanned.
    static void GCScanRoots(promote_func* pf, ScanContext* sc, void* interp0);

    // The above calls this instance method.
    void GCScanRoots(promote_func* pf, ScanContext* sc);

    // Scan the root at "loc", whose type is "it", using "pf" and "sc".
    void GCScanRootAtLoc(Object** loc, InterpreterType it, promote_func* pf, ScanContext* sc,
                         bool pinningRef = false);

    // Scan the root at "loc", whose type is the value class "valueCls", using "pf" and "sc".
    void GCScanValueClassRootAtLoc(Object** loc, CORINFO_CLASS_HANDLE valueClsHnd, promote_func* pf, ScanContext* sc);

    // Asserts that "addr" is the start of the interpretation stub for "md".  Records this in a table,
    // to satisfy later calls to "InterpretationStubToMethodInfo."
    static void RecordInterpreterStubForMethodDesc(CORINFO_METHOD_HANDLE md, void* addr);

    struct ArgState
    {
        unsigned short numRegArgs;
        unsigned short numFPRegArgSlots;
        unsigned       fpArgsUsed;   // Bit per single-precision fp arg accounted for.
        short          callerArgStackSlots;
        short*         argOffsets;
        enum ArgRegStatus
        {
            ARS_IntReg,
            ARS_FloatReg,
            ARS_NotReg
        };
        ArgRegStatus*  argIsReg;

        ArgState(unsigned totalArgs) : 
            numRegArgs(0),
            numFPRegArgSlots(0), fpArgsUsed(0),
            callerArgStackSlots(0), 
            argOffsets(new short[totalArgs]),
            argIsReg(new ArgRegStatus[totalArgs])
        {
            for (unsigned i = 0; i < totalArgs; i++)
            {
                argIsReg[i] = ARS_NotReg;
                argOffsets[i] = 0;
            }
        }

#if defined(_ARM_)
        static const int MaxNumFPRegArgSlots = 16;
#elif defined(_ARM64_)
        static const int MaxNumFPRegArgSlots = 8;
#elif defined(_AMD64_)
        static const int MaxNumFPRegArgSlots = 4;
#endif

        ~ArgState()
        {
            delete[] argOffsets;
            delete[] argIsReg;
        }

        void AddArg(unsigned canonIndex, short numSlots = 1, bool noReg = false, bool twoSlotAlign = false);

        // By this call, argument "canonIndex" is declared to be a floating point argument, taking the given #
        // of slots.  Important that this be called in argument order.
        void AddFPArg(unsigned canonIndex, unsigned short numSlots, bool doubleAlign);

#if defined(_AMD64_)
        // We have a special function for AMD64 because the integer and floating-point argument registers overlap.  However, all
        // callers are expected to call AddArg/AddFPArg directly.
        void AddArgAmd64(unsigned canonIndex, unsigned short numSlots, bool isFloatingType);
#endif
    };

    typedef MapSHash<void*, CORINFO_METHOD_HANDLE> AddrToMDMap;
    static AddrToMDMap* s_addrToMDMap;
    static AddrToMDMap* GetAddrToMdMap();

    // In debug, we map to a pair, containing the Thread that inserted it, so we can assert that any given thread only
    // inserts one stub for a CORINFO_METHOD_HANDLE.
    struct MethInfo
    {
        InterpreterMethodInfo* m_info;
#ifdef _DEBUG
        Thread* m_thread;
#endif // _DEBUG
    };
    typedef MapSHash<CORINFO_METHOD_HANDLE, MethInfo> MethodHandleToInterpMethInfoPtrMap;
    static MethodHandleToInterpMethInfoPtrMap* s_methodHandleToInterpMethInfoPtrMap;
    static MethodHandleToInterpMethInfoPtrMap* GetMethodHandleToInterpMethInfoPtrMap();

    static InterpreterMethodInfo* RecordInterpreterMethodInfoForMethodHandle(CORINFO_METHOD_HANDLE md, InterpreterMethodInfo* methInfo);
    static InterpreterMethodInfo* MethodHandleToInterpreterMethInfoPtr(CORINFO_METHOD_HANDLE md);

public:
    static unsigned s_interpreterStubNum;
private:
    unsigned CurOffset()
    {
        assert(m_methInfo->m_ILCode <= m_ILCodePtr &&
                                       m_ILCodePtr < m_methInfo->m_ILCodeEnd);
        unsigned res = static_cast<unsigned>(m_ILCodePtr - m_methInfo->m_ILCode);
        return res;
    }

    // We've computed a branch target. Is the target in range? If not, throw an InvalidProgramException.
    // Otherwise, execute the branch by changing m_ILCodePtr.
    void ExecuteBranch(BYTE* ilTargetPtr)
    {
        if (m_methInfo->m_ILCode <= ilTargetPtr &&
                                    ilTargetPtr < m_methInfo->m_ILCodeEnd)
        {
            m_ILCodePtr = ilTargetPtr;
        }
        else
        {
            COMPlusThrow(kInvalidProgramException);
        }
    }
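
    // Illustrative use (hypothetical opcode handler, not shown in this header): a short
    // relative branch such as br.s computes its target from the signed offset byte that
    // follows the opcode, then validates and takes it here:
    //
    //     int delta = static_cast<INT8>(m_ILCodePtr[1]);
    //     ExecuteBranch(m_ILCodePtr + 2 + delta);  // 2 == opcode byte + offset byte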

    // Private fields:
    // 
    InterpreterMethodInfo* m_methInfo;
    InterpreterCEEInfo m_interpCeeInfo;

    BYTE*  m_ILCodePtr;

    bool   m_directCall;
    BYTE*  m_ilArgs;

    __forceinline InterpreterType GetArgType(unsigned argNum)
    {
        return m_methInfo->m_argDescs[argNum].m_type;
    }

    __forceinline InterpreterType GetArgTypeNormal(unsigned argNum)
    {
        return m_methInfo->m_argDescs[argNum].m_typeStackNormal;
    }

    __forceinline BYTE* GetArgAddr(unsigned argNum)
    {
        if (!m_directCall)
        {
#if defined(_AMD64_)
            // On AMD64, a reference to the struct is passed if its size exceeds the word size.
            // Dereference the arg to get to the ref of the struct.
            if (GetArgType(argNum).IsLargeStruct(&m_interpCeeInfo))
            {
                return *reinterpret_cast<BYTE**>(&m_ilArgs[m_methInfo->m_argDescs[argNum].m_nativeOffset]);
            }
#endif
            return &m_ilArgs[m_methInfo->m_argDescs[argNum].m_nativeOffset];
        }
        else
        {
            if (GetArgType(argNum).IsLargeStruct(&m_interpCeeInfo))
            {
                return *reinterpret_cast<BYTE**>(&m_ilArgs[m_methInfo->m_argDescs[argNum].m_directOffset]);
            }
            else
            {
                return &m_ilArgs[m_methInfo->m_argDescs[argNum].m_directOffset];
            }
        }
    }

    __forceinline MethodTable* GetMethodTableFromClsHnd(CORINFO_CLASS_HANDLE hnd)
    {
        TypeHandle th(hnd);
        return th.GetMethodTable();
    }

#ifdef FEATURE_HFA
    __forceinline BYTE* GetHFARetBuffAddr(unsigned sz)
    {
        // Round up to a double boundary:
        sz = ((sz + sizeof(double) - 1) / sizeof(double)) * sizeof(double);
        // We rely on the interpreter stub to have pushed "sz" bytes on its stack frame,
        // below m_ilArgs.
        return m_ilArgs - sz;
    }
#endif // FEATURE_HFA

    void*  m_stubContext;


    // Address of the GSCookie value in the current method's frame.
    GSCookie* m_gsCookieAddr;

    BYTE* GetFrameBase()
    {
        return (m_localVarMemory - sizeof(GSCookie) - m_methInfo->m_largeStructLocalSize);
    }
    // m_localVarMemory points to the boundary between the fixed-size slots for the locals
    // (positive offsets), and the full-sized slots for large struct locals (negative offsets).
    BYTE*  m_localVarMemory;
    INT64* FixedSizeLocalSlot(unsigned locNum)
    {
        return reinterpret_cast<INT64*>(m_localVarMemory) + locNum;
    }

    BYTE* LargeStructLocalSlot(unsigned locNum)
    {
        BYTE* base = GetFrameBase();
        BYTE* addr = base + m_methInfo->m_localDescs[locNum].m_offset;
        assert(IsInLargeStructLocalArea(addr));
        return addr;
    }

    bool IsInLargeStructLocalArea(void* addr)
    {
        void* base = GetFrameBase();
        return (base <= addr) && (addr < (static_cast<void*>(m_localVarMemory - sizeof(GSCookie))));
    }

    bool IsInLocalArea(void* addr)
    {
        void* base = GetFrameBase();
        return (base <= addr) && (addr < static_cast<void*>(reinterpret_cast<INT64*>(m_localVarMemory) + m_methInfo->m_numLocals));
    }

    // Ensures that the operand stack contains no pointers to large-struct local slots (by
    // copying the values out to locations allocated on the large struct stack).
    void OpStackNormalize();

    // The defining property of this word is: if the bottom two bits are not 0x3, then the current operand stack contains no pointers
    // to large-struct slots for locals.  Operationally, we achieve this by taking "OR" of the interpreter types of local variables that have been loaded onto the
    // operand stack -- if any have been large structs, they will have 0x3 as the low order bits of their interpreter type, and this will be
    // "sticky."  We may sometimes determine that no large struct local pointers are currently on the stack, and reset this word to zero.
    size_t m_orOfPushedInterpreterTypes;
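
    // Worked example (illustrative): pushing an INT local ORs in
    // CORINFO_TYPE_SHIFTED_INT (tag bits 0x0), leaving the word "clean"; pushing a
    // large-struct local ORs in its class handle with tag bits 0x3, so
    // (m_orOfPushedInterpreterTypes & 0x3) == 0x3 until OpStackNormalize() copies the
    // struct out and the word can be reset to zero.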

#if COMBINE_OPSTACK_VAL_TYPE
    struct OpStackValAndType
    {
        INT64 m_val;
        InterpreterType m_type;
        INT32 m_pad;
    };

    OpStackValAndType* m_operandStackX;
#else
    INT64* m_operandStack;
#endif

    template<typename T>
    __forceinline T OpStackGet(unsigned ind)
    {
        return *OpStackGetAddr<T>(ind);
    }

    template<typename T>
    __forceinline void OpStackSet(unsigned ind, T val)
    {
        *OpStackGetAddr<T>(ind) = val;
    }

#if COMBINE_OPSTACK_VAL_TYPE
    template<typename T>
    __forceinline T* OpStackGetAddr(unsigned ind)
    {
        return reinterpret_cast<T*>(ArgSlotEndianessFixup(reinterpret_cast<ARG_SLOT*>(&m_operandStackX[ind].m_val), sizeof(T)));
    }

    __forceinline void* OpStackGetAddr(unsigned ind, size_t sz)
    {
        return ArgSlotEndianessFixup(reinterpret_cast<ARG_SLOT*>(&m_operandStackX[ind].m_val), sz);
    }
#else
    template<typename T>
    __forceinline T* OpStackGetAddr(unsigned ind)
    {
        return reinterpret_cast<T*>(ArgSlotEndianessFixup(reinterpret_cast<ARG_SLOT*>(&m_operandStack[ind]), sizeof(T)));
    }

    __forceinline void* OpStackGetAddr(unsigned ind, size_t sz)
    {
        return ArgSlotEndianessFixup(reinterpret_cast<ARG_SLOT*>(&m_operandStack[ind]), sz);
    }
#endif

    __forceinline INT64 GetSmallStructValue(void* src, size_t sz)
    {
        assert(sz <= sizeof(INT64));

        INT64 ret = 0;
        memcpy(ArgSlotEndianessFixup(reinterpret_cast<ARG_SLOT*>(&ret), sz), src, sz);
        return ret;
    }

    BYTE*  m_largeStructOperandStack;
    size_t m_largeStructOperandStackHt;
    size_t m_largeStructOperandStackAllocSize;

    // Allocate "sz" bytes on the large struct operand stack, and return a pointer to where
    // the structure should be copied.
    void* LargeStructOperandStackPush(size_t sz);

    // Deallocate "sz" bytes from the large struct operand stack, unless the corresponding
    // operand stack value "fromAddr" is a pointer to a local variable.
    void LargeStructOperandStackPop(size_t sz, void* fromAddr);

    // Ensures that we can push a struct of size "sz" on the large struct operand stack.
    void LargeStructOperandStackEnsureCanPush(size_t sz);

#ifdef _DEBUG
    // Returns "true" iff the sum of sizes of large structures on the operand stack 
    // equals "m_largeStructOperandStackHt", which should be an invariant.
    bool LargeStructStackHeightIsValid();
#endif // _DEBUG

    // Returns "true" iff the "cit" is 'considered' a valid pointer type for the
    // architecture. For ex: nativeint/byref and for amd64 longs with loose rules.
    bool IsValidPointerType(CorInfoType cit);

#if !COMBINE_OPSTACK_VAL_TYPE
    InterpreterType* m_operandStackTypes;
#endif

#if COMBINE_OPSTACK_VAL_TYPE
#if USE_MACRO_FOR_OPSTACKACCESS
#define OpStackTypeGet(ind) m_operandStackX[ind].m_type
#define OpStackTypeSet(ind, it) m_operandStackX[ind].m_type = it
#else
    __forceinline InterpreterType OpStackTypeGet(unsigned ind)
    {
        return m_operandStackX[ind].m_type;
    }

    __forceinline void OpStackTypeSet(unsigned ind, InterpreterType it)
    {
        assert(IsStackNormalType(it.ToCorInfoType()));
        m_operandStackX[ind].m_type = it;
    }
#endif
#else
    __forceinline InterpreterType OpStackTypeGet(unsigned ind)
    {
        return m_operandStackTypes[ind];
    }

    __forceinline void OpStackTypeSet(unsigned ind, InterpreterType it)
    {
        assert(IsStackNormalType(it.ToCorInfoType()));
        m_operandStackTypes[ind] = it;
    }
#endif
    unsigned m_curStackHt;
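
    // A typed push, as opcode handlers perform it, combines the accessors above with the
    // stack height (a sketch; assumes the stack has room and that InterpreterType is
    // constructible from a CorInfoType):
    //
    //   OpStackSet<INT32>(m_curStackHt, 42);
    //   OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_INT));
    //   m_curStackHt++;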

    // These are used in searching for finally clauses when we 'leave' a try block:

    struct LeaveInfo
    {
        unsigned m_offset;      // The offset of the "leave" instruction in the try block whose finally blocks are being executed.
        BYTE*    m_target;      // The location the "leave" was jumping to -- where execution should resume after all finallys have been executed.
        unsigned m_nextEHIndex; // The index in the EH table at which the search for the next finally covering "m_offset" should resume.

        LeaveInfo(unsigned offset = 0, BYTE* target = NULL) : m_offset(offset), m_target(target), m_nextEHIndex(0) {}
    };
    // This is a stack of the currently in-force "leaves."  (Multiple leaves can be in
    // process when a try-finally occurs within a finally.)
    Stack<LeaveInfo> m_leaveInfoStack;

    // Used to track the next filter to scan in case the current
    // filter doesn't handle the exception.
    unsigned m_filterNextScan;

    // Used to record the handler offset for the current filter so it can be used during endfilter.
    unsigned m_filterHandlerOffset;

    // The actual offset at which the exception occurred for a filter that might possibly handle it.
    unsigned m_filterExcILOffset;

    // This is the exception to rethrow upon exiting the last finally.
    Object* m_inFlightException; // This must be scanned by GC.

    // Storing "this" and "typeCtxt" args if necessary.
    Object* m_thisArg;   // This must be scanned by GC.
    void*   m_retBufArg; // This must be scanned by GC: 
                         // if the caller is JITted, o.f = Foo(), for o.f a value type, retBuf may be ref o.f.
    void*   m_genericsCtxtArg;

    // Acquired variable for synchronized methods.
    unsigned char   m_monAcquired;

    // Holds the security object, for frames that require it.
    OBJECTREF m_securityObject;

    ARG_SLOT* m_args;
    InterpreterType* m_argTypes;
    unsigned  m_argsSize;

    void* m_callThisArg;

    // If "m_structRetValITPtr" is non-NULL, then "*m_structRetValITPtr" represents a struct type, and
    // "m_structRetValTempSpace" is a pointer to a value of that struct type, which must be scanned during GC.
    InterpreterType* m_structRetValITPtr;
    void* m_structRetValTempSpace;

#ifdef DACCESS_COMPILE
    void* m_thisExecCache;
#else  // DACCESS_COMPILE

    // The proper cache for the current method execution (or else UninitExecCache).
    ILOffsetToItemCache* m_thisExecCache;

    // Retrieve the ILOffset->Item cache for the generic instantiation (if any) of the
    // currently-executing method.  If "alloc" is true, allocate one if it's not there.
    ILOffsetToItemCache* GetThisExecCache(bool alloc)
    {
        if (m_thisExecCache == UninitExecCache || 
            (m_thisExecCache == NULL && alloc))
        {
            m_thisExecCache = m_methInfo->GetCacheForCall(m_thisArg, m_genericsCtxtArg, alloc);
        }
        assert(!alloc || m_thisExecCache != NULL);
        return m_thisExecCache;
    }
    
    // Cache that a call at "iloffset" has the given CallSiteCacheData "callInfo".
    void CacheCallInfo(unsigned iloffset, CallSiteCacheData* callInfo);

    // If there's a cached CORINFO_CALL_INFO for the call at the given IL offset, return it, else NULL.
    CallSiteCacheData* GetCachedCallInfo(unsigned iloffset);

    void CacheInstanceField(unsigned iloffset, FieldDesc* fld);
    FieldDesc* GetCachedInstanceField(unsigned iloffset);

    void CacheStaticField(unsigned iloffset, StaticFieldCacheEntry* pEntry);
    StaticFieldCacheEntry* GetCachedStaticField(unsigned iloffset);

    void CacheClassHandle(unsigned ilOffset, CORINFO_CLASS_HANDLE clsHnd);
    CORINFO_CLASS_HANDLE GetCachedClassHandle(unsigned iloffset);
#endif // DACCESS_COMPILE

#if INTERP_ILCYCLE_PROFILE
    // Cycles we want to deduct from the current instruction's cycle count; e.g.,
    // cycles spent in a callee.
    unsigned __int64 m_exemptCycles;
    unsigned __int64 m_startCycles;
    unsigned short   m_instr;

    void UpdateCycleCount();
#endif // INTERP_ILCYCLE_PROFILE

#ifdef _DEBUG

    // These collectively record all the interpreter method infos we've created.
    static InterpreterMethodInfo** s_interpMethInfos;
    static unsigned s_interpMethInfosAllocSize;
    static unsigned s_interpMethInfosCount;

    static void AddInterpMethInfo(InterpreterMethodInfo* methInfo);

    // Print any end-of-run summary information we've collected and want printed.

    // Both methods below require that "mi0" and "mi1" are actually "InterpreterMethodInfo*"s.

    // Returns -1, 0, or 1, depending on whether "mi0->m_invocations" is less than,
    // equal to, or greater than "mi1->m_invocations".
    static int _cdecl CompareMethInfosByInvocations(const void* mi0, const void* mi1);
#if INTERP_PROFILE
    // Returns 1, 0, or -1, depending on whether "mi0->m_totIlInstructionsExeced" is less than,
    // equal to, or greater than "mi1->m_totIlInstructionsExeced".  (Note that this yields a descending sort.)
    static int _cdecl CompareMethInfosByILInstrs(const void* mi0, const void* mi1);
#endif // INTERP_PROFILE
#endif // _DEBUG

    private:
    static ConfigDWORD s_PrintPostMortemFlag;

    public:
    static void PrintPostMortemData();

#if INTERP_TRACING
    private:
    // Returns a string name of the il operation at "ILCodePtr".
    static const char* ILOp(BYTE* ilCodePtr);
    static const char* ILOp1Byte(unsigned short ilInstrVal);
    static const char* ILOp2Byte(unsigned short ilInstrVal);

    // Prints a representation of the operand stack.
    void PrintOStack();

    // Prints a representation of the arguments.
    void PrintArgs();

    // Prints a representation of the locals.
    void PrintLocals();

    // Helper functions for the above:
    // Print the value at ostack position "index".
    void PrintOStackValue(unsigned index);

    // Print the value of the argument number "argNum".
    void PrintArgValue(unsigned argNum);

    // Requires that "valAddr" point to a location containing a value of type
    // "cit", and prints that value.
    void PrintValue(InterpreterType cit, BYTE* valAddr);

    public:
    static inline FILE* GetLogFile();
    private:
    static FILE*        s_InterpreterLogFile;
    static ConfigDWORD  s_DumpInterpreterStubsFlag;
    static ConfigDWORD  s_TraceInterpreterEntriesFlag;
    static ConfigDWORD  s_TraceInterpreterILFlag;
    static ConfigDWORD  s_TraceInterpreterOstackFlag;
    static ConfigDWORD  s_TraceInterpreterVerboseFlag;
    static ConfigDWORD  s_TraceInterpreterJITTransitionFlag;
    static ConfigDWORD  s_InterpreterStubMin;
    static ConfigDWORD  s_InterpreterStubMax;

    // The total number of method invocations.
    static LONG s_totalInvocations;
    // The total number of calls made by interpreted code.
    static LONG s_totalInterpCalls;
    static LONG s_totalInterpCallsToGetters;
    static LONG s_totalInterpCallsToDeadSimpleGetters;
    static LONG s_totalInterpCallsToDeadSimpleGettersShortCircuited;
    static LONG s_totalInterpCallsToSetters;
    static LONG s_totalInterpCallsToIntrinsics;
    static LONG s_totalInterpCallsToIntrinsicsUnhandled;

    enum ResolveTokenKind {
        RTK_Undefined,
        RTK_Constrained,
        RTK_NewObj,
        RTK_NewArr,
        RTK_LdToken,
        RTK_LdFtn,
        RTK_LdVirtFtn,
        RTK_SFldAddr,
        RTK_LdElem,
        RTK_Call,
        RTK_LdObj,
        RTK_StObj,
        RTK_CpObj,
        RTK_InitObj,
        RTK_IsInst,
        RTK_CastClass,
        RTK_MkRefAny,
        RTK_RefAnyVal,
        RTK_Sizeof,
        RTK_StElem,
        RTK_Box,
        RTK_Unbox,
        RTK_UnboxAny,
        RTK_LdFld,
        RTK_LdFldA,
        RTK_StFld,
        RTK_FindClass,
        RTK_CheckHandlesException,
        RTK_Count
    };
    static const char* s_tokenResolutionKindNames[RTK_Count];

    static LONG s_tokenResolutionOpportunities[RTK_Count];
    static LONG s_tokenResolutionCalls[RTK_Count];
#endif // INTERP_TRACING

#if INTERP_ILINSTR_PROFILE
    static unsigned short         s_ILInstrCategories[512];

    static int                   s_ILInstrExecs[256];
    static int                   s_ILInstrExecsByCategory[512];
#if INTERP_ILCYCLE_PROFILE
    static unsigned __int64       s_ILInstrCyclesByCategory[512];
#endif // INTERP_ILCYCLE_PROFILE

    static const unsigned         CountIlInstr2Byte = 0x22;
    static int                   s_ILInstr2ByteExecs[CountIlInstr2Byte];

#if INTERP_ILCYCLE_PROFILE
    static unsigned __int64       s_ILInstrCycles[512];
    // Aggregate call statistics gathered during cycle profiling.
    static unsigned __int64              s_callCycles;
    static unsigned                      s_calls;
#endif // INTERP_ILCYCLE_PROFILE
#endif // INTERP_ILINSTR_PROFILE

    // Non-debug-only statics.
    static ConfigMethodSet s_InterpretMeths;
    static ConfigMethodSet s_InterpretMethsExclude;
    static ConfigDWORD s_InterpretMethHashMin;
    static ConfigDWORD s_InterpretMethHashMax;
    static ConfigDWORD s_InterpreterJITThreshold;
    static ConfigDWORD s_InterpreterDoLoopMethodsFlag;
    static bool        s_InterpreterDoLoopMethods;
    static ConfigDWORD s_InterpreterUseCachingFlag;
    static bool        s_InterpreterUseCaching;
    static ConfigDWORD s_InterpreterLooseRulesFlag;
    static bool        s_InterpreterLooseRules;
    static CrstExplicitInit   s_methodCacheLock;
    static CrstExplicitInit   s_interpStubToMDMapLock;

    // True iff a "constrained" prefix has preceded a call.
    bool                   m_constrainedFlag;
    // True iff a "volatile" prefixe precedes a memory reference.
    bool                   m_volatileFlag;
    // If there has been a "constrained" prefix, this is initialized
    // with the token of the constraint class.
    CORINFO_RESOLVED_TOKEN m_constrainedResolvedToken;
    // True iff a "readonly" prefix has preceded a ldelema.
    bool                   m_readonlyFlag;

    // Data structures related to localloc.
    class LocAllocData
    {
        typedef void* PVoid;

        unsigned m_locAllocSize;   // The currently allocated # elements in m_locAllocs
        unsigned m_locAllocCurIdx; // Number of elements of m_locAllocs in use; 0 <= m_locAllocCurIdx <= m_locAllocSize.
        void** m_locAllocs;        // Always non-null in a constructed LocAllocData.
        static const unsigned DefaultAllocs = 1;

        unsigned EnsureIdx()
        {
            if (m_locAllocCurIdx == m_locAllocSize)
            {
                unsigned newSize = m_locAllocSize * 2;
                void** newLocAllocs = new PVoid[newSize];
                for (unsigned j = 0; j < m_locAllocCurIdx; j++)
                {
                    newLocAllocs[j] = m_locAllocs[j];
                }
                m_locAllocSize = newSize; 
                delete[] m_locAllocs;
                m_locAllocs = newLocAllocs;
            }
            return m_locAllocCurIdx++;  // Note that we're returning the value before post-increment.
        }

    public:
        LocAllocData() :
            m_locAllocSize(DefaultAllocs),
            m_locAllocCurIdx(0)
        {
            m_locAllocs = new PVoid[DefaultAllocs];
            memset(m_locAllocs, 0, DefaultAllocs * sizeof(void*));
        }

        void* Alloc(NativeUInt sz)
        {
            unsigned idx = EnsureIdx();
            void* res = new char[sz];
            // We only *have* to do this if initlocals is set, but no harm in always doing it.
            memset(res, 0, sz);
            m_locAllocs[idx] = res;
            return res;
        }

        ~LocAllocData()
        {
            if (m_locAllocs != NULL)
            {
                for (unsigned i = 0; i < m_locAllocCurIdx; i++)
                {
                    delete[] reinterpret_cast<char*>(m_locAllocs[i]);
                }
            }
            delete[] m_locAllocs;
        }
    };

    LocAllocData* m_locAllocData;

    LocAllocData* GetLocAllocData()
    {
        if (m_locAllocData == NULL)
        {
            m_locAllocData = new LocAllocData();
        }
        return m_locAllocData;
    }
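
    // So the "localloc" opcode reduces to roughly the following (a sketch; the real handler
    // also pops the size from, and pushes the result onto, the operand stack):
    //
    //   NativeUInt sz = ...;                         // Popped from the operand stack.
    //   void* block = GetLocAllocData()->Alloc(sz);  // Zero-filled; freed when the LocAllocData dies.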

    // Using the LeaveInfo on top of "m_leaveInfoStack", search the current method's exception table,
    // starting at its "m_nextEHIndex", for the first finally clause of a try block that covers its
    // "m_offset".  If one is found, sets m_ILCodePtr to the start of that finally clause, updates
    // "m_nextEHIndex" to be the next index after the found clause in the exception table, and returns
    // true.  Otherwise, if no applicable finally clause is found, returns false.
    bool SearchForCoveringFinally();
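
    // Sketch of the "leave" protocol (hypothetical control flow; "Push"/"Pop" are assumed
    // operation names on the Stack type, and "leaveOffset"/"target" come from the opcode):
    //
    //   m_leaveInfoStack.Push(LeaveInfo(leaveOffset, target));
    //   if (!SearchForCoveringFinally())
    //   {
    //       m_leaveInfoStack.Pop();
    //       m_ILCodePtr = target;  // No covering finally: go straight to the leave target.
    //   }
    //   // Otherwise m_ILCodePtr now points at the finally; "endfinally" repeats the search.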

    void LdIcon(INT32 c);
    void LdLcon(INT64 c);
    void LdR4con(INT32 c);  // "c" is the bit pattern of the float constant.
    void LdR8con(INT64 c);  // "c" is the bit pattern of the double constant.

    void LdArg(int argNum);
    void LdArgA(int argNum);
    void StArg(int argNum);

    __forceinline void LdLoc(int locNum);
    void LdLocA(int locNum);
    __forceinline void StLoc(int locNum);

    // Requires that "*addr" contain a value of type "tp"; reads that value and
    // pushes it on the operand stack.
    __forceinline void LdFromMemAddr(void* addr, InterpreterType tp);

    // Requires that "addr" is the address of a local var or argument location.
    // Pops the value on the operand stack, assumed to be of the given "tp", and stores
    // in "*addr".
    __forceinline void StToLocalMemAddr(void* addr, InterpreterType tp);

    void LdNull();

    // This requires that the width of "T" is at least 4 bytes.
    template<typename T, CorInfoType cit>
    void LdInd();

    // This requires that the width of "T" is less than 4 bytes (and loads it as an INT32).
    template<typename T, bool isUnsigned>
    void LdIndShort();

    void LdIndFloat();

    // Use this for non-object-ref types, and StInd_Ref for object refs.
    template<typename T>
    void StInd();

    void StInd_Ref();

    // Load/store instance/static fields.

    // If non-NULL, we've determined the field to be loaded by other means (e.g., we've identified a
    // "dead simple" property getter).  In this case, use this FieldDesc*, otherwise, look up via token
    // or cache.
    void LdFld(FieldDesc* fld = NULL);  

    void LdFldA();
    void LdSFld();
    void LdSFldA();
    void StFld();
    void StSFld();

    // Helper method used by the static field methods above.
    // Requires that the code stream be pointing to a LDSFLD, LDSFLDA, or STSFLD.
    // The "accessFlgs" variable should indicate which, by which of the CORINFO_ACCESS_GET,
    // CORINFO_ACCESS_GET, and CORINFO_ACCESS_ADDRESS bits are set.
    // Sets *pStaticFieldAddr, which must be a pointer to memory protected as a byref) to the address of the static field, 
    // sets *pit to the InterpreterType of the field,
    // sets *pFldSize to the size of the field, and sets *pManagedMem to true iff the address is in managed memory (this is
    // false only if the static variable is an "RVA").  (Increments the m_ILCodePtr of 'this' by 5, the
    // assumed size of all the listed instructions.
    __forceinline void StaticFldAddr(CORINFO_ACCESS_FLAGS accessFlgs, 
                                     /*out (byref)*/void** pStaticFieldAddr,
                                     /*out*/InterpreterType* pit, /*out*/UINT* pFldSize, /*out*/bool* pManagedMem);

    // We give out the address of this as the address for an "intrinsic static Zero".
    static INT64 IntrinsicStaticZero;

    // The version above does caching; this version always does the work.  Returns "true" iff the results
    // are cacheable.
    bool StaticFldAddrWork(CORINFO_ACCESS_FLAGS accessFlgs, 
                           /*out (byref)*/void** pStaticFieldAddr,
                           /*out*/InterpreterType* pit, /*out*/UINT* pFldSize, /*out*/bool* pManagedMem);

    // Ensure that pMT has been initialized (including running its .cctor).
    static void EnsureClassInit(MethodTable* pMT);

    // Load/store array elements, get length.  "T" should be the element
    // type of the array (as indicated by a LDELEM opcode with a type); "IsObjType" should
    // be true iff T is an object type, and "cit" should be the stack-normal CorInfoType
    // to push on the type stack.
    template<typename T, bool IsObjType, CorInfoType cit>
    void LdElemWithType();

    template<typename T, bool IsObjType>
    void StElemWithType();

    // Load an array element, or, if "takeAddr", the address of an array element.
    template<bool takeAddr>
    void LdElem();
    void StElem();

    void InitBlk();
    void CpBlk();

    void Box();
    void UnboxAny();
    void Unbox();

    // Requires that operand stack location "i" contain a byref to a value of the struct type
    // "valCls".  Boxes the referent of that byref, and substitutes the resulting object pointer
    // at opstack location "i."
    void BoxStructRefAt(unsigned ind, CORINFO_CLASS_HANDLE valCls);

    void Throw();
    void Rethrow();
    void EndFilter();

    void LdLen();

    // Perform a normal (non-constructor) call.  The "virtualCall" argument indicates whether the
    // call should be virtual.
    void DoCall(bool virtualCall);

    // Perform a call.  For normal (non-constructor) calls, all optional args should be
    // NULL (the default).  For constructors, "thisArg" should be a this pointer (that is not on the operand stack),
    // and "callInfoPtr" should be the callInfo describing the constructor.  There's a special case here: for "VAROBJSIZE" constructors
    // (which currently are defined for String), we want to explicitly pass NULL to the (pseudo) constructor.  So passing
    // the special value "0x1" as "thisArg" will cause NULL to be pushed.
    void DoCallWork(bool virtualCall, void* thisArg = NULL, CORINFO_RESOLVED_TOKEN* methTokPtr = NULL, CORINFO_CALL_INFO* callInfoPtr = NULL);
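
    // E.g. (hypothetical call sites; "methTok" and "callInfo" are assumed locals): an ordinary
    // call is DoCallWork(false), while a String "VAROBJSIZE" constructor would pass the sentinel
    // so that NULL is pushed as "this":
    //
    //   DoCallWork(false, /*thisArg*/ (void*)0x1, &methTok, &callInfo);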

    // Do the call-indirect operation.
    void CallI();

    // Analyze the given method to see if it is a "dead simple" property getter:
    //   * if instance: ldarg.0, ldfld, ret.
    //   * if static: ldsfld, ret.
    // More complicated forms in DBG builds.  Sets "*offsetOfLd" to the offset of the ldfld or ldsfld instruction.
    static bool IsDeadSimpleGetter(CEEInfo* info, MethodDesc* pMD, size_t* offsetOfLd);
    static const unsigned ILOffsetOfLdFldInDeadSimpleInstanceGetterDbg = 2;
    static const unsigned ILOffsetOfLdFldInDeadSimpleInstanceGetterOpt = 1;
    static const unsigned ILOffsetOfLdSFldInDeadSimpleStaticGetter = 0;
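
    // In other words, the optimized instance-getter body the constants above describe is
    // (offsets inferred from the constants; ldarg.0 is one byte, ldfld is one byte plus a
    // four-byte token):
    //
    //   IL_0000: ldarg.0
    //   IL_0001: ldfld <field>   // = ILOffsetOfLdFldInDeadSimpleInstanceGetterOpt
    //   IL_0006: ret
    //
    // The static form begins with ldsfld at offset 0; the DBG form has the ldfld at offset 2.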

    // Here we handle a few intrinsic calls directly.
    void DoStringLength();
    void DoStringGetChar();
    void DoGetTypeFromHandle();
    void DoByReferenceCtor();
    void DoByReferenceValue();
    void DoSIMDHwAccelerated();

    // The proper generics context for use in resolving tokens ("precise" in the sense of including
    // generic instantiation information); lazily computed by GetPreciseGenericsContext() below.
    CORINFO_METHOD_HANDLE m_preciseGenericsContext;

    CORINFO_METHOD_HANDLE GetPreciseGenericsContext()
    {
        if (m_preciseGenericsContext == NULL)
        {
            m_preciseGenericsContext = m_methInfo->GetPreciseGenericsContext(m_thisArg, m_genericsCtxtArg);
        }
        return m_preciseGenericsContext;
    }

    // Process the "CONSTRAINED" prefix, recording the constraint on the "this" parameter.
    void RecordConstrainedCall();

    // Emit a barrier if m_volatileFlag is set, and reset the flag.
    void BarrierIfVolatile()
    {
        if (m_volatileFlag) 
        {
            MemoryBarrier(); m_volatileFlag = false;
        }
    }
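
    // For example (a sketch; "addr" is assumed): a "volatile." prefix decoded before
    // "ldind.i4" sets m_volatileFlag, and the load handler then does
    //
    //   INT32 v = *reinterpret_cast<INT32*>(addr);
    //   BarrierIfVolatile();  // Acquire-style fence after the volatile load.
    //   // ...push v on the operand stack...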

    enum BinaryArithOpEnum
    {
        BA_Add, BA_Sub, BA_Mul, BA_Div, BA_Rem
    };
    template<int op>
    __forceinline void BinaryArithOp();

    // "IsIntType" must be true iff "T" is an integral type, and "cit" must correspond to
    // "T".  "TypeIsUnchanged" implies that the proper type is already on the operand type stack.
    template<int op, typename T, bool IsIntType, CorInfoType cit, bool TypeIsUnchanged>
    __forceinline void BinaryArithOpWork(T val1, T val2);

    // "op" is a BinaryArithOpEnum above; actually, must be one "BA_Add", "BA_Sub", "BA_Mul".
    template<int op, bool asUnsigned>
    void BinaryArithOvfOp();

    template<int op, typename T, CorInfoType cit, bool TypeIsUnchanged>
    void BinaryArithOvfOpWork(T val1, T val2);

    INT32 RemFunc(INT32 v1, INT32 v2) { return v1 % v2; }
    INT64 RemFunc(INT64 v1, INT64 v2) { return v1 % v2; }
    float RemFunc(float v1, float v2); 
    double RemFunc(double v1, double v2);

    enum BinaryIntOpEnum
    {
        BIO_And, BIO_DivUn, BIO_Or, BIO_RemUn, BIO_Xor
    };
    template<int op>
    void BinaryIntOp();

    template<int op, typename T, CorInfoType cit, bool TypeIsUnchanged>
    void BinaryIntOpWork(T val1, T val2);

    template<int op>
    void ShiftOp();

    template<int op, typename T, typename UT>
    void ShiftOpWork(unsigned op1idx, CorInfoType cit2);

    void Neg();
    void Not();

    // "T" should be the type indicated by the opcode.
    // "TIsUnsigned" should be true if "T" is an unsigned type.
    // "TCanHoldPtr" should be true if the type can hold a pointer (true for NativeInt and Long).
    // "TIsShort" should be true if "T" is less wide than Int32.
    // "cit" should be the *stack-normal* type of the converted value; even if "TIsShort", "cit" should be CORINFO_TYPE_INT.
    template<typename T, bool TIsUnsigned, bool TCanHoldPtr, bool TIsShort, CorInfoType cit>
    void Conv();
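
    // For instance, the "conv.i1" opcode would instantiate this as (sketch):
    //
    //   Conv<INT8, /*TIsUnsigned*/ false, /*TCanHoldPtr*/ false, /*TIsShort*/ true, CORINFO_TYPE_INT>();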

    void ConvRUn();

    // This version is for conversion to integral types. 
    template<typename T, INT64 TMin, UINT64 TMax, bool TCanHoldPtr, CorInfoType cit>
    void ConvOvf();

    // As above, but treats the value being converted as unsigned.
    template<typename T, INT64 TMin, UINT64 TMax, bool TCanHoldPtr, CorInfoType cit>
    void ConvOvfUn();

    void LdObj();
    void LdObjValueClassWork(CORINFO_CLASS_HANDLE valueClsHnd, unsigned ind, void* src);
    void CpObj();
    void StObj();
    void InitObj();

    void LdStr();
    void NewObj();
    void NewArr();
    void IsInst();
    void CastClass();

    void MkRefany();
    void RefanyType();
    void RefanyVal();

    void CkFinite();

    void LdToken();
    void LdFtn();
    void LdVirtFtn();

    // The JIT/EE machinery for transforming delegate constructor calls requires the
    // CORINFO_METHOD_HANDLE of a method.  Usually, the method will be provided by a previous LDFTN/LDVIRTFTN.
    // In the JIT, we fold that previous instruction and the delegate constructor into a single tree, before morphing.
    // At that time, the loaded function is still in the form of a CORINFO_METHOD_HANDLE.  At morph time, the delegate
    // constructor call is transformed, looking into the argument trees to find this handle.  LDFTNs that are not removed
    // this way are morphed to have actual native code addresses.  To support both of these needs, LDFTN pushes the native
    // code address of a method (as required by uses that actually invoke it or store it in data structures), but it also
    // ensures that this parallel stack is allocated, and sets the corresponding index to hold the method handle.
    // When we call a delegate constructor, we find the method handle on this stack.
    CORINFO_METHOD_HANDLE* m_functionPointerStack;
    CORINFO_METHOD_HANDLE* GetFunctionPointerStack()
    {
        if (m_functionPointerStack == NULL)
        {
            m_functionPointerStack = new CORINFO_METHOD_HANDLE[m_methInfo->m_maxStack];
            for (unsigned i = 0; i < m_methInfo->m_maxStack; i++)
            {
                m_functionPointerStack[i] = NULL;
            }
        }
        return m_functionPointerStack;
    }
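
    // Sketch of how an LDFTN handler might use this (hypothetical; the parallel-stack index
    // mirrors the operand-stack slot of the pushed code address):
    //
    //   OpStackSet<void*>(m_curStackHt, codeAddr);          // The value real consumers need.
    //   GetFunctionPointerStack()[m_curStackHt] = methHnd;  // For delegate-ctor transformation.
    //   OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_NATIVEINT));
    //   m_curStackHt++;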

    void Sizeof();

    void LocAlloc();

#if INTERP_ILINSTR_PROFILE
    static void SetILInstrCategories();

    // This type is used in sorting IL instructions in a profile.
    struct InstrExecRecord
    {
        unsigned short m_instr;
        bool           m_is2byte;
        unsigned m_execs;
#if INTERP_ILCYCLE_PROFILE
        unsigned __int64 m_cycles;
#endif // INTERP_ILCYCLE_PROFILE

        static int _cdecl Compare(const void* v0, const void* v1)
        {
            InstrExecRecord* iep0 = (InstrExecRecord*)v0;
            InstrExecRecord* iep1 = (InstrExecRecord*)v1;
#if INTERP_ILCYCLE_PROFILE
            if (iep0->m_cycles > iep1->m_cycles) return -1;
            else if (iep0->m_cycles == iep1->m_cycles) return 0;
            else return 1;
#else
            if (iep0->m_execs > iep1->m_execs) return -1;
            else if (iep0->m_execs == iep1->m_execs) return 0;
            else return 1;
#endif // INTERP_ILCYCLE_PROFILE
        }
    };
    // Prints the given array "recs", assumed to already be sorted.
    static void PrintILProfile(InstrExecRecord* recs, unsigned totInstrs
#if INTERP_ILCYCLE_PROFILE
                                 , unsigned __int64 totCycles
#endif // INTERP_ILCYCLE_PROFILE
                                 );
#endif // INTERP_ILINSTR_PROFILE

    static size_t GetTypedRefSize(CEEInfo* info);
    static CORINFO_CLASS_HANDLE GetTypedRefClsHnd(CEEInfo* info);
    static InterpreterType GetTypedRefIT(CEEInfo* info);

    OBJECTREF TypeHandleToTypeRef(TypeHandle* pth);

    CorInfoType GetTypeForPrimitiveValueClass(CORINFO_CLASS_HANDLE clsHnd);

    static bool s_initialized;
    static bool s_compilerStaticsInitialized;

    // This is the class handle for the struct type TypedRef (aka "Refany").
    static CORINFO_CLASS_HANDLE s_TypedRefClsHnd;
    // This is the InterpreterType for the struct type TypedRef (aka "Refany").
    static InterpreterType s_TypedRefIT;
    // And this is the size of that struct.
    static size_t s_TypedRefSize;

    // This returns the class corresponding to the token, of kind "tokKind", at "codePtr".  If resolving
    // it requires any runtime lookup via a generics context parameter, performs that lookup.
    CORINFO_CLASS_HANDLE GetTypeFromToken(BYTE* codePtr, CorInfoTokenKind tokKind  InterpTracingArg(ResolveTokenKind rtk));

    // Calls m_interpCeeInfo.resolveToken.
    inline void ResolveToken(CORINFO_RESOLVED_TOKEN* resTok, mdToken token, CorInfoTokenKind tokenType InterpTracingArg(ResolveTokenKind rtk));

    inline FieldDesc* FindField(unsigned metaTok InterpTracingArg(ResolveTokenKind rtk));
    inline CORINFO_CLASS_HANDLE FindClass(unsigned metaTok InterpTracingArg(ResolveTokenKind rtk));

    enum CompareOpEnum
    {
        CO_EQ, CO_GT, CO_GT_UN, CO_LT, CO_LT_UN
    };

    // Making these next two inline functions (by taking the template arg as a
    // "real" arg) does not help.
    template<int compOp>
    void CompareOp();

    // Requires that m_curStackHt is at least op1Idx + 2.
    // Returns the result (0 or 1) of the comparison "opStack[op1Idx] op opStack[op1Idx + 1]".
    template<int compOp>
    INT32 CompareOpRes(unsigned op1Idx);

    // Making this inline, by making its arguments real arguments and using
    // __forceinline, didn't result in a material difference.
    template<bool val, int targetLen>
    void BrOnValue();

    // A worker function for BrOnValue.  Assumes that "shouldBranch" indicates whether
    // a branch should be taken, and that "targetLen" is the length of the branch offset (1 or 4).
    // Updates "m_ILCodePtr" to the branch target if "shouldBranch" is true, or else to
    // the next instruction (+ 1 + targetLen).
    __forceinline void BrOnValueTakeBranch(bool shouldBranch, int targetLen);
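
    // A sketch consistent with the contract above (assuming m_ILCodePtr points at the
    // branch opcode when this is called; IL branch offsets are relative to the next instruction):
    //
    //   INT32 delta = (targetLen == 1) ? getI1(m_ILCodePtr + 1) : getI4LittleEndian(m_ILCodePtr + 1);
    //   m_ILCodePtr += 1 + targetLen + (shouldBranch ? delta : 0);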

    template<int compOp, bool reverse, int targetLen>
    void BrOnComparison();

    inline static
    INT8 getI1(const BYTE * ptr)
    { return *(INT8*)ptr; }

    inline static
    UINT16 getU2LittleEndian(const BYTE * ptr)
    { return VAL16(*(UNALIGNED UINT16*)ptr); }

    inline static
    UINT32 getU4LittleEndian(const BYTE * ptr)
    { return VAL32(*(UNALIGNED UINT32*)ptr); }

    inline static
    INT32 getI4LittleEndian(const BYTE * ptr)
    { return VAL32(*(UNALIGNED INT32*)ptr); }

    inline static
    INT64 getI8LittleEndian(const BYTE * ptr)
    { return VAL64(*(UNALIGNED INT64*)ptr); }
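
    // Usage sketch: decoding the inline operand of "ldc.i4" (opcode 0x20, one byte,
    // followed by a 4-byte little-endian immediate):
    //
    //   LdIcon(getI4LittleEndian(m_ILCodePtr + 1));
    //   m_ILCodePtr += 5;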

    void VerificationError(const char* msg);

    void ThrowDivideByZero();
    void ThrowSysArithException();
    void ThrowNullPointerException();
    void ThrowOverflowException();
    void ThrowArrayBoundsException();
    void ThrowInvalidCastException();
    void ThrowStackOverflow();
    void ThrowOnInvalidPointer(void* ptr);

#ifdef _DEBUG
    bool TOSIsPtr();
#endif

#if INTERP_TRACING
    // Code copied from eeinterface.cpp in "compiler".  Should be common...
    const char* eeGetMethodFullName(CORINFO_METHOD_HANDLE hnd);
#endif // INTERP_TRACING
};

#if defined(_X86_)
inline
unsigned short Interpreter::NumberOfIntegerRegArgs() { return 2; }
#elif defined(_AMD64_)
inline
unsigned short Interpreter::NumberOfIntegerRegArgs() { return 4; }
#elif defined(_ARM_)
inline
unsigned short Interpreter::NumberOfIntegerRegArgs() { return 4; }
#elif defined(_ARM64_)
inline
unsigned short Interpreter::NumberOfIntegerRegArgs() { return 8; }
#else
#error Unsupported architecture.
#endif

#endif  // INTERPRETER_H_DEFINED