author    Jiyoung Yun <jy910.yun@samsung.com>  2017-02-10 20:35:12 +0900
committer Jiyoung Yun <jy910.yun@samsung.com>  2017-02-10 20:35:12 +0900
commit    4b11dc566a5bbfa1378d6266525c281b028abcc8 (patch)
tree      b48831a898906734f8884d08b6e18f1144ee2b82 /Documentation/design-docs
parent    db20f3f1bb8595633a7e16c8900fd401a453a6b5 (diff)
Imported Upstream version 1.0.0.9910 (tag: upstream/1.0.0.9910)
Diffstat (limited to 'Documentation/design-docs')
-rw-r--r--  Documentation/design-docs/finally-optimizations.md  | 487
-rw-r--r--  Documentation/design-docs/tailcalls-with-helpers.md | 460
2 files changed, 947 insertions, 0 deletions
diff --git a/Documentation/design-docs/finally-optimizations.md b/Documentation/design-docs/finally-optimizations.md
new file mode 100644
index 0000000000..d35d5a47bd
--- /dev/null
+++ b/Documentation/design-docs/finally-optimizations.md
@@ -0,0 +1,487 @@
+Finally Optimizations
+=====================
+
+In MSIL, a try-finally is a construct where a block of code
+(the finally) is guaranteed to be executed after control leaves a
+protected region of code (the try) either normally or via an
+exception.
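+
+For example (a minimal illustration), in this C# snippet the finally
+runs whether `Parse` returns normally or `int.Parse` throws:
+```C#
+static int Parse(string s) {
+    try {
+        return int.Parse(s);   // may exit normally or via an exception
+    }
+    finally {
+        Console.WriteLine("finally runs on both kinds of exit");
+    }
+}
+```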
+
+In RyuJit a try-finally is currently implemented by transforming the
+finally into a local function that is invoked via jitted code at normal
+exits from the try block and is invoked via the runtime for exceptional
+exits from the try block.
+
+For x86 the local function is simply a part of the method and shares
+the same stack frame with the method. For other architectures the
+local function is promoted to a potentially separable "funclet"
+which is almost like a regular function with a prolog and epilog. A
+custom calling convention gives the funclet access to the parent stack
+frame.
+
+In this proposal we outline three optimizations for finallys: removing
+empty trys, removing empty finallys and finally cloning.
+
+Empty Finally Removal
+---------------------
+
+An empty finally is one that has no observable effect. These often
+arise from `foreach` or `using` constructs (which induce a
+try-finally) where the cleanup method called in the finally does
+nothing. Often, after inlining, the empty finally is readily apparent.
+
+For example, this snippet of C# code
+```C#
+static int Sum(List<int> x) {
+ int sum = 0;
+ foreach(int i in x) {
+ sum += i;
+ }
+ return sum;
+}
+```
+produces the following jitted code:
+```asm
+; Successfully inlined Enumerator[Int32][System.Int32]:Dispose():this
+; (1 IL bytes) (depth 1) [below ALWAYS_INLINE size]
+G_M60484_IG01:
+ 55 push rbp
+ 57 push rdi
+ 56 push rsi
+ 4883EC50 sub rsp, 80
+ 488D6C2460 lea rbp, [rsp+60H]
+ 488BF1 mov rsi, rcx
+ 488D7DD0 lea rdi, [rbp-30H]
+ B906000000 mov ecx, 6
+ 33C0 xor rax, rax
+ F3AB rep stosd
+ 488BCE mov rcx, rsi
+ 488965C0 mov qword ptr [rbp-40H], rsp
+
+G_M60484_IG02:
+ 33C0 xor eax, eax
+ 8945EC mov dword ptr [rbp-14H], eax
+ 8B01 mov eax, dword ptr [rcx]
+ 8B411C mov eax, dword ptr [rcx+28]
+ 33D2 xor edx, edx
+ 48894DD0 mov gword ptr [rbp-30H], rcx
+ 8955D8 mov dword ptr [rbp-28H], edx
+ 8945DC mov dword ptr [rbp-24H], eax
+ 8955E0 mov dword ptr [rbp-20H], edx
+
+G_M60484_IG03:
+ 488D4DD0 lea rcx, bword ptr [rbp-30H]
+ E89B35665B call Enumerator[Int32][System.Int32]:MoveNext():bool:this
+ 85C0 test eax, eax
+ 7418 je SHORT G_M60484_IG05
+
+; Body of foreach loop
+
+G_M60484_IG04:
+ 8B4DE0 mov ecx, dword ptr [rbp-20H]
+ 8B45EC mov eax, dword ptr [rbp-14H]
+ 03C1 add eax, ecx
+ 8945EC mov dword ptr [rbp-14H], eax
+ 488D4DD0 lea rcx, bword ptr [rbp-30H]
+ E88335665B call Enumerator[Int32][System.Int32]:MoveNext():bool:this
+ 85C0 test eax, eax
+ 75E8 jne SHORT G_M60484_IG04
+
+; Normal exit from the implicit try region created by `foreach`
+; Calls the finally to dispose of the iterator
+
+G_M60484_IG05:
+ 488BCC mov rcx, rsp
+ E80C000000 call G_M60484_IG09 // call to finally
+
+G_M60484_IG06:
+ 90 nop
+
+G_M60484_IG07:
+ 8B45EC mov eax, dword ptr [rbp-14H]
+
+G_M60484_IG08:
+ 488D65F0 lea rsp, [rbp-10H]
+ 5E pop rsi
+ 5F pop rdi
+ 5D pop rbp
+ C3 ret
+
+; Finally funclet. Note it simply sets up and then tears down a stack
+; frame. The dispose method was inlined and is empty.
+
+G_M60484_IG09:
+ 55 push rbp
+ 57 push rdi
+ 56 push rsi
+ 4883EC30 sub rsp, 48
+ 488B6920 mov rbp, qword ptr [rcx+32]
+ 48896C2420 mov qword ptr [rsp+20H], rbp
+ 488D6D60 lea rbp, [rbp+60H]
+
+G_M60484_IG10:
+ 4883C430 add rsp, 48
+ 5E pop rsi
+ 5F pop rdi
+ 5D pop rbp
+ C3 ret
+```
+
+In such cases the try-finally can be removed, leading to code like the following:
+```asm
+G_M60484_IG01:
+ 57 push rdi
+ 56 push rsi
+ 4883EC38 sub rsp, 56
+ 488BF1 mov rsi, rcx
+ 488D7C2420 lea rdi, [rsp+20H]
+ B906000000 mov ecx, 6
+ 33C0 xor rax, rax
+ F3AB rep stosd
+ 488BCE mov rcx, rsi
+
+G_M60484_IG02:
+ 33F6 xor esi, esi
+ 8B01 mov eax, dword ptr [rcx]
+ 8B411C mov eax, dword ptr [rcx+28]
+ 48894C2420 mov gword ptr [rsp+20H], rcx
+ 89742428 mov dword ptr [rsp+28H], esi
+ 8944242C mov dword ptr [rsp+2CH], eax
+ 89742430 mov dword ptr [rsp+30H], esi
+
+G_M60484_IG03:
+ 488D4C2420 lea rcx, bword ptr [rsp+20H]
+ E8A435685B call Enumerator[Int32][System.Int32]:MoveNext():bool:this
+ 85C0 test eax, eax
+ 7414 je SHORT G_M60484_IG05
+
+G_M60484_IG04:
+ 8B4C2430 mov ecx, dword ptr [rsp+30H]
+ 03F1 add esi, ecx
+ 488D4C2420 lea rcx, bword ptr [rsp+20H]
+ E89035685B call Enumerator[Int32][System.Int32]:MoveNext():bool:this
+ 85C0 test eax, eax
+ 75EC jne SHORT G_M60484_IG04
+
+G_M60484_IG05:
+ 8BC6 mov eax, esi
+
+G_M60484_IG06:
+ 4883C438 add rsp, 56
+ 5E pop rsi
+ 5F pop rdi
+ C3 ret
+```
+
+Empty finally removal is unconditionally profitable: it should always
+reduce code size and improve code speed.
+
+Empty Try Removal
+---------------------
+
+If the try region of a try-finally is empty, and the jitted code will
+execute on a runtime that does not protect finally execution from
+thread abort, then the try-finally can be replaced with just the
+content of the finally.
+
+Empty trys with non-empty finallys often exist in code that must run
+under both thread-abort aware and non-thread-abort aware runtimes. In
+the former case the placement of cleanup code in the finally ensures
+that the cleanup code will execute fully. But if thread abort is not
+possible, the extra protection offered by the finally is not needed.
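+
+A typical shape (a minimal hand-written illustration, not taken from
+any particular library) looks like this:
+```C#
+static void SetFlag(ref bool flag) {
+    try { }
+    finally {
+        // placed in a finally so a thread abort cannot interrupt it
+        flag = true;
+    }
+}
+```
+On a runtime without thread abort, the jit can treat this as if it
+were written as just `flag = true;`.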
+
+Empty try removal looks for try-finallys where the try region does
+nothing except invoke the finally. There are currently two different
+EH implementation models, so the try screening has two cases:
+
+* callfinally thunks (x64/arm64): the try must be a single empty
+basic block that always jumps to a callfinally that is the first
+half of a callfinally/always pair;
+* non-callfinally thunks (x86/arm32): the try must be a
+callfinally/always pair where the first block is an empty callfinally.
+
+The screening then verifies that the callfinally identified above is
+the only callfinally for the try. No other callfinallys are expected
+because this try cannot have multiple leaves and its handler cannot be
+reached by nested exit paths.
+
+When the empty try is identified, the jit modifies the
+callfinally/always pair to branch to the handler, modifies the
+handler's return to branch directly to the continuation (the
+branch target of the second half of the callfinally/always pair),
+updates various status flags on the blocks, and then removes the
+try-finally region.
+
+Finally Cloning
+---------------
+
+Finally cloning is an optimization where the jit duplicates the code
+in the finally for one or more of the normal exit paths from the try,
+and has those exit points branch to the duplicated code directly,
+rather than calling the finally. This transformation allows for
+improved performance and optimization of the common case where the try
+completes without an exception.
+
+Finally cloning also allows hot/cold splitting of finally bodies: the
+cloned finally code covers the normal try exit paths (the hot cases)
+and can be placed in the main method region, while the original
+finally, now used largely or exclusively for exceptional cases (the
+cold cases), can be split off into the cold code region. Without
+cloning, RyuJit would always treat the finally as cold code.
+
+Finally cloning will increase code size, though often the size
+increase is mitigated somewhat by more compact code generation in the
+try body and streamlined invocation of the cloned finallys.
+
+Try-finally regions may have multiple normal exit points. For example
+the following `try` has two: one at the `return 3` and one at the try
+region end:
+
+```C#
+try {
+ if (p) return 3;
+ ...
+}
+finally {
+ ...
+}
+return 4;
+```
+
+Here the finally must be executed no matter how the try exits. So
+there are two normal exit paths from the try, both of which pass
+through the finally but which then diverge. The fact that some try
+regions can have multiple exits opens the potential for substantial
+code growth from finally cloning, and so leads to a choice point in
+the implementation:
+
+* Share the clone along all exit paths
+* Share the clone along some exit paths
+* Clone along all exit paths
+* Clone along some exit paths
+* Only clone along one exit path
+* Only clone when there is one exit path
+
+The shared clone option must essentially recreate or simulate the
+local call mechanism for the finally, though likely somewhat more
+efficiently. Each exit point must designate where control should
+resume once the shared finally has finished. For instance the jit
+could introduce a new local per try-finally to determine where the
+cloned finally should resume, and enumerate the possibilities using a
+small integer. The end of the cloned finally would then use a switch
+to determine what code to execute next. This has the downside of
+introducing unrealizable paths into the control flow graph.
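+
+As a rough sketch, the switch-based shared clone for the example above
+could be pictured as the following hand-expanded C# (illustrative
+only; the jit would perform this on its flow graph, not on source):
+```C#
+static int SharedCloneSketch(bool p) {
+    int resumePoint;
+    // former try body; the EH region itself has been removed
+    if (p) { resumePoint = 0; goto ClonedFinally; }
+    // ... remainder of the try body ...
+    resumePoint = 1;
+
+ClonedFinally:
+    // ... single shared copy of the finally body ...
+    switch (resumePoint) {
+        case 0: return 3;   // continuation for the early `return 3` exit
+        default: return 4;  // continuation for the end-of-try exit
+    }
+}
+```
+The final `switch` is the source of the unrealizable paths mentioned
+above: the flow graph appears to allow any exit to reach any
+continuation, even though each `resumePoint` value only ever pairs
+with one of them.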
+
+Cloning along all exit paths can potentially lead to large amounts of
+code growth.
+
+Cloning along some paths or only one path implies that some normal
+exit paths won't be as well optimized. Nonetheless cloning along one
+path was the choice made by JIT64 and the one we recommend for
+implementation. In particular we suggest only cloning along the end of
+try region exit path, so that any early exit will continue to invoke
+the funclet for finally cleanup (unless that exit happens to have the
+same post-finally continuation as the end try region exit, in which
+case it can simply jump to the cloned finally).
+
+One can imagine adaptive strategies. The size of the finally can
+be roughly estimated and the number of clones needed for full cloning
+readily computed. Selective cloning can be based on profile
+feedback or other similar mechanisms for choosing the profitable
+cases.
+
+The current implementation will clone the finally and retarget the
+last (largest IL offset) leave in the try region to the clone. Any
+other leave that ultimately transfers control to the same post-finally
+offset will also be modified to jump to the clone.
+
+Empirical studies have shown that most finallys are small. Thus to
+avoid excessive code growth, a crude size estimate is formed by
+counting the number of statements in the blocks that make up the
+finally. Any finally larger than 15 statements is not cloned. In our
+study this disqualified about 0.5% of all finallys from cloning.
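+
+A minimal sketch of that screen (`Block` here is a hypothetical
+stand-in for the jit's internal flow-graph blocks and their statement
+lists):
+```C#
+using System.Collections.Generic;
+
+// Hypothetical stand-in for a flow-graph block and its statements.
+class Block {
+    public List<string> Statements = new List<string>();
+}
+
+static class CloneScreen {
+    const int MaxFinallyStatements = 15; // crude size cap from the text above
+
+    // Returns false when the finally is too large to clone profitably.
+    public static bool IsSmallEnoughToClone(IEnumerable<Block> finallyBlocks) {
+        int count = 0;
+        foreach (Block block in finallyBlocks) {
+            count += block.Statements.Count;
+            if (count > MaxFinallyStatements)
+                return false;
+        }
+        return true;
+    }
+}
+```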
+
+### EH Nesting Considerations
+
+Finally cloning is also more complicated when the finally encloses
+other EH regions, since the clone will introduce copies of all these
+regions. While it is possible to implement cloning in such cases we
+propose to defer for now.
+
+Finally cloning is also a bit more complicated if the finally is
+enclosed by another finally region, so we likewise propose deferring
+support for this. (Seems like a rare enough thing but maybe not too
+hard to handle -- though possibly not worth it if we're not going to
+support the enclosing case).
+
+### Control-Flow and Other Considerations
+
+If the try never exits normally, then the finally can only be invoked
+in exceptional cases. There is no benefit to cloning since the cloned
+finally would be unreachable. We can detect a subset of such cases
+because there will be no call finally blocks.
+
+JIT64 does not clone finallys that contain a switch. We propose to
+do likewise. (Initially I did not include this restriction but
+hit a failing test case where the finally contained a switch. Might be
+worth a deeper look, though such cases are presumably rare.)
+
+If the finally never exits normally, then we presume it is cold code,
+and so will not clone.
+
+If the finally is marked as run rarely, we will not clone.
+
+Implementation Proposal
+-----------------------
+
+We propose that empty finally removal and finally cloning be run back
+to back, spliced into the phase list just after fgInline and
+fgAddInternal, and just before implicit by-ref and struct
+promotion. We want to run these early before a lot of structural
+invariants regarding EH are put in place, and before most
+other optimization, but run them after inlining
+(so empty finallys can be more readily identified) and after the
+addition of implicit try-finallys created by the jit. Empty finallys
+may arise later because of optimization, but this seems relatively
+uncommon.
+
+We will remove empty finallys first, then clone.
+
+Neither optimization will run when the jit is generating debuggable
+code or operating in min opts mode.
+
+### Empty Finally Removal (Sketch)
+
+Skip over methods that have no EH, are compiled with min opts, or
+where the jit is generating debuggable code.
+
+Walk the handler table, looking for try-finally (we could also look
+for and remove try-faults with empty faults, but those are presumably
+rare).
+
+If the finally is a single block and contains only a `retfilter`
+statement, then:
+
+* Retarget the callfinally(s) to jump always to the continuation blocks.
+* Remove the paired jump always block(s) (note we expect all finally
+calls to be paired since the empty finally returns).
+* For funclet EH models with finally target bits, clear the finally
+target from the continuations.
+* For non-funclet EH models only, clear out the GT_END_LFIN statement
+in the finally continuations.
+* Remove the handler block.
+* Reparent all directly contained try blocks to the enclosing try region
+or to the method region if there is no enclosing try.
+* Remove the try-finally from the EH table via `fgRemoveEHTableEntry`.
+
+After the walk, if any empty finallys were removed, revalidate the
+integrity of the handler table.
+
+### Finally Cloning (Sketch)
+
+Skip all methods if the runtime supports thread abort. More on
+this below.
+
+Skip over methods that have no EH, are compiled with min opts, or
+where the jit is generating debuggable code.
+
+Walk the handler table, looking for try-finally. If the finally is
+enclosed in a handler or encloses another handler, skip.
+
+Walk the finally body blocks. If any is BBJ_SWITCH, or if none
+is BBJ_EHFINALLYRET, skip cloning. If all blocks are RunRarely
+skip cloning. If the finally has more than 15 statements, skip
+cloning.
+
+Walk the try region from back to front (from largest to smallest IL
+offset). Find the last block in the try that invokes the finally. That
+will be the path that will invoke the clone.
+
+If the EH model requires callfinally thunks, and there are multiple
+thunks that invoke the finally, and the callfinally thunk along the
+clone path is not the first, move it to the front (this helps avoid
+extra jumps).
+
+Set the insertion point to just after the callfinally in the path (for
+thunk models) or the end of the try (for non-thunk models). Set up a
+block map. Clone the finally body using `fgNewBBinRegion` and
+`fgNewBBafter` to make the first and subsequent blocks, and
+`CloneBlockState` to fill in the block contents. Clear the handler
+region on the cloned blocks. Bail out if cloning fails. Mark the first
+and last cloned blocks with appropriate BBF flags. Patch up inter-clone
+branches and convert the returns into jumps to the continuation.
+
+Walk the callfinallys, retargeting the ones that return to the
+continuation so that they invoke the clone. Remove the paired always
+blocks. Clear the finally target bit and any GT_END_LFIN from the
+continuation.
+
+If all call finallys are converted, modify the region to be try/fault
+(internally EH_HANDLER_FAULT_WAS_FINALLY, so we can distinguish it
+later from "organic" try/faults). Otherwise leave it as a
+try/finally.
+
+Clear the catch type on the clone entry.
+
+### Thread Abort
+
+For runtimes that support thread abort (desktop), more work is
+required:
+
+* The cloned finally must be reported to the runtime. Likely this
+can trigger off of the BBF_CLONED_FINALLY_BEGIN/END flags.
+* The jit must maintain the integrity of the clone by not losing
+track of the blocks involved, and not allowing code to move in or
+out of the cloned region.
+
+Code Size Impact
+----------------
+
+Code size impact from finally cloning was measured for CoreCLR on
+Windows x64.
+
+```
+Total bytes of diff: 16158 (0.12 % of base)
+ diff is a regression.
+Total byte diff includes 0 bytes from reconciling methods
+ Base had 0 unique methods, 0 unique bytes
+ Diff had 0 unique methods, 0 unique bytes
+Top file regressions by size (bytes):
+ 3518 : Microsoft.CodeAnalysis.CSharp.dasm (0.16 % of base)
+ 1895 : System.Linq.Expressions.dasm (0.32 % of base)
+ 1626 : Microsoft.CodeAnalysis.VisualBasic.dasm (0.07 % of base)
+ 1428 : System.Threading.Tasks.Parallel.dasm (4.66 % of base)
+ 1248 : System.Linq.Parallel.dasm (0.20 % of base)
+Top file improvements by size (bytes):
+ -4529 : System.Private.CoreLib.dasm (-0.14 % of base)
+ -975 : System.Reflection.Metadata.dasm (-0.28 % of base)
+ -239 : System.Private.Uri.dasm (-0.27 % of base)
+ -104 : System.Runtime.InteropServices.RuntimeInformation.dasm (-3.36 % of base)
+ -99 : System.Security.Cryptography.Encoding.dasm (-0.61 % of base)
+57 total files with size differences.
+Top method regressions by size (bytes):
+ 645 : System.Diagnostics.Process.dasm - System.Diagnostics.Process:StartCore(ref):bool:this
+ 454 : Microsoft.CSharp.dasm - Microsoft.CSharp.RuntimeBinder.Semantics.ExpressionBinder:AdjustCallArgumentsForParams(ref,ref,ref,ref,ref,byref):this
+ 447 : System.Threading.Tasks.Dataflow.dasm - System.Threading.Tasks.Dataflow.Internal.SpscTargetCore`1[__Canon][System.__Canon]:ProcessMessagesLoopCore():this
+ 421 : Microsoft.CodeAnalysis.VisualBasic.dasm - Microsoft.CodeAnalysis.VisualBasic.Symbols.ImplementsHelper:FindExplicitlyImplementedMember(ref,ref,ref,ref,ref,ref,byref):ref
+ 358 : System.Private.CoreLib.dasm - System.Threading.TimerQueueTimer:Change(int,int):bool:this
+Top method improvements by size (bytes):
+ -2512 : System.Private.CoreLib.dasm - DomainNeutralILStubClass:IL_STUB_CLRtoWinRT():ref:this (68 methods)
+ -824 : Microsoft.CodeAnalysis.dasm - Microsoft.Cci.PeWriter:WriteHeaders(ref,ref,ref,ref,byref):this
+ -663 : System.Private.CoreLib.dasm - DomainNeutralILStubClass:IL_STUB_CLRtoWinRT(ref):int:this (17 methods)
+ -627 : System.Private.CoreLib.dasm - System.Diagnostics.Tracing.ManifestBuilder:CreateManifestString():ref:this
+ -546 : System.Private.CoreLib.dasm - DomainNeutralILStubClass:IL_STUB_WinRTtoCLR(long):int:this (67 methods)
+3014 total methods with size differences.
+```
+
+The largest growth is seen in `Process:StartCore`, which has 4
+try-finally constructs.
+
+Diffs generally show improved codegen in the try bodies with cloned
+finallys. However some of this improvement comes from more aggressive
+use of callee save registers, and this causes size inflation in the
+funclets (note finally cloning does not alter the number of
+funclets). So if funclet save/restore could be contained to registers
+used in the funclet, the size impact would be slightly smaller.
+
+There are also some instances where cloning relatively small finallys
+leads to large code size increases. xxx is one example.
diff --git a/Documentation/design-docs/tailcalls-with-helpers.md b/Documentation/design-docs/tailcalls-with-helpers.md
new file mode 100644
index 0000000000..e23b51e130
--- /dev/null
+++ b/Documentation/design-docs/tailcalls-with-helpers.md
@@ -0,0 +1,460 @@
+# The current way of handling tail-calls
+## Fast tail calls
+These are tail calls that are handled directly by the jitter with no runtime cooperation needed. They are limited to cases where:
+* The return value and the call target arguments are all either primitive types, reference types, or valuetypes with a single primitive-type or reference-type field
+* The aligned size of the call target arguments is less than or equal to the aligned size of the caller's arguments
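+
+For illustration (assuming the IL actually carries the `tail.` prefix;
+the classification below is only indicative, since the exact rules
+depend on the target ABI and calling convention):
+```C#
+// Small can be a fast tail call: primitive return, and the callee's
+// aligned argument area is no larger than the caller's.
+static long Small(long x, long y) => Add(y, x);
+
+// Big generally cannot: the callee needs a larger aligned argument
+// area than the caller received, so the helper-based path is used.
+static long Big(long x) => Add8(x, x, x, x, x, x, x, x);
+
+static long Add(long a, long b) => a + b;
+static long Add8(long a, long b, long c, long d,
+                 long e, long f, long g, long h)
+    => a + b + c + d + e + f + g + h;
+```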
+
+## Tail calls using a helper
+Tail calls in cases where we cannot perform the call in a simple way are implemented using a tail call helper. Here is a rough description of how it works:
+* For each tail call target, the jitter asks the runtime to generate an assembler argument copying routine. This routine reads the vararg list of arguments and places the arguments in their proper slots in the CONTEXT or on the stack. Together with the argument copying routine, the runtime also builds a list of offsets of references and byrefs, covering return values of reference type, structs returned in a hidden return buffer, and structs passed by ref. The GC layout data block is stored at the end of the argument copying thunk.
+* At the time of the tail call, the caller generates a vararg list of all arguments of the tail called function and then calls JIT_TailCall runtime function. It passes it the copying routine address, the target address and the vararg list of the arguments.
+* The JIT_TailCall then performs the following:
+ * It calls RtlVirtualUnwind twice to get the context of the caller of the caller of the tail call to simulate the effect of running epilog of the caller of the tail call and also its return.
+ * It prepares stack space for the callee stack arguments, an explicit helper TailCallFrame and also a CONTEXT structure in which the argument registers of the callee, the stack pointer and the target function address are set. If the tail call caller has enough space for the callee arguments and the TailCallFrame in its stack frame, that space is used directly for the callee arguments. Otherwise the stack arguments area is allocated at the top of the stack. This differs slightly when the tail call was made from another tail called function - the TailCallFrame already exists and so it is not recreated. The TailCallFrame also keeps a pointer to the list of GC reference offsets of the arguments and structure return buffer members. The stack walker during GC then uses that to ensure proper GC liveness of those references.
+ * It calls the copying routine to translate the arguments from the vararg list to the just reserved stack area and the context.
+ * If the stack arguments and TailCallFrame didn't fit into the caller's stack frame, they are now moved to their final location.
+ * RtlRestoreContext is used to start executing the callee.
+
+There are several issues with this approach:
+* It is expensive to port to new platforms
+ * Parsing the vararg list cannot be done in a portable way on Unix. Unlike on Windows, the list is not stored as a linear sequence of the parameter data bytes in memory; va_list on Unix is an opaque data type, and some of the parameters can be in registers and some in memory.
+ * Generating the copying asm routine needs to be done for each target architecture / platform differently. And it is also very complex, error prone and impossible to do on platforms where code generation at runtime is not allowed.
+* It is slower than it has to be
+ * The parameters are copied possibly twice - once from the vararg list to the stack and then one more time if there was not enough space in the caller's stack frame.
+ * RtlRestoreContext restores all registers from the CONTEXT structure, not just the subset that is really necessary for the functionality, so it results in additional unnecessary memory accesses.
+* Stack walking over the stack frames of the tail calls requires runtime assistance.
+
+# The new approach to tail calls using helpers
+## Objectives
+The new way of handling tail calls using helpers was designed with the following objectives:
+* It should be cheap to port to new platforms, architectures and code generators
+* It needs to work in both jitted and AOT compiled scenarios
+* It should support platforms where runtime code generation is not possible
+* The tail calls should be reasonably fast compared to regular calls with the same arguments
+* The tail calls should not be slower than the existing mechanism on Windows
+* No runtime assistance should be necessary for unwinding a stack with tail call frames on it
+* The stack should be unwindable at any spot during the tail calls to properly support sampling profilers and similar tools.
+* Stack walk during GC must be able to always correctly report GC references.
+* It should work in all cases except those where a tail call is not allowed as described in the ECMA 335 standard section III.2.4
+
+## Requirements
+* The code generator needs to be able to compile a tail call to a target as a call to a thunk with the same parameters as the target, but void return, followed by a jump to an assembler helper.
+
+## Implementation
+This section describes the helper functions and data structures that the tail calls use and also describes the tail call sequence step by step.
+### Helper functions
+The tail calls use the following thunks and helpers:
+* StoreArguments - this thunk stores the arguments into thread local storage together with the address of the corresponding CallTarget thunk and a descriptor of the locations and types of managed references in the stored arguments data. This thunk is generated as IL and compiled by the jitter or AOT compiler. There is one such thunk per tail call target.
+It has a signature that is compatible with the tail call target function except for the return type, which is void. But it is not identical: it takes the same arguments as the tail call target function, and additionally the "this" pointer and the generic context as explicit arguments if the tail call target requires them. Arguments of generic reference types are passed as "object" so that the StoreArguments thunk doesn't have to be generic.
+* CallTarget - this thunk gets the arguments buffer that was filled by the StoreArguments thunk, loads the arguments from the buffer, releases the buffer and calls the target function using calli. The signature used for the calli would ensure that all arguments including the optional hidden return buffer and the generic context are passed in the right registers / stack slots. Generic reference arguments will be specified as "object" in the signature so that the CallTarget doesn't have to be generic.
+The CallTarget thunk is also generated as IL and compiled by the jitter or AOT compiler. There is one such thunk per tail call target. This thunk has the same return type as the tail call target, or returns "object" if the return type of the tail call target is a generic reference type.
+* TailCallHelper - this is an assembler helper that is responsible for restoring the stack pointer to the location it had when the first function in a tail call chain was entered, and then jumping to the CallTarget thunk. This helper is common to all tail call targets.
+In the context of each tail call invocation, the TailCallHelper will be handled by the jitter as if it had the same return type as the tail call target. That means that if the tail call target needs a hidden return buffer for returning structs, the pointer to this buffer will be passed to the TailCallHelper the same way it would be passed to the tail call target. The TailCallHelper then passes this hidden argument to the CallTarget thunk.
+There will be two flavors of this helper, based on whether the tail call target needs a hidden return buffer or not:
+ * TailCallHelper
+ * TailCallHelper_RetBuf
+
+### Helper data structures
+The tail calls use the following data structures:
+* Thread local storage for arguments. It stores the arguments of a tail call for a short period of time between the StoreArguments and CallTarget calls.
+* Arguments GC descriptor - descriptor of locations and types of managed references in the arguments.
+* TailCallHelperStack - a per thread stack of helper entries that is used to determine whether a tail call is chained or not. Its entries are allocated as local variables in CallTarget thunks. Each entry contains:
+ * Stack pointer captured right before a call to a tail call target
+ * ChainCall flag indicating whether the CallTarget thunk should return after the call to the tail call target or whether it should execute its epilog and jump to TailCallHelper instead. The latter is used by the TailCallHelper to remove the stack frame of the CallTarget before making a tail call from a tail called function.
+ * Pointer to the next entry on the stack.
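+
+A minimal C# sketch of one such entry (the field layout is
+illustrative only; the real entries are locals of the generated
+CallTarget thunks):
+```C#
+unsafe struct TailCallHelperStackEntry {
+    public void* Sp;       // stack pointer captured before calling the target
+    public bool ChainCall; // tells CallTarget to unwind and jump to
+                           // TailCallHelper instead of returning
+    public TailCallHelperStackEntry* Next; // next entry on the per-thread stack
+}
+```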
+
+### Tail call sequence
+* The caller calls the StoreArguments thunk corresponding to the callee to store the pointer to the tail call target function, its arguments, their GC descriptors, optional "this" and generic context arguments and the corresponding CallTarget thunk address in a thread local storage.
+* The caller executes its epilog, restoring stack pointer and callee saved registers to their values when the caller was entered.
+* The caller jumps to the TailCallHelper. This function performs the following operations:
+ * Get the topmost TailCallHelperStack entry for the current thread.
+ * Check if the previous stack frame is a CallTarget thunk frame by comparing the stack pointer value stored in the TailCallHelperStack entry to the current CFA (call frame address). If it matches, it means that the previous stack frame belongs to a CallTarget thunk and so the tail call caller was also tail called.
+ * If the previous frame was a CallTarget thunk, its stack frame needs to be removed to ensure that the stack will not grow when tail calls are chained. Set the ChainCall flag in the TailCallHelperStack entry and return. That returns control to the CallTarget thunk, which checks the ChainCall flag and, since it is set, executes its epilog and jumps to the TailCallHelper again.
+ * If the previous frame was not a CallTarget thunk, get the address of the CallTarget thunk of the tailcall target from the arguments buffer and jump to it.
+* The CallTarget thunk function then does the following operations:
+ * Create local instance of TailCallHelperStack entry and store the current stack pointer value in it.
+ * Push the entry to the TailCallHelperStack of the current thread.
+ * Get the arguments buffer from the thread local storage, extract the regular arguments and the optional "this" and generic context arguments and the target function pointer. Release the buffer and call the target function. The frame of the CallTarget thunk ensures that the arguments of the target are GC protected until the target function returns or tail calls to another function.
+ * Pop the TailCallHelperStack entry from the TailCallHelperStack of the current thread.
+ * Check the ChainCall flag in the TailCallHelperStack entry. If it is set, run epilog and jump to the TailCallHelper.
+ * If the ChainCall flag is clear, it means that the last function in the tail call chain has returned. So return the return value of the target function.
+
+## Work that needs to be done to implement the new tail calls mechanism
+### JIT (compiler in the AOT scenario)
+* Modify compilation of tail calls with helper so that a tail call is compiled as a call to the StoreArguments thunk followed by the jump to the assembler TailCallHelper. In other words, the
+```
+tail. call/callvirt <method>
+ret
+```
+becomes
+```
+call/callvirt <StoreArguments thunk>
+tail. call <TailCallHelper>
+ret
+```
+### Runtime (compiler in the AOT scenario)
+* Add generation of the StoreArguments and CallTarget IL thunks to the runtime (compiler tool chain in the AOT scenario). As a possible optimization, in the AOT scenario the thunks can be generated by the compiler as native code directly, without the intermediate IL.
+* For the JIT scenario, add a new method to the JIT to EE interface to get the StoreArguments thunk method handle for a given target method and the TailCallHelper address.
+
+### Runtime in both scenarios
+* Add support for the arguments buffer, which means:
+ * Add functions to create, release and get the buffer for a thread
+ * Add support for GC scanning the arguments buffers.
+* Implement the TailCallHelper asm helper for all architectures
+### Debugging in both scenarios
+Ensure that stepping in a debugger works correctly. In CoreCLR, the TailCallStubManager needs to be updated accordingly.
+
+## Example code
+
+```C#
+struct S
+{
+ public S(long p1, long p2, long p3)
+ {
+ s1 = p1; s2 = p2; s3 = p3;
+ }
+
+ public long s1, s2, s3;
+}
+
+struct T
+{
+ public T(S s)
+ {
+ t1 = s.s1; t2 = s.s2; t3 = s.s3; t4 = 4;
+ }
+ public long t1, t2, t3, t4;
+}
+
+struct U
+{
+ public U(T t)
+ {
+ u1 = t.t1; u2 = t.t2; u3 = t.t3; u4 = t.t4; u5 = 5;
+ }
+ public long u1, u2, u3, u4, u5;
+}
+
+int D(U u)
+{
+ int local;
+ Console.WriteLine("In C, U = [{0}, {1}, {2}, {3}, {4}", u.u1, u.u2, u.u3, u.u4, u.u5);
+ return 1;
+}
+
+int C(T t)
+{
+ int local;
+ Console.WriteLine("In C");
+ U args = new U(t);
+ return tailcall D(args);
+}
+
+int B(S s)
+{
+ int local;
+ Console.WriteLine("In B");
+ T args = new T(s);
+ return tailcall C(args);
+}
+
+void A()
+{
+ S args = new S(1, 2, 3);
+ int result = B(args);
+ Console.WriteLine("Done, result = {0}\n", result);
+}
+```
+
+## Example code execution
+This section shows how the stack evolves during the execution of the example code above. Execution starts at function A, but the details below start at the interesting point where the first tail call is about to be made.
+### B is about to tail call C
+```
+* Return address of A
+* Callee saved registers and locals of A
+* Stack arguments of B
+* Return address of B
+* Callee saved registers and locals of B
+```
+### Arguments of C are stored in the thread local buffer, now we are in the TailCallHelper
+The callee saved registers and locals of B are not on the stack anymore.
+```
+* Return address of A
+* Callee saved registers and locals of A
+* Stack arguments of B
+* Return address of B
+```
+
+### In CallTarget thunk for C, about to call C
+The thunk will now extract parameters for C from the thread local storage and call C.
+```
+* Return address of A
+* Callee saved registers and locals of A
+* Stack arguments of B
+* Return address of B
+* Callee saved registers and locals of CallTarget thunk for C
+```
+### C is about to tail call D
+```
+* Return address of A
+* Callee saved registers and locals of A
+* Stack arguments of B
+* Return address of B
+* Callee saved registers and locals of CallTarget thunk for C
+* Stack arguments of C
+* Return address of C
+* Callee saved registers and locals of C
+```
+### Arguments of D are stored in the thread local buffer, now we are in the TailCallHelper
+The callee saved registers and locals of C are not on the stack anymore.
+But we still have the return address of C, stack arguments of C and callee saved registers and locals of CallTarget thunk for C on the stack.
+We need to remove them as well to prevent stack growing.
+The TailCallHelper detects that the previous stack frame was the frame of the CallTarget thunk for C, so it sets the ChainCall flag in the topmost TailCallHelperStackEntry and returns to the CallTarget thunk for C in order to let it clean up its stack frame.
+```
+* Return address of A
+* Callee saved registers and locals of A
+* Stack arguments of B
+* Return address of B
+* Callee saved registers and locals of CallTarget thunk for C
+* Stack arguments of C
+* Return address of C
+```
+### Returned to CallTarget thunk for C with ChainCall flag in the TailCallHelperStackEntry **set**
+The thunk checks the ChainCall flag and since it is set, it runs its epilog and then jumps to the TailCallHelper.
+```
+* Return address of A
+* Callee saved registers and locals of A
+* Stack arguments of B
+* Return address of B
+* Callee saved registers and locals of CallTarget thunk for C
+```
+### Back in TailCallHelper
+Now the stack is back in the state it was in when the previous tail call was made. Since the previous stack frame is not a CallTarget thunk frame, we just jump to the CallTarget thunk for D.
+```
+* Return address of A
+* Callee saved registers and locals of A
+* Stack arguments of B
+* Return address of B
+```
+### In CallTarget thunk for D, about to call D
+The thunk will now extract parameters for D from the thread local storage and call D.
+```
+* Return address of A
+* Callee saved registers and locals of A
+* Stack arguments of B
+* Return address of B
+* Callee saved registers and locals of CallTarget thunk for D
+```
+### In D
+We are in the last function of the chain, so after it does its work, it returns to its CallTarget thunk.
+```
+* Return address of A
+* Callee saved registers and locals of A
+* Stack arguments of B
+* Return address of B
+* Callee saved registers and locals of CallTarget thunk for D
+* Stack arguments of D
+* Return address of D
+* Callee saved registers and locals of D
+```
+### Returned to CallTarget thunk for D with ChainCall flag in the TailCallHelperStackEntry **clear**
+The thunk checks the ChainCall flag and, since it is clear, recognizes that we are now returning from the call chain, so it returns the result of D.
+```
+* Return address of A
+* Callee saved registers and locals of A
+* Stack arguments of B
+* Return address of B
+* Callee saved registers and locals of CallTarget thunk for D
+```
+### Returned to A
+We are back in A and we have the return value of the call chain.
+```
+* Return address of A
+* Callee saved registers and locals of A
+```
+
+## Example of thunks generated for a simple generic method
+
+```C#
+struct Point
+{
+ public int x;
+ public int y;
+ public int z;
+
+ public Point(int px, int py, int pz) { x = px; y = py; z = pz; }
+}
+
+class Foo
+{
+ public Point Test<T>(int x, T t) where T : class
+ {
+ Console.WriteLine("T: {0}", typeof(t));
+ return new Point(x, x, x);
+ }
+}
+```
+
+For tail calling the Test function, the IL helpers could look, for example, as follows:
+
+```IL
+.method public static void StoreArgumentsTest(native int target, object thisObj, native int genericContext, int x, object t) cil managed
+{
+ .maxstack 4
+ .locals init (
+ [0] native int buffer
+ )
+
+ call native int AllocateArgumentBuffer(56)
+ stloc.0
+
+ ldloc.0
+ ldc.i8 0x12345678 // pointer to the GC descriptor of the arguments buffer
+ stind.i
+ ldloc.0
+ sizeof native int
+ add
+ stloc.0
+
+ ldloc.0
+ ldftn Point CallTargetTest()
+ stind.i
+ ldloc.0
+ sizeof native int
+ add
+ stloc.0
+
+ ldloc.0
+ ldarg.1 // "thisObj"
+ stobj object
+ ldloc.0
+ sizeof object
+ add
+ stloc.0
+
+ ldloc.0
+ ldarg.2 // "genericContext"
+ stind.i
+ ldloc.0
+ sizeof native int
+ add
+ stloc.0
+
+ ldloc.0
+ ldarg.3 // "x"
+ stind.i4
+ ldloc.0
+ sizeof native int
+ add
+ stloc.0
+
+ ldloc.0
+ ldarg.4 // "t"
+ stobj object
+ ldloc.0
+ sizeof object
+ add
+ stloc.0
+
+ ldloc.0
+ ldarg.0 // "target"
+ stind.i
+
+ ret
+}
+
+```
+
+```IL
+.method public static Point CallTargetTest() cil managed
+{
+ .maxstack 4
+ .locals init (
+ [0] native int buffer,
+ [1] valuetype TailCallHelperStackEntry entry,
+ [2] Point result
+ )
+
+ // Initialize the TailCallHelperStackEntry
+ // chainCall = false
+ // sp = current sp
+
+ ldloca.s 1
+ ldc.i4.0
+ stfld bool TailCallHelperStackEntry::chainCall
+ ldloca.s 1
+ call native int GetCurrentSp()
+ stfld native int TailCallHelperStackEntry::sp
+
+ ldloca.s 1 // TailCallHelperStackEntry
+ call void PushHelperStackEntry(native int)
+
+ // Prepare arguments for the tail call target
+
+ call native int FetchArgumentBuffer()
+ sizeof native int
+ add // skip the pointer to the GC descriptor of the arguments buffer
+ sizeof native int
+ add // skip the address of the CallTargetTest in the buffer, it is used by the TailCallHelper only
+ stloc.0
+
+ ldloc.0
+ ldobj object // this
+ ldloc.0
+ sizeof object
+ add
+ stloc.0
+
+ ldloc.0
+ ldind.i // generic context
+ ldloc.0
+ sizeof native int
+ add
+ stloc.0
+
+ ldloc.0
+ ldind.i4 // int x
+ ldloc.0
+ sizeof native int
+ add
+ stloc.0
+
+ ldloc.0
+ ldobj object // T t
+ ldloc.0
+ sizeof object
+ add
+
+ ldobj native int // tailcall target
+
+ // The arguments buffer is not needed anymore
+ call void ReleaseArgumentBuffer()
+
+ .try
+ {
+ calli Point (object, native int, int32, object) // this, generic context, x, t
+ stloc.2
+ leave.s Done
+ }
+ finally
+ {
+ ldloca.s 1 // TailCallHelperStackEntry
+ call void PopHelperStackEntry(native int)
+ endfinally
+ }
+
+Done:
+ ldloc.1 // TailCallHelperStackEntry
+ ldfld bool TailCallHelperStackEntry::chainCall
+ brfalse.s NotChained
+
+ // Jump to the TailCallHelper that will call to the next tail call in the chain.
+ // The stack frame of the current CallTargetTest is reclaimed and epilog executed
+ // before the TailCallHelper is entered.
+ tail. call Point TailCallHelper()
+ ret
+
+NotChained:
+ // Now we are returning from a chain of tail calls to the caller of this chain
+ ldloc.2
+ ret
+}
+``` \ No newline at end of file