From 2832f54a6602cd4c0dff4fa65163345ab3ad953c Mon Sep 17 00:00:00 2001 From: John Doe Date: Fri, 26 Apr 2019 19:45:22 -0700 Subject: Typos (#24280) * thier -> their * exeption -> exception * Estbalisher -> Establisher * neeed -> need * neeed -> need * neeeded -> needed * neeeded -> needed * facilitiate -> facilitate * extremly -> extremely * extry -> extra --- src/gc/gc.cpp | 2 +- src/jit/jiteh.cpp | 2 +- src/jit/lower.cpp | 2 +- src/vm/ceeload.cpp | 2 +- src/vm/dllimport.cpp | 2 +- src/vm/eventtrace.cpp | 2 +- src/vm/excep.cpp | 2 +- src/vm/exceptionhandling.cpp | 8 ++++---- src/vm/win32threadpool.cpp | 2 +- 9 files changed, 12 insertions(+), 12 deletions(-) (limited to 'src') diff --git a/src/gc/gc.cpp b/src/gc/gc.cpp index 1b57fa029e..63df5513f6 100644 --- a/src/gc/gc.cpp +++ b/src/gc/gc.cpp @@ -4697,7 +4697,7 @@ gc_heap::soh_get_segment_to_expand() dprintf (GTC_LOG, ("max_gen-1: Found existing segment to expand into %Ix", (size_t)seg)); // If we return 0 here, the allocator will think since we are short on end - // of seg we neeed to trigger a full compacting GC. So if sustained low latency + // of seg we need to trigger a full compacting GC. So if sustained low latency // is set we should acquire a new seg instead, that way we wouldn't be short. // The real solution, of course, is to actually implement seg reuse in gen1. if (settings.pause_mode != pause_sustained_low_latency) diff --git a/src/jit/jiteh.cpp b/src/jit/jiteh.cpp index 5179dd23dc..571670a56f 100644 --- a/src/jit/jiteh.cpp +++ b/src/jit/jiteh.cpp @@ -1598,7 +1598,7 @@ EHblkDsc* Compiler::fgAddEHTableEntry(unsigned XTnum) #if !FEATURE_EH /***************************************************************************** - * fgRemoveEH: To facilitiate the bring-up of new platforms without having to + * fgRemoveEH: To facilitate the bring-up of new platforms without having to * worry about fully implementing EH, we want to simply remove EH constructs * from the IR.
This works because a large percentage of our tests contain * EH constructs but don't actually throw exceptions. This function removes diff --git a/src/jit/lower.cpp b/src/jit/lower.cpp index 75c354dc38..f8a5babd04 100644 --- a/src/jit/lower.cpp +++ b/src/jit/lower.cpp @@ -2490,7 +2490,7 @@ GenTree* Lowering::DecomposeLongCompare(GenTree* cmp) // Try to move the first SUB_HI operands right in front of it, this allows using // a single temporary register instead of 2 (one for CMP and one for SUB_HI). Do // this only for locals as they won't change condition flags. Note that we could - // move constants (except 0 which generates XOR reg, reg) but it's extremly rare + // move constants (except 0 which generates XOR reg, reg) but it's extremely rare // to have a constant as the first operand. // diff --git a/src/vm/ceeload.cpp b/src/vm/ceeload.cpp index 0de5dd4a53..af6204f4c0 100644 --- a/src/vm/ceeload.cpp +++ b/src/vm/ceeload.cpp @@ -2815,7 +2815,7 @@ void Module::AllocateStatics(AllocMemTracker *pamTracker) m_dwThreadStaticsBlockSize = ThreadLocalModule::OffsetOfDataBlob(); // If it has no code, we don't have to allocate anything - LOG((LF_CLASSLOADER, LL_INFO10000, "STATICS: Resource module %s. No statics neeeded\n", GetSimpleName())); + LOG((LF_CLASSLOADER, LL_INFO10000, "STATICS: Resource module %s. No statics needed\n", GetSimpleName())); _ASSERTE(m_maxTypeRidStaticsAllocated == 0); return; } diff --git a/src/vm/dllimport.cpp b/src/vm/dllimport.cpp index c9a11f3a7d..dd16dd53a8 100644 --- a/src/vm/dllimport.cpp +++ b/src/vm/dllimport.cpp @@ -1963,7 +1963,7 @@ void NDirectStubLinker::AdjustTargetStackDeltaForReverseInteropHRESULTSwapping() // If the managed return type is void, undo the HRESULT // return type added to our target sig for HRESULT swapping. // No extra argument will have been added because it makes - // no sense to add an extry byref void argument. + // no sense to add an extra byref void argument.
// m_iTargetStackDelta--; } diff --git a/src/vm/eventtrace.cpp b/src/vm/eventtrace.cpp index 4eb424fccb..c145584551 100644 --- a/src/vm/eventtrace.cpp +++ b/src/vm/eventtrace.cpp @@ -164,7 +164,7 @@ Volatile ETW::GCLog::s_l64LastClientSequenceNumber = 0; // expensive events on newer runtimes (>= 4.5) where NGEN PDB info IS available. Note // that 4.0 has NGEN PDBS but unfortunately not the OverrideAndSuppressNGenEvents // keyword, b/c NGEN PDBs were made publicly only after 4.0 shipped. So tools that need - // to consume both <4.0 and 4.0 events would neeed to enable the expensive NGEN events to + // to consume both <4.0 and 4.0 events would need to enable the expensive NGEN events to // deal properly with 3.5, even though those events aren't necessary on 4.0. // // On CoreCLR, this keyword is a no-op, because coregen PDBs don't exist (and thus we'll diff --git a/src/vm/excep.cpp b/src/vm/excep.cpp index 52cab11138..0e99239d4c 100644 --- a/src/vm/excep.cpp +++ b/src/vm/excep.cpp @@ -7988,7 +7988,7 @@ LONG WINAPI CLRVectoredExceptionHandlerShim(PEXCEPTION_POINTERS pExceptionInfo) // in-proc helper thread to work. What it does is continue the exception unhandled which // will let the thread immediately execute to this point. Inside this worker the thread // will block until the debugger knows how to continue the exception. If it decides the - // exception was handled then we immediately resume execution as if the exeption had never + // exception was handled then we immediately resume execution as if the exception had never // even been allowed to run into this handler.
If it is unhandled then we keep processing // this handler // diff --git a/src/vm/exceptionhandling.cpp b/src/vm/exceptionhandling.cpp index 948913c8d3..c67e681894 100644 --- a/src/vm/exceptionhandling.cpp +++ b/src/vm/exceptionhandling.cpp @@ -6106,7 +6106,7 @@ void ExceptionTracker::StackRange::CombineWith(StackFrame sfCurrent, StackRange* // So in this case, we'll see a sfCurrent that is larger than the previous tracker's high bound and // we'll have an empty scan range for the current tracker. And we'll just need to pre-init the // scanned stack range for the new tracker to the previous tracker's range. This maintains the - // invariant that the scanned range for nested trackers completely cover the scanned range of thier + // invariant that the scanned range for nested trackers completely cover the scanned range of their // previous tracker once they "escape" the previous tracker. STRESS_LOG3(LF_EH, LL_INFO100, "Initializing current StackRange with previous tracker's StackRange. sfCurrent: %p, prev low: %p, prev high: %p\n", @@ -6431,7 +6431,7 @@ bool ExceptionTracker::HasFrameBeenUnwoundByAnyActiveException(CrawlFrame * pCF) // then: // // For case (1) above, sfCurrentEstablisherFrame will be the same as the callerSP of the managed frame. - // For case (2) above, sfLastUnwoundEstbalisherFrame would be the same as the managed frame's SP (or upper bound) + // For case (2) above, sfLastUnwoundEstablisherFrame would be the same as the managed frame's SP (or upper bound) // // For these scenarios, the frame is considered unwound.
@@ -6454,8 +6454,8 @@ bool ExceptionTracker::HasFrameBeenUnwoundByAnyActiveException(CrawlFrame * pCF) #else // !STACK_RANGE_BOUNDS_ARE_CALLER_SP // On ARM, if the callerSP of the managed frame is the same as upper bound, then: // - // For case (1), sfCurrentEstablisherFrame will be above the callerSP of the managed frame (since EstbalisherFrame is the caller SP for a given frame on ARM) - // For case (2), upper bound will be the same as LastUnwoundEstbalisherFrame. + // For case (1), sfCurrentEstablisherFrame will be above the callerSP of the managed frame (since EstablisherFrame is the caller SP for a given frame on ARM) + // For case (2), upper bound will be the same as LastUnwoundEstablisherFrame. // // For these scenarios, the frame is considered unwound. if (sfUpperBound == csfToCheck) diff --git a/src/vm/win32threadpool.cpp b/src/vm/win32threadpool.cpp index da44beb72e..83d8d4d80a 100644 --- a/src/vm/win32threadpool.cpp +++ b/src/vm/win32threadpool.cpp @@ -3803,7 +3803,7 @@ BOOL ThreadpoolMgr::ShouldGrowCompletionPortThreadpool(ThreadCounter::Counts cou && (counts.NumActive == 0 || !GCHeapUtilities::IsGCInProgress(TRUE)) ) { - // adjust limit if neeeded + // adjust limit if needed if (counts.NumRetired == 0) { if (counts.NumActive + counts.NumRetired < MaxLimitTotalCPThreads && -- cgit v1.2.3