author     John Doe <github.john.doe@outlook.com>   2018-05-24 23:22:42 -0700
committer  Jan Kotas <jkotas@microsoft.com>         2018-05-24 23:22:42 -0700
commit     8e48db2c113052ef18cdafb43e114a8633864ec0 (patch)
tree       866d4dcfe565cfaefbb4f4718d4fad4420b665ac
parent     6468753a39d2419083cd842497ae2fc0f35c143a (diff)
Typo (#18122)
* acquringing -> acquiring
* Activ -> Active
* activley -> actively
* acutal -> actual
* bIncomingIPAdddefed -> bIncomingIPAddRefed
* adddr -> addr
* readding -> reading
* Addfunction -> AddFunction
* additionnal -> additional
* Additonal -> Additional
* Additonally -> Additionally
* Addresss -> Address
* addtion -> addition
* aded -> added
* aditional -> additional
* adjustements -> adjustments
* Adress -> Address
* afer -> after
* aformentioned -> aforementioned
* afte -> after
* agains -> against
* agaisnt -> against
* aggresively -> aggressively
* aggreates -> aggregates
* aggregious -> egregious
* aginst -> against
* agregates -> aggregates
* Agressive -> Aggressive
* ahve -> have
* ajdust -> adjust
* ajust -> adjust
* alement -> element
* algoritm -> algorithm
* alighnment -> alignment
* alignmant -> alignment
* constraits -> constraints
* Allcator -> Allocator
* alllocate -> allocate
* alloacted -> allocated
* allocatate -> allocate
* allocatoror -> allocator
* alloctaed -> allocated
* alloction -> allocation
* alloted -> allotted
* allt he -> all the
* alltogether -> altogether
* alocate -> allocate
* alocated -> allocated
* Alocates -> Allocates
* alogrithm -> algorithm
* aloocate -> allocate
* alot -> a lot
* alwasy -> always
* alwyas -> always
* alwys -> always
-rw-r--r--  src/debug/daccess/dacdbiimplstackwalk.cpp                       | 2
-rw-r--r--  src/debug/daccess/dacfn.cpp                                     | 2
-rw-r--r--  src/debug/di/process.cpp                                        | 2
-rw-r--r--  src/debug/di/rsthread.cpp                                       | 8
-rw-r--r--  src/debug/ee/controller.cpp                                     | 8
-rw-r--r--  src/debug/ee/debugger.cpp                                       | 4
-rw-r--r--  src/gc/handletable.cpp                                          | 2
-rw-r--r--  src/jit/codegencommon.cpp                                       | 2
-rw-r--r--  src/jit/compiler.cpp                                            | 4
-rw-r--r--  src/jit/earlyprop.cpp                                           | 2
-rw-r--r--  src/jit/emitxarch.cpp                                           | 4
-rw-r--r--  src/jit/inlinepolicy.cpp                                        | 2
-rw-r--r--  src/jit/lsra.cpp                                                | 4
-rw-r--r--  src/jit/regalloc.cpp                                            | 2
-rw-r--r--  src/jit/scopeinfo.cpp                                           | 2
-rw-r--r--  src/jit/simdcodegenxarch.cpp                                    | 2
-rw-r--r--  src/jit/valuenum.cpp                                            | 2
-rw-r--r--  src/md/ceefilegen/blobfetcher.cpp                               | 2
-rw-r--r--  src/pal/src/file/find.cpp                                       | 2
-rw-r--r--  src/pal/src/loader/module.cpp                                   | 2
-rw-r--r--  src/pal/src/map/map.cpp                                         | 2
-rw-r--r--  src/pal/src/objmgr/shmobjectmanager.cpp                         | 2
-rw-r--r--  src/pal/tests/palsuite/file_io/ReadFile/test2/ReadFile.cpp      | 2
-rw-r--r--  src/utilcode/clrhost_nodependencies.cpp                         | 2
-rw-r--r--  src/utilcode/debug.cpp                                          | 2
-rw-r--r--  src/utilcode/sbuffer.cpp                                        | 2
-rw-r--r--  src/utilcode/securitywrapper.cpp                                | 2
-rw-r--r--  src/vm/codeman.cpp                                              | 2
-rw-r--r--  src/vm/comcache.cpp                                             | 6
-rw-r--r--  src/vm/cominterfacemarshaler.cpp                                | 2
-rw-r--r--  src/vm/debugdebugger.cpp                                        | 2
-rw-r--r--  src/vm/dynamicmethod.cpp                                        | 2
-rw-r--r--  src/vm/eetwain.cpp                                              | 2
-rw-r--r--  src/vm/eventtrace.cpp                                           | 6
-rw-r--r--  src/vm/excep.cpp                                                | 2
-rw-r--r--  src/vm/fieldmarshaler.cpp                                       | 4
-rw-r--r--  src/vm/gccover.cpp                                              | 2
-rw-r--r--  src/vm/gcenv.ee.cpp                                             | 2
-rw-r--r--  src/vm/i386/excepx86.cpp                                        | 2
-rw-r--r--  src/vm/i386/jitinterfacex86.cpp                                 | 2
-rw-r--r--  src/vm/jitinterface.cpp                                         | 2
-rw-r--r--  src/vm/safehandle.cpp                                           | 2
-rw-r--r--  src/vm/simplerwlock.cpp                                         | 2
-rw-r--r--  src/vm/stackprobe.cpp                                           | 2
-rw-r--r--  src/vm/threadsuspend.cpp                                        | 2
-rw-r--r--  src/vm/virtualcallstub.cpp                                      | 2
46 files changed, 61 insertions(+), 61 deletions(-)
diff --git a/src/debug/daccess/dacdbiimplstackwalk.cpp b/src/debug/daccess/dacdbiimplstackwalk.cpp
index 3bb2b9fdec..8cdda342f9 100644
--- a/src/debug/daccess/dacdbiimplstackwalk.cpp
+++ b/src/debug/daccess/dacdbiimplstackwalk.cpp
@@ -54,7 +54,7 @@ public:
};
// Helper to allocate stackwalk datastructures for given parameters.
-// This is allocated on the local heap (and not via the forDbi allocatoror on the dac-cache), and then
+// This is allocated on the local heap (and not via the forDbi allocator on the dac-cache), and then
// freed via code:DacDbiInterfaceImpl::DeleteStackWalk
//
// Throws on error (mainly OOM).
diff --git a/src/debug/daccess/dacfn.cpp b/src/debug/daccess/dacfn.cpp
index d6e0a89267..64d07c90ba 100644
--- a/src/debug/daccess/dacfn.cpp
+++ b/src/debug/daccess/dacfn.cpp
@@ -360,7 +360,7 @@ DacInstantiateTypeByAddressHelper(TADDR addr, ULONG32 size, bool throwEx, bool f
{
#ifdef _PREFIX_
- // Dac accesses are not interesting for PREfix and cause alot of PREfix noise
+ // Dac accesses are not interesting for PREfix and cause a lot of PREfix noise
// so we just return the unmodified pointer for our PREFIX builds
return (PVOID)addr;
diff --git a/src/debug/di/process.cpp b/src/debug/di/process.cpp
index 8e772eb472..25b3eea2f1 100644
--- a/src/debug/di/process.cpp
+++ b/src/debug/di/process.cpp
@@ -12800,7 +12800,7 @@ void CordbProcess::HandleDebugEventForInteropDebugging(const DEBUG_EVENT * pEven
// of the queue or if the process is currently synchronized. Of course, we only do this if the
// process is initialized.
//
- // Note: we also hijack these left over in-band events if we're activley trying to send the
+ // Note: we also hijack these left over in-band events if we're actively trying to send the
// managed continue message to the Left Side. This is controlled by m_specialDeferment below.
// Only exceptions can be IB events - everything else is OOB.
diff --git a/src/debug/di/rsthread.cpp b/src/debug/di/rsthread.cpp
index 92ed90eb09..97eeed21bc 100644
--- a/src/debug/di/rsthread.cpp
+++ b/src/debug/di/rsthread.cpp
@@ -1209,7 +1209,7 @@ HRESULT CordbThread::CreateEval(ICorDebugEval ** ppEval)
// we can compare DAC & the RS and make sure DACs working.
void CheckAgainstDAC(CordbFunction * pFunc, void * pIP, mdMethodDef mdExpected)
{
- // This is a hook to add DAC checks agaisnt a {function, ip}
+ // This is a hook to add DAC checks against a {function, ip}
}
@@ -4184,7 +4184,7 @@ void CordbUnmanagedThread::SetupForSkipBreakpoint(NativePatch * pNativePatch)
}
#endif
#if defined(DBG_TARGET_X86)
- STRESS_LOG2(LF_CORDB, LL_INFO100, "CUT::SetupSkip. adddr=%p. Opcode=%x\n", pNativePatch->pAddress, (DWORD) pNativePatch->opcode);
+ STRESS_LOG2(LF_CORDB, LL_INFO100, "CUT::SetupSkip. addr=%p. Opcode=%x\n", pNativePatch->pAddress, (DWORD) pNativePatch->opcode);
#endif
// Replace the BP w/ the opcode.
@@ -4224,11 +4224,11 @@ void CordbUnmanagedThread::FixupForSkipBreakpoint()
if (GetProcess()->GetNativePatch(m_pPatchSkipAddress) != NULL)
{
ApplyRemotePatch(GetProcess(), m_pPatchSkipAddress);
- STRESS_LOG1(LF_CORDB, LL_INFO100, "CUT::FixupSetupSkip. adddr=%p\n", m_pPatchSkipAddress);
+ STRESS_LOG1(LF_CORDB, LL_INFO100, "CUT::FixupSetupSkip. addr=%p\n", m_pPatchSkipAddress);
}
else
{
- STRESS_LOG1(LF_CORDB, LL_INFO100, "CUT::FixupSetupSkip. Patch removed. Not-readding. adddr=%p\n", m_pPatchSkipAddress);
+ STRESS_LOG1(LF_CORDB, LL_INFO100, "CUT::FixupSetupSkip. Patch removed. Not-reading. addr=%p\n", m_pPatchSkipAddress);
}
m_pPatchSkipAddress = NULL;
diff --git a/src/debug/ee/controller.cpp b/src/debug/ee/controller.cpp
index 8a56e17d6c..d88eacda19 100644
--- a/src/debug/ee/controller.cpp
+++ b/src/debug/ee/controller.cpp
@@ -5820,12 +5820,12 @@ bool DebuggerStepper::TrapStep(ControllerStackInfo *info, bool in)
// What if the thread is stopped at a managed debug event outside of a filter ctx? Eg, stopped
// somewhere directly in mscorwks (like sending a LogMsg or ClsLoad event) or even at WaitForSingleObject.
// ActiveFrame is either the stepper's initial frame or the frame of a filterctx.
- bool fIsActivFrameLive = (info->m_activeFrame.fp == info->m_bottomFP);
+ bool fIsActiveFrameLive = (info->m_activeFrame.fp == info->m_bottomFP);
// If this thread isn't stopped in managed code, it can't be at the active frame.
if (GetManagedStoppedCtx(this->GetThread()) == NULL)
{
- fIsActivFrameLive = false;
+ fIsActiveFrameLive = false;
}
bool fIsJump = false;
@@ -5834,7 +5834,7 @@ bool DebuggerStepper::TrapStep(ControllerStackInfo *info, bool in)
// If m_activeFrame is not the actual active frame,
// we should skip this first switch - never single step, and
// assume our context is bogus.
- if (fIsActivFrameLive)
+ if (fIsActiveFrameLive)
{
LOG((LF_CORDB,LL_INFO10000, "DC::TS: immediate?\n"));
@@ -5974,7 +5974,7 @@ bool DebuggerStepper::TrapStep(ControllerStackInfo *info, bool in)
walker.Next();
}
}
- } // if (fIsActivFrameLive)
+ } // if (fIsActiveFrameLive)
//
// Use our range, if we're in the original
diff --git a/src/debug/ee/debugger.cpp b/src/debug/ee/debugger.cpp
index b6570a2523..6b738048e1 100644
--- a/src/debug/ee/debugger.cpp
+++ b/src/debug/ee/debugger.cpp
@@ -12965,7 +12965,7 @@ BOOL EnCSequencePointHelper::ShouldSetRemapBreakpoint(unsigned int offsetIndex)
//-----------------------------------------------------------------------------
// For each function that's EnC-ed, the EE will call either UpdateFunction
-// (if the function already is loaded + jitted) or Addfunction
+// (if the function already is loaded + jitted) or AddFunction
//
// This is called before the EE updates the MethodDesc, so pMD does not yet
// point to the version we'll be remapping to.
@@ -15103,7 +15103,7 @@ HRESULT Debugger::TerminateAppDomainIPC(void)
m_pAppDomainCB->m_iNumOfUsedSlots = 0;
m_pAppDomainCB->m_iTotalSlots = 0;
- // Now delete the memory alloacted for AppDomainInfo array
+ // Now delete the memory allocated for AppDomainInfo array
delete [] m_pAppDomainCB->m_rgListOfAppDomains;
m_pAppDomainCB->m_rgListOfAppDomains = NULL;
diff --git a/src/gc/handletable.cpp b/src/gc/handletable.cpp
index 0ae2b03d00..fde5cff021 100644
--- a/src/gc/handletable.cpp
+++ b/src/gc/handletable.cpp
@@ -80,7 +80,7 @@ __inline PTR_HandleTable Table(HHANDLETABLE hTable)
/*
* HndCreateHandleTable
*
- * Alocates and initializes a handle table.
+ * Allocates and initializes a handle table.
*
*/
HHANDLETABLE HndCreateHandleTable(const uint32_t *pTypeFlags, uint32_t uTypeCount, ADIndex uADIndex)
diff --git a/src/jit/codegencommon.cpp b/src/jit/codegencommon.cpp
index 62eaf7f5cc..dde2bb8c10 100644
--- a/src/jit/codegencommon.cpp
+++ b/src/jit/codegencommon.cpp
@@ -9035,7 +9035,7 @@ void CodeGen::genFnEpilog(BasicBlock* block)
//
// Case 2: localloc used
// genSPToFPDelta = Min(240, (int)compiler->lvaOutgoingArgSpaceSize)
- // Offset = Amount to be aded to RBP to point at callee saved int regs.
+ // Offset = Amount to be added to RBP to point at callee saved int regs.
offset = genSPtoFPdelta() - compiler->compLclFrameSize;
// Offset should fit within a byte if localloc is not used.
diff --git a/src/jit/compiler.cpp b/src/jit/compiler.cpp
index 0a64394e1b..31a2888813 100644
--- a/src/jit/compiler.cpp
+++ b/src/jit/compiler.cpp
@@ -472,7 +472,7 @@ var_types Compiler::getJitGCType(BYTE gcType)
// Return Value:
// Two [or more] values are written into the gcPtrs array
//
-// Note that for ARM64 there will alwys be exactly two pointer sized fields
+// Note that for ARM64 there will always be exactly two pointer sized fields
void Compiler::getStructGcPtrsFromOp(GenTree* op, BYTE* gcPtrsOut)
{
@@ -5811,7 +5811,7 @@ int Compiler::compCompileHelper(CORINFO_MODULE_HANDLE classPtr,
#endif
- // Check for COMPlus_AgressiveInlining
+ // Check for COMPlus_AggressiveInlining
if (JitConfig.JitAggressiveInlining())
{
compDoAggressiveInlining = true;
diff --git a/src/jit/earlyprop.cpp b/src/jit/earlyprop.cpp
index b0db714a09..46f89584a2 100644
--- a/src/jit/earlyprop.cpp
+++ b/src/jit/earlyprop.cpp
@@ -362,7 +362,7 @@ GenTree* Compiler::optEarlyPropRewriteTree(GenTree* tree)
}
DecLclVarRefCountsVisitor::WalkTree(this, tree);
- // acutalValClone has small tree node size, it is safe to use CopyFrom here.
+ // actualValClone has small tree node size, it is safe to use CopyFrom here.
tree->ReplaceWith(actualValClone, this);
IncLclVarRefCountsVisitor::WalkTree(this, tree);
diff --git a/src/jit/emitxarch.cpp b/src/jit/emitxarch.cpp
index 51f7d93592..4fdc5c8cb2 100644
--- a/src/jit/emitxarch.cpp
+++ b/src/jit/emitxarch.cpp
@@ -5971,7 +5971,7 @@ void emitter::emitIns_Call(EmitCallType callType,
#if STACK_PROBES
if (emitComp->opts.compNeedStackProbes)
{
- // If we've pushed more than JIT_RESERVED_STACK allows, do an aditional stack probe
+ // If we've pushed more than JIT_RESERVED_STACK allows, do an additional stack probe
// Else, just make sure the prolog does a probe for us. Invariant we're trying
// to get is that at any point we go out to unmanaged code, there is at least
// CORINFO_STACKPROBE_DEPTH bytes of stack available.
@@ -5991,7 +5991,7 @@ void emitter::emitIns_Call(EmitCallType callType,
// localloc and on the prolog (invariant is that
// genStackLevel is 0 on basic block entry and exit and
// after any alloca). genStackLevel will include any arguments
- // to the call, so we will insert an aditional probe if
+ // to the call, so we will insert an additional probe if
// we've consumed more than JIT_RESERVED_STACK bytes
// of stack, which is what the prolog probe covers (in
// addition to the EE requested size)
diff --git a/src/jit/inlinepolicy.cpp b/src/jit/inlinepolicy.cpp
index 306a04d7a6..9cb534c18a 100644
--- a/src/jit/inlinepolicy.cpp
+++ b/src/jit/inlinepolicy.cpp
@@ -701,7 +701,7 @@ double DefaultPolicy::DetermineMultiplier()
if (additionalMultiplier != 0)
{
multiplier += additionalMultiplier;
- JITDUMP("\nmultiplier increased via JitInlineAdditonalMultiplier=%d to %g.", additionalMultiplier, multiplier);
+ JITDUMP("\nmultiplier increased via JitInlineAdditionalMultiplier=%d to %g.", additionalMultiplier, multiplier);
}
if (m_RootCompiler->compInlineStress())
diff --git a/src/jit/lsra.cpp b/src/jit/lsra.cpp
index befaac2241..5d5baea139 100644
--- a/src/jit/lsra.cpp
+++ b/src/jit/lsra.cpp
@@ -7704,7 +7704,7 @@ void LinearScan::resolveEdges()
// We will try to avoid resolution across critical edges in cases where all the critical-edge
// targets of a block have the same home. We will then split the edges only for the
// remaining mismatches. We visit the out-edges, as that allows us to share the moves that are
- // common among allt he targets.
+ // common among all the targets.
if (hasCriticalEdges)
{
@@ -10140,7 +10140,7 @@ void LinearScan::verifyFinalAllocation()
}
// Now, verify the resolution blocks.
- // Currently these are nearly always at the end of the method, but that may not alwyas be the case.
+ // Currently these are nearly always at the end of the method, but that may not always be the case.
// So, we'll go through all the BBs looking for blocks whose bbNum is greater than bbNumMaxBeforeResolution.
for (BasicBlock* currentBlock = compiler->fgFirstBB; currentBlock != nullptr; currentBlock = currentBlock->bbNext)
{
diff --git a/src/jit/regalloc.cpp b/src/jit/regalloc.cpp
index 70e33f4925..aa3d0f43bf 100644
--- a/src/jit/regalloc.cpp
+++ b/src/jit/regalloc.cpp
@@ -59,7 +59,7 @@ DWORD Compiler::getCanDoubleAlign()
// - We pay one extra memory reference for each variable that could have been enregistered in EBP (refCntWtdEBP).
//
// If the misalignment penalty is estimated to be less than the bytes used, we don't double align.
-// Otherwise, we compare the weighted ref count of ebp-enregistered variables aginst double the
+// Otherwise, we compare the weighted ref count of ebp-enregistered variables against double the
// ref count for double-aligned values.
//
bool Compiler::shouldDoubleAlign(
diff --git a/src/jit/scopeinfo.cpp b/src/jit/scopeinfo.cpp
index 31f66e2c2a..5a3f704cfc 100644
--- a/src/jit/scopeinfo.cpp
+++ b/src/jit/scopeinfo.cpp
@@ -1194,7 +1194,7 @@ void CodeGen::psiMoveToReg(unsigned varNum, regNumber reg, regNumber otherReg)
* CodeGen::psiMoveToStack
*
* A incoming register-argument is being moved to its final home on the stack
- * (ie. all adjustements to {F/S}PBASE have been made
+ * (ie. all adjustments to {F/S}PBASE have been made
*/
void CodeGen::psiMoveToStack(unsigned varNum)
diff --git a/src/jit/simdcodegenxarch.cpp b/src/jit/simdcodegenxarch.cpp
index a61fbb7e94..6f40010e9a 100644
--- a/src/jit/simdcodegenxarch.cpp
+++ b/src/jit/simdcodegenxarch.cpp
@@ -2232,7 +2232,7 @@ void CodeGen::genSIMDIntrinsicDotProduct(GenTreeSIMD* simdNode)
// tmp = v0
// tmp = shuffle(tmp, tmp, SHUFFLE_XYZW) // tmp = (0+1, 1+0, 2+3, 3+2)
// v0 = v0 + tmp // v0 = (0+1+2+3, 0+1+2+3, 0+1+2+3, 0+1+2+3)
- // // Essentially horizontal addtion of all elements.
+ // // Essentially horizontal addition of all elements.
// // We could achieve the same using SSEv3 instruction
// // HADDPS.
//
diff --git a/src/jit/valuenum.cpp b/src/jit/valuenum.cpp
index dfc4e67a74..306c861a79 100644
--- a/src/jit/valuenum.cpp
+++ b/src/jit/valuenum.cpp
@@ -6618,7 +6618,7 @@ void Compiler::fgValueNumberTree(GenTree* tree, bool evalAsgLhsInd)
else if ((oper == GT_IND) || GenTree::OperIsBlk(oper))
{
// So far, we handle cases in which the address is a ptr-to-local, or if it's
- // a pointer to an object field or array alement. Other cases become uses of
+ // a pointer to an object field or array element. Other cases become uses of
// the current ByrefExposed value and the pointer value, so that at least we
// can recognize redundant loads with no stores between them.
GenTree* addr = tree->AsIndir()->Addr();
diff --git a/src/md/ceefilegen/blobfetcher.cpp b/src/md/ceefilegen/blobfetcher.cpp
index 4e934d0b24..01d5868ad2 100644
--- a/src/md/ceefilegen/blobfetcher.cpp
+++ b/src/md/ceefilegen/blobfetcher.cpp
@@ -88,7 +88,7 @@ char * CBlobFetcher::CPillar::MakeNewBlock(unsigned len, unsigned pad) {
_ASSERTE(pad < maxAlign);
- // Make sure we have memory in this block to allocatate
+ // Make sure we have memory in this block to allocate
if (m_dataStart == NULL) {
// make sure allocate at least as big as length
diff --git a/src/pal/src/file/find.cpp b/src/pal/src/file/find.cpp
index efc651226a..c874e28a65 100644
--- a/src/pal/src/file/find.cpp
+++ b/src/pal/src/file/find.cpp
@@ -857,7 +857,7 @@ finding no matches but without any error occurring) or FALSE if any error
occurs. It calls SetLastError() if it returns FALSE.
Sorting doesn't seem to be consistent on all Windows platform, and it's
-not required for Rotor to have the same sorting alogrithm than Windows 2000.
+not required for Rotor to have the same sorting algorithm than Windows 2000.
This implementation will give slightly different result for the sort list
than Windows 2000.
diff --git a/src/pal/src/loader/module.cpp b/src/pal/src/loader/module.cpp
index f73adfa56a..16bca0f929 100644
--- a/src/pal/src/loader/module.cpp
+++ b/src/pal/src/loader/module.cpp
@@ -1378,7 +1378,7 @@ static LPWSTR LOADGetModuleFileName(MODSTRUCT *module)
/* return "real" name of module if it is known. we have this if LoadLibrary
was given an absolute or relative path; we can also determine it at the
- first GetProcAdress call. */
+ first GetProcAddress call. */
TRACE("Returning full path name of module\n");
return module->lib_name;
}
diff --git a/src/pal/src/map/map.cpp b/src/pal/src/map/map.cpp
index 4a435f8cef..228b48bfca 100644
--- a/src/pal/src/map/map.cpp
+++ b/src/pal/src/map/map.cpp
@@ -2351,7 +2351,7 @@ void * MAPMapPEFile(HANDLE hFile)
InternalEnterCriticalSection(pThread, &mapping_critsec);
#ifdef BIT64
- // First try to reserve virtual memory using ExecutableAllcator. This allows all PE images to be
+ // First try to reserve virtual memory using ExecutableAllocator. This allows all PE images to be
// near each other and close to the coreclr library which also allows the runtime to generate
// more efficient code (by avoiding usage of jump stubs). Alignment to a 64 KB granularity should
// not be necessary (alignment to page size should be sufficient), but see
diff --git a/src/pal/src/objmgr/shmobjectmanager.cpp b/src/pal/src/objmgr/shmobjectmanager.cpp
index 90caa655e3..2277003139 100644
--- a/src/pal/src/objmgr/shmobjectmanager.cpp
+++ b/src/pal/src/objmgr/shmobjectmanager.cpp
@@ -1158,7 +1158,7 @@ CSharedMemoryObjectManager::ImportSharedObjectIntoProcess(
}
else
{
- ERROR("Unable to alllocate new object\n");
+ ERROR("Unable to allocate new object\n");
palError = ERROR_OUTOFMEMORY;
goto ImportSharedObjectIntoProcessExit;
}
diff --git a/src/pal/tests/palsuite/file_io/ReadFile/test2/ReadFile.cpp b/src/pal/tests/palsuite/file_io/ReadFile/test2/ReadFile.cpp
index 789e001700..7120d1fc9d 100644
--- a/src/pal/tests/palsuite/file_io/ReadFile/test2/ReadFile.cpp
+++ b/src/pal/tests/palsuite/file_io/ReadFile/test2/ReadFile.cpp
@@ -145,7 +145,7 @@ int __cdecl main(int argc, char *argv[])
return FAIL;
}
- /* aloocate read-write memery for readBuffer */
+ /* allocate read-write memery for readBuffer */
if (!(readBuffer = (char*) VirtualAlloc(NULL, BUFFER_SIZE, MEM_COMMIT, PAGE_READWRITE)))
{
Fail("VirtualAlloc failed: GetLastError returns %d\n", GetLastError());
diff --git a/src/utilcode/clrhost_nodependencies.cpp b/src/utilcode/clrhost_nodependencies.cpp
index 62453b7e81..9b66717afa 100644
--- a/src/utilcode/clrhost_nodependencies.cpp
+++ b/src/utilcode/clrhost_nodependencies.cpp
@@ -200,7 +200,7 @@ ClrDebugState *CLRInitDebugState()
pNewClrDebugState = (ClrDebugState*)::HeapAlloc(GetProcessHeap(), 0, sizeof(ClrDebugState));
if (pNewClrDebugState != NULL)
{
- // Only allocate a DbgStateLockData if its owning ClrDebugState was successfully alloctaed
+ // Only allocate a DbgStateLockData if its owning ClrDebugState was successfully allocated
pNewLockData = (DbgStateLockData *)::HeapAlloc(GetProcessHeap(), 0, sizeof(DbgStateLockData));
}
#define GetProcessHeap() Dont_Use_GetProcessHeap()
diff --git a/src/utilcode/debug.cpp b/src/utilcode/debug.cpp
index ed4f0445fc..3d8704826b 100644
--- a/src/utilcode/debug.cpp
+++ b/src/utilcode/debug.cpp
@@ -307,7 +307,7 @@ BOOL LaunchJITDebugger()
// This function is called in order to ultimately return an out of memory
// failed hresult. But this guy will check what environment you are running
// in and give an assert for running in a debug build environment. Usually
-// out of memory on a dev machine is a bogus alloction, and this allows you
+// out of memory on a dev machine is a bogus allocation, and this allows you
// to catch such errors. But when run in a stress envrionment where you are
// trying to get out of memory, assert behavior stops the tests.
//*****************************************************************************
diff --git a/src/utilcode/sbuffer.cpp b/src/utilcode/sbuffer.cpp
index b97e12614f..35f7c0463f 100644
--- a/src/utilcode/sbuffer.cpp
+++ b/src/utilcode/sbuffer.cpp
@@ -88,7 +88,7 @@ void SBuffer::Replace(const Iterator &i, COUNT_T deleteSize, COUNT_T insertSize)
COUNT_T startRange = (COUNT_T) (i.m_ptr - m_buffer);
// The PRECONDITION(CheckIterationRange(i, deleteSize)) should check this in
// contract-checking builds, but this ensures we don't integer overflow if someone
- // passes in an aggregious deleteSize by capping it to the remaining length in the
+ // passes in an egregious deleteSize by capping it to the remaining length in the
// buffer.
if ((COUNT_T)(m_buffer + m_size - i.m_ptr) < deleteSize)
{
diff --git a/src/utilcode/securitywrapper.cpp b/src/utilcode/securitywrapper.cpp
index 0f146ab55e..10672b7004 100644
--- a/src/utilcode/securitywrapper.cpp
+++ b/src/utilcode/securitywrapper.cpp
@@ -694,7 +694,7 @@ Sid Win32SecurityDescriptor::GetOwner()
//-----------------------------------------------------------------------------
// Initialize this instance of a SecurityDescriptor with the SD for the handle.
-// The handle must ahve READ_CONTROL permissions to do this.
+// The handle must have READ_CONTROL permissions to do this.
// Throws on error.
//-----------------------------------------------------------------------------
HRESULT Win32SecurityDescriptor::InitFromHandleNoThrow(HANDLE h)
diff --git a/src/vm/codeman.cpp b/src/vm/codeman.cpp
index 4c8aee2038..55b63223d2 100644
--- a/src/vm/codeman.cpp
+++ b/src/vm/codeman.cpp
@@ -5092,7 +5092,7 @@ PCODE ExecutionManager::getNextJumpStub(MethodDesc* pMD, PCODE target,
}
// allocJumpStubBlock will allocate from the LoaderCodeHeap for normal methods
- // and will alocate from a HostCodeHeap for LCG methods.
+ // and will allocate from a HostCodeHeap for LCG methods.
//
// note that this can throw an OOM exception
diff --git a/src/vm/comcache.cpp b/src/vm/comcache.cpp
index ab2e53024d..a7834078f1 100644
--- a/src/vm/comcache.cpp
+++ b/src/vm/comcache.cpp
@@ -803,7 +803,7 @@ void IUnkEntry::ReleaseStream()
}
}
-// Indicates if the COM component being wrapped by the IUnkEntry aggreates the FTM
+// Indicates if the COM component being wrapped by the IUnkEntry aggregates the FTM
bool IUnkEntry::IsFreeThreaded()
{
LIMITED_METHOD_CONTRACT;
@@ -1063,7 +1063,7 @@ bool IUnkEntry::IsComponentFreeThreaded(IUnknown *pUnk)
{
SafeComHolderPreemp<IMarshal> pMarshal = NULL;
- // If not, then we can try to determine if the component agregates the FTM via IMarshal.
+ // If not, then we can try to determine if the component aggregates the FTM via IMarshal.
hr = SafeQueryInterfacePreemp(pUnk, IID_IMarshal, (IUnknown **)&pMarshal);
LogInteropQI(pUnk, IID_IMarshal, hr, "IUnkEntry::IsComponentFreeThreaded: QI for IMarshal");
if (SUCCEEDED(hr))
@@ -1226,7 +1226,7 @@ DWORD WINAPI MDAContextSwitchDeadlockThreadProc(LPVOID lpParameter)
if (retval == WAIT_TIMEOUT)
{
- // We didn't transition into the context within the alloted timeout period.
+ // We didn't transition into the context within the allotted timeout period.
// We'll fire the mda and close the event, but we can't delete is as the
// thread may still complete the transition and attempt to signal the event.
// So we'll just leak it and let the transition thread recognize that the
diff --git a/src/vm/cominterfacemarshaler.cpp b/src/vm/cominterfacemarshaler.cpp
index 2eb06b3734..9b9d56a6ff 100644
--- a/src/vm/cominterfacemarshaler.cpp
+++ b/src/vm/cominterfacemarshaler.cpp
@@ -1125,7 +1125,7 @@ OBJECTREF COMInterfaceMarshaler::WrapWithComObject()
&oref, // pComObj
NULL, // ppIncomingIP
NULL, // pIncomingItfMT
- false // bIncomingIPAdddefed
+ false // bIncomingIPAddRefed
);
}
GCPROTECT_END();
diff --git a/src/vm/debugdebugger.cpp b/src/vm/debugdebugger.cpp
index 04620a6596..daadc91a0a 100644
--- a/src/vm/debugdebugger.cpp
+++ b/src/vm/debugdebugger.cpp
@@ -660,7 +660,7 @@ FCIMPL4(void, DebugStackTrace::GetStackFramesInternal,
// look for the entry matching the one we're looking for
if (offsets[j] >= dwCurILOffset)
{
- // if this offset is > what we're looking for, ajdust the index
+ // if this offset is > what we're looking for, adjust the index
if (offsets[j] > dwCurILOffset && j > 0)
{
j--;
diff --git a/src/vm/dynamicmethod.cpp b/src/vm/dynamicmethod.cpp
index 7778924efb..5fd552a0a1 100644
--- a/src/vm/dynamicmethod.cpp
+++ b/src/vm/dynamicmethod.cpp
@@ -1279,7 +1279,7 @@ STRINGREF* LCGMethodResolver::GetOrInternString(STRINGREF *pProtectedStringRef)
}
// AddToUsedIndCellList adds a IndCellList link to the beginning of m_UsedIndCellList. It is called by
-// code:CEEInfo::getCallInfo when a indirection cell is alocated for m_pDynamicMethod.
+// code:CEEInfo::getCallInfo when a indirection cell is allocated for m_pDynamicMethod.
// All the indirection cells usded by m_pDynamicMethod will be recycled when this resolver
// is finalized, see code:LCGMethodResolver::RecycleIndCells
void LCGMethodResolver::AddToUsedIndCellList(BYTE * indcell)
diff --git a/src/vm/eetwain.cpp b/src/vm/eetwain.cpp
index 511a635509..cde9fe6fde 100644
--- a/src/vm/eetwain.cpp
+++ b/src/vm/eetwain.cpp
@@ -4755,7 +4755,7 @@ bool EECodeManager::EnumGcRefs( PREGDISPLAY pContext,
// We encode the arguments as if they were ESP based variables even though they aren't
// If this frame would have ben an ESP based frame, This fake frame is one DWORD
// smaller than the real frame because it did not push EBP but the real frame did.
- // Thus to get the correct EBP relative offset we have to ajust by info.stackSize-sizeof(void*)
+ // Thus to get the correct EBP relative offset we have to adjust by info.stackSize-sizeof(void*)
ptrAddr = EBP + (stkOffs-(info.stackSize - sizeof(void*)));
}
diff --git a/src/vm/eventtrace.cpp b/src/vm/eventtrace.cpp
index 3de630f12a..00d519d2b8 100644
--- a/src/vm/eventtrace.cpp
+++ b/src/vm/eventtrace.cpp
@@ -5361,7 +5361,7 @@ VOID ETW::MethodLog::StubInitialized(ULONGLONG ullHelperStartAddress, LPCWSTR pH
/**********************************************************/
/* This is called by the runtime when helpers with stubs are initialized */
/**********************************************************/
-VOID ETW::MethodLog::StubsInitialized(PVOID *pHelperStartAddresss, PVOID *pHelperNames, LONG lNoOfHelpers)
+VOID ETW::MethodLog::StubsInitialized(PVOID *pHelperStartAddress, PVOID *pHelperNames, LONG lNoOfHelpers)
{
WRAPPER_NO_CONTRACT;
@@ -5371,9 +5371,9 @@ VOID ETW::MethodLog::StubsInitialized(PVOID *pHelperStartAddresss, PVOID *pHelpe
{
for(int i=0; i<lNoOfHelpers; i++)
{
- if(pHelperStartAddresss[i])
+ if(pHelperStartAddress[i])
{
- StubInitialized((ULONGLONG)pHelperStartAddresss[i], (LPCWSTR)pHelperNames[i]);
+ StubInitialized((ULONGLONG)pHelperStartAddress[i], (LPCWSTR)pHelperNames[i]);
}
}
}
diff --git a/src/vm/excep.cpp b/src/vm/excep.cpp
index d4e00d635d..0c9e101542 100644
--- a/src/vm/excep.cpp
+++ b/src/vm/excep.cpp
@@ -12744,7 +12744,7 @@ StackWalkAction TAResetStateCallback(CrawlFrame* pCf, void* data)
}
#endif // WIN64EXCEPTIONS
-// This function will reset the thread abort state agains the specified thread if it is determined that
+// This function will reset the thread abort state against the specified thread if it is determined that
// there is no more managed code on the stack.
//
// Note: This function should be invoked ONLY during unwind.
diff --git a/src/vm/fieldmarshaler.cpp b/src/vm/fieldmarshaler.cpp
index dfbbb9aebb..1015657bac 100644
--- a/src/vm/fieldmarshaler.cpp
+++ b/src/vm/fieldmarshaler.cpp
@@ -1639,7 +1639,7 @@ VOID EEClassLayoutInfo::CollectLayoutFieldMetadataThrowing(
//
// Each field has an alignment requirement. The alignment-requirement
// of a scalar field is the smaller of its size and the declared packsize.
- // The alighnment-requirement of a struct field is the smaller of the
+ // The alignment-requirement of a struct field is the smaller of the
// declared packsize and the largest of the alignment-requirement
// of its fields. The alignment requirement of an array is that
// of one of its elements.
@@ -2027,7 +2027,7 @@ VOID LayoutUpdateNative(LPVOID *ppProtectedManagedData, SIZE_T offsetbias, Metho
}
// The cleanup work list is not used to clean up the native contents. It is used
- // to handle cleanup of any additionnal resources the FieldMarshalers allocate.
+ // to handle cleanup of any additional resources the FieldMarshalers allocate.
((BYTE*&)pFM) += MAXFIELDMARSHALERSIZE;
}
diff --git a/src/vm/gccover.cpp b/src/vm/gccover.cpp
index ca91687887..a604f857df 100644
--- a/src/vm/gccover.cpp
+++ b/src/vm/gccover.cpp
@@ -1611,7 +1611,7 @@ void DoGcStress (PCONTEXT regs, MethodDesc *pMD)
// @Todo: possible race here, might need to be fixed if it become a problem.
// It could become a problem if 64bit does partially interrupt work.
- // OK, we have the MD, mark the instruction afer the CALL
+ // OK, we have the MD, mark the instruction after the CALL
// appropriately
#ifdef _TARGET_ARM_
size_t instrLen = GetARMInstructionLength(nextInstr);
diff --git a/src/vm/gcenv.ee.cpp b/src/vm/gcenv.ee.cpp
index 4fb8dec638..0e781a425d 100644
--- a/src/vm/gcenv.ee.cpp
+++ b/src/vm/gcenv.ee.cpp
@@ -1009,7 +1009,7 @@ bool GCToEEInterface::ForceFullGCToBeBlocking()
// a blocking GC. In the past, this workaround was done to fix an Stress AV, but the root
// cause of the AV was never discovered and this workaround remains in place.
//
- // It would be nice if this were not necessary. However, it's not clear if the aformentioned
+ // It would be nice if this were not necessary. However, it's not clear if the aforementioned
// stress bug is still lurking and will return if this workaround is removed. We should
// do some experiments: remove this workaround and see if the stress bug still repros.
// If so, we should find the root cause instead of relying on this.
diff --git a/src/vm/i386/excepx86.cpp b/src/vm/i386/excepx86.cpp
index 9f558c457e..bc85ad3d62 100644
--- a/src/vm/i386/excepx86.cpp
+++ b/src/vm/i386/excepx86.cpp
@@ -3246,7 +3246,7 @@ void ResumeAtJitEH(CrawlFrame* pCf,
context.SetSP(pNestedHandlerExRecord);
// We might have moved the bottommost handler. The nested record itself is never
- // the bottom most handler -- it's pushed afte the fact. So we have to make the
+ // the bottom most handler -- it's pushed after the fact. So we have to make the
// bottom-most handler the one BEFORE the nested record.
if (pExInfo->m_pBottomMostHandler < pNewBottomMostHandler)
{
diff --git a/src/vm/i386/jitinterfacex86.cpp b/src/vm/i386/jitinterfacex86.cpp
index 4eccd35a76..58294ff3e7 100644
--- a/src/vm/i386/jitinterfacex86.cpp
+++ b/src/vm/i386/jitinterfacex86.cpp
@@ -1471,7 +1471,7 @@ const int PostGrow_CardTableSecondLocation = 36;
#ifndef CODECOVERAGE // Deactivate alignment validation for code coverage builds
- // because the instrumented binaries will not preserve alignmant constraits and we will fail.
+ // because the instrumented binaries will not preserve alignment constraints and we will fail.
void ValidateWriteBarrierHelpers()
{
diff --git a/src/vm/jitinterface.cpp b/src/vm/jitinterface.cpp
index d886091d6c..e8e1ea7a63 100644
--- a/src/vm/jitinterface.cpp
+++ b/src/vm/jitinterface.cpp
@@ -2789,7 +2789,7 @@ void CEEInfo::ScanInstantiation(Module * pModule, Instantiation inst)
// is not "active". And we don't want to intercept every call during runtime, so during compile time we track static calls and
// everything that can result in new virtual calls.
//
-// The current algoritm (scan the parent type chain and instantiation variables) is more than enough to maintain this invariant.
+// The current algorithm (scan the parent type chain and instantiation variables) is more than enough to maintain this invariant.
// One could come up with a more efficient algorithm that still maintains the invariant, but it may introduce backward compatibility
// issues.
//
diff --git a/src/vm/safehandle.cpp b/src/vm/safehandle.cpp
index 23e415d8bc..369fbb9863 100644
--- a/src/vm/safehandle.cpp
+++ b/src/vm/safehandle.cpp
@@ -209,7 +209,7 @@ void SafeHandle::Release(bool fDispose)
} while (InterlockedCompareExchange((LONG*)&sh->m_state, newState, oldState) != oldState);
- // If we get here we successfully decremented the ref count. Additonally we
+ // If we get here we successfully decremented the ref count. Additionally we
// may have decremented it to zero and set the handle state as closed. In
// this case (providng we own the handle) we will call the ReleaseHandle
// method on the SafeHandle subclass.
diff --git a/src/vm/simplerwlock.cpp b/src/vm/simplerwlock.cpp
index 8c5b50fa39..a6d3ac24e3 100644
--- a/src/vm/simplerwlock.cpp
+++ b/src/vm/simplerwlock.cpp
@@ -241,7 +241,7 @@ void SimpleRWLock::CheckGCNoTrigger()
#ifdef _DEBUG
//=====================================================================
-// GC mode assertions before acquringing a lock based on its mode.
+// GC mode assertions before acquiring a lock based on its mode.
//=====================================================================
void SimpleRWLock::PreEnter()
{
diff --git a/src/vm/stackprobe.cpp b/src/vm/stackprobe.cpp
index ef997ff5e5..fd0488c222 100644
--- a/src/vm/stackprobe.cpp
+++ b/src/vm/stackprobe.cpp
@@ -666,7 +666,7 @@ void TerminateStackProbes()
//-----------------------------------------------------------------------------
// Error handling when we go past a stack guard.
-// We have different messages to more aggresively diagnose the problem
+// We have different messages to more aggressively diagnose the problem
//-----------------------------------------------------------------------------
// Called by Check_Stack when we overwrite the cookie
diff --git a/src/vm/threadsuspend.cpp b/src/vm/threadsuspend.cpp
index 79a8069c8b..65a3841525 100644
--- a/src/vm/threadsuspend.cpp
+++ b/src/vm/threadsuspend.cpp
@@ -6624,7 +6624,7 @@ StackWalkAction SWCB_GetExecutionStateForSwitchIn(CrawlFrame *pCF, VOID *pData)
// call to GetThreadContext. This feature exists on all Win64 platforms, so this change is only for 32-bit
// platforms. We've asked for this fix to be applied to future 32-bit OS's, so we can remove this on those
// platforms when that happens. Furthermore, once we stop supporting the older 32-bit OS versions that don't have
-// the new feature, we can remove these alltogether.
+// the new feature, we can remove these altogether.
//
// WARNING: Interrupts (int 3) immediately increment the IP whereas traps (AVs) do not.
// So this heuristic only works for trap, but not for interrupts. As a result, the race
diff --git a/src/vm/virtualcallstub.cpp b/src/vm/virtualcallstub.cpp
index 8040cbf8f2..cbffe925a8 100644
--- a/src/vm/virtualcallstub.cpp
+++ b/src/vm/virtualcallstub.cpp
@@ -525,7 +525,7 @@ void VirtualCallStubManager::Init(BaseDomain *pDomain, LoaderAllocator *pLoaderA
//
// Setup an expected number of items to commit and reserve
//
- // The commit number is not that important as we alwasy commit at least one page worth of items
+ // The commit number is not that important as we always commit at least one page worth of items
// The reserve number shoudl be high enough to cover a typical lare application,
// in order to minimize the fragmentation of our rangelists
//