author     Suchiman <robinsue@live.de>        2019-06-11 20:18:39 +0200
committer  Jan Kotas <jkotas@microsoft.com>   2019-06-11 20:26:23 -0700
commit     7d0bffa8e5cea35b498dedb36393e0620377d03e
tree       2f56b915da19e509cf39936906be13f74e09b80a
parent     a12705bfc76d6f7d7c9f795acffa92a539662b70
Port typo fixes from CoreRT
 src/gc/gc.cpp              | 54
 src/gc/gcrecord.h          |  6
 src/gc/handletable.cpp     |  4
 src/gc/handletablecore.cpp | 16
 src/gc/handletablescan.cpp | 14
 src/gc/objecthandle.cpp    |  4
 6 files changed, 49 insertions(+), 49 deletions(-)
diff --git a/src/gc/gc.cpp b/src/gc/gc.cpp
index 35f46d4b90..e59c7a931a 100644
--- a/src/gc/gc.cpp
+++ b/src/gc/gc.cpp
@@ -3099,7 +3099,7 @@ void gc_heap::fire_pevents()
                 gc_data_global.condemned_generation,
                 gc_data_global.gen0_reduction_count,
                 gc_data_global.reason,
-                gc_data_global.global_mechanims_p,
+                gc_data_global.global_mechanisms_p,
                 gc_data_global.pause_mode,
                 gc_data_global.mem_pressure);
@@ -3242,7 +3242,7 @@ gc_heap::dt_estimate_reclaim_space_p (gc_tuning_point tp, int gen_number)
 }
 
 // DTREVIEW: Right now we only estimate gen2 fragmentation.
-// on 64-bit though we should consider gen1 or even gen0 fragmentatioin as
+// on 64-bit though we should consider gen1 or even gen0 fragmentation as
 // well
 inline
 BOOL gc_heap::dt_estimate_high_frag_p (gc_tuning_point tp, int gen_number, uint64_t available_mem)
@@ -3798,7 +3798,7 @@ gc_heap* seg_mapping_table_heap_of_worker (uint8_t* o)
 
     gc_heap* hp = ((o > entry->boundary) ? entry->h1 : entry->h0);
 
-    dprintf (2, ("checking obj %Ix, index is %Id, entry: boundry: %Ix, h0: %Ix, seg0: %Ix, h1: %Ix, seg1: %Ix",
+    dprintf (2, ("checking obj %Ix, index is %Id, entry: boundary: %Ix, h0: %Ix, seg0: %Ix, h1: %Ix, seg1: %Ix",
         o, index, (entry->boundary + 1),
         (uint8_t*)(entry->h0), (uint8_t*)(entry->seg0),
         (uint8_t*)(entry->h1), (uint8_t*)(entry->seg1)));
@@ -3867,7 +3867,7 @@ heap_segment* seg_mapping_table_segment_of (uint8_t* o)
     size_t index = (size_t)o >> gc_heap::min_segment_size_shr;
     seg_mapping* entry = &seg_mapping_table[index];
 
-    dprintf (2, ("checking obj %Ix, index is %Id, entry: boundry: %Ix, seg0: %Ix, seg1: %Ix",
+    dprintf (2, ("checking obj %Ix, index is %Id, entry: boundary: %Ix, seg0: %Ix, seg1: %Ix",
         o, index, (entry->boundary + 1),
         (uint8_t*)(entry->seg0), (uint8_t*)(entry->seg1)));
@@ -5573,7 +5573,7 @@ public:
     // Supposedly Pinned objects cannot have references but we are seeing some from pinvoke
    // frames. Also if it's an artificially pinned plug created by us, it can certainly
    // have references.
-    // We know these cases will be rare so we can optimize this to be only allocated on decommand.
+    // We know these cases will be rare so we can optimize this to be only allocated on demand.
     gap_reloc_pair saved_post_plug_reloc;
 
     // We need to calculate this after we are done with plan phase and before compact
@@ -5845,7 +5845,7 @@ void gc_mechanisms::record (gc_history_global* history)
     history->reason = reason;
     history->pause_mode = (int)pause_mode;
     history->mem_pressure = entry_memory_load;
-    history->global_mechanims_p = 0;
+    history->global_mechanisms_p = 0;
 
     // start setting the boolean values.
     if (concurrent)
@@ -6374,7 +6374,7 @@ void gc_heap::set_pinned_info (uint8_t* last_pinned_plug, size_t plug_len, gener
 
 size_t gc_heap::deque_pinned_plug ()
 {
-    dprintf (3, ("dequed: %Id", mark_stack_bos));
+    dprintf (3, ("deque: %Id", mark_stack_bos));
     size_t m = mark_stack_bos;
     mark_stack_bos++;
     return m;
@@ -7265,7 +7265,7 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
     if ((la != saved_g_lowest_address ) || (ha != saved_g_highest_address))
     {
         {
-            //modify the higest address so the span covered
+            //modify the highest address so the span covered
             //is twice the previous one.
             uint8_t* top = (uint8_t*)0 + Align (GCToOSInterface::GetVirtualMemoryLimit());
             // On non-Windows systems, we get only an approximate value that can possibly be
@@ -8520,7 +8520,7 @@ void gc_heap::combine_mark_lists()
         assert (end_of_list < &g_mark_list [n_heaps*mark_list_size]);
         if (end_of_list > &g_mark_list[0])
             _sort (&g_mark_list[0], end_of_list, 0);
-        //adjust the mark_list to the begining of the resulting mark list.
+        //adjust the mark_list to the beginning of the resulting mark list.
         for (int i = 0; i < n_heaps; i++)
         {
             g_heaps [i]->mark_list = g_mark_list;
@@ -8531,7 +8531,7 @@ void gc_heap::combine_mark_lists()
     else
     {
         uint8_t** end_of_list = g_mark_list;
-        //adjust the mark_list to the begining of the resulting mark list.
+        //adjust the mark_list to the beginning of the resulting mark list.
         //put the index beyond the end to turn off mark list processing
         for (int i = 0; i < n_heaps; i++)
         {
@@ -8556,7 +8556,7 @@ class seg_free_spaces
     struct free_space_bucket
     {
         seg_free_space* free_space;
-        ptrdiff_t count_add; // Assigned when we first contruct the array.
+        ptrdiff_t count_add; // Assigned when we first construct the array.
         ptrdiff_t count_fit; // How many items left when we are fitting plugs.
     };
@@ -8840,7 +8840,7 @@ public:
         // BARTOKTODO (4841): this code path is disabled (see can_fit_all_blocks_p) until we take alignment requirements into account
         _ASSERTE(requiredAlignment == DATA_ALIGNMENT && false);
 #endif // FEATURE_STRUCTALIGN
-        // TODO: this is also not large alignment ready. We would need to consider alignment when chosing the
+        // TODO: this is also not large alignment ready. We would need to consider alignment when choosing the
         // the bucket.
 
         size_t plug_size_to_fit = plug_size;
@@ -9760,7 +9760,7 @@ void gc_heap::restart_vm()
 {
     //assert (generation_allocation_pointer (youngest_generation) == 0);
     dprintf (3, ("Restarting EE"));
-    STRESS_LOG0(LF_GC, LL_INFO10000, "Concurrent GC: Retarting EE\n");
+    STRESS_LOG0(LF_GC, LL_INFO10000, "Concurrent GC: Restarting EE\n");
     ee_proceed_event.Set();
 }
@@ -10652,7 +10652,7 @@ gc_heap::init_gc_heap (int h_number)
 {
 #ifndef INTERIOR_POINTERS
     //set the brick_table for large objects
-    //but default value is clearded
+    //but default value is cleared
     //clear_brick_table ((uint8_t*)heap_segment_mem (lseg),
     //                   (uint8_t*)heap_segment_reserved (lseg));
@@ -15203,7 +15203,7 @@ int gc_heap::generation_to_condemn (int n_initial,
         }
     }
 
-    //figure out which ephemeral generation is too fragramented
+    //figure out which ephemeral generation is too fragmented
     temp_gen = n;
     for (i = n+1; i < max_generation; i++)
     {
@@ -25006,7 +25006,7 @@ void gc_heap::copy_cards_range (uint8_t* dest, uint8_t* src, size_t len, BOOL co
         clear_card_for_addresses (dest, dest + len);
 }
 
-// POPO TODO: We should actually just recover the artifically made gaps here..because when we copy
+// POPO TODO: We should actually just recover the artificially made gaps here..because when we copy
 // we always copy the earlier plugs first which means we won't need the gap sizes anymore. This way
 // we won't need to individually recover each overwritten part of plugs.
 inline
@@ -25989,7 +25989,7 @@ BOOL gc_heap::commit_mark_array_bgc_init (uint32_t* mark_array_addr)
 // the mark_array flag for these segments will remain the same.
 BOOL gc_heap::commit_new_mark_array (uint32_t* new_mark_array_addr)
 {
-    dprintf (GC_TABLE_LOG, ("commiting existing segs on MA %Ix", new_mark_array_addr));
+    dprintf (GC_TABLE_LOG, ("committing existing segs on MA %Ix", new_mark_array_addr));
     generation* gen = generation_of (max_generation);
     heap_segment* seg = heap_segment_in_range (generation_start_segment (gen));
     while (1)
@@ -27220,7 +27220,7 @@ BOOL gc_heap::prepare_bgc_thread(gc_heap* gh)
     gh->bgc_threads_timeout_cs.Enter();
     if (!(gh->bgc_thread_running))
     {
-        dprintf (2, ("GC thread not runnning"));
+        dprintf (2, ("GC thread not running"));
         if ((gh->bgc_thread == 0) && create_bgc_thread(gh))
         {
             success = TRUE;
@@ -28545,7 +28545,7 @@ void gc_heap::count_plug (size_t last_plug_size, uint8_t*& last_plug)
     {
         deque_pinned_plug();
         update_oldest_pinned_plug();
-        dprintf (3, ("dequed pin,now oldest pin is %Ix", pinned_plug (oldest_pin())));
+        dprintf (3, ("deque pin,now oldest pin is %Ix", pinned_plug (oldest_pin())));
     }
     else
     {
@@ -34669,7 +34669,7 @@ static int32_t GCStressCurCount = 0;
 static int32_t GCStressStartAtJit = -1;
 
 // the maximum number of foreground GCs we'll induce during one BGC
-// (this number does not include "naturally" occuring GCs).
+// (this number does not include "naturally" occurring GCs).
 static int32_t GCStressMaxFGCsPerBGC = -1;
 
 // CLRRandom implementation can produce FPU exceptions if
@@ -34830,7 +34830,7 @@ bool GCHeap::StressHeap(gc_alloc_context * context)
         if (str)
         {
             // Chop off the end of the string and form a new object out of it.
-            // This will 'free' an object at the begining of the heap, which will
+            // This will 'free' an object at the beginning of the heap, which will
             // force data movement. Note that we can only do this so many times.
             // before we have to move on to the next string.
             unsigned sizeOfNewObj = (unsigned)Align(min_obj_size * 31);
@@ -35552,7 +35552,7 @@ void gc_heap::record_interesting_info_per_heap()
             heap_number,
             (size_t)settings.gc_index,
             settings.condemned_generation,
-            // TEMP - I am just doing this for wks GC 'cuase I wanna see the pattern of doing C/S GCs.
+            // TEMP - I am just doing this for wks GC 'cause I wanna see the pattern of doing C/S GCs.
             (settings.compaction ? (((compact_reason >= 0) && gc_heap_compact_reason_mandatory_p[compact_reason]) ? "M" : "W") : ""), // compaction
             ((expand_mechanism >= 0)? "X" : ""), // EX
             ((expand_mechanism == expand_reuse_normal) ? "X" : ""), // NF
@@ -35994,7 +35994,7 @@ GCHeap::GarbageCollectGeneration (unsigned int gen, gc_reason reason)
 size_t GCHeap::GetTotalBytesInUse ()
 {
 #ifdef MULTIPLE_HEAPS
-    //enumarate all the heaps and get their size.
+    //enumerate all the heaps and get their size.
     size_t tot_size = 0;
     for (int i = 0; i < gc_heap::n_heaps; i++)
     {
@@ -36225,10 +36225,10 @@ int GCHeap::GetLOHCompactionMode()
     return pGenGCHeap->loh_compaction_mode;
 }
 
-void GCHeap::SetLOHCompactionMode (int newLOHCompactionyMode)
+void GCHeap::SetLOHCompactionMode (int newLOHCompactionMode)
 {
 #ifdef FEATURE_LOH_COMPACTION
-    pGenGCHeap->loh_compaction_mode = (gc_loh_compaction_mode)newLOHCompactionyMode;
+    pGenGCHeap->loh_compaction_mode = (gc_loh_compaction_mode)newLOHCompactionMode;
 #endif //FEATURE_LOH_COMPACTION
 }
@@ -36341,7 +36341,7 @@ HRESULT GCHeap::GetGcCounters(int gen, gc_counters* counters)
     counters->promoted_size = 0;
     counters->collection_count = 0;
 
-    //enumarate all the heaps and get their counters.
+    //enumerate all the heaps and get their counters.
     for (int i = 0; i < gc_heap::n_heaps; i++)
     {
         dynamic_data* dd = gc_heap::g_heaps [i]->dynamic_data_of (gen);
@@ -36461,7 +36461,7 @@ Object* GCHeap::GetNextFinalizableObject()
         if (O)
             return O;
     }
-    //return the first non crtitical/critical one in the first queue.
+    //return the first non critical/critical one in the first queue.
     for (int hn = 0; hn < gc_heap::n_heaps; hn++)
     {
         gc_heap* hp = gc_heap::g_heaps [hn];
diff --git a/src/gc/gcrecord.h b/src/gc/gcrecord.h
index 30966953e2..c00f8b0826 100644
--- a/src/gc/gcrecord.h
+++ b/src/gc/gcrecord.h
@@ -409,16 +409,16 @@ struct gc_history_global
     gc_reason reason;
     int pause_mode;
     uint32_t mem_pressure;
-    uint32_t global_mechanims_p;
+    uint32_t global_mechanisms_p;
 
     void set_mechanism_p (gc_global_mechanism_p mechanism)
     {
-        global_mechanims_p |= (1 << mechanism);
+        global_mechanisms_p |= (1 << mechanism);
     }
 
     BOOL get_mechanism_p (gc_global_mechanism_p mechanism)
     {
-        return (global_mechanims_p & (1 << mechanism));
+        return (global_mechanisms_p & (1 << mechanism));
     }
 
     void print();
diff --git a/src/gc/handletable.cpp b/src/gc/handletable.cpp
index 221911500c..0913ee6c4b 100644
--- a/src/gc/handletable.cpp
+++ b/src/gc/handletable.cpp
@@ -678,7 +678,7 @@ void HndEnumHandles(HHANDLETABLE hTable, const uint32_t *puType, uint32_t uTypeC
  *
  * Multiple type scanning entrypoint for GC.
  *
- * This entrypoint is provided for GC-time scnas of the handle table ONLY. It
+ * This entrypoint is provided for GC-time scans of the handle table ONLY. It
  * enables ephemeral scanning of the table, and optionally ages the write barrier
  * as it scans.
  *
@@ -802,7 +802,7 @@ void HndScanHandlesForGC(HHANDLETABLE hTable, HANDLESCANPROC scanProc, uintptr_t
 /*
  * HndResetAgeMap
  *
- * Service to forceably reset the age map for a set of handles.
+ * Service to forcibly reset the age map for a set of handles.
  *
  * Provided for GC-time resetting the handle table's write barrier. This is not
  * normally advisable, as it increases the amount of work that will be done in
diff --git a/src/gc/handletablecore.cpp b/src/gc/handletablecore.cpp
index ee2d8873a8..92bc32b84a 100644
--- a/src/gc/handletablecore.cpp
+++ b/src/gc/handletablecore.cpp
@@ -125,7 +125,7 @@ void QuickSort(uintptr_t *pData, int left, int right, PFNCOMPARE pfnCompare)
  *
  * Returns:
  *  <0 - handle P should be freed before handle Q
- *  =0 - handles are eqivalent for free order purposes
+ *  =0 - handles are equivalent for free order purposes
  *  >0 - handle Q should be freed before handle P
  *
 */
@@ -238,7 +238,7 @@ BOOL TableCanFreeSegmentNow(HandleTable *pTable, TableSegment *pSegment)
     _ASSERTE(threadId.IsCurrentThread());
 #endif // _DEBUG
 
-    // deterine if any segment is currently being scanned asynchronously
+    // determine if any segment is currently being scanned asynchronously
     TableSegment *pSegmentAsync = NULL;
 
     // do we have async info?
@@ -1315,7 +1315,7 @@ void SegmentTrimExcessPages(TableSegment *pSegment)
 /*
  * BlockAllocHandlesInMask
  *
- * Attempts to allocate the requested number of handes of the specified type,
+ * Attempts to allocate the requested number of handles of the specified type,
  * from the specified mask of the specified handle block.
  *
  * Returns the number of available handles actually allocated.
@@ -1475,7 +1475,7 @@ uint32_t BlockAllocHandlesInitial(TableSegment *pSegment, uint32_t uType, uint32
 /*
  * BlockAllocHandles
  *
- * Attempts to allocate the requested number of handes of the specified type,
+ * Attempts to allocate the requested number of handles of the specified type,
  * from the specified handle block.
  *
  * Returns the number of available handles actually allocated.
@@ -1533,7 +1533,7 @@ uint32_t BlockAllocHandles(TableSegment *pSegment, uint32_t uBlock, OBJECTHANDLE
 /*
  * SegmentAllocHandlesFromTypeChain
  *
- * Attempts to allocate the requested number of handes of the specified type,
+ * Attempts to allocate the requested number of handles of the specified type,
  * from the specified segment's block chain for the specified type. This routine
  * ONLY scavenges existing blocks in the type chain. No new blocks are committed.
 *
@@ -1617,7 +1617,7 @@ uint32_t SegmentAllocHandlesFromTypeChain(TableSegment *pSegment, uint32_t uType
 /*
  * SegmentAllocHandlesFromFreeList
  *
- * Attempts to allocate the requested number of handes of the specified type,
+ * Attempts to allocate the requested number of handles of the specified type,
  * by committing blocks from the free list to that type's type chain.
 *
 * Returns the number of available handles actually allocated.
@@ -1676,7 +1676,7 @@ uint32_t SegmentAllocHandlesFromFreeList(TableSegment *pSegment, uint32_t uType,
 /*
  * SegmentAllocHandles
  *
- * Attempts to allocate the requested number of handes of the specified type,
+ * Attempts to allocate the requested number of handles of the specified type,
 * from the specified segment.
 *
 * Returns the number of available handles actually allocated.
@@ -1714,7 +1714,7 @@ uint32_t SegmentAllocHandles(TableSegment *pSegment, uint32_t uType, OBJECTHANDL
 /*
  * TableAllocBulkHandles
  *
- * Attempts to allocate the requested number of handes of the specified type.
+ * Attempts to allocate the requested number of handles of the specified type.
 *
 * Returns the number of handles that were actually allocated. This is always
 * the same as the number of handles requested except in out-of-memory conditions,
diff --git a/src/gc/handletablescan.cpp b/src/gc/handletablescan.cpp
index 1c67cb35c4..82aae2f72a 100644
--- a/src/gc/handletablescan.cpp
+++ b/src/gc/handletablescan.cpp
@@ -42,7 +42,7 @@ MaskDWORD is also non-zero.
 2. AgeEphemeral. When Ephemeral GC happens, ages for handles which belong to the GC condemned generation should be
 incremented by 1. The operation is done by calculating a new uint32_t using the old uint32_t value:
         NewGenerationDWORD = COMPUTE_AGED_CLUMPS(OldGenerationDWORD, BuildAgeMask(condemnedGeneration, MaxGen))
-so that if a byte in OldGenerationDWORD is smaller than or equals to condemnedGeneration. the coresponding byte in
+so that if a byte in OldGenerationDWORD is smaller than or equals to condemnedGeneration. the corresponding byte in
 NewGenerationDWORD is 1 bigger than the old value, otherwise it remains unchanged.
 
 3. Age. Similar as AgeEphemeral, but we use a special mask if condemned generation is max gen (2):
@@ -114,7 +114,7 @@ If you change any of those algorithm, please verify it by this program:
             assert (mask == 0);
             return;
         }
-        //any generaion bigger than 2 is actually 2
+        //any generation bigger than 2 is actually 2
         if (gen > 2)
             gen = 2;
@@ -712,10 +712,10 @@ void CALLBACK BlockScanBlocksEphemeral(PTR_TableSegment pSegment, uint32_t uBloc
     uint32_t *pdwGen     = (uint32_t *)pSegment->rgGeneration + uBlock;
     uint32_t *pdwGenLast = pdwGen + uCount;
 
-    // loop over all the blocks, checking for elligible clumps as we go
+    // loop over all the blocks, checking for eligible clumps as we go
     do
     {
-        // determine if any clumps in this block are elligible
+        // determine if any clumps in this block are eligible
         uint32_t dwClumpMask = COMPUTE_CLUMP_MASK(*pdwGen, dwAgeMask);
 
         // if there are any clumps to scan then scan them now
@@ -1396,7 +1396,7 @@ PTR_TableSegment CALLBACK StandardSegmentIterator(PTR_HandleTable pTable, PTR_Ta
     PTR_TableSegment pNextSegment = QuickSegmentIterator(pTable, pPrevSegment);
 
 #ifndef DACCESS_COMPILE
-    // re-sort the block chains if neccessary
+    // re-sort the block chains if necessary
     if (pNextSegment && pNextSegment->fResortChains)
         SegmentResortChains(pNextSegment);
 #endif
@@ -1592,7 +1592,7 @@ void SegmentScanByTypeChain(PTR_TableSegment pSegment, uint32_t uType, BLOCKSCAN
         } while ((uNext == uLast) && (uNext != uHead));
 
-        // call the calback for this group of blocks
+        // call the callback for this group of blocks
         pfnBlockHandler(pSegment, uBlock, (uLast - uBlock), pInfo);
 
         // advance to the next block
@@ -1656,7 +1656,7 @@ void SegmentScanByTypeMap(PTR_TableSegment pSegment, const BOOL *rgTypeInclusion
                 break;
         }
 
-        // call the calback for the group of blocks we found
+        // call the callback for the group of blocks we found
         pfnBlockHandler(pSegment, uFirst, (uBlock - uFirst), pInfo);
 
         // look for another range starting with the next block
diff --git a/src/gc/objecthandle.cpp b/src/gc/objecthandle.cpp
index aad2f0607c..789d0b8735 100644
--- a/src/gc/objecthandle.cpp
+++ b/src/gc/objecthandle.cpp
@@ -79,7 +79,7 @@ void CALLBACK PromoteRefCounted(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtra
     WRAPPER_NO_CONTRACT;
     UNREFERENCED_PARAMETER(pExtraInfo);
 
-    // there are too many races when asychnronously scanning ref-counted handles so we no longer support it
+    // there are too many races when asynchronously scanning ref-counted handles so we no longer support it
     _ASSERTE(!((ScanContext*)lp1)->concurrent);
 
     LOG((LF_GC, LL_INFO1000, LOG_HANDLE_OBJECT_CLASS("", pObjRef, "causes promotion of ", *pObjRef)));
@@ -1167,7 +1167,7 @@ void Ref_CheckReachable(uint32_t condemned, uint32_t maxgen, uintptr_t lp1)
 // strong handle to refer to the secondary as this could case a cycle in the graph if the secondary somehow
 // pointed back to the primary. Can't use weak handle because that would not keep the secondary object alive.
 //
-// The result is that a dependenHandle has the EFFECT of
+// The result is that a dependentHandle has the EFFECT of
 //   * long weak handles in both the primary and secondary objects
 //   * a strong reference from the primary object to the secondary one
 //
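
Note: the gcrecord.h hunk above is the source of the global_mechanims_p -> global_mechanisms_p rename; the field is a plain bitmask with one bit per global GC mechanism, set and tested by set_mechanism_p/get_mechanism_p. A minimal standalone sketch of that same set/test pattern follows, with hypothetical enum values for illustration only (the real gc_global_mechanism_p values live elsewhere in the GC headers):

#include <cstdint>
#include <cstdio>

// Hypothetical mechanism values, for illustration; not the CoreCLR enum.
enum gc_global_mechanism_p
{
    global_mechanism_concurrent = 0,
    global_mechanism_compaction = 1,
    global_mechanism_promotion  = 2
};

// Same shape as the patched gc_history_global members: one bit per mechanism.
struct gc_history_global_sketch
{
    uint32_t global_mechanisms_p = 0;

    void set_mechanism_p (gc_global_mechanism_p mechanism)
    {
        global_mechanisms_p |= (1u << mechanism);
    }

    bool get_mechanism_p (gc_global_mechanism_p mechanism) const
    {
        return (global_mechanisms_p & (1u << mechanism)) != 0;
    }
};

int main()
{
    gc_history_global_sketch history;
    history.set_mechanism_p (global_mechanism_compaction);
    // Prints "compaction: 1, concurrent: 0".
    printf ("compaction: %d, concurrent: %d\n",
            history.get_mechanism_p (global_mechanism_compaction),
            history.get_mechanism_p (global_mechanism_concurrent));
    return 0;
}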
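
Note: the handletablescan.cpp comment block above describes clump aging: each byte of a generation uint32_t holds one clump's age, and an ephemeral age pass increments every byte that is smaller than or equal to the condemned generation while leaving the rest untouched. The loop below is an illustrative byte-at-a-time restatement of that documented rule, not the branch-free COMPUTE_AGED_CLUMPS/BuildAgeMask implementation the file actually uses:

#include <cassert>
#include <cstdint>

// Apply the aging rule one byte (clump) at a time: ages <= condemned
// are bumped by 1, all other ages are left unchanged.
static uint32_t age_clumps_sketch (uint32_t generations, uint32_t condemned)
{
    for (int i = 0; i < 4; i++)
    {
        uint32_t age = (generations >> (8 * i)) & 0xFFu;
        if (age <= condemned)
        {
            generations &= ~(0xFFu << (8 * i));
            generations |= (age + 1) << (8 * i);
        }
    }
    return generations;
}

int main()
{
    // Four clumps with ages 0, 1, 2, 3; condemning gen 1 ages only the
    // first two clumps (0 -> 1, 1 -> 2), as the comment block describes.
    assert (age_clumps_sketch (0x03020100u, 1) == 0x03020201u);
    return 0;
}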