author      John Doe <github.john.doe@outlook.com>    2017-08-17 21:41:54 -0700
committer   Jan Kotas <jkotas@microsoft.com>          2017-08-17 21:41:54 -0700
commit      bed0a5e263324d3e035a2140c24cc70f1e143d93 (patch)
tree        e82c967bdb9286b760e916507f93883e8f2efc31 /src/gc
parent      63411df7707e0636efca86f15280d961050d452a (diff)
Typo (#13444)
Diffstat (limited to 'src/gc')
-rw-r--r--   src/gc/gc.cpp   |   14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/src/gc/gc.cpp b/src/gc/gc.cpp
index ede1ad22eb..89454c6bb0 100644
--- a/src/gc/gc.cpp
+++ b/src/gc/gc.cpp
@@ -906,7 +906,7 @@ respin:
}
// Reverse join - first thread gets here does the work; other threads will only proceed
- // afte the work is done.
+ // after the work is done.
// Note that you cannot call this twice in a row on the same thread. Plus there's no
// need to call it twice in row - you should just merge the work.
BOOL r_join (gc_heap* gch, int join_id)
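The comment above describes a "reverse join": the first thread to arrive claims the work, and every later arrival blocks until that work is finished. Below is a minimal standalone sketch of the pattern using std::mutex and std::condition_variable; the class and member names are illustrative, not the real gc_heap join machinery.

    #include <condition_variable>
    #include <mutex>

    class reverse_join_demo
    {
        std::mutex m_lock;
        std::condition_variable m_cv;
        bool m_claimed = false;   // a thread has taken the work
        bool m_done = false;      // the work has completed
    public:
        // Returns true only for the single thread that should do the work.
        bool try_claim ()
        {
            std::unique_lock<std::mutex> lock (m_lock);
            if (!m_claimed)
            {
                m_claimed = true;
                return true;
            }
            // Every other thread only proceeds after the work is done.
            m_cv.wait (lock, [this] { return m_done; });
            return false;
        }

        // Called once by the working thread when it finishes.
        void work_done ()
        {
            std::lock_guard<std::mutex> lock (m_lock);
            m_done = true;
            m_cv.notify_all ();
        }
    };

As the original comment warns, this state is one-shot: the same thread cannot usefully call it twice in a row, since the second call would just wait on work that was already merged into the first.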
@@ -5667,7 +5667,7 @@ public:
// We also need to recover the saved info because we'll need to recover it later.
//
// So we would call swap_p*_plug_and_saved once to recover the object info; then call
- // it again to recover the artifical gap.
+ // it again to recover the artificial gap.
void swap_pre_plug_and_saved()
{
gap_reloc_pair temp;
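The comment above relies on the swap being an involution: exchanging the saved bytes with the bytes currently sitting at the pre-plug location restores the object info on the first call and puts the artificial gap back on the second. A self-contained sketch of that idea follows; the struct size and names are assumptions for illustration, not the real plug layout.

    #include <cstring>

    struct saved_pre_plug_demo
    {
        unsigned char bytes[2 * sizeof (size_t)];
    };

    // Swapping twice is a no-op, so one call recovers the original object
    // info and a second call recovers the artificial gap.
    void swap_with_saved (unsigned char* pre_plug, saved_pre_plug_demo* saved)
    {
        saved_pre_plug_demo temp;
        memcpy (&temp, pre_plug, sizeof (temp));
        memcpy (pre_plug, saved->bytes, sizeof (temp));
        memcpy (saved->bytes, &temp, sizeof (temp));
    }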
@@ -9277,7 +9277,7 @@ void gc_heap::delete_heap_segment (heap_segment* seg, BOOL consider_hoarding)
}
}
-//resets the pages beyond alloctes size so they won't be swapped out and back in
+//resets the pages beyond allocates size so they won't be swapped out and back in
void gc_heap::reset_heap_segment_pages (heap_segment* seg)
{
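"Resetting" here means telling the OS that the page contents are disposable while keeping the range committed, so the pages can simply be discarded instead of being written to the pagefile and read back later. A hedged sketch of what that looks like against the Win32 API (an assumed environment; the real code goes through the GC's OS abstraction rather than calling Win32 directly):

    #include <windows.h>

    void reset_pages_demo (void* start, size_t size)
    {
        // MEM_RESET: the contents are no longer of interest, so the OS may
        // drop these pages rather than swap them out and back in. The
        // protection argument must be valid but is otherwise ignored.
        VirtualAlloc (start, size, MEM_RESET, PAGE_READWRITE);
    }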
@@ -12417,7 +12417,7 @@ BOOL gc_heap::allocate_small (int gen_number,
if (!commit_failed_p)
{
// some other threads already grabbed the more space lock and allocated
- // so we should attemp an ephemeral GC again.
+ // so we should attempt an ephemeral GC again.
assert (heap_segment_allocated (ephemeral_heap_segment) < alloc_allocated);
soh_alloc_state = a_state_trigger_ephemeral_gc;
}
@@ -12489,7 +12489,7 @@ BOOL gc_heap::allocate_small (int gen_number,
if (!commit_failed_p)
{
// some other threads already grabbed the more space lock and allocated
- // so we should attemp an ephemeral GC again.
+ // so we should attempt an ephemeral GC again.
assert (heap_segment_allocated (ephemeral_heap_segment) < alloc_allocated);
soh_alloc_state = a_state_trigger_ephemeral_gc;
}
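Both hunks above fix the same comment in the small-object allocation state machine. The reasoning: the fit attempt failed but committing memory did not, so another thread must have taken the more-space lock and used the space first; the right response is to loop back and trigger another ephemeral GC rather than escalate. A simplified sketch of that retry loop follows; try_fit_demo and run_ephemeral_gc_demo are hypothetical stubs, and only the state names echo the real code.

    enum soh_state_demo
    {
        a_state_try_fit,
        a_state_trigger_ephemeral_gc,
        a_state_can_allocate,
        a_state_cant_allocate
    };

    // Hypothetical stubs standing in for the real allocator and GC.
    static bool try_fit_demo (bool* commit_failed_p) { *commit_failed_p = false; return false; }
    static void run_ephemeral_gc_demo () { }

    soh_state_demo allocate_small_demo (int max_retries)
    {
        soh_state_demo state = a_state_try_fit;
        while (max_retries-- > 0)
        {
            switch (state)
            {
            case a_state_try_fit:
            {
                bool commit_failed_p = false;
                if (try_fit_demo (&commit_failed_p))
                    return a_state_can_allocate;
                // Commit succeeded, so some other thread grabbed the more
                // space lock and allocated - attempt an ephemeral GC again.
                state = commit_failed_p ? a_state_cant_allocate
                                        : a_state_trigger_ephemeral_gc;
                break;
            }
            case a_state_trigger_ephemeral_gc:
                run_ephemeral_gc_demo ();
                state = a_state_try_fit;
                break;
            default:
                return state;
            }
        }
        return a_state_cant_allocate;
    }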
@@ -22719,7 +22719,7 @@ void gc_heap::plan_phase (int condemned_gen_number)
assert (len >= Align (min_obj_size));
make_unused_array (arr, len);
// fix fully contained bricks + first one
- // if the array goes beyong the first brick
+ // if the array goes beyond the first brick
size_t start_brick = brick_of (arr);
size_t end_brick = brick_of (arr + len);
if (end_brick != start_brick)
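The brick table maps fixed-size address ranges ("bricks") to the object that starts in them, so when the new unused array spans more than one brick, the first brick and every brick fully contained in the array have to be fixed up. A sketch of the address-to-brick arithmetic involved (brick_size_demo is an assumed granularity; the real table stores its own signed-offset encoding):

    #include <cstddef>
    #include <cstdint>

    const size_t brick_size_demo = 4096;   // assumed brick granularity

    size_t brick_of_demo (uint8_t* addr, uint8_t* lowest_address)
    {
        // Bricks partition the heap into fixed-size chunks; an address's
        // brick is its offset from the heap base divided by the brick size.
        return (size_t)(addr - lowest_address) / brick_size_demo;
    }

With this, an array spanning [arr, arr + len) covers bricks brick_of_demo(arr, base) through brick_of_demo(arr + len, base); when those differ, the interior bricks need entries pointing back toward the brick that holds the array's start.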
@@ -29345,7 +29345,7 @@ generation* gc_heap::expand_heap (int condemned_generation,
eph_size += switch_alignment_size(FALSE);
#endif //RESPECT_LARGE_ALIGNMENT
//Since the generation start can be larger than min_obj_size
- //Compare the alignemnt of the first object in gen1
+ //Compare the alignment of the first object in gen1
if (grow_heap_segment (new_seg, heap_segment_mem (new_seg) + eph_size) == 0)
{
fgm_result.set_fgm (fgm_commit_eph_segment, eph_size, FALSE);
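The pattern in this hunk: pad eph_size for alignment, try to commit that much of the new segment for the ephemeral generations, and on failure record which step ran out of memory so the caller of expand_heap can tell why expansion failed. A sketch of that commit-and-record shape follows; commit_pages_demo and the struct are hypothetical stand-ins, with fgm echoing the GC's "failure to get memory" bookkeeping.

    #include <cstddef>
    #include <cstdint>

    enum fgm_demo { fgm_no_failure, fgm_commit_eph_segment_demo };

    struct fgm_history_demo
    {
        fgm_demo reason = fgm_no_failure;
        size_t size = 0;
        void set_fgm (fgm_demo r, size_t s) { reason = r; size = s; }
    };

    static bool commit_pages_demo (uint8_t*, size_t) { return true; }  // stub

    bool commit_ephemeral_demo (uint8_t* seg_mem, size_t eph_size,
                                fgm_history_demo* result)
    {
        if (!commit_pages_demo (seg_mem, eph_size))
        {
            // Record that committing the ephemeral portion of the new
            // segment is the step that failed, so the caller can report it.
            result->set_fgm (fgm_commit_eph_segment_demo, eph_size);
            return false;
        }
        return true;
    }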