Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Remove AllocLHeap. #33402

Merged
merged 5 commits into from
Mar 11, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
243 changes: 75 additions & 168 deletions src/coreclr/src/gc/gc.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -37106,187 +37106,74 @@ bool GCHeap::StressHeap(gc_alloc_context * context)
} \
} while (false)

//
// Small Object Allocator
//
//
// Allocate small object with an alignment requirement of 8-bytes.
//
// ctx   - per-thread allocation context owned by the caller (cast to alloc_context).
// size  - requested object size in bytes.
// flags - GC_ALLOC_* flags; GC_ALLOC_ALIGN8_BIAS means the object header must sit
//         midway between 8-byte boundaries so the payload lands aligned.
//
// Resolves the gc_heap to allocate from and forwards to AllocAlign8Common.
// Only meaningful when FEATURE_64BIT_ALIGNMENT is defined; otherwise this
// asserts and returns nullptr.
Object*
GCHeap::AllocAlign8(gc_alloc_context* ctx, size_t size, uint32_t flags )
{
#ifdef FEATURE_64BIT_ALIGNMENT
// NOTHROW/GC_TRIGGERS: allocation may trigger a garbage collection but never throws.
CONTRACTL {
NOTHROW;
GC_TRIGGERS;
} CONTRACTL_END;

alloc_context* acontext = static_cast<alloc_context*>(ctx);

#ifdef MULTIPLE_HEAPS
// Lazily bind the allocation context to a heap on its first allocation.
if (acontext->get_alloc_heap() == 0)
{
AssignHeap (acontext);
assert (acontext->get_alloc_heap());
}

// Server GC: allocate from the heap this context is affinitized to.
gc_heap* hp = acontext->get_alloc_heap()->pGenGCHeap;
#else
// Workstation GC: there is a single heap.
gc_heap* hp = pGenGCHeap;
#endif //MULTIPLE_HEAPS

return AllocAlign8Common(hp, acontext, size, flags);
#else
UNREFERENCED_PARAMETER(ctx);
UNREFERENCED_PARAMETER(size);
UNREFERENCED_PARAMETER(flags);
assert(!"should not call GCHeap::AllocAlign8 without FEATURE_64BIT_ALIGNMENT defined!");
return nullptr;
#endif //FEATURE_64BIT_ALIGNMENT
}

// Common code used by both variants of AllocAlign8 above.
Object*
GCHeap::AllocAlign8Common(void* _hp, alloc_context* acontext, size_t size, uint32_t flags)
// Allocate small object with an alignment requirement of 8-bytes.
Object* AllocAlign8(alloc_context* acontext, gc_heap* hp, size_t size, uint32_t flags)
{
#ifdef FEATURE_64BIT_ALIGNMENT
CONTRACTL {
NOTHROW;
GC_TRIGGERS;
} CONTRACTL_END;

gc_heap* hp = (gc_heap*)_hp;

TRIGGERSGC();

Object* newAlloc = NULL;

if (size < loh_size_threshold)
{
#ifdef TRACE_GC
AllocSmallCount++;
#endif //TRACE_GC

// Depending on where in the object the payload requiring 8-byte alignment resides we might have to
// align the object header on an 8-byte boundary or midway between two such boundaries. The unaligned
// case is indicated to the GC via the GC_ALLOC_ALIGN8_BIAS flag.
size_t desiredAlignment = (flags & GC_ALLOC_ALIGN8_BIAS) ? 4 : 0;
// Depending on where in the object the payload requiring 8-byte alignment resides we might have to
// align the object header on an 8-byte boundary or midway between two such boundaries. The unaligned
// case is indicated to the GC via the GC_ALLOC_ALIGN8_BIAS flag.
size_t desiredAlignment = (flags & GC_ALLOC_ALIGN8_BIAS) ? 4 : 0;

// Retrieve the address of the next allocation from the context (note that we're inside the alloc
// lock at this point).
uint8_t* result = acontext->alloc_ptr;
// Retrieve the address of the next allocation from the context (note that we're inside the alloc
// lock at this point).
uint8_t* result = acontext->alloc_ptr;

// Will an allocation at this point yield the correct alignment and fit into the remainder of the
// context?
if ((((size_t)result & 7) == desiredAlignment) && ((result + size) <= acontext->alloc_limit))
{
// Yes, we can just go ahead and make the allocation.
newAlloc = (Object*) hp->allocate (size, acontext, flags);
ASSERT(((size_t)newAlloc & 7) == desiredAlignment);
}
else
// Will an allocation at this point yield the correct alignment and fit into the remainder of the
// context?
if ((((size_t)result & 7) == desiredAlignment) && ((result + size) <= acontext->alloc_limit))
{
// Yes, we can just go ahead and make the allocation.
newAlloc = (Object*) hp->allocate (size, acontext, flags);
ASSERT(((size_t)newAlloc & 7) == desiredAlignment);
}
else
{
// No, either the next available address is not aligned in the way we require it or there's
// not enough space to allocate an object of the required size. In both cases we allocate a
// padding object (marked as a free object). This object's size is such that it will reverse
// the alignment of the next header (asserted below).
//
// We allocate both together then decide based on the result whether we'll format the space as
// free object + real object or real object + free object.
ASSERT((Align(min_obj_size) & 7) == 4);
CObjectHeader *freeobj = (CObjectHeader*) hp->allocate (Align(size) + Align(min_obj_size), acontext, flags);
if (freeobj)
{
// No, either the next available address is not aligned in the way we require it or there's
// not enough space to allocate an object of the required size. In both cases we allocate a
// padding object (marked as a free object). This object's size is such that it will reverse
// the alignment of the next header (asserted below).
//
// We allocate both together then decide based on the result whether we'll format the space as
// free object + real object or real object + free object.
ASSERT((Align(min_obj_size) & 7) == 4);
CObjectHeader *freeobj = (CObjectHeader*) hp->allocate (Align(size) + Align(min_obj_size), acontext, flags);
if (freeobj)
if (((size_t)freeobj & 7) == desiredAlignment)
{
if (((size_t)freeobj & 7) == desiredAlignment)
{
// New allocation has desired alignment, return this one and place the free object at the
// end of the allocated space.
newAlloc = (Object*)freeobj;
freeobj = (CObjectHeader*)((uint8_t*)freeobj + Align(size));
}
else
// New allocation has desired alignment, return this one and place the free object at the
// end of the allocated space.
newAlloc = (Object*)freeobj;
freeobj = (CObjectHeader*)((uint8_t*)freeobj + Align(size));
}
else
{
// New allocation is still mis-aligned, format the initial space as a free object and the
// rest of the space should be correctly aligned for the real object.
newAlloc = (Object*)((uint8_t*)freeobj + Align(min_obj_size));
ASSERT(((size_t)newAlloc & 7) == desiredAlignment);
if (flags & GC_ALLOC_ZEROING_OPTIONAL)
{
// New allocation is still mis-aligned, format the initial space as a free object and the
// rest of the space should be correctly aligned for the real object.
newAlloc = (Object*)((uint8_t*)freeobj + Align(min_obj_size));
ASSERT(((size_t)newAlloc & 7) == desiredAlignment);
if (flags & GC_ALLOC_ZEROING_OPTIONAL)
{
// clean the syncblock of the aligned object.
*(((PTR_PTR)newAlloc)-1) = 0;
}
// clean the syncblock of the aligned object.
*(((PTR_PTR)newAlloc)-1) = 0;
}
freeobj->SetFree(min_obj_size);
}
freeobj->SetFree(min_obj_size);
}
}
else
{
// The LOH always guarantees at least 8-byte alignment, regardless of platform. Moreover it doesn't
// support mis-aligned object headers so we can't support biased headers as above. Luckily for us
// we've managed to arrange things so the only case where we see a bias is for boxed value types and
// these can never get large enough to be allocated on the LOH.
ASSERT(65536 < loh_size_threshold);
ASSERT((flags & GC_ALLOC_ALIGN8_BIAS) == 0);

alloc_context* acontext = generation_alloc_context (hp->generation_of (loh_generation));

newAlloc = (Object*) hp->allocate_uoh_object (size, flags, loh_generation, acontext->alloc_bytes_uoh);
ASSERT(((size_t)newAlloc & 7) == 0);
}

CHECK_ALLOC_AND_POSSIBLY_REGISTER_FOR_FINALIZATION(newAlloc, size, flags & GC_ALLOC_FINALIZE);

#ifdef TRACE_GC
AllocCount++;
#endif //TRACE_GC
return newAlloc;
#else
UNREFERENCED_PARAMETER(_hp);
UNREFERENCED_PARAMETER(acontext);
UNREFERENCED_PARAMETER(size);
UNREFERENCED_PARAMETER(flags);
assert(!"Should not call GCHeap::AllocAlign8Common without FEATURE_64BIT_ALIGNMENT defined!");
return nullptr;
#endif // FEATURE_64BIT_ALIGNMENT
}

// Allocate an object directly on the large object heap (UOH/LOH), bypassing
// the per-thread allocation context.
//
// size  - requested object size in bytes (alignment padding may be added on
//         FEATURE_STRUCTALIGN builds).
// flags - GC_ALLOC_* flags; GC_ALLOC_FINALIZE causes finalizer registration.
//
// Returns the new object, or NULL if the allocation failed (the
// CHECK_ALLOC... macro handles the failure/finalization bookkeeping).
Object *
GCHeap::AllocLHeap( size_t size, uint32_t flags REQD_ALIGN_DCL)
{
// NOTHROW/GC_TRIGGERS: may trigger a collection but never throws.
CONTRACTL {
NOTHROW;
GC_TRIGGERS;
} CONTRACTL_END;

TRIGGERSGC();

Object* newAlloc = NULL;

#ifdef MULTIPLE_HEAPS
//take the first heap....
gc_heap* hp = gc_heap::g_heaps[0];
#else
gc_heap* hp = pGenGCHeap;
#ifdef _PREFAST_
// prefix complains about us dereferencing hp in wks build even though we only access static members
// this way. not sure how to shut it up except for this ugly workaround:
PREFIX_ASSUME(hp != NULL);
#endif //_PREFAST_
#endif //MULTIPLE_HEAPS

// Allocate from the LOH generation's shared allocation context; pad the size
// up front so alignment can be fixed up afterwards if required.
alloc_context* acontext = generation_alloc_context (hp->generation_of (loh_generation));
newAlloc = (Object*) hp->allocate_uoh_object (size + ComputeMaxStructAlignPadLarge(requiredAlignment), flags, loh_generation, acontext->alloc_bytes_uoh);

#ifdef FEATURE_STRUCTALIGN
// Shift the object within the padded allocation to satisfy requiredAlignment.
newAlloc = (Object*) hp->pad_for_alignment_large ((uint8_t*) newAlloc, requiredAlignment, size);
#endif // FEATURE_STRUCTALIGN
CHECK_ALLOC_AND_POSSIBLY_REGISTER_FOR_FINALIZATION(newAlloc, size, flags & GC_ALLOC_FINALIZE);

#ifdef TRACE_GC
AllocCount++;
#endif //TRACE_GC
return newAlloc;
}
#endif // FEATURE_64BIT_ALIGNMENT

Object*
GCHeap::Alloc(gc_alloc_context* context, size_t size, uint32_t flags REQD_ALIGN_DCL)
Expand Down Expand Up @@ -37317,23 +37204,43 @@ GCHeap::Alloc(gc_alloc_context* context, size_t size, uint32_t flags REQD_ALIGN_
#endif //_PREFAST_
#endif //MULTIPLE_HEAPS

if (size < loh_size_threshold)
if (size >= loh_size_threshold || (flags & GC_ALLOC_LARGE_OBJECT_HEAP))
{
// The LOH always guarantees at least 8-byte alignment, regardless of platform. Moreover it doesn't
// support mis-aligned object headers so we can't support biased headers. Luckily for us
// we've managed to arrange things so the only case where we see a bias is for boxed value types and
// these can never get large enough to be allocated on the LOH.
ASSERT((flags & GC_ALLOC_ALIGN8_BIAS) == 0);
ASSERT(65536 < loh_size_threshold);

newAlloc = (Object*) hp->allocate_uoh_object (size + ComputeMaxStructAlignPadLarge(requiredAlignment), flags, loh_generation, acontext->alloc_bytes_uoh);
ASSERT(((size_t)newAlloc & 7) == 0);

#ifdef TRACE_GC
AllocSmallCount++;
#endif //TRACE_GC
newAlloc = (Object*) hp->allocate (size + ComputeMaxStructAlignPad(requiredAlignment), acontext, flags);
#ifdef FEATURE_STRUCTALIGN
newAlloc = (Object*) hp->pad_for_alignment ((uint8_t*) newAlloc, requiredAlignment, size, acontext);
newAlloc = (Object*) hp->pad_for_alignment_large ((uint8_t*) newAlloc, requiredAlignment, size);
#endif // FEATURE_STRUCTALIGN
// ASSERT (newAlloc);
}
else
{
newAlloc = (Object*) hp->allocate_uoh_object (size + ComputeMaxStructAlignPadLarge(requiredAlignment), flags, loh_generation, acontext->alloc_bytes_uoh);
#ifdef TRACE_GC
AllocSmallCount++;
#endif //TRACE_GC

#ifdef FEATURE_64BIT_ALIGNMENT
if (flags & GC_ALLOC_ALIGN8)
{
newAlloc = AllocAlign8 (acontext, hp, size, flags);
}
else
#else
assert ((flags & GC_ALLOC_ALIGN8) == 0);
#endif
{
newAlloc = (Object*) hp->allocate (size + ComputeMaxStructAlignPad(requiredAlignment), acontext, flags);
}

#ifdef FEATURE_STRUCTALIGN
newAlloc = (Object*) hp->pad_for_alignment_large ((uint8_t*) newAlloc, requiredAlignment, size);
newAlloc = (Object*) hp->pad_for_alignment ((uint8_t*) newAlloc, requiredAlignment, size, acontext);
#endif // FEATURE_STRUCTALIGN
}

Expand Down
2 changes: 0 additions & 2 deletions src/coreclr/src/gc/gc.h
Original file line number Diff line number Diff line change
Expand Up @@ -261,8 +261,6 @@ class IGCHeapInternal : public IGCHeap {

virtual ~IGCHeapInternal() {}

private:
virtual Object* AllocAlign8Common (void* hp, alloc_context* acontext, size_t size, uint32_t flags) = 0;
public:
virtual int GetNumberOfHeaps () = 0;
virtual int GetHomeHeapNumber () = 0;
Expand Down
6 changes: 0 additions & 6 deletions src/coreclr/src/gc/gcimpl.h
Original file line number Diff line number Diff line change
Expand Up @@ -102,12 +102,6 @@ class GCHeap : public IGCHeapInternal

HRESULT Initialize ();

//flags can be GC_ALLOC_CONTAINS_REF GC_ALLOC_FINALIZE
Object* AllocAlign8 (gc_alloc_context* acontext, size_t size, uint32_t flags);
private:
Object* AllocAlign8Common (void* hp, alloc_context* acontext, size_t size, uint32_t flags);
public:
Object* AllocLHeap (size_t size, uint32_t flags);
Object* Alloc (gc_alloc_context* acontext, size_t size, uint32_t flags);

void FixAllocContext (gc_alloc_context* acontext, void* arg, void *heap);
Expand Down
15 changes: 3 additions & 12 deletions src/coreclr/src/gc/gcinterface.h
Original file line number Diff line number Diff line change
Expand Up @@ -770,19 +770,8 @@ class IGCHeap {
// a lock to ensure that the calling thread has unique ownership over this alloc context;
virtual Object* Alloc(gc_alloc_context* acontext, size_t size, uint32_t flags) = 0;

// Allocates an object on the large object heap with the given size and flags.
virtual Object* AllocLHeap(size_t size, uint32_t flags) = 0;

// Allocates an object on the given allocation context, aligned to 64 bits,
// with the given size and flags.
// It is the responsibility of the caller to ensure that the passed-in alloc context is
// owned by the thread that is calling this function. If using per-thread alloc contexts,
// no lock is needed; callers not using per-thread alloc contexts will need to acquire
// a lock to ensure that the calling thread has unique ownership over this alloc context.
virtual Object* AllocAlign8(gc_alloc_context* acontext, size_t size, uint32_t flags) = 0;

// This is for the allocator to indicate it's done allocating a large object during a
// background GC as the BGC threads also need to walk LOH.
// background GC as the BGC threads also need to walk UOH.
jkotas marked this conversation as resolved.
Show resolved Hide resolved
virtual void PublishObject(uint8_t* obj) = 0;

// Signals the WaitForGCEvent event, indicating that a GC has completed.
Expand Down Expand Up @@ -910,6 +899,8 @@ enum GC_ALLOC_FLAGS
GC_ALLOC_ALIGN8_BIAS = 4,
GC_ALLOC_ALIGN8 = 8,
GC_ALLOC_ZEROING_OPTIONAL = 16,
GC_ALLOC_LARGE_OBJECT_HEAP = 32,
GC_ALLOC_PINNED_OBJECT_HEAP = 64,
};

inline GC_ALLOC_FLAGS operator|(GC_ALLOC_FLAGS a, GC_ALLOC_FLAGS b)
Expand Down
Loading