path: root/src/vm
author     Sung Yoon Whang <suwhang@microsoft.com>    2019-04-24 15:09:00 -0700
committer  Andrew Au <cshung@gmail.com>               2019-05-01 18:33:08 -0700
commit     30130c3f75c684f7863efe59c5ca322861716118 (patch)
tree       daf577824659fb5cb157c6ea669643d2d35fa28f /src/vm
parent     1144acad9bd92e0bd7485a0231ece95e016e5178 (diff)
Pulling in Noah's fix in event_pipe_lock_fix branch
Diffstat (limited to 'src/vm')
-rw-r--r--  src/vm/eventpipe.cpp               30
-rw-r--r--  src/vm/eventpipe.h                  6
-rw-r--r--  src/vm/eventpipebuffer.cpp         44
-rw-r--r--  src/vm/eventpipebuffer.h           55
-rw-r--r--  src/vm/eventpipebuffermanager.cpp 554
-rw-r--r--  src/vm/eventpipebuffermanager.h   135
6 files changed, 544 insertions, 280 deletions
diff --git a/src/vm/eventpipe.cpp b/src/vm/eventpipe.cpp
index b7536c64a4..9e4ab02ed2 100644
--- a/src/vm/eventpipe.cpp
+++ b/src/vm/eventpipe.cpp
@@ -389,8 +389,9 @@ void EventPipe::DisableInternal(EventPipeSessionID id, EventPipeProviderCallback
// Delete the flush timer.
DeleteFlushTimerCallback();
- // Flush all write buffers to make sure that all threads see the change.
- FlushProcessWriteBuffers();
+ // Force all in-progress writes to either finish or cancel
+ // This is required to ensure we can safely flush and delete the buffers
+ s_pBufferManager->SuspendWriteEvent();
// Write to the file.
if (s_pFile != nullptr)
@@ -401,7 +402,10 @@ void EventPipe::DisableInternal(EventPipeSessionID id, EventPipeProviderCallback
if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_EventPipeRundown) > 0)
{
- // Before closing the file, do rundown.
+ // Before closing the file, do rundown. We have to re-enable event writing for this.
+
+ s_pBufferManager->ResumeWriteEvent();
+
const EventPipeProviderConfiguration RundownProviders[] = {
{W("Microsoft-Windows-DotNETRuntime"), 0x80020138, static_cast<unsigned int>(EventPipeEventLevel::Verbose), NULL}, // Public provider.
{W("Microsoft-Windows-DotNETRuntimeRundown"), 0x80020138, static_cast<unsigned int>(EventPipeEventLevel::Verbose), NULL} // Rundown provider.
@@ -425,6 +429,9 @@ void EventPipe::DisableInternal(EventPipeSessionID id, EventPipeProviderCallback
// Delete the rundown session.
s_pConfig->DeleteSession(s_pSession);
s_pSession = NULL;
+
+ // Suspend again after rundown session
+ s_pBufferManager->SuspendWriteEvent();
}
delete s_pFile;
@@ -437,6 +444,10 @@ void EventPipe::DisableInternal(EventPipeSessionID id, EventPipeProviderCallback
// Delete deferred providers.
// Providers can't be deleted during tracing because they may be needed when serializing the file.
s_pConfig->DeleteDeferredProviders();
+
+ // Allow WriteEvent to begin accepting work again so that sometime in the future
+ // we can re-enable events and they will be recorded
+ s_pBufferManager->ResumeWriteEvent();
}
}
@@ -912,4 +923,17 @@ EventPipeEventInstance *EventPipe::GetNextEvent()
EventPipeProvider::InvokeCallback(eventPipeProviderCallbackData);
}
+#ifdef _DEBUG
+/* static */ bool EventPipe::IsLockOwnedByCurrentThread()
+{
+ return GetLock()->OwnedByCurrentThread();
+}
+
+/* static */ bool EventPipe::IsBufferManagerLockOwnedByCurrentThread()
+{
+ return s_pBufferManager->IsLockOwnedByCurrentThread();
+}
+#endif
+
+
#endif // FEATURE_PERFTRACING
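The hunks above pair SuspendWriteEvent() and ResumeWriteEvent() around flushing, rundown, and teardown. The standalone sketch below illustrates only that pairing and the ordering it implies; ToyBufferManager, its methods, and DisableSketch() are hypothetical stand-ins, not runtime code.

#include <cassert>
#include <cstdio>

struct ToyBufferManager {
    bool suspended = false;
    void SuspendWriteEvent() { suspended = true; }
    void ResumeWriteEvent()  { suspended = false; }
    void FlushToFile()       { assert(suspended); std::puts("flush buffers to file"); }
};

void DisableSketch(ToyBufferManager& mgr, bool rundownEnabled) {
    mgr.SuspendWriteEvent();      // force in-progress writes to finish or cancel
    mgr.FlushToFile();            // safe: every buffer is READ_ONLY by now

    if (rundownEnabled) {
        mgr.ResumeWriteEvent();   // rundown events are written like ordinary events
        std::puts("emit rundown events");
        mgr.SuspendWriteEvent();  // quiesce again after the rundown session
    }

    std::puts("close file, delete deferred providers");
    mgr.ResumeWriteEvent();       // WriteEvent may accept work for a future session
}

int main() {
    ToyBufferManager mgr;
    DisableSketch(mgr, /* rundownEnabled */ true);
    return 0;
}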
diff --git a/src/vm/eventpipe.h b/src/vm/eventpipe.h
index 05a33a21a3..ed61fc31c3 100644
--- a/src/vm/eventpipe.h
+++ b/src/vm/eventpipe.h
@@ -333,6 +333,12 @@ public:
// Get next event.
static EventPipeEventInstance *GetNextEvent();
+#ifdef _DEBUG
+ static bool IsLockOwnedByCurrentThread();
+ static bool IsBufferManagerLockOwnedByCurrentThread();
+#endif
+
+
template<class T>
static void RunWithCallbackPostponed(T f)
{
diff --git a/src/vm/eventpipebuffer.cpp b/src/vm/eventpipebuffer.cpp
index 887fbc0a22..fc4c3c4a6c 100644
--- a/src/vm/eventpipebuffer.cpp
+++ b/src/vm/eventpipebuffer.cpp
@@ -7,10 +7,11 @@
#include "eventpipe.h"
#include "eventpipeeventinstance.h"
#include "eventpipebuffer.h"
+#include "eventpipebuffermanager.h"
#ifdef FEATURE_PERFTRACING
-EventPipeBuffer::EventPipeBuffer(unsigned int bufferSize)
+EventPipeBuffer::EventPipeBuffer(unsigned int bufferSize DEBUG_ARG(EventPipeThread* pWriterThread))
{
CONTRACTL
{
@@ -19,13 +20,18 @@ EventPipeBuffer::EventPipeBuffer(unsigned int bufferSize)
MODE_ANY;
}
CONTRACTL_END;
-
+ m_state = EventPipeBufferState::WRITABLE;
+#ifdef _DEBUG
+ m_pWriterThread = pWriterThread;
+#endif
m_pBuffer = new BYTE[bufferSize];
memset(m_pBuffer, 0, bufferSize);
m_pLimit = m_pBuffer + bufferSize;
m_pCurrent = GetNextAlignedAddress(m_pBuffer);
- m_mostRecentTimeStamp.QuadPart = 0;
+
+ QueryPerformanceCounter(&m_creationTimeStamp);
+ _ASSERTE(m_creationTimeStamp.QuadPart > 0);
m_pLastPoppedEvent = NULL;
m_pPrevBuffer = NULL;
m_pNextBuffer = NULL;
@@ -41,6 +47,9 @@ EventPipeBuffer::~EventPipeBuffer()
}
CONTRACTL_END;
+ // We should never be deleting a buffer that a writer thread might still try to write to
+ _ASSERTE(m_state == EventPipeBufferState::READ_ONLY);
+
if(m_pBuffer != NULL)
{
delete[] m_pBuffer;
@@ -58,6 +67,9 @@ bool EventPipeBuffer::WriteEvent(Thread *pThread, EventPipeSession &session, Eve
}
CONTRACTL_END;
+ // We should never try to write to a buffer that isn't expecting to be written to.
+ _ASSERTE(m_state == EventPipeBufferState::WRITABLE);
+
// Calculate the size of the event.
unsigned int eventSize = sizeof(EventPipeEventInstance) + payload.GetSize();
@@ -106,9 +118,6 @@ bool EventPipeBuffer::WriteEvent(Thread *pThread, EventPipeSession &session, Eve
payload.CopyData(pDataDest);
}
- // Save the most recent event timestamp.
- m_mostRecentTimeStamp = *pInstance->GetTimeStamp();
-
}
EX_CATCH
{
@@ -126,11 +135,11 @@ bool EventPipeBuffer::WriteEvent(Thread *pThread, EventPipeSession &session, Eve
return success;
}
-LARGE_INTEGER EventPipeBuffer::GetMostRecentTimeStamp() const
+LARGE_INTEGER EventPipeBuffer::GetCreationTimeStamp() const
{
LIMITED_METHOD_CONTRACT;
- return m_mostRecentTimeStamp;
+ return m_creationTimeStamp;
}
EventPipeEventInstance* EventPipeBuffer::GetNext(EventPipeEventInstance *pEvent, LARGE_INTEGER beforeTimeStamp)
@@ -143,6 +152,8 @@ EventPipeEventInstance* EventPipeBuffer::GetNext(EventPipeEventInstance *pEvent,
}
CONTRACTL_END;
+ _ASSERTE(m_state == EventPipeBufferState::READ_ONLY);
+
EventPipeEventInstance *pNextInstance = NULL;
// If input is NULL, return the first event if there is one.
if(pEvent == NULL)
@@ -212,6 +223,8 @@ EventPipeEventInstance* EventPipeBuffer::PeekNext(LARGE_INTEGER beforeTimeStamp)
}
CONTRACTL_END;
+ _ASSERTE(m_state == READ_ONLY);
+
// Get the next event using the last popped event as a marker.
return GetNext(m_pLastPoppedEvent, beforeTimeStamp);
}
@@ -226,6 +239,8 @@ EventPipeEventInstance* EventPipeBuffer::PopNext(LARGE_INTEGER beforeTimeStamp)
}
CONTRACTL_END;
+ _ASSERTE(m_state == READ_ONLY);
+
// Get the next event using the last popped event as a marker.
EventPipeEventInstance *pNext = PeekNext(beforeTimeStamp);
if(pNext != NULL)
@@ -236,6 +251,19 @@ EventPipeEventInstance* EventPipeBuffer::PopNext(LARGE_INTEGER beforeTimeStamp)
return pNext;
}
+EventPipeBufferState EventPipeBuffer::GetVolatileState()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_state.Load();
+}
+
+void EventPipeBuffer::ConvertToReadOnly()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_pWriterThread->GetLock()->OwnedByCurrentThread());
+ m_state.Store(EventPipeBufferState::READ_ONLY);
+}
+
#ifdef _DEBUG
bool EventPipeBuffer::EnsureConsistency()
{
diff --git a/src/vm/eventpipebuffer.h b/src/vm/eventpipebuffer.h
index c717d894c2..926a87cccd 100644
--- a/src/vm/eventpipebuffer.h
+++ b/src/vm/eventpipebuffer.h
@@ -12,6 +12,34 @@
#include "eventpipeeventinstance.h"
#include "eventpipesession.h"
+class EventPipeThread;
+
+
+// Synchronization
+//
+// EventPipeBuffer starts off writable and accumulates events in a buffer, then at some point converts to be readable and a second thread can
+// read back the events which have accumulated. The transition occurs when calling ConvertToReadOnly(). Write methods will assert if the buffer
+// isn't writable and read-related methods will assert if it isn't readable. Methods that have no asserts should have immutable results that
+// can be used at any point during the buffer's lifetime. The buffer has no internal locks so it is the caller's responsibility to synchronize
+// their usage.
+// Writing into the buffer and calling ConvertToReadOnly() is always done with EventPipeThread::m_lock held. The eventual reader thread can do
+// a few different things to ensure it sees a consistent state:
+// 1) Take the writer's EventPipeThread::m_lock at least once after the last time the writer writes events
+// 2) Use a memory barrier that prevents reader loads from being re-ordered earlier, such as the one that will occur implicitly by evaluating
+// EventPipeBuffer::GetVolatileState()
+
+
+enum EventPipeBufferState
+{
+ // This buffer is currently assigned to a thread and pWriterThread may write events into it
+ // at any time
+ WRITABLE = 0,
+
+ // This buffer has been returned to the EventPipeBufferManager and pWriterThread is guaranteed
+ // to never access it again.
+ READ_ONLY = 1
+};
+
class EventPipeBuffer
{
@@ -24,6 +52,15 @@ private:
// It is OK for the data payloads to be unaligned because they are opaque blobs that are copied via memcpy.
const size_t AlignmentSize = 8;
+ // State transition WRITABLE -> READ_ONLY only occurs while holding the m_pWriterThread->m_lock;
+ // It can be read at any time
+ Volatile<EventPipeBufferState> m_state;
+
+ // Thread that is/was allowed to write into this buffer when m_state == WRITABLE
+#ifdef _DEBUG
+ EventPipeThread* m_pWriterThread;
+#endif
+
// A pointer to the actual buffer.
BYTE *m_pBuffer;
@@ -33,8 +70,10 @@ private:
// The max write pointer (end of the buffer).
BYTE *m_pLimit;
- // The timestamp of the most recent event in the buffer.
- LARGE_INTEGER m_mostRecentTimeStamp;
+ // The timestamp the buffer was created. If our clock source
+ // is monotonic then all events in the buffer should have
+ // timestamp >= this one. If not then all bets are off.
+ LARGE_INTEGER m_creationTimeStamp;
// Used by PopNext as input to GetNext.
// If NULL, no events have been popped.
@@ -89,7 +128,7 @@ private:
public:
- EventPipeBuffer(unsigned int bufferSize);
+ EventPipeBuffer(unsigned int bufferSize DEBUG_ARG(EventPipeThread* pWriterThread));
~EventPipeBuffer();
// Write an event to the buffer.
@@ -100,8 +139,8 @@ public:
// - false: The write failed. In this case, the buffer should be considered full.
bool WriteEvent(Thread *pThread, EventPipeSession &session, EventPipeEvent &event, EventPipeEventPayload &payload, LPCGUID pActivityId, LPCGUID pRelatedActivityId, StackContents *pStack = NULL);
- // Get the timestamp of the most recent event in the buffer.
- LARGE_INTEGER GetMostRecentTimeStamp() const;
+ // Get the timestamp the buffer was created.
+ LARGE_INTEGER GetCreationTimeStamp() const;
// Get the next event from the buffer as long as it is before the specified timestamp.
// Input of NULL gets the first event.
@@ -113,6 +152,12 @@ public:
// Get the next event from the buffer and mark it as read.
EventPipeEventInstance* PopNext(LARGE_INTEGER beforeTimeStamp);
+ // Check the state of the buffer
+ EventPipeBufferState GetVolatileState();
+
+ // Convert the buffer from writable to readable
+ void ConvertToReadOnly();
+
#ifdef _DEBUG
bool EnsureConsistency();
#endif // _DEBUG
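The synchronization comment above describes a WRITABLE -> READ_ONLY handoff performed under the writer thread's lock and observed by the reader either through that same lock or through the volatile state. The following minimal standalone model uses std::atomic and std::mutex in place of Volatile<T> and EventPipeThread::m_lock; the Toy* names and DrainBuffer() are illustrative assumptions, not CoreCLR types.

#include <atomic>
#include <cassert>
#include <mutex>
#include <vector>

enum class BufferState { Writable, ReadOnly };

struct ToyBuffer {
    std::atomic<BufferState> state{BufferState::Writable};
    std::vector<int> events;                 // stands in for the raw byte buffer

    void Write(int e) {                      // caller must hold the writer's lock
        assert(state.load(std::memory_order_relaxed) == BufferState::Writable);
        events.push_back(e);
    }
    void ConvertToReadOnly() {               // caller must hold the writer's lock
        state.store(BufferState::ReadOnly, std::memory_order_release);
    }
};

struct ToyWriterThread {
    std::mutex lock;                         // plays the role of EventPipeThread::m_lock
    ToyBuffer* writeBuffer = nullptr;
};

// Reader side: take the writer's lock once to force any in-progress write to
// finish and flip the buffer to read-only, then read without holding the lock.
std::vector<int> DrainBuffer(ToyWriterThread& writer, ToyBuffer& buf) {
    {
        std::lock_guard<std::mutex> hold(writer.lock);
        if (writer.writeBuffer == &buf) {
            buf.ConvertToReadOnly();
            writer.writeBuffer = nullptr;
        }
    }
    // The acquire load pairs with the release store in ConvertToReadOnly(), so
    // every event written before the conversion is visible to this thread.
    assert(buf.state.load(std::memory_order_acquire) == BufferState::ReadOnly);
    return buf.events;
}

int main() {
    ToyWriterThread writer;
    ToyBuffer buf;
    {   // writer side: adopt the buffer and append an event under the writer's lock
        std::lock_guard<std::mutex> hold(writer.lock);
        writer.writeBuffer = &buf;
        buf.Write(42);
    }
    return DrainBuffer(writer, buf).size() == 1 ? 0 : 1;
}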
diff --git a/src/vm/eventpipebuffermanager.cpp b/src/vm/eventpipebuffermanager.cpp
index ae2800975c..e5966849c1 100644
--- a/src/vm/eventpipebuffermanager.cpp
+++ b/src/vm/eventpipebuffermanager.cpp
@@ -10,12 +10,103 @@
#ifdef FEATURE_PERFTRACING
+void ReleaseEventPipeThreadRef(EventPipeThread* pThread) { LIMITED_METHOD_CONTRACT; pThread->Release(); }
+void AcquireEventPipeThreadRef(EventPipeThread* pThread) { LIMITED_METHOD_CONTRACT; pThread->AddRef(); }
+
#ifndef __GNUC__
-__declspec(thread) ThreadEventBufferList ThreadEventBufferList::gCurrentThreadEventBufferList;
+__declspec(thread) EventPipeThreadHolder EventPipeThread::gCurrentEventPipeThreadHolder;
#else // !__GNUC__
-thread_local ThreadEventBufferList ThreadEventBufferList::gCurrentThreadEventBufferList;
+thread_local EventPipeThreadHolder EventPipeThread::gCurrentEventPipeThreadHolder;
#endif // !__GNUC__
+EventPipeThread::EventPipeThread()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ m_lock.Init(LOCK_TYPE_DEFAULT);
+ m_refCount = 0;
+}
+
+EventPipeThread::~EventPipeThread()
+{
+ _ASSERTE(m_pWriteBuffer == nullptr);
+ _ASSERTE(m_pBufferList == nullptr);
+}
+
+/* static */ EventPipeThread* EventPipeThread::Get()
+{
+ LIMITED_METHOD_CONTRACT;
+ return gCurrentEventPipeThreadHolder;
+}
+
+/* static */ void EventPipeThread::Set(EventPipeThread* pThread)
+{
+ LIMITED_METHOD_CONTRACT;
+ gCurrentEventPipeThreadHolder = pThread;
+}
+
+void EventPipeThread::AddRef()
+{
+ LIMITED_METHOD_CONTRACT;
+ FastInterlockIncrement(&m_refCount);
+}
+
+void EventPipeThread::Release()
+{
+ LIMITED_METHOD_CONTRACT;
+ if (FastInterlockDecrement(&m_refCount) == 0)
+ {
+ delete this;
+ }
+}
+
+SpinLock* EventPipeThread::GetLock()
+{
+ LIMITED_METHOD_CONTRACT;
+ return &m_lock;
+}
+
+EventPipeBuffer* EventPipeThread::GetWriteBuffer()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_lock.OwnedByCurrentThread());
+ _ASSERTE(m_pWriteBuffer == nullptr || m_pWriteBuffer->GetVolatileState() == EventPipeBufferState::WRITABLE);
+ return m_pWriteBuffer;
+}
+
+void EventPipeThread::SetWriteBuffer(EventPipeBuffer* pNewBuffer)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_lock.OwnedByCurrentThread());
+ _ASSERTE(m_pWriteBuffer == nullptr || m_pWriteBuffer->GetVolatileState() == EventPipeBufferState::WRITABLE);
+ _ASSERTE(pNewBuffer == nullptr || pNewBuffer->GetVolatileState() == EventPipeBufferState::WRITABLE);
+ if (m_pWriteBuffer)
+ {
+ m_pWriteBuffer->ConvertToReadOnly();
+ }
+ m_pWriteBuffer = pNewBuffer;
+}
+
+EventPipeBufferList* EventPipeThread::GetBufferList()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(EventPipe::IsBufferManagerLockOwnedByCurrentThread());
+ return m_pBufferList;
+}
+
+void EventPipeThread::SetBufferList(EventPipeBufferList* pNewBufferList)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(EventPipe::IsBufferManagerLockOwnedByCurrentThread());
+ m_pBufferList = pNewBufferList;
+}
+
+
EventPipeBufferManager::EventPipeBufferManager()
{
CONTRACTL
@@ -29,6 +120,7 @@ EventPipeBufferManager::EventPipeBufferManager()
m_pPerThreadBufferList = new SList<SListElem<EventPipeBufferList*>>();
m_sizeOfAllBuffers = 0;
m_lock.Init(LOCK_TYPE_DEFAULT);
+ m_writeEventSuspending = FALSE;
#ifdef _DEBUG
m_numBuffersAllocated = 0;
@@ -50,30 +142,19 @@ EventPipeBufferManager::~EventPipeBufferManager()
}
CONTRACTL_END;
- if(m_pPerThreadBufferList != NULL)
- {
- SListElem<EventPipeBufferList*> *pElem = m_pPerThreadBufferList->GetHead();
- while(pElem != NULL)
- {
- SListElem<EventPipeBufferList*> *pCurElem = pElem;
-
- EventPipeBufferList *pThreadBufferList = pCurElem->GetValue();
- if (!pThreadBufferList->OwnedByThread())
- {
- // We don't delete buffers themself because they can be in-use
- delete(pThreadBufferList);
- }
-
- pElem = m_pPerThreadBufferList->GetNext(pElem);
- delete(pCurElem);
- }
+ // Setting this to TRUE should have no practical effect other than satisfying asserts at this point.
+ m_writeEventSuspending = TRUE;
+ DeAllocateBuffers();
+}
- delete(m_pPerThreadBufferList);
- m_pPerThreadBufferList = NULL;
- }
+#ifdef _DEBUG
+bool EventPipeBufferManager::IsLockOwnedByCurrentThread()
+{
+ return m_lock.OwnedByCurrentThread();
}
+#endif
-EventPipeBuffer* EventPipeBufferManager::AllocateBufferForThread(EventPipeSession &session, unsigned int requestSize)
+EventPipeBuffer* EventPipeBufferManager::AllocateBufferForThread(EventPipeSession &session, unsigned int requestSize, BOOL & writeSuspended)
{
CONTRACTL
{
@@ -87,14 +168,43 @@ EventPipeBuffer* EventPipeBufferManager::AllocateBufferForThread(EventPipeSessio
// Allocating a buffer requires us to take the lock.
SpinLockHolder _slh(&m_lock);
+ // If writes are being suspended then give up; see the comments in SuspendWriteEvent() for why this is important.
+ if (m_writeEventSuspending.Load())
+ {
+ writeSuspended = TRUE;
+ return NULL;
+ }
+
// Determine if the requesting thread has at least one buffer.
// If not, we guarantee that each thread gets at least one (to prevent thrashing when the circular buffer size is too small).
bool allocateNewBuffer = false;
- EventPipeBufferList *pThreadBufferList = ThreadEventBufferList::Get();
- if(pThreadBufferList == NULL)
+ EventPipeThread *pEventPipeThread = EventPipeThread::Get();
+
+ if (pEventPipeThread == NULL)
+ {
+ EX_TRY
+ {
+ pEventPipeThread = new EventPipeThread();
+ EventPipeThread::Set(pEventPipeThread);
+ }
+ EX_CATCH
+ {
+ pEventPipeThread = NULL;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ if (pEventPipeThread == NULL)
+ {
+ return NULL;
+ }
+ }
+
+ EventPipeBufferList *pThreadBufferList = pEventPipeThread->GetBufferList();
+ if (pThreadBufferList == NULL)
{
- pThreadBufferList = new (nothrow) EventPipeBufferList(this);
+ pThreadBufferList = new (nothrow) EventPipeBufferList(this, pEventPipeThread);
+
if (pThreadBufferList == NULL)
{
return NULL;
@@ -107,12 +217,11 @@ EventPipeBuffer* EventPipeBufferManager::AllocateBufferForThread(EventPipeSessio
}
m_pPerThreadBufferList->InsertTail(pElem);
- ThreadEventBufferList::Set(pThreadBufferList);
+ pEventPipeThread->SetBufferList(pThreadBufferList);
allocateNewBuffer = true;
}
- // Determine if policy allows us to allocate another buffer, or if we need to steal one
- // from another thread.
+ // Determine if policy allows us to allocate another buffer
if(!allocateNewBuffer)
{
EventPipeConfiguration *pConfig = EventPipe::GetConfiguration();
@@ -129,48 +238,7 @@ EventPipeBuffer* EventPipeBufferManager::AllocateBufferForThread(EventPipeSessio
allocateNewBuffer = true;
}
}
-
- // Only steal buffers from other threads if the session being written to is a
- // file-based session. Streaming sessions will simply drop events.
- // TODO: Add dropped events telemetry here.
- EventPipeBuffer *pNewBuffer = NULL;
- if(!allocateNewBuffer && (session.GetSessionType() == EventPipeSessionType::File))
- {
- // We can't allocate a new buffer.
- // Find the oldest buffer, de-allocate it, and re-purpose it for this thread.
-
- // Find the thread that contains the oldest stealable buffer, and get its list of buffers.
- EventPipeBufferList *pListToStealFrom = FindThreadToStealFrom();
- if(pListToStealFrom != NULL)
- {
- // Assert that the buffer we're stealing is not the only buffer in the list.
- // This invariant is enforced by FindThreadToStealFrom.
- _ASSERTE((pListToStealFrom->GetHead() != NULL) && (pListToStealFrom->GetHead()->GetNext() != NULL));
-
- // Remove the oldest buffer from the list.
- pNewBuffer = pListToStealFrom->GetAndRemoveHead();
-
- // De-allocate the buffer. We do this because buffers are variable sized
- // based on how much volume is coming from the thread.
- DeAllocateBuffer(pNewBuffer);
- pNewBuffer = NULL;
-
- // Set that we want to allocate a new buffer.
- allocateNewBuffer = true;
-
-#ifdef _DEBUG
- m_numBuffersStolen++;
-#endif // _DEBUG
-
- }
- else
- {
- // This only happens when # of threads == # of buffers.
- // We'll allocate one more buffer, and then this won't happen again.
- allocateNewBuffer = true;
- }
- }
-
+ EventPipeBuffer* pNewBuffer = NULL;
if(allocateNewBuffer)
{
// Pick a buffer size by multiplying the base buffer size by the number of buffers already allocated for this thread.
@@ -204,7 +272,7 @@ EventPipeBuffer* EventPipeBufferManager::AllocateBufferForThread(EventPipeSessio
// could throw, and cannot be easily checked
EX_TRY
{
- pNewBuffer = new EventPipeBuffer(bufferSize);
+ pNewBuffer = new EventPipeBuffer(bufferSize DEBUG_ARG(pEventPipeThread));
}
EX_CATCH
{
@@ -233,49 +301,6 @@ EventPipeBuffer* EventPipeBufferManager::AllocateBufferForThread(EventPipeSessio
return NULL;
}
-EventPipeBufferList* EventPipeBufferManager::FindThreadToStealFrom()
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_ANY;
- PRECONDITION(m_lock.OwnedByCurrentThread());
- }
- CONTRACTL_END;
-
- // Find the thread buffer list containing the buffer whose most recent event is the oldest as long as the buffer is not
- // the current buffer for the thread (e.g. it's next pointer is non-NULL).
- // This means that the thread must also have multiple buffers, so that we don't steal its only buffer.
- EventPipeBufferList *pOldestContainingList = NULL;
-
- SListElem<EventPipeBufferList*> *pElem = m_pPerThreadBufferList->GetHead();
- while(pElem != NULL)
- {
- EventPipeBufferList *pCandidate = pElem->GetValue();
-
- // The current candidate has more than one buffer (otherwise it is disqualified).
- if(pCandidate->GetHead()->GetNext() != NULL)
- {
- // If we haven't seen any candidates, this one automatically becomes the oldest candidate.
- if(pOldestContainingList == NULL)
- {
- pOldestContainingList = pCandidate;
- }
- // Otherwise, to replace the existing candidate, this candidate must have an older timestamp in its oldest buffer.
- else if((pOldestContainingList->GetHead()->GetMostRecentTimeStamp().QuadPart) >
- (pCandidate->GetHead()->GetMostRecentTimeStamp().QuadPart))
- {
- pOldestContainingList = pCandidate;
- }
- }
-
- pElem = m_pPerThreadBufferList->GetNext(pElem);
- }
-
- return pOldestContainingList;
-}
-
void EventPipeBufferManager::DeAllocateBuffer(EventPipeBuffer *pBuffer)
{
CONTRACTL
@@ -331,36 +356,36 @@ bool EventPipeBufferManager::WriteEvent(Thread *pThread, EventPipeSession &sessi
return false;
}
+ StackContents stackContents;
+ if (pStack == NULL && event.NeedStack() && !session.RundownEnabled())
+ {
+ EventPipe::WalkManagedStackForCurrentThread(stackContents);
+ pStack = &stackContents;
+ }
+
// See if the thread already has a buffer to try.
bool allocNewBuffer = false;
EventPipeBuffer *pBuffer = NULL;
- EventPipeBufferList *pThreadBufferList = ThreadEventBufferList::Get();
+ EventPipeThread *pEventPipeThread = EventPipeThread::Get();
- if(pThreadBufferList == NULL)
+ if(pEventPipeThread == NULL)
{
allocNewBuffer = true;
}
else
{
- // The thread already has a buffer list. Select the newest buffer and attempt to write into it.
- pBuffer = pThreadBufferList->GetTail();
+ SpinLockHolder _slh(pEventPipeThread->GetLock());
+ pBuffer = pEventPipeThread->GetWriteBuffer();
+
if(pBuffer == NULL)
{
- // This should never happen. If the buffer list exists, it must contain at least one entry.
- _ASSERT(!"Thread buffer list with zero entries encountered.");
- return false;
+ allocNewBuffer = true;
}
else
{
- // The event is still enabled. Mark that the thread is now writing an event.
- pThreadBufferList->SetThreadEventWriteInProgress(true);
-
// Attempt to write the event to the buffer. If this fails, we should allocate a new buffer.
allocNewBuffer = !pBuffer->WriteEvent(pEventThread, session, event, payload, pActivityId, pRelatedActivityId, pStack);
-
- // Mark that the thread is no longer writing an event.
- pThreadBufferList->SetThreadEventWriteInProgress(false);
}
}
@@ -375,22 +400,45 @@ bool EventPipeBufferManager::WriteEvent(Thread *pThread, EventPipeSession &sessi
// to switch to preemptive mode here.
unsigned int requestSize = sizeof(EventPipeEventInstance) + payload.GetSize();
- pBuffer = AllocateBufferForThread(session, requestSize);
- }
-
- // Try to write the event after we allocated (or stole) a buffer.
- // This is the first time if the thread had no buffers before the call to this function.
- // This is the second time if this thread did have one or more buffers, but they were full.
- if(allocNewBuffer && pBuffer != NULL)
- {
- // By this point, a new buffer list has been allocated so we should fetch it again before using it.
- pThreadBufferList = ThreadEventBufferList::Get();
- // The event is still enabled. Mark that the thread is now writing an event.
- pThreadBufferList->SetThreadEventWriteInProgress(true);
- allocNewBuffer = !pBuffer->WriteEvent(pEventThread, session, event, payload, pActivityId, pRelatedActivityId, pStack);
+ BOOL writeSuspended = FALSE;
+ pBuffer = AllocateBufferForThread(session, requestSize, writeSuspended);
+ if (pBuffer == NULL)
+ {
+ // We treat this as the WriteEvent() call occurring after this session stopped listening for events, effectively the
+ // same as if event.IsEnabled() test above returned false.
+ if (writeSuspended)
+ {
+ return false;
+ }
+ }
+ else
+ {
+ EventPipeThread *pEventPipeThread = EventPipeThread::Get();
+ _ASSERTE(pEventPipeThread != NULL);
+ {
+ SpinLockHolder _slh(pEventPipeThread->GetLock());
+ if (m_writeEventSuspending.Load())
+ {
+ // After leaving the manager's lock in AllocateBufferForThread some other thread decided to suspend writes.
+ // We need to immediately return the buffer we just took without storing it or writing to it.
+ // SuspendWriteEvent() is spinning waiting for this buffer to be relinquished.
+ pBuffer->ConvertToReadOnly();
+
+ // We treat this as the WriteEvent() call occurring after this session stopped listening for events, effectively the
+ // same as if event.IsEnabled() returned false.
+ return false;
+ }
+ else
+ {
+ pEventPipeThread->SetWriteBuffer(pBuffer);
- // Mark that the thread is no longer writing an event.
- pThreadBufferList->SetThreadEventWriteInProgress(false);
+ // Try to write the event after we allocated a buffer.
+ // This is the first time if the thread had no buffers before the call to this function.
+ // This is the second time if this thread did have one or more buffers, but they were full.
+ allocNewBuffer = !pBuffer->WriteEvent(pEventThread, session, event, payload, pActivityId, pRelatedActivityId, pStack);
+ }
+ }
+ }
}
@@ -511,6 +559,16 @@ EventPipeEventInstance* EventPipeBufferManager::GetNextEvent()
// Peek the next event out of the list.
EventPipeBuffer *pContainingBuffer = NULL;
+
+ // PERF: This may be too aggressive? If this method is being called frequently enough to keep pace with the
+ // writing threads we could be in a state of high lock contention and lots of churning buffers. Each writer
+ // would take several locks, allocate a new buffer, write one event into it, then the reader would take the
+ // lock, convert the buffer to read-only and read the single event out of it. Allowing more events to accumulate
+ // in the buffers before converting between writable and read-only amortizes a lot of the overhead. One way
+ // to achieve that would be picking a stopTimeStamp that was Xms in the past. This would let Xms of events
+ // accumulate in the write buffer before we converted it and forced the writer to allocate another. Other more
+ // sophisticated approaches would probably build a low overhead synchronization mechanism to read and write the
+ // buffer at the same time.
EventPipeEventInstance *pNext = pBufferList->PeekNextEvent(stopTimeStamp, &pContainingBuffer);
if (pNext != NULL)
{
@@ -541,6 +599,85 @@ EventPipeEventInstance* EventPipeBufferManager::GetNextEvent()
}
}
+void EventPipeBufferManager::SuspendWriteEvent()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(EnsureConsistency());
+
+ // All calls to this method must be synchronized by our caller
+ _ASSERTE(EventPipe::IsLockOwnedByCurrentThread());
+
+ CQuickArrayList<EventPipeThread*> threadList;
+ {
+ SpinLockHolder _slh(&m_lock);
+ m_writeEventSuspending.Store(TRUE);
+ // From this point until m_writeEventSuspending is reset to FALSE it is impossible
+ // for new EventPipeBufferLists to be added to the m_pPerThreadBufferList. The only
+ // way AllocateBufferForThread is allowed to add one is by:
+ // 1) take m_lock - AllocateBufferForThread can't own it now because this thread owns it,
+ // but after this thread gives it up lower in this function it could be acquired.
+ // 2) observe m_writeEventSuspending = False - that won't happen, acquiring m_lock
+ // guarantees AllocateBufferForThread will observe all the memory changes this
+ // thread made prior to releasing m_lock and we've already set it TRUE.
+ // This ensures that when we iterate over the list of threads below, we've got the complete list.
+ SListElem<EventPipeBufferList*> *pElem = m_pPerThreadBufferList->GetHead();
+ while(pElem != NULL)
+ {
+ threadList.Push(pElem->GetValue()->GetThread());
+ pElem = m_pPerThreadBufferList->GetNext(pElem);
+ }
+ }
+
+ // Iterate through all the threads, forcing them to finish writes in progress inside EventPipeThread::m_lock,
+ // relinquish any buffers stored in EventPipeThread::m_pWriteBuffer and prevent storing new ones.
+ for (size_t i = 0 ; i < threadList.Size(); i++)
+ {
+ EventPipeThread* pThread = threadList[i];
+ {
+ SpinLockHolder _slh(pThread->GetLock());
+ pThread->SetWriteBuffer(nullptr);
+ // Holding the thread's lock forces any in-progress WriteEvent on this thread to finish,
+ // and SetWriteBuffer(nullptr) converts the relinquished buffer to READ_ONLY. Because
+ // WriteEvent re-checks m_writeEventSuspending under this same lock before storing a
+ // newly allocated buffer, the thread cannot stash another buffer in m_pWriteBuffer
+ // after we release the lock.
+ }
+ }
+
+ // Wait for any straggler WriteEvent threads that may have already allocated a buffer but
+ // hadn't yet relinquished it.
+ {
+ SpinLockHolder _slh(&m_lock);
+ SListElem<EventPipeBufferList*> *pElem = m_pPerThreadBufferList->GetHead();
+ while (pElem != NULL)
+ {
+ // Get the list and remove it from the thread.
+ EventPipeBufferList *pBufferList = pElem->GetValue();
+ for (EventPipeBuffer* pBuffer = pBufferList->GetHead(); pBuffer != nullptr; pBuffer = pBuffer->GetNext())
+ {
+ // Above we guaranteed that other threads wouldn't acquire new buffers or keep the ones they
+ // already have indefinitely, but we haven't quite guaranteed the buffer has been relinquished
+ // back to us. It's possible the WriteEvent thread allocated the buffer before we took m_lock
+ // above, but it hasn't yet acquired EventPipeThread::m_lock in order to observe that it needs
+ // to relinquish the buffer. In this state, it has a pointer to the buffer stored in registers
+ // or on the stack. If the thread is in that tiny window, all we have to do is wait for it.
+ YIELD_WHILE(pBuffer->GetVolatileState() != EventPipeBufferState::READ_ONLY);
+ }
+ pElem = m_pPerThreadBufferList->GetNext(pElem);
+ }
+ }
+}
+
void EventPipeBufferManager::DeAllocateBuffers()
{
CONTRACTL
@@ -552,6 +689,7 @@ void EventPipeBufferManager::DeAllocateBuffers()
CONTRACTL_END;
_ASSERTE(EnsureConsistency());
+ _ASSERTE(m_writeEventSuspending);
// Take the buffer manager manipulation lock
SpinLockHolder _slh(&m_lock);
@@ -561,35 +699,46 @@ void EventPipeBufferManager::DeAllocateBuffers()
{
// Get the list and determine if we can free it.
EventPipeBufferList *pBufferList = pElem->GetValue();
- if(!pBufferList->OwnedByThread())
+ EventPipeThread *pThread = pBufferList->GetThread();
+ pThread->SetBufferList(nullptr);
+
+ // Iterate over all nodes in the list and deallocate them.
+ EventPipeBuffer *pBuffer = pBufferList->GetAndRemoveHead();
+ while (pBuffer != NULL)
{
- // Iterate over all nodes in the list and de-allocate them.
- EventPipeBuffer *pBuffer = pBufferList->GetAndRemoveHead();
- while(pBuffer != NULL)
- {
- DeAllocateBuffer(pBuffer);
- pBuffer = pBufferList->GetAndRemoveHead();
- }
+ DeAllocateBuffer(pBuffer);
+ pBuffer = pBufferList->GetAndRemoveHead();
+ }
- // Remove the buffer list from the per-thread buffer list.
- pElem = m_pPerThreadBufferList->FindAndRemove(pElem);
- _ASSERTE(pElem != NULL);
+ // Remove the buffer list from the per-thread buffer list.
+ pElem = m_pPerThreadBufferList->FindAndRemove(pElem);
+ _ASSERTE(pElem != NULL);
- SListElem<EventPipeBufferList*> *pCurElem = pElem;
- pElem = m_pPerThreadBufferList->GetNext(pElem);
- delete(pCurElem);
+ SListElem<EventPipeBufferList*> *pCurElem = pElem;
+ pElem = m_pPerThreadBufferList->GetNext(pElem);
+ delete(pCurElem);
- // Now that all of the list elements have been freed, free the list itself.
- delete(pBufferList);
- pBufferList = NULL;
- }
- else
- {
- pElem = m_pPerThreadBufferList->GetNext(pElem);
- }
+ // Now that all the list elements have been freed, free the list itself.
+ delete(pBufferList);
+ pBufferList = NULL;
}
}
+void EventPipeBufferManager::ResumeWriteEvent()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // All calls to this method must be synchronized by our caller.
+
+ _ASSERTE(EventPipe::IsLockOwnedByCurrentThread());
+ _ASSERTE(EnsureConsistency());
+
+ m_writeEventSuspending.Store(FALSE);
+
+ // At this point threads are allowed to again allocate new BufferLists and Buffers. However our caller
+ // presumably disabled all the events and until events are re-enabled no thread is going to get past
+ // the event.IsEnabled() checks in WriteEvent() to make any of those allocations happen.
+}
#ifdef _DEBUG
bool EventPipeBufferManager::EnsureConsistency()
@@ -610,21 +759,15 @@ bool EventPipeBufferManager::EnsureConsistency()
}
#endif // _DEBUG
-EventPipeBufferList::EventPipeBufferList(EventPipeBufferManager *pManager)
+EventPipeBufferList::EventPipeBufferList(EventPipeBufferManager *pManager, EventPipeThread* pThread)
{
LIMITED_METHOD_CONTRACT;
m_pManager = pManager;
+ m_pThread = pThread;
m_pHeadBuffer = NULL;
m_pTailBuffer = NULL;
m_bufferCount = 0;
- m_pReadBuffer = NULL;
- m_ownedByThread = true;
- m_threadEventWriteInProgress = false;
-
-#ifdef _DEBUG
- m_pCreatingThread = GetThread();
-#endif // _DEBUG
}
EventPipeBuffer* EventPipeBufferList::GetHead()
@@ -732,6 +875,33 @@ unsigned int EventPipeBufferList::GetCount() const
return m_bufferCount;
}
+EventPipeBuffer* EventPipeBufferList::TryGetReadBuffer(LARGE_INTEGER beforeTimeStamp, EventPipeBuffer* pNewReadBuffer)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(EventPipe::IsBufferManagerLockOwnedByCurrentThread());
+
+ // is it possible that the read buffer has events that occur before 'beforeTimeStamp'?
+ if (pNewReadBuffer == NULL || pNewReadBuffer->GetCreationTimeStamp().QuadPart >= beforeTimeStamp.QuadPart)
+ {
+ return NULL;
+ }
+
+ // We can't read from a buffer while it is simultaneously being written to, so we need to preempt the writer.
+ if (pNewReadBuffer->GetVolatileState() == EventPipeBufferState::WRITABLE)
+ {
+ {
+ SpinLockHolder _slh(m_pThread->GetLock());
+ if (m_pThread->GetWriteBuffer() == pNewReadBuffer)
+ {
+ m_pThread->SetWriteBuffer(nullptr);
+ }
+ }
+ _ASSERTE(pNewReadBuffer->GetVolatileState() == EventPipeBufferState::READ_ONLY);
+ }
+
+ return pNewReadBuffer;
+}
+
EventPipeEventInstance* EventPipeBufferList::PeekNextEvent(LARGE_INTEGER beforeTimeStamp, EventPipeBuffer **pContainingBuffer)
{
CONTRACTL
@@ -742,40 +912,35 @@ EventPipeEventInstance* EventPipeBufferList::PeekNextEvent(LARGE_INTEGER beforeT
}
CONTRACTL_END;
- // Get the current read buffer.
- // If it's not set, start with the head buffer.
- if(m_pReadBuffer == NULL)
- {
- m_pReadBuffer = m_pHeadBuffer;
- }
+ EventPipeBuffer* pReadBuffer = TryGetReadBuffer(beforeTimeStamp, m_pHeadBuffer);
- // If the read buffer is still NULL, then this list contains no buffers.
- if(m_pReadBuffer == NULL)
+ // If the read buffer is still NULL, then this list contains no buffers with events in the time range we care about.
+ if(pReadBuffer == NULL)
{
return NULL;
}
// Get the next event in the buffer.
- EventPipeEventInstance *pNext = m_pReadBuffer->PeekNext(beforeTimeStamp);
+ EventPipeEventInstance *pNext = pReadBuffer->PeekNext(beforeTimeStamp);
// If the next event is NULL, then go to the next buffer.
if(pNext == NULL)
{
- m_pReadBuffer = m_pReadBuffer->GetNext();
- if(m_pReadBuffer != NULL)
+ pReadBuffer = TryGetReadBuffer(beforeTimeStamp, pReadBuffer->GetNext());
+ if(pReadBuffer != NULL)
{
- pNext = m_pReadBuffer->PeekNext(beforeTimeStamp);
+ pNext = pReadBuffer->PeekNext(beforeTimeStamp);
}
}
// Set the containing buffer.
if(pNext != NULL && pContainingBuffer != NULL)
{
- *pContainingBuffer = m_pReadBuffer;
+ *pContainingBuffer = pReadBuffer;
}
// Make sure pContainingBuffer is properly set.
- _ASSERTE((pNext == NULL) || (pNext != NULL && pContainingBuffer == NULL) || (pNext != NULL && *pContainingBuffer == m_pReadBuffer));
+ _ASSERTE((pNext == NULL) || (pNext != NULL && pContainingBuffer == NULL) || (pNext != NULL && *pContainingBuffer == pReadBuffer));
return pNext;
}
@@ -814,14 +979,13 @@ EventPipeEventInstance* EventPipeBufferList::PopNextEvent(LARGE_INTEGER beforeTi
return pNext;
}
-#ifdef _DEBUG
-Thread* EventPipeBufferList::GetThread()
+EventPipeThread* EventPipeBufferList::GetThread()
{
LIMITED_METHOD_CONTRACT;
-
- return m_pCreatingThread;
+ return m_pThread;
}
+#ifdef _DEBUG
bool EventPipeBufferList::EnsureConsistency()
{
CONTRACTL
@@ -891,13 +1055,5 @@ bool EventPipeBufferList::EnsureConsistency()
}
#endif // _DEBUG
-ThreadEventBufferList::~ThreadEventBufferList()
-{
- // Before the thread dies, mark its buffers as no longer owned
- // so that they can be cleaned up after the thread dies.
- // EventPipeBufferList *pList = GetThreadEventBufferList();
- if (m_pThreadEventBufferList != NULL)
- m_pThreadEventBufferList->SetOwnedByThread(false);
-}
#endif // FEATURE_PERFTRACING
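SuspendWriteEvent() above works in three steps: publish m_writeEventSuspending under the manager lock, take each thread's lock to force in-progress writes to finish and drop that thread's write buffer, then spin (YIELD_WHILE) until every buffer is READ_ONLY. A compressed standalone model of that handshake for a single writer thread follows; ToyManager, ToyBuffer2, and their members are stand-ins, not the runtime types.

#include <atomic>
#include <mutex>
#include <thread>
#include <vector>

struct ToyBuffer2 {
    std::atomic<bool> readOnly{false};
};

struct ToyManager {
    std::mutex managerLock;                 // plays the role of EventPipeBufferManager::m_lock
    std::atomic<bool> suspending{false};    // plays the role of m_writeEventSuspending
    std::mutex threadLock;                  // one writer thread's EventPipeThread::m_lock
    ToyBuffer2* threadWriteBuffer = nullptr;
    std::vector<ToyBuffer2*> allBuffers;

    ~ToyManager() { for (ToyBuffer2* b : allBuffers) delete b; }

    ToyBuffer2* Allocate() {
        std::lock_guard<std::mutex> m(managerLock);
        if (suspending.load()) return nullptr;          // mirrors writeSuspended = TRUE
        ToyBuffer2* b = new ToyBuffer2();
        allBuffers.push_back(b);
        return b;
    }

    bool WriteEvent() {
        ToyBuffer2* b = Allocate();
        if (b == nullptr) return false;                 // treated as "event not enabled"
        std::lock_guard<std::mutex> t(threadLock);
        if (suspending.load()) {
            b->readOnly.store(true);                    // suspension began after Allocate():
            return false;                               // relinquish instead of storing
        }
        threadWriteBuffer = b;                          // ...the event would be written here...
        return true;
    }

    void SuspendWriteEvent() {
        { std::lock_guard<std::mutex> m(managerLock); suspending.store(true); }
        {   // force an in-progress write to finish and drop the thread's write buffer
            std::lock_guard<std::mutex> t(threadLock);
            if (threadWriteBuffer != nullptr) {
                threadWriteBuffer->readOnly.store(true);
                threadWriteBuffer = nullptr;
            }
        }
        std::lock_guard<std::mutex> m(managerLock);
        for (ToyBuffer2* b : allBuffers)                // the YIELD_WHILE straggler wait
            while (!b->readOnly.load()) std::this_thread::yield();
    }
};

int main() {
    ToyManager m;
    m.WriteEvent();                  // allocates a buffer and keeps it as the write buffer
    m.SuspendWriteEvent();           // after this returns every buffer is read-only
    return m.WriteEvent() ? 1 : 0;   // fails: writes are suspended
}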
diff --git a/src/vm/eventpipebuffermanager.h b/src/vm/eventpipebuffermanager.h
index a6e47f30bc..50add1fda1 100644
--- a/src/vm/eventpipebuffermanager.h
+++ b/src/vm/eventpipebuffermanager.h
@@ -14,31 +14,53 @@
#include "spinlock.h"
class EventPipeBufferList;
+class EventPipeThread;
-// This class is a TLS wrapper around a pointer to thread-specific EventPipeBufferList
-// The struct wrapper is present mainly because we need a way to free the EventPipeBufferList
-// when the thread that owns it dies. Placing this class as a TLS variable will call ~ThreadEventBufferList()
-// when the thread dies so we can free EventPipeBufferList in the destructor.
-class ThreadEventBufferList
+void ReleaseEventPipeThreadRef(EventPipeThread* pThread);
+void AcquireEventPipeThreadRef(EventPipeThread* pThread);
+typedef Wrapper<EventPipeThread*, AcquireEventPipeThreadRef, ReleaseEventPipeThreadRef> EventPipeThreadHolder;
+
+class EventPipeThread
{
#ifndef __GNUC__
-__declspec(thread) static ThreadEventBufferList gCurrentThreadEventBufferList;
+ __declspec(thread) static EventPipeThreadHolder gCurrentEventPipeThreadHolder;
#else // !__GNUC__
-thread_local static ThreadEventBufferList gCurrentThreadEventBufferList;
+ thread_local static EventPipeThreadHolder gCurrentEventPipeThreadHolder;
#endif // !__GNUC__
EventPipeBufferList * m_pThreadEventBufferList = NULL;
- ~ThreadEventBufferList();
+ ~EventPipeThread();
+
+ // The EventPipeThreadHolder maintains one count while the thread is alive
+ // and each session's EventPipeBufferList maintains one count while it
+ // exists
+ LONG m_refCount;
+
+ // this is the one and only buffer this thread is allowed to write to
+ // if non-null, it must match the tail of m_pBufferList
+ // this pointer is protected by m_lock
+ EventPipeBuffer *m_pWriteBuffer = NULL;
+
+ // this is a list of buffers that were written to by this thread
+ // it is protected by EventPipeBufferManager::m_lock
+ EventPipeBufferList *m_pBufferList = NULL;
+
+ // This lock is designed to have low contention. Normally it is only taken by this thread,
+ // but occasionally it may also be taken by another thread which is trying to collect and drain
+ // buffers from all threads.
+ SpinLock m_lock;
public:
- static EventPipeBufferList* Get()
- {
- return gCurrentThreadEventBufferList.m_pThreadEventBufferList;
- }
-
- static void Set(EventPipeBufferList * bl)
- {
- gCurrentThreadEventBufferList.m_pThreadEventBufferList = bl;
- }
+ static EventPipeThread* Get();
+ static void Set(EventPipeThread* pThread);
+
+ EventPipeThread();
+ void AddRef();
+ void Release();
+ SpinLock * GetLock();
+ EventPipeBuffer* GetWriteBuffer();
+ void SetWriteBuffer(EventPipeBuffer* pNewBuffer);
+ EventPipeBufferList * GetBufferList();
+ void SetBufferList(EventPipeBufferList * pBufferList);
};
class EventPipeBufferManager
@@ -62,7 +84,7 @@ private:
// Lock to protect access to the per-thread buffer list and total allocation size.
SpinLock m_lock;
-
+ Volatile<BOOL> m_writeEventSuspending;
#ifdef _DEBUG
// For debugging purposes.
@@ -77,7 +99,7 @@ private:
// Allocate a new buffer for the specified thread.
// This function will store the buffer in the thread's buffer list for future use and also return it here.
// A NULL return value means that a buffer could not be allocated.
- EventPipeBuffer* AllocateBufferForThread(EventPipeSession &session, unsigned int requestSize);
+ EventPipeBuffer* AllocateBufferForThread(EventPipeSession &session, unsigned int requestSize, BOOL & writeSuspended);
// Add a buffer to the thread buffer list.
void AddBufferToThreadBufferList(EventPipeBufferList *pThreadBuffers, EventPipeBuffer *pBuffer);
@@ -100,6 +122,24 @@ public:
// Otherwise, if a stack trace is needed, one will be automatically collected.
bool WriteEvent(Thread *pThread, EventPipeSession &session, EventPipeEvent &event, EventPipeEventPayload &payload, LPCGUID pActivityId, LPCGUID pRelatedActivityId, Thread *pEventThread = NULL, StackContents *pStack = NULL);
+ // Ends the suspension period created by SuspendWriteEvent(). After this call returns WriteEvent()
+ // can again be called successfully, and new BufferLists and Buffers may be allocated.
+ // The caller is required to synchronize all calls to SuspendWriteEvent() and ResumeWriteEvent()
+ void ResumeWriteEvent();
+
+ // From the time this function returns until ResumeWriteEvent() is called a suspended state will
+ // be in effect that blocks all WriteEvent activity. All existing buffers will be in the
+ // READ_ONLY state and no new EventPipeBuffers or EventPipeBufferLists can be created. Calls to
+ // WriteEvent that start during the suspension period or were in progress but hadn't yet recorded
+ // their event into a buffer before the start of the suspension period will return false and the
+ // event will not be recorded. Any events not recorded as a result of this suspension will be
+ // treated the same as events that were not recorded due to configuration.
+ // EXPECTED USAGE: First the caller will disable all events via configuration, then call
+ // SuspendWriteEvents() to force any WriteEvent calls that may still be in progress to either
+ // finish or cancel. After that all BufferLists and Buffers can be safely drained and/or deleted.
+ // The caller is required to synchronize all calls to SuspendWriteEvent() and ResumeWriteEvent()
+ void SuspendWriteEvent();
+
// Write the contents of the managed buffers to the specified file.
// The stopTimeStamp is used to determine when tracing was stopped to ensure that we
// skip any events that might be partially written due to races when tracing is stopped.
@@ -115,6 +155,7 @@ public:
#ifdef _DEBUG
bool EnsureConsistency();
+ bool IsLockOwnedByCurrentThread();
#endif // _DEBUG
};
@@ -126,6 +167,9 @@ private:
// The buffer manager that owns this list.
EventPipeBufferManager *m_pManager;
+ // The thread which writes to the buffers in this list
+ EventPipeThreadHolder m_pThread;
+
// Buffers are stored in an intrusive linked-list from oldest to newest.
// Head is the oldest buffer. Tail is the newest (and currently used) buffer.
EventPipeBuffer *m_pHeadBuffer;
@@ -134,24 +178,13 @@ private:
// The number of buffers in the list.
unsigned int m_bufferCount;
- // The current read buffer (used when processing events on tracing stop).
- EventPipeBuffer *m_pReadBuffer;
-
- // True if this thread is owned by a thread.
- // If it is false, then this buffer can be de-allocated after it is drained.
- Volatile<bool> m_ownedByThread;
-
- // True if a thread is writing something to this list
- Volatile<bool> m_threadEventWriteInProgress;
-
-#ifdef _DEBUG
- // For diagnostics, keep the thread pointer.
- Thread *m_pCreatingThread;
-#endif // _DEBUG
+ // Check pNewReadBuffer to see if it has events in the right time-range and convert it to a readable
+ // state if needed
+ EventPipeBuffer* TryGetReadBuffer(LARGE_INTEGER beforeTimeStamp, EventPipeBuffer* pNewReadBuffer);
public:
- EventPipeBufferList(EventPipeBufferManager *pManager);
+ EventPipeBufferList(EventPipeBufferManager *pManager, EventPipeThread* pThread);
// Get the head node of the list.
EventPipeBuffer* GetHead();
@@ -174,39 +207,11 @@ public:
// Get the next event as long as it is before the specified timestamp, and also mark it as read.
EventPipeEventInstance* PopNextEvent(LARGE_INTEGER beforeTimeStamp);
- // True if a thread owns this list.
- bool OwnedByThread() const
- {
- LIMITED_METHOD_CONTRACT;
- return m_ownedByThread;
- }
-
- // Set whether or not this list is owned by a thread.
- // If it is not owned by a thread, then it can be de-allocated
- // after the buffer is drained.
- // The default value is true.
- void SetOwnedByThread(bool value)
- {
- LIMITED_METHOD_CONTRACT;
- m_ownedByThread = value;
- }
-
- bool GetThreadEventWriteInProgress() const
- {
- LIMITED_METHOD_CONTRACT;
- return m_threadEventWriteInProgress;
- }
-
- void SetThreadEventWriteInProgress(bool value)
- {
- LIMITED_METHOD_CONTRACT;
- m_threadEventWriteInProgress = value;
- }
-
-#ifdef _DEBUG
// Get the thread associated with this list.
- Thread* GetThread();
+ EventPipeThread* GetThread();
+
+#ifdef _DEBUG
// Validate the consistency of the list.
// This function will assert if the list is in an inconsistent state.
bool EnsureConsistency();
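
The new EventPipeThread is kept alive by reference counting: the thread-local EventPipeThreadHolder owns one count and the session's EventPipeBufferList owns another, so the object outlives whichever owner goes away first (thread exit versus buffer-list teardown). A trimmed-down analogue of that holder pattern is sketched below; the Toy* names are illustrative, and the runtime's Wrapper<> template is more general than this sketch.

#include <atomic>

class ToyEventPipeThread {
    std::atomic<long> m_refCount{0};
public:
    void AddRef()  { m_refCount.fetch_add(1); }
    void Release() { if (m_refCount.fetch_sub(1) == 1) delete this; }
};

// RAII holder: AddRef on acquire, Release on destruction, mirroring the role of
// Wrapper<EventPipeThread*, AcquireEventPipeThreadRef, ReleaseEventPipeThreadRef>.
class ToyThreadHolder {
    ToyEventPipeThread* m_p = nullptr;
public:
    ToyThreadHolder() = default;
    ToyThreadHolder(ToyEventPipeThread* p) : m_p(p) { if (m_p) m_p->AddRef(); }
    ToyThreadHolder(const ToyThreadHolder&) = delete;
    ToyThreadHolder& operator=(ToyEventPipeThread* p) {
        if (p) p->AddRef();
        if (m_p) m_p->Release();
        m_p = p;
        return *this;
    }
    ~ToyThreadHolder() { if (m_p) m_p->Release(); }
    operator ToyEventPipeThread*() const { return m_p; }
};

int main() {
    ToyEventPipeThread* t = new ToyEventPipeThread();
    ToyThreadHolder tlsSlot(t);       // thread-local holder: refcount == 1
    ToyThreadHolder bufferList(t);    // session's buffer list: refcount == 2
    // When the thread exits, its TLS holder drops one count; the object stays
    // alive until the buffer list (with its unread buffers) is torn down too.
    return tlsSlot == t ? 0 : 1;      // both holders release on return; deleted once
}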