
pipeline: revert change causing shutdown crash with DEBUG_THREADS

We actually need to leak this lock, because it may still be needed at static destruction time, and we can't guarantee static destruction order.

This reverts commit afed80e83a5c033f03497bcf38353794e662eedf.
rdb, 6 years ago · commit ff2fbe60ef
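Background on the crash: the debug wrappers touched here (MutexDebug, ConditionVarDebug) serialize all of their bookkeeping through a single _global_lock. When that lock is a by-value static object, as in the change being reverted, it has a destructor that runs during static destruction, yet other objects with static storage duration may still need the lock while their own destructors run, and C++ does not define destruction order across translation units. A minimal illustration of the hazard, with std::mutex standing in for MutexTrueImpl (the file and type names below are hypothetical, not Panda3D code):

// Hypothetical two-file example.  Destruction order of g_lock and g_user is
// unspecified because they live in different translation units, so ~Other()
// may run after g_lock has already been destroyed.
//
// --- lock.cpp ---------------------------------------------------------
#include <mutex>
std::mutex g_lock;                 // by-value static: destroyed at exit
//
// --- user.cpp ---------------------------------------------------------
#include <mutex>
extern std::mutex g_lock;
struct Other {
  ~Other() {
    g_lock.lock();                 // undefined behavior if g_lock is already gone
    // ... cleanup that must be serialized ...
    g_lock.unlock();
  }
};
Other g_user;

Reverting to a heap-allocated pointer that is never deleted removes the destructor entirely, so the lock stays usable for the whole shutdown sequence, at the cost of one deliberately leaked allocation.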

panda/src/pipeline/conditionVarDebug.cxx  +13 -13

@@ -29,7 +29,7 @@ using std::ostringstream;
 ConditionVarDebug::
 ConditionVarDebug(MutexDebug &mutex) :
   _mutex(mutex),
-  _impl(mutex._global_lock)
+  _impl(*mutex.get_global_lock())
 {
   nassertv(!_mutex._allow_recursion);
 }
@@ -60,7 +60,7 @@ ConditionVarDebug::
  */
 void ConditionVarDebug::
 wait() {
-  _mutex._global_lock.lock();
+  _mutex._global_lock->lock();
 
   Thread *current_thread = Thread::get_current_thread();
 
@@ -69,7 +69,7 @@ wait() {
     ostr << *current_thread << " attempted to wait on "
          << *this << " without holding " << _mutex;
     nassert_raise(ostr.str());
-    _mutex._global_lock.unlock();
+    _mutex._global_lock->unlock();
     return;
   }
 
@@ -95,7 +95,7 @@ wait() {
       << *current_thread << " awake on " << *this << "\n";
   }
 
-  _mutex._global_lock.unlock();
+  _mutex._global_lock->unlock();
 }
 
 /**
@@ -108,7 +108,7 @@ wait() {
  */
 void ConditionVarDebug::
 wait(double timeout) {
-  _mutex._global_lock.lock();
+  _mutex._global_lock->lock();
 
   Thread *current_thread = Thread::get_current_thread();
 
@@ -117,7 +117,7 @@ wait(double timeout) {
     ostr << *current_thread << " attempted to wait on "
          << *this << " without holding " << _mutex;
     nassert_raise(ostr.str());
-    _mutex._global_lock.unlock();
+    _mutex._global_lock->unlock();
     return;
   }
 
@@ -144,7 +144,7 @@ wait(double timeout) {
      << *current_thread << " awake on " << *this << "\n";
   }
 
-  _mutex._global_lock.unlock();
+  _mutex._global_lock->unlock();
 }
 
 /**
@@ -158,7 +158,7 @@ wait(double timeout) {
  */
 void ConditionVarDebug::
 notify() {
-  _mutex._global_lock.lock();
+  _mutex._global_lock->lock();
 
   /*
   if (!_mutex.do_debug_is_locked()) {
@@ -167,7 +167,7 @@ notify() {
     ostr << *current_thread << " attempted to notify "
          << *this << " without holding " << _mutex;
     nassert_raise(ostr.str());
-    _mutex._global_lock.unlock();
+    _mutex._global_lock->unlock();
     return;
   }
   */
@@ -179,7 +179,7 @@ notify() {
   }
 
   _impl.notify();
-  _mutex._global_lock.unlock();
+  _mutex._global_lock->unlock();
 }
 
 /**
@@ -190,7 +190,7 @@ notify() {
  */
 void ConditionVarDebug::
 notify_all() {
-  _mutex._global_lock.lock();
+  _mutex._global_lock->lock();
 
   /*
   if (!_mutex.do_debug_is_locked()) {
@@ -199,7 +199,7 @@ notify_all() {
     ostr << *current_thread << " attempted to notify "
          << *this << " without holding " << _mutex;
     nassert_raise(ostr.str());
-    _mutex._global_lock.unlock();
+    _mutex._global_lock->unlock();
     return;
   }
   */
@@ -211,7 +211,7 @@ notify_all() {
   }
 
   _impl.notify_all();
-  _mutex._global_lock.unlock();
+  _mutex._global_lock->unlock();
 }
 
 /**

panda/src/pipeline/mutexDebug.I  +28 -14

@@ -18,9 +18,9 @@
 INLINE void MutexDebug::
 lock() {
   TAU_PROFILE("void MutexDebug::acquire()", " ", TAU_USER);
-  _global_lock.lock();
+  _global_lock->lock();
   ((MutexDebug *)this)->do_lock(Thread::get_current_thread());
-  _global_lock.unlock();
+  _global_lock->unlock();
 }
 
 /**
@@ -30,9 +30,9 @@ lock() {
 INLINE bool MutexDebug::
 try_lock() {
   TAU_PROFILE("void MutexDebug::try_lock()", " ", TAU_USER);
-  _global_lock.lock();
+  _global_lock->lock();
   bool acquired = ((MutexDebug *)this)->do_try_lock(Thread::get_current_thread());
-  _global_lock.unlock();
+  _global_lock->unlock();
   return acquired;
 }
 
@@ -43,9 +43,9 @@ try_lock() {
 INLINE void MutexDebug::
 unlock() {
   TAU_PROFILE("void MutexDebug::unlock()", " ", TAU_USER);
-  _global_lock.lock();
+  _global_lock->lock();
   ((MutexDebug *)this)->do_unlock();
-  _global_lock.unlock();
+  _global_lock->unlock();
 }
 
 /**
@@ -62,9 +62,9 @@ INLINE void MutexDebug::
 acquire(Thread *current_thread) const {
   TAU_PROFILE("void MutexDebug::acquire(Thread *)", " ", TAU_USER);
   nassertv(current_thread == Thread::get_current_thread());
-  _global_lock.lock();
+  _global_lock->lock();
   ((MutexDebug *)this)->do_lock(current_thread);
-  _global_lock.unlock();
+  _global_lock->unlock();
 }
 
 /**
@@ -75,9 +75,9 @@ INLINE bool MutexDebug::
 try_acquire(Thread *current_thread) const {
   TAU_PROFILE("void MutexDebug::try_acquire(Thread *)", " ", TAU_USER);
   nassertr(current_thread == Thread::get_current_thread(), false);
-  _global_lock.lock();
+  _global_lock->lock();
   bool acquired = ((MutexDebug *)this)->do_try_lock(current_thread);
-  _global_lock.unlock();
+  _global_lock->unlock();
   return acquired;
 }
 
@@ -114,9 +114,9 @@ elevate_lock() const {
 INLINE void MutexDebug::
 release() const {
   TAU_PROFILE("void MutexDebug::release()", " ", TAU_USER);
-  _global_lock.lock();
+  _global_lock->lock();
   ((MutexDebug *)this)->do_unlock();
-  _global_lock.unlock();
+  _global_lock->unlock();
 }
 
 /**
@@ -128,8 +128,22 @@ release() const {
 INLINE bool MutexDebug::
 debug_is_locked() const {
   TAU_PROFILE("bool MutexDebug::debug_is_locked()", " ", TAU_USER);
-  _global_lock.lock();
+  _global_lock->lock();
   bool is_locked = do_debug_is_locked();
-  _global_lock.unlock();
+  _global_lock->unlock();
   return is_locked;
 }
+
+/**
+ * Ensures the global MutexImpl pointer has been created, and returns its
+ * pointer.  Since this method is called by the MutexDebug constructor, any
+ * other (non-static) methods of MutexDebug may simply assume that the pointer
+ * has already been created.
+ */
+INLINE MutexTrueImpl *MutexDebug::
+get_global_lock() {
+  if (_global_lock == nullptr) {
+    _global_lock = new MutexTrueImpl;
+  }
+  return _global_lock;
+}
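The get_global_lock() added back above is the construct-on-first-use half of the fix: the lock is created with new the first time anything needs it and, per the commit message, deliberately never deleted. A standalone sketch of the same idiom, again with std::mutex standing in for MutexTrueImpl (names are illustrative, not Panda3D's API):

#include <mutex>

// Created on first use and intentionally leaked: with no destructor left to
// run at exit, the lock remains valid even while other static objects are
// being destroyed.
static std::mutex *get_global_lock() {
  static std::mutex *lock = new std::mutex;
  return lock;
}

struct Guarded {
  ~Guarded() {
    std::lock_guard<std::mutex> guard(*get_global_lock());  // safe at shutdown
    // ... cleanup that must hold the global lock ...
  }
};

The sketch uses a function-local static for brevity; the Panda3D version instead checks a static member pointer for nullptr, which works because get_global_lock() is called from the MutexDebug constructor, so every other method can assume the pointer already exists.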

panda/src/pipeline/mutexDebug.cxx  +8 -8

@@ -21,7 +21,7 @@ using std::ostream;
 using std::ostringstream;
 
 int MutexDebug::_pstats_count = 0;
-MutexTrueImpl MutexDebug::_global_lock;
+MutexTrueImpl *MutexDebug::_global_lock;
 
 /**
  *
@@ -34,7 +34,7 @@ MutexDebug(const std::string &name, bool allow_recursion, bool lightweight) :
   _locking_thread(nullptr),
   _lock_count(0),
   _deleted_name(nullptr),
-  _cvar_impl(_global_lock)
+  _cvar_impl(*get_global_lock())
 {
 #ifndef SIMPLE_THREADS
   // If we're using real threads, there's no such thing as a lightweight
@@ -87,12 +87,12 @@ output(ostream &out) const {
  */
 void MutexDebug::
 output_with_holder(ostream &out) const {
-  _global_lock.lock();
+  _global_lock->lock();
   output(out);
   if (_locking_thread != nullptr) {
     out << " (held by " << *_locking_thread << ")\n";
   }
-  _global_lock.unlock();
+  _global_lock->unlock();
 }
 
 /**
@@ -102,9 +102,9 @@ output_with_holder(ostream &out) const {
  */
 void MutexDebug::
 increment_pstats() {
-  _global_lock.lock();
+  _global_lock->lock();
   ++_pstats_count;
-  _global_lock.unlock();
+  _global_lock->unlock();
 }
 
 /**
@@ -113,9 +113,9 @@ increment_pstats() {
  */
 void MutexDebug::
 decrement_pstats() {
-  _global_lock.lock();
+  _global_lock->lock();
   --_pstats_count;
-  _global_lock.unlock();
+  _global_lock->unlock();
 }
 
 /**

panda/src/pipeline/mutexDebug.h  +3 -1

@@ -65,6 +65,8 @@ private:
   void report_deadlock(Thread *current_thread);
 
 private:
+  INLINE static MutexTrueImpl *get_global_lock();
+
   bool _allow_recursion;
   bool _lightweight;
   Thread *_locking_thread;
@@ -78,7 +80,7 @@ private:
   ConditionVarImpl _cvar_impl;
 
   static int _pstats_count;
-  static MutexTrueImpl _global_lock;
+  static MutexTrueImpl *_global_lock;
 
   friend class ConditionVarDebug;
 };