
make sync primitives more similar to python function names

David Rose committed 17 years ago
parent · commit ef771a796b
100 changed files with 733 additions and 387 deletions
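
Note: the renaming follows the method names of Python's threading module (acquire/release on locks, notify/notify_all on condition variables). A minimal sketch of the calling convention after this commit, assuming Panda3D's Mutex and ConditionVarFull classes; m and cv are hypothetical locals, not taken from any file below:

    // Sketch only: the renamed API surface introduced by this commit.
    #include "pmutex.h"
    #include "conditionVarFull.h"

    void rename_example() {
      Mutex m;
      ConditionVarFull cv(m);

      m.acquire();        // was m.lock()
      cv.notify();        // was cv.signal()
      cv.notify_all();    // was cv.signal_all(); ConditionVarFull only
      m.release();        // unchanged
    }

The try_lock() methods on the low-level *Impl classes are likewise renamed to try_acquire(), as the diffs below show.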
  1. dtool/src/dtoolbase/deletedBufferChain.cxx (+2 -2)
  2. dtool/src/dtoolbase/memoryHook.cxx (+8 -8)
  3. dtool/src/dtoolbase/mutexDummyImpl.I (+12 -2)
  4. dtool/src/dtoolbase/mutexDummyImpl.h (+2 -1)
  5. dtool/src/dtoolbase/mutexPosixImpl.I (+12 -12)
  6. dtool/src/dtoolbase/mutexPosixImpl.h (+4 -4)
  7. dtool/src/dtoolbase/mutexSpinlockImpl.I (+5 -5)
  8. dtool/src/dtoolbase/mutexSpinlockImpl.h (+2 -2)
  9. dtool/src/dtoolbase/mutexWin32Impl.I (+4 -4)
  10. dtool/src/dtoolbase/mutexWin32Impl.h (+2 -2)
  11. dtool/src/dtoolbase/neverFreeMemory.I (+1 -1)
  12. dtool/src/dtoolbase/neverFreeMemory.cxx (+1 -1)
  13. dtool/src/dtoolbase/typeRegistry.cxx (+20 -20)
  14. panda/src/audiotraits/milesAudioManager.cxx (+4 -4)
  15. panda/src/chan/animControl.cxx (+2 -2)
  16. panda/src/device/analogNode.I (+3 -3)
  17. panda/src/device/analogNode.cxx (+2 -2)
  18. panda/src/device/buttonNode.I (+5 -5)
  19. panda/src/device/buttonNode.cxx (+3 -3)
  20. panda/src/device/clientAnalogDevice.I (+1 -1)
  21. panda/src/device/clientButtonDevice.cxx (+1 -1)
  22. panda/src/device/clientDevice.I (+4 -4)
  23. panda/src/device/clientDevice.cxx (+1 -1)
  24. panda/src/device/clientDevice.h (+1 -1)
  25. panda/src/device/clientDialDevice.I (+2 -2)
  26. panda/src/device/dialNode.I (+3 -3)
  27. panda/src/device/trackerNode.cxx (+1 -1)
  28. panda/src/display/graphicsEngine.cxx (+11 -11)
  29. panda/src/event/asyncTask.cxx (+1 -1)
  30. panda/src/event/asyncTaskChain.cxx (+15 -15)
  31. panda/src/event/asyncTaskManager.cxx (+3 -3)
  32. panda/src/express/subStreamBuf.cxx (+2 -2)
  33. panda/src/express/trueClock.I (+1 -1)
  34. panda/src/express/weakReferenceList.cxx (+3 -3)
  35. panda/src/gobj/adaptiveLru.cxx (+1 -1)
  36. panda/src/gobj/geomMunger.cxx (+1 -1)
  37. panda/src/gobj/geomVertexArrayData.I (+1 -1)
  38. panda/src/gobj/geomVertexData.cxx (+1 -1)
  39. panda/src/gobj/simpleLru.cxx (+1 -1)
  40. panda/src/gobj/texture.cxx (+4 -4)
  41. panda/src/gobj/vertexDataPage.cxx (+6 -6)
  42. panda/src/net/datagramQueue.cxx (+3 -3)
  43. panda/src/pipeline/Sources.pp (+3 -0)
  44. panda/src/pipeline/conditionVar.I (+2 -2)
  45. panda/src/pipeline/conditionVar.h (+3 -3)
  46. panda/src/pipeline/conditionVarDebug.cxx (+7 -7)
  47. panda/src/pipeline/conditionVarDebug.h (+1 -1)
  48. panda/src/pipeline/conditionVarDirect.I (+5 -5)
  49. panda/src/pipeline/conditionVarDirect.h (+1 -1)
  50. panda/src/pipeline/conditionVarDummyImpl.I (+2 -2)
  51. panda/src/pipeline/conditionVarDummyImpl.h (+2 -2)
  52. panda/src/pipeline/conditionVarFull.h (+5 -5)
  53. panda/src/pipeline/conditionVarFullDebug.cxx (+10 -10)
  54. panda/src/pipeline/conditionVarFullDebug.h (+2 -2)
  55. panda/src/pipeline/conditionVarFullDirect.I (+8 -8)
  56. panda/src/pipeline/conditionVarFullDirect.h (+2 -2)
  57. panda/src/pipeline/conditionVarFullWin32Impl.I (+8 -8)
  58. panda/src/pipeline/conditionVarFullWin32Impl.h (+5 -5)
  59. panda/src/pipeline/conditionVarPosixImpl.I (+4 -4)
  60. panda/src/pipeline/conditionVarPosixImpl.h (+2 -2)
  61. panda/src/pipeline/conditionVarSimpleImpl.I (+2 -2)
  62. panda/src/pipeline/conditionVarSimpleImpl.cxx (+2 -2)
  63. panda/src/pipeline/conditionVarSimpleImpl.h (+2 -2)
  64. panda/src/pipeline/conditionVarSpinlockImpl.I (+2 -2)
  65. panda/src/pipeline/conditionVarSpinlockImpl.cxx (+1 -1)
  66. panda/src/pipeline/conditionVarSpinlockImpl.h (+2 -2)
  67. panda/src/pipeline/conditionVarWin32Impl.I (+1 -1)
  68. panda/src/pipeline/conditionVarWin32Impl.h (+3 -3)
  69. panda/src/pipeline/cyclerHolder.I (+1 -1)
  70. panda/src/pipeline/cyclerHolder.h (+1 -1)
  71. panda/src/pipeline/lightMutexDirect.I (+4 -4)
  72. panda/src/pipeline/lightMutexDirect.h (+1 -1)
  73. panda/src/pipeline/lightMutexHolder.I (+2 -2)
  74. panda/src/pipeline/lightReMutexDirect.I (+11 -11)
  75. panda/src/pipeline/lightReMutexDirect.h (+2 -2)
  76. panda/src/pipeline/lightReMutexHolder.I (+3 -3)
  77. panda/src/pipeline/mutexDebug.I (+24 -22)
  78. panda/src/pipeline/mutexDebug.cxx (+98 -29)
  79. panda/src/pipeline/mutexDebug.h (+5 -4)
  80. panda/src/pipeline/mutexDirect.I (+17 -4)
  81. panda/src/pipeline/mutexDirect.h (+2 -1)
  82. panda/src/pipeline/mutexHolder.I (+2 -2)
  83. panda/src/pipeline/mutexHolder.h (+1 -1)
  84. panda/src/pipeline/mutexSimpleImpl.I (+5 -5)
  85. panda/src/pipeline/mutexSimpleImpl.h (+2 -2)
  86. panda/src/pipeline/pipeline.cxx (+1 -1)
  87. panda/src/pipeline/pipelineCyclerDummyImpl.I (+4 -4)
  88. panda/src/pipeline/pipelineCyclerDummyImpl.h (+1 -1)
  89. panda/src/pipeline/pipelineCyclerTrivialImpl.I (+3 -3)
  90. panda/src/pipeline/pipelineCyclerTrivialImpl.h (+1 -1)
  91. panda/src/pipeline/pipelineCyclerTrueImpl.I (+11 -11)
  92. panda/src/pipeline/pipelineCyclerTrueImpl.cxx (+2 -2)
  93. panda/src/pipeline/pipelineCyclerTrueImpl.h (+2 -2)
  94. panda/src/pipeline/pipeline_composite2.cxx (+1 -0)
  95. panda/src/pipeline/reMutexDirect.I (+67 -17)
  96. panda/src/pipeline/reMutexDirect.cxx (+47 -8)
  97. panda/src/pipeline/reMutexDirect.h (+8 -4)
  98. panda/src/pipeline/reMutexHolder.I (+3 -3)
  99. panda/src/pipeline/semaphore.I (+126 -0)
  100. panda/src/pipeline/semaphore.cxx (+26 -0)

+ 2 - 2
dtool/src/dtoolbase/deletedBufferChain.cxx

@@ -51,7 +51,7 @@ allocate(size_t size, TypeHandle type_handle) {
 
   ObjectNode *obj;
 
-  _lock.lock();
+  _lock.acquire();
   if (_deleted_chain != (ObjectNode *)NULL) {
     obj = _deleted_chain;
     _deleted_chain = _deleted_chain->_next;
@@ -121,7 +121,7 @@ deallocate(void *ptr, TypeHandle type_handle) {
   assert(orig_flag == (AtomicAdjust::Integer)DCF_alive);
 #endif  // NDEBUG
 
-  _lock.lock();
+  _lock.acquire();
 
   obj->_next = _deleted_chain;
   _deleted_chain = obj;

+ 8 - 8
dtool/src/dtoolbase/memoryHook.cxx

@@ -152,7 +152,7 @@ MemoryHook(const MemoryHook &copy) :
   _max_heap_size = copy._max_heap_size;
 #endif
 
-  ((MutexImpl &)copy._lock).lock();
+  ((MutexImpl &)copy._lock).acquire();
   _deleted_chains = copy._deleted_chains;
   ((MutexImpl &)copy._lock).release();
 }
@@ -182,7 +182,7 @@ MemoryHook::
 void *MemoryHook::
 heap_alloc_single(size_t size) {
 #ifdef MEMORY_HOOK_MALLOC_LOCK
-  _lock.lock();
+  _lock.acquire();
   void *alloc = call_malloc(inflate_size(size));
   _lock.release();
 #else
@@ -225,7 +225,7 @@ heap_free_single(void *ptr) {
 #endif  // DO_MEMORY_USAGE
 
 #ifdef MEMORY_HOOK_MALLOC_LOCK
-  _lock.lock();
+  _lock.acquire();
   call_free(alloc);
   _lock.release();
 #else
@@ -248,7 +248,7 @@ heap_free_single(void *ptr) {
 void *MemoryHook::
 heap_alloc_array(size_t size) {
 #ifdef MEMORY_HOOK_MALLOC_LOCK
-  _lock.lock();
+  _lock.acquire();
   void *alloc = call_malloc(inflate_size(size));
   _lock.release();
 #else
@@ -291,7 +291,7 @@ heap_realloc_array(void *ptr, size_t size) {
 #endif  // DO_MEMORY_USAGE
 
 #ifdef MEMORY_HOOK_MALLOC_LOCK
-  _lock.lock();
+  _lock.acquire();
   alloc = call_realloc(alloc, inflate_size(size));
   _lock.release();
 #else
@@ -323,7 +323,7 @@ heap_free_array(void *ptr) {
 #endif  // DO_MEMORY_USAGE
 
 #ifdef MEMORY_HOOK_MALLOC_LOCK
-  _lock.lock();
+  _lock.acquire();
   call_free(alloc);
   _lock.release();
 #else
@@ -353,7 +353,7 @@ heap_trim(size_t pad) {
   // Since malloc_trim() isn't standard C, we can't be sure it exists
   // on a given platform.  But if we're using dlmalloc, we know we
   // have dlmalloc_trim.
-  _lock.lock();
+  _lock.acquire();
   if (dlmalloc_trim(pad)) {
     trimmed = true;
   }
@@ -485,7 +485,7 @@ DeletedBufferChain *MemoryHook::
 get_deleted_chain(size_t buffer_size) {
   DeletedBufferChain *chain;
 
-  _lock.lock();
+  _lock.acquire();
   DeletedChains::iterator dci = _deleted_chains.find(buffer_size);
   if (dci != _deleted_chains.end()) {
     chain = (*dci).second;

+ 12 - 2
dtool/src/dtoolbase/mutexDummyImpl.I

@@ -32,12 +32,22 @@ INLINE MutexDummyImpl::
 }
 
 ////////////////////////////////////////////////////////////////////
-//     Function: MutexDummyImpl::lock
+//     Function: MutexDummyImpl::acquire
 //       Access: Public
 //  Description: 
 ////////////////////////////////////////////////////////////////////
 INLINE void MutexDummyImpl::
-lock() {
+acquire() {
+}
+
+////////////////////////////////////////////////////////////////////
+//     Function: MutexDummyImpl::try_acquire
+//       Access: Public
+//  Description: 
+////////////////////////////////////////////////////////////////////
+INLINE bool MutexDummyImpl::
+try_acquire() {
+  return true;
 }
 
 ////////////////////////////////////////////////////////////////////

+ 2 - 1
dtool/src/dtoolbase/mutexDummyImpl.h

@@ -29,7 +29,8 @@ public:
   INLINE MutexDummyImpl();
   INLINE ~MutexDummyImpl();
 
-  INLINE void lock();
+  INLINE void acquire();
+  INLINE bool try_acquire();
   INLINE void release();
 };
 

+ 12 - 12
dtool/src/dtoolbase/mutexPosixImpl.I

@@ -42,25 +42,25 @@ INLINE MutexPosixImpl::
 }
 
 ////////////////////////////////////////////////////////////////////
-//     Function: MutexPosixImpl::lock
+//     Function: MutexPosixImpl::acquire
 //       Access: Public
 //  Description: 
 ////////////////////////////////////////////////////////////////////
 INLINE void MutexPosixImpl::
-lock() {
-  TAU_PROFILE("void MutexPosixImpl::lock", " ", TAU_USER);
+acquire() {
+  TAU_PROFILE("void MutexPosixImpl::acquire", " ", TAU_USER);
   int result = pthread_mutex_lock(&_lock);
   assert(result == 0);
 }
 
 ////////////////////////////////////////////////////////////////////
-//     Function: MutexPosixImpl::try_lock
+//     Function: MutexPosixImpl::try_acquire
 //       Access: Public
 //  Description: 
 ////////////////////////////////////////////////////////////////////
 INLINE bool MutexPosixImpl::
-try_lock() {
-  TAU_PROFILE("bool MutexPosixImpl::try_lock", " ", TAU_USER);
+try_acquire() {
+  TAU_PROFILE("bool MutexPosixImpl::try_acquire", " ", TAU_USER);
   int result = pthread_mutex_trylock(&_lock);
   assert(result == 0 || result == EBUSY);
   return (result == 0);
@@ -107,25 +107,25 @@ INLINE ReMutexPosixImpl::
 }
 
 ////////////////////////////////////////////////////////////////////
-//     Function: ReMutexPosixImpl::lock
+//     Function: ReMutexPosixImpl::acquire
 //       Access: Public
 //  Description: 
 ////////////////////////////////////////////////////////////////////
 INLINE void ReMutexPosixImpl::
-lock() {
-  TAU_PROFILE("void ReMutexPosixImpl::lock", " ", TAU_USER);
+acquire() {
+  TAU_PROFILE("void ReMutexPosixImpl::acquire", " ", TAU_USER);
   int result = pthread_mutex_lock(&_lock);
   assert(result == 0);
 }
 
 ////////////////////////////////////////////////////////////////////
-//     Function: ReMutexPosixImpl::try_lock
+//     Function: ReMutexPosixImpl::try_acquire
 //       Access: Public
 //  Description: 
 ////////////////////////////////////////////////////////////////////
 INLINE bool ReMutexPosixImpl::
-try_lock() {
-  TAU_PROFILE("bool ReMutexPosixImpl::try_lock", " ", TAU_USER);
+try_acquire() {
+  TAU_PROFILE("bool ReMutexPosixImpl::try_acquire", " ", TAU_USER);
   int result = pthread_mutex_trylock(&_lock);
   assert(result == 0 || result == EBUSY);
   return (result == 0);

+ 4 - 4
dtool/src/dtoolbase/mutexPosixImpl.h

@@ -33,8 +33,8 @@ public:
   INLINE MutexPosixImpl();
   INLINE ~MutexPosixImpl();
 
-  INLINE void lock();
-  INLINE bool try_lock();
+  INLINE void acquire();
+  INLINE bool try_acquire();
   INLINE void release();
 
 private:
@@ -51,8 +51,8 @@ public:
   INLINE ReMutexPosixImpl();
   INLINE ~ReMutexPosixImpl();
 
-  INLINE void lock();
-  INLINE bool try_lock();
+  INLINE void acquire();
+  INLINE bool try_acquire();
   INLINE void release();
 
 private:

+ 5 - 5
dtool/src/dtoolbase/mutexSpinlockImpl.I

@@ -33,24 +33,24 @@ INLINE MutexSpinlockImpl::
 }
 
 ////////////////////////////////////////////////////////////////////
-//     Function: MutexSpinlockImpl::lock
+//     Function: MutexSpinlockImpl::acquire
 //       Access: Public
 //  Description: 
 ////////////////////////////////////////////////////////////////////
 INLINE void MutexSpinlockImpl::
-lock() {
-  if (!try_lock()) {
+acquire() {
+  if (!try_acquire()) {
     do_lock();
   }
 }
 
 ////////////////////////////////////////////////////////////////////
-//     Function: MutexSpinlockImpl::try_lock
+//     Function: MutexSpinlockImpl::try_acquire
 //       Access: Public
 //  Description: 
 ////////////////////////////////////////////////////////////////////
 INLINE bool MutexSpinlockImpl::
-try_lock() {
+try_acquire() {
   return (AtomicAdjust::compare_and_exchange(_lock, 0, 1) == 0);
 }
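
For context, the spinlock builds acquire() on top of try_acquire(): a single atomic compare-and-exchange claims the lock word, and only the contended case falls through to the out-of-line do_lock() path. A standalone sketch of the same idea using std::atomic instead of Panda3D's AtomicAdjust (illustration only, not the actual implementation):

    #include <atomic>

    class SpinlockSketch {
    public:
      bool try_acquire() {
        int expected = 0;
        // Atomically set the lock word to 1, but only if it was 0.
        return _lock.compare_exchange_strong(expected, 1);
      }
      void acquire() {
        // Fast path first; otherwise spin until the holder releases.
        while (!try_acquire()) {
          // busy-wait (a real implementation would pause or yield here)
        }
      }
      void release() {
        _lock.store(0);
      }
    private:
      std::atomic<int> _lock{0};
    };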
 

+ 2 - 2
dtool/src/dtoolbase/mutexSpinlockImpl.h

@@ -36,8 +36,8 @@ public:
   INLINE MutexSpinlockImpl();
   INLINE ~MutexSpinlockImpl();
 
-  INLINE void lock();
-  INLINE bool try_lock();
+  INLINE void acquire();
+  INLINE bool try_acquire();
   INLINE void release();
 
 private:

+ 4 - 4
dtool/src/dtoolbase/mutexWin32Impl.I

@@ -24,22 +24,22 @@ INLINE MutexWin32Impl::
 }
 
 ////////////////////////////////////////////////////////////////////
-//     Function: MutexWin32Impl::lock
+//     Function: MutexWin32Impl::acquire
 //       Access: Public
 //  Description: 
 ////////////////////////////////////////////////////////////////////
 INLINE void MutexWin32Impl::
-lock() {
+acquire() {
   EnterCriticalSection(&_lock);
 }
 
 ////////////////////////////////////////////////////////////////////
-//     Function: MutexWin32Impl::try_lock
+//     Function: MutexWin32Impl::try_acquire
 //       Access: Public
 //  Description: 
 ////////////////////////////////////////////////////////////////////
 INLINE bool MutexWin32Impl::
-try_lock() {
+try_acquire() {
   return (TryEnterCriticalSection(&_lock) != 0);
 }
 

+ 2 - 2
dtool/src/dtoolbase/mutexWin32Impl.h

@@ -31,8 +31,8 @@ public:
   MutexWin32Impl();
   INLINE ~MutexWin32Impl();
 
-  INLINE void lock();
-  INLINE bool try_lock();
+  INLINE void acquire();
+  INLINE bool try_acquire();
   INLINE void release();
 
 private:

+ 1 - 1
dtool/src/dtoolbase/neverFreeMemory.I

@@ -57,7 +57,7 @@ get_total_used() {
 INLINE size_t NeverFreeMemory::
 get_total_unused() {
   NeverFreeMemory *global_ptr = get_global_ptr();
-  global_ptr->_lock.lock();
+  global_ptr->_lock.acquire();
   size_t total_unused = global_ptr->_total_alloc - global_ptr->_total_used;
   global_ptr->_lock.release();
   return total_unused;

+ 1 - 1
dtool/src/dtoolbase/neverFreeMemory.cxx

@@ -43,7 +43,7 @@ NeverFreeMemory() {
 ////////////////////////////////////////////////////////////////////
 void *NeverFreeMemory::
 ns_alloc(size_t size) {
-  _lock.lock();
+  _lock.acquire();
 
   _total_used += size;
   

+ 20 - 20
dtool/src/dtoolbase/typeRegistry.cxx

@@ -37,7 +37,7 @@ TypeRegistry *TypeRegistry::_global_pointer = NULL;
 ////////////////////////////////////////////////////////////////////
 bool TypeRegistry::
 register_type(TypeHandle &type_handle, const string &name) {
-  _lock->lock();
+  _lock->acquire();
 
   if (type_handle != TypeHandle::none()) {
     // Here's a type that was already registered.  Just make sure
@@ -124,7 +124,7 @@ register_type(TypeHandle &type_handle, const string &name) {
 ////////////////////////////////////////////////////////////////////
 TypeHandle TypeRegistry::
 register_dynamic_type(const string &name) {
-  _lock->lock();
+  _lock->acquire();
 
   NameRegistry::iterator ri;
   ri = _name_registry.find(name);
@@ -165,7 +165,7 @@ register_dynamic_type(const string &name) {
 ////////////////////////////////////////////////////////////////////
 void TypeRegistry::
 record_derivation(TypeHandle child, TypeHandle parent) {
-  _lock->lock();
+  _lock->acquire();
 
   TypeRegistryNode *cnode = look_up(child, NULL);
   assert(cnode != (TypeRegistryNode *)NULL);
@@ -198,7 +198,7 @@ record_derivation(TypeHandle child, TypeHandle parent) {
 ////////////////////////////////////////////////////////////////////
 void TypeRegistry::
 record_alternate_name(TypeHandle type, const string &name) {
-  _lock->lock();
+  _lock->acquire();
 
   TypeRegistryNode *rnode = look_up(type, (TypedObject *)NULL);
   if (rnode != (TypeRegistryNode *)NULL) {
@@ -223,7 +223,7 @@ record_alternate_name(TypeHandle type, const string &name) {
 ////////////////////////////////////////////////////////////////////
 TypeHandle TypeRegistry::
 find_type(const string &name) const {
-  _lock->lock();
+  _lock->acquire();
 
   TypeHandle handle = TypeHandle::none();
   NameRegistry::const_iterator ri;
@@ -249,7 +249,7 @@ find_type(const string &name) const {
 ////////////////////////////////////////////////////////////////////
 string TypeRegistry::
 get_name(TypeHandle type, TypedObject *object) const {
-  _lock->lock();
+  _lock->acquire();
   TypeRegistryNode *rnode = look_up(type, object);
   assert(rnode != (TypeRegistryNode *)NULL);
   string name = rnode->_name;
@@ -279,7 +279,7 @@ get_name(TypeHandle type, TypedObject *object) const {
 bool TypeRegistry::
 is_derived_from(TypeHandle child, TypeHandle base,
                 TypedObject *child_object) {
-  _lock->lock();
+  _lock->acquire();
 
   const TypeRegistryNode *child_node = look_up(child, child_object);
   const TypeRegistryNode *base_node = look_up(base, (TypedObject *)NULL);
@@ -300,7 +300,7 @@ is_derived_from(TypeHandle child, TypeHandle base,
 ////////////////////////////////////////////////////////////////////
 int TypeRegistry::
 get_num_typehandles() {
-  _lock->lock();
+  _lock->acquire();
   int num_types = (int)_handle_registry.size();
   _lock->release();
   return num_types;
@@ -314,7 +314,7 @@ get_num_typehandles() {
 ////////////////////////////////////////////////////////////////////
 TypeHandle TypeRegistry::
 get_typehandle(int n) {
-  _lock->lock();
+  _lock->acquire();
   TypeRegistryNode *rnode = NULL;
   if (n >= 0 && n < (int)_handle_registry.size()) {
     rnode = _handle_registry[n];
@@ -337,7 +337,7 @@ get_typehandle(int n) {
 ////////////////////////////////////////////////////////////////////
 int TypeRegistry::
 get_num_root_classes() {
-  _lock->lock();
+  _lock->acquire();
   freshen_derivations();
   int num_roots = _root_classes.size();
   _lock->release();
@@ -352,7 +352,7 @@ get_num_root_classes() {
 ////////////////////////////////////////////////////////////////////
 TypeHandle TypeRegistry::
 get_root_class(int n) {
-  _lock->lock();
+  _lock->acquire();
   freshen_derivations();
   TypeHandle handle;
   if (n >= 0 && n < (int)_root_classes.size()) {
@@ -381,7 +381,7 @@ get_root_class(int n) {
 ////////////////////////////////////////////////////////////////////
 int TypeRegistry::
 get_num_parent_classes(TypeHandle child, TypedObject *child_object) const {
-  _lock->lock();
+  _lock->acquire();
   TypeRegistryNode *rnode = look_up(child, child_object);
   assert(rnode != (TypeRegistryNode *)NULL);
   int num_parents = rnode->_parent_classes.size();
@@ -398,7 +398,7 @@ get_num_parent_classes(TypeHandle child, TypedObject *child_object) const {
 ////////////////////////////////////////////////////////////////////
 TypeHandle TypeRegistry::
 get_parent_class(TypeHandle child, int index) const {
-  _lock->lock();
+  _lock->acquire();
   TypeHandle handle;
   TypeRegistryNode *rnode = look_up(child, (TypedObject *)NULL);
   assert(rnode != (TypeRegistryNode *)NULL);
@@ -423,7 +423,7 @@ get_parent_class(TypeHandle child, int index) const {
 ////////////////////////////////////////////////////////////////////
 int TypeRegistry::
 get_num_child_classes(TypeHandle child, TypedObject *child_object) const {
-  _lock->lock();
+  _lock->acquire();
   TypeRegistryNode *rnode = look_up(child, child_object);
   assert(rnode != (TypeRegistryNode *)NULL);
   int num_children = rnode->_child_classes.size();
@@ -440,7 +440,7 @@ get_num_child_classes(TypeHandle child, TypedObject *child_object) const {
 ////////////////////////////////////////////////////////////////////
 TypeHandle TypeRegistry::
 get_child_class(TypeHandle child, int index) const {
-  _lock->lock();
+  _lock->acquire();
   TypeHandle handle;
   TypeRegistryNode *rnode = look_up(child, (TypedObject *)NULL);
   assert(rnode != (TypeRegistryNode *)NULL);
@@ -468,7 +468,7 @@ get_child_class(TypeHandle child, int index) const {
 TypeHandle TypeRegistry::
 get_parent_towards(TypeHandle child, TypeHandle base,
                    TypedObject *child_object) {
-  _lock->lock();
+  _lock->acquire();
   TypeHandle handle;
   const TypeRegistryNode *child_node = look_up(child, child_object);
   const TypeRegistryNode *base_node = look_up(base, NULL);
@@ -495,7 +495,7 @@ get_parent_towards(TypeHandle child, TypeHandle base,
 void TypeRegistry::
 reregister_types() {
   init_lock();
-  _lock->lock();
+  _lock->acquire();
   HandleRegistry::iterator ri;
   TypeRegistry *reg = ptr();
   for (ri = reg->_handle_registry.begin();
@@ -519,7 +519,7 @@ reregister_types() {
 ////////////////////////////////////////////////////////////////////
 void TypeRegistry::
 write(ostream &out) const {
-  _lock->lock();
+  _lock->acquire();
   do_write(out);
   _lock->release();
 }
@@ -533,7 +533,7 @@ write(ostream &out) const {
 TypeRegistry *TypeRegistry::
 ptr() {
   init_lock();
-  _lock->lock();
+  _lock->acquire();
   if (_global_pointer == NULL) {
     init_global_pointer();
   }
@@ -688,7 +688,7 @@ look_up(TypeHandle handle, TypedObject *object) const {
       // the lock while we do this, so we don't get a recursive lock.
       _lock->release();
       handle = object->force_init_type();
-      _lock->lock();
+      _lock->acquire();
       if (handle._index == 0) {
         // Strange.
         cerr
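
The last hunk above shows a pattern that recurs throughout this commit (asyncTask.cxx, asyncTaskChain.cxx, texture.cxx, the LRU classes): a non-recursive lock is released around a call that may itself need the lock, then re-acquired before returning. A generic sketch of that pattern, with hypothetical names rather than the Panda3D API:

    // Caller holds "lock" on entry and expects it held on return.
    // The callback may try to acquire the same non-recursive lock,
    // so drop it around the call to avoid self-deadlock.
    void call_out_unlocked(MutexImpl &lock, Callback &cb) {
      lock.release();
      cb.invoke();
      lock.acquire();
      // Anything guarded by "lock" may have changed while it was
      // released, so the caller must re-validate its state here.
    }

Callback here is a stand-in for whatever re-entrant work is being performed (force_init_type(), upon_death(), evict_lru(), and so on in the diffs).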

+ 4 - 4
panda/src/audiotraits/milesAudioManager.cxx

@@ -523,7 +523,7 @@ cleanup() {
       MutexHolder holder(_streams_lock);
       nassertv(!_stream_thread.is_null());
       _stream_thread->_keep_running = false;
-      _streams_cvar.signal();
+      _streams_cvar.notify();
       old_thread = _stream_thread;
       _stream_thread.clear();
     }
@@ -658,7 +658,7 @@ start_service_stream(HSTREAM stream) {
   MutexHolder holder(_streams_lock);
   nassertv(find(_streams.begin(), _streams.end(), stream) == _streams.end());
   _streams.push_back(stream);
-  _streams_cvar.signal();
+  _streams_cvar.notify();
 
   if (_stream_thread.is_null() && Thread::is_threading_supported()) {
     milesAudio_cat.info()
@@ -901,7 +901,7 @@ thread_main(volatile bool &keep_running) {
     // Now yield to be polite to the main application.
     _streams_lock.release();
     Thread::force_yield();
-    _streams_lock.lock();
+    _streams_lock.acquire();
   }
 }
 
@@ -921,7 +921,7 @@ do_service_streams() {
     _streams_lock.release();
     AIL_service_stream(stream, 0);
     Thread::consider_yield();
-    _streams_lock.lock();
+    _streams_lock.acquire();
     
     ++i;
   }

+ 2 - 2
panda/src/chan/animControl.cxx

@@ -75,7 +75,7 @@ setup_anim(PartBundle *part, AnimBundle *anim, int channel_index,
   // Now the AnimControl is fully set up.
   _marked_frame = -1;
   _pending = false;
-  _pending_cvar.signal_all();
+  _pending_cvar.notify_all();
   if (!_pending_done_event.empty()) {
     throw_event(_pending_done_event);
   }
@@ -105,7 +105,7 @@ fail_anim(PartBundle *part) {
   MutexHolder holder(_pending_lock);
   nassertv(_pending && part == _part);
   _pending = false;
-  _pending_cvar.signal_all();
+  _pending_cvar.notify_all();
   if (!_pending_done_event.empty()) {
     throw_event(_pending_done_event);
   }

+ 3 - 3
panda/src/device/analogNode.I

@@ -44,7 +44,7 @@ is_valid() const {
 ////////////////////////////////////////////////////////////////////
 INLINE int AnalogNode::
 get_num_controls() const {
-  _analog->lock();
+  _analog->acquire();
   int result = _analog->get_num_controls();
   _analog->unlock();
   return result;
@@ -60,7 +60,7 @@ get_num_controls() const {
 ////////////////////////////////////////////////////////////////////
 INLINE double AnalogNode::
 get_control_state(int index) const {
-  _analog->lock();
+  _analog->acquire();
   double result = _analog->get_control_state(index);
   _analog->unlock();
   return result;
@@ -75,7 +75,7 @@ get_control_state(int index) const {
 ////////////////////////////////////////////////////////////////////
 INLINE bool AnalogNode::
 is_control_known(int index) const {
-  _analog->lock();
+  _analog->acquire();
   bool result = _analog->is_control_known(index);
   _analog->unlock();
   return result;

+ 2 - 2
panda/src/device/analogNode.cxx

@@ -74,7 +74,7 @@ write(ostream &out, int indent_level) const {
   DataNode::write(out, indent_level);
 
   if (_analog != (ClientAnalogDevice *)NULL) {
-    _analog->lock();
+    _analog->acquire();
     _analog->write_controls(out, indent_level + 2);
     _analog->unlock();
   }
@@ -101,7 +101,7 @@ do_transmit_data(DataGraphTraverser *, const DataNodeTransmit &,
 
     LPoint2f out(0.0f, 0.0f);
 
-    _analog->lock();
+    _analog->acquire();
     for (int i = 0; i < max_outputs; i++) {
       if (_outputs[i]._index >= 0 &&
           _analog->is_control_known(_outputs[i]._index)) {

+ 5 - 5
panda/src/device/buttonNode.I

@@ -36,7 +36,7 @@ is_valid() const {
 ////////////////////////////////////////////////////////////////////
 INLINE int ButtonNode::
 get_num_buttons() const {
-  _button->lock();
+  _button->acquire();
   int result = _button->get_num_buttons();
   _button->unlock();
   return result;
@@ -58,7 +58,7 @@ get_num_buttons() const {
 ////////////////////////////////////////////////////////////////////
 INLINE void ButtonNode::
 set_button_map(int index, ButtonHandle button) {
-  _button->lock();
+  _button->acquire();
   _button->set_button_map(index, button);
   _button->unlock();
 }
@@ -73,7 +73,7 @@ set_button_map(int index, ButtonHandle button) {
 ////////////////////////////////////////////////////////////////////
 INLINE ButtonHandle ButtonNode::
 get_button_map(int index) const {
-  _button->lock();
+  _button->acquire();
   ButtonHandle result = _button->get_button_map(index);
   _button->unlock();
   return result;
@@ -88,7 +88,7 @@ get_button_map(int index) const {
 ////////////////////////////////////////////////////////////////////
 INLINE bool ButtonNode::
 get_button_state(int index) const {
-  _button->lock();
+  _button->acquire();
   bool result = _button->get_button_state(index);
   _button->unlock();
   return result;
@@ -103,7 +103,7 @@ get_button_state(int index) const {
 ////////////////////////////////////////////////////////////////////
 INLINE bool ButtonNode::
 is_button_known(int index) const {
-  _button->lock();
+  _button->acquire();
   bool result = _button->is_button_known(index);
   _button->unlock();
   return result;

+ 3 - 3
panda/src/device/buttonNode.cxx

@@ -75,7 +75,7 @@ output(ostream &out) const {
 
   if (_button != (ClientButtonDevice *)NULL) {
     out << " (";
-    _button->lock();
+    _button->acquire();
     _button->output_buttons(out);
     _button->unlock();
     out << ")";
@@ -92,7 +92,7 @@ write(ostream &out, int indent_level) const {
   DataNode::write(out, indent_level);
 
   if (_button != (ClientButtonDevice *)NULL) {
-    _button->lock();
+    _button->acquire();
     _button->write_buttons(out, indent_level + 2);
     _button->unlock();
   }
@@ -116,7 +116,7 @@ do_transmit_data(DataGraphTraverser *, const DataNodeTransmit &,
                  DataNodeTransmit &output) {
   if (is_valid()) {
     _button->poll();
-    _button->lock();
+    _button->acquire();
 
     (*_button_events) = (*_button->get_button_events());
 

+ 1 - 1
panda/src/device/clientAnalogDevice.I

@@ -52,7 +52,7 @@ get_num_controls() const {
 //     Function: ClientAnalogDevice::set_control_state
 //       Access: Public
 //  Description: Sets the state of the indicated analog index.  The
-//               caller should ensure that lock() is in effect while
+//               caller should ensure that acquire() is in effect while
 //               this call is made.  This should be a number in the
 //               range -1.0 to 1.0, representing the current position
 //               of the control within its total range of movement.

+ 1 - 1
panda/src/device/clientButtonDevice.cxx

@@ -39,7 +39,7 @@ ClientButtonDevice(ClientBase *client, const string &device_name):
 //               true indicates down, and false indicates up.  This
 //               may generate a ButtonEvent if the button has an
 //               associated ButtonHandle.  The caller should ensure
-//               that lock() is in effect while this call is made.
+//               that acquire() is in effect while this call is made.
 ////////////////////////////////////////////////////////////////////
 void ClientButtonDevice::
 set_button_state(int index, bool down) {

+ 4 - 4
panda/src/device/clientDevice.I

@@ -64,7 +64,7 @@ get_device_name() const {
 }
 
 ////////////////////////////////////////////////////////////////////
-//     Function: ClientDevice::lock
+//     Function: ClientDevice::acquire
 //       Access: Public
 //  Description: Grabs the mutex associated with this particular
 //               device.  The device will not update asynchronously
@@ -73,9 +73,9 @@ get_device_name() const {
 //               during the copy.
 ////////////////////////////////////////////////////////////////////
 INLINE void ClientDevice::
-lock() {
+acquire() {
 #ifdef OLD_HAVE_IPC
-  _lock.lock();
+  _lock.acquire();
 #endif
 }
 
@@ -84,7 +84,7 @@ lock() {
 //       Access: Public
 //  Description: Releases the mutex associated with this particular
 //               device.  This should be called after all the data has
-//               been successfully copied out.  See lock().
+//               been successfully copied out.  See acquire().
 ////////////////////////////////////////////////////////////////////
 INLINE void ClientDevice::
 unlock() {
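
The comment above describes the intended usage: acquire() freezes the device against asynchronous updates, the caller copies the state out, and unlock() releases it again (ClientDevice keeps unlock() rather than release() as the counterpart). A minimal sketch of that copy-out pattern, mirroring what analogNode.I and dialNode.I do elsewhere in this commit; device is a hypothetical ClientAnalogDevice pointer:

    device->acquire();                            // was device->lock()
    double value = device->get_control_state(0);  // copy state while frozen
    device->unlock();                             // unchanged counterpart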

+ 1 - 1
panda/src/device/clientDevice.cxx

@@ -73,7 +73,7 @@ ClientDevice::
 void ClientDevice::
 disconnect() {
   if (_is_connected) {
-    lock();
+    acquire();
     bool disconnected =
       _client->disconnect_device(_device_type, _device_name, this);
     _is_connected = false;

+ 1 - 1
panda/src/device/clientDevice.h

@@ -48,7 +48,7 @@ public:
   void disconnect();
 
   void poll();
-  INLINE void lock();
+  INLINE void acquire();
   INLINE void unlock();
 
   virtual void output(ostream &out) const;

+ 2 - 2
panda/src/device/clientDialDevice.I

@@ -52,7 +52,7 @@ get_num_dials() const {
 //       Access: Public
 //  Description: Marks that the dial has been offset by the indicated
 //               amount.  It is the user's responsibility to ensure
-//               that this call is protected within lock().
+//               that this call is protected within acquire().
 ////////////////////////////////////////////////////////////////////
 INLINE void ClientDialDevice::
 push_dial(int index, double offset) {
@@ -71,7 +71,7 @@ push_dial(int index, double offset) {
 //               to read the dial without resetting the counter.
 //
 //               It is the user's responsibility to ensure that this
-//               call is protected within lock().
+//               call is protected within acquire().
 ////////////////////////////////////////////////////////////////////
 INLINE double ClientDialDevice::
 read_dial(int index) {

+ 3 - 3
panda/src/device/dialNode.I

@@ -33,7 +33,7 @@ is_valid() const {
 ////////////////////////////////////////////////////////////////////
 INLINE int DialNode::
 get_num_dials() const {
-  _dial->lock();
+  _dial->acquire();
   int result = _dial->get_num_dials();
   _dial->unlock();
   return result;
@@ -49,7 +49,7 @@ get_num_dials() const {
 ////////////////////////////////////////////////////////////////////
 INLINE double DialNode::
 read_dial(int index) {
-  _dial->lock();
+  _dial->acquire();
   double result = _dial->read_dial(index);
   _dial->unlock();
   return result;
@@ -64,7 +64,7 @@ read_dial(int index) {
 ////////////////////////////////////////////////////////////////////
 INLINE bool DialNode::
 is_dial_known(int index) const {
-  _dial->lock();
+  _dial->acquire();
   bool result = _dial->is_dial_known(index);
   _dial->unlock();
   return result;

+ 1 - 1
panda/src/device/trackerNode.cxx

@@ -84,7 +84,7 @@ do_transmit_data(DataGraphTraverser *, const DataNodeTransmit &,
                  DataNodeTransmit &output) {
   if (is_valid()) {
     _tracker->poll();
-    _tracker->lock();
+    _tracker->acquire();
     _data = _tracker->get_data();
     _tracker->unlock();
 

+ 11 - 11
panda/src/display/graphicsEngine.cxx

@@ -673,7 +673,7 @@ render_frame() {
       Threads::const_iterator ti;
       for (ti = _threads.begin(); ti != _threads.end(); ++ti) {
         RenderThread *thread = (*ti).second;
-        thread->_cv_mutex.lock();
+        thread->_cv_mutex.acquire();
         
         while (thread->_thread_state != TS_wait) {
           thread->_cv_done.wait();
@@ -789,7 +789,7 @@ render_frame() {
       RenderThread *thread = (*ti).second;
       if (thread->_thread_state == TS_wait) {
         thread->_thread_state = TS_do_frame;
-        thread->_cv_start.signal();
+        thread->_cv_start.notify();
       }
       thread->_cv_mutex.release();
     }
@@ -849,14 +849,14 @@ open_windows() {
     Threads::const_iterator ti;
     for (ti = _threads.begin(); ti != _threads.end(); ++ti) {
       RenderThread *thread = (*ti).second;
-      thread->_cv_mutex.lock();
+      thread->_cv_mutex.acquire();
       
       while (thread->_thread_state != TS_wait) {
         thread->_cv_done.wait();
       }
       
       thread->_thread_state = TS_do_windows;
-      thread->_cv_start.signal();
+      thread->_cv_start.notify();
       thread->_cv_mutex.release();
     }
   }
@@ -1476,7 +1476,7 @@ do_sync_frame(Thread *current_thread) {
   Threads::const_iterator ti;
   for (ti = _threads.begin(); ti != _threads.end(); ++ti) {
     RenderThread *thread = (*ti).second;
-    thread->_cv_mutex.lock();
+    thread->_cv_mutex.acquire();
     thread->_cv_mutex.release();
   }
 
@@ -1506,7 +1506,7 @@ do_flip_frame(Thread *current_thread) {
     Threads::const_iterator ti;
     for (ti = _threads.begin(); ti != _threads.end(); ++ti) {
       RenderThread *thread = (*ti).second;
-      thread->_cv_mutex.lock();
+      thread->_cv_mutex.acquire();
 
       while (thread->_thread_state != TS_wait) {
         thread->_cv_done.wait();
@@ -1523,7 +1523,7 @@ do_flip_frame(Thread *current_thread) {
       RenderThread *thread = (*ti).second;
       nassertv(thread->_thread_state == TS_wait);
       thread->_thread_state = TS_do_flip;
-      thread->_cv_start.signal();
+      thread->_cv_start.notify();
       thread->_cv_mutex.release();
     }
   }
@@ -2083,14 +2083,14 @@ terminate_threads(Thread *current_thread) {
   Threads::const_iterator ti;
   for (ti = _threads.begin(); ti != _threads.end(); ++ti) {
     RenderThread *thread = (*ti).second;
-    thread->_cv_mutex.lock();
+    thread->_cv_mutex.acquire();
   }
   
   // Now tell them to close their windows and terminate.
   for (ti = _threads.begin(); ti != _threads.end(); ++ti) {
     RenderThread *thread = (*ti).second;
     thread->_thread_state = TS_terminate;
-    thread->_cv_start.signal();
+    thread->_cv_start.notify();
     thread->_cv_mutex.release();
   }
 
@@ -2578,7 +2578,7 @@ thread_main() {
       do_pending(_engine, current_thread);
       do_close(_engine, current_thread);
       _thread_state = TS_done;
-      _cv_done.signal();
+      _cv_done.notify();
       return;
 
     case TS_done:
@@ -2588,7 +2588,7 @@ thread_main() {
     }
 
     _thread_state = TS_wait;
-    _cv_done.signal();
+    _cv_done.notify();
 
     {
       PStatTimer timer(_wait_pcollector, current_thread);

+ 1 - 1
panda/src/event/asyncTask.cxx

@@ -393,7 +393,7 @@ unlock_and_do_task() {
   double end = clock->get_real_time();
 
   // Now reacquire the lock (so we can return with the lock held).
-  _manager->_lock.lock();
+  _manager->_lock.acquire();
 
   _dt = end - start;
   _max_dt = max(_dt, _max_dt);

+ 15 - 15
panda/src/event/asyncTaskChain.cxx

@@ -492,7 +492,7 @@ do_add(AsyncTask *task) {
   ++(_manager->_num_tasks);
   _needs_cleanup = true;
 
-  _cvar.signal_all();
+  _cvar.notify_all();
 }
 
 ////////////////////////////////////////////////////////////////////
@@ -647,7 +647,7 @@ do_cleanup() {
   for (ti = dead.begin(); ti != dead.end(); ++ti) {
     (*ti)->upon_death(_manager, false);
   }
-  _manager->_lock.lock();
+  _manager->_lock.acquire();
 
   if (task_cat.is_spam()) {
     do_output(task_cat.spam());
@@ -750,7 +750,7 @@ service_one_task(AsyncTaskChain::AsyncTaskChainThread *thread) {
           // queue.
           task->_state = AsyncTask::S_active;
           _next_active.push_back(task);
-          _cvar.signal_all();
+          _cvar.notify_all();
           break;
           
         case AsyncTask::DS_again:
@@ -767,7 +767,7 @@ service_one_task(AsyncTaskChain::AsyncTaskChainThread *thread) {
                 << "Sleeping " << *task << ", wake time at " 
                 << task->get_wake_time() - now << "\n";
             }
-            _cvar.signal_all();
+            _cvar.notify_all();
           }
           break;
 
@@ -775,7 +775,7 @@ service_one_task(AsyncTaskChain::AsyncTaskChainThread *thread) {
           // The task wants to run again this frame if possible.
           task->_state = AsyncTask::S_active;
           _this_active.push_back(task);
-          _cvar.signal_all();
+          _cvar.notify_all();
           break;
 
         case AsyncTask::DS_interrupt:
@@ -784,7 +784,7 @@ service_one_task(AsyncTaskChain::AsyncTaskChainThread *thread) {
           _next_active.push_back(task);
           if (_state == S_started) {
             _state = S_interrupted;
-            _cvar.signal_all();
+            _cvar.notify_all();
           }
           break;
           
@@ -844,7 +844,7 @@ cleanup_task(AsyncTask *task, bool upon_death, bool clean_exit) {
   if (upon_death) {
     _manager->_lock.release();
     task->upon_death(_manager, clean_exit);
-    _manager->_lock.lock();
+    _manager->_lock.acquire();
   }
 }
 
@@ -872,7 +872,7 @@ finish_sort_group() {
     // There are more tasks; just set the next sort value.
     nassertr(_current_sort < _active.front()->get_sort(), true);
     _current_sort = _active.front()->get_sort();
-    _cvar.signal_all();
+    _cvar.notify_all();
     return true;
   }
 
@@ -922,7 +922,7 @@ finish_sort_group() {
           << ": tick clock\n";
       }
       _manager->_clock->tick();
-      _manager->_frame_cvar.signal_all();
+      _manager->_frame_cvar.notify_all();
     }
     
     // Check for any sleeping tasks that need to be woken.
@@ -972,7 +972,7 @@ finish_sort_group() {
 
   if (!_active.empty()) {
     // Signal the threads to start executing the first task again.
-    _cvar.signal_all();
+    _cvar.notify_all();
     return true;
   }
 
@@ -1079,8 +1079,8 @@ do_stop_threads() {
     }
 
     _state = S_shutdown;
-    _cvar.signal_all();
-    _manager->_frame_cvar.signal_all();
+    _cvar.notify_all();
+    _manager->_frame_cvar.notify_all();
     
     Threads wait_threads;
     wait_threads.swap(_threads);
@@ -1102,7 +1102,7 @@ do_stop_threads() {
           << *Thread::get_current_thread() << "\n";
       }
     }
-    _manager->_lock.lock();
+    _manager->_lock.acquire();
     
     _state = S_initial;
 
@@ -1246,7 +1246,7 @@ do_poll() {
       _num_busy_threads++;
       service_one_task(NULL);
       _num_busy_threads--;
-      _cvar.signal_all();
+      _cvar.notify_all();
 
       if (!_threads.empty()) {
         return;
@@ -1505,7 +1505,7 @@ thread_main() {
       _chain->_num_busy_threads++;
       _chain->service_one_task(this);
       _chain->_num_busy_threads--;
-      _chain->_cvar.signal_all();
+      _chain->_cvar.notify_all();
 
     } else {
       // We've finished all the available tasks of the current sort

+ 3 - 3
panda/src/event/asyncTaskManager.cxx

@@ -226,7 +226,7 @@ add(AsyncTask *task) {
 
     _lock.release();
     task->upon_birth(this);
-    _lock.lock();
+    _lock.acquire();
     nassertv(task->_manager == NULL &&
              task->_state == AsyncTask::S_inactive);
     nassertv(!do_has_task(task));
@@ -379,7 +379,7 @@ remove(const AsyncTaskCollection &tasks) {
       if (task->_chain->do_remove(task)) {
         _lock.release();
         task->upon_death(this, false);
-        _lock.lock();
+        _lock.acquire();
         ++num_removed;
       } else {
         if (task_cat.is_debug()) {
@@ -548,7 +548,7 @@ poll() {
 
   // Just in case the clock was ticked explicitly by one of our
   // polling chains.
-  _frame_cvar.signal_all();
+  _frame_cvar.notify_all();
 }
 
 ////////////////////////////////////////////////////////////////////

+ 2 - 2
panda/src/express/subStreamBuf.cxx

@@ -138,7 +138,7 @@ seekoff(streamoff off, ios_seekdir dir, ios_openmode mode) {
     if (_end == (streampos)0) {
       // If the end of the file is unspecified, we have to seek to
       // find it.
-      _lock.lock();
+      _lock.acquire();
       _source->seekg(off, ios::end);
       new_pos = _source->tellg();
       _lock.release();
@@ -239,7 +239,7 @@ underflow() {
     gbump(-(int)num_bytes);
     nassertr(gptr() + num_bytes <= egptr(), EOF);
 
-    _lock.lock();
+    _lock.acquire();
     _source->seekg(_cur);
     _source->read(gptr(), num_bytes);
     size_t read_count = _source->gcount();

+ 1 - 1
panda/src/express/trueClock.I

@@ -25,7 +25,7 @@ get_short_time() {
   bool is_paranoid_clock = get_paranoid_clock();
 
   if (is_paranoid_clock) {
-    _lock.lock();
+    _lock.acquire();
   }
 
   double time = get_short_raw_time();

+ 3 - 3
panda/src/express/weakReferenceList.cxx

@@ -33,7 +33,7 @@ WeakReferenceList() {
 ////////////////////////////////////////////////////////////////////
 WeakReferenceList::
 ~WeakReferenceList() {
-  _lock.lock();
+  _lock.acquire();
   Pointers::iterator pi;
   for (pi = _pointers.begin(); pi != _pointers.end(); ++pi) {
     (*pi)->mark_deleted();
@@ -56,7 +56,7 @@ WeakReferenceList::
 ////////////////////////////////////////////////////////////////////
 void WeakReferenceList::
 add_reference(WeakPointerToVoid *ptv) {
-  _lock.lock();
+  _lock.acquire();
   bool inserted = _pointers.insert(ptv).second;
   _lock.release();
   nassertv(inserted);
@@ -73,7 +73,7 @@ add_reference(WeakPointerToVoid *ptv) {
 ////////////////////////////////////////////////////////////////////
 void WeakReferenceList::
 clear_reference(WeakPointerToVoid *ptv) {
-  _lock.lock();
+  _lock.acquire();
   Pointers::iterator pi = _pointers.find(ptv);
   bool valid = (pi != _pointers.end());
   if (valid) {

+ 1 - 1
panda/src/gobj/adaptiveLru.cxx

@@ -391,7 +391,7 @@ do_evict_to(size_t target_size, bool hard_evict) {
           // We must release the lock while we call evict_lru().
           _lock.release();
           page->evict_lru();
-          _lock.lock();
+          _lock.acquire();
 
           if (_total_size <= target_size) {
             // We've evicted enough to satisfy our target.

+ 1 - 1
panda/src/gobj/geomMunger.cxx

@@ -118,7 +118,7 @@ munge_geom(CPT(Geom) &geom, CPT(GeomVertexData) &data,
 
   Geom::CacheKey key(source_data, this);
 
-  geom->_cache_lock.lock();
+  geom->_cache_lock.acquire();
   Geom::Cache::const_iterator ci = geom->_cache.find(&key);
   if (ci == geom->_cache.end()) {
     geom->_cache_lock.release();

+ 1 - 1
panda/src/gobj/geomVertexArrayData.I

@@ -300,7 +300,7 @@ GeomVertexArrayDataHandle(const GeomVertexArrayData *object,
 #endif  // DO_PIPELINING
   // We must grab the lock *after* we have incremented the reference
   // count, above.
-  _cdata->_rw_lock.lock();
+  _cdata->_rw_lock.acquire();
 #ifdef DO_MEMORY_USAGE
   MemoryUsage::update_type(this, get_class_type());
 #endif

+ 1 - 1
panda/src/gobj/geomVertexData.cxx

@@ -754,7 +754,7 @@ convert_to(const GeomVertexFormat *new_format) const {
 
   CacheKey key(new_format);
 
-  _cache_lock.lock();
+  _cache_lock.acquire();
   Cache::const_iterator ci = _cache.find(&key);
   if (ci == _cache.end()) {
     _cache_lock.release();

+ 1 - 1
panda/src/gobj/simpleLru.cxx

@@ -190,7 +190,7 @@ do_evict_to(size_t target_size, bool hard_evict) {
     // We must release the lock while we call evict_lru().
     _global_lock.release();
     node->evict_lru();
-    _global_lock.lock();
+    _global_lock.acquire();
 
     if (node == end || node == _prev) {
       // If we reach the original tail of the list, stop.

+ 4 - 4
panda/src/gobj/texture.cxx

@@ -2869,10 +2869,10 @@ do_write_txo(ostream &out, const string &filename) const {
   // will need to grab the lock).
   _lock.release();
   if (!writer.write_object(this)) {
-    _lock.lock();
+    _lock.acquire();
     return false;
   }
-  _lock.lock();
+  _lock.acquire();
 
   if (!do_has_ram_image()) {
     gobj_cat.error()
@@ -2918,7 +2918,7 @@ do_unlock_and_reload_ram_image(bool allow_compression) {
     // own mutex is left unlocked.
     tex->do_reload_ram_image(allow_compression);
 
-    _lock.lock();
+    _lock.acquire();
     do_assign(*tex);
 
     nassertv(_reloading);
@@ -2931,7 +2931,7 @@ do_unlock_and_reload_ram_image(bool allow_compression) {
     ++_image_modified;
     ++_properties_modified;
 
-    _cvar.signal_all();
+    _cvar.notify_all();
   }
 }
 

+ 6 - 6
panda/src/gobj/vertexDataPage.cxx

@@ -864,7 +864,7 @@ add_page(VertexDataPage *page, RamClass ram_class) {
     } else {
       _pending_writes.push_back(page);
     }
-    _pending_cvar.signal();
+    _pending_cvar.notify();
   }
 }
 
@@ -891,7 +891,7 @@ remove_page(VertexDataPage *page) {
       while (page == thread->_working_page) {
         thread->_working_cvar.wait();
       }
-      page->_lock.lock();
+      page->_lock.acquire();
       return;
     }
   }
@@ -980,7 +980,7 @@ stop_threads() {
   {
     MutexHolder holder(_tlock);
     _shutdown = true;
-    _pending_cvar.signal_all();
+    _pending_cvar.notify_all();
     threads.swap(_threads);
   }
 
@@ -1013,7 +1013,7 @@ PageThread(PageThreadManager *manager, const string &name) :
 ////////////////////////////////////////////////////////////////////
 void VertexDataPage::PageThread::
 thread_main() {
-  _tlock.lock();
+  _tlock.acquire();
 
   while (true) {
     PStatClient::thread_tick(get_sync_name());
@@ -1060,10 +1060,10 @@ thread_main() {
       }
     }
     
-    _tlock.lock();
+    _tlock.acquire();
 
     _working_page = NULL;
-    _working_cvar.signal();
+    _working_cvar.notify();
 
     Thread::consider_yield();
   }

+ 3 - 3
panda/src/net/datagramQueue.cxx

@@ -56,7 +56,7 @@ shutdown() {
   MutexHolder holder(_cvlock);
 
   _shutdown = true;
-  _cv.signal_all();
+  _cv.notify_all();
 }
 
 
@@ -88,7 +88,7 @@ insert(const NetDatagram &data, bool block) {
   if (enqueue_ok) {
     _queue.push_back(data);
   }
-  _cv.signal();  // Only need to wake up one thread.
+  _cv.notify();  // Only need to wake up one thread.
 
   return enqueue_ok;
 }
@@ -133,7 +133,7 @@ extract(NetDatagram &result) {
   _queue.pop_front();
 
   // Wake up any threads waiting to stuff things into the queue.
-  _cv.signal_all();
+  _cv.notify_all();
 
   return true;
 }

+ 3 - 0
panda/src/pipeline/Sources.pp

@@ -60,6 +60,7 @@
     reMutex.I reMutex.h \
     reMutexDirect.h reMutexDirect.I \
     reMutexHolder.I reMutexHolder.h \
+    semaphore.h semaphore.I \
     thread.h thread.I threadImpl.h \
     threadDummyImpl.h threadDummyImpl.I \
     threadPosixImpl.h threadPosixImpl.I \
@@ -112,6 +113,7 @@
     reMutex.cxx \
     reMutexDirect.cxx \
     reMutexHolder.cxx \
+    semaphore.cxx \
     thread.cxx \
     threadDummyImpl.cxx \
     threadPosixImpl.cxx \
@@ -170,6 +172,7 @@
     reMutex.I reMutex.h \
     reMutexDirect.h reMutexDirect.I \
     reMutexHolder.I reMutexHolder.h \
+    semaphore.h semaphore.I \
     thread.h thread.I threadImpl.h \
     threadDummyImpl.h threadDummyImpl.I \
     threadPosixImpl.h threadPosixImpl.I \

+ 2 - 2
panda/src/pipeline/conditionVar.I

@@ -71,12 +71,12 @@ operator = (const ConditionVar &copy) {
 ////////////////////////////////////////////////////////////////////
 //     Function: ConditionVar::signal_all
 //       Access: Private
-//  Description: The signal_all() method is specifically *not*
+//  Description: The notify_all() method is specifically *not*
 //               provided by ConditionVar.  Use ConditionVarFull if
 //               you need to call this method.
 ////////////////////////////////////////////////////////////////////
 INLINE void ConditionVar::
-signal_all() {
+notify_all() {
   nassertv(false);
 }
 

+ 3 - 3
panda/src/pipeline/conditionVar.h

@@ -56,13 +56,13 @@ private:
 
   // These methods are inherited from the base class.
   // INLINE void wait();
-  // INLINE void signal();
+  // INLINE void notify();
 
 private:
-  // The signal_all() method is specifically *not* provided by
+  // The notify_all() method is specifically *not* provided by
   // ConditionVar.  Use ConditionVarFull if you need to call this
   // method.
-  INLINE void signal_all();
+  INLINE void notify_all();
 
 PUBLISHED:
   INLINE Mutex &get_mutex() const;

+ 7 - 7
panda/src/pipeline/conditionVarDebug.cxx

@@ -53,13 +53,13 @@ ConditionVarDebug::
 //               variable before calling this function.
 //
 //               wait() will release the lock, then go to sleep until
-//               some other thread calls signal() on this condition
+//               some other thread calls notify() on this condition
 //               variable.  At that time at least one thread waiting
 //               on the same ConditionVarDebug will grab the lock again,
 //               and then return from wait().
 //
 //               It is possible that wait() will return even if no one
-//               has called signal().  It is the responsibility of the
+//               has called notify().  It is the responsibility of the
 //               calling process to verify the condition on return
 //               from wait, and possibly loop back to wait again if
 //               necessary.
@@ -72,7 +72,7 @@ ConditionVarDebug::
 ////////////////////////////////////////////////////////////////////
 void ConditionVarDebug::
 wait() {
-  _mutex._global_lock->lock();
+  _mutex._global_lock->acquire();
 
   Thread *this_thread = Thread::get_current_thread();
 
@@ -124,7 +124,7 @@ wait() {
 ////////////////////////////////////////////////////////////////////
 void ConditionVarDebug::
 wait(double timeout) {
-  _mutex._global_lock->lock();
+  _mutex._global_lock->acquire();
 
   Thread *this_thread = Thread::get_current_thread();
 
@@ -182,8 +182,8 @@ wait(double timeout) {
 //               signal is lost.
 ////////////////////////////////////////////////////////////////////
 void ConditionVarDebug::
-signal() {
-  _mutex._global_lock->lock();
+notify() {
+  _mutex._global_lock->acquire();
 
   Thread *this_thread = Thread::get_current_thread();
 
@@ -201,7 +201,7 @@ signal() {
       << *this_thread << " signalling " << *this << "\n";
   }
 
-  _impl.signal();
+  _impl.notify();
   _mutex._global_lock->release();
 }
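
As the wait() comment above notes, a return from wait() does not guarantee the condition holds, so the waiter must re-check its predicate in a loop. A minimal sketch of the wait/notify contract with the renamed methods; ready, m, and cv are hypothetical:

    Mutex m;
    ConditionVar cv(m);
    bool ready = false;

    // Waiting thread: holds the mutex and loops against spurious wakeups.
    m.acquire();
    while (!ready) {
      cv.wait();      // releases m while asleep, re-acquires before returning
    }
    m.release();

    // Notifying thread: also holds the mutex while signalling.
    m.acquire();
    ready = true;
    cv.notify();      // was signal(); wakes at least one waiter
    m.release();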
 

+ 1 - 1
panda/src/pipeline/conditionVarDebug.h

@@ -46,7 +46,7 @@ PUBLISHED:
 
   void wait();
   void wait(double timeout);
-  void signal();
+  void notify();
   virtual void output(ostream &out) const;
 
 private:

+ 5 - 5
panda/src/pipeline/conditionVarDirect.I

@@ -81,13 +81,13 @@ get_mutex() const {
 //               variable before calling this function.
 //
 //               wait() will release the lock, then go to sleep until
-//               some other thread calls signal() on this condition
+//               some other thread calls notify() on this condition
 //               variable.  At that time at least one thread waiting
 //               on the same ConditionVarDirect will grab the lock again,
 //               and then return from wait().
 //
 //               It is possible that wait() will return even if no one
-//               has called signal().  It is the responsibility of the
+//               has called notify().  It is the responsibility of the
 //               calling process to verify the condition on return
 //               from wait, and possibly loop back to wait again if
 //               necessary.
@@ -139,7 +139,7 @@ wait(double timeout) {
 //               signal is lost.
 ////////////////////////////////////////////////////////////////////
 INLINE void ConditionVarDirect::
-signal() {
-  TAU_PROFILE("ConditionVarDirect::signal()", " ", TAU_USER);
-  _impl.signal();
+notify() {
+  TAU_PROFILE("ConditionVarDirect::notify()", " ", TAU_USER);
+  _impl.notify();
 }

+ 1 - 1
panda/src/pipeline/conditionVarDirect.h

@@ -46,7 +46,7 @@ PUBLISHED:
 
   BLOCKING INLINE void wait();
   BLOCKING INLINE void wait(double timeout);
-  INLINE void signal();
+  INLINE void notify();
   void output(ostream &out) const;
 
 private:

+ 2 - 2
panda/src/pipeline/conditionVarDummyImpl.I

@@ -57,7 +57,7 @@ wait(double) {
 //  Description: 
 ////////////////////////////////////////////////////////////////////
 INLINE void ConditionVarDummyImpl::
-signal() {
+notify() {
 }
 
 ////////////////////////////////////////////////////////////////////
@@ -66,5 +66,5 @@ signal() {
 //  Description: 
 ////////////////////////////////////////////////////////////////////
 INLINE void ConditionVarDummyImpl::
-signal_all() {
+notify_all() {
 }

+ 2 - 2
panda/src/pipeline/conditionVarDummyImpl.h

@@ -36,8 +36,8 @@ public:
 
   INLINE void wait();
   INLINE void wait(double timeout);
-  INLINE void signal();
-  INLINE void signal_all();
+  INLINE void notify();
+  INLINE void notify_all();
 };
 
 #include "conditionVarDummyImpl.I"

+ 5 - 5
panda/src/pipeline/conditionVarFull.h

@@ -25,21 +25,21 @@
 //               ConditionVar for a brief introduction to this class.
 //               The ConditionVarFull class provides a more complete
 //               implementation than ConditionVar; in particular, it
-//               provides the signal_all() method, which is guaranteed
+//               provides the notify_all() method, which is guaranteed
 //               to wake up all threads currently waiting on the
-//               condition (whereas signal() is guaranteed to wake up
+//               condition (whereas notify() is guaranteed to wake up
 //               at least one thread, but may or may not wake up all
 //               of them).
 //
 //               This class exists because on certain platforms
-//               (e.g. Win32), implementing signal_all() requires more
+//               (e.g. Win32), implementing notify_all() requires more
 //               overhead, so you should use ConditionVar for cases
-//               when you do not require the signal_all() semantics.
+//               when you do not require the notify_all() semantics.
 //
 //               There are still some minor semantics that POSIX
 //               condition variables provide which this implementation
 //               does not.  For instance, it is required (not
-//               optional) that the caller of signal() or signal_all()
+//               optional) that the caller of notify() or notify_all()
 //               is holding the condition variable's mutex before the
 //               call.
 //
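
A short continuation of the earlier wait/notify sketch (same hypothetical m and a shutdown flag, with cv now a ConditionVarFull so that notify_all() is available), showing the case the comment above reserves for ConditionVarFull: a state change that every waiter must observe, where waking only one thread is not enough:

    // Broadcast case, e.g. shutdown, as asyncTaskChain.cxx does above.
    // The notifier must hold the mutex, per the comment above.
    m.acquire();
    shutdown = true;
    cv.notify_all();    // was signal_all(); wakes every waiting thread
    m.release();

Each waiter still loops on its own predicate (while (!shutdown) cv.wait();), exactly as in the earlier sketch.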

+ 10 - 10
panda/src/pipeline/conditionVarFullDebug.cxx

@@ -53,13 +53,13 @@ ConditionVarFullDebug::
 //               variable before calling this function.
 //
 //               wait() will release the lock, then go to sleep until
-//               some other thread calls signal() on this condition
+//               some other thread calls notify() on this condition
 //               variable.  At that time at least one thread waiting
 //               on the same ConditionVarFullDebug will grab the lock again,
 //               and then return from wait().
 //
 //               It is possible that wait() will return even if no one
-//               has called signal().  It is the responsibility of the
+//               has called notify().  It is the responsibility of the
 //               calling process to verify the condition on return
 //               from wait, and possibly loop back to wait again if
 //               necessary.
@@ -72,7 +72,7 @@ ConditionVarFullDebug::
 ////////////////////////////////////////////////////////////////////
 void ConditionVarFullDebug::
 wait() {
-  _mutex._global_lock->lock();
+  _mutex._global_lock->acquire();
 
   Thread *this_thread = Thread::get_current_thread();
 
@@ -124,7 +124,7 @@ wait() {
 ////////////////////////////////////////////////////////////////////
 void ConditionVarFullDebug::
 wait(double timeout) {
-  _mutex._global_lock->lock();
+  _mutex._global_lock->acquire();
 
   Thread *this_thread = Thread::get_current_thread();
 
@@ -182,8 +182,8 @@ wait(double timeout) {
 //               signal is lost.
 ////////////////////////////////////////////////////////////////////
 void ConditionVarFullDebug::
-signal() {
-  _mutex._global_lock->lock();
+notify() {
+  _mutex._global_lock->acquire();
 
   Thread *this_thread = Thread::get_current_thread();
 
@@ -201,7 +201,7 @@ signal() {
       << *this_thread << " signalling " << *this << "\n";
   }
 
-  _impl.signal();
+  _impl.notify();
   _mutex._global_lock->release();
 }
 
@@ -220,8 +220,8 @@ signal() {
 //               signal is lost.
 ////////////////////////////////////////////////////////////////////
 void ConditionVarFullDebug::
-signal_all() {
-  _mutex._global_lock->lock();
+notify_all() {
+  _mutex._global_lock->acquire();
 
   Thread *this_thread = Thread::get_current_thread();
 
@@ -239,7 +239,7 @@ signal_all() {
       << *this_thread << " signalling all " << *this << "\n";
   }
 
-  _impl.signal_all();
+  _impl.notify_all();
   _mutex._global_lock->release();
 }
 

+ 2 - 2
panda/src/pipeline/conditionVarFullDebug.h

@@ -46,8 +46,8 @@ PUBLISHED:
 
   void wait();
   void wait(double timeout);
-  void signal();
-  void signal_all();
+  void notify();
+  void notify_all();
   virtual void output(ostream &out) const;
 
 private:

+ 8 - 8
panda/src/pipeline/conditionVarFullDirect.I

@@ -81,13 +81,13 @@ get_mutex() const {
 //               variable before calling this function.
 //
 //               wait() will release the lock, then go to sleep until
-//               some other thread calls signal() on this condition
+//               some other thread calls notify() on this condition
 //               variable.  At that time at least one thread waiting
 //               on the same ConditionVarFullDirect will grab the lock again,
 //               and then return from wait().
 //
 //               It is possible that wait() will return even if no one
-//               has called signal().  It is the responsibility of the
+//               has called notify().  It is the responsibility of the
 //               calling process to verify the condition on return
 //               from wait, and possibly loop back to wait again if
 //               necessary.
@@ -139,9 +139,9 @@ wait(double timeout) {
 //               signal is lost.
 ////////////////////////////////////////////////////////////////////
 INLINE void ConditionVarFullDirect::
-signal() {
-  TAU_PROFILE("ConditionVarFullDirect::signal()", " ", TAU_USER);
-  _impl.signal();
+notify() {
+  TAU_PROFILE("ConditionVarFullDirect::notify()", " ", TAU_USER);
+  _impl.notify();
 }
 
 ////////////////////////////////////////////////////////////////////
@@ -159,7 +159,7 @@ signal() {
 //               signal is lost.
 ////////////////////////////////////////////////////////////////////
 INLINE void ConditionVarFullDirect::
-signal_all() {
-  TAU_PROFILE("ConditionVarFullDirect::signal()", " ", TAU_USER);
-  _impl.signal_all();
+notify_all() {
+  TAU_PROFILE("ConditionVarFullDirect::notify()", " ", TAU_USER);
+  _impl.notify_all();
 }

+ 2 - 2
panda/src/pipeline/conditionVarFullDirect.h

@@ -46,8 +46,8 @@ PUBLISHED:
 
   INLINE void wait();
   INLINE void wait(double timeout);
-  INLINE void signal();
-  INLINE void signal_all();
+  INLINE void notify();
+  INLINE void notify_all();
   void output(ostream &out) const;
 
 private:

+ 8 - 8
panda/src/pipeline/conditionVarFullWin32Impl.I

@@ -54,14 +54,14 @@ wait() {
   // This avoids the "lost wakeup" bug...
   LeaveCriticalSection(_external_mutex);
 
-  // Wait for either event to become signaled due to signal() being
-  // called or signal_all() being called.
+  // Wait for either event to become signaled due to notify() being
+  // called or notify_all() being called.
   int result = WaitForMultipleObjects(2, &_event_signal, FALSE, INFINITE);
 
   bool nonzero = AtomicAdjust::dec(_waiters_count);
   bool last_waiter = (result == WAIT_OBJECT_0 + 1 && !nonzero);
 
-  // Some thread called signal_all().
+  // Some thread called notify_all().
   if (last_waiter) {
     // We're the last waiter to be notified or to stop waiting, so
     // reset the manual event. 
@@ -86,14 +86,14 @@ wait(double timeout) {
   // This avoids the "lost wakeup" bug...
   LeaveCriticalSection(_external_mutex);
 
-  // Wait for either event to become signaled due to signal() being
-  // called or signal_all() being called.
+  // Wait for either event to become signaled due to notify() being
+  // called or notify_all() being called.
   int result = WaitForMultipleObjects(2, &_event_signal, FALSE, (DWORD)(timeout * 1000.0));
 
   bool nonzero = AtomicAdjust::dec(_waiters_count);
   bool last_waiter = (result == WAIT_OBJECT_0 + 1 && !nonzero);
 
-  // Some thread called signal_all().
+  // Some thread called notify_all().
   if (last_waiter) {
     // We're the last waiter to be notified or to stop waiting, so
     // reset the manual event. 
@@ -110,7 +110,7 @@ wait(double timeout) {
 //  Description: 
 ////////////////////////////////////////////////////////////////////
 INLINE void ConditionVarFullWin32Impl::
-signal() {
+notify() {
   bool have_waiters = AtomicAdjust::get(_waiters_count) > 0;
 
   if (have_waiters) {
@@ -124,7 +124,7 @@ signal() {
 //  Description: 
 ////////////////////////////////////////////////////////////////////
 INLINE void ConditionVarFullWin32Impl::
-signal_all() {
+notify_all() {
   bool have_waiters = AtomicAdjust::get(_waiters_count) > 0;
 
   if (have_waiters) {

+ 5 - 5
panda/src/pipeline/conditionVarFullWin32Impl.h

@@ -33,15 +33,15 @@ class MutexWin32Impl;
 //
 //               We follow the "SetEvent" implementation suggested by
 //               http://www.cs.wustl.edu/~schmidt/win32-cv-1.html .
-//               This allows us to implement both signal() and
-//               signal_all(), but it has more overhead than the
+//               This allows us to implement both notify() and
+//               notify_all(), but it has more overhead than the
 //               simpler implementation of ConditionVarWin32Impl.
 //
 //               As described by the above reference, this
 //               implementation suffers from a few weaknesses; in
 //               particular, it does not necessarily wake up all
 //               threads fairly; and it may sometimes incorrectly wake
-//               up a thread that was not waiting at the time signal()
+//               up a thread that was not waiting at the time notify()
 //               was called.  But we figure it's good enough for our
 //               purposes.
 ////////////////////////////////////////////////////////////////////
@@ -52,8 +52,8 @@ public:
 
   INLINE void wait();
   INLINE void wait(double timeout);
-  INLINE void signal();
-  INLINE void signal_all();
+  INLINE void notify();
+  INLINE void notify_all();
 
 private:
   CRITICAL_SECTION *_external_mutex;
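
For readers who have not seen the Schmidt scheme, the sketch below spells out the layout the comment describes: an auto-reset event backs notify(), a manual-reset event backs notify_all(), and a waiter count decides when the manual-reset event has to be cleared again. This is a stripped-down illustration, not the class in this diff; error handling and the timed wait are omitted.

// Two-event condition variable pattern, per the reference cited above.
// Illustrative sketch only; the real class wraps this in Panda3D types.
#include <windows.h>

struct TwoEventCondVar {
  CRITICAL_SECTION *external_mutex;   // supplied by the owning mutex
  LONG waiters_count;
  HANDLE events[2];                   // [0] auto-reset: notify(), [1] manual-reset: notify_all()

  void init(CRITICAL_SECTION *m) {
    external_mutex = m;
    waiters_count = 0;
    events[0] = CreateEvent(NULL, FALSE, FALSE, NULL);   // auto-reset
    events[1] = CreateEvent(NULL, TRUE, FALSE, NULL);    // manual-reset
  }

  void wait() {
    InterlockedIncrement(&waiters_count);
    LeaveCriticalSection(external_mutex);        // release before sleeping: avoids lost wakeups

    DWORD result = WaitForMultipleObjects(2, events, FALSE, INFINITE);

    LONG remaining = InterlockedDecrement(&waiters_count);
    if (result == WAIT_OBJECT_0 + 1 && remaining == 0) {
      ResetEvent(events[1]);                     // last waiter after notify_all(): clear it
    }
    EnterCriticalSection(external_mutex);        // re-acquire before returning to the caller
  }

  void notify()     { if (waiters_count > 0) SetEvent(events[0]); }   // wakes one waiter
  void notify_all() { if (waiters_count > 0) SetEvent(events[1]); }   // wakes them all
};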

+ 4 - 4
panda/src/pipeline/conditionVarPosixImpl.I

@@ -62,8 +62,8 @@ wait() {
 //  Description: 
 ////////////////////////////////////////////////////////////////////
 INLINE void ConditionVarPosixImpl::
-signal() {
-  TAU_PROFILE("ConditionVarPosixImpl::signal()", " ", TAU_USER);
+notify() {
+  TAU_PROFILE("ConditionVarPosixImpl::notify()", " ", TAU_USER);
   int result = pthread_cond_signal(&_cvar);
   nassertv(result == 0);
 }
@@ -74,8 +74,8 @@ signal() {
 //  Description: 
 ////////////////////////////////////////////////////////////////////
 INLINE void ConditionVarPosixImpl::
-signal_all() {
-  TAU_PROFILE("ConditionVarPosixImpl::signal()", " ", TAU_USER);
+notify_all() {
+  TAU_PROFILE("ConditionVarPosixImpl::notify()", " ", TAU_USER);
   int result = pthread_cond_broadcast(&_cvar);
   nassertv(result == 0);
 }
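
The POSIX backend shows that the rename is only skin deep: notify() maps to pthread_cond_signal() and notify_all() to pthread_cond_broadcast(). A standalone illustration of the same pairing, outside Panda3D (the mutex is held across the call, matching the requirement documented for these classes):

// pthread equivalents of notify() and notify_all(); illustrative only.
#include <pthread.h>

static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cvar  = PTHREAD_COND_INITIALIZER;

void notify_one_waiter() {
  pthread_mutex_lock(&mutex);
  pthread_cond_signal(&cvar);      // wakes at least one waiter
  pthread_mutex_unlock(&mutex);
}

void notify_all_waiters() {
  pthread_mutex_lock(&mutex);
  pthread_cond_broadcast(&cvar);   // wakes every current waiter
  pthread_mutex_unlock(&mutex);
}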

+ 2 - 2
panda/src/pipeline/conditionVarPosixImpl.h

@@ -39,8 +39,8 @@ public:
 
   INLINE void wait();
   void wait(double timeout);
-  INLINE void signal();
-  INLINE void signal_all();
+  INLINE void notify();
+  INLINE void notify_all();
 
 private:
   MutexPosixImpl &_mutex;

+ 2 - 2
panda/src/pipeline/conditionVarSimpleImpl.I

@@ -37,7 +37,7 @@ INLINE ConditionVarSimpleImpl::
 //  Description: 
 ////////////////////////////////////////////////////////////////////
 INLINE void ConditionVarSimpleImpl::
-signal() {
+notify() {
   if (_flags & F_has_waiters) {
     do_signal();
   }
@@ -49,7 +49,7 @@ signal() {
 //  Description: 
 ////////////////////////////////////////////////////////////////////
 INLINE void ConditionVarSimpleImpl::
-signal_all() {
+notify_all() {
   if (_flags & F_has_waiters) {
     do_signal_all();
   }

+ 2 - 2
panda/src/pipeline/conditionVarSimpleImpl.cxx

@@ -33,7 +33,7 @@ wait() {
   manager->enqueue_block(thread, this);
   manager->next_context();
 
-  _mutex.lock();
+  _mutex.acquire();
 }
 
 ////////////////////////////////////////////////////////////////////
@@ -54,7 +54,7 @@ wait(double timeout) {
   manager->enqueue_ready(thread);
   manager->next_context();
 
-  _mutex.lock();
+  _mutex.acquire();
 }
 
 ////////////////////////////////////////////////////////////////////

+ 2 - 2
panda/src/pipeline/conditionVarSimpleImpl.h

@@ -35,8 +35,8 @@ public:
 
   void wait();
   void wait(double timeout);
-  INLINE void signal();
-  INLINE void signal_all();
+  INLINE void notify();
+  INLINE void notify_all();
 
 private:
   void do_signal();

+ 2 - 2
panda/src/pipeline/conditionVarSpinlockImpl.I

@@ -38,7 +38,7 @@ INLINE ConditionVarSpinlockImpl::
 //  Description: 
 ////////////////////////////////////////////////////////////////////
 INLINE void ConditionVarSpinlockImpl::
-signal() {
+notify() {
   // This will wake up all waiters on the lock.  But that's allowed.
   AtomicAdjust::inc(_event);
 }
@@ -49,6 +49,6 @@ signal() {
 //  Description: 
 ////////////////////////////////////////////////////////////////////
 INLINE void ConditionVarSpinlockImpl::
-signal_all() {
+notify_all() {
   AtomicAdjust::inc(_event);
 }

+ 1 - 1
panda/src/pipeline/conditionVarSpinlockImpl.cxx

@@ -31,7 +31,7 @@ wait() {
   while (AtomicAdjust::get(_event) == current) {
   }
 
-  _mutex.lock();
+  _mutex.acquire();
 }
 
 #endif  // MUTEX_SPINLOCK

+ 2 - 2
panda/src/pipeline/conditionVarSpinlockImpl.h

@@ -41,8 +41,8 @@ public:
   INLINE ~ConditionVarSpinlockImpl();
 
   void wait();
-  INLINE void signal();
-  INLINE void signal_all();
+  INLINE void notify();
+  INLINE void notify_all();
 
 private:
   MutexSpinlockImpl &_mutex;

+ 1 - 1
panda/src/pipeline/conditionVarWin32Impl.I

@@ -72,6 +72,6 @@ wait(double timeout) {
 //  Description: 
 ////////////////////////////////////////////////////////////////////
 INLINE void ConditionVarWin32Impl::
-signal() {
+notify() {
   SetEvent(_event_signal);
 }

+ 3 - 3
panda/src/pipeline/conditionVarWin32Impl.h

@@ -33,9 +33,9 @@ class MutexWin32Impl;
 //               The Windows native synchronization primitives don't
 //               actually implement a full POSIX-style condition
 //               variable, but the Event primitive does a fair job if
-//               we disallow signal_all() (POSIX broadcast).  See
+//               we disallow notify_all() (POSIX broadcast).  See
 //               ConditionVarFullWin32Impl for a full implementation
-//               that includes signal_all().  This class is much
+//               that includes notify_all().  This class is much
 //               simpler than that full implementation, so we can
 //               avoid the overhead required to support broadcast.
 ////////////////////////////////////////////////////////////////////
@@ -46,7 +46,7 @@ public:
 
   INLINE void wait();
   INLINE void wait(double timeout);
-  INLINE void signal();
+  INLINE void notify();
 
 private:
   CRITICAL_SECTION *_external_mutex;

+ 1 - 1
panda/src/pipeline/cyclerHolder.I

@@ -22,7 +22,7 @@ INLINE CyclerHolder::
 CyclerHolder(PipelineCyclerBase &cycler) {
 #ifdef DO_PIPELINING
   _cycler = &cycler;
-  _cycler->lock();
+  _cycler->acquire();
 #endif
 }
 

+ 1 - 1
panda/src/pipeline/cyclerHolder.h

@@ -21,7 +21,7 @@
 ////////////////////////////////////////////////////////////////////
 //       Class : CyclerHolder
 // Description : A lightweight C++ object whose constructor calls
-//               lock() and whose destructor calls release() on a
+//               acquire() and whose destructor calls release() on a
 //               PipelineCyclerBase object.  This is similar to a
 //               MutexHolder.
 ////////////////////////////////////////////////////////////////////

+ 4 - 4
panda/src/pipeline/lightMutexDirect.I

@@ -52,7 +52,7 @@ operator = (const LightMutexDirect &copy) {
 }
 
 ////////////////////////////////////////////////////////////////////
-//     Function: LightMutexDirect::lock
+//     Function: LightMutexDirect::acquire
 //       Access: Published
 //  Description: Grabs the lightMutex if it is available.  If it is not
 //               available, blocks until it becomes available, then
@@ -67,9 +67,9 @@ operator = (const LightMutexDirect &copy) {
 //               Also see LightMutexHolder.
 ////////////////////////////////////////////////////////////////////
 INLINE void LightMutexDirect::
-lock() const {
-  TAU_PROFILE("void LightMutexDirect::lock()", " ", TAU_USER);
-  ((LightMutexDirect *)this)->_impl.lock();
+acquire() const {
+  TAU_PROFILE("void LightMutexDirect::acquire()", " ", TAU_USER);
+  ((LightMutexDirect *)this)->_impl.acquire();
 }
 
 ////////////////////////////////////////////////////////////////////

+ 1 - 1
panda/src/pipeline/lightMutexDirect.h

@@ -37,7 +37,7 @@ private:
   INLINE void operator = (const LightMutexDirect &copy);
 
 PUBLISHED:
-  BLOCKING INLINE void lock() const;
+  BLOCKING INLINE void acquire() const;
   INLINE void release() const;
   INLINE bool debug_is_locked() const;
 

+ 2 - 2
panda/src/pipeline/lightMutexHolder.I

@@ -22,7 +22,7 @@ INLINE LightMutexHolder::
 LightMutexHolder(const LightMutex &mutex) {
 #if defined(HAVE_THREADS) || defined(DEBUG_THREADS)
   _mutex = &mutex;
-  _mutex->lock();
+  _mutex->acquire();
 #endif
 }
 
@@ -44,7 +44,7 @@ LightMutexHolder(LightMutex *&mutex) {
     mutex = new LightMutex;
   }
   _mutex = mutex;
-  _mutex->lock();
+  _mutex->acquire();
 #endif
 }
 

+ 11 - 11
panda/src/pipeline/lightReMutexDirect.I

@@ -64,7 +64,7 @@ operator = (const LightReMutexDirect &copy) {
 }
 
 ////////////////////////////////////////////////////////////////////
-//     Function: LightReMutexDirect::lock
+//     Function: LightReMutexDirect::acquire
 //       Access: Published
 //  Description: Grabs the lightReMutex if it is available.  If it is not
 //               available, blocks until it becomes available, then
@@ -79,23 +79,23 @@ operator = (const LightReMutexDirect &copy) {
 //               Also see LightReMutexHolder.
 ////////////////////////////////////////////////////////////////////
 INLINE void LightReMutexDirect::
-lock() const {
-  TAU_PROFILE("void LightReMutexDirect::lock()", " ", TAU_USER);
-  ((LightReMutexDirect *)this)->_impl.lock();
+acquire() const {
+  TAU_PROFILE("void LightReMutexDirect::acquire()", " ", TAU_USER);
+  ((LightReMutexDirect *)this)->_impl.acquire();
 }
 
 ////////////////////////////////////////////////////////////////////
-//     Function: LightReMutexDirect::lock
+//     Function: LightReMutexDirect::acquire
 //       Access: Published
-//  Description: This variant on lock() accepts the current thread as
+//  Description: This variant on acquire() accepts the current thread as
 //               a parameter, if it is already known, as an
 //               optimization.
 ////////////////////////////////////////////////////////////////////
 INLINE void LightReMutexDirect::
-lock(Thread *current_thread) const {
-  TAU_PROFILE("void LightReMutexDirect::lock(Thread *)", " ", TAU_USER);
+acquire(Thread *current_thread) const {
+  TAU_PROFILE("void LightReMutexDirect::acquire(Thread *)", " ", TAU_USER);
 #ifdef HAVE_REMUTEXIMPL
-  ((LightReMutexDirect *)this)->_impl.lock();
+  ((LightReMutexDirect *)this)->_impl.acquire();
 #else
   ((LightReMutexDirect *)this)->_impl.do_lock(current_thread);
 #endif  // HAVE_REMUTEXIMPL
@@ -110,7 +110,7 @@ lock(Thread *current_thread) const {
 //               time to release the lock.
 //
 //               This method really performs the same function as
-//               lock(), but it offers a potential (slight)
+//               acquire(), but it offers a potential (slight)
 //               performance benefit when the calling thread knows
 //               that it already holds the lock.  It is an error to
 //               call this when the calling thread does not hold the
@@ -120,7 +120,7 @@ INLINE void LightReMutexDirect::
 elevate_lock() const {
   TAU_PROFILE("void LightReMutexDirect::elevate_lock()", " ", TAU_USER);
 #ifdef HAVE_REMUTEXIMPL
-  ((LightReMutexDirect *)this)->_impl.lock();
+  ((LightReMutexDirect *)this)->_impl.acquire();
 #else
   ((LightReMutexDirect *)this)->_impl.do_elevate_lock();
 #endif  // HAVE_REMUTEXIMPL

+ 2 - 2
panda/src/pipeline/lightReMutexDirect.h

@@ -36,8 +36,8 @@ private:
   INLINE void operator = (const LightReMutexDirect &copy);
 
 PUBLISHED:
-  BLOCKING INLINE void lock() const;
-  BLOCKING INLINE void lock(Thread *current_thread) const;
+  BLOCKING INLINE void acquire() const;
+  BLOCKING INLINE void acquire(Thread *current_thread) const;
   INLINE void elevate_lock() const;
   INLINE void release() const;
 

+ 3 - 3
panda/src/pipeline/lightReMutexHolder.I

@@ -22,7 +22,7 @@ INLINE LightReMutexHolder::
 LightReMutexHolder(const LightReMutex &mutex) {
 #if defined(HAVE_THREADS) || defined(DEBUG_THREADS)
   _mutex = &mutex;
-  _mutex->lock();
+  _mutex->acquire();
 #endif
 }
 
@@ -37,7 +37,7 @@ INLINE LightReMutexHolder::
 LightReMutexHolder(const LightReMutex &mutex, Thread *current_thread) {
 #if defined(HAVE_THREADS) || defined(DEBUG_THREADS)
   _mutex = &mutex;
-  _mutex->lock(current_thread);
+  _mutex->acquire(current_thread);
 #endif
 }
 
@@ -59,7 +59,7 @@ LightReMutexHolder(LightReMutex *&mutex) {
     mutex = new LightReMutex;
   }
   _mutex = mutex;
-  _mutex->lock();
+  _mutex->acquire();
 #endif
 }
 

+ 24 - 22
panda/src/pipeline/mutexDebug.I

@@ -34,7 +34,7 @@ operator = (const MutexDebug &copy) {
 }
 
 ////////////////////////////////////////////////////////////////////
-//     Function: MutexDebug::lock
+//     Function: MutexDebug::acquire
 //       Access: Published
 //  Description: Grabs the mutex if it is available.  If it is not
 //               available, blocks until it becomes available, then
@@ -48,29 +48,31 @@ operator = (const MutexDebug &copy) {
 //
 //               Also see MutexHolder.
 ////////////////////////////////////////////////////////////////////
-INLINE void MutexDebug::
-lock() const {
-  TAU_PROFILE("void MutexDebug::lock()", " ", TAU_USER);
-  _global_lock->lock();
-  ((MutexDebug *)this)->do_lock();
-  _global_lock->release();
+INLINE void MutexDebug::
+acquire(Thread *current_thread) const {
+  TAU_PROFILE("void MutexDebug::acquire(Thread *)", " ", TAU_USER);
+  nassertv(current_thread == Thread::get_current_thread());
+  _global_lock->acquire();
+  ((MutexDebug *)this)->do_acquire(current_thread);
+  _global_lock->release();
 }
 
 ////////////////////////////////////////////////////////////////////
-//     Function: MutexDebug::lock
+//     Function: MutexDebug::try_acquire
 //       Access: Published
-//  Description: This variant on lock() accepts the current thread as
-//               a parameter, if it is already known, as an
-//               optimization.
+//  Description: Returns immediately, with a true value indicating the
+//               mutex has been acquired, and false indicating it has
+//               not.
 ////////////////////////////////////////////////////////////////////
-INLINE void MutexDebug::
-lock(Thread *current_thread) const {
-  TAU_PROFILE("void MutexDebug::lock(Thread *)", " ", TAU_USER);
+INLINE bool MutexDebug::
+try_acquire(Thread *current_thread) const {
+  TAU_PROFILE("void MutexDebug::acquire(Thread *)", " ", TAU_USER);
   nassertv(current_thread == Thread::get_current_thread());
-  // You may only pass a Thread parameter to a ReMutex--that is, to a
-  // mutex whose _allow_recursion flag is true.
-  nassertv(_allow_recursion);
-  lock();
+  _global_lock->acquire();
+  bool acquired = ((MutexDebug *)this)->do_try_acquire(current_thread);
+  _global_lock->release();
+  return acquired;
 }
 
 ////////////////////////////////////////////////////////////////////
@@ -82,7 +84,7 @@ lock(Thread *current_thread) const {
 //               time to release the lock.
 //
 //               This method really performs the same function as
-//               lock(), but it offers a potential (slight)
+//               acquire(), but it offers a potential (slight)
 //               performance benefit when the calling thread knows
 //               that it already holds the lock.  It is an error to
 //               call this when the calling thread does not hold the
@@ -98,7 +100,7 @@ elevate_lock() const {
   // Also, it's an error to call this if the lock is not already held.
   nassertv(debug_is_locked());
 
-  lock();
+  acquire();
 }
 
 ////////////////////////////////////////////////////////////////////
@@ -114,7 +116,7 @@ elevate_lock() const {
 INLINE void MutexDebug::
 release() const {
   TAU_PROFILE("void MutexDebug::release()", " ", TAU_USER);
-  _global_lock->lock();
+  _global_lock->acquire();
   ((MutexDebug *)this)->do_release();
   _global_lock->release();
 }
@@ -132,7 +134,7 @@ release() const {
 INLINE bool MutexDebug::
 debug_is_locked() const {
   TAU_PROFILE("bool MutexDebug::debug_is_locked()", " ", TAU_USER);
-  _global_lock->lock();
+  _global_lock->acquire();
   bool is_locked = do_debug_is_locked();
   _global_lock->release();
   return is_locked;

+ 98 - 29
panda/src/pipeline/mutexDebug.cxx

@@ -85,13 +85,13 @@ output(ostream &out) const {
 }
 
 ////////////////////////////////////////////////////////////////////
-//     Function: MutexDebug::do_lock
+//     Function: MutexDebug::do_acquire
 //       Access: Private
-//  Description: The private implementation of lock() assumes that
+//  Description: The private implementation of acquire() assumes that
 //               _lock_impl is held.
 ////////////////////////////////////////////////////////////////////
 void MutexDebug::
-do_lock() {
+do_acquire(Thread *current_thread) {
   // If this assertion is triggered, you tried to lock a
   // recently-destructed mutex.
   nassertd(_lock_count != -100) {
@@ -107,21 +107,19 @@ do_lock() {
     return;
   }
 
-  Thread *this_thread = Thread::get_current_thread();
-
   if (_locking_thread == (Thread *)NULL) {
     // The mutex is not already locked by anyone.  Lock it.
-    _locking_thread = this_thread;
+    _locking_thread = current_thread;
     ++_lock_count;
     nassertv(_lock_count == 1);
 
-  } else if (_locking_thread == this_thread) {
+  } else if (_locking_thread == current_thread) {
     // The mutex is already locked by this thread.  Increment the lock
     // count.
     nassertv(_lock_count > 0);
     if (!_allow_recursion) {
       ostringstream ostr;
-      ostr << *this_thread << " attempted to double-lock non-reentrant "
+      ostr << *current_thread << " attempted to double-lock non-reentrant "
            << *this;
       nassert_raise(ostr.str());
     }
@@ -132,15 +130,15 @@ do_lock() {
 
     if (_lightweight) {
       // In this case, it's not a real mutex.  Just watch it go by.
-      MissedThreads::iterator mi = _missed_threads.insert(MissedThreads::value_type(this_thread, 0)).first;
+      MissedThreads::iterator mi = _missed_threads.insert(MissedThreads::value_type(current_thread, 0)).first;
       if ((*mi).second == 0) {
         thread_cat.info()
-          << *this_thread << " not stopped by " << *this << " (held by "
+          << *current_thread << " not stopped by " << *this << " (held by "
           << *_locking_thread << ")\n";
       } else {
         if (!_allow_recursion) {
           ostringstream ostr;
-          ostr << *this_thread << " attempted to double-lock non-reentrant "
+          ostr << *current_thread << " attempted to double-lock non-reentrant "
                << *this;
           nassert_raise(ostr.str());
         }
@@ -153,9 +151,9 @@ do_lock() {
       // Check for deadlock.
       MutexDebug *next_mutex = this;
       while (next_mutex != NULL) {
-        if (next_mutex->_locking_thread == this_thread) {
+        if (next_mutex->_locking_thread == current_thread) {
           // Whoops, the thread is blocked on me!  Deadlock!
-          report_deadlock(this_thread);
+          report_deadlock(current_thread);
           nassert_raise("Deadlock");
           return;
         }
@@ -173,13 +171,13 @@ do_lock() {
       }
       
       // OK, no deadlock detected.  Carry on.
-      this_thread->_blocked_on_mutex = this;
+      current_thread->_blocked_on_mutex = this;
       
       // Go to sleep on the condition variable until it's unlocked.
       
       if (thread_cat->is_debug()) {
         thread_cat.debug()
-          << *this_thread << " blocking on " << *this << " (held by "
+          << *current_thread << " blocking on " << *this << " (held by "
           << *_locking_thread << ")\n";
       }
       
@@ -189,22 +187,93 @@ do_lock() {
       
       if (thread_cat.is_debug()) {
         thread_cat.debug()
-          << *this_thread << " awake on " << *this << "\n";
+          << *current_thread << " awake on " << *this << "\n";
       }
       
-      this_thread->_blocked_on_mutex = NULL;
+      current_thread->_blocked_on_mutex = NULL;
       
-      _locking_thread = this_thread;
+      _locking_thread = current_thread;
       ++_lock_count;
       nassertv(_lock_count == 1);
     }
   }
 }
 
+////////////////////////////////////////////////////////////////////
+//     Function: MutexDebug::do_try_acquire
+//       Access: Private
+//  Description: The private implementation of try_acquire() assumes
+//               that _lock_impl is held.
+////////////////////////////////////////////////////////////////////
+bool MutexDebug::
+do_try_acquire(Thread *current_thread) {
+  // If this assertion is triggered, you tried to lock a
+  // recently-destructed mutex.
+  nassertd(_lock_count != -100) {
+    pipeline_cat.error()
+      << "Destructed mutex: " << (void *)this << "\n";
+    if (name_deleted_mutexes && _deleted_name != NULL) {
+      pipeline_cat.error()
+        << _deleted_name << "\n";
+    } else {
+      pipeline_cat.error()
+        << "Configure name-deleted-mutexes 1 to see the mutex name.\n";
+    }
+    return false;
+  }
+
+  bool acquired = true;
+  if (_locking_thread == (Thread *)NULL) {
+    // The mutex is not already locked by anyone.  Lock it.
+    _locking_thread = current_thread;
+    ++_lock_count;
+    nassertr(_lock_count == 1, false);
+
+  } else if (_locking_thread == current_thread) {
+    // The mutex is already locked by this thread.  Increment the lock
+    // count.
+    nassertr(_lock_count > 0, false);
+    if (!_allow_recursion) {
+      ostringstream ostr;
+      ostr << *current_thread << " attempted to double-lock non-reentrant "
+           << *this;
+      nassert_raise(ostr.str());
+    }
+    ++_lock_count;
+
+  } else {
+    // The mutex is locked by some other thread.  Return false.
+
+    if (_lightweight) {
+      // In this case, it's not a real mutex.  Just watch it go by.
+      MissedThreads::iterator mi = _missed_threads.insert(MissedThreads::value_type(current_thread, 0)).first;
+      if ((*mi).second == 0) {
+        thread_cat.info()
+          << *current_thread << " not stopped by " << *this << " (held by "
+          << *_locking_thread << ")\n";
+      } else {
+        if (!_allow_recursion) {
+          ostringstream ostr;
+          ostr << *current_thread << " attempted to double-lock non-reentrant "
+               << *this;
+          nassert_raise(ostr.str());
+        }
+      }
+      ++((*mi).second);
+
+    } else {
+      // This is the real case.
+      acquired = false;
+    }
+  }
+
+  return acquired;
+}
+
 ////////////////////////////////////////////////////////////////////
 //     Function: MutexDebug::do_release
 //       Access: Private
-//  Description: The private implementation of lock() assumes that
+//  Description: The private implementation of release() assumes that
 //               _lock_impl is held.
 ////////////////////////////////////////////////////////////////////
 void MutexDebug::
@@ -224,16 +293,16 @@ do_release() {
     return;
   }
 
-  Thread *this_thread = Thread::get_current_thread();
+  Thread *current_thread = Thread::get_current_thread();
 
-  if (_locking_thread != this_thread) {
+  if (_locking_thread != current_thread) {
     // We're not holding this mutex.
 
     if (_lightweight) {
       // Not a real mutex.  This just means we blew past a mutex
       // without locking it, above.
 
-      MissedThreads::iterator mi = _missed_threads.find(this_thread);
+      MissedThreads::iterator mi = _missed_threads.find(current_thread);
       nassertv(mi != _missed_threads.end());
       nassertv((*mi).second > 0);
       --((*mi).second);
@@ -245,7 +314,7 @@ do_release() {
     } else {
       // In the real-mutex case, this is an error condition.
       ostringstream ostr;
-      ostr << *this_thread << " attempted to release "
+      ostr << *current_thread << " attempted to release "
            << *this << " which it does not own";
       nassert_raise(ostr.str());
     }
@@ -269,7 +338,7 @@ do_release() {
         nassertv(_lock_count > 0);
       }
     } else {
-      _cvar_impl.signal();
+      _cvar_impl.notify();
     }
   }
 }
@@ -282,13 +351,13 @@ do_release() {
 ////////////////////////////////////////////////////////////////////
 bool MutexDebug::
 do_debug_is_locked() const {
-  Thread *this_thread = Thread::get_current_thread();
-  if (_locking_thread == this_thread) {
+  Thread *current_thread = Thread::get_current_thread();
+  if (_locking_thread == current_thread) {
     return true;
   }
 
   if (_lightweight) {
-    MissedThreads::const_iterator mi = _missed_threads.find(this_thread);
+    MissedThreads::const_iterator mi = _missed_threads.find(current_thread);
     if (mi != _missed_threads.end()) {
       nassertr((*mi).second > 0, false);
       return true;
@@ -305,7 +374,7 @@ do_debug_is_locked() const {
 //               should be already held.
 ////////////////////////////////////////////////////////////////////
 void MutexDebug::
-report_deadlock(Thread *this_thread) {
+report_deadlock(Thread *current_thread) {
   thread_cat->error()
     << "\n\n"
     << "****************************************************************\n"
@@ -314,7 +383,7 @@ report_deadlock(Thread *this_thread) {
     << "\n";
 
   thread_cat.error()
-    << *this_thread << " attempted to lock " << *this
+    << *current_thread << " attempted to lock " << *this
     << " which is held by " << *_locking_thread << "\n";
 
   MutexDebug *next_mutex = this;
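
The deadlock report above is driven by the check in do_acquire(): start at the contended mutex, follow its owner to the mutex that owner is blocked on, and keep walking; if the chain ever leads back to the acquiring thread, taking the lock would close a cycle. A standalone sketch of that walk, using hypothetical stand-in types rather than the classes in this diff:

// Wait-for-chain walk mirroring the deadlock check in do_acquire().
struct MutexInfo;
struct ThreadInfo {
  MutexInfo *blocked_on_mutex;     // null when the thread is runnable
};
struct MutexInfo {
  ThreadInfo *locking_thread;      // null when the mutex is unowned
};

// Returns true if 'self' blocking on 'wanted' would complete a cycle.
bool would_deadlock(const ThreadInfo *self, const MutexInfo *wanted) {
  const MutexInfo *next_mutex = wanted;
  while (next_mutex != nullptr) {
    if (next_mutex->locking_thread == self) {
      return true;                 // the chain loops back to us: deadlock
    }
    if (next_mutex->locking_thread == nullptr) {
      return false;                // chain ends at an unowned mutex
    }
    next_mutex = next_mutex->locking_thread->blocked_on_mutex;
  }
  return false;                    // chain ends at a thread that is not blocked
}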

+ 5 - 4
panda/src/pipeline/mutexDebug.h

@@ -38,8 +38,8 @@ private:
   INLINE void operator = (const MutexDebug &copy);
 
 PUBLISHED:
-  BLOCKING INLINE void lock() const;
-  BLOCKING INLINE void lock(Thread *current_thread) const;
+  BLOCKING INLINE void acquire(Thread *current_thread = Thread::get_current_thread()) const;
+  BLOCKING INLINE bool try_acquire(Thread *current_thread = Thread::get_current_thread()) const;
   INLINE void elevate_lock() const;
   INLINE void release() const;
   INLINE bool debug_is_locked() const;
@@ -49,11 +49,12 @@ PUBLISHED:
   typedef void VoidFunc();
 
 private:
-  void do_lock();
+  void do_acquire(Thread *current_thread);
+  bool do_try_acquire(Thread *current_thread);
   void do_release();
   bool do_debug_is_locked() const;
 
-  void report_deadlock(Thread *this_thread);
+  void report_deadlock(Thread *current_thread);
 
 private:
   INLINE static MutexTrueImpl *get_global_lock();

+ 17 - 4
panda/src/pipeline/mutexDirect.I

@@ -52,7 +52,7 @@ operator = (const MutexDirect &copy) {
 }
 
 ////////////////////////////////////////////////////////////////////
-//     Function: MutexDirect::lock
+//     Function: MutexDirect::acquire
 //       Access: Published
 //  Description: Grabs the mutex if it is available.  If it is not
 //               available, blocks until it becomes available, then
@@ -67,9 +67,22 @@ operator = (const MutexDirect &copy) {
 //               Also see MutexHolder.
 ////////////////////////////////////////////////////////////////////
 INLINE void MutexDirect::
-lock() const {
-  TAU_PROFILE("void MutexDirect::lock()", " ", TAU_USER);
-  ((MutexDirect *)this)->_impl.lock();
+acquire() const {
+  TAU_PROFILE("void MutexDirect::acquire()", " ", TAU_USER);
+  ((MutexDirect *)this)->_impl.acquire();
+}
+
+////////////////////////////////////////////////////////////////////
+//     Function: MutexDirect::try_acquire
+//       Access: Published
+//  Description: Returns immediately, with a true value indicating the
+//               mutex has been acquired, and false indicating it has
+//               not.
+////////////////////////////////////////////////////////////////////
+INLINE bool MutexDirect::
+try_acquire() const {
+  TAU_PROFILE("void MutexDirect::acquire(bool)", " ", TAU_USER);
+  return ((MutexDirect *)this)->_impl.try_acquire();
 }
 
 ////////////////////////////////////////////////////////////////////
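
try_acquire() is the non-blocking counterpart of acquire(): it never waits, and its boolean result tells the caller whether it now owns the mutex. A hedged usage sketch; the caller and its work are invented for illustration, and the include name is an assumption:

// Opportunistic locking with the new try_acquire() (hypothetical caller).
#include "pmutex.h"   // assumed header for class Mutex

void flush_if_uncontended(Mutex &cache_lock) {
  if (cache_lock.try_acquire()) {
    // We got the lock without blocking; do the optional work here.
    cache_lock.release();
  }
  // Otherwise another thread owns the lock; skip the optional work this time.
}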

+ 2 - 1
panda/src/pipeline/mutexDirect.h

@@ -38,7 +38,8 @@ private:
   INLINE void operator = (const MutexDirect &copy);
 
 PUBLISHED:
-  BLOCKING INLINE void lock() const;
+  BLOCKING INLINE void acquire() const;
+  BLOCKING INLINE bool try_acquire() const;
   INLINE void release() const;
   INLINE bool debug_is_locked() const;
 

+ 2 - 2
panda/src/pipeline/mutexHolder.I

@@ -22,7 +22,7 @@ INLINE MutexHolder::
 MutexHolder(const Mutex &mutex) {
 #if defined(HAVE_THREADS) || defined(DEBUG_THREADS)
   _mutex = &mutex;
-  _mutex->lock();
+  _mutex->acquire();
 #endif
 }
 
@@ -44,7 +44,7 @@ MutexHolder(Mutex *&mutex) {
     mutex = new Mutex;
   }
   _mutex = mutex;
-  _mutex->lock();
+  _mutex->acquire();
 #endif
 }
 

+ 1 - 1
panda/src/pipeline/mutexHolder.h

@@ -21,7 +21,7 @@
 ////////////////////////////////////////////////////////////////////
 //       Class : MutexHolder
 // Description : A lightweight C++ object whose constructor calls
-//               lock() and whose destructor calls release() on a
+//               acquire() and whose destructor calls release() on a
 //               mutex.  It is a C++ convenience wrapper to call
 //               release() automatically when a block exits (for
 //               instance, on return).
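
The holder idiom described above is what keeps the rename low-risk for most call sites: the constructor acquires and the destructor releases, so every exit path gives the lock back. A minimal illustration with an invented caller (include names assumed):

// RAII locking via MutexHolder; illustrative only.
#include "pmutex.h"        // assumed header for class Mutex
#include "mutexHolder.h"

static Mutex counter_lock;
static int counter = 0;

int bump_counter() {
  MutexHolder holder(counter_lock);   // acquire() happens here
  ++counter;
  return counter;                     // release() runs when 'holder' goes out of scope
}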

+ 5 - 5
panda/src/pipeline/mutexSimpleImpl.I

@@ -32,24 +32,24 @@ INLINE MutexSimpleImpl::
 }
 
 ////////////////////////////////////////////////////////////////////
-//     Function: MutexSimpleImpl::lock
+//     Function: MutexSimpleImpl::acquire
 //       Access: Public
 //  Description: 
 ////////////////////////////////////////////////////////////////////
 INLINE void MutexSimpleImpl::
-lock() {
-  if (!try_lock()) {
+acquire() {
+  if (!try_acquire()) {
     do_lock();
   }
 }
 
 ////////////////////////////////////////////////////////////////////
-//     Function: MutexSimpleImpl::try_lock
+//     Function: MutexSimpleImpl::try_acquire
 //       Access: Public
 //  Description: 
 ////////////////////////////////////////////////////////////////////
 INLINE bool MutexSimpleImpl::
-try_lock() {
+try_acquire() {
   if ((_flags & F_lock_count) != 0) {
     return false;
   }
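
MutexSimpleImpl::acquire() above is the usual fast-path/slow-path split: try_acquire() is a cheap, non-blocking test of the lock word, and only when it fails does do_lock() take the expensive path. A standalone sketch of the same split using a C++11 atomic (not the Panda3D implementation, which parks the blocked thread instead of spinning):

// Fast-path / slow-path acquire, mirroring the structure shown above.
#include <atomic>

class TinyLock {
public:
  bool try_acquire() {
    // Non-blocking: succeeds only if the flag was previously clear.
    return !_locked.exchange(true, std::memory_order_acquire);
  }

  void acquire() {
    if (!try_acquire()) {
      do_lock();                 // slow path, taken only under contention
    }
  }

  void release() {
    _locked.store(false, std::memory_order_release);
  }

private:
  void do_lock() {
    while (!try_acquire()) {
      // The real implementation blocks the thread here; this sketch just retries.
    }
  }

  std::atomic<bool> _locked{false};
};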

+ 2 - 2
panda/src/pipeline/mutexSimpleImpl.h

@@ -46,8 +46,8 @@ public:
   INLINE MutexSimpleImpl();
   INLINE ~MutexSimpleImpl();
 
-  INLINE void lock();
-  INLINE bool try_lock();
+  INLINE void acquire();
+  INLINE bool try_acquire();
   INLINE void release();
 
 private:

+ 1 - 1
panda/src/pipeline/pipeline.cxx

@@ -176,7 +176,7 @@ set_num_stages(int num_stages) {
     PipelineCyclerLinks *links;
     for (links = this->_next; links != this; links = links->_next) {
       PipelineCyclerTrueImpl *cycler = (PipelineCyclerTrueImpl *)links;
-      cycler->_lock.lock();
+      cycler->_lock.acquire();
     }
 
     _num_stages = num_stages;

+ 4 - 4
panda/src/pipeline/pipelineCyclerDummyImpl.I

@@ -69,15 +69,15 @@ INLINE PipelineCyclerDummyImpl::
 }
 
 ////////////////////////////////////////////////////////////////////
-//     Function: PipelineCyclerDummyImpl::lock
+//     Function: PipelineCyclerDummyImpl::acquire
 //       Access: Public
 //  Description: Grabs an overall lock on the cycler.  Release it with
 //               a call to release().  This lock should be held while
 //               walking the list of stages.
 ////////////////////////////////////////////////////////////////////
 INLINE void PipelineCyclerDummyImpl::
-lock(Thread *) {
-  TAU_PROFILE("void PipelineCyclerDummyImpl::lock(Thread *)", " ", TAU_USER);
+acquire(Thread *) {
+  TAU_PROFILE("void PipelineCyclerDummyImpl::acquire(Thread *)", " ", TAU_USER);
   nassertv(!_locked);
   _locked = true;
 }
@@ -86,7 +86,7 @@ lock(Thread *) {
 //     Function: PipelineCyclerDummyImpl::release
 //       Access: Public
 //  Description: Release the overall lock on the cycler that was
-//               grabbed via lock().
+//               grabbed via acquire().
 ////////////////////////////////////////////////////////////////////
 INLINE void PipelineCyclerDummyImpl::
 release() {

+ 1 - 1
panda/src/pipeline/pipelineCyclerDummyImpl.h

@@ -48,7 +48,7 @@ public:
   INLINE void operator = (const PipelineCyclerDummyImpl &copy);
   INLINE ~PipelineCyclerDummyImpl();
 
-  INLINE void lock(Thread *current_thread = NULL);
+  INLINE void acquire(Thread *current_thread = NULL);
   INLINE void release();
 
   INLINE const CycleData *read_unlocked(Thread *current_thread) const;

+ 3 - 3
panda/src/pipeline/pipelineCyclerTrivialImpl.I

@@ -74,21 +74,21 @@ INLINE PipelineCyclerTrivialImpl::
 }
 
 ////////////////////////////////////////////////////////////////////
-//     Function: PipelineCyclerTrivialImpl::lock
+//     Function: PipelineCyclerTrivialImpl::acquire
 //       Access: Public
 //  Description: Grabs an overall lock on the cycler.  Release it with
 //               a call to release().  This lock should be held while
 //               walking the list of stages.
 ////////////////////////////////////////////////////////////////////
 INLINE void PipelineCyclerTrivialImpl::
-lock(Thread *) {
+acquire(Thread *) {
 }
 
 ////////////////////////////////////////////////////////////////////
 //     Function: PipelineCyclerTrivialImpl::release
 //       Access: Public
 //  Description: Release the overall lock on the cycler that was
-//               grabbed via lock().
+//               grabbed via acquire().
 ////////////////////////////////////////////////////////////////////
 INLINE void PipelineCyclerTrivialImpl::
 release() {

+ 1 - 1
panda/src/pipeline/pipelineCyclerTrivialImpl.h

@@ -53,7 +53,7 @@ private:
 public:
   INLINE ~PipelineCyclerTrivialImpl();
 
-  INLINE void lock(Thread *current_thread = NULL);
+  INLINE void acquire(Thread *current_thread = NULL);
   INLINE void release();
 
   INLINE const CycleData *read_unlocked(Thread *current_thread) const;

+ 11 - 11
panda/src/pipeline/pipelineCyclerTrueImpl.I

@@ -14,36 +14,36 @@
 
 
 ////////////////////////////////////////////////////////////////////
-//     Function: PipelineCyclerTrueImpl::lock
+//     Function: PipelineCyclerTrueImpl::acquire
 //       Access: Public
 //  Description: Grabs an overall lock on the cycler.  Release it with
 //               a call to release().  This lock should be held while
 //               walking the list of stages.
 ////////////////////////////////////////////////////////////////////
 INLINE void PipelineCyclerTrueImpl::
-lock() {
-  TAU_PROFILE("void PipelineCyclerTrueImpl::lock()", " ", TAU_USER);
-  _lock.lock();
+acquire() {
+  TAU_PROFILE("void PipelineCyclerTrueImpl::acquire()", " ", TAU_USER);
+  _lock.acquire();
 }
 
 ////////////////////////////////////////////////////////////////////
-//     Function: PipelineCyclerTrueImpl::lock
+//     Function: PipelineCyclerTrueImpl::acquire
 //       Access: Public
 //  Description: Grabs an overall lock on the cycler.  Release it with
 //               a call to release().  This lock should be held while
 //               walking the list of stages.
 ////////////////////////////////////////////////////////////////////
 INLINE void PipelineCyclerTrueImpl::
-lock(Thread *current_thread) {
-  TAU_PROFILE("void PipelineCyclerTrueImpl::lock(Thread *)", " ", TAU_USER);
-  _lock.lock(current_thread);
+acquire(Thread *current_thread) {
+  TAU_PROFILE("void PipelineCyclerTrueImpl::acquire(Thread *)", " ", TAU_USER);
+  _lock.acquire(current_thread);
 }
 
 ////////////////////////////////////////////////////////////////////
 //     Function: PipelineCyclerTrueImpl::release
 //       Access: Public
 //  Description: Release the overall lock on the cycler that was
-//               grabbed via lock().
+//               grabbed via acquire().
 ////////////////////////////////////////////////////////////////////
 INLINE void PipelineCyclerTrueImpl::
 release() {
@@ -93,7 +93,7 @@ read(Thread *current_thread) const {
 #ifdef _DEBUG
   nassertr(pipeline_stage >= 0 && pipeline_stage < _num_stages, NULL);
 #endif
-  _lock.lock(current_thread);
+  _lock.acquire(current_thread);
   return _data[pipeline_stage];
 }
 
@@ -312,7 +312,7 @@ read_stage(int pipeline_stage, Thread *current_thread) const {
 #ifdef _DEBUG
   nassertr(pipeline_stage >= 0 && pipeline_stage < _num_stages, NULL);
 #endif
-  _lock.lock(current_thread);
+  _lock.acquire(current_thread);
   return _data[pipeline_stage];
 }
 

+ 2 - 2
panda/src/pipeline/pipelineCyclerTrueImpl.cxx

@@ -137,7 +137,7 @@ PipelineCyclerTrueImpl::
 ////////////////////////////////////////////////////////////////////
 CycleData *PipelineCyclerTrueImpl::
 write_stage(int pipeline_stage, Thread *current_thread) {
-  _lock.lock(current_thread);
+  _lock.acquire(current_thread);
 
 #ifndef NDEBUG
   nassertd(pipeline_stage >= 0 && pipeline_stage < _num_stages) {
@@ -175,7 +175,7 @@ write_stage(int pipeline_stage, Thread *current_thread) {
 ////////////////////////////////////////////////////////////////////
 CycleData *PipelineCyclerTrueImpl::
 write_stage_upstream(int pipeline_stage, bool force_to_0, Thread *current_thread) {
-  _lock.lock(current_thread);
+  _lock.acquire(current_thread);
 
 #ifndef NDEBUG
   nassertd(pipeline_stage >= 0 && pipeline_stage < _num_stages) {

+ 2 - 2
panda/src/pipeline/pipelineCyclerTrueImpl.h

@@ -54,8 +54,8 @@ public:
   void operator = (const PipelineCyclerTrueImpl &copy);
   ~PipelineCyclerTrueImpl();
 
-  INLINE void lock();
-  INLINE void lock(Thread *current_thread);
+  INLINE void acquire();
+  INLINE void acquire(Thread *current_thread);
   INLINE void release();
 
   INLINE const CycleData *read_unlocked(Thread *current_thread) const;

+ 1 - 0
panda/src/pipeline/pipeline_composite2.cxx

@@ -13,6 +13,7 @@
 #include "reMutex.cxx"
 #include "reMutexDirect.cxx"
 #include "reMutexHolder.cxx"
+#include "semaphore.cxx"
 #include "thread.cxx"
 #include "threadDummyImpl.cxx"
 #include "threadPosixImpl.cxx"

+ 67 - 17
panda/src/pipeline/reMutexDirect.I

@@ -64,7 +64,7 @@ operator = (const ReMutexDirect &copy) {
 }
 
 ////////////////////////////////////////////////////////////////////
-//     Function: ReMutexDirect::lock
+//     Function: ReMutexDirect::acquire
 //       Access: Published
 //  Description: Grabs the reMutex if it is available.  If it is not
 //               available, blocks until it becomes available, then
@@ -79,29 +79,63 @@ operator = (const ReMutexDirect &copy) {
 //               Also see ReMutexHolder.
 ////////////////////////////////////////////////////////////////////
 INLINE void ReMutexDirect::
-lock() const {
-  TAU_PROFILE("void ReMutexDirect::lock()", " ", TAU_USER);
+acquire() const {
+  TAU_PROFILE("void ReMutexDirect::acquire()", " ", TAU_USER);
 #ifdef HAVE_REMUTEXTRUEIMPL
-  ((ReMutexDirect *)this)->_impl.lock();
+  ((ReMutexDirect *)this)->_impl.acquire();
 #else
-  ((ReMutexDirect *)this)->do_lock();
+  ((ReMutexDirect *)this)->do_acquire();
 #endif  // HAVE_REMUTEXTRUEIMPL
 }
 
 ////////////////////////////////////////////////////////////////////
-//     Function: ReMutexDirect::lock
+//     Function: ReMutexDirect::acquire
 //       Access: Published
-//  Description: This variant on lock() accepts the current thread as
+//  Description: This variant on acquire() accepts the current thread as
 //               a parameter, if it is already known, as an
 //               optimization.
 ////////////////////////////////////////////////////////////////////
 INLINE void ReMutexDirect::
-lock(Thread *current_thread) const {
-  TAU_PROFILE("void ReMutexDirect::lock(Thread *)", " ", TAU_USER);
+acquire(Thread *current_thread) const {
+  TAU_PROFILE("void ReMutexDirect::acquire(Thread *)", " ", TAU_USER);
 #ifdef HAVE_REMUTEXTRUEIMPL
-  ((ReMutexDirect *)this)->_impl.lock();
+  ((ReMutexDirect *)this)->_impl.acquire();
 #else
-  ((ReMutexDirect *)this)->do_lock(current_thread);
+  ((ReMutexDirect *)this)->do_acquire(current_thread);
+#endif  // HAVE_REMUTEXTRUEIMPL
+}
+
+////////////////////////////////////////////////////////////////////
+//     Function: ReMutexDirect::try_acquire
+//       Access: Published
+//  Description: Returns immediately, with a true value indicating the
+//               mutex has been acquired, and false indicating it has
+//               not.
+////////////////////////////////////////////////////////////////////
+INLINE bool ReMutexDirect::
+try_acquire() const {
+  TAU_PROFILE("void ReMutexDirect::acquire(bool)", " ", TAU_USER);
+#ifdef HAVE_REMUTEXTRUEIMPL
+  return ((ReMutexDirect *)this)->_impl.try_acquire();
+#else
+  return ((ReMutexDirect *)this)->do_try_acquire();
+#endif  // HAVE_REMUTEXTRUEIMPL
+}
+
+////////////////////////////////////////////////////////////////////
+//     Function: ReMutexDirect::try_acquire
+//       Access: Published
+//  Description: Returns immediately, with a true value indicating the
+//               mutex has been acquired, and false indicating it has
+//               not.
+////////////////////////////////////////////////////////////////////
+INLINE bool ReMutexDirect::
+try_acquire(Thread *current_thread) const {
+  TAU_PROFILE("void ReMutexDirect::acquire(bool)", " ", TAU_USER);
+#ifdef HAVE_REMUTEXTRUEIMPL
+  return ((ReMutexDirect *)this)->_impl.try_acquire();
+#else
+  return ((ReMutexDirect *)this)->do_try_acquire(current_thread);
 #endif  // HAVE_REMUTEXTRUEIMPL
 }
 
@@ -114,7 +148,7 @@ lock(Thread *current_thread) const {
 //               time to release the lock.
 //
 //               This method really performs the same function as
-//               lock(), but it offers a potential (slight)
+//               acquire(), but it offers a potential (slight)
 //               performance benefit when the calling thread knows
 //               that it already holds the lock.  It is an error to
 //               call this when the calling thread does not hold the
@@ -124,7 +158,7 @@ INLINE void ReMutexDirect::
 elevate_lock() const {
   TAU_PROFILE("void ReMutexDirect::elevate_lock()", " ", TAU_USER);
 #ifdef HAVE_REMUTEXTRUEIMPL
-  ((ReMutexDirect *)this)->_impl.lock();
+  ((ReMutexDirect *)this)->_impl.acquire();
 #else
   ((ReMutexDirect *)this)->do_elevate_lock();
 #endif  // HAVE_REMUTEXTRUEIMPL
@@ -209,17 +243,33 @@ get_name() const {
 
 #ifndef HAVE_REMUTEXTRUEIMPL
 ////////////////////////////////////////////////////////////////////
-//     Function: ReMutexDirect::do_lock
+//     Function: ReMutexDirect::do_acquire
 //       Access: Private
-//  Description: The private implementation of lock(), for the case in
+//  Description: The private implementation of acquire(), for the case in
 //               which the underlying lock system does not provide a
 //               reentrant mutex (and therefore we have to build this
 //               functionality on top of the existing non-reentrant
 //               mutex).
 ////////////////////////////////////////////////////////////////////
 INLINE void ReMutexDirect::
-do_lock() {
-  do_lock(Thread::get_current_thread());
+do_acquire() {
+  do_acquire(Thread::get_current_thread());
+}
+#endif
+
+#ifndef HAVE_REMUTEXTRUEIMPL
+////////////////////////////////////////////////////////////////////
+//     Function: ReMutexDirect::do_try_acquire
+//       Access: Private
+//  Description: The private implementation of try_acquire(), for the
+//               case in which the underlying lock system does not
+//               provide a reentrant mutex (and therefore we have to
+//               build this functionality on top of the existing
+//               non-reentrant mutex).
+////////////////////////////////////////////////////////////////////
+INLINE bool ReMutexDirect::
+do_try_acquire() {
+  return do_try_acquire(Thread::get_current_thread());
 }
 #endif
 

+ 47 - 8
panda/src/pipeline/reMutexDirect.cxx

@@ -30,17 +30,17 @@ output(ostream &out) const {
 
 #ifndef HAVE_REMUTEXTRUEIMPL
 ////////////////////////////////////////////////////////////////////
-//     Function: ReMutexDirect::do_lock
+//     Function: ReMutexDirect::do_acquire
 //       Access: Private
-//  Description: The private implementation of lock(), for the case in
+//  Description: The private implementation of acquire(), for the case in
 //               which the underlying lock system does not provide a
 //               reentrant mutex (and therefore we have to build this
 //               functionality on top of the existing non-reentrant
 //               mutex).
 ////////////////////////////////////////////////////////////////////
 void ReMutexDirect::
-do_lock(Thread *current_thread) {
-  _lock_impl.lock();
+do_acquire(Thread *current_thread) {
+  _lock_impl.acquire();
 
   if (_locking_thread == (Thread *)NULL) {
     // The mutex is not already locked by anyone.  Lock it.
@@ -72,11 +72,50 @@ do_lock(Thread *current_thread) {
 }
 #endif  // !HAVE_REMUTEXTRUEIMPL
 
+#ifndef HAVE_REMUTEXTRUEIMPL
+////////////////////////////////////////////////////////////////////
+//     Function: ReMutexDirect::do_try_acquire
+//       Access: Private
+//  Description: The private implementation of try_acquire(), for the
+//               case in which the underlying lock system does not
+//               provide a reentrant mutex (and therefore we have to
+//               build this functionality on top of the existing
+//               non-reentrant mutex).
+////////////////////////////////////////////////////////////////////
+bool ReMutexDirect::
+do_try_acquire(Thread *current_thread) {
+  bool acquired = true;
+  _lock_impl.acquire();
+
+  if (_locking_thread == (Thread *)NULL) {
+    // The mutex is not already locked by anyone.  Lock it.
+    _locking_thread = current_thread;
+    ++_lock_count;
+    nassertd(_lock_count == 1) {
+    }
+
+  } else if (_locking_thread == current_thread) {
+    // The mutex is already locked by this thread.  Increment the lock
+    // count.
+    ++_lock_count;
+    nassertd(_lock_count > 0) {
+    }
+    
+  } else {
+    // The mutex is locked by some other thread.  Return false.
+    acquired = false;
+  }
+  _lock_impl.release();
+
+  return acquired;
+}
+#endif  // !HAVE_REMUTEXTRUEIMPL
+
 #ifndef HAVE_REMUTEXTRUEIMPL
 ////////////////////////////////////////////////////////////////////
 //     Function: ReMutexDirect::do_elevate_lock
 //       Access: Private
-//  Description: The private implementation of lock(), for the case in
+//  Description: The private implementation of acquire(), for the case in
 //               which the underlying lock system does not provide a
 //               reentrant mutex (and therefore we have to build this
 //               functionality on top of the existing non-reentrant
@@ -84,7 +123,7 @@ do_lock(Thread *current_thread) {
 ////////////////////////////////////////////////////////////////////
 void ReMutexDirect::
 do_elevate_lock() {
-  _lock_impl.lock();
+  _lock_impl.acquire();
 
 #ifdef _DEBUG
   nassertd(_locking_thread == Thread::get_current_thread()) {
@@ -120,7 +159,7 @@ do_elevate_lock() {
 ////////////////////////////////////////////////////////////////////
 void ReMutexDirect::
 do_release() {
-  _lock_impl.lock();
+  _lock_impl.acquire();
 
 #ifdef _DEBUG
   if (_locking_thread != Thread::get_current_thread()) {
@@ -140,7 +179,7 @@ do_release() {
   if (_lock_count == 0) {
     // That was the last lock held by this thread.  Release the lock.
     _locking_thread = (Thread *)NULL;
-    _cvar_impl.signal();
+    _cvar_impl.notify();
   }
   _lock_impl.release();
 }
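
When HAVE_REMUTEXTRUEIMPL is not defined, do_acquire() and do_try_acquire() above rebuild a reentrant mutex from a plain lock, an owning-thread pointer, a hold count, and a condition variable. The sketch below shows the same construction with C++11 primitives; it is illustrative only and is not the ReMutexDirect code.

// Reentrant mutex assembled from non-reentrant pieces, as in the fallback above.
#include <condition_variable>
#include <mutex>
#include <thread>

class RecursiveLock {
public:
  void acquire() {
    std::unique_lock<std::mutex> guard(_lock);
    const std::thread::id me = std::this_thread::get_id();
    if (_count > 0 && _owner == me) {
      ++_count;                    // already ours: just bump the hold count
      return;
    }
    while (_count != 0) {
      _cvar.wait(guard);           // owned by someone else: wait our turn
    }
    _owner = me;
    _count = 1;
  }

  bool try_acquire() {
    std::lock_guard<std::mutex> guard(_lock);
    const std::thread::id me = std::this_thread::get_id();
    if (_count == 0) {
      _owner = me;
      _count = 1;
      return true;
    }
    if (_owner == me) {
      ++_count;
      return true;
    }
    return false;                  // held by another thread: report failure, don't block
  }

  void release() {
    std::lock_guard<std::mutex> guard(_lock);
    if (--_count == 0) {
      _owner = std::thread::id();  // no owner
      _cvar.notify_one();          // wake one thread blocked in acquire()
    }
  }

private:
  std::mutex _lock;
  std::condition_variable _cvar;
  std::thread::id _owner;
  int _count = 0;
};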

+ 8 - 4
panda/src/pipeline/reMutexDirect.h

@@ -38,8 +38,10 @@ private:
   INLINE void operator = (const ReMutexDirect &copy);
 
 PUBLISHED:
-  BLOCKING INLINE void lock() const;
-  BLOCKING INLINE void lock(Thread *current_thread) const;
+  BLOCKING INLINE void acquire() const;
+  BLOCKING INLINE void acquire(Thread *current_thread) const;
+  BLOCKING INLINE bool try_acquire() const;
+  BLOCKING INLINE bool try_acquire(Thread *current_thread) const;
   INLINE void elevate_lock() const;
   INLINE void release() const;
 
@@ -58,8 +60,10 @@ private:
 
 #else
   // If we don't have a reentrant mutex, we have to hand-roll one.
-  INLINE void do_lock();
-  void do_lock(Thread *current_thread);
+  INLINE void do_acquire();
+  void do_acquire(Thread *current_thread);
+  INLINE bool do_try_acquire();
+  bool do_try_acquire(Thread *current_thread);
   void do_elevate_lock();
   void do_release();
 

+ 3 - 3
panda/src/pipeline/reMutexHolder.I

@@ -22,7 +22,7 @@ INLINE ReMutexHolder::
 ReMutexHolder(const ReMutex &mutex) {
 #if defined(HAVE_THREADS) || defined(DEBUG_THREADS)
   _mutex = &mutex;
-  _mutex->lock();
+  _mutex->acquire();
 #endif
 }
 
@@ -37,7 +37,7 @@ INLINE ReMutexHolder::
 ReMutexHolder(const ReMutex &mutex, Thread *current_thread) {
 #if defined(HAVE_THREADS) || defined(DEBUG_THREADS)
   _mutex = &mutex;
-  _mutex->lock(current_thread);
+  _mutex->acquire(current_thread);
 #endif
 }
 
@@ -59,7 +59,7 @@ ReMutexHolder(ReMutex *&mutex) {
     mutex = new ReMutex;
   }
   _mutex = mutex;
-  _mutex->lock();
+  _mutex->acquire();
 #endif
 }
 

+ 126 - 0
panda/src/pipeline/semaphore.I

@@ -0,0 +1,126 @@
+// Filename: semaphore.I
+// Created by:  drose (13Oct08)
+//
+////////////////////////////////////////////////////////////////////
+//
+// PANDA 3D SOFTWARE
+// Copyright (c) Carnegie Mellon University.  All rights reserved.
+//
+// All use of this software is subject to the terms of the revised BSD
+// license.  You should have received a copy of this license along
+// with this source code in a file named "LICENSE."
+//
+////////////////////////////////////////////////////////////////////
+
+
+////////////////////////////////////////////////////////////////////
+//     Function: Semaphore::Constructor
+//       Access: Published
+//  Description: 
+////////////////////////////////////////////////////////////////////
+INLINE Semaphore::
+Semaphore(int initial_count) :
+  _lock("Semaphore::_lock"),
+  _cvar(_lock),
+  _count(initial_count)
+{
+  nassertv(_count >= 0);
+}
+
+////////////////////////////////////////////////////////////////////
+//     Function: Semaphore::Destructor
+//       Access: Published
+//  Description: 
+////////////////////////////////////////////////////////////////////
+INLINE Semaphore::
+~Semaphore() {
+}
+
+////////////////////////////////////////////////////////////////////
+//     Function: Semaphore::Copy Constructor
+//       Access: Private
+//  Description: Do not attempt to copy semaphores.
+////////////////////////////////////////////////////////////////////
+INLINE Semaphore::
+Semaphore(const Semaphore &copy) : 
+  _cvar(_lock)
+{
+  nassertv(false);
+}
+
+////////////////////////////////////////////////////////////////////
+//     Function: Semaphore::Copy Assignment Operator
+//       Access: Private
+//  Description: Do not attempt to copy semaphores.
+////////////////////////////////////////////////////////////////////
+INLINE void Semaphore::
+operator = (const Semaphore &copy) {
+  nassertv(false);
+}
+
+////////////////////////////////////////////////////////////////////
+//     Function: Semaphore::acquire
+//       Access: Published
+//  Description: Decrements the internal count.  If the count was
+//               already at zero, blocks until the count is nonzero,
+//               then decrements it.
+////////////////////////////////////////////////////////////////////
+INLINE void Semaphore::
+acquire() {
+  TAU_PROFILE("void Semaphore::acquire()", " ", TAU_USER);
+  MutexHolder holder(_lock);
+  nassertv(_count >= 0);
+  while (_count <= 0) {
+    _cvar.wait();
+  }
+  --_count;
+}
+
+////////////////////////////////////////////////////////////////////
+//     Function: Semaphore::try_acquire
+//       Access: Published
+//  Description: If the semaphore can be acquired without blocking,
+//               does so and returns true.  Otherwise, returns false.
+////////////////////////////////////////////////////////////////////
+INLINE bool Semaphore::
+try_acquire() {
+  TAU_PROFILE("void Semaphore::acquire(bool)", " ", TAU_USER);
+  MutexHolder holder(_lock);
+  nassertr(_count >= 0, false);
+  if (_count <= 0) {
+    return false;
+  }
+  --_count;
+  return true;
+}
+
+////////////////////////////////////////////////////////////////////
+//     Function: Semaphore::release
+//       Access: Published
+//  Description: Increments the semaphore's internal count.  This may
+//               wake up another thread blocked on acquire().
+//
+//               Returns the count of the semaphore upon release.
+////////////////////////////////////////////////////////////////////
+INLINE int Semaphore::
+release() {
+  TAU_PROFILE("void Semaphore::release()", " ", TAU_USER);
+  MutexHolder holder(_lock);
+  ++_count;
+  _cvar.notify();
+  return _count;
+}
+
+////////////////////////////////////////////////////////////////////
+//     Function: Semaphore::get_count
+//       Access: Published
+//  Description: Returns the current semaphore count.  Note that this
+//               call is not thread-safe (the count may change at any
+//               time).
+////////////////////////////////////////////////////////////////////
+INLINE int Semaphore::
+get_count() const {
+  TAU_PROFILE("void Semaphore::get_count()", " ", TAU_USER);
+  MutexHolder holder(_lock);
+  return _count;
+}
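
The new Semaphore is a classic counting semaphore layered on a Mutex and a ConditionVarFull, and its acquire()/release()/try_acquire() names line up with Python's threading.Semaphore, in keeping with this commit's theme. A hedged usage sketch: the semaphore.h header is not shown in the truncated diff, and the worker functions are invented for illustration.

// Hypothetical use of the new Semaphore to cap concurrency.
#include "semaphore.h"   // assumed header for the class added by this commit

static Semaphore slots(4);          // at most four workers inside at once

void do_bounded_work() {
  slots.acquire();                  // blocks while all four slots are taken
  // ... perform the rate-limited work ...
  slots.release();                  // frees a slot, possibly waking a blocked acquire()
}

void do_optional_work() {
  if (slots.try_acquire()) {        // non-blocking variant
    // ... perform the work only if a slot was free ...
    slots.release();
  }
}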

+ 26 - 0
panda/src/pipeline/semaphore.cxx

@@ -0,0 +1,26 @@
+// Filename: semaphore.cxx
+// Created by:  drose (13Oct08)
+//
+////////////////////////////////////////////////////////////////////
+//
+// PANDA 3D SOFTWARE
+// Copyright (c) Carnegie Mellon University.  All rights reserved.
+//
+// All use of this software is subject to the terms of the revised BSD
+// license.  You should have received a copy of this license along
+// with this source code in a file named "LICENSE."
+//
+////////////////////////////////////////////////////////////////////
+
+#include "semaphore.h"
+
+////////////////////////////////////////////////////////////////////
+//     Function: Semaphore::output
+//       Access: Published
+//  Description: 
+////////////////////////////////////////////////////////////////////
+void Semaphore::
+output(ostream &out) const {
+  MutexHolder holder(_lock);
+  out << "Semaphore, count = " << _count;
+}

Some files were not shown because too many files changed in this diff