
better thread safety

David Rose, 18 years ago
parent commit ad6e2116cc

+ 5 - 1
dtool/Config.pp

@@ -380,7 +380,11 @@
 
 // Actually, let's *not* assume that threading implies pipelining, at
 // least not until pipelining is less of a performance hit.
-#defer DO_PIPELINING $[<= $[OPTIMIZE], 1]
+//#defer DO_PIPELINING $[<= $[OPTIMIZE], 1]
+
+// Pipelining is a little broken right now.  Turn it off altogether
+// for now.
+#defer DO_PIPELINING
 
 // Do you want to use one of the alternative malloc implementations?
 // This is almost always a good idea on Windows, where the standard

+ 11 - 0
dtool/src/dtoolbase/atomicAdjustDummyImpl.I

@@ -39,6 +39,17 @@ dec(TVOLATILE PN_int32 &var) {
   return (--var) != 0;
 }
 
+////////////////////////////////////////////////////////////////////
+//     Function: AtomicAdjustDummyImpl::add
+//       Access: Public, Static
+//  Description: Atomically computes var += delta.  It is legal for
+//               delta to be negative.
+////////////////////////////////////////////////////////////////////
+INLINE void AtomicAdjustDummyImpl::
+add(TVOLATILE PN_int32 &var, PN_int32 delta) {
+  var += delta;
+}
+
 ////////////////////////////////////////////////////////////////////
 //     Function: AtomicAdjustDummyImpl::set
 //       Access: Public, Static

+ 1 - 0
dtool/src/dtoolbase/atomicAdjustDummyImpl.h

@@ -34,6 +34,7 @@ class EXPCL_DTOOL AtomicAdjustDummyImpl {
 public:
   INLINE static void inc(TVOLATILE PN_int32 &var);
   INLINE static bool dec(TVOLATILE PN_int32 &var);
+  INLINE static void add(TVOLATILE PN_int32 &var, PN_int32 delta);
   INLINE static PN_int32 set(TVOLATILE PN_int32 &var, PN_int32 new_value);
   INLINE static PN_int32 get(const TVOLATILE PN_int32 &var);
 

+ 14 - 0
dtool/src/dtoolbase/atomicAdjustI386Impl.I

@@ -66,6 +66,20 @@ dec(TVOLATILE PN_int32 &var) {
   return (c == 0);
 }
 
+////////////////////////////////////////////////////////////////////
+//     Function: AtomicAdjustI386Impl::add
+//       Access: Public, Static
+//  Description: Atomically computes var += delta.  It is legal for
+//               delta to be negative.
+////////////////////////////////////////////////////////////////////
+INLINE void AtomicAdjustI386Impl::
+add(TVOLATILE PN_int32 &var, PN_int32 delta) {
+  PN_int32 orig_value = var;
+  while (compare_and_exchange(var, orig_value, orig_value + delta) != orig_value) {
+    orig_value = var;
+  }
+}
+
 ////////////////////////////////////////////////////////////////////
 //     Function: AtomicAdjustI386Impl::set
 //       Access: Public, Static

+ 1 - 0
dtool/src/dtoolbase/atomicAdjustI386Impl.h

@@ -37,6 +37,7 @@ class EXPCL_DTOOL AtomicAdjustI386Impl {
 public:
   INLINE static void inc(TVOLATILE PN_int32 &var);
   INLINE static bool dec(TVOLATILE PN_int32 &var);
+  INLINE static void add(TVOLATILE PN_int32 &var, PN_int32 delta);
   INLINE static PN_int32 set(TVOLATILE PN_int32 &var, PN_int32 new_value);
   INLINE static PN_int32 get(const TVOLATILE PN_int32 &var);
 

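The i386 implementation (and the Win32 one further down) builds add() from the class's existing compare_and_exchange primitive: read the current value, try to swap in value + delta, and retry if another thread changed the variable in the meantime. A minimal standalone sketch of that retry loop, using C++11 std::atomic as a stand-in for the platform primitive (an illustration only, not Panda's code):

#include <atomic>
#include <cstdint>

// Equivalent of the compare-and-exchange retry loop in AtomicAdjustI386Impl::add().
void atomic_add(std::atomic<int32_t> &var, int32_t delta) {
  int32_t orig_value = var.load();
  // compare_exchange_weak stores orig_value + delta only if var still holds
  // orig_value; on failure it reloads orig_value with the current value, so
  // the loop simply retries against the fresher value.
  while (!var.compare_exchange_weak(orig_value, orig_value + delta)) {
  }
}
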
+ 13 - 0
dtool/src/dtoolbase/atomicAdjustPosixImpl.I

@@ -44,6 +44,19 @@ dec(TVOLATILE PN_int32 &var) {
   return (result != 0);
 }
 
+////////////////////////////////////////////////////////////////////
+//     Function: AtomicAdjustPosixImpl::add
+//       Access: Public, Static
+//  Description: Atomically computes var += delta.  It is legal for
+//               delta to be negative.
+////////////////////////////////////////////////////////////////////
+INLINE void AtomicAdjustPosixImpl::
+add(TVOLATILE PN_int32 &var, PN_int32 delta) {
+  pthread_mutex_lock(&_mutex);
+  var += delta;
+  pthread_mutex_unlock(&_mutex);
+}
+
 ////////////////////////////////////////////////////////////////////
 //     Function: AtomicAdjustPosixImpl::set
 //       Access: Public, Static

+ 1 - 0
dtool/src/dtoolbase/atomicAdjustPosixImpl.h

@@ -36,6 +36,7 @@ class EXPCL_DTOOL AtomicAdjustPosixImpl {
 public:
   INLINE static void inc(TVOLATILE PN_int32 &var);
   INLINE static bool dec(TVOLATILE PN_int32 &var);
+  INLINE static void add(TVOLATILE PN_int32 &var, PN_int32 delta);
   INLINE static PN_int32 set(TVOLATILE PN_int32 &var, PN_int32 new_value);
   INLINE static PN_int32 get(const TVOLATILE PN_int32 &var);
 

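The Posix implementation takes the simpler route of serializing the read-modify-write under its pthread mutex rather than using a hardware primitive. A sketch of that shape with a file-scope counter (the names are hypothetical, standing in for AtomicAdjustPosixImpl::_mutex and the caller's variable):

#include <pthread.h>

static pthread_mutex_t counter_mutex = PTHREAD_MUTEX_INITIALIZER;
static int counter = 0;

// The addition only appears atomic because every reader and writer of
// counter agrees to go through the same mutex.
void counter_add(int delta) {
  pthread_mutex_lock(&counter_mutex);
  counter += delta;
  pthread_mutex_unlock(&counter_mutex);
}
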
+ 14 - 0
dtool/src/dtoolbase/atomicAdjustWin32Impl.I

@@ -39,6 +39,20 @@ dec(TVOLATILE PN_int32 &var) {
   return (InterlockedDecrement((LONG *)&var) != 0);
 }
 
+////////////////////////////////////////////////////////////////////
+//     Function: AtomicAdjustWin32Impl::add
+//       Access: Public, Static
+//  Description: Atomically computes var += delta.  It is legal for
+//               delta to be negative.
+////////////////////////////////////////////////////////////////////
+INLINE void AtomicAdjustWin32Impl::
+add(TVOLATILE PN_int32 &var, PN_int32 delta) {
+  PN_int32 orig_value = var;
+  while (compare_and_exchange(var, orig_value, orig_value + delta) != orig_value) {
+    orig_value = var;
+  }
+}
+
 ////////////////////////////////////////////////////////////////////
 //     Function: AtomicAdjustWin32Impl::set
 //       Access: Public, Static

+ 1 - 0
dtool/src/dtoolbase/atomicAdjustWin32Impl.h

@@ -37,6 +37,7 @@ class EXPCL_DTOOL AtomicAdjustWin32Impl {
 public:
   INLINE static void inc(TVOLATILE PN_int32 &var);
   INLINE static bool dec(TVOLATILE PN_int32 &var);
+  INLINE static void add(TVOLATILE PN_int32 &var, PN_int32 delta);
   INLINE static PN_int32 set(TVOLATILE PN_int32 &var, PN_int32 new_value);
   INLINE static PN_int32 get(const TVOLATILE PN_int32 &var);
 

+ 2 - 2
dtool/src/dtoolbase/pallocator.T

@@ -54,7 +54,7 @@ allocate(TYPENAME pallocator_array<Type>::size_type n, TYPENAME allocator<void>:
   size_t alloc_size = n * sizeof(Type);
   // We also need to store the total number of bytes we allocated.
   alloc_size += sizeof(size_t);
-  _type_handle.inc_memory_usage(TypeHandle::MC_array, alloc_size);
+  _type_handle.inc_memory_usage(TypeHandle::MC_array, (int)alloc_size);
   void *ptr = (TYPENAME pallocator_array<Type>::pointer)(*global_operator_new)(alloc_size);
   *((size_t *)ptr) = alloc_size;
   return (TYPENAME pallocator_array<Type>::pointer)(((size_t *)ptr) + 1);
@@ -70,7 +70,7 @@ deallocate(TYPENAME pallocator_array<Type>::pointer p, TYPENAME pallocator_array
 #ifdef DO_MEMORY_USAGE
   // Now we need to recover the total number of bytes.
   size_t alloc_size = *(((size_t *)p) - 1);
-  _type_handle.dec_memory_usage(TypeHandle::MC_array, alloc_size);
+  _type_handle.dec_memory_usage(TypeHandle::MC_array, (int)alloc_size);
   (*global_operator_delete)(((size_t *)p) - 1);
 #else
   free(p);

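For context, pallocator_array stores the total allocation size in a hidden size_t header just before the pointer it hands out, which is how deallocate() can charge the same byte count back to the memory tracker. A self-contained sketch of that size-prefix trick (illustrative names, not Panda's):

#include <cstdio>
#include <cstdlib>

// Allocate n_bytes of payload plus one size_t of bookkeeping, and return a
// pointer just past the header.
void *tracked_alloc(size_t n_bytes) {
  size_t alloc_size = n_bytes + sizeof(size_t);
  size_t *ptr = (size_t *)malloc(alloc_size);
  *ptr = alloc_size;            // remember the total, including the header
  return ptr + 1;
}

// Step back one size_t to recover the byte count recorded at allocation time.
void tracked_free(void *p) {
  size_t *header = ((size_t *)p) - 1;
  printf("freeing %zu bytes\n", *header);
  free(header);
}
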
+ 7 - 6
dtool/src/dtoolbase/typeHandle.cxx

@@ -18,6 +18,7 @@
 
 #include "typeHandle.h"
 #include "typeRegistryNode.h"
+#include "atomicAdjust.h"
 
 // This is initialized to zero by static initialization.
 TypeHandle TypeHandle::_none;
@@ -31,7 +32,7 @@ TypeHandle TypeHandle::_none;
 //               only updated if track-memory-usage is set true in
 //               your Config.prc file.
 ////////////////////////////////////////////////////////////////////
-size_t TypeHandle::
+int TypeHandle::
 get_memory_usage(MemoryClass memory_class) const {
   assert((int)memory_class >= 0 && (int)memory_class < (int)MC_limit);
   if ((*this) == TypeHandle::none()) {
@@ -39,7 +40,7 @@ get_memory_usage(MemoryClass memory_class) const {
   } else {
     TypeRegistryNode *rnode = TypeRegistry::ptr()->look_up(*this, NULL);
     assert(rnode != (TypeRegistryNode *)NULL);
-    return rnode->_memory_usage[memory_class];
+    return (int)AtomicAdjust::get(rnode->_memory_usage[memory_class]);
   }
 }
 #endif  // DO_MEMORY_USAGE
@@ -52,12 +53,12 @@ get_memory_usage(MemoryClass memory_class) const {
 //               allocated memory for objects of this type.
 ////////////////////////////////////////////////////////////////////
 void TypeHandle::
-inc_memory_usage(MemoryClass memory_class, size_t size) {
+inc_memory_usage(MemoryClass memory_class, int size) {
   assert((int)memory_class >= 0 && (int)memory_class < (int)MC_limit);
   if ((*this) != TypeHandle::none()) {
     TypeRegistryNode *rnode = TypeRegistry::ptr()->look_up(*this, NULL);
     assert(rnode != (TypeRegistryNode *)NULL);
-    rnode->_memory_usage[memory_class] += size;
+    AtomicAdjust::add(rnode->_memory_usage[memory_class], (PN_int32)size);
   }
 }
 #endif  // DO_MEMORY_USAGE
@@ -70,12 +71,12 @@ inc_memory_usage(MemoryClass memory_class, size_t size) {
 //               the total allocated memory for objects of this type.
 ////////////////////////////////////////////////////////////////////
 void TypeHandle::
-dec_memory_usage(MemoryClass memory_class, size_t size) {
+dec_memory_usage(MemoryClass memory_class, int size) {
   assert((int)memory_class >= 0 && (int)memory_class < (int)MC_limit);
   if ((*this) != TypeHandle::none()) {
     TypeRegistryNode *rnode = TypeRegistry::ptr()->look_up(*this, NULL);
     assert(rnode != (TypeRegistryNode *)NULL);
-    rnode->_memory_usage[memory_class] -= size;
+    AtomicAdjust::add(rnode->_memory_usage[memory_class], -(PN_int32)size);
   }
 }
 #endif  // DO_MEMORY_USAGE

+ 5 - 5
dtool/src/dtoolbase/typeHandle.h

@@ -127,13 +127,13 @@ PUBLISHED:
   INLINE  int get_best_parent_from_Set(const std::set< int > &legal_vals) const;
 
 #ifdef DO_MEMORY_USAGE
-  size_t get_memory_usage(MemoryClass memory_class) const;
-  void inc_memory_usage(MemoryClass memory_class, size_t size);
-  void dec_memory_usage(MemoryClass memory_class, size_t size);
+  int get_memory_usage(MemoryClass memory_class) const;
+  void inc_memory_usage(MemoryClass memory_class, int size);
+  void dec_memory_usage(MemoryClass memory_class, int size);
 #else
   INLINE size_t get_memory_usage(MemoryClass) const { return 0; }
-  INLINE void inc_memory_usage(MemoryClass, size_t) { }
-  INLINE void dec_memory_usage(MemoryClass, size_t) { }
+  INLINE void inc_memory_usage(MemoryClass, int) { }
+  INLINE void dec_memory_usage(MemoryClass, int) { }
 #endif  // DO_MEMORY_USAGE
 
   INLINE int get_index() const;

+ 2 - 1
dtool/src/dtoolbase/typeRegistryNode.h

@@ -22,6 +22,7 @@
 #include "dtoolbase.h"
 
 #include "typeHandle.h"
+#include "numeric_types.h"
 
 #include <assert.h>
 #include <vector>
@@ -54,7 +55,7 @@ public:
   Classes _child_classes;
 
 #ifdef DO_MEMORY_USAGE
-  size_t _memory_usage[TypeHandle::MC_limit];
+  PN_int32 _memory_usage[TypeHandle::MC_limit];
 #endif
 
   static bool _paranoid_inheritance;

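The memory-usage counters switch from size_t to PN_int32 because the AtomicAdjust primitives operate on signed 32-bit integers; with a signed add() available, inc_memory_usage() and dec_memory_usage() become the same atomic operation with opposite signs. A tiny sketch of that idea with std::atomic (illustration only, not Panda's types):

#include <atomic>
#include <cstdint>

static std::atomic<int32_t> memory_usage(0);

// Both directions share one signed atomic add; no separate subtract
// primitive is needed.
void inc_memory_usage(int size) { memory_usage.fetch_add(size); }
void dec_memory_usage(int size) { memory_usage.fetch_add(-size); }
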
+ 4 - 0
panda/src/gobj/simpleAllocator.h

@@ -31,6 +31,10 @@ class SimpleAllocatorBlock;
 //               integers within a specified upper limit; it uses a
 //               simple first-fit algorithm to find the next available
 //               space.
+//
+//               Note that this class is not inherently thread-safe;
+//               derived classes are responsible for protecting any
+//               calls into it within mutexes, if necessary.
 ////////////////////////////////////////////////////////////////////
 class EXPCL_PANDA SimpleAllocator : public LinkedListNode {
 PUBLISHED:

+ 17 - 4
panda/src/gobj/simpleLru.I

@@ -132,10 +132,20 @@ get_lru() const {
 ////////////////////////////////////////////////////////////////////
 INLINE void SimpleLruPage::
 enqueue_lru(SimpleLru *lru) {
-  dequeue_lru();
+  MutexHolder holder(SimpleLru::_global_lock);
+
+  if (_lru != (SimpleLru *)NULL) {
+    remove_from_list();
+    _lru->_total_size -= _lru_size;
+    _lru = NULL;
+  }
+
   _lru = lru;
-  _lru->_total_size += _lru_size;
-  insert_before(_lru);
+
+  if (_lru != (SimpleLru *)NULL) {
+    _lru->_total_size += _lru_size;
+    insert_before(_lru);
+  }
 
   // Let's not automatically evict pages; instead, we'll evict only on
   // an explicit epoch test.
@@ -149,6 +159,8 @@ enqueue_lru(SimpleLru *lru) {
 ////////////////////////////////////////////////////////////////////
 INLINE void SimpleLruPage::
 dequeue_lru() {
+  MutexHolder holder(SimpleLru::_global_lock);
+
   if (_lru != (SimpleLru *)NULL) {
     remove_from_list();
     _lru->_total_size -= _lru_size;
@@ -198,10 +210,11 @@ get_lru_size() const {
 //     Function: SimpleLruPage::set_lru_size
 //       Access: Published
 //  Description: Specifies the size of this page, presumably in bytes,
-//               although any units is possible.
+//               although any unit is possible.
 ////////////////////////////////////////////////////////////////////
 INLINE void SimpleLruPage::
 set_lru_size(size_t lru_size) {
+  MutexHolder holder(SimpleLru::_global_lock);
   if (_lru != (SimpleLru *)NULL) {
     _lru->_total_size -= _lru_size;
     _lru->_total_size += lru_size;

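Note that enqueue_lru() no longer calls dequeue_lru(): both methods now take SimpleLru::_global_lock through a MutexHolder, and the separately inlined copy of the dequeue logic suggests the Mutex is not re-entrant, so nesting the two calls would block the thread on a lock it already holds. A sketch of the single-hold shape (std::mutex and a plain list used for illustration):

#include <list>
#include <mutex>

std::mutex global_lock;          // stands in for SimpleLru::_global_lock
std::list<int> lru_list;

// Mirrors the new enqueue_lru(): the unlink and the re-insert both happen
// under a single hold of the lock.  Calling a separate helper that also
// locks global_lock from here would self-deadlock on a non-recursive mutex,
// which is why the dequeue logic is inlined instead.
void move_to_front(int value) {
  std::lock_guard<std::mutex> holder(global_lock);
  lru_list.remove(value);        // the inlined "dequeue" step
  lru_list.push_front(value);    // the "enqueue" step
}
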
+ 9 - 2
panda/src/gobj/simpleLru.cxx

@@ -18,6 +18,7 @@
 
 #include "simpleLru.h"
 
+Mutex SimpleLru::_global_lock;
 
 ////////////////////////////////////////////////////////////////////
 //     Function: SimpleLru::Constructor
@@ -60,6 +61,7 @@ SimpleLru::
 ////////////////////////////////////////////////////////////////////
 size_t SimpleLru::
 count_active_size() const {
+  MutexHolder holder(_global_lock);
   size_t total = 0;
 
   LinkedListNode *node = _prev;
@@ -78,6 +80,7 @@ count_active_size() const {
 ////////////////////////////////////////////////////////////////////
 void SimpleLru::
 do_evict() {
+  MutexHolder holder(_global_lock);
   // Store the current end of the list.  If pages re-enqueue
   // themselves during this traversal, we don't want to visit them
   // twice.
@@ -87,9 +90,13 @@ do_evict() {
   SimpleLruPage *node = (SimpleLruPage *)_next;
   while (_total_size > _max_size) {
     SimpleLruPage *next = (SimpleLruPage *)node->_next;
-    
+
+    // We must release the lock while we call evict_lru().
+    _global_lock.release();
     node->evict_lru();
-    if (node == end) {
+    _global_lock.lock();
+
+    if (node == end || node == _prev) {
       // If we reach the original tail of the list, stop.
       return;
     }

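do_evict() now drops the global lock around each call to node->evict_lru(), since the page's evict_lru() (see vertexDataBook.cxx below) takes the page's own lock and may re-enter the LRU to requeue pages; the extra node == _prev test guards against the list having been rearranged while the lock was released. A generic sketch of the drop-the-lock-around-a-callback pattern (std::mutex for illustration):

#include <functional>
#include <mutex>

std::mutex list_lock;            // stands in for SimpleLru::_global_lock

void evict_one(const std::function<void()> &evict_callback) {
  std::unique_lock<std::mutex> holder(list_lock);
  // ... choose the victim while the list cannot change ...
  holder.unlock();
  evict_callback();              // may take other locks or re-enter the LRU
  holder.lock();
  // ... anything cached before the unlock must be re-validated here ...
}
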
+ 5 - 0
panda/src/gobj/simpleLru.h

@@ -21,6 +21,8 @@
 
 #include "pandabase.h"
 #include "linkedListNode.h"
+#include "pmutex.h"
+#include "mutexHolder.h"
 
 class SimpleLruPage;
 
@@ -41,6 +43,9 @@ PUBLISHED:
   INLINE void consider_evict();
   INLINE void begin_epoch();
 
+public:
+  static Mutex _global_lock;
+
 private:
   void do_evict();
 

+ 47 - 16
panda/src/gobj/vertexDataBook.I

@@ -63,22 +63,6 @@ get_ram_class() const {
   return _ram_class;
 }
 
-////////////////////////////////////////////////////////////////////
-//     Function: VertexDataPage::check_resident
-//       Access: Published
-//  Description: Forces the vertex data into system RAM, if it is not
-//               already there; also, marks it recently-used.
-////////////////////////////////////////////////////////////////////
-INLINE void VertexDataPage::
-check_resident() const {
-  if (get_ram_class() != RC_resident) {
-    ((VertexDataPage *)this)->make_resident();
-  } else {
-    ((VertexDataPage *)this)->mark_used_lru();
-  }
-  nassertv(_size == _uncompressed_size);
-}
-
 ////////////////////////////////////////////////////////////////////
 //     Function: VertexDataPage::get_first_block
 //       Access: Published
@@ -87,6 +71,7 @@ check_resident() const {
 ////////////////////////////////////////////////////////////////////
 INLINE VertexDataBlock *VertexDataPage::
 get_first_block() const {
+  MutexHolder holder(_lock);
   check_resident();
   return (VertexDataBlock *)SimpleAllocator::get_first_block();
 }
@@ -130,6 +115,33 @@ get_save_file() {
   return _save_file;
 }
 
+////////////////////////////////////////////////////////////////////
+//     Function: VertexDataPage::save_to_disk
+//       Access: Published
+//  Description: Writes the page to disk, but does not evict it from
+//               memory or affect its LRU status.  If it gets evicted
+//               later without having been modified, it will not need
+//               to write itself to disk again.
+////////////////////////////////////////////////////////////////////
+INLINE bool VertexDataPage::
+save_to_disk() {
+  MutexHolder holder(_lock);
+  return do_save_to_disk();
+}
+
+////////////////////////////////////////////////////////////////////
+//     Function: VertexDataPage::restore_from_disk
+//       Access: Published
+//  Description: Restores the page from disk and makes it
+//               either compressed or resident (according to whether
+//               it was stored compressed on disk).
+////////////////////////////////////////////////////////////////////
+INLINE void VertexDataPage::
+restore_from_disk() {
+  MutexHolder holder(_lock);
+  do_restore_from_disk();
+}
+
 ////////////////////////////////////////////////////////////////////
 //     Function: VertexDataPage::get_page_data
 //       Access: Public
@@ -137,10 +149,29 @@ get_save_file() {
 ////////////////////////////////////////////////////////////////////
 INLINE unsigned char *VertexDataPage::
 get_page_data() const {
+  MutexHolder holder(_lock);
   check_resident();
   return _page_data;
 }
 
+////////////////////////////////////////////////////////////////////
+//     Function: VertexDataPage::check_resident
+//       Access: Private
+//  Description: Forces the vertex data into system RAM, if it is not
+//               already there; also, marks it recently-used.
+//
+//               Assumes the lock is already held.
+////////////////////////////////////////////////////////////////////
+INLINE void VertexDataPage::
+check_resident() const {
+  if (_ram_class != RC_resident) {
+    ((VertexDataPage *)this)->make_resident();
+  } else {
+    ((VertexDataPage *)this)->mark_used_lru();
+  }
+  nassertv(_size == _uncompressed_size);
+}
+
 ////////////////////////////////////////////////////////////////////
 //     Function: VertexDataPage::set_ram_class
 //       Access: Private

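save_to_disk() and restore_from_disk() follow the locking convention used throughout this commit: the public method only grabs the object's mutex and forwards to a private do_*() counterpart that assumes the lock is already held, so internal code paths that already own the lock can share the implementation without recursing on the mutex. A minimal sketch of the convention (illustrative class, not Panda's):

#include <mutex>

class Page {
public:
  bool save_to_disk() {
    std::lock_guard<std::mutex> holder(_lock);
    return do_save_to_disk();    // the lock is taken exactly once
  }

private:
  // Assumes _lock is already held by the caller.
  bool do_save_to_disk() {
    // ... write the page out ...
    return true;
  }

  // Internal paths that already hold _lock call the do_* form directly
  // rather than the public wrapper.
  void evict_while_locked() {
    do_save_to_disk();
  }

  std::mutex _lock;
};
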
+ 113 - 97
panda/src/gobj/vertexDataBook.cxx

@@ -20,6 +20,7 @@
 #include "configVariableInt.h"
 #include "vertexDataSaveFile.h"
 #include "pStatTimer.h"
+#include "mutexHolder.h"
 
 #ifdef HAVE_ZLIB
 #include <zlib.h>
@@ -100,6 +101,8 @@ VertexDataBook::
 ////////////////////////////////////////////////////////////////////
 VertexDataBlock *VertexDataBook::
 alloc(size_t size) {
+  MutexHolder holder(_lock);
+
   // First, try to allocate from the last page that worked; then
   // continue to the end of the list.
   size_t pi = _next_pi;
@@ -175,7 +178,7 @@ VertexDataPage(size_t page_size) : SimpleAllocator(page_size), SimpleLruPage(pag
   _size = page_size;
   _uncompressed_size = _size;
   _total_page_size += _size;
-  get_class_type().inc_memory_usage(TypeHandle::MC_array, _size);
+  get_class_type().inc_memory_usage(TypeHandle::MC_array, (int)_size);
   set_ram_class(RC_resident);
 }
 
@@ -187,7 +190,7 @@ VertexDataPage(size_t page_size) : SimpleAllocator(page_size), SimpleLruPage(pag
 VertexDataPage::
 ~VertexDataPage() {
   _total_page_size -= _size;
-  get_class_type().dec_memory_usage(TypeHandle::MC_array, _size);
+  get_class_type().dec_memory_usage(TypeHandle::MC_array, (int)_size);
 
   if (_page_data != NULL) {
     delete[] _page_data;
@@ -195,10 +198,90 @@ VertexDataPage::
 }
 
 ////////////////////////////////////////////////////////////////////
-//     Function: VertexDataPage::make_resident
+//     Function: VertexDataPage::alloc
 //       Access: Published
+//  Description: Allocates a new block.  Returns NULL if a block of the
+//               requested size cannot be allocated.
+//
+//               To free the allocated block, call block->free(), or
+//               simply delete the block pointer.
+////////////////////////////////////////////////////////////////////
+VertexDataBlock *VertexDataPage::
+alloc(size_t size) {
+  MutexHolder holder(_lock);
+  check_resident();
+  
+  VertexDataBlock *block = (VertexDataBlock *)SimpleAllocator::alloc(size);
+
+  if (block != (VertexDataBlock *)NULL) {
+    // When we allocate a new block within the page, we have to clear
+    // the disk cache (since we have just invalidated it).
+    _saved_block.clear();
+  }
+
+  return block;
+}
+
+////////////////////////////////////////////////////////////////////
+//     Function: VertexDataPage::make_block
+//       Access: Protected, Virtual
+//  Description: Creates a new SimpleAllocatorBlock object.  Override
+//               this function to specialize the block type returned.
+////////////////////////////////////////////////////////////////////
+SimpleAllocatorBlock *VertexDataPage::
+make_block(size_t start, size_t size) {
+  return new VertexDataBlock(this, start, size);
+}
+
+////////////////////////////////////////////////////////////////////
+//     Function: VertexDataPage::evict_lru
+//       Access: Public, Virtual
+//  Description: Evicts the page from the LRU.  Called internally when
+//               the LRU determines that it is full.  May also be
+//               called externally when necessary to explicitly evict
+//               the page.
+//
+//               It is legal for this method to either evict the page
+//               as requested, do nothing (in which case the eviction
+//               will be requested again at the next epoch), or
+//               requeue itself on the tail of the queue (in which
+//               case the eviction will be requested again much
+//               later).
+////////////////////////////////////////////////////////////////////
+void VertexDataPage::
+evict_lru() {
+  MutexHolder holder(_lock);
+
+  switch (_ram_class) {
+  case RC_resident:
+    if (_compressed_lru.get_max_size() == 0) {
+      make_disk();
+    } else {
+      make_compressed();
+    }
+    break;
+
+  case RC_compressed:
+    make_disk();
+    break;
+
+  case RC_disk:
+    gobj_cat.warning()
+      << "Cannot evict array data from disk.\n";
+    break;
+
+  case RC_end_of_list:
+    break;
+  }
+}
+
+////////////////////////////////////////////////////////////////////
+//     Function: VertexDataPage::make_resident
+//       Access: Private
 //  Description: Moves the page to fully resident status by
 //               expanding it or reading it from disk as necessary.
+//
+//               Assumes the lock is already held.
 ////////////////////////////////////////////////////////////////////
 void VertexDataPage::
 make_resident() {
@@ -209,7 +292,7 @@ make_resident() {
   }
 
   if (_ram_class == RC_disk) {
-    restore_from_disk();
+    do_restore_from_disk();
   }
 
   if (_ram_class == RC_compressed) {
@@ -231,14 +314,14 @@ make_resident() {
     }
     nassertv(dest_len == _uncompressed_size);
 
-    get_class_type().dec_memory_usage(TypeHandle::MC_array, _size);
+    get_class_type().dec_memory_usage(TypeHandle::MC_array, (int)_size);
     _total_page_size -= _size;
 
     delete[] _page_data;
     _page_data = new_data;
     _size = _uncompressed_size;
 
-    get_class_type().inc_memory_usage(TypeHandle::MC_array, _size);
+    get_class_type().inc_memory_usage(TypeHandle::MC_array, (int)_size);
     _total_page_size += _size;
   
 #endif
@@ -249,9 +332,11 @@ make_resident() {
 
 ////////////////////////////////////////////////////////////////////
 //     Function: VertexDataPage::make_compressed
-//       Access: Published
+//       Access: Private
 //  Description: Moves the page to compressed status by
 //               compressing it or reading it from disk as necessary.
+//
+//               Assumes the lock is already held.
 ////////////////////////////////////////////////////////////////////
 void VertexDataPage::
 make_compressed() {
@@ -262,7 +347,7 @@ make_compressed() {
   }
 
   if (_ram_class == RC_disk) {
-    restore_from_disk();
+    do_restore_from_disk();
   }
 
   if (_ram_class == RC_resident) {
@@ -289,14 +374,14 @@ make_compressed() {
     unsigned char *new_data = new unsigned char[buffer_size];
     memcpy(new_data, buffer, buffer_size);
 
-    get_class_type().dec_memory_usage(TypeHandle::MC_array, _size);
+    get_class_type().dec_memory_usage(TypeHandle::MC_array, (int)_size);
     _total_page_size -= _size;
 
     delete[] _page_data;
     _page_data = new_data;
     _size = buffer_size;
 
-    get_class_type().inc_memory_usage(TypeHandle::MC_array, _size);
+    get_class_type().inc_memory_usage(TypeHandle::MC_array, (int)_size);
     _total_page_size += _size;
 
     if (gobj_cat.is_debug()) {
@@ -312,9 +397,11 @@ make_compressed() {
 
 ////////////////////////////////////////////////////////////////////
 //     Function: VertexDataPage::make_disk
-//       Access: Published
-//  Description: Moves the page to disk status by
-//               writing it to disk as necessary.
+//       Access: Private
+//  Description: Moves the page to disk status by writing it to disk
+//               as necessary.
+//
+//               Assumes the lock is already held.
 ////////////////////////////////////////////////////////////////////
 void VertexDataPage::
 make_disk() {
@@ -325,13 +412,13 @@ make_disk() {
   }
 
   if (_ram_class == RC_resident || _ram_class == RC_compressed) {
-    if (!save_to_disk()) {
+    if (!do_save_to_disk()) {
       // Can't save it to disk for some reason.
       mark_used_lru();
       return;
     }
 
-    get_class_type().dec_memory_usage(TypeHandle::MC_array, _size);
+    get_class_type().dec_memory_usage(TypeHandle::MC_array, (int)_size);
     _total_page_size -= _size;
 
     delete[] _page_data;
@@ -343,17 +430,18 @@ make_disk() {
 }
 
 ////////////////////////////////////////////////////////////////////
-//     Function: VertexDataPage::save_to_disk
-//       Access: Published
+//     Function: VertexDataPage::do_save_to_disk
+//       Access: Private
 //  Description: Writes the page to disk, but does not evict it from
 //               memory or affect its LRU status.  If it gets evicted
 //               later without having been modified, it will not need
 //               to write itself to disk again.
 //
-//               Returns true on success, false on failure.
+//               Returns true on success, false on failure.  Assumes
+//               the lock is already held.
 ////////////////////////////////////////////////////////////////////
 bool VertexDataPage::
-save_to_disk() {
+do_save_to_disk() {
   if (_ram_class == RC_resident || _ram_class == RC_compressed) {
     PStatTimer timer(_vdata_save_pcollector);
 
@@ -382,14 +470,16 @@ save_to_disk() {
 }
 
 ////////////////////////////////////////////////////////////////////
-//     Function: VertexDataPage::restore_from_disk
-//       Access: Published
+//     Function: VertexDataPage::do_restore_from_disk
+//       Access: Private
 //  Description: Restores the page from disk and makes it
 //               either compressed or resident (according to whether
 //               it was stored compressed on disk).
+//
+//               Assumes the lock is already held.
 ////////////////////////////////////////////////////////////////////
 void VertexDataPage::
-restore_from_disk() {
+do_restore_from_disk() {
   if (_ram_class == RC_disk) {
     nassertv(_saved_block != (VertexDataSaveBlock *)NULL);
     nassertv(_page_data == (unsigned char *)NULL && _size == 0);
@@ -410,7 +500,7 @@ restore_from_disk() {
     _page_data = new_data;
     _size = buffer_size;
 
-    get_class_type().inc_memory_usage(TypeHandle::MC_array, _size);
+    get_class_type().inc_memory_usage(TypeHandle::MC_array, (int)_size);
     _total_page_size += _size;
 
     set_lru_size(_size);
@@ -422,80 +512,6 @@ restore_from_disk() {
   }
 }
 
-////////////////////////////////////////////////////////////////////
-//     Function: VertexDataPage::alloc
-//       Access: Published
-//  Description: Allocates a new block.  Returns NULL if a block of the
-//               requested size cannot be allocated.
-//
-//               To free the allocated block, call block->free(), or
-//               simply delete the block pointer.
-////////////////////////////////////////////////////////////////////
-VertexDataBlock *VertexDataPage::
-alloc(size_t size) {
-  check_resident();
-  VertexDataBlock *block = (VertexDataBlock *)SimpleAllocator::alloc(size);
-
-  if (block != (VertexDataBlock *)NULL) {
-    // When we allocate a new block within the page, we have to clear
-    // the disk cache (since we have just invalidated it).
-    _saved_block.clear();
-  }
-
-  return block;
-}
-
-////////////////////////////////////////////////////////////////////
-//     Function: VertexDataPage::make_block
-//       Access: Protected, Virtual
-//  Description: Creates a new SimpleAllocatorBlock object.  Override
-//               this function to specialize the block type returned.
-////////////////////////////////////////////////////////////////////
-SimpleAllocatorBlock *VertexDataPage::
-make_block(size_t start, size_t size) {
-  return new VertexDataBlock(this, start, size);
-}
-
-////////////////////////////////////////////////////////////////////
-//     Function: VertexDataPage::evict_lru
-//       Access: Public, Virtual
-//  Description: Evicts the page from the LRU.  Called internally when
-//               the LRU determines that it is full.  May also be
-//               called externally when necessary to explicitly evict
-//               the page.
-//
-//               It is legal for this method to either evict the page
-//               as requested, do nothing (in which case the eviction
-//               will be requested again at the next epoch), or
-//               requeue itself on the tail of the queue (in which
-//               case the eviction will be requested again much
-//               later).
-////////////////////////////////////////////////////////////////////
-void VertexDataPage::
-evict_lru() {
-  switch (_ram_class) {
-  case RC_resident:
-    if (_compressed_lru.get_max_size() == 0) {
-      make_disk();
-    } else {
-      make_compressed();
-    }
-    break;
-
-  case RC_compressed:
-    make_disk();
-    break;
-
-  case RC_disk:
-    gobj_cat.warning()
-      << "Cannot evict array data from disk.\n";
-    break;
-
-  case RC_end_of_list:
-    break;
-  }
-}
-
 ////////////////////////////////////////////////////////////////////
 //     Function: VertexDataPage::make_save_file
 //       Access: Private, Static

+ 17 - 7
panda/src/gobj/vertexDataBook.h

@@ -25,6 +25,8 @@
 #include "referenceCount.h"
 #include "pStatCollector.h"
 #include "vertexDataSaveFile.h"
+#include "pmutex.h"
+#include "mutexHolder.h"
 
 class VertexDataPage;
 class VertexDataBlock;
@@ -54,6 +56,7 @@ private:
   typedef pvector<VertexDataPage *> Pages;
   Pages _pages;
   size_t _next_pi;
+  Mutex _lock;
 };
 
 ////////////////////////////////////////////////////////////////////
@@ -80,13 +83,6 @@ PUBLISHED:
   };
 
   INLINE RamClass get_ram_class() const;
-  INLINE void check_resident() const;
-
-  void make_resident();
-  void make_compressed();
-  void make_disk();
-  bool save_to_disk();
-  void restore_from_disk();
 
   VertexDataBlock *alloc(size_t size);
   INLINE VertexDataBlock *get_first_block() const;
@@ -95,6 +91,9 @@ PUBLISHED:
   INLINE static SimpleLru *get_global_lru(RamClass rclass);
   INLINE static VertexDataSaveFile *get_save_file();
 
+  INLINE bool save_to_disk();
+  INLINE void restore_from_disk();
+
 public:
   INLINE unsigned char *get_page_data() const;
 
@@ -103,6 +102,15 @@ protected:
   virtual void evict_lru();
 
 private:
+  INLINE void check_resident() const;
+
+  void make_resident();
+  void make_compressed();
+  void make_disk();
+
+  bool do_save_to_disk();
+  void do_restore_from_disk();
+
   INLINE void set_ram_class(RamClass ram_class);
   static void make_save_file();
 
@@ -111,6 +119,8 @@ private:
   RamClass _ram_class;
   PT(VertexDataSaveBlock) _saved_block;
 
+  Mutex _lock;
+
   static SimpleLru _resident_lru;
   static SimpleLru _compressed_lru;
   static SimpleLru _disk_lru;

+ 63 - 8
panda/src/gobj/vertexDataBuffer.I

@@ -39,7 +39,7 @@ VertexDataBuffer(size_t size) :
   _resident_data(NULL),
   _size(0)
 {
-  unclean_realloc(size);
+  do_unclean_realloc(size);
 }
 
 ////////////////////////////////////////////////////////////////////
@@ -62,7 +62,9 @@ VertexDataBuffer(const VertexDataBuffer &copy) :
 ////////////////////////////////////////////////////////////////////
 INLINE void VertexDataBuffer::
 operator = (const VertexDataBuffer &copy) {
-  unclean_realloc(copy.get_size());
+  MutexHolder holder(_lock);
+
+  do_unclean_realloc(copy.get_size());
   memcpy(_resident_data, copy.get_read_pointer(), _size);
   _source_file = copy._source_file;
   _source_pos = copy._source_pos;
@@ -85,6 +87,8 @@ INLINE VertexDataBuffer::
 ////////////////////////////////////////////////////////////////////
 INLINE const unsigned char *VertexDataBuffer::
 get_read_pointer() const {
+  MutexHolder holder(_lock);
+
   if (_block != (VertexDataBlock *)NULL) {
     // We don't necessarily need to page the buffer all the way into
     // independent status; it's sufficient just to return the block's
@@ -93,7 +97,7 @@ get_read_pointer() const {
   }
   if (_resident_data == (unsigned char *)NULL && !_source_file.is_null()) {
     // If we need to re-read the original source, do so.
-    ((VertexDataBuffer *)this)->page_in();
+    ((VertexDataBuffer *)this)->do_page_in();
   }
 
   return _resident_data;
@@ -105,10 +109,12 @@ get_read_pointer() const {
 //  Description: Returns a writable pointer to the raw data.
 ////////////////////////////////////////////////////////////////////
 INLINE unsigned char *VertexDataBuffer::
-get_write_pointer() {
+get_write_pointer() { 
+  MutexHolder holder(_lock);
+
   if (_block != (VertexDataBlock *)NULL || 
       _resident_data == (unsigned char *)NULL) {
-    page_in();
+    do_page_in();
   }
   _source_file.clear();
   return _resident_data;
@@ -124,6 +130,20 @@ get_size() const {
   return _size;
 }
 
+////////////////////////////////////////////////////////////////////
+//     Function: VertexDataBuffer::clean_realloc
+//       Access: Public
+//  Description: Changes the size of the buffer, preserving its data
+//               (except for any data beyond the new end of the
+//               buffer, if the buffer is being reduced).  If the
+//               buffer is expanded, the new data is uninitialized.
+////////////////////////////////////////////////////////////////////
+INLINE void VertexDataBuffer::
+clean_realloc(size_t size) {
+  MutexHolder holder(_lock);
+  do_clean_realloc(size);
+}
+
 ////////////////////////////////////////////////////////////////////
 //     Function: VertexDataBuffer::unclean_realloc
 //       Access: Public
@@ -133,9 +153,8 @@ get_size() const {
 ////////////////////////////////////////////////////////////////////
 INLINE void VertexDataBuffer::
 unclean_realloc(size_t size) {
-  // At the moment, this has no distinct definition, since the system
-  // realloc() call doesn't have an unclean variant.
-  clean_realloc(size);
+  MutexHolder holder(_lock);
+  do_clean_realloc(size);
 }
 
 ////////////////////////////////////////////////////////////////////
@@ -148,6 +167,22 @@ clear() {
   unclean_realloc(0);
 }
 
+////////////////////////////////////////////////////////////////////
+//     Function: VertexDataBuffer::page_out
+//       Access: Public
+//  Description: Moves the buffer out of independent memory and puts
+//               it on a page in the indicated book.  The buffer may
+//               still be directly accessible as long as its page
+//               remains resident.  Any subsequent attempt to rewrite
+//               the buffer will implicitly move it off of the page
+//               and back into independent memory.
+////////////////////////////////////////////////////////////////////
+INLINE void VertexDataBuffer::
+page_out(VertexDataBook &book) {
+  MutexHolder holder(_lock);
+  do_page_out(book);
+}
+
 ////////////////////////////////////////////////////////////////////
 //     Function: VertexDataBuffer::swap
 //       Access: Public
@@ -156,6 +191,9 @@ clear() {
 ////////////////////////////////////////////////////////////////////
 INLINE void VertexDataBuffer::
 swap(VertexDataBuffer &other) {
+  MutexHolder holder(_lock);
+  MutexHolder holder2(other._lock);
+
   unsigned char *resident_data = _resident_data;
   size_t size = _size;
   PT(VertexDataBlock) block = _block;
@@ -185,6 +223,23 @@ swap(VertexDataBuffer &other) {
 ////////////////////////////////////////////////////////////////////
 INLINE void VertexDataBuffer::
 set_file(VirtualFile *source_file, streampos source_pos) {
+  MutexHolder holder(_lock);
   _source_file = source_file;
   _source_pos = source_pos;
 }
+
+////////////////////////////////////////////////////////////////////
+//     Function: VertexDataBuffer::do_unclean_realloc
+//       Access: Private
+//  Description: Changes the size of the buffer, without regard to
+//               preserving its data.  The buffer may contain random
+//               data after this call.
+//
+//               Assumes the lock is already held.
+////////////////////////////////////////////////////////////////////
+INLINE void VertexDataBuffer::
+do_unclean_realloc(size_t size) {
+  // At the moment, this has no distinct definition, since the system
+  // realloc() call doesn't have an unclean variant.
+  do_clean_realloc(size);
+}

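swap() now acquires both buffers' locks, one MutexHolder per object. As a side note, when two mutexes must be held at once, modern C++ offers std::lock to acquire them without committing to a fixed order, which removes the possibility of two threads swapping the same pair in opposite directions and deadlocking; this is an alternative shape shown for illustration, not what the commit does:

#include <mutex>
#include <utility>

struct Buffer {
  std::mutex lock;
  int payload;
};

// std::lock acquires both mutexes deadlock-free; adopt_lock hands ownership
// to the guards for release on scope exit.  Assumes a and b are distinct.
void swap_buffers(Buffer &a, Buffer &b) {
  std::lock(a.lock, b.lock);
  std::lock_guard<std::mutex> guard_a(a.lock, std::adopt_lock);
  std::lock_guard<std::mutex> guard_b(b.lock, std::adopt_lock);
  std::swap(a.payload, b.payload);
}
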
+ 20 - 15
panda/src/gobj/vertexDataBuffer.cxx

@@ -23,15 +23,17 @@ PStatCollector VertexDataBuffer::_vdata_reread_pcollector("*:Vertex Data:Reread"
 TypeHandle VertexDataBuffer::_type_handle;
 
 ////////////////////////////////////////////////////////////////////
-//     Function: VertexDataBuffer::clean_realloc
-//       Access: Public
+//     Function: VertexDataBuffer::do_clean_realloc
+//       Access: Private
 //  Description: Changes the size of the buffer, preserving its data
 //               (except for any data beyond the new end of the
 //               buffer, if the buffer is being reduced).  If the
 //               buffer is expanded, the new data is uninitialized.
+//
+//               Assumes the lock is already held.
 ////////////////////////////////////////////////////////////////////
 void VertexDataBuffer::
-clean_realloc(size_t size) {
+do_clean_realloc(size_t size) {
   if (size != _size) {
     _source_file.clear();
 
@@ -43,7 +45,7 @@ clean_realloc(size_t size) {
       if (_resident_data != (unsigned char *)NULL) {
         free(_resident_data);
         _resident_data = NULL;
-        get_class_type().dec_memory_usage(TypeHandle::MC_array, _size);
+        get_class_type().dec_memory_usage(TypeHandle::MC_array, (int)_size);
       }
       _block = NULL;
       
@@ -51,16 +53,15 @@ clean_realloc(size_t size) {
       // Page in if we're currently paged out.
       if (_block != (VertexDataBlock *)NULL || 
           _resident_data == (unsigned char *)NULL) {
-        page_in();
+        do_page_in();
       }
       
       if (_resident_data == (unsigned char *)NULL) {
         _resident_data = (unsigned char *)malloc(size);
-        get_class_type().inc_memory_usage(TypeHandle::MC_array, size);
+        get_class_type().inc_memory_usage(TypeHandle::MC_array, (int)size);
       } else {
         _resident_data = (unsigned char *)::realloc(_resident_data, size);
-        get_class_type().dec_memory_usage(TypeHandle::MC_array, _size);
-        get_class_type().inc_memory_usage(TypeHandle::MC_array, size);
+        get_class_type().inc_memory_usage(TypeHandle::MC_array, (int)size - (int)_size);
       }
       nassertv(_resident_data != (unsigned char *)NULL);
     }
@@ -70,17 +71,19 @@ clean_realloc(size_t size) {
 }
 
 ////////////////////////////////////////////////////////////////////
-//     Function: VertexDataBuffer::page_out
-//       Access: Public
+//     Function: VertexDataBuffer::do_page_out
+//       Access: Private
 //  Description: Moves the buffer out of independent memory and puts
 //               it on a page in the indicated book.  The buffer may
 //               still be directly accessible as long as its page
 //               remains resident.  Any subsequent attempt to rewrite
 //               the buffer will implicitly move it off of the page
 //               and back into independent memory.
+//
+//               Assumes the lock is already held.
 ////////////////////////////////////////////////////////////////////
 void VertexDataBuffer::
-page_out(VertexDataBook &book) {
+do_page_out(VertexDataBook &book) {
   if (_block != (VertexDataBlock *)NULL || _size == 0) {
     // We're already paged out.
     return;
@@ -99,18 +102,20 @@ page_out(VertexDataBook &book) {
 
   free(_resident_data);
   _resident_data = NULL;
-  get_class_type().dec_memory_usage(TypeHandle::MC_array, _size);
+  get_class_type().dec_memory_usage(TypeHandle::MC_array, (int)_size);
 }
 
 ////////////////////////////////////////////////////////////////////
-//     Function: VertexDataBuffer::page_in
-//       Access: Public
+//     Function: VertexDataBuffer::do_page_in
+//       Access: Private
 //  Description: Moves the buffer off of its current page and into
 //               independent memory.  If the page is not already
 //               resident, it is forced resident first.
+//
+//               Assumes the lock is already held.
 ////////////////////////////////////////////////////////////////////
 void VertexDataBuffer::
-page_in() {
+do_page_in() {
   if (_source_file != (VirtualFile *)NULL && _resident_data == (unsigned char *)NULL) {
     // Re-read the data from its original source.
     PStatTimer timer(_vdata_reread_pcollector);

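In do_clean_realloc(), the old dec-then-inc pair for the memory tracker collapses into a single signed adjustment, (int)size - (int)_size, which is one atomic operation instead of two now that AtomicAdjust::add() accepts a negative delta. A tiny sketch of the resize-and-account shape (std::atomic for illustration; error handling omitted):

#include <atomic>
#include <cstdint>
#include <cstdlib>

static std::atomic<int32_t> array_bytes(0);

// Grow or shrink the block, then report the difference to the tracker as
// one signed delta.
unsigned char *resize_tracked(unsigned char *data, size_t old_size, size_t new_size) {
  data = (unsigned char *)realloc(data, new_size);
  array_bytes.fetch_add((int32_t)new_size - (int32_t)old_size);
  return data;
}
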
+ 39 - 4
panda/src/gobj/vertexDataBuffer.h

@@ -24,11 +24,40 @@
 #include "pointerTo.h"
 #include "virtualFile.h"
 #include "pStatCollector.h"
+#include "pmutex.h"
+#include "mutexHolder.h"
 
 ////////////////////////////////////////////////////////////////////
 //       Class : VertexDataBuffer
 // Description : A block of bytes that stores the actual raw vertex
 //               data referenced by a GeomVertexArrayData object.
+//
+//               At any point, a buffer may be in one of two states:
+//
+//               independent - the buffer's memory is resident, and
+//               owned by the VertexDataBuffer object itself (in
+//               _resident_data).
+//
+//               paged - the buffer's memory is owned by a
+//               VertexDataBlock.  That block might itself be
+//               resident, compressed, or paged to disk.  If it is
+//               resident, the memory may still be accessed directly
+//               from the block.  However, this memory is considered
+//               read-only.
+//
+//               VertexDataBuffers start out in independent state.
+//               They get moved to paged state when their owning
+//               GeomVertexArrayData objects get evicted from the
+//               _independent_lru.  They can get moved back to
+//               independent state if they are modified
+//               (e.g. get_write_pointer() or realloc() is called).
+//
+//               The idea is to keep the highly dynamic and
+//               frequently-modified VertexDataBuffers resident in
+//               easy-to-access memory, while collecting the static
+//               and rarely accessed VertexDataBuffers together onto
+//               pages, where they may be written to disk as a block
+//               when necessary.
 ////////////////////////////////////////////////////////////////////
 class EXPCL_PANDA VertexDataBuffer {
 public:
@@ -42,23 +71,29 @@ public:
   INLINE unsigned char *get_write_pointer();
 
   INLINE size_t get_size() const;
-  void clean_realloc(size_t size);
+  INLINE void clean_realloc(size_t size);
   INLINE void unclean_realloc(size_t size);
   INLINE void clear();
 
-  INLINE void swap(VertexDataBuffer &other);
+  INLINE void page_out(VertexDataBook &book);
 
-  void page_out(VertexDataBook &book);
-  void page_in();
+  INLINE void swap(VertexDataBuffer &other);
 
   INLINE void set_file(VirtualFile *source_file, streampos source_pos);
 
 private:
+  void do_clean_realloc(size_t size);
+  INLINE void do_unclean_realloc(size_t size);
+
+  void do_page_out(VertexDataBook &book);
+  void do_page_in();
+
   unsigned char *_resident_data;
   size_t _size;
   PT(VertexDataBlock) _block;
   PT(VirtualFile) _source_file;
   streampos _source_pos;
+  Mutex _lock;
 
   static PStatCollector _vdata_reread_pcollector;
 

+ 5 - 0
panda/src/gobj/vertexDataSaveFile.cxx

@@ -17,6 +17,7 @@
 ////////////////////////////////////////////////////////////////////
 
 #include "vertexDataSaveFile.h"
+#include "mutexHolder.h"
 
 #ifndef _WIN32
 #include <sys/types.h>
@@ -176,6 +177,8 @@ VertexDataSaveFile::
 ////////////////////////////////////////////////////////////////////
 PT(VertexDataSaveBlock) VertexDataSaveFile::
 write_data(const unsigned char *data, size_t size, bool compressed) {
+  MutexHolder holder(_lock);
+
   if (!_is_valid) {
     return NULL;
   }
@@ -229,6 +232,8 @@ write_data(const unsigned char *data, size_t size, bool compressed) {
 ////////////////////////////////////////////////////////////////////
 bool VertexDataSaveFile::
 read_data(unsigned char *data, size_t size, VertexDataSaveBlock *block) {
+  MutexHolder holder(_lock);
+
   if (!_is_valid) {
     return false;
   }

+ 2 - 0
panda/src/gobj/vertexDataSaveFile.h

@@ -22,6 +22,7 @@
 #include "pandabase.h"
 #include "simpleAllocator.h"
 #include "filename.h"
+#include "pmutex.h"
 
 #if defined(_WIN32)
 #define WIN32_LEAN_AND_MEAN
@@ -60,6 +61,7 @@ private:
   Filename _filename;
   bool _is_valid;
   size_t _total_file_size;
+  Mutex _lock;
 
 #ifdef _WIN32
   HANDLE _handle;

+ 0 - 2
panda/src/pgraph/bamFile.cxx

@@ -60,8 +60,6 @@ bool BamFile::
 open_read(const Filename &bam_filename, bool report_errors) {
   close();
 
-  VirtualFileSystem *vfs = VirtualFileSystem::get_global_ptr();
-
   if (!_din.open(bam_filename)) {
     return false;
   }

+ 4 - 2
panda/src/putil/copyOnWriteObject.I

@@ -39,7 +39,8 @@ CopyOnWriteObject()
 #endif
 #ifdef HAVE_THREADS
   _lock_status = LS_unlocked;
-#endif
+  _locking_thread = NULL;
+#endif  // HAVE_THREADS
 }
 
 ////////////////////////////////////////////////////////////////////
@@ -59,7 +60,8 @@ CopyOnWriteObject(const CopyOnWriteObject &copy) :
 #endif
 #ifdef HAVE_THREADS
   _lock_status = LS_unlocked;
-#endif
+  _locking_thread = NULL;
+#endif  // HAVE_THREADS
 }
 
 ////////////////////////////////////////////////////////////////////

+ 1 - 0
panda/src/putil/copyOnWriteObject.cxx

@@ -39,6 +39,7 @@ unref() const {
   bool is_zero = CachedTypedWritableReferenceCount::unref();
   if (get_ref_count() == get_cache_ref_count()) {
     ((CopyOnWriteObject *)this)->_lock_status = LS_unlocked;
+    ((CopyOnWriteObject *)this)->_locking_thread = NULL;
     ((CopyOnWriteObject *)this)->_lock_cvar.signal();
   }
   return is_zero;

+ 1 - 0
panda/src/putil/copyOnWriteObject.h

@@ -57,6 +57,7 @@ private:
   Mutex _lock_mutex;
   ConditionVar _lock_cvar;
   LockStatus _lock_status;
+  Thread *_locking_thread;
 #endif  // HAVE_THREADS
 
 public:

+ 18 - 2
panda/src/putil/copyOnWritePointer.cxx

@@ -19,6 +19,7 @@
 #include "copyOnWritePointer.h"
 #include "mutexHolder.h"
 #include "config_util.h"
+#include "config_pipeline.h"
 
 #ifdef HAVE_THREADS
 ////////////////////////////////////////////////////////////////////
@@ -37,12 +38,18 @@ get_read_pointer() const {
     return NULL;
   }
 
+  Thread *current_thread = Thread::get_current_thread();
+
   MutexHolder holder(_object->_lock_mutex);
   while (_object->_lock_status == CopyOnWriteObject::LS_locked_write) {
+    if (_object->_locking_thread == current_thread) {
+      return _object;
+    }
     _object->_lock_cvar.wait();
   }
 
   _object->_lock_status = CopyOnWriteObject::LS_locked_read;
+  _object->_locking_thread = Thread::get_current_thread();
   return _object;
 }
 #endif  // HAVE_THREADS
@@ -67,15 +74,23 @@ get_write_pointer() {
     return NULL;
   }
 
+  Thread *current_thread = Thread::get_current_thread();
+
   MutexHolder holder(_object->_lock_mutex);
   while (_object->_lock_status == CopyOnWriteObject::LS_locked_write) {
+    if (_object->_locking_thread == current_thread) {
+      return _object;
+    }
     _object->_lock_cvar.wait();
   }
 
   if (_object->_lock_status == CopyOnWriteObject::LS_locked_read) {
-    // Someone else has a read copy of this pointer; we need to make
-    // our own writable copy.
     nassertr(_object->get_ref_count() > _object->get_cache_ref_count(), NULL);
+    if (_object->_locking_thread == current_thread) {
+      _object->_lock_status = CopyOnWriteObject::LS_locked_write;
+      return _object;
+    }
+
     if (util_cat.is_debug()) {
       util_cat.debug()
         << "Making copy of " << _object->get_type()
@@ -112,6 +127,7 @@ get_write_pointer() {
     // have saved himself a reference.
   }
   _object->_lock_status = CopyOnWriteObject::LS_locked_write;
+  _object->_locking_thread = Thread::get_current_thread();
 
   return _object;
 }

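Storing the locking thread (initialized to NULL in copyOnWriteObject.I above) lets get_read_pointer() and get_write_pointer() recognize the case where the caller itself already holds the write lock and return immediately instead of waiting on the condition variable forever. A standalone sketch of that idea using std::thread::id in place of Panda's Thread pointer (an illustration, not Panda's API):

#include <condition_variable>
#include <mutex>
#include <thread>

class CowObject {
public:
  // Mirrors the shape of get_write_pointer(): if the thread that already
  // holds the write lock asks again, return instead of waiting forever on
  // the condition variable.  (Like the commit, this does not count nested
  // acquisitions.)
  void lock_write() {
    std::unique_lock<std::mutex> holder(_mutex);
    const std::thread::id me = std::this_thread::get_id();
    while (_write_locked) {
      if (_locking_thread == me) {
        return;                  // re-entrant request by the lock holder
      }
      _cvar.wait(holder);
    }
    _write_locked = true;
    _locking_thread = me;
  }

  void unlock_write() {
    std::lock_guard<std::mutex> holder(_mutex);
    _write_locked = false;
    _cvar.notify_all();
  }

private:
  std::mutex _mutex;
  std::condition_variable _cvar;
  bool _write_locked = false;
  std::thread::id _locking_thread;
};
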
+ 4 - 0
panda/src/putil/linkedListNode.h

@@ -31,6 +31,10 @@
 //
 //               Typically, each node of the linked list, as well as
 //               the root of the list, will inherit from this class.
+//
+//               Note that this class is not inherently thread-safe;
+//               derived classes are responsible for protecting any
+//               calls into it within mutexes, if necessary.
 ////////////////////////////////////////////////////////////////////
 class EXPCL_PANDA LinkedListNode {
 protected: