
better scheduling algorithm

David Rose · 17 years ago · commit 907527a9c5

+ 1 - 0
panda/src/event/asyncTaskChain.cxx

@@ -1406,6 +1406,7 @@ void AsyncTaskChain::AsyncTaskChainThread::
 thread_main() {
   MutexHolder holder(_chain->_manager->_lock);
   while (_chain->_state != S_shutdown && _chain->_state != S_aborting) {
+    thread_consider_yield();
     if (!_chain->_active.empty() &&
         _chain->_active.front()->get_sort() == _chain->_current_sort) {
 

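Under SIMPLE_THREADS the scheduler only switches threads at explicit yield points, so long-running service loops like this one have to call thread_consider_yield() periodically. A minimal sketch of the same pattern in user code (hypothetical WorkQueue, process_items() and do_work(); not part of this commit):

    #include "thread.h"

    // Drain a queue cooperatively: thread_consider_yield() is a cheap no-op
    // until this thread's timeslice expires, at which point the simple-thread
    // scheduler switches to the next ready thread.
    void process_items(WorkQueue &queue) {
      while (!queue.empty()) {
        thread_consider_yield();
        queue.pop_front()->do_work();
      }
    }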
+ 9 - 1
panda/src/pgraph/loader.cxx

@@ -29,6 +29,8 @@
 #include "sceneGraphReducer.h"
 #include "renderState.h"
 #include "bamFile.h"
+#include "configVariableInt.h"
+#include "configVariableEnum.h"
 
 bool Loader::_file_types_loaded = false;
 PT(Loader) Loader::_global_ptr;
@@ -59,8 +61,14 @@ Loader(const string &name) :
                 "asynchronous thread.  You can set this higher, particularly if "
                 "you have many CPUs available, to allow loading multiple models "
                 "simultaneously."));
-    
     chain->set_num_threads(loader_num_threads);
+
+    ConfigVariableEnum<ThreadPriority> loader_thread_priority
+      ("loader-thread-priority", TP_low,
+       PRC_DESC("The default thread priority to assign to the threads created "
+                "for asynchronous loading.  The default is 'low'; you may "
+                "also specify 'normal', 'high', or 'urgent'."));
+    chain->set_thread_priority(loader_thread_priority);
   }
 }
 

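The new variable sits alongside the loader thread count, so both can be set from a prc file. A small example (the loader-num-threads name is assumed from the existing ConfigVariableInt above; the loader-thread-priority keywords are the ones documented in the PRC_DESC):

    # Config.prc: two loader threads, scheduled at normal rather than low priority
    loader-num-threads 2
    loader-thread-priority normal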
+ 26 - 0
panda/src/pipeline/threadPriority.cxx

@@ -35,3 +35,29 @@ operator << (ostream &out, ThreadPriority pri) {
   nassertr(false, out);
   return out;
 }
+
+istream &
+operator >> (istream &in, ThreadPriority &pri) {
+  string word;
+  in >> word;
+  if (word == "low") {
+    pri = TP_low;
+
+  } else if (word == "normal") {
+    pri = TP_normal;
+
+  } else if (word == "high") {
+    pri = TP_high;
+
+  } else if (word == "urgent") {
+    pri = TP_urgent;
+
+  } else {
+    pri = TP_normal;
+    pipeline_cat->error()
+      << "Invalid ThreadPriority string: " << word << "\n";
+    nassertr(false, in);
+  }
+
+  return in;
+}

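Since the extractor mirrors the existing inserter, a priority keyword can now be parsed from any istream, which is presumably what the ConfigVariableEnum<ThreadPriority> added in loader.cxx needs. A minimal usage sketch (hypothetical parse_priority() helper; not part of this commit):

    #include <sstream>
    #include <string>
    #include "threadPriority.h"

    // Convert a keyword such as "high" into the enum; unrecognized words
    // fall back to TP_normal and log an error.
    ThreadPriority parse_priority(const std::string &word) {
      std::istringstream strm(word);
      ThreadPriority pri;
      strm >> pri;
      return pri;
    }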
+ 2 - 0
panda/src/pipeline/threadPriority.h

@@ -32,6 +32,8 @@ END_PUBLISH
 
 EXPCL_PANDA_PIPELINE ostream &
 operator << (ostream &out, ThreadPriority pri);
+EXPCL_PANDA_PIPELINE istream &
+operator >> (istream &in, ThreadPriority &pri);
 
 
 #endif

+ 4 - 4
panda/src/pipeline/threadSimpleImpl.I

@@ -108,19 +108,19 @@ consider_yield() {
 INLINE void ThreadSimpleImpl::
 consider_yield_this() {
   double now = _manager->get_current_time();
-  if (now - _start_time > _time_per_epoch) {
+  if (now >= _stop_time) {
     yield_this();
   }
 }
 
 ////////////////////////////////////////////////////////////////////
-//     Function: ThreadSimpleImpl::get_start_time
+//     Function: ThreadSimpleImpl::get_wake_time
 //       Access: Public
 //  Description: 
 ////////////////////////////////////////////////////////////////////
 INLINE double ThreadSimpleImpl::
-get_start_time() const {
-  return _start_time;
+get_wake_time() const {
+  return _wake_time;
 }
 
 ////////////////////////////////////////////////////////////////////

+ 10 - 6
panda/src/pipeline/threadSimpleImpl.cxx

@@ -38,8 +38,11 @@ ThreadSimpleImpl(Thread *parent_obj) :
 
   _status = S_new;
   _joinable = false;
-  _time_per_epoch = 0.0;
+  _priority_weight = 1.0;
+  _run_ticks = 0;
   _start_time = 0.0;
+  _stop_time = 0.0;
+  _wake_time = 0.0;
 
   _stack = NULL;
   _stack_size = 0;
@@ -64,6 +67,7 @@ ThreadSimpleImpl::
   if (_stack != (void *)NULL) {
     memory_hook->mmap_free(_stack, _stack_size);
   }
+  _manager->remove_thread(this);
 }
 
 ////////////////////////////////////////////////////////////////////
@@ -76,7 +80,7 @@ ThreadSimpleImpl::
 void ThreadSimpleImpl::
 setup_main_thread() {
   _status = S_running;
-  _time_per_epoch = 0.05;
+  _priority_weight = _manager->_simple_thread_normal_weight;
 
   _manager->set_current_thread(this);
 }
@@ -103,19 +107,19 @@ start(ThreadPriority priority, bool joinable) {
 
   switch (priority) {
   case TP_low:
-    _time_per_epoch = 0.02;
+    _priority_weight = _manager->_simple_thread_low_weight;
     break;
 
   case TP_normal:
-    _time_per_epoch = 0.05;
+    _priority_weight = _manager->_simple_thread_normal_weight;
     break;
 
   case TP_high:
-    _time_per_epoch = 0.20;
+    _priority_weight = _manager->_simple_thread_high_weight;
     break;
 
   case TP_urgent:
-    _time_per_epoch = 0.50;
+    _priority_weight = _manager->_simple_thread_urgent_weight;
     break;
   }
 

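Each priority level now maps onto a configurable weight rather than a fixed slice length; the weights (defined on ThreadSimpleManager later in this commit) default to 0.1, 1.0, 5.0 and 10.0 for low, normal, high and urgent. If those ratios do not suit a particular application they can be tuned from a prc file, for example:

    # Hypothetical tuning: give TP_high threads only 3x the budget of TP_normal
    simple-thread-normal-weight 1.0
    simple-thread-high-weight 3.0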
+ 16 - 7
panda/src/pipeline/threadSimpleImpl.h

@@ -84,7 +84,7 @@ public:
   void yield_this();
   INLINE void consider_yield_this();
 
-  INLINE double get_start_time() const;
+  INLINE double get_wake_time() const;
 
   INLINE static void write_status(ostream &out);
 
@@ -106,15 +106,24 @@ private:
   bool _joinable;
   Status _status;
 
-  // The (approx) amount of time this thread is allowed to run each
-  // epoch.
-  double _time_per_epoch;
+  // The relative weight of this thread, relative to other threads, in
+  // priority.
+  double _priority_weight;
 
-  // This serves both as the time at which the current thread started
-  // to run, and also records the time at which a sleeping thread
-  // should wake up.
+  // The amount of time this thread has run recently.
+  unsigned int _run_ticks;
+
+  // This is the time at which the currently-running thread started
+  // execution.
   double _start_time;
 
+  // This is the time at which the currently-running thread should
+  // yield.
+  double _stop_time;
+
+  // This records the time at which a sleeping thread should wake up.
+  double _wake_time;
+
   ThreadContext _context;
   unsigned char *_stack;
   size_t _stack_size;

+ 1 - 1
panda/src/pipeline/threadSimpleManager.I

@@ -75,5 +75,5 @@ get_global_ptr() {
 ////////////////////////////////////////////////////////////////////
 INLINE bool ThreadSimpleManager::CompareStartTime::
 operator ()(ThreadSimpleImpl *a, ThreadSimpleImpl *b) const {
-  return a->get_start_time() > b->get_start_time();
+  return a->get_wake_time() > b->get_wake_time();
 }

+ 235 - 51
panda/src/pipeline/threadSimpleManager.cxx

@@ -34,7 +34,36 @@ ThreadSimpleManager *ThreadSimpleManager::_global_ptr;
 //  Description: 
 ////////////////////////////////////////////////////////////////////
 ThreadSimpleManager::
-ThreadSimpleManager() {
+ThreadSimpleManager() :
+  _simple_thread_epoch_timeslice
+  ("simple-thread-epoch-timeslice", 0.01,
+   PRC_DESC("When SIMPLE_THREADS is defined, this defines the amount of time, "
+            "in seconds, that should be considered the "
+            "typical timeslice for one epoch (to run all threads once).")),
+  _simple_thread_window
+  ("simple-thread-window", 1.0,
+   PRC_DESC("When SIMPLE_THREADS is defined, this defines the amount of time, "
+            "in seconds, over which to average all the threads' runtimes, "
+            "for the purpose of scheduling threads.")),
+  _simple_thread_low_weight
+  ("simple-thread-low-weight", 0.1,
+   PRC_DESC("When SIMPLE_THREADS is defined, this determines the relative "
+            "amount of time that is given to threads with priority TP_low.")),
+  _simple_thread_normal_weight
+  ("simple-thread-normal-weight", 1.0,
+   PRC_DESC("When SIMPLE_THREADS is defined, this determines the relative "
+            "amount of time that is given to threads with priority TP_normal.")),
+  _simple_thread_high_weight
+  ("simple-thread-high-weight", 5.0,
+   PRC_DESC("When SIMPLE_THREADS is defined, this determines the relative "
+            "amount of time that is given to threads with priority TP_high.")),
+  _simple_thread_urgent_weight
+  ("simple-thread-urgent-weight", 10.0,
+   PRC_DESC("When SIMPLE_THREADS is defined, this determines the relative "
+            "amount of time that is given to threads with priority TP_urgent."))
+{
+  _tick_scale = 1000000.0;
+  _total_ticks = 0;
   _current_thread = NULL;
   _clock = TrueClock::get_global_ptr();
   _waiting_for_exit = NULL;
@@ -84,7 +113,7 @@ enqueue_sleep(ThreadSimpleImpl *thread, double seconds) {
   }
 
   double now = get_current_time();
-  thread->_start_time = now + seconds;
+  thread->_wake_time = now + seconds;
   _sleeping.push_back(thread);
   push_heap(_sleeping.begin(), _sleeping.end(), CompareStartTime());
 }
@@ -300,6 +329,30 @@ set_current_thread(ThreadSimpleImpl *current_thread) {
   _current_thread = current_thread;
 }
 
+////////////////////////////////////////////////////////////////////
+//     Function: ThreadSimpleManager::remove_thread
+//       Access: Public
+//  Description: Removes the indicated thread from the accounting, for
+//               instance just before the thread destructs.
+////////////////////////////////////////////////////////////////////
+void ThreadSimpleManager::
+remove_thread(ThreadSimpleImpl *thread) {
+  TickRecords new_records;
+  TickRecords::iterator ri;
+  for (ri = _tick_records.begin(); ri != _tick_records.end(); ++ri) {
+    if ((*ri)._thread != thread) {
+      // Keep this record.
+      new_records.push_back(*ri);
+    } else {
+      // Lose this record.
+      nassertv(_total_ticks >= (*ri)._tick_count);
+      _total_ticks -= (*ri)._tick_count;
+    }
+  }
+
+  _tick_records.swap(new_records);
+}
+
 ////////////////////////////////////////////////////////////////////
 //     Function: ThreadSimpleManager::system_sleep
 //       Access: Public, Static
@@ -346,7 +399,7 @@ write_status(ostream &out) const {
   sort(s2.begin(), s2.end(), CompareStartTime());
   Sleeping::const_iterator si;
   for (si = s2.begin(); si != s2.end(); ++si) {
-    out << " " << *(*si)->_parent_obj << "(" << (*si)->_start_time - now
+    out << " " << *(*si)->_parent_obj << "(" << (*si)->_wake_time - now
         << "s)";
   }
   out << "\n";
@@ -430,65 +483,100 @@ st_choose_next_context(void *data) {
 ////////////////////////////////////////////////////////////////////
 void ThreadSimpleManager::
 choose_next_context() {
-  _current_thread = NULL;
-
   double now = get_current_time();
 
+  do_timeslice_accounting(_current_thread, now);
+  _current_thread = NULL;
+
   if (!_sleeping.empty()) {
     wake_sleepers(now);
   }
 
+  bool new_epoch = !_ready.empty() && _next_ready.empty();
+
   // Choose a new thread to execute.
-  while (_ready.empty()) {
-    if (!_next_ready.empty()) {
-      // We've finished an epoch.
-      _ready.swap(_next_ready);
-      system_yield();
-
-    } else if (!_sleeping.empty()) {
-      // All threads are sleeping.
-      double wait = _sleeping.front()->_start_time - now;
-      if (wait > 0.0) {
-        if (thread_cat->is_debug()) {
-          thread_cat.debug()
-            << "Sleeping all threads " << wait << " seconds\n";
+  while (true) {
+    // If there are no threads, sleep.
+    while (_ready.empty()) {
+      if (!_next_ready.empty()) {
+        // We've finished an epoch.
+        _ready.swap(_next_ready);
+
+        if (new_epoch && !_tick_records.empty()) {
+          // Pop the oldest timeslice record off when we finish an
+          // epoch without executing any threads, to ensure we don't
+          // get caught in an "all threads reached budget" loop.
+          if (thread_cat->is_debug()) {
+            thread_cat.debug()
+              << "All threads exceeded budget.\n";
+          }
+          TickRecord &record = _tick_records.front();
+          _total_ticks -= record._tick_count;
+          nassertv(record._thread->_run_ticks >= record._tick_count);
+          record._thread->_run_ticks -= record._tick_count;
+          _tick_records.pop_front();
+
+        } else {
+          // Otherwise, we're legitimately at the end of an epoch.
+          // Yield, to give some time back to the system.
+          system_yield();
         }
-        system_sleep(wait);
-      }
-      now = get_current_time();
-      wake_sleepers(now);
-
-    } else {
-      // No threads are ready!
-      if (!_blocked.empty()) {
+        new_epoch = true;
+        
+      } else if (!_sleeping.empty()) {
+        // All threads are sleeping.
+        double wait = _sleeping.front()->_wake_time - now;
+        if (wait > 0.0) {
+          if (thread_cat->is_debug()) {
+            thread_cat.debug()
+              << "Sleeping all threads " << wait << " seconds\n";
+          }
+          system_sleep(wait);
+        }
+        now = get_current_time();
+        wake_sleepers(now);
+        
+      } else {
+        // No threads are ready!
+        if (!_blocked.empty()) {
+          thread_cat->error()
+            << "Deadlock!  All threads blocked.\n";
+          report_deadlock();
+          abort();
+        }
+        
+        // All threads have finished execution.
+        if (_waiting_for_exit != NULL) {
+          // And one thread--presumably the main thread--was waiting for
+          // that.
+          _ready.push_back(_waiting_for_exit);
+          _waiting_for_exit = NULL;
+          break;
+        }
+        
+        // No threads are queued anywhere.  This is some kind of
+        // internal error, since normally the main thread, at least,
+        // should be queued somewhere.
         thread_cat->error()
-          << "Deadlock!  All threads blocked.\n";
-        report_deadlock();
-        abort();
-      }
-
-      // All threads have finished execution.
-      if (_waiting_for_exit != NULL) {
-        // And one thread--presumably the main thread--was waiting for
-        // that.
-        _ready.push_back(_waiting_for_exit);
-        _waiting_for_exit = NULL;
-        break;
+          << "All threads disappeared!\n";
+        exit(0);
       }
+    }
 
-      // No threads are queued anywhere.  This is some kind of
-      // internal error, since normally the main thread, at least,
-      // should be queued somewhere.
-      thread_cat->error()
-        << "All threads disappeared!\n";
-      exit(0);
+    ThreadSimpleImpl *chosen_thread = _ready.front();
+    _ready.pop_front();
+    
+    double timeslice = determine_timeslice(chosen_thread);
+    if (timeslice > 0.0) {
+      // This thread is ready to roll.  Break out of the loop.
+      chosen_thread->_start_time = now;
+      chosen_thread->_stop_time = now + timeslice;
+      _current_thread = chosen_thread;
+      break;
     }
+    _next_ready.push_back(chosen_thread);
   }
 
-  _current_thread = _ready.front();
-  _ready.pop_front();
-  _current_thread->_start_time = now;
-
   // All right, the thread is ready to roll.  Begin.
   if (thread_cat->is_debug()) {
     size_t blocked_count = 0;
@@ -497,10 +585,12 @@ choose_next_context() {
       const FifoThreads &threads = (*bi).second;
       blocked_count += threads.size();
     }
-    
+
+    double timeslice = _current_thread->_stop_time - _current_thread->_start_time;
     thread_cat.debug()
       << "Switching to " << *_current_thread->_parent_obj
-      << " (" << _ready.size() + _next_ready.size()
+      << " for " << timeslice << " s ("
+      << _ready.size() + _next_ready.size()
       << " other threads ready, " << blocked_count
       << " blocked, " << _sleeping.size() << " sleeping)\n";
   }
@@ -512,6 +602,45 @@ choose_next_context() {
   abort();
 }
 
+////////////////////////////////////////////////////////////////////
+//     Function: ThreadSimpleManager::do_timeslice_accounting
+//       Access: Private
+//  Description: Records the amount of time the indicated thread has
+//               run, and updates the moving average.
+////////////////////////////////////////////////////////////////////
+void ThreadSimpleManager::
+do_timeslice_accounting(ThreadSimpleImpl *thread, double now) {
+  double elapsed = now - thread->_start_time;
+  if (thread_cat.is_debug()) {
+    thread_cat.debug()
+      << *thread->_parent_obj << " ran for " << elapsed << " s of "
+      << thread->_stop_time - thread->_start_time << " requested.\n";
+  }
+    
+  nassertv(elapsed >= 0.0);
+  unsigned int ticks = (unsigned int)(elapsed * _tick_scale + 0.5);
+  thread->_run_ticks += ticks;
+
+  // Now remove any old records.
+  unsigned int ticks_window = (unsigned int)(_simple_thread_window * _tick_scale + 0.5);
+  while (_total_ticks > ticks_window) {
+    nassertv(!_tick_records.empty());
+    TickRecord &record = _tick_records.front();
+    _total_ticks -= record._tick_count;
+    nassertv(record._thread->_run_ticks >= record._tick_count);
+    record._thread->_run_ticks -= record._tick_count;
+    _tick_records.pop_front();
+  }
+
+  // Finally, record the new record.
+  TickRecord record;
+  record._tick_count = ticks;
+  record._thread = thread;
+  _tick_records.push_back(record);
+  _total_ticks += ticks;
+}
+
+
 ////////////////////////////////////////////////////////////////////
 //     Function: ThreadSimpleManager::wake_sleepers
 //       Access: Private
@@ -520,7 +649,7 @@ choose_next_context() {
 ////////////////////////////////////////////////////////////////////
 void ThreadSimpleManager::
 wake_sleepers(double now) {
-  while (!_sleeping.empty() && _sleeping.front()->_start_time <= now) {
+  while (!_sleeping.empty() && _sleeping.front()->_wake_time <= now) {
     ThreadSimpleImpl *thread = _sleeping.front();
     pop_heap(_sleeping.begin(), _sleeping.end(), CompareStartTime());
     _sleeping.pop_back();
@@ -556,6 +685,61 @@ report_deadlock() {
   }
 }
 
+////////////////////////////////////////////////////////////////////
+//     Function: ThreadSimpleManager::determine_timeslice
+//       Access: Private
+//  Description: Determines the amount of time that should be
+//               allocated to the next timeslice of this thread, based
+//               on its priority weight and the amount of time it has
+//               run recently relative to other threads.
+////////////////////////////////////////////////////////////////////
+double ThreadSimpleManager::
+determine_timeslice(ThreadSimpleImpl *chosen_thread) {
+  if (_ready.empty() && _next_ready.empty()) {
+    // This is the only ready thread.  It gets the full timeslice.
+    return _simple_thread_epoch_timeslice;
+  }
+
+  // Count up the total runtime and weight of all ready threads.
+  unsigned int total_ticks = chosen_thread->_run_ticks;
+  double total_weight = chosen_thread->_priority_weight;
+
+  FifoThreads::const_iterator ti;
+  for (ti = _ready.begin(); ti != _ready.end(); ++ti) {
+    total_ticks += (*ti)->_run_ticks;
+    total_weight += (*ti)->_priority_weight;
+  }
+  for (ti = _next_ready.begin(); ti != _next_ready.end(); ++ti) {
+    total_ticks += (*ti)->_run_ticks;
+    total_weight += (*ti)->_priority_weight;
+  }
+
+  nassertr(total_weight != 0.0, 0.0);
+  double budget_ratio = chosen_thread->_priority_weight / total_weight;
+
+  if (total_ticks == 0) {
+    // This must be the first thread.  Special case.
+    return budget_ratio * _simple_thread_epoch_timeslice;
+  }
+
+  double run_ratio = (double)chosen_thread->_run_ticks / (double)total_ticks;
+  double remaining_ratio = budget_ratio - run_ratio;
+
+  if (thread_cat->is_debug()) {
+    thread_cat.debug()
+      << *chosen_thread->_parent_obj << " accrued "
+      << chosen_thread->_run_ticks / _tick_scale << " s of "
+      << total_ticks / _tick_scale << "; budget is "
+      << budget_ratio * total_ticks / _tick_scale << ".\n";
+    if (remaining_ratio <= 0.0) {
+      thread_cat.debug()
+        << "Exceeded budget.\n";
+    }
+  }
+
+  return remaining_ratio * _simple_thread_epoch_timeslice;
+}
+
 ////////////////////////////////////////////////////////////////////
 //     Function: ThreadSimpleManager::kill_non_joinable
 //       Access: Private

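A worked example of the new budgeting, using hypothetical numbers: two runnable threads, A at TP_normal (weight 1.0) and B at TP_high (weight 5.0), with simple-thread-epoch-timeslice at its default 0.01 s and _tick_scale at 1000000.0. Suppose the averaging window currently holds 600 ticks (600 microseconds) of runtime for A and 400 ticks for B:

    total_weight = 1.0 + 5.0 = 6.0        total_ticks = 600 + 400 = 1000

    A: budget_ratio = 1.0/6.0 = 0.167   run_ratio = 0.600   remaining_ratio = -0.433
       -> determine_timeslice() is not positive; A is pushed onto _next_ready
    B: budget_ratio = 5.0/6.0 = 0.833   run_ratio = 0.400   remaining_ratio = +0.433
       -> timeslice = 0.433 * 0.01 s, so B runs for roughly 4.3 ms

If every ready thread comes back non-positive, the end-of-epoch branch in choose_next_context() pops the oldest TickRecord, so the budgets recover instead of stalling.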
+ 24 - 0
panda/src/pipeline/threadSimpleManager.h

@@ -24,6 +24,7 @@
 #include "pmap.h"
 #include "pvector.h"
 #include "trueClock.h"
+#include "configVariableDouble.h"
 #include <algorithm>
 
 #ifdef HAVE_POSIX_THREADS
@@ -71,6 +72,7 @@ public:
   INLINE ThreadSimpleImpl *get_current_thread();
   void set_current_thread(ThreadSimpleImpl *current_thread);
   INLINE bool is_same_system_thread() const;
+  void remove_thread(ThreadSimpleImpl *thread);
   static void system_sleep(double seconds);
   static void system_yield();
 
@@ -84,8 +86,10 @@ private:
 
   static void st_choose_next_context(void *data);
   void choose_next_context();
+  void do_timeslice_accounting(ThreadSimpleImpl *thread, double now);
   void wake_sleepers(double now);
   void report_deadlock();
+  double determine_timeslice(ThreadSimpleImpl *chosen_thread);
 
   // STL function object to sort the priority queue of sleeping threads.
   class CompareStartTime {
@@ -99,6 +103,15 @@ private:
   void kill_non_joinable(FifoThreads &threads);
   void kill_non_joinable(Sleeping &threads);
 
+public:
+  // Defined within the class to avoid static-init ordering problems.
+  ConfigVariableDouble _simple_thread_epoch_timeslice;
+  ConfigVariableDouble _simple_thread_window;
+  ConfigVariableDouble _simple_thread_low_weight;
+  ConfigVariableDouble _simple_thread_normal_weight;
+  ConfigVariableDouble _simple_thread_high_weight;
+  ConfigVariableDouble _simple_thread_urgent_weight;
+
 private:
   ThreadSimpleImpl *volatile _current_thread;
 
@@ -118,6 +131,17 @@ private:
 
   TrueClock *_clock;
 
+  double _tick_scale;
+
+  class TickRecord {
+  public:
+    unsigned int _tick_count;
+    ThreadSimpleImpl *_thread;
+  };
+  typedef pdeque<TickRecord> TickRecords;
+  TickRecords _tick_records;
+  unsigned int _total_ticks;
+
   // We may not mix-and-match OS threads with Panda's SIMPLE_THREADS.
   // If we ever get a Panda context switch request from a different OS
   // thread than the original thread, that's a serious error that may