- Only schedule Threads. Ditch support for Policies deciding if they want to schedule Threads or Processes altogether
- Move setter methods for last_acquisition/last_release from DynamicSchedulable to DynamicThread
- Rewrite aforesaid methods, along with the respective getter methods, into Dynamic(Thread|Process)

git-svn-id: svn://svn.gna.org/svn/sgpemv2/trunk@708 3ecf2c5c-341e-0410-92b4-d18e462d057c
commit a378239d60 (parent 736aa25456)
17 changed files with 119 additions and 194 deletions
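In practice this means a scheduling policy no longer declares what kind of schedulable it wants; it is always handed Threads. A minimal sketch of a policy after this commit, modeled on the fcfs and sjf hunks below — the Policy base class and method names are taken from the diff, the body is purely illustrative:

    class dummy(Policy) :
        def is_preemptive(self):
            return False

        def get_time_slice(self):
            return -1    # negative means "no time slice", as in fcfs/sjf below

        # No wants() anymore: the queue passed in contains only Threads
        def sort_queue(self, queue):
            pass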
@@ -82,32 +82,6 @@ class ScriptAdapter :
         self._ret_val = self._policy.is_preemptive()
         self._g_mutex.unlock()
 
-    ## @brief Asynchronously call Policy.get_time_slice()
-    #
-    # @param self The caller object
-    def async_get_time_slice(self):
-        self._g_mutex.lock(ScriptAdapter._wrap_get_time_slice, self)
-
-    def _wrap_get_time_slice(self):
-        thread.start_new_thread(ScriptAdapter._wrap_get_time_slice_callback, (self,))
-
-    def _wrap_get_time_slice_callback(self):
-        self._ret_val = self._policy.get_time_slice()
-        self._g_mutex.unlock()
-
-    ## @brief Asynchronously call Policy.wants()
-    #
-    # @param self The caller object
-    def async_wants(self):
-        self._g_mutex.lock(ScriptAdapter._wrap_wants, self)
-
-    def _wrap_wants(self):
-        thread.start_new_thread(ScriptAdapter._wrap_wants_callback, (self,))
-
-    def _wrap_wants_callback(self):
-        self._ret_val = self._policy.wants()
-        self._g_mutex.unlock()
-
     ## @brief Return the global shared variable with the methods' last return value
     def get_return_value(self):
        return self._ret_val
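The removed wrappers all follow the same handshake that the surviving is_preemptive() path still uses: the C++ side calls an async_* method, which takes the global mutex, spawns a Python thread to run the real Policy call, stashes the result, and unlocks so the caller's wait_unlock() can return and read the value via get_return_value(). A sketch of that pattern — the lock-with-callback signature and attribute names come from the diff above, while async_foo is a hypothetical stand-in, not actual file contents:

    import thread  # Python 2 low-level threading module, as used above

    class ScriptAdapterSketch:
        def async_foo(self):
            # Take the global lock; released by the callback below
            self._g_mutex.lock(ScriptAdapterSketch._wrap_foo, self)

        def _wrap_foo(self):
            thread.start_new_thread(ScriptAdapterSketch._wrap_foo_callback, (self,))

        def _wrap_foo_callback(self):
            self._ret_val = self._policy.foo()  # stash result for get_return_value()
            self._g_mutex.unlock()              # lets the C++ wait_unlock() return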
@@ -36,9 +36,6 @@ class fcfs(Policy) :
     def get_time_slice(self):
         return -2
 
-    def wants(self):
-        return policy_sorts_processes
-
     def sort_queue(self, queue):
         cmpf = lambda a, b: \
               a.get_arrival_time() < \
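The fcfs hunk is cut off mid-comparator by the diff context window. For reference, a Python 2 cmp-style comparator that orders by arrival time the way the truncated lambda suggests — a sketch of the idea, not the file's actual continuation:

    def fcfs_cmpf(a, b):
        # Negative result means a runs before b: earlier arrival first
        return cmp(a.get_arrival_time(), b.get_arrival_time())

The sjf hunk below is identical in shape; it only swaps the sort key to get_remaining_time().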
@@ -36,9 +36,6 @@ class sjf(Policy) :
     def get_time_slice(self):
         return -1
 
-    def wants(self):
-        return policy_sorts_processes
-
     def sort_queue(self, queue):
         cmpf = lambda a, b: \
               a.get_remaining_time() < \
@@ -160,27 +160,6 @@ PythonPolicy::get_time_slice() const throw(UserInterruptException)
   return tmp < 0 ? numeric_limits<int>::max() : static_cast<int>(tmp);
 }
 
-policy_sorts_type
-PythonPolicy::wants() const throw(UserInterruptException)
-{
-  PyObject* retval = PyObject_CallMethod(_adapter, "async_wants", NULL);
-  Py_DECREF(retval);
-
-  wait_unlock();
-
-  // Parse return value stored in global Python object
-  retval = PyObject_CallMethod(_adapter, "get_return_value", NULL);
-  assert(retval);
-  long tmp = PyInt_AsLong(retval);
-  Py_DECREF(retval);
-
-  // FIXME: add the MalformedPolicyException class and throw it in the else
-  // branch instead
-  if(tmp == policy_sorts_threads || tmp == policy_sorts_processes)
-    return static_cast<policy_sorts_type>(tmp);
-  else
-    return policy_sorts_processes;
-}
 
 void
 PythonPolicy::wait_unlock() const throw(UserInterruptException)
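The deleted wants() also documents how return values crossed the language boundary: call async_wants(), block in wait_unlock(), then fetch the stashed value via get_return_value() and coerce anything malformed to a safe default. The same validation, restated as a Python sketch — the two constants mirror the enum values removed from the interface file below, with their declared order assumed:

    policy_sorts_threads, policy_sorts_processes = 0, 1

    def parse_wants(raw):
        # Mirror the removed C++ fallback: unknown values degrade to
        # policy_sorts_processes instead of raising (see the FIXME above)
        if raw in (policy_sorts_threads, policy_sorts_processes):
            return raw
        return policy_sorts_processes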
@@ -76,8 +76,6 @@ namespace sgpem
      */
     int get_time_slice() const throw(UserInterruptException);
 
-    policy_sorts_type wants() const throw(UserInterruptException);
-
     void activate()
     {
       //FIXME write code for me
@@ -47,9 +47,6 @@ namespace std {
 
 namespace sgpem {
 
-  /** Don't get worried, order is not important! */
-  enum policy_sorts_type { policy_sorts_threads, policy_sorts_processes };
-
   class Policy {
     public:
       virtual ~Policy() = 0;
@@ -154,7 +151,7 @@ namespace sgpem {
   };
 
   virtual unsigned int get_arrival_time() const = 0;
   virtual unsigned int get_remaining_time() const = 0;
   virtual unsigned int get_elapsed_time() const = 0;
   virtual int get_base_priority() const = 0;
   virtual int get_current_priority() const = 0;
   virtual unsigned int get_total_cpu_time() const = 0;
@@ -198,25 +195,7 @@ namespace sgpem {
 
       size_t size() const;
 
-      // Dynamic cast to Process or to Thread so
-      // that Python code sees the extra-methods
-      %typemap(out) sgpem::Schedulable*
-      {
-        // OMG, Ponies!!
-        Process* proc;
-        Thread* thread;
-        if((proc = dynamic_cast<Process*>($1)) != NULL)
-          $result = SWIG_NewPointerObj(SWIG_as_voidptr(proc),
-                                       SWIGTYPE_p_sgpem__Process, 0 | 0 );
-        else if((thread = dynamic_cast<Thread*>($1)) != NULL)
-          $result = SWIG_NewPointerObj(SWIG_as_voidptr(thread),
-                                       SWIGTYPE_p_sgpem__Thread, 0 | 0 );
-        else // Fall back to Schedulable* if no dynamic_cast went well:
-          $result = SWIG_NewPointerObj(SWIG_as_voidptr(thread),
-                                       $1_descriptor, 0 | 0 );
-      }
 
-      sgpem::Schedulable* get_item_at(position index);
+      sgpem::Thread* get_item_at(position index);
 
-      %typemap(out) sgpem::Schedulable*;
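With the downcasting typemap gone, the simplification shows up on the policy author's side: get_item_at() now returns a Thread directly, so Python code can call Thread methods on queue items without any dynamic_cast machinery. A sketch of what a policy can now rely on inside sort_queue() — queue is whatever object the scheduler passes in, and the method names come from the hunks above; dump_queue itself is a hypothetical helper:

    def dump_queue(queue):
        for i in range(queue.size()):
            t = queue.get_item_at(i)  # now a Thread, not a bare Schedulable
            print t.get_arrival_time(), t.get_remaining_time()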