Merged revisions 846:897 into trunk, with approval and peer review by the manager (Luca). git-svn-id: svn://svn.gna.org/svn/sgpemv2/trunk@898 3ecf2c5c-341e-0410-92b4-d18e462d057c
parent 2ff87baadf
commit e27ba77fed
15 changed files with 1399 additions and 1044 deletions
@@ -60,7 +60,7 @@ ConcreteEnvironment::ConcreteEnvironment(const ConcreteEnvironment& ce) :
{
    const Processes& ce_proc = ce._processes;
    insert_iterator<Processes> dest(_processes, _processes.begin());
    for (Iseq<Processes::const_iterator> orig = iseq(ce_proc); orig; orig++)
    for (Iseq<Processes::const_iterator> orig = const_iseq(ce_proc); orig; orig++)
        *dest++ = new DynamicProcess(dynamic_cast<const DynamicProcess&>(**orig));
}
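The iseq()/const_iseq() helpers used in the loop above (and throughout this commit) come from sequences.tcc, which is not part of this diff. Judging only from how they are called here, Iseq appears to bundle a current/end iterator pair, convert to bool while the range is not exhausted, and forward ++ and *. A minimal sketch of such a wrapper, assuming that reading of the interface (the real implementation in sequences.tcc may differ):

// Hypothetical reconstruction of the Iseq idiom, inferred from its usage in
// this diff; not the actual SGPEMv2 sequences.tcc.
#include <iostream>
#include <iterator>
#include <vector>

template <typename It>
class Iseq
{
public:
    Iseq(It begin, It end) : _cur(begin), _end(end) {}

    operator bool() const { return _cur != _end; }                  // true while not exhausted
    Iseq& operator++() { ++_cur; return *this; }                    // pre-increment
    Iseq operator++(int) { Iseq tmp(*this); ++_cur; return tmp; }   // post-increment
    typename std::iterator_traits<It>::reference operator*() const { return *_cur; }

private:
    It _cur;
    It _end;
};

// Convenience builders mirroring how iseq()/const_iseq() are called above.
template <typename Container>
Iseq<typename Container::iterator> iseq(Container& c)
{
    return Iseq<typename Container::iterator>(c.begin(), c.end());
}

template <typename Container>
Iseq<typename Container::const_iterator> const_iseq(const Container& c)
{
    return Iseq<typename Container::const_iterator>(c.begin(), c.end());
}

int main()
{
    std::vector<int> v;
    v.push_back(1);
    v.push_back(2);
    for (Iseq<std::vector<int>::const_iterator> seq = const_iseq(v); seq; ++seq)
        std::cout << *seq << '\n';
}

The switch from iseq() to const_iseq() in the hunk above matches the fact that the source container is taken by const reference.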
@@ -24,14 +24,20 @@
#include "serialize_visitor.hh"

#include "deletor.tcc"
#include "sequences.tcc"

#include <algorithm>
#include <functional>
#include <iostream>
#include <cassert>

using namespace sgpem;
using namespace std;

typedef std::vector<DynamicThread*>::const_iterator ConstThreadIt;
typedef std::vector<DynamicThread*>::iterator ThreadIt;


DynamicProcess::DynamicProcess(StaticProcess* core) :
    DynamicSchedulable(), _core(core)
{
@@ -41,13 +47,9 @@ DynamicProcess::DynamicProcess(StaticProcess* core) :
DynamicProcess::DynamicProcess(const DynamicProcess &other) :
    Schedulable(), DynamicSchedulable(other), Process(),
    _core(other._core)
{
    typedef vector<DynamicThread*>::const_iterator ThreadIt;

    const vector<DynamicThread*>& other_threads = other._dynamic_threads;

    for (ThreadIt it = other_threads.begin(); it != other_threads.end(); ++it)
        new DynamicThread(*(*it), this);
{
    for (Iseq<ConstThreadIt> seq = const_iseq(other._dynamic_threads); seq; ++seq)
        new DynamicThread(*(*seq), this);
}

DynamicProcess::~DynamicProcess()
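Both versions of the copy constructor above discard the pointer returned by new DynamicThread(..., this): the implied contract is that the thread's constructor registers itself with the parent process it receives, which then owns it. A self-contained sketch of that register-with-parent pattern, with invented names that do not come from the SGPEMv2 sources:

// Invented illustration of the "child registers itself with its parent"
// pattern implied by `new DynamicThread(*(*seq), this)`; not the real classes.
#include <cstddef>
#include <vector>

class Parent;

class Child
{
public:
    Child(int payload, Parent& parent);          // registers itself, see below
    int payload() const { return _payload; }
private:
    int _payload;
};

class Parent
{
public:
    ~Parent()
    {
        for (std::size_t i = 0; i < _children.size(); ++i)
            delete _children[i];                 // parent owns its children
    }
    void adopt(Child* c) { _children.push_back(c); }
    std::size_t child_count() const { return _children.size(); }
private:
    std::vector<Child*> _children;
};

Child::Child(int payload, Parent& parent) : _payload(payload)
{
    parent.adopt(this);   // callers may safely ignore the pointer from `new`
}

int main()
{
    Parent p;
    new Child(42, p);     // ownership passes to `p`, as with DynamicThread
    return p.child_count() == 1 ? 0 : 1;
}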
@@ -71,101 +73,36 @@ DynamicProcess::get_threads() const
Schedulable::state
DynamicProcess::get_state() const
{
    int total = _dynamic_threads.size();
    int running = 0;
    int ready = 0;
    int blocked = 0;
    int terminated = 0;
    int future = 0;

    unsigned int closest = 0;

    vector<DynamicThread*>::const_iterator it = _dynamic_threads.begin();
    for (; it != _dynamic_threads.end(); it++)
    {
        if ((**it).get_state() == state_running) running++;
        if ((**it).get_state() == state_ready) ready++;
        if ((**it).get_state() == state_blocked) blocked++;
        if ((**it).get_state() == state_terminated) terminated++;
        if ((**it).get_state() == state_future)
        {
            unsigned int arrival = (**it).get_arrival_time();
            // if this is the first future occurrence, record its arrival;
            // else record its arrival if and only if it is smaller then the recorded one
            if (future == 0)
                closest = arrival;
            else
                closest = (closest < arrival) ? closest : arrival;
            future++;
        }
    }

    assert(total > 0);
    assert(running == 1 || running == 0);
    assert(running + ready + blocked + terminated + future == total);

    if (running > 0)
        return state_running;
    if (ready > 0) // running == 0
        return state_ready;
    if (blocked > 0) // running == 0 && ready == 0
        return state_blocked;
    // Now check if a "hole" happens: if all threads are terminated
    // or blocked the next
    // thread to start, e.g. the one with the least arrival_time, has
    // start time greater than the current process elapsed time, then
    // pass from state_future to state_terminated:
    if (closest > get_elapsed_time())
        return state_terminated;
    if (terminated > 0) // running == 0 && ready == 0 && blocked == 0
        return state_terminated;
    if (future > 0) // running == 0 && ready == 0 && blocked == 0 && terminated == 0
        return state_future;

    // I'm not sure if we can get here (maybe if there are no threads?),
    // but I don't like this compiler warning: 'control reaches end of non-void function'
    return state_future;

    // Since premature optimization is the root of all evil, and the
    // following code was very fast but also very wrong, the coder
    // will be punished by allowing her to code in C++ just after
    // having passed "Algoritmi 3" exam with full marks.

    /*
    typedef vector<DynamicThread*>::const_iterator ThreadIt;
    static const int uninitialized = -1;

    assert(_dynamic_threads.size() > 0);

    state result = state_future;
    state result = state_terminated;
    int next_thread_starts_at = uninitialized;

    for(ThreadIt it = _dynamic_threads.begin(); it != _dynamic_threads.end(); ++it)
    // This is the logic behind the code:
    // If there is at least one running thread, the result is
    // running. If not, it may be either blocked, ready, future or terminated.

    // We have these cases (a state takes precedence over some other one):
    // (a) if a thread is running, return immediately state_running
    // (b) if a thread is ready, puts unconditionally result as state_ready,
    // and continue iterating (to see if there's a running thread)
    // (c) if a thread is blocked, and result is not state_ready, result
    // becomes state_blocked, and continue iterating (to see if there are
    // ready or running threads)
    // (d) if a thread is future, and result is not state_ready or
    // state_blocked, put result as state_future, and remember
    // when the next thread will start (d1) (see at the end of this
    // method for the rationale (d2)). Then continue iterating.
    // (e) else (if all threads are state_terminated) put result as
    // state_terminated.


    for(Iseq<ConstThreadIt> seq = const_iseq(_dynamic_threads); seq; ++seq)
    {
        state thread_state = (*it)->get_state();

        // This is the logic behind the code:
        // If there is at least one running thread, the result is
        // running. If not, it may be either blocked, ready, future or terminated.

        // We have these cases (a state takes precedence over some other one):
        // (a) if a thread is running, return immediately state_running
        // (b) if a thread is ready, puts unconditionally result as state_ready,
        // and continue iterating (to see if there's a running thread)
        // (c) if a thread is blocked, and result is not state_ready, result
        // becomes state_blocked, and continue iterating (to see if there are
        // ready or running threads)
        // (d) if a thread is future, and result is not state_ready or
        // state_blocked, put result as state_future, and remember
        // when the next thread will start (d1) (see at the end of this
        // method for the rationale (d2)). Then continue iterating.
        // (e) else (if all threads are state_terminated) put result as
        // state_terminated.
        state thread_state = (*seq)->get_state();

        // TODO Is this OK? Must be tested...


        int thread_starts_at;
        switch(thread_state)
        {
        case state_running: // (a)
@@ -174,23 +111,34 @@ DynamicProcess::get_state() const
            result = state_ready;
            continue;
        case state_blocked: // (c)
            result = state_blocked;
            if((result & state_ready) == 0)
                result = state_blocked;
            continue;
        case state_future: // (d)
            result = state_future;
            thread_starts_at = (*it)->get_arrival_time();
            if(next_thread_starts_at == uninitialized) // (d1)
                next_thread_starts_at = thread_starts_at;
            else
                next_thread_starts_at = std::min(thread_starts_at, next_thread_starts_at);
            if((result & (state_ready|state_blocked)) == 0)
            {
                result = state_future;
                int thread_starts_at = (*seq)->get_arrival_time();
                if(next_thread_starts_at == uninitialized) // (d1)
                    next_thread_starts_at = thread_starts_at;
                else
                    next_thread_starts_at = std::min(thread_starts_at, next_thread_starts_at);
            }
            continue;
        case state_terminated: // (e)
            // already put into terminated state as the default value
            continue;
        default: // (e)
            result = state_terminated;
        }
    } //~ "for" iterating over threads

    // reused hole checking system
    */
    // (d2) Now check if a "hole" happens: if all other threads are terminated
    // the next thread to start, e.g. the one with the least arrival_time,
    // has start time greater than the current process elapsed time, then
    // pass from state_future to state_terminated:
    if (result == state_future && next_thread_starts_at > static_cast<int>(get_elapsed_time()))
        return state_terminated;

    return result;
}

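The replacement get_state() above derives the process state from per-thread counters with a fixed precedence (running beats ready, which beats blocked, which beats the terminated/future decision), plus the "hole" rule: if nothing is runnable and the earliest future thread would only arrive after the time the process has already consumed, the process is reported as terminated. A self-contained sketch of that decision logic follows; the enum, struct and function names are invented and carry no SGPEMv2 dependencies:

// Standalone sketch of the precedence-plus-"hole" logic used by
// DynamicProcess::get_state() above; all names here are invented.
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iostream>
#include <vector>

enum State { state_running, state_ready, state_blocked, state_terminated, state_future };

struct ThreadInfo
{
    State state;
    unsigned int arrival_time;   // meaningful only when state == state_future
};

State process_state(const std::vector<ThreadInfo>& threads, unsigned int elapsed_time)
{
    int running = 0, ready = 0, blocked = 0, terminated = 0, future = 0;
    unsigned int closest = 0;    // earliest arrival among future threads

    for (std::size_t i = 0; i < threads.size(); ++i)
    {
        switch (threads[i].state)
        {
        case state_running:    ++running;    break;
        case state_ready:      ++ready;      break;
        case state_blocked:    ++blocked;    break;
        case state_terminated: ++terminated; break;
        case state_future:
            closest = (future == 0) ? threads[i].arrival_time
                                    : std::min(closest, threads[i].arrival_time);
            ++future;
            break;
        }
    }

    assert(running <= 1);        // at most one thread of a process may run

    if (running > 0) return state_running;
    if (ready   > 0) return state_ready;
    if (blocked > 0) return state_blocked;
    // "Hole": the next thread arrives strictly after the time already consumed,
    // so the process will never be scheduled again; report it as terminated.
    if (future > 0 && closest > elapsed_time) return state_terminated;
    if (terminated > 0) return state_terminated;
    return state_future;
}

int main()
{
    std::vector<ThreadInfo> threads;
    ThreadInfo done  = { state_terminated, 0 };
    ThreadInfo later = { state_future, 10 };
    threads.push_back(done);
    threads.push_back(later);
    // Only 3 time units consumed so far, next thread arrives at 10: a "hole".
    std::cout << (process_state(threads, 3) == state_terminated) << '\n';
}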
@@ -225,10 +173,9 @@ unsigned int
DynamicProcess::get_elapsed_time() const
{
    unsigned int result = 0;
    for (std::vector<DynamicThread*>::const_iterator it = _dynamic_threads.begin();
         it != _dynamic_threads.end(); it++)
    for (Iseq<ConstThreadIt> seq = const_iseq(_dynamic_threads); seq; ++seq)
    {
        result += (*it)->get_elapsed_time();
        result += (*seq)->get_elapsed_time();
    }
    return result;
}
@@ -237,10 +184,9 @@ int
DynamicProcess::get_last_acquisition() const
{
    int result = -1;
    for (std::vector<DynamicThread*>::const_iterator it = _dynamic_threads.begin();
         it != _dynamic_threads.end(); it++)
    for (Iseq<ConstThreadIt> seq = const_iseq(_dynamic_threads); seq; ++seq)
    {
        int acq = (*it)->get_last_acquisition();
        int acq = (*seq)->get_last_acquisition();
        if (result < acq)
            result = acq;
    }
@@ -251,10 +197,9 @@ int
DynamicProcess::get_last_release() const
{
    int result = -1;
    for (std::vector<DynamicThread*>::const_iterator it = _dynamic_threads.begin();
         it != _dynamic_threads.end(); it++)
    for (Iseq<ConstThreadIt> seq = const_iseq(_dynamic_threads); seq; ++seq)
    {
        int acq = (*it)->get_last_release();
        int acq = (*seq)->get_last_release();
        if (result < acq)
            result = acq;
    }
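The three accessors above (get_elapsed_time(), get_last_acquisition(), get_last_release()) are all the same fold over the thread vector, now expressed with the Iseq wrapper. As a design note, the same reductions could also be written with the standard library where a plain iterator pair suffices; a hypothetical equivalent for two of them (Thread here is a stand-in type, not the project's DynamicThread):

// Hypothetical rewrite of two of the reductions above using <numeric> and
// <algorithm>; Thread is an invented stand-in for DynamicThread.
#include <algorithm>
#include <numeric>
#include <vector>

struct Thread
{
    unsigned int elapsed;
    int last_acquisition;
};

static unsigned int add_elapsed(unsigned int acc, const Thread* t)
{
    return acc + t->elapsed;
}

static bool by_last_acquisition(const Thread* a, const Thread* b)
{
    return a->last_acquisition < b->last_acquisition;
}

unsigned int total_elapsed(const std::vector<Thread*>& threads)
{
    return std::accumulate(threads.begin(), threads.end(), 0u, add_elapsed);
}

int last_acquisition(const std::vector<Thread*>& threads)
{
    if (threads.empty())
        return -1;                       // same sentinel as the code above
    return (*std::max_element(threads.begin(), threads.end(),
                              by_last_acquisition))->last_acquisition;
}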
File diff suppressed because it is too large.
@@ -28,7 +28,7 @@ namespace sgpem

#include "config.h"

#include "concrete_history.hh"
#include "history.hh"
#include "cpu_policy.hh"
#include "ready_queue.hh"
#include "user_interrupt_exception.hh"
@@ -59,18 +59,6 @@ namespace sgpem
{
    friend class Singleton<Scheduler>;
public:
    /**
        Returns a pointer to the queue containing all the ready
        schedulable objects (for the policy to sort it).
        \return a pointer to the queue containing all the ready
        schedulable objects (for the policy to sort it).
    */
    ReadyQueue* get_ready_queue();
    /**
        Resets the simulation to the initial state.
        Deprecated.
    */
    // void reset_status();
    /**
        Generates a new ReadyQueue representing the status of the processes
        at the simulation instant next to the current one, and extends the History by
@@ -79,19 +67,32 @@ namespace sgpem

        \return false If the simulation has ended, true otherwise
    */
    bool step_forward(ConcreteHistory& history, CPUPolicy& cpu_policy) throw(UserInterruptException, MalformedPolicyException);
    bool step_forward(History& history, CPUPolicy& cpu_policy) throw(UserInterruptException, MalformedPolicyException);

    /**
        Returns the policy that will be used to generate the simulation at the next instant.
        \return the policy that will be used to generate the simulation at the next instant.
        \brief Returns the policy that will be used to generate the simulation at the next instant.

        \warning This is a callback method: it can only be used in methods of CPUPolicy called
        by step_forward(). Else, a NULL pointer will be returned.
        \return A pointer to the active policy, or NULL if not inside step_forward()
    */
    CPUPolicy* get_policy();

    /**
        \brief Returns a pointer to the queue containing all the ready
        schedulable objects (for the policy to sort it).

        \warning This is a callback method: it can only be used in methods of CPUPolicy called
        by step_forward(). Else, a NULL pointer will be returned.
        \return A pointer to the queue, or NULL if not inside step_forward()
    */
    ReadyQueue* get_ready_queue();

private:
    Scheduler(); //private constructor.

    ReadyQueue* _ready_queue;
    CPUPolicy* _policy;
    CPUPolicy* _policy;

    Glib::Mutex _step_mutex;
};
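The new documentation for get_policy() and get_ready_queue() pins down a callback-only contract: they return meaningful pointers only while Scheduler::step_forward() is executing, and NULL otherwise. The following self-contained sketch illustrates that pattern in isolation; every name in it is invented, so it shows only the contract described by the documentation above, not the real SGPEMv2 Scheduler or CPUPolicy interfaces:

// Self-contained illustration of the "callback window" contract documented
// above: an accessor that yields a valid pointer only while the owner is
// inside its step method, and NULL otherwise. All names are invented.
#include <cstddef>
#include <iostream>
#include <vector>

typedef std::vector<int> Queue;

class Policy;

class Owner
{
public:
    Owner() : _queue_ptr(NULL) {}

    // Valid only while step() is running, like Scheduler::get_ready_queue().
    Queue* get_ready_queue() { return _queue_ptr; }

    void step(Policy& policy);

private:
    Queue* _queue_ptr;
    Queue  _queue;
};

class Policy
{
public:
    void sort_queue(Owner& owner)
    {
        Queue* q = owner.get_ready_queue();
        if (q == NULL)             // called outside the step window
        {
            std::cout << "no queue available\n";
            return;
        }
        std::cout << "sorting " << q->size() << " entries\n";
    }
};

void Owner::step(Policy& policy)
{
    _queue_ptr = &_queue;          // open the callback window
    policy.sort_queue(*this);      // the policy may now use get_ready_queue()
    _queue_ptr = NULL;             // close it again
}

int main()
{
    Owner owner;
    Policy policy;
    policy.sort_queue(owner);      // outside step(): accessor returns NULL
    owner.step(policy);            // inside step(): accessor returns the queue
}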