- Added full request support, although still not working

at all. Compiles and runs, but the output is not
  correct. Anyway, now it is just a matter of logics :P
- Corrected some minor bugs in step forward.
- Step forward is now rather messy, but still better than
  four hours ago.. although this can be hard to believe.
- Added some wizards that help finding bugs.
- Rewritten dyn process get_state as it was guilty of
  premature optimization :)


git-svn-id: svn://svn.gna.org/svn/sgpemv2/trunk@825 3ecf2c5c-341e-0410-92b4-d18e462d057c
This commit is contained in:
matrevis 2006-08-06 01:47:02 +00:00
parent efe7dedd61
commit 574723a35b
6 changed files with 658 additions and 363 deletions

View File

@ -71,6 +71,63 @@ DynamicProcess::get_threads() const
Schedulable::state
DynamicProcess::get_state() const
{
int total = _dynamic_threads.size();
int running = 0;
int ready = 0;
int blocked = 0;
int terminated = 0;
int future = 0;
unsigned int closest = 0;
vector<DynamicThread*>::const_iterator it = _dynamic_threads.begin();
for(; it != _dynamic_threads.end(); it++)
{
if ((**it).get_state() == state_running) running++;
if ((**it).get_state() == state_ready) ready++;
if ((**it).get_state() == state_blocked) blocked++;
if ((**it).get_state() == state_terminated) terminated++;
if ((**it).get_state() == state_future)
{
unsigned int arrival = (**it).get_arrival_time();
// if this is the first future occurrence, record its arrival;
// else record its arrival if and only if it is smaller then the recorded one
if (future == 0)
closest = arrival;
else
closest = (closest < arrival) ? closest : arrival;
future++;
}
}
assert(total > 0);
assert(running == 1 || running == 0);
assert(running + ready + blocked + terminated + future == total);
if (running > 0)
return state_running;
if (ready > 0) // running == 0
return state_ready;
if (blocked > 0) // running == 0 && ready == 0
return state_blocked;
// Now check if a "hole" happens: if all threads are terminated
// or blocked the next
// thread to start, e.g. the one with the least arrival_time, has
// start time greater than the current process elapsed time, then
// pass from state_future to state_terminated:
if (closest > get_elapsed_time())
return state_terminated;
if (terminated > 0) // running == 0 && ready == 0 && blocked == 0
return state_terminated;
if (future > 0) // running == 0 && ready == 0 && blocked == 0 && terminated == 0
return state_future;
// Since premature optimization is the root of all evil, and the
// following code was very fast but also very wrong, the coder
// will be punished by allowing her to code in C++ just after
// having passed "Algoritmi 3" exam with full marks.
/*
typedef vector<DynamicThread*>::const_iterator ThreadIt;
static const int uninitialized = -1;
@ -103,6 +160,7 @@ DynamicProcess::get_state() const
// TODO Is this OK? Must be tested...
int thread_starts_at;
switch(thread_state)
{
@ -125,20 +183,10 @@ DynamicProcess::get_state() const
default: // (e)
result = state_terminated;
}
} //~ "for" iterating over threads
// Now check if a "hole" happens: if result == state_future, but the next
// thread to start, e.g. the one with the least arrival_time, has
// start time greater than the current process elapsed time, then
// pass from state_future to state_terminated:
// (d2)
int elapsed_time = get_elapsed_time();
if(result == state_future && next_thread_starts_at > elapsed_time )
result = state_terminated;
return result;
// reused hole checking system
*/
}

View File

@ -70,10 +70,6 @@ namespace sgpem
virtual unsigned int get_elapsed_time() const = 0;
virtual int get_last_acquisition() const = 0;
virtual int get_last_release() const = 0;
/** \brief Returns a pointer to the schedulable object
*
* This function returns a reference to the actual schedable object

View File

@ -59,6 +59,10 @@ namespace sgpem
virtual unsigned int get_elapsed_time() const = 0;
virtual int get_last_acquisition() const = 0;
virtual int get_last_release() const = 0;
virtual int get_base_priority() const = 0;
virtual unsigned int get_total_cpu_time() const = 0;

View File

@ -30,6 +30,7 @@
#include <glibmm/thread.h>
#include <cassert>
#include <iostream>
#include <memory>
using namespace std;
@ -45,6 +46,384 @@ typedef std::vector<DynamicSubRequest*> SubRequests;
typedef std::vector<DynamicThread*> Threads;
/// \brief Walks an environment snapshot and advances the state machines of
/// subrequests, requests and threads for one phase of a simulation step.
///
/// The walk to perform is selected at construction time via walk_purpose;
/// extendRequest() temporarily switches it while allocating resources.
class Extender
{
public:
    /// Identifies the phase of the step-forward algorithm this walk performs.
    enum walk_purpose
    {
        walk_to_sweep = 0,
        walk_to_advance = 1,
        walk_to_update = 2,
        walk_to_allocate_cpu = 3,
        walk_to_allocate_resources = 4
    };

    /// \brief Binds the extender to an environment snapshot.
    /// \param _env Snapshot being built for the current step (owned by caller).
    /// \param _walk Phase to perform (see walk_purpose).
    /// \param _front Current instant, measured over the containing process
    ///        execution time.
    Extender(auto_ptr<ConcreteEnvironment> & _env, walk_purpose _walk, unsigned int _front)
        : env(_env), walk(_walk), front(_front)
    {}

    /// \brief Manages a single SubRequest object, depending on its state.
    /// Zero step: any -> terminated. Added to cope with malformed threads.
    /// First step: allocated -> terminated.
    /// Second step: non-allocable -> allocable.
    /// Third step: allocable -> allocated, or future -> allocated.
    ///
    /// The name and the structure of this method are ugly. They are inherited
    /// from the whole visitor's structure; anyway we could simply switch on
    /// the state of the SubRequest object, and we could (should?) factor out
    /// the operations which check if the request is allocable or not, depending
    /// on the queue position. Anyway, this factoring may lead to code dispersion.
    /// I think it is better to hold the computational core in one single place.
    void
    extendSubRequest(DynamicSubRequest* sp)
    {
        DynamicSubRequest& s = *sp;
        switch (walk)
        {
        /// Terminates directly the subrequest.
        case walk_to_sweep:
        {
            s.set_state(Request::state_exhausted);
            /// Remove the subrequest (pointer) from the queue.
            /// FIX: erase() invalidates the iterator, so break out right after
            /// erasing instead of letting the loop increment a dead iterator.
            typedef Environment::SubRequestQueue SubRequestQueue;
            SubRequestQueue& queue = env->get_request_queue(s.get_resource_key());
            for (SubRequestQueue::iterator it = queue.begin(); it != queue.end(); ++it)
                if ((*it) == sp)
                {
                    queue.erase(it);
                    break;
                }
            break;
        }
        /// Updates the state of an ALLOCATED subrequest, decreasing appropriate
        /// counters, and checks if it becomes TERMINATED. In the latter case the
        /// function finds the position of the subrequest (pointer) in the
        /// requested resource's queue and removes it.
        case walk_to_advance:
        {
            if (s.get_state() != Request::state_allocated)
                break;
            /// Decrease remaining time, since the resource has been used.
            s.decrease_remaining_time();
            /// Check for termination.
            if (s.get_remaining_time() == 0)
            {
                s.set_state(Request::state_exhausted);
                /// Remove the subrequest (pointer) from the queue.
                /// FIX: same iterator-invalidation fix as in walk_to_sweep.
                typedef Environment::SubRequestQueue SubRequestQueue;
                SubRequestQueue& queue = env->get_request_queue(s.get_resource_key());
                for (SubRequestQueue::iterator it = queue.begin(); it != queue.end(); ++it)
                {
                    if ((*it) == &s)
                    {
                        queue.erase(it);
                        break;
                    }
                }
            }
            break;
        }
        /// Updates the state of a non-ALLOCATED subrequest, in case it becomes
        /// ALLOCABLE or UNALLOCABLE, which may happen only when a resource has
        /// been released.
        /// We could skip this check if no resource were released.
        /// It finds the position of the subrequest (pointer) in the requested
        /// resource's queue. If the position is within the places of the
        /// resource, the subrequest is ALLOCABLE.
        ///
        case walk_to_update:
        {
            if (s.get_state() != Request::state_unallocable
                && s.get_state() != Request::state_allocable)
                break;
            /// The subrequest is in the queue for sure. Let's find it!
            /// I really need an integer for this operation.
            unsigned int position = 0;
            Environment::SubRequestQueue& queue = env->get_request_queue(s.get_resource_key());
            Environment::SubRequestQueue::iterator it = queue.begin();
            while (it != queue.end())
            {
                if (*it == &s)
                    break;
                ++it;
                position++;
            }
            /// Watch out: in a resource with 2 places, 0 and 1 are valid queue
            /// positions, 2 is right one place out.
            s.set_state(position >= env->get_resources().find(s.get_resource_key())->second->get_places() ?
                Request::state_unallocable : Request::state_allocable);
            break;
        }
        /// Updates the state of a FUTURE subrequest when the time has come
        /// for it to be raised, setting it as allocable if it is the case,
        /// or blocking it. Enqueues the subrequest (pointer) at the end of the
        /// requested resource's queue.
        /// The resource policy should be called to manage the queue.
        /// If the position is within the places of the resource, the subrequest
        /// is ALLOCABLE, otherwise it is NON-ALLOCABLE.
        case walk_to_allocate_cpu:
        {
            if (s.get_state() != Request::state_future)
                break;
            Environment::SubRequestQueue& queue = env->get_request_queue(s.get_resource_key());
            /// Enqueue the subrequest at the back of the queue.
            queue.push_back(&s);
            /// TODO: right here, right now we should call the resource policy to
            /// update the queue. Updates the state of the subrequest depending
            /// on the position in the queue, as explained before.
            s.set_state(queue.size() > env->get_resources().find(s.get_resource_key())->second->get_places() ?
                Request::state_unallocable : Request::state_allocable);
            // Oh I miss ML so much.
            break;
        }
        /// This is ugly, but hey, no one's perfect.
        /// Updates the state of an ALLOCABLE subrequest allocating it.
        case walk_to_allocate_resources:
        {
            if (s.get_state() == Request::state_allocable)
                s.set_state(Request::state_allocated);
            break;
        }
        }
    }

    /// \brief Manages a single Request object, depending on its state.
    /// Updates the state of a request, depending on its state, recursively
    /// updating the contained subrequests. The state of the request is then
    /// a function of the states of the subrequests.
    ///
    /// Zero step: any -> terminated. Added to cope with malformed threads.
    /// First step: allocated -> terminated.
    /// Second step: non-allocable -> allocable.
    /// Third step: allocable -> allocated, or future -> allocated.
    ///
    /// The following function may be reduced to a pair of lines.
    ///
    /// Longer, safer and more efficient version (and hopefully much easier
    /// to understand!)
    void
    extendRequest(DynamicRequest& r)
    {
        switch (walk)
        {
        case walk_to_sweep:
        {
            typedef vector<DynamicSubRequest*> SubRequests;
            SubRequests list = r.get_dynamic_subrequests();
            for (SubRequests::iterator it = list.begin(); it != list.end(); ++it)
                extendSubRequest(*it);
            break;
        }
        /// Updates the state of an ALLOCATED request.
        case walk_to_advance:
        {
            if (r.get_state() != Request::state_allocated)
                break;
            typedef vector<DynamicSubRequest*> SubRequests;
            SubRequests list = r.get_dynamic_subrequests();
            for (SubRequests::iterator it = list.begin(); it != list.end(); ++it)
                extendSubRequest(*it);
            break;
        }
        /// Updates the state of a NON-ALLOCABLE request.
        case walk_to_update:
        {
            if (r.get_state() != Request::state_unallocable)
                break;
            typedef vector<DynamicSubRequest*> SubRequests;
            SubRequests list = r.get_dynamic_subrequests();
            for (SubRequests::iterator it = list.begin(); it != list.end(); ++it)
                extendSubRequest(*it);
            break;
        }
        /// Updates the state of an ALLOCABLE or FUTURE request.
        case walk_to_allocate_cpu:
        {
            /// This is the only interesting case. If the current instant, measured
            /// over the containing process execution time, is equal to the instant
            /// in which the request has to be raised, the subrequests are
            /// recursively updated for the first time ever, and their status
            /// changes from FUTURE to something else.
            if (r.get_state() == Request::state_future
                && r.get_instant() == front)
            {
                typedef vector<DynamicSubRequest*> SubRequests;
                SubRequests list = r.get_dynamic_subrequests();
                for (SubRequests::iterator it = list.begin(); it != list.end(); ++it)
                    extendSubRequest(*it);
            }
            /// Finally, allocates the request if possible.
            /// NOTE: walk is temporarily switched so the subrequest walk runs
            /// the allocation phase, then restored.
            if (r.get_state() == Request::state_allocable)
            {
                typedef vector<DynamicSubRequest*> SubRequests;
                SubRequests list = r.get_dynamic_subrequests();
                walk = walk_to_allocate_resources;
                for (SubRequests::iterator it = list.begin(); it != list.end(); ++it)
                    extendSubRequest(*it);
                walk = walk_to_allocate_cpu;
            }
            break;
        }
        }
    }

    /// \brief Manages a single Thread object, depending on its state.
    ///
    /// First step: running -> terminated, or running -> running, or
    /// running -> ready.
    /// Second step: future -> ready or blocked -> ready.
    /// Third step: ready -> running, or ready -> blocked
    ///
    /// The front is shifted to reflect the thread time: this is useful
    /// for reasons analogous to those found in extendProcess.
    void
    extendThread(DynamicThread& t)
    {
        /// Shifts the front. The old front will be restored on exit.
        /// (unsigned to match the member it saves/restores)
        unsigned int old_front = front;
        front = t.get_elapsed_time();
        switch (walk)
        {
        /*
        /// If the thread is RUNNING, its requests are updated in cascade,
        /// the counters are decreased, and the state is updated depending
        /// on the remaining time and the remaining quantum.
        case walk_to_advance:
        {
            if (t.state != "RUNNING")
                break;
            /// Update the requests.
            for (int j = 0; j < t.requests.size(); j++)
                t.requests[j].accept(*this);
            t.decrease_remaining_time();
            /// If the quantum is finished, we may need to change the state
            /// of the thread to ready.
            if (t.get_remaining_cpu_time() != 0 && t.remaining_quantum == 0
                && !priority_preemptable && quantum_preemptable)
            {
                t.state = "READY";
                env->readyqueue.erase(env->readyqueue.begin());
                env->readyqueue.push_back(&t);
                /// FIXME: we should call the policy, with event "arrival";
                /// this works for round robin only.
            }
            /// If the remaining is finished, the thread is terminated.
            if (t.get_remaining_cpu_time() == 0)
            {
                t.state = "TERMINATED";
                env->readyqueue.erase(env->readyqueue.begin());
            }
            break;
        }
        /// If the thread is FUTURE, checks if it's time to arrive.
        /// If the thread is BLOCKED, checks if requests have changed
        /// their status since the last time, and if none of them is
        /// BLOCKED, the thread state is set to READY.
        case walk_to_update:
        {
            /// Remember, front has been localized to current thread time.
            if (t.state == "FUTURE" && t.arrival == old_front)
            {
                t.state = "READY";
                env->readyqueue.push_back(&t);
                /// FIXME: we should call the policy, with event "new arrival".
            }
            if (t.state == "BLOCKED")
            {
                bool blocked = false;
                for (int j = 0; j < t.requests.size() && !blocked; j++)
                {
                    t.requests[j].accept(*this);
                    if (t.requests[j].get_state() == "NON-ALLOCABLE")
                        blocked = true;
                }
                if (!blocked)
                {
                    t.state = "READY";
                    env->readyqueue.push_back(&t);
                    /// FIXME: we should call the policy, with event "arrival".
                }
            }
            break;
        }
        */
        /// If the thread is the first on the ready_queue, try to run. The
        /// thread may block on a request.
        case walk_to_allocate_cpu:
        {
            ReadyQueue & queue = env->get_sorted_queue();
            /// Check if we are eligible to run.
            if (queue.size() != 0 && &t == &queue.get_item_at(0))
            {
                /// Let's try to run!
                t.set_state(Schedulable::state_running);
                typedef vector<DynamicRequest*> Requests;
                Requests list = t.get_dynamic_requests();
                for (Requests::iterator it = list.begin(); it != list.end()
                     && t.get_state() != Schedulable::state_blocked; ++it)
                {
                    extendRequest(**it);
                    /// If one request is not allocable, the thread can't run.
                    if ((**it).get_state() == Request::state_unallocable)
                    {
                        t.set_state(Schedulable::state_blocked);
                        queue.erase_first();
                    }
                }
                // /// If the thread is runnable, we may need to refill its quantum.
                // if (t.state == "RUNNING" && t.remaining_quantum == 0)
                //     t.remaining_quantum = quantum_size;
            }
            break;
        }
        }
        front = old_front;
    }

// private class members
private:
    /// Environment snapshot the walks operate on (owned by the caller).
    auto_ptr<ConcreteEnvironment> & env;
    /// Current phase; temporarily mutated by extendRequest while allocating.
    int walk;
    /// Current instant over the containing process execution time;
    /// temporarily shifted by extendThread.
    unsigned int front;
};
// ------------------ Static helper functions --------------
@ -70,235 +449,13 @@ static void prepare_ready_queue(ConcreteEnvironment& snapshot,
typedef std::vector<DynamicThread*> Threads;
ReadyQueue& queue = snapshot.get_sorted_queue();
assert(queue.size() == 0);
for(Threads::const_iterator it = all_threads.begin();
it != all_threads.end(); it++)
{
if((*it)->get_state() == Schedulable::state_ready)
queue.append(**it);
}
}
// For the current thread, see if there are requests that are exhausted
// see extendRequest, case 0 and 1
// static void
// update_requests_for_old_running_thread(DynamicThread& running_thread)
// {
// }
/// \brief Manages a single SubRequest object, depending on its state.
/// Zero step: any -> terminated. Added to cope with malformed threads.
/// First step: allocated -> terminated.
/// Second step: non-allocable -> allocable.
/// Third step: allocable -> allocated, or future -> allocated.
///
/// The name and the structure of this method are ugly. They are inherited
/// from the whole visitor's structure, anyway we could simply switch on of
/// the state the SubRequest obejct, and we could (should?) factor out the
/// operations which check if the request is allocable or not, depending on
/// the queue position. Anyway, this factoring may lead to code dispersion.
/// I think it is better to hold the computational core in one single place.
/// Free-function predecessor of Extender::extendSubRequest: advances one
/// SubRequest's state machine for the given walk step.
/// \param sp    Subrequest to update (must not be NULL).
/// \param env   Environment snapshot holding the per-resource queues.
/// \param walk  Step selector: 0 = sweep (force-terminate), 1 = advance;
///              steps 2-4 are disabled below.
/// \param front Current instant (unused by the active steps).
void
extendSubRequest(DynamicSubRequest* sp, auto_ptr<ConcreteEnvironment> & env, int walk, int front)
{
    DynamicSubRequest& s = *sp;
    switch (walk)
    {
    /// Terminates directly the subrequest.
    case 0:
    {
        s.set_state(Request::state_exhausted);
        /// Remove the subrequest (pointer) from the queue.
        /// FIX: erase() invalidates the iterator, so break out right after
        /// erasing instead of letting the loop increment a dead iterator.
        typedef Environment::SubRequestQueue SubRequestQueue;
        SubRequestQueue& queue = env->get_request_queue(s.get_resource_key());
        for (SubRequestQueue::iterator it = queue.begin(); it != queue.end(); ++it)
            if ((*it) == sp)
            {
                queue.erase(it);
                break;
            }
        break;
    }
    /// Updates the state of an ALLOCATED subrequest, decreasing appropriate
    /// counters, and checks if it becomes TERMINATED. In the latter case the
    /// function finds the position of the subrequest (pointer) in the
    /// requested resource's queue and removes it.
    case 1:
    {
        if (s.get_state() != Request::state_allocated)
            break;
        /// Decrease remaining time, since the resource has been used.
        s.decrease_remaining_time();
        /// Check for termination.
        if (s.get_remaining_time() == 0)
        {
            s.set_state(Request::state_exhausted);
            /// Remove the subrequest (pointer) from the queue.
            /// FIX: same iterator-invalidation fix as in case 0.
            typedef Environment::SubRequestQueue SubRequestQueue;
            SubRequestQueue& queue = env->get_request_queue(s.get_resource_key());
            for (SubRequestQueue::iterator it = queue.begin(); it != queue.end(); ++it)
            {
                if ((*it) == sp)
                {
                    queue.erase(it);
                    break;
                }
            }
        }
        break;
    }
    /*
    /// Updates the state of a NON-ALLOCABLE subrequest, in case it becomes
    /// ALLOCABLE, which may happen only when a resource has been released.
    /// We could skip this check if no resource were released.
    /// It finds the position of the subrequest (pointer) in the requested
    /// resource's queue. If the position is within the places of the
    /// resource, the subrequest is ALLOCABLE.
    case 2:
    {
        if (s.get_state() != Request::state_allocated)
            break;
        /// The subrequest is in the queue for sure. Let's find it!
        /// I really need an integer for this operation.
        int position = 0;
        while (position <= s.resource_ptr->queue.size())
        {
            if (s.resource_ptr->queue[position]->has_same_id(s))
                /// Found!
                break;
            /// This statement is not executed if we find it.
            position++;
        }
        /// Watch out: in a resource with 2 places, 0 and 1 are valid queue
        /// positions, 2 is right one place out.
        if (position < s.resource_ptr->places)
            /// If so, set it ALLOCABLE.
            s.set_state("ALLOCABLE");
        break;
    }
    /// Updates the state of a FUTURE subrequest when the time has come
    /// for it to be raised, setting it as allocable if it is the case,
    /// or blocking it. Enqueues the subrequest (pointer) at the end of the
    /// requested resource's queue.
    /// The resource policy should be called to manage the queue.
    /// If the position is within the places of the resource, the subrequest
    /// is ALLOCABLE, otherwise it is NON-ALLOCABLE.
    case 3:
    {
        if (s.get_state() != "FUTURE")
            break;
        /// Enqueue the subrequest at the back of the queue.
        s.resource_ptr->queue.push_back(&s);
        /// TODO: right here, right now we should call the resource policy to
        /// update the queue. Updates the state of the subrequest depending
        /// on the position in the queue, as explained before.
        s.set_state(s.resource_ptr->queue.size() > s.resource_ptr->places ?
            "NON-ALLOCABLE" : "ALLOCABLE");
        // Oh I miss ML so much.
        break;
    }
    /// This is ugly, but hey, no one's perfect.
    /// Updates the state of an ALLOCABLE subrequest allocating it.
    case 4:
    {
        if (s.get_state() == "ALLOCABLE")
            s.set_state("ALLOCATED");
        break;
    }
    */
    }
}
/// \brief Manages a single Request object, depending on its state.
/// Updates the state of a request, depending on its state, recursively
/// updating the contained subrequests. The state of the request is then
/// a function of the states of the subrequests.
///
/// Zero step: any -> terminated. Added to cope with malformed threads.
/// First step: allocated -> terminated.
/// Second step: non-allocable -> allocable.
/// Third step: allocable -> allocated, or future -> allocated.
///
/// The following function may be reduced to a pair of lines.
///
/// Longer, safer and more efficient version (and hopefully much easier
/// to understand!)
/// Free-function predecessor of Extender::extendRequest: advances one
/// Request's state machine for the given walk step by cascading onto its
/// subrequests.
/// \param rp    Request to update (must not be NULL).
/// \param env   Environment snapshot, forwarded to extendSubRequest.
/// \param walk  Step selector: 0 = sweep (unconditional cascade),
///              1 = advance (cascade only if the request is ALLOCATED);
///              steps 2-3 were disabled and have been dropped here.
/// \param front Current instant, forwarded to extendSubRequest.
void
extendRequest(DynamicRequest* rp, auto_ptr<ConcreteEnvironment> & env, int walk, int front)
{
    DynamicRequest& r = *rp;

    // Step 1 touches only requests that are currently ALLOCATED;
    // every other state is left untouched.
    if (walk == 1 && r.get_state() != Request::state_allocated)
        return;

    // Steps other than 0 and 1 are no-ops in this legacy version.
    if (walk != 0 && walk != 1)
        return;

    // Both active steps simply cascade onto every contained subrequest.
    typedef vector<DynamicSubRequest*> SubRequests;
    SubRequests subs = r.get_dynamic_subrequests();
    for (SubRequests::iterator sub = subs.begin(); sub != subs.end(); ++sub)
        extendSubRequest(*sub, env, walk, front);
}
// ---------------------------------------------------------
@ -373,7 +530,12 @@ Scheduler::step_forward(History& history, CPUPolicy& cpu_policy) throw(UserInter
{
Process& parent = current.get_process();
if(parent.get_elapsed_time() == current.get_arrival_time())
{
current.set_state(Schedulable::state_ready);
// in this way we will never have threads ready having remaining time == 0
if (current.get_elapsed_time() == current.get_total_cpu_time())
current.set_state(Schedulable::state_terminated);
}
}
// Save the current running thread for future usage, if it hasn't ended
@ -406,40 +568,59 @@ Scheduler::step_forward(History& history, CPUPolicy& cpu_policy) throw(UserInter
if(running_thread != NULL)
{
bool running_terminated = running_thread->get_state() == Schedulable::state_terminated;
Extender e(new_snapshot,
running_terminated ? Extender::walk_to_sweep : Extender::walk_to_advance,
running_thread->get_elapsed_time());
Requests& reqs = running_thread->get_dynamic_requests();
for(Requests::iterator r_it = reqs.begin(); r_it != reqs.end(); r_it++)
extendRequest(*r_it, new_snapshot, running_terminated ? 0 : 1, running_thread->get_elapsed_time());
e.extendRequest(**r_it);
}
// Unblock blocked threads.. by discovering just-now-allocable requests
for(Threads::iterator it = all_threads.begin(); it != all_threads.end(); it++)
{
DynamicThread& current = **it;
// for each still blocked thread
if(current.get_state() == Schedulable::state_blocked)
{
// Since it was blocked then one and only one (why?) request is unallocable.
// lets' find it and see if our information is outdated (i.e. now it is
// allocable.
bool blocked = false;
// for each request
Requests& reqs = current.get_dynamic_requests();
Extender e(new_snapshot, Extender::walk_to_advance, current.get_elapsed_time());
for(Requests::iterator r_it = reqs.begin(); r_it != reqs.end() && !blocked; r_it++)
{
// update its state.
e.extendRequest(**r_it);
// if it is still unallocable, leave the thread blocked
if ((**r_it).get_state() == Request::state_unallocable)
blocked = true;
}
// if no blocked request has been found, the thread is ready.
if (!blocked)
current.set_state(Schedulable::state_ready);
}
}
// ---------- FIXME ----------------
// Check correctness: Now if the simulation ended we
// append the newly created environment and return false
if(simulation_ended) goto final_cleanup;
/* /
* /
* /
* (I'M HERE) < * * * * * * * * * * *
* \
* \
* \
*
* (is it visible enough for you?)
*/
bool preemptible_policy;
unsigned int time_slice;
try
{
{
// Temporarily set the _ready_queue param and the _policy one for
// use from external plugins
_policy = &cpu_policy;
_ready_queue = &new_snapshot->get_sorted_queue();
// ?. Use the policy to sort the queue
preemptible_policy = cpu_policy.is_pre_emptive();
@ -458,107 +639,79 @@ Scheduler::step_forward(History& history, CPUPolicy& cpu_policy) throw(UserInter
// Try to continue running the current running thread
if (running_thread != NULL && running_thread->get_state() == Schedulable::state_running)
{
// the thread may block on raising a request
Requests& reqs = running_thread->get_dynamic_requests();
for(Requests::iterator r_it = reqs.begin(); r_it != reqs.end(); r_it++)
{
DynamicRequest& rq = **r_it;
if (rq.get_state() == Request::state_future
&& rq.get_instant() == running_thread->get_elapsed_time())
{
SubRequests& subreqs = rq.get_dynamic_subrequests();
for(SubRequests::iterator s_it = subreqs.begin(); s_it != subreqs.end(); s_it++)
{
DynamicSubRequest& subr = **s_it;
Environment::SubRequestQueue& queue = new_snapshot->get_request_queue(subr.get_resource_key());
queue.push_back(*s_it);
// FIXME this code has to control the position in the queue
if(subr.get_state() == Request::state_future)
subr.set_state(Request::state_allocated);
}
}
}
}
// ---------------------------------------------------------------------------------
// ------------------------------- 3 ----------------------------------------
// ---------------------------------------------------------------------------------
// the problem is that a blocked thread is in the ready queue, but here we
// should collect ready threads only.
// an obvious solution is to remove it manually, but hey, we must understand
// what s happening
prepare_ready_queue(*new_snapshot, all_threads);
if (_ready_queue->size() != 0)
cpu_policy.sort_queue();
bool found = false;
// ?. Ask the policy to sort the queue. We do this multiple time if we must select
// a new thread and it can't run for some reason (goes blocked, or terminates).
bool found = true;
do
// Try to continue running the current running thread
if (running_thread != NULL && running_thread->get_state() == Schedulable::state_running)
{
found = true;
// the thread may block on raising a request
Requests& reqs = running_thread->get_dynamic_requests();
for(Requests::iterator r_it = reqs.begin(); r_it != reqs.end()
&& running_thread->get_state() != Schedulable::state_blocked; r_it++)
{
Extender e(new_snapshot, Extender::walk_to_allocate_cpu, running_thread->get_elapsed_time());
e.extendRequest(**r_it);
if ((**r_it).get_state() == Request::state_unallocable)
{
found = true;
prepare_ready_queue(*new_snapshot, all_threads);
if(_ready_queue->size() == 0)
break; // No sense in trying to schedule something that isn't there
cpu_policy.sort_queue();
DynamicThread& candidate = (DynamicThread&) _ready_queue->get_item_at(0);
// the thread may block on raising a request
Requests& reqs = candidate.get_dynamic_requests();
for(Requests::iterator r_it = reqs.begin(); r_it != reqs.end(); r_it++)
{
DynamicRequest& rq = **r_it;
if (rq.get_state() == Request::state_future
&& rq.get_instant() == candidate.get_elapsed_time())
{
SubRequests& subreqs = rq.get_dynamic_subrequests();
for(SubRequests::iterator s_it = subreqs.begin(); s_it != subreqs.end(); s_it++)
{
DynamicSubRequest& subr = **s_it;
Environment::SubRequestQueue& queue = new_snapshot->get_request_queue(subr.get_resource_key());
queue.push_back(*s_it);
// FIXME this code has to control the position in the queue
if(subr.get_state() == Request::state_future)
subr.set_state(Request::state_allocated);
}
}
}
// We could have threads with 0 duration. Silly, but possible.
// the silly thing was to permit the user to do this :P
if(candidate.get_total_cpu_time() - candidate.get_elapsed_time() == 0)
{
candidate.set_last_acquisition(current_instant);
candidate.set_last_release(current_instant);
candidate.set_state(Schedulable::state_terminated);
// FIXED : check requests for thread at instant 0?
// the real question is: should we check for requests raised at
// the thread last instant? the answer is: who cares?
// doing it or not is a matter of cut-n-paste
found = false;
continue;
}
// FIXME : check if the first thread of the queue blocks
running_thread->set_state(Schedulable::state_blocked);
found = false;
}
while(!found);
}
} // end trying to continue old running thread
// ?. Finally select the new thread (if appropriate); now we're sure it can run
if(_ready_queue->size() > 0 && (running_thread == NULL || running_thread->get_state() != Schedulable::state_running))
// if the old running thread may not directly continue, pick from the ready queue.
//int debug = 1000;
while(_ready_queue->size() != 0 && !found)//&& debug-- > 0);
{
// try with the first on the queue
found = true;
DynamicThread& candidate = (DynamicThread&) _ready_queue->get_item_at(0);
Requests& reqs = candidate.get_dynamic_requests();
for(Requests::iterator r_it = reqs.begin(); r_it != reqs.end(); r_it++)
{
Extender e(new_snapshot, Extender::walk_to_allocate_cpu, candidate.get_elapsed_time());
e.extendRequest(**r_it);
if ((**r_it).get_state() == Request::state_unallocable)
{
// Fix fields of running thread
DynamicThread& new_running = (DynamicThread&) _ready_queue->get_item_at(0);
new_running.set_state(Schedulable::state_running);
new_running.set_last_acquisition(current_instant);
// removes running element from the ready queue
// since no method was provided to erase an item in the queue, we should rebuild it.
// this is pointless. I just added the method.
// rebuilding the ready queue may corrupt the order set by the policy:
// this is not acceptable, nor it is asking the policy to resort it.
_ready_queue->erase_first();
candidate.set_state(Schedulable::state_blocked);
// the element is not ready any more, so we must remove it from the queue?
found = false;
}
}
if (found)
{
candidate.set_state(Schedulable::state_running);
candidate.set_last_acquisition(current_instant);
}
_ready_queue->erase_first();
//cpu_policy.sort_queue();
} // end picking from ready queue
if (!found)
simulation_ended = true;
}
} // end of try
catch(UserInterruptException& e)
{
// Reset values that the policy doesn't need anymore
@ -586,19 +739,3 @@ Scheduler::step_forward(History& history, CPUPolicy& cpu_policy) throw(UserInter
// If we got there, a step has been performed
return simulation_ended == false;
}

View File

@ -0,0 +1,56 @@
set cpu-policy 3
add resource
forchetta
false
1
0
add process
Scuola di Mileto
0
0
add thread 1
Anassimandro
3
0
0
add thread 1
Anassimene
2
2
0
add thread 1
Eraclito
2
2
0
add request 1 1
1
add subrequest 1 1 1
0
3
add request 1 1
2
add subrequest 1 1 2
0
2
add request 1 2
0
add subrequest 1 2 1
0
1
add request 1 3
0
add subrequest 1 3 1
0
1
run

View File

@ -0,0 +1,54 @@
set cpu-policy 3
add resource
ashi
false
2
0
add resource
forchetta
false
1
0
add process
Scuola di Mileto
0
0
add thread 1
Anassimandro
4
0
0
add thread 1
Anassimene
6
1
0
add thread 1
Pitagora
2
1
0
add request 1 1
0
add subrequest 1 1 1
1
4
add request 1 2
0
add subrequest 1 2 1
1
2
add request 1 3
0
add subrequest 1 3 1
1
2
run