- Minor modifications to step_forward()
git-svn-id: svn://svn.gna.org/svn/sgpemv2/trunk@711 3ecf2c5c-341e-0410-92b4-d18e462d057c
parent 401c569a9f
commit 5ab575dffd
@@ -202,7 +202,6 @@ pkginclude_HEADERS += \
 	src/backend/process.hh \
 	src/backend/schedulable.hh \
 	src/backend/scheduler.hh \
-	src/backend/slice.hh \
 	src/backend/sub_request.hh \
 	src/backend/thread.hh \
 	src/backend/user_interrupt_exception.hh

@@ -116,127 +116,133 @@ Scheduler::get_policy()
 void
 Scheduler::step_forward(History& history, Policy& cpu_policy) throw(UserInterruptException)
 {
-	// // This very method should be exclusive: no concurrent behaviour, from when we
-	// // store a readyqueue and policy pointer for the user-policy to retrieve, to when
-	// // the policy returns
-	// // TODO: restrict this area to maximise parallelism
-	// Glib::Mutex::Lock lock(_mutex);
+	// This very method should be exclusive: no concurrent behaviour, from when we
+	// store a readyqueue and policy pointer for the user-policy to retrieve, to when
+	// the policy returns
+	// TODO: restrict this area to maximise parallelism
+	Glib::Mutex::Lock lock(_mutex);
 
-	// // NOTE: Be sure to read the *ORIGINAL* documentation in the design document for this method!
+	// NOTE: Be sure to read the *ORIGINAL* documentation in the design document for this method!
 
-	// // FIXME: handle me! I'm not just a pretty boolean, I want to be *USED*! *EXPLOITED*!
-	// // *RAPED*! *MAKE ME BLEED*!
-	// bool simulation_ended = true; // Assume we've finished. Then prove me wrong.
+	// FIXME: handle me! I'm not just a pretty boolean, I want to be *USED*! *EXPLOITED*!
+	// *RAPED*! *MAKE ME BLEED*!
+	bool simulation_ended = true; // Assume we've finished. Then prove me wrong.
 
-	// ConcreteHistory& concrete_history = (ConcreteHistory&) history;
+	ConcreteHistory& concrete_history = (ConcreteHistory&) history;
 
+	// Use an auto_ptr since we've some exceptions in the coming...
+	auto_ptr<ConcreteEnvironment> new_snapshot(new ConcreteEnvironment(concrete_history.get_last_environment()));
 
+	typedef std::vector<DynamicProcess*> Processes;
+	typedef std::vector<DynamicRequest*> Requests;
+	typedef std::vector<DynamicSubRequest*> SubRequests;
+	typedef std::vector<DynamicThread*> Threads;
 
+	Threads all_threads;
+	DynamicThread* running_thread = NULL;
 
+	collect_threads(new_snapshot->get_processes(), all_threads);
 
+	// designer + implementer (Matteo) comment follows:
 
+	for(Threads::iterator it = all_threads.begin(); it != all_threads.end(); it++)
+	{
+		DynamicThread& current = **it;
 
+		// 1. mark future threads as ready, if appropriate
+		if(current.get_state() == Schedulable::state_future)
+		{
+			Process& parent = current.get_process();
+			if(parent.get_elapsed_time() == current.get_arrival_time())
+				current.set_state(Schedulable::state_ready);
+		}
 
+		// Save the current running thread for future usage, if it hasn't ended
+		// its allotted time
+		if(current.get_state() == Schedulable::state_running)
+		{
+			// increasing the time elapsed of the running thread + process
+			// should be done here as the first thing, instead than
+			// directly after selecting them
+			running_thread->decrease_remaining_time();
 
+			running_thread = &current; // Even if we change its state to terminated
+			// 2. mark threads that used all their allotted time as terminated
+			if(current.get_total_cpu_time() - current.get_elapsed_time() == 0)
+				current.set_state(Schedulable::state_terminated);
+		}
 
+		// 3. check for simulation termination (we can directly use threads
+		// for this check, since processes' state is based upon threads' one)
+		if( /* we still think that */ simulation_ended &&
+			(current.get_state() & (Schedulable::state_blocked |
+			Schedulable::state_terminated)) == 0)
+			simulation_ended = false;
+	}
 
 
+	// ---------- FIXME ----------------
+	// What to do now if the simulation ended?
 
 
 
+	// 4a. Requests for the running thread exhausted
+	if(running_thread != NULL) {
+		Requests& reqs = running_thread->get_dynamic_requests();
+		if(running_thread->get_state() == Schedulable::state_terminated)
+		{
+			// for(Requests::iterator it = reqs.begin();
 
 
+			// FIXME we lack a way to tell and/or remember for how
+			// much a subrequest has been being fulfilled
+			// THIS MEANS this part is NOT complete
+			// We should check if a request has been fulfilled
 
+			// FIXME If a request was being fulfilled to the running thread,
+			// we should decrease the request remaining time here.
 
+			// This is why we kept a ref to the old running thread,
+			// even if it was terminated
 
+			free_all_resources_of(*running_thread); // this function isn't complete
 
+		}
 
 
+		// /
+		// /
+		// /
+		// (I'M HERE) < * * * * * * * * * * *
+		// \
+		// \
+		// \
 	//
-	// // Use an auto_ptr since we've some exceptions in the coming...
-	// auto_ptr<ConcreteEnvironment> new_snapshot(new ConcreteEnvironment(concrete_history.get_last_environment()));
-	//
-	// typedef std::vector<DynamicProcess*> Processes;
-	// typedef std::vector<DynamicRequest*> Requests;
-	// typedef std::vector<DynamicSubRequest*> SubRequests;
-	// typedef std::vector<DynamicThread*> Threads;
-	//
-	// Threads all_threads;
-	// DynamicThread* running_thread = NULL;
+	// (is it visible enough for you?)
 
-	// collect_threads(new_snapshot->get_processes(), all_threads);
 
-	// // designer + implementer (Matteo) comment follows:
 
-	// for(Threads::iterator it = all_threads.begin(); it != all_threads.end(); it++)
-	// {
-	// DynamicThread& current = **it;
-	//
-	// // 1. mark future threads as ready, if appropriate
-	// if(current.get_state() == Schedulable::state_future)
-	// {
-	// Process& parent = current.get_process();
-	// if(parent.get_elapsed_time() == current.get_arrival_time())
-	// current.set_state(Schedulable::state_ready);
-	// }
-	//
-	// // Save the current running thread for future usage, if it hasn't ended
-	// // its allotted time
-	// if(current.get_state() == Schedulable::state_running)
-	// {
-	// running_thread = &current; // Even if we change its state to terminated
-	// // 2. mark threads that used all their allotted time as terminated
-	// if(current.get_total_cpu_time() - current.get_elapsed_time() == 0)
-	// current.set_state(Schedulable::state_terminated);
-	// }
 
-	// // 3. check for simulation termination (we can directly use threads
-	// // for this check, since processes' state is based upon threads' one)
-	// if( /* we still think that */ simulation_ended &&
-	// (current.get_state() & (Schedulable::state_blocked |
-	// Schedulable::state_terminated)) == 0)
-	// simulation_ended = false;
-	// }
 
-	// // What to do now if the simulation ended?
 
 
-	// // FIXME: increasing the time elapsed of the running thread + process
-	// // should maybe be done here as the first thing, instead than
-	// // directly when selecting them
-	// if(running_thread != NULL)
-	// running_thread->decrease_remaining_time();
-	//
-	// // 4a. Requests for the running thread exhausted
-	// if(running_thread != NULL) {
-	// Requests& reqs = running_thread->get_dynamic_requests();
-	//
-	// // FIXME we lack a way to tell and/or remember for how
-	// // much a subrequest has been being fulfilled
-	// // THIS MEANS this part is NOT complete
-	// // We should check if a request has been fulfilled
+	ReadyQueue& ready_queue = new_snapshot->get_sorted_queue();
+	prepare_ready_queue(ready_queue);
+	try
+	{
+		// ?. Use the policy to sort the queue
 
-	// // FIXME If a request was being fulfilled to the running thread,
-	// // we should decrease the request remaining time here.
+		// FIXME: how does it get the queue?
+		cpu_policy.sort_queue();
+	}
+	catch(UserInterruptException& e)
+	{
+		_policy_manager.init();
+		// ^^^^^
+		// Do we need to update something else?
 
-	// // This is why we kept a ref to the old running thread,
-	// // even if it was terminated
-	// if(running_thread->get_state() == Schedulable::state_terminated)
-	// free_all_resources_of(*running_thread); // this function isn't complete
-	//
-	// }
-	//
+		// Going up unwinding the stack, tell:
+		// - the user that the policy sucks
+		// - SimulationController that everything stopped
+		throw;
+	}
 
-	// // /
-	// // /
-	// // /
-	// // (I'M HERE) < * * * * * * * * * * *
-	// // \
-	// // \
-	// // \
-	// //
-	// // (is it visible enough for you?)
+	// append the new snapshot...
+	// ...and remember to release the auto_ptr!
+	concrete_history.append_new_environment(new_snapshot.release());
 
-	//
-	// ReadyQueue& ready_queue = new_snapshot->get_sorted_queue();
-	// prepare_ready_queue(ready_queue);
-	// try
-	// {
-	// // ?. Use the policy to sort the queue
 
-	// // FIXME: how does it get the queue?
-	// cpu_policy.sort_queue();
-	// }
-	// catch(UserInterruptException& e)
-	// {
-	// _policy_manager.init();
-	// // ^^^^^
-	// // Do we need to update something else?
 
-	// // Going up unwinding the stack, tell:
-	// // - the user that the policy sucks
-	// // - SimulationController that everything stopped
-	// throw;
-	// }
-	//
-	// // append the new snapshot...
-	// // ...and remember to release the auto_ptr!
-	// concrete_history.append_new_environment(new_snapshot.release());
 }
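
Note on the state_running branch added above (an observation on the hunk, not part of the commit): running_thread->decrease_remaining_time() is called while running_thread is still NULL, because the pointer is initialised to NULL and only assigned on the following line. A possible reordering is sketched below, purely as a suggestion, reusing only the names that already appear in the diff:

    // Sketch only -- not what this commit does.
    // Remember the running thread first, then charge it for the tick it just used.
    if(current.get_state() == Schedulable::state_running)
    {
        running_thread = &current;                  // keep the ref even if we terminate it below
        running_thread->decrease_remaining_time();  // safe: the pointer is now valid

        // 2. mark threads that used all their allotted time as terminated
        if(current.get_total_cpu_time() - current.get_elapsed_time() == 0)
            current.set_state(Schedulable::state_terminated);
    }

This keeps the time accounting inside the branch while guaranteeing the pointer refers to the thread actually being charged.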