// src/backend/scheduler.cc - Copyright 2005, 2006, University
//                            of Padova, dept. of Pure and Applied
//                            Mathematics
//
// This file is part of SGPEMv2.
//
// This is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// SGPEMv2 is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with SGPEMv2; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#include "concrete_environment.hh"
#include "concrete_history.hh"
#include "policy.hh"
#include "schedulable.hh"
#include "scheduler.hh"
#include "policy_manager.hh"
#include "policies_gatekeeper.hh"
#include "user_interrupt_exception.hh"
// Do not include full template definition in the header file
#include "singleton.tcc"
#include <memory>
using namespace std;
using namespace sgpem;
// Explicit template instantiation to allow exporting symbols from the DSO.
template class SG_DLLEXPORT Singleton<Scheduler>;
// ------------------ Static helper functions --------------
// Collects all threads of an environment into a single vector
static void
collect_threads(const std::vector<Process*>& procs,
std::vector<DynamicThread*>& collected_threads)
{
typedef std::vector<Process*> Processes;
typedef std::vector<DynamicThread*> Threads;
collected_threads.clear();
for(Processes::const_iterator it1 = procs.begin(); it1 != procs.end(); it1++)
{
const Threads& ts = ((DynamicProcess&) **it1).get_dynamic_threads();
collected_threads.insert(collected_threads.end(), ts.begin(), ts.end());
}
}
static void
free_all_resources_of(DynamicThread& ended_thread)
{
typedef std::vector<DynamicRequest*> Requests;
typedef std::vector<DynamicSubRequest*> SubRequests;
Requests& reqs = ended_thread.get_dynamic_requests();
for(Requests::iterator it = reqs.begin(); it != reqs.end(); it++)
{
// FIXME : write me
// Where is "state_fulfilled" or similar in Request::state?
}
}
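// Hedged sketch of what the empty loop above could eventually do. It assumes
// (names are NOT confirmed by the current headers, as the FIXME above notes)
// that DynamicRequest exposes its sub-requests via get_dynamic_subrequests()
// and that Request::state gains a value such as state_exhausted:
#if 0
for(Requests::iterator it = reqs.begin(); it != reqs.end(); ++it)
{
    // Release every sub-request still held by the ended thread
    SubRequests& subs = (*it)->get_dynamic_subrequests(); // hypothetical accessor
    for(SubRequests::iterator sit = subs.begin(); sit != subs.end(); ++sit)
        (*sit)->set_state(Request::state_exhausted); // hypothetical state value
}
#endif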
// ---------------------------------------------------------
// Private constructor.
Scheduler::Scheduler()
: _policy_manager(PolicyManager::get_registered_manager())
{
_policy_manager.init();
}
ReadyQueue*
Scheduler::get_ready_queue()
{
// FIXME: return the correct queue according to the value returned by Policy::wants()
return &_ready_queue;
}
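// Hedged sketch of what get_ready_queue() could become once the FIXME above is
// resolved. It assumes the Scheduler keeps one queue per sorting mode and that
// Policy::wants() returns an enumerated value telling whether the policy sorts
// processes or threads; the names policy_sorts_processes, _process_queue and
// _thread_queue are hypothetical and not part of the current class.
#if 0
ReadyQueue*
Scheduler::get_ready_queue()
{
    if(get_policy().wants() == Policy::policy_sorts_processes)
        return &_process_queue; // hypothetical member
    else
        return &_thread_queue;  // hypothetical member
}
#endif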
/** \note It is essential that this method store locally whether the current
policy is preemptive or not, as well as the duration of the time quantum, since
the policy is free to change these parameters at will while the simulation is
running.
*/
void
Scheduler::reset_status()
{
// DEPRECATED (?)
}
Policy&
Scheduler::get_policy()
{
// FIXME: no return statement; falling off the end of a non-void function is undefined behavior.
// return *PoliciesGatekeeper::get_instance().get_current_policy(&History::get_instance());
}
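// Hedged sketch for the FIXME above, simply enabling the commented-out line:
// whether PoliciesGatekeeper should be keyed on the History singleton or on
// the History instance passed to step_forward() is still an open design
// question, so this stays disabled.
#if 0
Policy&
Scheduler::get_policy()
{
    return *PoliciesGatekeeper::get_instance().get_current_policy(&History::get_instance());
}
#endif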
void
Scheduler::step_forward(History& history, Policy& cpu_policy) throw(UserInterruptException)
{
// NOTE: Be sure to read the *ORIGINAL* documentation in the design document for this method!
// FIXME: simulation_ended is computed below but never acted upon; it still
// needs to be used to signal the end of the simulation to the caller.
bool simulation_ended = true; // Assume we've finished. Then prove me wrong.
ConcreteHistory& concrete_history = (ConcreteHistory&) history;
// Use an auto_ptr, since exceptions may be thrown before we hand the snapshot over...
auto_ptr<ConcreteEnvironment> new_snapshot(new ConcreteEnvironment(concrete_history.get_last_environment()));
typedef std::vector<DynamicProcess*> Processes;
typedef std::vector<DynamicRequest*> Requests;
typedef std::vector<DynamicSubRequest*> SubRequests;
typedef std::vector<DynamicThread*> Threads;
Threads all_threads;
DynamicThread* running_thread = NULL;
collect_threads(new_snapshot->get_processes(), all_threads);
// designer + implementer (Matteo) comment follows:
for(Threads::iterator it = all_threads.begin(); it != all_threads.end(); it++)
{
DynamicThread& current = **it;
// 1. mark future threads as ready, if appropriate
if(current.get_state() == Schedulable::state_future)
{
Process& parent = current.get_process();
if(parent.get_elapsed_time() == current.get_arrival_time())
current.set_state(Schedulable::state_ready);
}
// Save the current running thread for later use; we keep the reference
// even if it has used up its allotted time and is about to be terminated
if(current.get_state() == Schedulable::state_running)
{
running_thread = &current; // Even if we change its state to terminated
// 2. mark threads that used all their allotted time as terminated
if(current.get_total_cpu_time() - current.get_elapsed_time() == 0)
current.set_state(Schedulable::state_terminated);
}
// 3. check for simulation termination (we can directly use threads
// for this check, since processes' state is based upon threads' one)
if(simulation_ended &&
((*it)->get_state() & (Schedulable::state_blocked |
Schedulable::state_terminated)) == 0)
simulation_ended = false;
}
// FIXME: increasing the elapsed time of the running thread and of its
// process should perhaps be done here, as the first thing, rather than
// directly when selecting them
if(running_thread != NULL)
{
running_thread->decrease_remaining_time();
running_thread->get_process().decrease_remaining_time();
}
// 4a. Requests for the running thread exhausted
if(running_thread != NULL) {
Requests& reqs = running_thread->get_dynamic_requests();
// FIXME: we lack a way to tell and/or remember for how long a
// subrequest has been fulfilled, which means this part is NOT complete.
// We should check whether a request has been fulfilled.
// FIXME: if a request was being fulfilled for the running thread,
// we should decrease the request's remaining time here.
// This is why we kept a reference to the old running thread,
// even if it has terminated.
if(running_thread->get_state() == Schedulable::state_terminated)
free_all_resources_of(*running_thread); // this function isn't complete
}
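// Hedged sketch of the missing bookkeeping described above. It assumes the
// request API grows a way to track fulfilment progress (the state value
// Request::state_allocated and the decrease_remaining_time() method below
// are hypothetical, not in the current headers):
#if 0
if(running_thread != NULL)
{
    Requests& reqs = running_thread->get_dynamic_requests();
    for(Requests::iterator it = reqs.begin(); it != reqs.end(); ++it)
        if((*it)->get_state() == Request::state_allocated) // hypothetical state value
            (*it)->decrease_remaining_time();              // hypothetical method
}
#endif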
// *** WORK IN PROGRESS: the implementation has been written up to this ***
// *** point; what follows is still partial (see the FIXMEs below).     ***
ReadyQueue& ready_queue = new_snapshot->get_sorted_queue();
prepare_ready_queue(ready_queue);
try
{
// ?. Use the policy to sort the queue
// FIXME: how does it get the queue?
cpu_policy.sort_queue();
}
catch(UserInterruptException& e)
{
_policy_manager.init();
// ^^^^^
// Do we need to update anything else?
// While unwinding the stack, notify:
// - the user that the policy misbehaved
// - the SimulationController that the simulation has stopped
throw;
}
// append the new snapshot...
// ...and remember to release the auto_ptr!
concrete_history.append_new_environment(new_snapshot.release());
}
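// Hedged design note for the simulation_ended FIXME above: one way to make the
// flag useful would be for step_forward() to report it to the caller, e.g. by
// returning bool instead of void, so that SimulationController knows when to
// stop stepping the simulation. A hypothetical signature, not the current
// interface:
//
//     bool
//     Scheduler::step_forward(History& history, Policy& cpu_policy)
//         throw(UserInterruptException)
//     {
//         ...
//         concrete_history.append_new_environment(new_snapshot.release());
//         return simulation_ended;
//     }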