- Merge branch 0.3-r1003--scheduler-manage-preemption into trunk

git-svn-id: svn://svn.gna.org/svn/sgpemv2/trunk@1023 3ecf2c5c-341e-0410-92b4-d18e462d057c
This commit is contained in:
tchernobog 2006-09-06 12:29:22 +00:00
parent cb5d958790
commit df4b1b4205
11 changed files with 158 additions and 55 deletions

View file

@ -61,6 +61,7 @@ typedef Environment::SubRequestQueue SubRequestQueue;
// ------------------ Static helper functions --------------
inline bool is_running(const Thread* running_thread);
static void collect_threads(const std::vector<Process*>& procs, Threads& collected_threads);
static void prepare_ready_queue(ConcreteEnvironment& snapshot, const Threads& all_threads);
static void terminate_all_requests_of(DynamicThread& thread, ConcreteEnvironment& environment);
@ -75,6 +76,12 @@ static void determine_subr_allocable_status(const Resource& res, SubRequestQueue
// ---------------------------------------------------------
// Predicate: tells whether the passed thread pointer denotes a
// thread that currently holds the CPU (i.e. is non-null and in
// the "running" scheduling state).
bool
is_running(const Thread* running_thread)
{
	if (running_thread == NULL)
		return false;

	return running_thread->get_state() == Schedulable::state_running;
}
// Collects all threads of an environment into a single vector
void
@ -480,6 +487,7 @@ Scheduler::step_forward(History& history, CPUPolicy& cpu_policy, ResourcePolicy&
current.set_state(Schedulable::state_terminated);
current.set_last_release(current_instant);
terminate_all_requests_of(current, *new_snapshot);
running_thread = NULL;
}
// if we found the running thread, there isn't another one,
@ -536,29 +544,31 @@ Scheduler::step_forward(History& history, CPUPolicy& cpu_policy, ResourcePolicy&
_ready_queue = &new_snapshot->get_sorted_queue();
// Determine if the policy is pre_emptive, and what time slice it uses
bool preemptible_policy = cpu_policy.is_pre_emptive();
bool preemptive_policy = cpu_policy.is_pre_emptive();
int time_slice = cpu_policy.get_time_slice();
// ?. See if old running_thread has to be put to ready state
// This happens when the policy makes use of preemptability by
// priority, or when a time slice ended
if (running_thread != NULL && running_thread->get_state() == Schedulable::state_running &&
(preemptible_policy || (time_slice > 0 &&
// A process can be preempted every n-th time-slice, so we use the modulo operator
(current_instant - running_thread->get_last_acquisition()) % time_slice == 0) ))
// This happens when a time slice ends
if (is_running(running_thread) && time_slice > 0 &&
// A process can be preempted every n-th time-slice, so we use the modulo operator:
(current_instant - running_thread->get_last_acquisition()) % time_slice == 0)
{
running_thread->set_state(Schedulable::state_ready);
// We don't set the last_release parameter here. If necessary,
// we'll do that below, when selecting a new running thread,
// only if it's different from the previous one.
running_thread->set_last_release(current_instant);
}
prepare_ready_queue(*new_snapshot, all_threads);
// If the policy is preemptible, and we still have a running thread,
// add it to the queue.
if(preemptive_policy && is_running(running_thread))
_ready_queue->append(*running_thread);
// ?. Ask the policy to sort the queue. If we must select
// a new thread and it can't run for some reason (it goes blocked, or
// terminates), then we remove it from the built ReadyQueue and
// check if the next one can run.
prepare_ready_queue(*new_snapshot, all_threads);
// check if the next one can run (see while loop further below).
if(_ready_queue->size() > 0) cpu_policy.sort_queue();
// If we don't have to select a new running thread, because the old one didn't
@ -566,7 +576,11 @@ Scheduler::step_forward(History& history, CPUPolicy& cpu_policy, ResourcePolicy&
// * if the current running thread doesn't block, we can perform the final cleanup,
// since the queue is already sorted
// * else we've to select another running thread, so we continue down in the method
if(running_thread != NULL && running_thread->get_state() == Schedulable::state_running)
if( // Non-preemptive policy, not ended time-slice:
(!preemptive_policy && is_running(running_thread)) ||
// Pre-emptive policy, running thread still the first of the queue.
// Note: if is_running(running_thread) == true, then _ready_queue->size() > 0
(preemptive_policy && is_running(running_thread) && &_ready_queue->get_item_at(0) == running_thread) )
{
raise_new_requests(*running_thread, *new_snapshot, resource_policy);
if(running_thread->get_state() != Schedulable::state_blocked)
@ -602,18 +616,6 @@ Scheduler::step_forward(History& history, CPUPolicy& cpu_policy, ResourcePolicy&
continue;
}
// If the new running is different from the old one,
// remember to release our old pal, and to acquire our
// new runner.
// It'll sufficit that *one* candidate is different from
// the old running to trigger this, and it's rightly so.
if(&candidate != running_thread)
{
if(running_thread != NULL)
running_thread->set_last_release(current_instant);
candidate.set_last_acquisition(current_instant);
}
// Now we check if our candidate blocks on a new request
raise_new_requests(candidate, *new_snapshot, resource_policy);
if(candidate.get_state() != Schedulable::state_blocked)
@ -633,6 +635,19 @@ Scheduler::step_forward(History& history, CPUPolicy& cpu_policy, ResourcePolicy&
// Fix fields of running thread
DynamicThread& new_running = (DynamicThread&) _ready_queue->get_item_at(0);
new_running.set_state(Schedulable::state_running);
// If the new running is different from the old one,
// remember to release our old pal, and to acquire our
// new runner.
if(&new_running != running_thread)
{
if(running_thread != NULL)
{
running_thread->set_state(Schedulable::state_ready);
running_thread->set_last_release(current_instant);
}
new_running.set_last_acquisition(current_instant);
}
}
}