A Small Experiment to See How the Linux Kernel Scheduler Works

After process 0 (the idle process) finishes its initialization, it enters a while loop:

while (1)
{
    ...
    do_idle();    /* each iteration ends up in schedule_idle() below */
    ...
}

static void do_idle(void)
{
    ...
    schedule_idle();
    ...
}
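In the mainline source this loop is cpu_startup_entry() in kernel/sched/idle.c (abridged):

/* kernel/sched/idle.c (abridged) */
void cpu_startup_entry(enum cpuhp_state state)
{
    ...
    while (1)
        do_idle();
}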

void __sched schedule_idle(void)
{
  do {
    __schedule(false);
  } while (need_resched());
}
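The need_resched() test here just checks the TIF_NEED_RESCHED thread flag, the same flag we will see resched_curr() set further below (abridged from include/linux/sched.h and include/linux/thread_info.h):

static __always_inline bool need_resched(void)
{
    return unlikely(tif_need_resched());
}

#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)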

do_idle() thus calls schedule_idle(), which voluntarily invokes __schedule(false) (false meaning this is not a preemption) to let the scheduler pick the next task to run.

The experiment is simple: comment out the schedule_idle() call in do_idle(). The system should then have no way of ever reaching schedule(), so it ought to stall. And that is exactly what happens: the console stops responding entirely.
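Concretely, the change is nothing more than disabling one call in do_idle(); a sketch (the surrounding code varies by kernel version):

static void do_idle(void)
{
    ...
#if 0   /* the experiment: the idle loop can never enter the scheduler again */
    schedule_idle();
#endif
    ...
}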
With the following kernel options enabled:

CONFIG_RCU_STALL_COMMON=y
CONFIG_RCU_CPU_STALL_TIMEOUT=10 (set manually, i.e. a 10 s timeout)

the system prints a message like:
INFO: rcu_sched detected stalls on CPUs/tasks:
RCU_GP_WAIT_FQS(3) ->state=0x0 ->cpu=0
This works because even though schedule() can no longer be reached, timer interrupts keep arriving, and the RCU stall detector piggybacks on them: roughly, the report above says the grace-period kthread is runnable (->state=0x0) yet stuck waiting in the force-quiescent-state phase because it never gets the CPU.

The fact that timer interrupts are still being serviced means scheduler_tick() is still doing its job. What this function does is call the task_tick() hook of the current task's scheduling class; unfortunately, in the idle scheduling class that hook is an empty function.

/*
 * This function gets called by the timer code, with HZ frequency.
 * We call it with interrupts disabled.
 */
void scheduler_tick(void)
{
    struct rq *rq = cpu_rq(smp_processor_id());
    struct task_struct *curr = rq->curr;
    ...
    curr->sched_class->task_tick(rq, curr, 0);
    ...
}

const struct sched_class idle_sched_class = {
    ...
    .task_tick    = task_tick_idle,
    ...
};

static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{}

If, when the tick interrupt arrives, the current task is not the idle task but one belonging to the fair scheduling class, scheduler_tick() instead lands in the fair class's task_tick_fair():

const struct sched_class fair_sched_class = {
    ...
    .task_tick    = task_tick_fair,
    ...
};

static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
{
    struct cfs_rq *cfs_rq;
    struct sched_entity *se = &curr->se;

    for_each_sched_entity(se) {
        cfs_rq = cfs_rq_of(se);
        entity_tick(cfs_rq, se, queued);
    }
}
static void
entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
{
    ...
    if (cfs_rq->nr_running > 1)
        check_preempt_tick(cfs_rq, curr);
}

static void
check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
    unsigned long ideal_runtime, delta_exec;
    struct sched_entity *se;
    s64 delta;

    ideal_runtime = sched_slice(cfs_rq, curr);
    delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
    /* Case 1: the time slice is used up. */
    if (delta_exec > ideal_runtime) {
        resched_curr(rq_of(cfs_rq));
        ...
        return;
    }
    ...
    se = __pick_first_entity(cfs_rq);    /* leftmost entity: smallest vruntime */
    delta = curr->vruntime - se->vruntime;
    /* Case 2: our vruntime is more than a full slice ahead of the smallest. */
    if (delta > ideal_runtime)
        resched_curr(rq_of(cfs_rq));
}

Roughly speaking: when the task's virtual runtime has pulled too far ahead of the smallest virtual runtime on the same run queue's red-black tree, or its time slice is used up, resched_curr() is called to set the TIF_NEED_RESCHED flag on the current task.
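For reference, resched_curr() in kernel/sched/core.c essentially just sets that flag (abridged; for a remote CPU it instead sends an IPI so the flag gets noticed):

void resched_curr(struct rq *rq)
{
    struct task_struct *curr = rq->curr;

    if (test_tsk_need_resched(curr))    /* flag already set, nothing to do */
        return;

    if (cpu_of(rq) == smp_processor_id()) {
        set_tsk_need_resched(curr);     /* sets TIF_NEED_RESCHED */
        set_preempt_need_resched();
        return;
    }
    ...
}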

Setting the flag does not switch the task out immediately, though; that waits for a scheduling point. Where is one? Right on the return path of an interrupt. (Scheduling points in general are a topic of their own and not expanded on here.)

<arch/arm/kernel/entry-common.S>
ENTRY(ret_to_user_from_irq)
    ldr    r2, [tsk, #TI_ADDR_LIMIT]
    cmp    r2, #TASK_SIZE
    blne   addr_limit_check_failed
    ldr    r1, [tsk, #TI_FLAGS]
    tst    r1, #_TIF_WORK_MASK
    bne    slow_work_pending

slow_work_pending:
    mov    r0, sp        @ 'regs'
    mov    r2, why       @ 'syscall'
    bl     do_work_pending
    cmp    r0, #0
    beq    no_work_pending
    movlt  scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
    ldmia  sp, {r0 - r6}      @ have to reload r0 - r6
    b      local_restart      @ ... and off we go
The _TIF_WORK_MASK test above routes execution into the C helper do_work_pending(), where _TIF_NEED_RESCHED finally leads to schedule():

asmlinkage int
do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
    do {
        if (likely(thread_flags & _TIF_NEED_RESCHED)) {
            schedule();
        }
        ...
    } while (thread_flags & _TIF_WORK_MASK);
    ...
}

schedule() in turn reaches __schedule(), where pick_next_task() chooses the highest-priority runnable task:

/*
 * Pick up the highest-prio task:
 */
static inline struct task_struct *
pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
  const struct sched_class *class;
  struct task_struct *p;

  /*
   * Optimization: we know that if all tasks are in the fair class we can
   * call that function directly, but only if the @prev task wasn't of a
   * higher scheduling class, because otherwise those loose the
   * opportunity to pull in more work from other CPUs.
   */
  if (likely((prev->sched_class == &idle_sched_class ||
        prev->sched_class == &fair_sched_class) &&
       rq->nr_running == rq->cfs.h_nr_running)) {

    p = fair_sched_class.pick_next_task(rq, prev, rf);
    if (unlikely(p == RETRY_TASK))
      goto again;

    /* Assumes fair_sched_class->next == idle_sched_class */
    if (unlikely(!p))
      p = idle_sched_class.pick_next_task(rq, prev, rf);

    return p;
  }

again:
  for_each_class(class) {
    p = class->pick_next_task(rq, prev, rf);
    if (p) {
      if (unlikely(p == RETRY_TASK))
        goto again;
      return p;
    }
  }

  /* The idle class should always have a runnable task: */
  BUG();
}
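for_each_class() walks the scheduling classes from highest to lowest priority, stop → dl → rt → fair → idle, which is why the idle class is guaranteed to supply a task at the end. In kernels of this vintage the classes are linked through ->next (kernel/sched/sched.h, abridged):

#define sched_class_highest (&stop_sched_class)

#define for_each_class(class) \
    for (class = sched_class_highest; class; class = class->next)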

So, on the exit path of the interrupt, the kernel checks the current task's _TIF_NEED_RESCHED flag, and if it is set, picks the next suitable task and performs the switch.
