2015年01月29日
情報科学類 オペレーティングシステム II
筑波大学 システム情報工学研究科
コンピュータサイエンス専攻, 電子・情報工学系
新城 靖
<yas@cs.tsukuba.ac.jp>
このページは、次の URL にあります。
http://www.coins.tsukuba.ac.jp/~yas/coins/os2-2014/2015-01-29
あるいは、次のページから手繰っていくこともできます。
http://www.coins.tsukuba.ac.jp/~yas/
http://www.cs.tsukuba.ac.jp/~yas/
struct timeval {
time_t tv_sec; /* seconds. long int */
suseconds_t tv_usec; /* microseconds. long int */
};
int gettimeofday(struct timeval *tp, struct timezone *tzp);
int settimeofday(const struct timeval *tp, const struct timezone *tzp);
使い方
1: /*
2: gettimeofday-print.c -- get colander time and print
3: Created on: 2014/01/22 20:40:34
4: */
5:
6: #include <sys/time.h> /* gettimeofday() */
7: #include <time.h> /* ctime() */
8: #include <stdio.h>
9:
10: main()
11: {
12: struct timeval tv;
13: time_t sec;
14: gettimeofday( &tv, NULL );
15: sec = tv.tv_sec;
16: printf("%s", ctime(&sec) );
17: }
$ make gettimeofday-print
cc gettimeofday-print.c -o gettimeofday-print
$ ./gettimeofday-print
Wed Jan 22 20:46:12 2014
$ date
Wed Jan 22 20:46:13 JST 2014
$
POSIX 1003.1, 2003 の
struct timespec
では、ナノ秒単位。
struct timespec {
time_t tv_sec; /* Seconds. */
long int tv_nsec; /* Nanoseconds. */
};
int clock_settime(clockid_t clock_id, const struct timespec *tp);
int clock_gettime(clockid_t clock_id, struct timespec *tp);
int clock_getres(clockid_t clock_id, struct timespec *res);
clock_id としては、CLOCK_REALTIME (カレンダ時刻)やCLOCK_MONOTONIC があ
る。
カレンダ時刻は、変更できる。逆走させることも可能。
順方向のジャンプや逆走を避けて、カレンダ時刻を合わせるには、adjtime() を使う。
int adjtime(const struct timeval *delta, struct timeval *olddelta);
struct itimerval {
struct timeval it_interval; /* next value */
struct timeval it_value; /* current value */
};
int setitimer(int which, const struct itimerval *value,
struct itimerval *ovalue);
int select(int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
struct timeval *timeout);
int poll(struct pollfd *fds, nfds_t nfds, int timeout);
ネットワーク・プログラムでよく使う。複数の入力を監視する。指定された時
間、入力がなければ、システム・コールから復帰する。
なにもしない時間切れ。
unsigned int sleep(unsigned int seconds);
int usleep(useconds_t usec);
int nanosleep(const struct timespec *rqtp, struct timespec *rmtp);

図? タイマ関連のハードウェアの基本モデル
2つの機能がある。
その他の割込み
linux-3.18.1/include/asm-generic/param.h
7: # define HZ CONFIG_HZ /* Internal kernel timer frequency */
linux-3.18.1/include/generated/autoconf.h
616: #define CONFIG_HZ 1000
linux-3.18.1/kernel/time/timer.c
55: __visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
linux-3.18.1/include/linux/jiffies.h
76: extern u64 __jiffy_data jiffies_64;
77: extern unsigned long volatile __jiffy_data jiffies;
linux-3.18.1/kernel/time/tick-common.c
78: static void tick_periodic(int cpu)
79: {
80: if (tick_do_timer_cpu == cpu) {
...
86: do_timer(1);
...
88: update_wall_time();
89: }
...
91: update_process_times(user_mode(get_irq_regs()));
...
93: }
linux-3.18.1/kernel/time/timer.c
55: __visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
linux-3.18.1/kernel/time/timekeeping.c
1671: void do_timer(unsigned long ticks)
1672: {
1673: jiffies_64 += ticks;
...
1675: }
xtime_nsec >> shift でナノ秒を表す。
linux-3.18.1/include/linux/timekeeper_internal.h
30: struct tk_read_base {
36: u32 shift;
37: u64 xtime_nsec;
...
39: };
77: struct timekeeper {
...
78: struct tk_read_base tkr;
79: u64 xtime_sec;
...
105: };
linux-3.18.1/kernel/time/timekeeping.c
77: static inline struct timespec64 tk_xtime(struct timekeeper *tk)
78: {
79: struct timespec64 ts;
80:
81: ts.tv_sec = tk->xtime_sec;
82: ts.tv_nsec = (long)(tk->tkr.xtime_nsec >> tk->tkr.shift);
83: return ts;
84: }
linux-3.18.1/kernel/time/time.c
102: SYSCALL_DEFINE2(gettimeofday, struct timeval __user *, tv,
103: struct timezone __user *, tz)
104: {
105: if (likely(tv != NULL)) {
106: struct timeval ktv;
107: do_gettimeofday(&ktv);
108: if (copy_to_user(tv, &ktv, sizeof(ktv)))
109: return -EFAULT;
110: }
111: if (unlikely(tz != NULL)) {
112: if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
113: return -EFAULT;
114: }
115: return 0;
116: }
695: void do_gettimeofday(struct timeval *tv)
696: {
697: struct timespec64 now;
698:
699: getnstimeofday64(&now);
700: tv->tv_sec = now.tv_sec;
701: tv->tv_usec = now.tv_nsec/1000;
702: }
526: void getnstimeofday64(struct timespec64 *ts)
527: {
528: WARN_ON(__getnstimeofday64(ts));
529: }
493: int __getnstimeofday64(struct timespec64 *ts)
494: {
495: struct timekeeper *tk = &tk_core.timekeeper;
...
497: s64 nsecs = 0;
...
502: ts->tv_sec = tk->xtime_sec;
503: nsecs = timekeeping_get_ns(&tk->tkr);
...
507: ts->tv_nsec = 0;
508: timespec64_add_ns(ts, nsecs);
...
516: return 0;
517: }
linux-3.18.1/include/linux/timer.h
12: struct timer_list {
...
18: unsigned long expires;
...
21: void (*function)(unsigned long);
22: unsigned long data;
...
34: };
jiffies が増加して expires に達すれば、(*function)(data) を呼ぶ。
主に次の関数で操作する。
{
struct timer_list my_timer; // 構造体の宣言
init_timer(&my_timer); // 初期化
my_timer.expires = jiffies + delay; // どのくらい待ちたいか
my_timer.data = (unsigned long)data; // 渡したいデータ
my_timer.function = my_timer_func; // 関数
add_timer(&my_timer); // 登録
}
void my_timer_func(unsigned long data) {
...
}
linux-3.18.1/include/linux/hrtimer.h
33: enum hrtimer_mode {
34: HRTIMER_MODE_ABS = 0x0, /* Time value is absolute */
35: HRTIMER_MODE_REL = 0x1, /* Time value is relative to now */
...
39: };
44: enum hrtimer_restart {
45: HRTIMER_NORESTART, /* Timer is not restarted */
46: HRTIMER_RESTART, /* Timer must be restarted */
47: };
108: struct hrtimer {
...
111: enum hrtimer_restart (*function)(struct hrtimer *);
...
119: };
主に次の関数で操作する。
struct hrtimer my_timer;
hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
my_timer.function = my_timer_handler;
...
hrtimer_start(&my_timer, ktime_set(0, t_nano), HRTIMER_MODE_REL);
...
enum hrtimer_restart my_timer_handler(struct hrtimer *timer)
{
...
return HRTIMER_NORESTART;
}
例: Ethernet のドライバでモードを変更して 2 マイクロ秒だけ待つ。
様々な方法がある。
例1: 10 tick (インターバル・タイマによる割り込み)を待つ。
unsigned long timeout = jiffies + 10; // 10 ticks
while (time_before(jiffies,timeout))
continue;
例2: 2秒待つ
unsigned long delay = jiffies + 2*HZ; // 2秒
while (time_before(jiffies,delay))
continue;
unsigned long timeout = jiffies + 10; // 10 ticks
while (jiffies<timeout)
continue;
引き算した結果を符号付き整数 (long) として 0 と比較すると、オーバフローの問題が解決できる。
unsigned long timeout = jiffies + 10; // 10 ticks
while ((long)(jiffies-timeout) < 0)
continue;
次のマクロを使う方法もある。
linux-3.18.1/include/linux/jiffies.h
101: #define time_after(a,b) \
102: (typecheck(unsigned long, a) && \
103: typecheck(unsigned long, b) && \
104: ((long)((b) - (a)) < 0))
105: #define time_before(a,b) time_after(b,a)
106:
107: #define time_after_eq(a,b) \
108: (typecheck(unsigned long, a) && \
109: typecheck(unsigned long, b) && \
110: ((long)((a) - (b)) >= 0))
111: #define time_before_eq(a,b) time_after_eq(b,a)
unsigned long delay = jiffies + 2*HZ; // 2秒
while (time_before(jiffies,delay))
cond_resched();
他に実行すべき重要なプロセスが存在する(条件)時には、スケジューラを呼ん
で、実行する。存在しなければ、空ループと同じ。ただし、スケジューラを呼
ぶ(sleepする可能性がある)ので、割り込みコンテキストからは使えない。
void ndelay(unsigned long nsecs)
void udelay(unsigned long usecs)
void mdelay(unsigned long msecs)
udelay() は、ある回数のループで実装されている。回数は、CPUの速度等で決まる。ndelay(), mdelay() は、udelay() を呼んでいる。
udelay() で1ミリ秒以上待ってはいけない。 ループのインデックスがオーバフローする可能性がある。
set_current_state( TASK_INTERRUPTIBLE ); // signal で起きる可能性がある
schedule_timeout( s * HZ );
実装には struct timer_list が使われている。
| 表示 | 説明 |
| NI | Nice。優先度を表す値。 |
$ /bin/ps l
F UID PID PPID PRI NI VSZ RSS WCHAN STAT TTY TIME COMMAND
0 1013 20638 20636 20 0 123572 2100 wait Ss pts/2 0:00 -bash
0 1013 21139 20638 20 0 155660 5900 poll_s S pts/2 0:02 xterm -class UXTerm -title uxterm -u8
0 1013 21150 21139 20 0 123552 2144 wait Ss pts/3 0:00 bash
0 1013 21560 20638 20 0 267808 22928 poll_s S+ pts/2 0:09 emacs -nw
0 1013 21784 21150 20 0 103748 956 signal T pts/3 0:00 lv kernel/time/timer.c
0 1013 27031 21150 20 0 108132 980 - R+ pts/3 0:00 /bin/ps l
$ /bin/nice /bin/ps l
F UID PID PPID PRI NI VSZ RSS WCHAN STAT TTY TIME COMMAND
0 1013 20638 20636 20 0 123572 2100 wait Ss pts/2 0:00 -bash
0 1013 21139 20638 20 0 155660 5900 poll_s S pts/2 0:02 xterm -class UXTerm -title uxterm -u8
0 1013 21150 21139 20 0 123552 2144 wait Ss pts/3 0:00 bash
0 1013 21560 20638 20 0 267808 22928 poll_s S+ pts/2 0:09 emacs -nw
0 1013 21784 21150 20 0 103748 956 signal T pts/3 0:00 lv kernel/time/timer.c
0 1013 27034 21150 30 10 108136 984 - RN+ pts/3 0:00 /bin/ps l
$ /bin/nice -19 /bin/ps l
F UID PID PPID PRI NI VSZ RSS WCHAN STAT TTY TIME COMMAND
0 1013 20638 20636 20 0 123572 2100 wait Ss pts/2 0:00 -bash
0 1013 21139 20638 20 0 155660 5900 - R pts/2 0:02 xterm -class UXTerm -title uxterm -u8
0 1013 21150 21139 20 0 123552 2144 wait Ss pts/3 0:00 bash
0 1013 21560 20638 20 0 267808 22928 poll_s S+ pts/2 0:09 emacs -nw
0 1013 21784 21150 20 0 103748 956 signal T pts/3 0:00 lv kernel/time/timer.c
0 1013 27035 21150 39 19 108132 984 - RN+ pts/3 0:00 /bin/ps l
$
1: /*
2: getpriority-pid.c -- 優先度の表示
3: ~yas/syspro/proc/getpriority-pid.c
4: Created on: 2009/12/14 12:15:11
5: */
6:
7: #include <stdio.h> /* stderr, fprintf() */
8: #include <sys/time.h> /* getpriority() */
9: #include <sys/resource.h> /* getpriority() */
10: #include <stdlib.h> /* strtol() */
11: #include <limits.h> /* strtol() */
12:
13: main( int argc, char *argv[] )
14: {
15: int which, who, prio;
16: pid_t pid;
17: if( argc != 2 )
18: {
19: fprintf(stderr,"Usage: %% %s pid\n",argv[0] );
20: exit( 1 );
21: }
22: pid = strtol( argv[1], NULL, 10 );
23: prio = getpriority( PRIO_PROCESS, pid );
24: printf("pid==%d, priority==%d\n", pid, prio);
25: }
$ ./getpriority-pid
Usage: % ./getpriority-pid pid
$ echo $$
21150
$ ./getpriority-pid
Usage: % ./getpriority-pid pid
$ ./getpriority-pid $$
pid==21150, priority==0
$ ./getpriority-pid 0
pid==0, priority==0
$ /bin/nice -10 ./getpriority-pid 0
pid==0, priority==10
$ /bin/nice -20 ./getpriority-pid 0
pid==0, priority==19
$
linux-3.18.1/include/linux/sched.h
1235: struct task_struct {
1236: volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
...
1253: int prio, static_prio, normal_prio;
1254: unsigned int rt_priority;
1255: const struct sched_class *sched_class;
1256: struct sched_entity se;
1257: struct sched_rt_entity rt;
...
1272: unsigned int policy;
...
1664: };
1122: struct sched_entity {
...
1124: struct rb_node run_node;
...
1126: unsigned int on_rq;
...
1130: u64 vruntime;
...
1152: };
struct task_struct の中に、prio 等のフィールドやstruct sched_entity が
ある。
linux-3.18.1/include/uapi/linux/sched.h
36: #define SCHED_NORMAL 0
37: #define SCHED_FIFO 1
38: #define SCHED_RR 2
39: #define SCHED_BATCH 3
40: /* SCHED_ISO: reserved but not implemented yet */
41: #define SCHED_IDLE 5
42: #define SCHED_DEADLINE 6
linux-3.18.1/kernel/sys.c
226:
227: /*
228: * Ugh. To avoid negative return values, "getpriority()" will
229: * not return the normal nice-value, but a negated value that
230: * has been offset by 20 (ie it returns 40..1 instead of -20..19)
231: * to stay compatible.
232: */
233: SYSCALL_DEFINE2(getpriority, int, which, int, who)
234: {
235: struct task_struct *g, *p;
236: struct user_struct *user;
237: const struct cred *cred = current_cred();
238: long niceval, retval = -ESRCH;
239: struct pid *pgrp;
240: kuid_t uid;
241:
242: if (which > PRIO_USER || which < PRIO_PROCESS)
243: return -EINVAL;
...
247: switch (which) {
248: case PRIO_PROCESS:
249: if (who)
250: p = find_task_by_vpid(who);
251: else
252: p = current;
253: if (p) {
254: niceval = nice_to_rlimit(task_nice(p));
255: if (niceval > retval)
256: retval = niceval;
257: }
258: break;
...
259: case PRIO_PGRP:
...
270: case PRIO_USER:
...
290: }
...
295: return retval;
296: }
linux-3.18.1/include/linux/sched/rt.h
4: #define MAX_NICE 19
5: #define MIN_NICE -20
6: #define NICE_WIDTH (MAX_NICE - MIN_NICE + 1)
...
21: #define MAX_USER_RT_PRIO 100
22: #define MAX_RT_PRIO MAX_USER_RT_PRIO
23:
24: #define MAX_PRIO (MAX_RT_PRIO + NICE_WIDTH)
25: #define DEFAULT_PRIO (MAX_RT_PRIO + NICE_WIDTH / 2)
32: #define NICE_TO_PRIO(nice) ((nice) + DEFAULT_PRIO)
33: #define PRIO_TO_NICE(prio) ((prio) - DEFAULT_PRIO)
linux-3.18.1/include/linux/sched.h
2207: static inline int task_nice(const struct task_struct *p)
2208: {
2209: return PRIO_TO_NICE((p)->static_prio);
2210: }
glibc-2.12/sysdeps/unix/sysv/linux/getpriority.c
28: #define PZERO 20
...
35: int
36: getpriority (enum __priority_which which, id_t who)
37: {
38: int res;
39:
40: res = INLINE_SYSCALL (getpriority, 2, (int) which, who);
41: if (res >= 0)
42: res = PZERO - res;
43: return res;
44: }
linux-3.18.1/kernel/sched/sched.h
1034: /*
1035: * Nice levels are multiplicative, with a gentle 10% change for every
1036: * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
1037: * nice 1, it will get ~10% less CPU time than another CPU-bound task
1038: * that remained on nice 0.
1039: *
1040: * The "10% effect" is relative and cumulative: from _any_ nice level,
1041: * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
1042: * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
1043: * If a task goes up by ~10% and another task goes down by ~10% then
1044: * the relative distance between them is ~25%.)
1045: */
1046: static const int prio_to_weight[40] = {
1047: /* -20 */ 88761, 71755, 56483, 46273, 36291,
1048: /* -15 */ 29154, 23254, 18705, 14949, 11916,
1049: /* -10 */ 9548, 7620, 6100, 4904, 3906,
1050: /* -5 */ 3121, 2501, 1991, 1586, 1277,
1051: /* 0 */ 1024, 820, 655, 526, 423,
1052: /* 5 */ 335, 272, 215, 172, 137,
1053: /* 10 */ 110, 87, 70, 56, 45,
1054: /* 15 */ 36, 29, 23, 18, 15,
1055: };
54: # define scale_load(w) (w)
linux-3.18.1/kernel/sched/core.c
820: static void set_load_weight(struct task_struct *p)
821: {
822: int prio = p->static_prio - MAX_RT_PRIO;
823: struct load_weight *load = &p->se.load;
...
834: load->weight = scale_load(prio_to_weight[prio]);
835: load->inv_weight = prio_to_wmult[prio];
836: }
| 名前 | 説明 |
|---|---|
| enqueue_task | プロセスが実行可能(runnable)になった |
| dequeue_task | プロセスが実行可能ではなくなった |
| yield_task | CPUを譲る。dequeueしてenqueue |
| check_preempt_curr | 実行可能になった時にCPUを横取りすべきかをチェック |
| pick_next_task | 次に実行すべきプロセスを選ぶ |
| set_curr_task | スケジューリング・クラスが変更された |
| task_tick | タイマ割込み(tick)の時に呼ばれる |
| task_new | 新しいプロセスが生成された |
linux-3.18.1/kernel/sched/core.c
838: static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
839: {
840: update_rq_clock(rq);
841: sched_info_queued(rq, p);
842: p->sched_class->enqueue_task(rq, p, flags);
843: }
844:
845: static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
846: {
847: update_rq_clock(rq);
848: sched_info_dequeued(rq, p);
849: p->sched_class->dequeue_task(rq, p, flags);
850: }
linux-3.18.1/kernel/sched/core.c
3317: static void __setscheduler(struct rq *rq, struct task_struct *p,
3318: const struct sched_attr *attr)
3319: {
3320: __setscheduler_params(p, attr);
...
3326: p->prio = normal_prio(p);
3327:
3328: if (dl_prio(p->prio))
3329: p->sched_class = &dl_sched_class;
3330: else if (rt_prio(p->prio))
3331: p->sched_class = &rt_sched_class;
3332: else
3333: p->sched_class = &fair_sched_class;
3334: }
3291: static void __setscheduler_params(struct task_struct *p,
3292: const struct sched_attr *attr)
3293: {
3294: int policy = attr->sched_policy;
3295:
3296: if (policy == SETPARAM_POLICY)
3297: policy = p->policy;
3298:
3299: p->policy = policy;
3300:
3301: if (dl_policy(policy))
3302: __setparam_dl(p, attr);
3303: else if (fair_policy(policy))
3304: p->static_prio = NICE_TO_PRIO(attr->sched_nice);
...
3311: p->rt_priority = attr->sched_priority;
3312: p->normal_prio = normal_prio(p);
3313: set_load_weight(p);
3314: }
p->prio
をpolicy に応じて設定する。
p->prio の値に応じて
&dl_sched_class か
&rt_sched_class か
&fair_sched_class のいずれかを指すようにする。
Linux CFS は、次の方法でスケジューリングを行なう。

図? runqueueの構造
linux-3.18.1/kernel/sched/sched.h
519: struct rq {
...
549: struct cfs_rq cfs;
550: struct rt_rq rt;
551: struct dl_rq dl;
...
652: };
317: struct cfs_rq {
...
327: struct rb_root tasks_timeline;
328: struct rb_node *rb_leftmost;
...
395: };
linux-3.18.1/kernel/sched/core.c
114: DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

図? runqueueの構造(red-black tree)
linux-3.18.1/kernel/sched/fair.c
491: static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
492: {
493: struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
494: struct rb_node *parent = NULL;
495: struct sched_entity *entry;
496: int leftmost = 1;
497:
498: /*
499: * Find the right place in the rbtree:
500: */
501: while (*link) {
502: parent = *link;
503: entry = rb_entry(parent, struct sched_entity, run_node);
504: /*
505: * We dont care about collisions. Nodes with
506: * the same key stay together.
507: */
508: if (entity_before(se, entry)) {
509: link = &parent->rb_left;
510: } else {
511: link = &parent->rb_right;
512: leftmost = 0;
513: }
514: }
515:
516: /*
517: * Maintain a cache of leftmost tree entries (it is frequently
518: * used):
519: */
520: if (leftmost)
521: cfs_rq->rb_leftmost = &se->run_node;
522:
523: rb_link_node(&se->run_node, parent, link);
524: rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
525: }
456: static inline int entity_before(struct sched_entity *a,
457: struct sched_entity *b)
458: {
459: return (s64)(a->vruntime - b->vruntime) < 0;
460: }
&parent->rb_left), 大きければ右(&parent->rb_right) に進む。
cfs_rq->rb_leftmost にも保存。
linux-3.18.1/kernel/sched/core.c
2524: void scheduler_tick(void)
2525: {
2526: int cpu = smp_processor_id();
2527: struct rq *rq = cpu_rq(cpu);
2528: struct task_struct *curr = rq->curr;
...
2534: curr->sched_class->task_tick(rq, curr, 0);
...
2545: }
linux-3.18.1/kernel/sched/fair.c
7521: static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
7522: {
7523: struct cfs_rq *cfs_rq;
7524: struct sched_entity *se = &curr->se;
7525:
7526: for_each_sched_entity(se) {
7527: cfs_rq = cfs_rq_of(se);
7528: entity_tick(cfs_rq, se, queued);
7529: }
...
7535: }
3097: static void
3098: entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
3099: {
...
3103: update_curr(cfs_rq);
...
3131: }
694: static void update_curr(struct cfs_rq *cfs_rq)
695: {
696: struct sched_entity *curr = cfs_rq->curr;
697: u64 now = rq_clock_task(rq_of(cfs_rq));
698: u64 delta_exec;
...
703: delta_exec = now - curr->exec_start;
...
707: curr->exec_start = now;
...
712: curr->sum_exec_runtime += delta_exec;
...
715: curr->vruntime += calc_delta_fair(delta_exec, curr);
...
727: }
601: static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
602: {
603: if (unlikely(se->load.weight != NICE_0_LOAD))
604: delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
605:
606: return delta;
607: }
$ cat /proc/sched_debug
Sched Debug Version: v0.09, 2.6.32-431.3.1.el6.x86_64 #1
now at 7955627655.961573 msecs
.jiffies : 12250294951
...
cpu#0, 2100.000 MHz
.nr_running : 1
...
.curr->pid : 30990
...
cfs_rq[0]:/
.exec_clock : 40812852.059736
...
rt_rq[0]:/
.rt_nr_running : 0
...
.nr_running : 1
...
runnable tasks:
task PID tree-key switches prio exec-runtime sum-exec sum-sleep
----------------------------------------------------------------------------------------------------------
R cat 30990 32644150.029656 2 120 32644150.029656 1.072543 0.366310 /
...
cpu#1, 2100.000 MHz
...
cpu#2, 2100.000 MHz
...
cpu#3, 2100.000 MHz
...
$ cat /proc/self/sched
cat (31354, #threads: 1)
---------------------------------------------------------
se.exec_start : 7962193228.073935
se.vruntime : 51856286.476132
se.sum_exec_runtime : 1.211193
...
se.load.weight : 1024
policy : 0
prio : 120
clock-delta : 127
$
void h(int a,int b, int c) {
....
}
これを実現するために、どのようなコードを書けばよいか。以下の空欄を埋め
なさい。
struct timer_list my_timer;
int my_arg_a,my_arg_b,my_arg_c;
void f(unsigned long data) {
init_timer( /*空欄(a)*/ );
my_timer.expires = /*空欄(b)*/;
my_timer.data = 0;
my_timer.function = /*空欄(c)*/;
/*空欄(d)*/;
}
void my_timer_func(unsigned long data) {
h( my_arg_a,my_arg_b,my_arg_c );
}

図? 4つの要素を持つリスト構造
注意: 正しい二分探索木は、複数存在する。