xenomai / xenomai4 / linux-evl
Commit b970974b authored May 04, 2019 by Philippe Gerum
evl/sched: add temporal partitioning policy
Signed-off-by: Philippe Gerum <rpm@xenomai.org>
parent 6a675a0d
Changes 9
include/evl/sched.h
...
...
@@ -16,6 +16,7 @@
#include <evl/sched/queue.h>
#include <evl/sched/weak.h>
#include <evl/sched/quota.h>
#include <evl/sched/tp.h>
#include <evl/assert.h>
#include <evl/init.h>
...
...
@@ -84,6 +85,10 @@ struct evl_rq {
#ifdef CONFIG_EVL_SCHED_QUOTA
	/* Context of runtime quota scheduling. */
	struct evl_sched_quota quota;
#endif
#ifdef CONFIG_EVL_SCHED_TP
	/* Context for time partitioning policy. */
	struct evl_sched_tp tp;
#endif
	struct evl_timer inband_timer;
	/* Round-robin timer. */
...
...
include/evl/sched/param.h
...
...
@@ -25,6 +25,11 @@ struct evl_quota_param {
	int tgid;	/* thread group id. */
};

struct evl_tp_param {
	int prio;
	int ptid;	/* partition id. */
};

union evl_sched_param {
	struct evl_idle_param idle;
	struct evl_rt_param rt;
...
...
@@ -32,6 +37,9 @@ union evl_sched_param {
#ifdef CONFIG_EVL_SCHED_QUOTA
	struct evl_quota_param quota;
#endif
#ifdef CONFIG_EVL_SCHED_TP
	struct evl_tp_param tp;
#endif
};

#endif /* !_EVL_SCHED_PARAM_H */
include/evl/sched/tp.h
new file mode 100644
/*
* SPDX-License-Identifier: GPL-2.0
*
* Derived from Xenomai Cobalt, https://xenomai.org/
* Copyright (C) 2008, 2019 Philippe Gerum <rpm@xenomai.org>
*/
#ifndef _EVL_SCHED_TP_H
#define _EVL_SCHED_TP_H
#ifndef _EVL_SCHED_H
#error "please don't include evl/sched/tp.h directly"
#endif
#ifdef CONFIG_EVL_SCHED_TP
#define EVL_TP_MIN_PRIO 1
#define EVL_TP_MAX_PRIO EVL_CORE_MAX_PRIO
#define EVL_TP_NR_PRIO (EVL_TP_MAX_PRIO - EVL_TP_MIN_PRIO + 1)

extern struct evl_sched_class evl_sched_tp;

struct evl_tp_window {
	ktime_t w_offset;
	int w_part;
};

struct evl_tp_schedule {
	int pwin_nr;
	ktime_t tf_duration;
	atomic_t refcount;
	struct evl_tp_window pwins[0];
};

struct evl_sched_tp {
	struct evl_tp_rq {
		struct evl_multilevel_queue runnable;
	} partitions[CONFIG_EVL_SCHED_TP_NR_PART];
	struct evl_tp_rq idle;
	struct evl_tp_rq *tps;
	struct evl_timer tf_timer;
	struct evl_tp_schedule *gps;
	int wnext;
	ktime_t tf_start;
	struct list_head threads;
};

static inline int evl_tp_init_thread(struct evl_thread *thread)
{
	thread->tps = NULL;

	return 0;
}

#endif /* !CONFIG_EVL_SCHED_TP */

#endif /* !_EVL_SCHED_TP_H */
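For illustration only, here is how a global partition schedule covering a 1 ms time frame could be laid out with the structures above: two 400 us partition windows followed by a 200 us hole mapped to the idle pseudo-partition (w_part == -1). This sketch is not part of the commit; the helper name make_sample_gps() is hypothetical and only assumes evl_alloc() as used elsewhere in this series.

#include <evl/sched.h>
#include <evl/memory.h>

static struct evl_tp_schedule *make_sample_gps(void)
{
	struct evl_tp_schedule *gps;

	/* Header plus three windows appended to the flexible array. */
	gps = evl_alloc(sizeof(*gps) + 3 * sizeof(struct evl_tp_window));
	if (gps == NULL)
		return NULL;

	gps->pwin_nr = 3;
	gps->tf_duration = 1000000;		/* 1 ms global time frame (ns). */
	atomic_set(&gps->refcount, 1);
	gps->pwins[0].w_offset = 0;		/* partition #0: [0, 400) us */
	gps->pwins[0].w_part = 0;
	gps->pwins[1].w_offset = 400000;	/* partition #1: [400, 800) us */
	gps->pwins[1].w_part = 1;
	gps->pwins[2].w_offset = 800000;	/* time hole: [800, 1000) us */
	gps->pwins[2].w_part = -1;		/* idle pseudo-partition */

	return gps;
}

Note that set_tp_schedule() below rejects a schedule whose first window does not start at offset 0 or whose pwin_nr is not positive, so any layout built this way must begin at the start of the frame.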
include/evl/thread.h
...
...
@@ -64,6 +64,10 @@ struct evl_thread {
	struct evl_quota_group *quota;	/* Quota scheduling group. */
	struct list_head quota_expired;
	struct list_head quota_next;
#endif
#ifdef CONFIG_EVL_SCHED_TP
	struct evl_tp_rq *tps;		/* Current runqueue slot */
	struct list_head tp_link;	/* evl_rq->tp.threads */
#endif
	struct cpumask affinity;	/* Processor affinity. */
...
...
include/uapi/evl/sched.h
...
...
@@ -24,15 +24,6 @@ struct __sched_quota_param {
	int __sched_group;
};

struct evl_sched_attrs {
	int sched_policy;
	int sched_priority;
	union {
		struct __sched_rr_param rr;
		struct __sched_quota_param quota;
	} sched_u;
};

struct evl_quota_ctlparam {
	enum {
		evl_quota_add,
...
...
@@ -56,10 +47,6 @@ struct evl_quota_ctlparam {
	} u;
};

union evl_sched_ctlparam {
	struct evl_quota_ctlparam quota;
};

struct evl_quota_ctlinfo {
	int tgid;
	int quota;
...
...
@@ -67,8 +54,57 @@ struct evl_quota_ctlinfo {
	int quota_sum;
};

#define SCHED_TP	45

#define sched_tp_partition	sched_u.tp.__sched_partition

struct __sched_tp_param {
	int __sched_partition;
};

#define EVL_TP_IDLE	-1	/* Idle pseudo-partition */

struct evl_tp_ctlparam {
	enum {
		evl_install_tp,
		evl_uninstall_tp,
		evl_start_tp,
		evl_stop_tp,
		evl_get_tp,
	} op;
	int nr_windows;
	struct __sched_tp_window {
		struct timespec offset;
		struct timespec duration;
		int ptid;
	} windows[0];
};

struct evl_tp_ctlinfo {
	int nr_windows;
	struct __sched_tp_window windows[0];
};

#define evl_tp_paramlen(__p) \
	(sizeof(*__p) + (__p)->nr_windows * sizeof((__p)->windows))

struct evl_sched_attrs {
	int sched_policy;
	int sched_priority;
	union {
		struct __sched_rr_param rr;
		struct __sched_quota_param quota;
		struct __sched_tp_param tp;
	} sched_u;
};

union evl_sched_ctlparam {
	struct evl_quota_ctlparam quota;
	struct evl_tp_ctlparam tp;
};

union evl_sched_ctlinfo {
	struct evl_quota_ctlinfo quota;
	struct evl_tp_ctlinfo tp;
};

struct evl_sched_ctlreq {
...
...
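For illustration only, the following userland sketch shows how the new UAPI types could describe a 1 ms frame split into two 500 us windows, then how a thread would request SCHED_TP membership in partition #1. The helper name build_tp_request() is hypothetical, the header include path is assumed, and the delivery of the request through the EVL control interface (serviced by kernel/evl/control.c below) is left out.

#include <stdlib.h>
#include <time.h>
#include <uapi/evl/sched.h>	/* assumed install path */

static union evl_sched_ctlparam *build_tp_request(void)
{
	union evl_sched_ctlparam *p;

	/* Room for the fixed part plus two appended windows. */
	p = malloc(sizeof(*p) + 2 * sizeof(struct __sched_tp_window));
	if (p == NULL)
		return NULL;

	p->tp.op = evl_install_tp;
	p->tp.nr_windows = 2;
	/* Partition #0 owns the first half of each 1 ms frame. */
	p->tp.windows[0].offset = (struct timespec){ 0, 0 };
	p->tp.windows[0].duration = (struct timespec){ 0, 500000 };
	p->tp.windows[0].ptid = 0;
	/* Partition #1 owns the second half. */
	p->tp.windows[1].offset = (struct timespec){ 0, 500000 };
	p->tp.windows[1].duration = (struct timespec){ 0, 500000 };
	p->tp.windows[1].ptid = 1;

	return p;	/* handed over via struct evl_sched_ctlreq */
}

/* A thread joining partition #1 at priority 10 would then pass: */
static const struct evl_sched_attrs tp_attrs = {
	.sched_policy = SCHED_TP,
	.sched_priority = 10,
	.sched_u.tp.__sched_partition = 1,	/* i.e. sched_tp_partition */
};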
kernel/evl/Kconfig
...
...
@@ -14,6 +14,30 @@ config EVL_SCHED_QUOTA
	  If in doubt, say N.

config EVL_SCHED_TP
	bool "Temporal partitioning policy"
	default n
	help
	  This option enables the SCHED_TP scheduling policy in the
	  EVL core.

	  This policy runs threads whose execution is confined to
	  dedicated time windows defined within a recurring time frame
	  or global period. The ARINC653 standard describes such a
	  scheduling policy.

	  If in doubt, say N.

config EVL_SCHED_TP_NR_PART
	int "Number of partitions"
	default 4
	range 1 1024
	depends on EVL_SCHED_TP
	help
	  Define the maximum number of temporal partitions the TP
	  scheduler may have to handle.

config EVL_RUNSTATS
	bool "Collect runtime statistics"
	default y
...
...
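As a configuration sketch (not part of this commit), enabling the policy with room for up to 8 partitions per CPU would boil down to the fragment below; CONFIG_EVL_SCHED_TP_NR_PART only sizes the per-CPU partition array in struct evl_sched_tp and bounds the window count accepted by do_tp_control(), the actual window layout being installed at runtime.

CONFIG_EVL_SCHED_TP=y
CONFIG_EVL_SCHED_TP_NR_PART=8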
kernel/evl/control.c
...
...
@@ -132,6 +132,56 @@ static int do_quota_control(struct evl_sched_ctlreq *ctl)
#endif
#ifdef CONFIG_EVL_SCHED_TP

static int do_tp_control(struct evl_sched_ctlreq *ctl)
{
	union evl_sched_ctlinfo *info = NULL, __user *u_infp;
	union evl_sched_ctlparam param, __user *u_ctlp;
	size_t len;
	int ret;

	u_ctlp = (typeof(u_ctlp))ctl->param;
	ret = raw_copy_from_user(&param.tp, &u_ctlp->tp, sizeof(param.tp));
	if (ret)
		return -EFAULT;

	if (ctl->info) {
		/* Quick check to prevent creepy memalloc. */
		if (param.tp.nr_windows > CONFIG_EVL_SCHED_TP_NR_PART)
			return -EINVAL;
		len = evl_tp_paramlen(&param.tp);
		info = evl_alloc(len);
		if (info == NULL)
			return -ENOMEM;
	}

	ret = evl_sched_tp.sched_control(ctl->cpu, &param, info);
	if (ret || info == NULL)
		goto out;

	u_infp = (typeof(u_infp))ctl->info;
	len = evl_tp_paramlen(&info->tp);
	ret = raw_copy_to_user(&u_infp->tp, &info->tp, len);
	if (ret)
		ret = -EFAULT;
out:
	if (info)
		evl_free(info);

	return ret;
}
#else
static int do_tp_control(struct evl_sched_ctlreq *ctl)
{
	return -EINVAL;
}
#endif

static int do_sched_control(struct evl_sched_ctlreq *ctl)
{
	int ret;
...
...
@@ -140,6 +190,9 @@ static int do_sched_control(struct evl_sched_ctlreq *ctl)
	case SCHED_QUOTA:
		ret = do_quota_control(ctl);
		break;
	case SCHED_TP:
		ret = do_tp_control(ctl);
		break;
	default:
		return -EINVAL;
	}
...
...
kernel/evl/sched/Makefile
...
...
@@ -9,3 +9,4 @@ evl-y := \
	weak.o

evl-$(CONFIG_EVL_SCHED_QUOTA) += quota.o
evl-$(CONFIG_EVL_SCHED_TP) += tp.o
kernel/evl/sched/tp.c
new file mode 100644
/*
* SPDX-License-Identifier: GPL-2.0
*
* Derived from Xenomai Cobalt, https://xenomai.org/
* Copyright (C) 2008, 2018 Philippe Gerum <rpm@xenomai.org>
*/
#include <evl/sched.h>
#include <evl/memory.h>
#include <uapi/evl/sched.h>

static void tp_schedule_next(struct evl_sched_tp *tp)
{
	struct evl_tp_window *w;
	struct evl_rq *rq;
	ktime_t t, now;
	int p_next;

	for (;;) {
		/*
		 * Switch to the next partition. Time holes in a
		 * global time frame are defined as partition windows
		 * assigned to part# -1, in which case the (always
		 * empty) idle queue will be polled for runnable
		 * threads. Therefore, we may assume that a window
		 * begins immediately after the previous one ends,
		 * which simplifies the implementation a lot.
		 */
		w = &tp->gps->pwins[tp->wnext];
		p_next = w->w_part;
		tp->tps = p_next < 0 ? &tp->idle : &tp->partitions[p_next];

		/* Schedule tick to advance to the next window. */
		tp->wnext = (tp->wnext + 1) % tp->gps->pwin_nr;
		w = &tp->gps->pwins[tp->wnext];
		t = ktime_add(tp->tf_start, w->w_offset);

		/*
		 * If we are late, make sure to remain within the
		 * bounds of a valid time frame before advancing to
		 * the next window. Otherwise, fix up by advancing to
		 * the next time frame immediately.
		 */
		now = evl_read_clock(&evl_mono_clock);
		if (ktime_compare(now, t) > 0) {
			t = ktime_add(tp->tf_start, tp->gps->tf_duration);
			if (ktime_compare(now, t) > 0) {
				tp->tf_start = t;
				tp->wnext = 0;
			}
		}
		evl_start_timer(&tp->tf_timer, t, EVL_INFINITE);
	}

	rq = container_of(tp, struct evl_rq, tp);
	evl_set_resched(rq);
}

static void tp_tick_handler(struct evl_timer *timer)
{
	struct evl_sched_tp *tp = container_of(timer, struct evl_sched_tp, tf_timer);

	/*
	 * Advance beginning date of time frame by a full period if we
	 * are processing the last window.
	 */
	if (tp->wnext + 1 == tp->gps->pwin_nr)
		tp->tf_start = ktime_add(tp->tf_start, tp->gps->tf_duration);

	tp_schedule_next(tp);
}

static void tp_init(struct evl_rq *rq)
{
	struct evl_sched_tp *tp = &rq->tp;
	int n;

	for (n = 0; n < CONFIG_EVL_SCHED_TP_NR_PART; n++)
		evl_init_schedq(&tp->partitions[n].runnable);

	tp->tps = NULL;
	tp->gps = NULL;
	INIT_LIST_HEAD(&tp->threads);
	evl_init_schedq(&tp->idle.runnable);
	evl_init_timer(&tp->tf_timer, &evl_mono_clock, tp_tick_handler,
		       rq, EVL_TIMER_IGRAVITY);
	evl_set_timer_name(&tp->tf_timer, "[tp-tick]");
}

static bool tp_setparam(struct evl_thread *thread,
			const union evl_sched_param *p)
{
	struct evl_rq *rq = evl_thread_rq(thread);

	thread->tps = &rq->tp.partitions[p->tp.ptid];
	thread->state &= ~T_WEAK;

	return evl_set_effective_thread_priority(thread, p->tp.prio);
}

static void tp_getparam(struct evl_thread *thread,
			union evl_sched_param *p)
{
	p->tp.prio = thread->cprio;
	p->tp.ptid = thread->tps - evl_thread_rq(thread)->tp.partitions;
}

static void tp_trackprio(struct evl_thread *thread,
			 const union evl_sched_param *p)
{
	/*
	 * The assigned partition never changes internally due to PI
	 * (see evl_track_thread_policy()), since this would be pretty
	 * wrong with respect to TP scheduling: i.e. we may not allow
	 * a thread from another partition to consume CPU time from
	 * the current one, despite this would help enforcing PI (see
	 * note). In any case, introducing resource contention between
	 * threads that belong to different partitions is utterly
	 * wrong in the first place. Only an explicit call to
	 * evl_set_thread_policy() may change the partition assigned
	 * to a thread. For that reason, a policy reset action only
	 * boils down to reinstating the base priority.
	 *
	 * NOTE: we do allow threads from lower scheduling classes to
	 * consume CPU time from the current window as a result of a
	 * PI boost, since this is aimed at speeding up the release of
	 * a synchronization object a TP thread needs.
	 */
	if (p) {
		/* We should never cross partition boundaries. */
		EVL_WARN_ON(CORE, thread->base_class == &evl_sched_tp &&
			    thread->tps - evl_thread_rq(thread)->tp.partitions
			    != p->tp.ptid);
		thread->cprio = p->tp.prio;
	} else
		thread->cprio = thread->bprio;
}

static void tp_ceilprio(struct evl_thread *thread, int prio)
{
	if (prio > EVL_TP_MAX_PRIO)
		prio = EVL_TP_MAX_PRIO;

	thread->cprio = prio;
}

static int tp_chkparam(struct evl_thread *thread,
		       const union evl_sched_param *p)
{
	struct evl_sched_tp *tp = &evl_thread_rq(thread)->tp;

	if (tp->gps == NULL ||
	    p->tp.prio < EVL_TP_MIN_PRIO ||
	    p->tp.prio > EVL_TP_MAX_PRIO)
		return -EINVAL;

	return 0;
}

static int tp_declare(struct evl_thread *thread,
		      const union evl_sched_param *p)
{
	struct evl_rq *rq = evl_thread_rq(thread);

	list_add_tail(&thread->tp_link, &rq->tp.threads);

	return 0;
}

static void tp_forget(struct evl_thread *thread)
{
	list_del(&thread->tp_link);
	thread->tps = NULL;
}

static void tp_enqueue(struct evl_thread *thread)
{
	evl_add_schedq_tail(&thread->tps->runnable, thread);
}

static void tp_dequeue(struct evl_thread *thread)
{
	evl_del_schedq(&thread->tps->runnable, thread);
}

static void tp_requeue(struct evl_thread *thread)
{
	evl_add_schedq(&thread->tps->runnable, thread);
}

static struct evl_thread *tp_pick(struct evl_rq *rq)
{
	/* Never pick a thread if we don't schedule partitions. */
	if (!evl_timer_is_running(&rq->tp.tf_timer))
		return NULL;

	return evl_get_schedq(&rq->tp.tps->runnable);
}

static void tp_migrate(struct evl_thread *thread, struct evl_rq *rq)
{
	union evl_sched_param param;

	/*
	 * Since our partition schedule is a per-rq property, it
	 * cannot apply to a thread that moves to another CPU
	 * anymore. So we upgrade that thread to the RT class when a
	 * CPU migration occurs. A subsequent call to
	 * __evl_set_thread_schedparam() may move it back to TP
	 * scheduling, with a partition assignment that fits the
	 * remote CPU's partition schedule.
	 */
	param.rt.prio = thread->cprio;
	__evl_set_thread_schedparam(thread, &evl_sched_rt, &param);
}

static ssize_t tp_show(struct evl_thread *thread,
		       char *buf, ssize_t count)
{
	int ptid = thread->tps - evl_thread_rq(thread)->tp.partitions;

	return snprintf(buf, count, "%d\n", ptid);
}

static void start_tp_schedule(struct evl_rq *rq)
{
	struct evl_sched_tp *tp = &rq->tp;

	if (tp->gps == NULL)
		return;

	tp->wnext = 0;
	tp->tf_start = evl_read_clock(&evl_mono_clock);
	tp_schedule_next(tp);
}

static void stop_tp_schedule(struct evl_rq *rq)
{
	struct evl_sched_tp *tp = &rq->tp;

	if (tp->gps)
		evl_stop_timer(&tp->tf_timer);
}

static struct evl_tp_schedule *
set_tp_schedule(struct evl_rq *rq, struct evl_tp_schedule *gps)
{
	struct evl_sched_tp *tp = &rq->tp;
	struct evl_thread *thread, *tmp;
	struct evl_tp_schedule *old_gps;
	union evl_sched_param param;

	if (EVL_WARN_ON(CORE, gps != NULL &&
			(gps->pwin_nr <= 0 || gps->pwins[0].w_offset != 0)))
		return tp->gps;

	stop_tp_schedule(rq);

	/*
	 * Move all TP threads on this scheduler to the RT class,
	 * until we call __evl_set_thread_schedparam() for them again.
	 */
	if (list_empty(&tp->threads))
		goto done;

	list_for_each_entry_safe(thread, tmp, &tp->threads, tp_link) {
		param.rt.prio = thread->cprio;
		__evl_set_thread_schedparam(thread, &evl_sched_rt, &param);
	}
done:
	old_gps = tp->gps;
	tp->gps = gps;

	return old_gps;
}

static struct evl_tp_schedule *get_tp_schedule(struct evl_rq *rq)
{
	struct evl_tp_schedule *gps = rq->tp.gps;

	if (gps == NULL)
		return NULL;

	atomic_inc(&gps->refcount);

	return gps;
}

static void put_tp_schedule(struct evl_tp_schedule *gps)
{
	if (atomic_dec_and_test(&gps->refcount))
		evl_free(gps);
}

static int tp_control(int cpu, union evl_sched_ctlparam *ctlp,
		      union evl_sched_ctlinfo *infp)
{
	struct evl_tp_ctlparam *pt = &ctlp->tp;
	ktime_t offset, duration, next_offset;
	struct evl_tp_schedule *gps, *ogps;
	struct __sched_tp_window *p, *pp;
	struct evl_tp_window *w, *pw;
	struct evl_tp_ctlinfo *it;
	unsigned long flags;
	struct evl_rq *rq;
	int n;

	if (cpu < 0 || !cpu_present(cpu) || !is_threading_cpu(cpu))
		return -EINVAL;

	xnlock_get_irqsave(&nklock, flags);

	rq = evl_cpu_rq(cpu);

	switch (pt->op) {
	case evl_install_tp:
		if (pt->nr_windows > 0)
			goto install_schedule;
		/* Fallback wanted. */
	case evl_uninstall_tp:
		gps = NULL;
		goto switch_schedule;
	case evl_start_tp:
		start_tp_schedule(rq);
		xnlock_put_irqrestore(&nklock, flags);
		return 0;
	case evl_stop_tp:
		stop_tp_schedule(rq);
		xnlock_put_irqrestore(&nklock, flags);