Commit 86a99529 authored by Philippe Gerum, committed by Jan Kiszka

cobalt/clock: pipeline: abstract handling of CLOCK_REALTIME

Dovetail allows the client core to share the common kernel clocks,
including CLOCK_REALTIME. This means the core does not have to
maintain the latter clock, but should hand over all requests to read
the clock and change its epoch to the corresponding in-band kernel
services instead. Conversely, Cobalt should keep on maintaining
CLOCK_REALTIME when running on top of the legacy I-pipe.

Abstract the management of CLOCK_REALTIME to enable such a split based
on the underlying IRQ pipeline layer.
Signed-off-by: Philippe Gerum <rpm@xenomai.org>
parent f580f120
......@@ -43,7 +43,7 @@ struct xnclock_gravity {
struct xnclock {
/** (ns) */
xnticks_t wallclock_offset;
xnsticks_t wallclock_offset;
/** (ns) */
xnticks_t resolution;
/** (raw clock ticks). */
......@@ -112,9 +112,6 @@ void xnclock_deregister(struct xnclock *clock);
void xnclock_tick(struct xnclock *clock);
void xnclock_adjust(struct xnclock *clock,
xnsticks_t delta);
void xnclock_core_local_shot(struct xnsched *sched);
void xnclock_core_remote_shot(struct xnsched *sched);
......@@ -319,6 +316,8 @@ static inline void xnclock_reset_gravity(struct xnclock *clock)
static inline xnticks_t xnclock_read_realtime(struct xnclock *clock)
{
if (likely(clock == &nkclock))
return pipeline_read_wallclock();
/*
* Return an adjusted value of the monotonic time with the
* translated system wallclock offset.
......@@ -326,6 +325,11 @@ static inline xnticks_t xnclock_read_realtime(struct xnclock *clock)
return xnclock_read_monotonic(clock) + xnclock_get_offset(clock);
}
void xnclock_apply_offset(struct xnclock *clock,
xnsticks_t delta_ns);
void xnclock_set_wallclock(xnticks_t epoch_ns);
unsigned long long xnclock_divrem_billion(unsigned long long value,
unsigned long *rem);
......
......@@ -8,6 +8,7 @@
#include <cobalt/uapi/kernel/types.h>
#include <cobalt/kernel/assert.h>
#include <linux/ktime.h>
#include <linux/errno.h>
struct timespec64;
......@@ -23,6 +24,16 @@ static inline u64 pipeline_read_cycle_counter(void)
return ktime_get_mono_fast_ns();
}
/*
 * Dovetail shares the common kernel clocks with the oob stage, so
 * CLOCK_REALTIME readings come straight from the in-band fast
 * accessor (NMI-safe, nanoseconds).
 */
static inline xnticks_t pipeline_read_wallclock(void)
{
	xnticks_t ns = ktime_get_real_fast_ns();

	return ns;
}
/*
 * With Dovetail, CLOCK_REALTIME is owned by the in-band kernel; its
 * epoch cannot be changed from this side, so reject the request and
 * let callers hand it over to the in-band services instead.
 */
static inline int pipeline_set_wallclock(xnticks_t epoch_ns)
{
	return -EOPNOTSUPP;
}
void pipeline_set_timer_shot(unsigned long cycles);
const char *pipeline_timer_name(void);
......
......@@ -6,6 +6,7 @@
#define _COBALT_KERNEL_IPIPE_CLOCK_H
#include <linux/ipipe_tickdev.h>
#include <cobalt/uapi/kernel/types.h>
struct timespec64;
......@@ -16,6 +17,10 @@ static inline u64 pipeline_read_cycle_counter(void)
return t;
}
xnticks_t pipeline_read_wallclock(void);
int pipeline_set_wallclock(xnticks_t epoch_ns);
static inline void pipeline_set_timer_shot(unsigned long cycles)
{
ipipe_timer_set(cycles);
......
......@@ -226,18 +226,28 @@ enqueue:
xntimer_enqueue(timer, q);
}
static void adjust_clock_timers(struct xnclock *clock, xnsticks_t delta)
void xnclock_apply_offset(struct xnclock *clock, xnsticks_t delta_ns)
{
struct xntimer *timer, *tmp;
struct list_head adjq;
struct xnsched *sched;
xnsticks_t delta;
xntimerq_it_t it;
unsigned int cpu;
xntimerh_t *h;
xntimerq_t *q;
atomic_only();
/*
* The (real-time) epoch just changed for the clock. Since
* timeout dates of timers are expressed as monotonic ticks
* internally, we need to apply the new offset to the
* monotonic clock to all outstanding timers based on the
* affected clock.
*/
INIT_LIST_HEAD(&adjq);
delta = xnclock_ns_to_ticks(clock, delta);
delta = xnclock_ns_to_ticks(clock, delta_ns);
for_each_online_cpu(cpu) {
sched = xnsched_struct(cpu);
......@@ -265,34 +275,28 @@ static void adjust_clock_timers(struct xnclock *clock, xnsticks_t delta)
xnclock_program_shot(clock, sched);
}
}
EXPORT_SYMBOL_GPL(xnclock_apply_offset);
/**
* @fn void xnclock_adjust(struct xnclock *clock, xnsticks_t delta)
* @brief Adjust a clock time.
*
* This service changes the epoch for the given clock by applying the
* specified tick delta on its wallclock offset.
*
* @param clock The clock to adjust.
*
* @param delta The adjustment value expressed in nanoseconds.
*
* @coretags{task-unrestricted, atomic-entry}
*
* @note Xenomai tracks the system time in @a nkclock, as a
* monotonously increasing count of ticks since the epoch. The epoch
* is initially the same as the underlying machine time.
*/
void xnclock_adjust(struct xnclock *clock, xnsticks_t delta)
void xnclock_set_wallclock(xnticks_t epoch_ns)
{
xnticks_t now;
xnsticks_t old_offset_ns, offset_ns;
spl_t s;
nkclock.wallclock_offset += delta;
nkvdso->wallclock_offset = nkclock.wallclock_offset;
now = xnclock_read_monotonic(clock) + nkclock.wallclock_offset;
adjust_clock_timers(clock, delta);
/*
* The epoch of CLOCK_REALTIME just changed. Since timeouts
* are expressed as monotonic ticks, we need to apply the
* wallclock-to-monotonic offset to all outstanding timers
* based on this clock.
*/
xnlock_get_irqsave(&nklock, s);
old_offset_ns = nkclock.wallclock_offset;
offset_ns = (xnsticks_t)(epoch_ns - xnclock_core_read_monotonic());
nkclock.wallclock_offset = offset_ns;
nkvdso->wallclock_offset = offset_ns;
xnclock_apply_offset(&nkclock, offset_ns - old_offset_ns);
xnlock_put_irqrestore(&nklock, s);
}
EXPORT_SYMBOL_GPL(xnclock_adjust);
EXPORT_SYMBOL_GPL(xnclock_set_wallclock);
xnticks_t xnclock_core_read_monotonic(void)
{
......@@ -464,7 +468,7 @@ static int clock_show(struct xnvfile_regular_iterator *it, void *data)
if (clock->id >= 0) /* External clock, print id. */
xnvfile_printf(it, "%7s: %d\n", "id", __COBALT_CLOCK_EXT(clock->id));
xnvfile_printf(it, "%7s: irq=%Ld kernel=%Ld user=%Ld\n", "gravity",
xnclock_ticks_to_ns(clock, xnclock_get_gravity(clock, irq)),
xnclock_ticks_to_ns(clock, xnclock_get_gravity(clock, kernel)),
......@@ -700,7 +704,7 @@ void xnclock_tick(struct xnclock *clock)
else
#endif
tmq = &xnclock_this_timerdata(clock)->q;
/*
* Optimisation: any local timer reprogramming triggered by
* invoked timer handlers can wait until we leave the tick
......@@ -807,11 +811,17 @@ void xnclock_cleanup(void)
int __init xnclock_init()
{
spl_t s;
#ifdef XNARCH_HAVE_NODIV_LLIMD
xnarch_init_u32frac(&bln_frac, 1, 1000000000);
#endif
pipeline_init_clock();
xnclock_reset_gravity(&nkclock);
xnlock_get_irqsave(&nklock, s);
nkclock.wallclock_offset = pipeline_read_wallclock() -
xnclock_core_read_monotonic();
xnlock_put_irqrestore(&nklock, s);
xnclock_register(&nkclock, &xnsched_realtime_cpus);
return 0;
......
......@@ -15,6 +15,7 @@
#include <cobalt/kernel/thread.h>
#include <cobalt/kernel/clock.h>
#include <cobalt/kernel/vdso.h>
#include <cobalt/kernel/init.h>
#include <rtdm/driver.h>
#include <trace/events/cobalt-core.h>
#include "../posix/process.h"
......@@ -526,6 +527,15 @@ void handle_inband_event(enum inband_event_type event, void *data)
}
}
/*
 * In-band notifier: the CLOCK_REALTIME epoch just changed. Propagate
 * the new wall clock reading to the Cobalt core clock, provided the
 * real-time core is up.
 */
void inband_clock_was_set(void)
{
	if (!realtime_core_enabled())
		return;

	xnclock_set_wallclock(ktime_get_real_fast_ns());
}
#ifdef CONFIG_MMU
static inline int disable_ondemand_memory(void)
{
......
......@@ -7,6 +7,8 @@
#include <cobalt/kernel/clock.h>
#include <cobalt/kernel/vdso.h>
#include <cobalt/kernel/arith.h>
#include <cobalt/kernel/timer.h>
#include <xenomai/posix/clock.h>
#include <pipeline/machine.h>
static unsigned long long clockfreq;
......@@ -121,6 +123,19 @@ int pipeline_get_host_time(struct timespec64 *tp)
#endif
}
/*
 * On the I-pipe, Cobalt maintains CLOCK_REALTIME itself: the wall
 * clock reading is the core monotonic time shifted by the current
 * wallclock offset of the core clock.
 */
xnticks_t pipeline_read_wallclock(void)
{
	xnticks_t mono_ns = xnclock_read_monotonic(&nkclock);

	return mono_ns + xnclock_get_offset(&nkclock);
}
EXPORT_SYMBOL_GPL(pipeline_read_wallclock);
/*
 * On the I-pipe, Cobalt owns CLOCK_REALTIME, so setting its epoch
 * amounts to updating the core clock's wallclock offset; this cannot
 * fail.
 */
int pipeline_set_wallclock(xnticks_t epoch_ns)
{
	xnclock_set_wallclock(epoch_ns);
	return 0;
}
void pipeline_update_clock_freq(unsigned long long freq)
{
spl_t s;
......
......@@ -184,9 +184,6 @@ int pipeline_install_tick_proxy(void)
per_cpu(ipipe_percpu.hrtimer_irq, 0), NULL, NULL, 0);
#endif /* CONFIG_XENO_OPT_STATS_IRQS */
nkclock.wallclock_offset =
ktime_to_ns(ktime_get_real()) - xnclock_read_monotonic(&nkclock);
#ifdef CONFIG_SMP
ret = ipipe_request_irq(&cobalt_pipeline.domain,
IPIPE_HRTIMER_IPI,
......
......@@ -142,18 +142,13 @@ COBALT_SYSCALL(clock_gettime, current,
int __cobalt_clock_settime(clockid_t clock_id, const struct timespec64 *ts)
{
int _ret, ret = 0;
xnticks_t now;
spl_t s;
if ((unsigned long)ts->tv_nsec >= ONE_BILLION)
return -EINVAL;
switch (clock_id) {
case CLOCK_REALTIME:
xnlock_get_irqsave(&nklock, s);
now = xnclock_read_realtime(&nkclock);
xnclock_adjust(&nkclock, (xnsticks_t) (ts2ns(ts) - now));
xnlock_put_irqrestore(&nklock, s);
ret = pipeline_set_wallclock(ts2ns(ts));
break;
default:
_ret = do_ext_clock(clock_id, set_time, ret, ts);
......@@ -163,7 +158,7 @@ int __cobalt_clock_settime(clockid_t clock_id, const struct timespec64 *ts)
trace_cobalt_clock_settime(clock_id, ts);
return 0;
return ret;
}
int __cobalt_clock_adjtime(clockid_t clock_id, struct __kernel_timex *tx)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment