Commit d8047b7a authored by Philippe Gerum

cobalt/kernel: convert legacy trace markers to kernel tracepoints

Rebase trace statements in the Cobalt core over the generic tracepoint
API. A few former trace markers which did not help in analysing the
dynamic behavior of the system have been dropped in the process.

This work is originally based on Jan Kiszka's trace events conversion
patch series for Xenomai 2.6.
parent 79423149
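
The trace_cobalt_*() calls visible in the diff below (trace_cobalt_irq_entry(), trace_cobalt_timer_expire(), and so on) come from TRACE_EVENT() declarations in the new <trace/events/cobalt-core.h> header, which is not part of this excerpt. The following is a minimal sketch of what one such declaration could look like, assuming an event that carries only the IRQ number; the system name, field layout and print format are illustrative guesses, not the actual contents of the patch.

/*
 * Sketch only -- not the cobalt-core.h shipped by this patch. It shows
 * how a legacy marker such as
 *
 *     trace_mark(xn_nucleus, irq_enter, "irq %u", irq);
 *
 * can be expressed as a static tracepoint, later invoked as
 * trace_cobalt_irq_entry(irq) in the interrupt handlers below.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM cobalt_core

#if !defined(_TRACE_COBALT_CORE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_COBALT_CORE_H

#include <linux/tracepoint.h>

TRACE_EVENT(cobalt_irq_entry,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq),
	TP_STRUCT__entry(
		__field(unsigned int, irq)
	),
	TP_fast_assign(
		__entry->irq = irq;
	),
	TP_printk("irq=%u", __entry->irq)
);

#endif /* _TRACE_COBALT_CORE_H */

/* The header file name (cobalt-core.h) differs from TRACE_SYSTEM. */
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE cobalt-core
#include <trace/define_trace.h>

Unlike the old markers, such tracepoints show up under /sys/kernel/debug/tracing/events/ and can be enabled individually at runtime, which is the practical gain of the conversion.
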
......@@ -152,6 +152,9 @@ int xnsynch_acquire(struct xnsynch *synch,
xnticks_t timeout,
xntmode_t timeout_mode);
struct xnthread *xnsynch_release(struct xnsynch *synch,
struct xnthread *thread);
struct xnthread *xnsynch_peek_pendq(struct xnsynch *synch);
int xnsynch_flush(struct xnsynch *synch, int reason);
......
......@@ -317,31 +317,6 @@ static inline int xnthread_try_grab(struct xnthread *thread,
return 1;
}
/*
* XXX: Mutual dependency issue with synch.h, we have to define
* xnsynch_release() here.
*/
static inline struct xnthread *
xnsynch_release(struct xnsynch *synch, struct xnthread *thread)
{
atomic_long_t *lockp;
xnhandle_t threadh;
XENO_BUGON(NUCLEUS, (synch->status & XNSYNCH_OWNER) == 0);
trace_mark(xn_nucleus, synch_release, "synch %p", synch);
if (unlikely(xnthread_test_state(thread, XNWEAK)))
__xnsynch_fixup_rescnt(thread);
lockp = xnsynch_fastlock(synch);
threadh = xnthread_handle(thread);
if (likely(xnsynch_fast_release(lockp, threadh)))
return NULL;
return __xnsynch_transfer_ownership(synch, thread);
}
static inline int normalize_priority(int prio)
{
return prio < MAX_RT_PRIO ? prio : MAX_RT_PRIO - 1;
......
......@@ -23,12 +23,9 @@
#include <linux/ipipe_trace.h>
#include <cobalt/uapi/kernel/trace.h>
#ifdef CONFIG_LTT
#include <linux/marker.h>
#else
/* TEMP */
#undef trace_mark
#define trace_mark(channel, ev, fmt, args...) do { } while (0)
#endif
static inline int xntrace_max_begin(unsigned long v)
{
......
......@@ -30,6 +30,7 @@
#include <cobalt/kernel/arith.h>
#include <cobalt/kernel/vdso.h>
#include <asm/xenomai/calibration.h>
#include <trace/events/cobalt-core.h>
unsigned long nktimerlat;
......@@ -307,9 +308,6 @@ void xnclock_adjust(struct xnclock *clock, xnsticks_t delta)
nkvdso->wallclock_offset = nkclock.wallclock_offset;
now = xnclock_read_monotonic(clock) + nkclock.wallclock_offset;
adjust_clock_timers(clock, delta);
trace_mark(xn_nucleus, clock_adjust, "clock %s, delta %Lu",
clock->name, delta);
}
EXPORT_SYMBOL_GPL(xnclock_adjust);
......@@ -482,8 +480,6 @@ int xnclock_register(struct xnclock *clock)
secondary_mode_only();
trace_mark(xn_nucleus, clock_register, "clock %s", clock->name);
/* Allocate the percpu timer queue slot. */
clock->timerdata = alloc_percpu(struct xntimerdata);
if (clock->timerdata == NULL)
......@@ -525,8 +521,6 @@ void xnclock_deregister(struct xnclock *clock)
secondary_mode_only();
trace_mark(xn_nucleus, clock_deregister, "clock %s", clock->name);
cleanup_clock_proc(clock);
for_each_online_cpu(cpu) {
......@@ -588,7 +582,7 @@ void xnclock_tick(struct xnclock *clock)
if (delta > (xnsticks_t)clock->gravity)
break;
trace_mark(xn_nucleus, timer_expire, "timer %p", timer);
trace_cobalt_timer_expire(timer);
xntimer_dequeue(timer, timerq);
xntimer_account_fired(timer);
......
......@@ -86,8 +86,6 @@ static void disable_timesource(void)
{
int cpu;
trace_mark(xn_nucleus, disable_timesource, MARK_NOARGS);
/*
* We must not hold the nklock while stopping the hardware
* timer, since this could cause deadlock situations to arise
......@@ -238,8 +236,6 @@ static __init int enable_timesource(void)
int ret, cpu, _cpu;
spl_t s;
trace_mark(xn_nucleus, enable_timesource, MARK_NOARGS);
#ifdef CONFIG_XENO_OPT_STATS
/*
* Only for statistical purpose, the timer interrupt is
......
......@@ -24,12 +24,12 @@
*/
#include <linux/mutex.h>
#include <cobalt/kernel/sched.h>
#include <cobalt/kernel/intr.h>
#include <cobalt/kernel/stat.h>
#include <cobalt/kernel/clock.h>
#include <cobalt/kernel/assert.h>
#include <trace/events/cobalt-core.h>
#define XNINTR_MAX_UNHANDLED 1000
......@@ -78,7 +78,7 @@ static inline void stat_counter_dec(void) {}
static inline void sync_stat_references(struct xnintr *intr) {}
#endif /* CONFIG_XENO_OPT_STATS */
static void xnintr_irq_handler(unsigned irq, void *cookie);
static void xnintr_irq_handler(unsigned int irq, void *cookie);
void xnintr_host_tick(struct xnsched *sched) /* Interrupts off. */
{
......@@ -110,9 +110,7 @@ void xnintr_core_clock_handler(void)
prev = xnstat_exectime_switch(sched, &statp->account);
xnstat_counter_inc(&statp->hits);
trace_mark(xn_nucleus, irq_enter, "irq %u",
per_cpu(ipipe_percpu.hrtimer_irq, cpu));
trace_mark(xn_nucleus, clock_tick, MARK_NOARGS);
trace_cobalt_clock_entry(per_cpu(ipipe_percpu.hrtimer_irq, cpu));
++sched->inesting;
sched->lflags |= XNINIRQ;
......@@ -121,6 +119,7 @@ void xnintr_core_clock_handler(void)
xnclock_tick(&nkclock);
xnlock_put(&nklock);
trace_cobalt_clock_exit(per_cpu(ipipe_percpu.hrtimer_irq, cpu));
xnstat_exectime_switch(sched, prev);
if (--sched->inesting == 0) {
......@@ -138,10 +137,6 @@ void xnintr_core_clock_handler(void)
if ((sched->lflags & XNHTICK) &&
xnthread_test_state(sched->curr, XNROOT))
xnintr_host_tick(sched);
/* We keep tracing the entry CPU, regardless of migration. */
trace_mark(xn_nucleus, irq_exit, "irq %u",
per_cpu(ipipe_percpu.hrtimer_irq, cpu));
}
/* Optional support for shared interrupts. */
......@@ -156,7 +151,7 @@ struct xnintr_irq {
static struct xnintr_irq xnirqs[IPIPE_NR_IRQS];
static inline struct xnintr *xnintr_shirq_first(unsigned irq)
static inline struct xnintr *xnintr_shirq_first(unsigned int irq)
{
return xnirqs[irq].handlers;
}
......@@ -170,7 +165,7 @@ static inline struct xnintr *xnintr_shirq_next(struct xnintr *prev)
* Low-level interrupt handler dispatching the user-defined ISRs for
* shared interrupts -- Called with interrupts off.
*/
static void xnintr_shirq_handler(unsigned irq, void *cookie)
static void xnintr_shirq_handler(unsigned int irq, void *cookie)
{
struct xnsched *sched = xnsched_current();
struct xnintr_irq *shirq = &xnirqs[irq];
......@@ -182,7 +177,7 @@ static void xnintr_shirq_handler(unsigned irq, void *cookie)
prev = xnstat_exectime_get_current(sched);
start = xnstat_exectime_now();
trace_mark(xn_nucleus, irq_enter, "irq %u", irq);
trace_cobalt_irq_entry(irq);
++sched->inesting;
sched->lflags |= XNINIRQ;
......@@ -231,14 +226,14 @@ static void xnintr_shirq_handler(unsigned irq, void *cookie)
xnsched_run();
}
trace_mark(xn_nucleus, irq_exit, "irq %u", irq);
trace_cobalt_irq_exit(irq);
}
/*
* Low-level interrupt handler dispatching the user-defined ISRs for
* shared edge-triggered interrupts -- Called with interrupts off.
*/
static void xnintr_edge_shirq_handler(unsigned irq, void *cookie)
static void xnintr_edge_shirq_handler(unsigned int irq, void *cookie)
{
const int MAX_EDGEIRQ_COUNTER = 128;
struct xnsched *sched = xnsched_current();
......@@ -251,7 +246,7 @@ static void xnintr_edge_shirq_handler(unsigned irq, void *cookie)
prev = xnstat_exectime_get_current(sched);
start = xnstat_exectime_now();
trace_mark(xn_nucleus, irq_enter, "irq %u", irq);
trace_cobalt_irq_entry(irq);
++sched->inesting;
sched->lflags |= XNINIRQ;
......@@ -312,7 +307,7 @@ static void xnintr_edge_shirq_handler(unsigned irq, void *cookie)
xnsched_run();
}
trace_mark(xn_nucleus, irq_exit, "irq %u", irq);
trace_cobalt_irq_exit(irq);
}
static inline int xnintr_irq_attach(struct xnintr *intr)
......@@ -433,7 +428,7 @@ static inline void xnintr_irq_detach(struct xnintr *intr)
* Low-level interrupt handler dispatching non-shared ISRs -- Called
* with interrupts off.
*/
static void xnintr_irq_handler(unsigned irq, void *cookie)
static void xnintr_irq_handler(unsigned int irq, void *cookie)
{
struct xnsched *sched = xnsched_current();
struct xnirqstat *statp;
......@@ -444,7 +439,7 @@ static void xnintr_irq_handler(unsigned irq, void *cookie)
prev = xnstat_exectime_get_current(sched);
start = xnstat_exectime_now();
trace_mark(xn_nucleus, irq_enter, "irq %u", irq);
trace_cobalt_irq_entry(irq);
++sched->inesting;
sched->lflags |= XNINIRQ;
......@@ -499,7 +494,7 @@ unlock_and_exit:
xnsched_run();
}
trace_mark(xn_nucleus, irq_exit, "irq %u", irq);
trace_cobalt_irq_exit(irq);
}
int __init xnintr_mount(void)
......@@ -522,7 +517,7 @@ static void clear_irqstats(struct xnintr *intr)
}
/**
* @fn int xnintr_init(struct xnintr *intr,const char *name,unsigned irq,xnisr_t isr,xniack_t iack,int flags)
* @fn int xnintr_init(struct xnintr *intr,const char *name,unsigned int irq,xnisr_t isr,xniack_t iack,int flags)
* @brief Initialize an interrupt object.
*
* Associates an interrupt object with an IRQ line.
......@@ -693,9 +688,6 @@ int xnintr_attach(struct xnintr *intr, void *cookie)
secondary_mode_only();
trace_mark(xn_nucleus, irq_attach, "irq %u name %s",
intr->irq, intr->name);
intr->cookie = cookie;
clear_irqstats(intr);
......@@ -745,8 +737,6 @@ void xnintr_detach(struct xnintr *intr)
{
secondary_mode_only();
trace_mark(xn_nucleus, irq_detach, "irq %u", intr->irq);
mutex_lock(&intrlock);
if (intr->flags & XN_ISR_ATTACHED) {
......@@ -775,7 +765,7 @@ EXPORT_SYMBOL_GPL(xnintr_detach);
void xnintr_enable(struct xnintr *intr)
{
secondary_mode_only();
trace_mark(xn_nucleus, irq_enable, "irq %u", intr->irq);
trace_cobalt_irq_enable(intr->irq);
ipipe_enable_irq(intr->irq);
}
EXPORT_SYMBOL_GPL(xnintr_enable);
......@@ -797,7 +787,7 @@ EXPORT_SYMBOL_GPL(xnintr_enable);
void xnintr_disable(struct xnintr *intr)
{
secondary_mode_only();
trace_mark(xn_nucleus, irq_disable, "irq %u", intr->irq);
trace_cobalt_irq_disable(intr->irq);
ipipe_disable_irq(intr->irq);
}
EXPORT_SYMBOL_GPL(xnintr_disable);
......@@ -823,9 +813,6 @@ EXPORT_SYMBOL_GPL(xnintr_disable);
void xnintr_affinity(struct xnintr *intr, cpumask_t cpumask)
{
secondary_mode_only();
trace_mark(xn_nucleus, irq_affinity, "irq %u %lu",
intr->irq, *(unsigned long *)&cpumask);
#ifdef CONFIG_SMP
ipipe_set_irq_affinity(intr->irq, cpumask);
#endif
......
......@@ -25,6 +25,8 @@
#include <cobalt/kernel/heap.h>
#include <cobalt/kernel/shadow.h>
#include <cobalt/kernel/arith.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cobalt-core.h>
DEFINE_PER_CPU(struct xnsched, nksched);
EXPORT_PER_CPU_SYMBOL_GPL(nksched);
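
The CREATE_TRACE_POINTS define a few lines above follows the standard tracepoint pattern: exactly one compilation unit (the scheduler file here) defines it before including the event header, so that the TRACE_EVENT() declarations expand into the real tracepoint definitions; the other files touched by this patch include <trace/events/cobalt-core.h> plainly and only pick up the inline trace_*() wrappers. A hedged sketch of that split, using made-up names (foo.h, trace_foo_irq) rather than anything from this patch:

/* one_file.c -- the single translation unit that instantiates the events */
#define CREATE_TRACE_POINTS
#include <trace/events/foo.h>

/* every_other_user.c -- plain include, gets the static inline trace_foo_irq() */
#include <trace/events/foo.h>

static void some_handler(unsigned int irq)
{
	trace_foo_irq(irq);	/* near-zero cost unless the event is enabled */
}

Defining CREATE_TRACE_POINTS in more than one file would produce duplicate tracepoint symbols at link time, which is why only this file carries it.
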
......@@ -108,9 +110,7 @@ static void watchdog_handler(struct xntimer *timer)
if (likely(++sched->wdcount < wd_timeout_arg))
return;
trace_mark(xn_nucleus, watchdog_signal,
"thread %p thread_name %s",
curr, xnthread_name(curr));
trace_cobalt_watchdog_signal(curr);
if (xnthread_test_state(curr, XNUSER)) {
printk(XENO_WARN "watchdog triggered on CPU #%d -- runaway thread "
......@@ -721,7 +721,7 @@ static inline void leave_root(struct xnthread *root)
void __xnsched_run_handler(void) /* hw interrupts off. */
{
trace_mark(xn_nucleus, sched_remote, MARK_NOARGS);
trace_cobalt_schedule_remote(xnsched_current());
xnsched_run();
}
......@@ -734,7 +734,7 @@ int __xnsched_run(struct xnsched *sched)
if (xnarch_escalate())
return 0;
trace_mark(xn_nucleus, sched, MARK_NOARGS);
trace_cobalt_schedule(sched);
xnlock_get_irqsave(&nklock, s);
......@@ -758,11 +758,7 @@ reschedule:
prev = curr;
trace_mark(xn_nucleus, sched_switch,
"prev %p prev_name %s "
"next %p next_name %s",
prev, xnthread_name(prev),
next, xnthread_name(next));
trace_cobalt_switch_context(prev, next);
if (xnthread_test_state(next, XNROOT))
xnsched_reset_watchdog(sched);
......
......@@ -58,6 +58,7 @@
#include <cobalt/kernel/ppd.h>
#include <cobalt/kernel/vdso.h>
#include <cobalt/kernel/thread.h>
#include <trace/events/cobalt-core.h>
#include <asm/xenomai/features.h>
#include <asm/xenomai/syscall.h>
#include <asm-generic/xenomai/mayday.h>
......@@ -259,8 +260,7 @@ static void lostage_task_wakeup(struct ipipe_work_header *work)
rq = container_of(work, struct lostage_wakeup, work);
p = rq->task;
trace_mark(xn_nucleus, lostage_wakeup, "comm %s pid %d",
p->comm, p->pid);
trace_cobalt_lostage_wakeup(p);
wake_up_process(p);
}
......@@ -275,6 +275,8 @@ static void post_wakeup(struct task_struct *p)
.task = p,
};
trace_cobalt_lostage_request("wakeup", wakework.task);
ipipe_post_work_root(&wakework, work);
}
......@@ -312,8 +314,7 @@ static void lostage_task_signal(struct ipipe_work_header *work)
signo = rq->signo;
trace_mark(xn_nucleus, lostage_signal, "comm %s pid %d sig %d",
p->comm, p->pid, signo);
trace_cobalt_lostage_signal(p, signo);
if (signo == SIGSHADOW || signo == SIGDEBUG) {
memset(&si, '\0', sizeof(si));
......@@ -484,9 +485,7 @@ int xnshadow_harden(void)
if (signal_pending(p))
return -ERESTARTSYS;
trace_mark(xn_nucleus, shadow_gohard,
"thread %p name %s comm %s",
thread, xnthread_name(thread), p->comm);
trace_cobalt_shadow_gohard(thread);
xnthread_clear_sync_window(thread, XNRELAX);
......@@ -504,8 +503,7 @@ int xnshadow_harden(void)
xnsched_resched_after_unlocked_switch();
xnthread_test_cancel();
trace_mark(xn_nucleus, shadow_hardened, "thread %p name %s",
thread, xnthread_name(thread));
trace_cobalt_shadow_hardened(thread);
/*
* Recheck pending signals once again. As we block task
......@@ -561,8 +559,7 @@ void xnshadow_relax(int notify, int reason)
* domain to the Linux domain. This will cause the Linux task
* to resume using the register state of the shadow thread.
*/
trace_mark(xn_nucleus, shadow_gorelax, "thread %p thread_name %s",
thread, xnthread_name(thread));
trace_cobalt_shadow_gorelax(thread);
/*
* If you intend to change the following interrupt-free
......@@ -623,9 +620,7 @@ void xnshadow_relax(int notify, int reason)
}
#endif
trace_mark(xn_nucleus, shadow_relaxed,
"thread %p thread_name %s comm %s",
thread, xnthread_name(thread), p->comm);
trace_cobalt_shadow_relaxed(thread);
}
EXPORT_SYMBOL_GPL(xnshadow_relax);
......@@ -935,10 +930,7 @@ int xnshadow_map_user(struct xnthread *thread,
__xn_put_user(xnheap_mapped_offset(sem_heap, u_window), u_window_offset);
pin_to_initial_cpu(thread);
trace_mark(xn_nucleus, shadow_map_user,
"thread %p thread_name %s pid %d priority %d",
thread, xnthread_name(thread), current->pid,
xnthread_base_priority(thread));
trace_cobalt_shadow_map(thread);
/*
* CAUTION: we enable the pipeline notifier only when our
......@@ -1001,6 +993,8 @@ static inline void wakeup_parent(struct completion *done)
.done = done,
};
trace_cobalt_lostage_request("wakeup", current);
ipipe_post_work_root(&wakework, work);
}
......@@ -1061,10 +1055,7 @@ int xnshadow_map_kernel(struct xnthread *thread, struct completion *done)
thread->u_window = NULL;
pin_to_initial_cpu(thread);
trace_mark(xn_nucleus, shadow_map_kernel,
"thread %p thread_name %s pid %d priority %d",
thread, xnthread_name(thread), p->pid,
xnthread_base_priority(thread));
trace_cobalt_shadow_map(thread);
xnthread_init_shadow_tcb(thread, p);
xnthread_suspend(thread, XNRELAX, XN_INFINITE, XN_RELATIVE, NULL);
......@@ -1113,9 +1104,7 @@ EXPORT_SYMBOL_GPL(xnshadow_map_kernel);
void xnshadow_finalize(struct xnthread *thread)
{
trace_mark(xn_nucleus, shadow_finalize,
"thread %p thread_name %s pid %d",
thread, xnthread_name(thread), xnthread_host_pid(thread));
trace_cobalt_shadow_finalize(thread);
xnthread_run_handler_stack(thread, finalize_thread);
}
......@@ -1684,6 +1673,8 @@ void xnshadow_send_sig(struct xnthread *thread, int sig, int arg)
.sigval = arg,
};
trace_cobalt_lostage_request("signal", sigwork.task);
ipipe_post_work_root(&sigwork, work);
}
EXPORT_SYMBOL_GPL(xnshadow_send_sig);
......@@ -1878,10 +1869,7 @@ static int handle_head_syscall(struct ipipe_domain *ipd, struct pt_regs *regs)
muxid = __xn_mux_id(regs);
muxop = __xn_mux_op(regs);
trace_mark(xn_nucleus, syscall_histage_entry,
"thread %p thread_name %s muxid %d muxop %d",
thread, thread ? xnthread_name(thread) : NULL,
muxid, muxop);
trace_cobalt_head_sysentry(thread, muxid, muxop);
if (muxid < 0 || muxid >= NR_PERSONALITIES || muxop < 0)
goto bad_syscall;
......@@ -2024,8 +2012,7 @@ ret_handled:
xnthread_sync_window(thread);
}
trace_mark(xn_nucleus, syscall_histage_exit,
"ret %ld", __xn_reg_rval(regs));
trace_cobalt_head_sysexit(thread, __xn_reg_rval(regs));
return EVENT_STOP;
......@@ -2090,11 +2077,7 @@ static int handle_root_syscall(struct ipipe_domain *ipd, struct pt_regs *regs)
muxid = __xn_mux_id(regs);
muxop = __xn_mux_op(regs);
trace_mark(xn_nucleus, syscall_lostage_entry,
"thread %p thread_name %s muxid %d muxop %d",
xnsched_current_thread(),
xnthread_name(xnsched_current_thread()),
muxid, muxop);
trace_cobalt_root_sysentry(thread, muxid, muxop);
/* Processing a Xenomai syscall. */
......@@ -2166,8 +2149,7 @@ ret_handled:
xnthread_sync_window(thread);
}
trace_mark(xn_nucleus, syscall_lostage_exit,
"ret %ld", __xn_reg_rval(regs));
trace_cobalt_root_sysexit(thread, __xn_reg_rval(regs));
return EVENT_STOP;
}
......@@ -2193,11 +2175,9 @@ static int handle_taskexit_event(struct task_struct *p) /* p == current */
secondary_mode_only();
thread = xnshadow_current();
XENO_BUGON(NUCLEUS, thread == NULL);
trace_cobalt_shadow_unmap(thread);
personality = thread->personality;
trace_mark(xn_nucleus, shadow_exit, "thread %p thread_name %s",
thread, xnthread_name(thread));
if (xnthread_test_state(thread, XNDEBUG))
unlock_timers();
......@@ -2501,8 +2481,8 @@ int ipipe_kevent_hook(int kevent, void *data)
static inline int handle_exception(struct ipipe_trap_data *d)
{
struct xnsched *sched;
struct xnthread *thread;
struct xnsched *sched;
sched = xnsched_current();
thread = sched->curr;
......@@ -2510,11 +2490,7 @@ static inline int handle_exception(struct ipipe_trap_data *d)
if (xnthread_test_state(thread, XNROOT))
return 0;
trace_mark(xn_nucleus, thread_fault,
"thread %p thread_name %s ip %p type 0x%x",
thread, xnthread_name(thread),
(void *)xnarch_fault_pc(d),
xnarch_fault_trap(d));
trace_cobalt_thread_fault(thread, d);
if (xnarch_fault_fpu_p(d)) {
#ifdef CONFIG_XENO_HW_FPU
......
......@@ -26,6 +26,7 @@
#include <cobalt/kernel/thread.h>
#include <cobalt/kernel/clock.h>
#include <cobalt/kernel/shadow.h>
#include <trace/events/cobalt-core.h>
/**
* @fn void xnsynch_init(struct xnsynch *synch, int flags,
......@@ -141,9 +142,7 @@ int xnsynch_sleep_on(struct xnsynch *synch, xnticks_t timeout,
xnlock_get_irqsave(&nklock, s);
trace_mark(xn_nucleus, synch_sleepon,
"thread %p thread_name %s synch %p",
thread, xnthread_name(thread), synch);
trace_cobalt_synch_sleepon(synch, thread);
if ((synch->status & XNSYNCH_PRIO) == 0) /* i.e. FIFO */
list_add_tail(&thread->plink, &synch->pendq);
......@@ -193,12 +192,10 @@ struct xnthread *xnsynch_wakeup_one_sleeper(struct xnsynch *synch)
goto out;
}
trace_cobalt_synch_wakeup(synch);
thread = list_first_entry(&synch->pendq, struct xnthread, plink);
list_del(&thread->plink);
thread->wchan = NULL;
trace_mark(xn_nucleus, synch_wakeup_one,
"thread %p thread_name %s synch %p",
thread, xnthread_name(thread), synch);
xnthread_resume(thread, XNPEND);
out:
xnlock_put_irqrestore(&nklock, s);
......@@ -220,14 +217,13 @@ int xnsynch_wakeup_many_sleepers(struct xnsynch *synch, int nr)
if (list_empty(&synch->pendq))
goto out;
trace_cobalt_synch_wakeup_many(synch);
list_for_each_entry_safe(thread, tmp, &synch->pendq, plink) {
if (nwakeups++ >= nr)
break;
list_del(&thread->plink);
thread->wchan = NULL;
trace_mark(xn_nucleus, synch_wakeup_many,
"thread %p thread_name %s synch %p",
thread, xnthread_name(thread), synch);
xnthread_resume(thread, XNPEND);
}
out:
......@@ -267,11 +263,9 @@ void xnsynch_wakeup_this_sleeper(struct xnsynch *synch, struct xnthread *sleeper
xnlock_get_irqsave(&nklock, s);
trace_cobalt_synch_wakeup(synch);
list_del(&sleeper->plink);
sleeper->wchan = NULL;
trace_mark(xn_nucleus, synch_wakeup_this,
"thread %p thread_name %s synch %p",
sleeper, xnthread_name(sleeper), synch);
xnthread_resume(sleeper, XNPEND);
xnlock_put_irqrestore(&nklock, s);
......@@ -349,7 +343,7 @@ int xnsynch_acquire(struct xnsynch *synch, xnticks_t timeout,
XENO_BUGON(NUCLEUS, (synch->status & XNSYNCH_OWNER) == 0);
trace_mark(xn_nucleus, synch_acquire, "synch %p", synch);
trace_cobalt_synch_acquire(synch, thread);
redo:
fastlock = atomic_long_cmpxchg(lockp, XN_NO_HANDLE, threadh);
......@@ -505,6 +499,27 @@ EXPORT_SYMBOL_GPL(xnsynch_acquire);
*
* @remark Tags: none.
*/
struct xnthread *xnsynch_release(struct xnsynch *synch,
struct xnthread *thread)
{
atomic_long_t *lockp;
xnhandle_t threadh;