Commit 67c30d92 authored by Philippe Gerum

cobalt/thread: sanitize header, drop pedantic accessors

parent 83ffe382
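
Note on the pattern applied throughout the diff below: trivial accessor
macros are dropped in favor of direct member access, while the few accessors
worth keeping are promoted to type-checked static inline functions. A minimal
sketch of the two directions, condensed from the header hunks (not the
literal code):

	/* Before: one-liner macros forwarding to struct members. */
	#define xnthread_state_flags(thread)	((thread)->state)
	#define xnthread_handle(thread)		((thread)->registry.handle)

	/* After: callers either dereference the member directly
	 * (thread->handle), or call a const-correct inline helper
	 * the compiler can type-check. */
	static inline int xnthread_get_state(const struct xnthread *thread)
	{
		return thread->state;
	}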
@@ -101,7 +101,7 @@ static inline struct xnthread *xnsynch_owner(struct xnsynch *synch)
#define xnsynch_fastlock(synch) ((synch)->fastlock)
#define xnsynch_fastlock_p(synch) ((synch)->fastlock != NULL)
#define xnsynch_owner_check(synch, thread) \
- xnsynch_fast_owner_check((synch)->fastlock, xnthread_handle(thread))
+ xnsynch_fast_owner_check((synch)->fastlock, thread->handle)
#define xnsynch_fast_is_claimed(fastlock) \
xnhandle_test_spare(fastlock, XNSYNCH_FLCLAIM)
......
@@ -119,7 +119,7 @@ struct xnthread {
*/
int wprio;
- unsigned long schedlck; /** Scheduler lock count. */
+ int lock_count; /** Scheduler lock count. */
/**
* Thread holder in xnsched runnable queue. Prioritized by
@@ -146,7 +146,7 @@ struct xnthread {
struct xnsynch *wwake; /* Wait channel the thread was resumed from */
- int hrescnt; /* Held resources count */
+ int res_count; /* Held resources count */
struct xntimer rtimer; /* Resource timer */
@@ -167,16 +167,9 @@ struct xnthread {
struct xnselector *selector; /* For select. */
int imode; /* Initial mode */
+ xnhandle_t handle; /* Handle in registry */
- struct xnsched_class *init_class; /* Initial scheduling class */
- union xnsched_policy_param init_schedparam; /* Initial scheduling parameters */
- struct {
- xnhandle_t handle; /* Handle in registry */
- const char *waitkey; /* Pended key */
- } registry;
+ const char *waitkey; /* Pended key */
char name[XNOBJECT_NAME_LEN]; /* Symbolic name of thread */
@@ -195,11 +188,10 @@ struct xnthread {
struct xnsynch join_synch;
};
- #define xnthread_name(thread) ((thread)->name)
- #define xnthread_clear_name(thread) do { *(thread)->name = 0; } while(0)
- #define xnthread_sched(thread) ((thread)->sched)
- #define xnthread_start_time(thread) ((thread)->stime)
- #define xnthread_state_flags(thread) ((thread)->state)
+ static inline int xnthread_get_state(const struct xnthread *thread)
+ {
+ return thread->state;
+ }
static inline int xnthread_test_state(struct xnthread *thread, int bits)
{
@@ -236,28 +228,28 @@ static inline struct xnarchtcb *xnthread_archtcb(struct xnthread *thread)
return &thread->tcb;
}
- #define xnthread_lock_count(thread) ((thread)->schedlck)
- #define xnthread_init_schedparam(thread) ((thread)->init_schedparam)
- #define xnthread_base_priority(thread) ((thread)->bprio)
- #define xnthread_current_priority(thread) ((thread)->cprio)
- #define xnthread_init_class(thread) ((thread)->init_class)
- #define xnthread_base_class(thread) ((thread)->base_class)
- #define xnthread_sched_class(thread) ((thread)->sched_class)
- #define xnthread_time_slice(thread) ((thread)->rrperiod)
- #define xnthread_timeout(thread) xntimer_get_timeout(&(thread)->rtimer)
- #define xnthread_handle(thread) ((thread)->registry.handle)
- #define xnthread_host_task(thread) (xnthread_archtcb(thread)->core.host_task)
- #define xnthread_host_pid(thread) (xnthread_test_state((thread),XNROOT) ? 0 : \
- xnthread_archtcb(thread)->core.host_task->pid)
- #define xnthread_host_mm(thread) (xnthread_host_task(thread)->mm)
- #define xnthread_affinity(thread) ((thread)->affinity)
- #define xnthread_affine_p(thread, cpu) cpu_isset(cpu, (thread)->affinity)
- #define xnthread_get_exectime(thread) xnstat_exectime_get_total(&(thread)->stat.account)
- #define xnthread_get_lastswitch(thread) xnstat_exectime_get_last_switch((thread)->sched)
- #define xnthread_inc_rescnt(thread) ({ (thread)->hrescnt++; })
- #define xnthread_dec_rescnt(thread) ({ --(thread)->hrescnt; })
- #define xnthread_get_rescnt(thread) ((thread)->hrescnt)
- #define xnthread_personality(thread) ((thread)->personality)
+ static inline int xnthread_base_priority(const struct xnthread *thread)
+ {
+ return thread->bprio;
+ }
+ static inline int xnthread_current_priority(const struct xnthread *thread)
+ {
+ return thread->cprio;
+ }
+ static inline struct task_struct *xnthread_host_task(struct xnthread *thread)
+ {
+ return xnthread_archtcb(thread)->core.host_task;
+ }
+ static inline pid_t xnthread_host_pid(struct xnthread *thread)
+ {
+ if (xnthread_test_state(thread, XNROOT))
+ return 0;
+ return xnthread_host_task(thread)->pid;
+ }
#define xnthread_for_each_claimed(__pos, __thread) \
list_for_each_entry(__pos, &(__thread)->claimq, link)
@@ -291,14 +283,14 @@ struct xnthread_wait_context *xnthread_get_wait_context(struct xnthread *thread)
static inline
int xnthread_register(struct xnthread *thread, const char *name)
{
- return xnregistry_enter(name, thread, &xnthread_handle(thread), NULL);
+ return xnregistry_enter(name, thread, &thread->handle, NULL);
}
static inline
struct xnthread *xnthread_lookup(xnhandle_t threadh)
{
struct xnthread *thread = xnregistry_lookup(threadh, NULL);
- return thread && xnthread_handle(thread) == threadh ? thread : NULL;
+ return thread && thread->handle == threadh ? thread : NULL;
}
static inline void xnthread_sync_window(struct xnthread *thread)
@@ -338,7 +330,7 @@ static inline int xnthread_try_grab(struct xnthread *thread,
xnsynch_set_owner(synch, thread);
if (xnthread_test_state(thread, XNWEAK))
- xnthread_inc_rescnt(thread);
+ thread->res_count++;
return 1;
}
......
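
The header hunks above also flatten the registry sub-struct of struct
xnthread into two direct members, which drives most of the call-site churn
in the files that follow. A condensed view of the layout change (field
comments as in the header):

	/* Before */
	struct xnthread {
		...
		struct {
			xnhandle_t handle;	/* Handle in registry */
			const char *waitkey;	/* Pended key */
		} registry;
		...
	};

	/* After */
	struct xnthread {
		...
		xnhandle_t handle;	/* Handle in registry */
		const char *waitkey;	/* Pended key */
		...
	};

Call sites update mechanically: thread->registry.handle becomes
thread->handle, and xnthread_handle(thread) becomes thread->handle.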
@@ -73,8 +73,8 @@ void __xnsys_fatal(const char *format, ...)
xnthread_host_pid(thread),
pbuf,
xnthread_get_timeout(thread, now),
- xnthread_state_flags(thread),
- xnthread_name(thread));
+ xnthread_get_state(thread),
+ thread->name);
}
}
......
@@ -126,11 +126,11 @@ static int monitor_enter(xnhandle_t handle, struct xnthread *curr)
*
* NOTE: monitors do not support recursive entries.
*/
- ret = xnsynch_fast_acquire(mon->gate.fastlock, xnthread_handle(curr));
+ ret = xnsynch_fast_acquire(mon->gate.fastlock, curr->handle);
switch(ret) {
case 0:
if (xnthread_test_state(curr, XNWEAK))
- xnthread_inc_rescnt(curr);
+ curr->res_count++;
break;
default:
/* Nah, we really have to wait. */
......
@@ -182,7 +182,7 @@ int cobalt_mutex_timedlock_break(struct cobalt_mutex *mutex,
int ret;
/* We need a valid thread handle for the fast lock. */
- if (xnthread_handle(curr) == XN_NO_HANDLE)
+ if (curr->handle == XN_NO_HANDLE)
return -EPERM;
ret = cobalt_mutex_acquire(curr, mutex, timed, u_ts);
@@ -333,12 +333,11 @@ COBALT_SYSCALL(mutex_trylock, primary,
goto err_unlock;
}
- err = xnsynch_fast_acquire(mutex->synchbase.fastlock,
- xnthread_handle(curr));
+ err = xnsynch_fast_acquire(mutex->synchbase.fastlock, curr->handle);
switch(err) {
case 0:
if (xnthread_test_state(curr, XNWEAK))
- xnthread_inc_rescnt(curr);
+ curr->res_count++;
break;
/* This should not happen, as recursive mutexes are handled in
......
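
The mutex hunks above rely on the fast-lock protocol now spelled out at the
call sites: the lock word stores the owner's registry handle, so a thread
without a valid handle cannot encode ownership and must be rejected up
front. A condensed sketch of the rule, stitched from the hunks (not the
complete locking path):

	if (curr->handle == XN_NO_HANDLE)
		return -EPERM;	/* no handle, nothing to store in the lock word */

	ret = xnsynch_fast_acquire(mutex->synchbase.fastlock, curr->handle);
	if (ret == 0 && xnthread_test_state(curr, XNWEAK))
		curr->res_count++;	/* account the held resource */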
@@ -1183,8 +1183,8 @@ no_ptrace:
show_stack(xnthread_host_task(next), NULL);
xnsys_fatal
("hardened thread %s[%d] running in Linux domain?! "
"(status=0x%lx, sig=%d, prev=%s[%d])",
next->name, next_task->pid, xnthread_state_flags(next),
"(status=0x%x, sig=%d, prev=%s[%d])",
next->name, next_task->pid, xnthread_get_state(next),
signal_pending(next_task), prev_task->comm, prev_task->pid);
} else if (!(next_task->ptrace & PT_PTRACED) &&
/*
@@ -1197,8 +1197,8 @@ no_ptrace:
show_stack(xnthread_host_task(next), NULL);
xnsys_fatal
("blocked thread %s[%d] rescheduled?! "
"(status=0x%lx, sig=%d, prev=%s[%d])",
next->name, next_task->pid, xnthread_state_flags(next),
"(status=0x%x, sig=%d, prev=%s[%d])",
next->name, next_task->pid, xnthread_get_state(next),
signal_pending(next_task), prev_task->comm, prev_task->pid);
}
}
......
@@ -231,7 +231,7 @@ done:
sigs = 1;
prepare_for_signal(p, thread, regs, sysflags);
} else if (xnthread_test_state(thread, XNWEAK) &&
- xnthread_get_rescnt(thread) == 0) {
+ thread->res_count == 0) {
if (switched)
switched = 0;
else
@@ -365,7 +365,7 @@ restart:
sigs = 1;
prepare_for_signal(p, thread, regs, sysflags);
} else if (xnthread_test_state(thread, XNWEAK) &&
- xnthread_get_rescnt(thread) == 0)
+ thread->res_count == 0)
sysflags |= __xn_exec_switchback;
}
if (!sigs && (sysflags & __xn_exec_switchback) != 0
@@ -502,7 +502,7 @@ static COBALT_SYSCALL(get_current, current,
if (cur == NULL)
return -EPERM;
- return __xn_safe_copy_to_user(u_handle, &xnthread_handle(cur),
+ return __xn_safe_copy_to_user(u_handle, &cur->handle,
sizeof(*u_handle));
}
......
@@ -257,7 +257,7 @@ pthread_setschedparam_ex(struct cobalt_thread *thread,
goto out;
}
- tslice = xnthread_time_slice(&thread->threadbase);
+ tslice = thread->threadbase.rrperiod;
sched_class = cobalt_sched_policy_param(&param, policy,
param_ex, &tslice);
if (sched_class == NULL) {
@@ -295,14 +295,14 @@ pthread_getschedparam_ex(struct cobalt_thread *thread,
}
base_thread = &thread->threadbase;
- base_class = xnthread_base_class(base_thread);
+ base_class = base_thread->base_class;
*policy_r = thread->sched_u_policy;
prio = xnthread_base_priority(base_thread);
param_ex->sched_priority = prio;
if (base_class == &xnsched_class_rt) {
if (xnthread_test_state(base_thread, XNRRB))
- ns2ts(&param_ex->sched_rr_quantum, xnthread_time_slice(base_thread));
+ ns2ts(&param_ex->sched_rr_quantum, base_thread->rrperiod);
goto unlock_and_exit;
}
@@ -648,7 +648,7 @@ COBALT_SYSCALL(thread_setname, current,
return -ESRCH;
}
- ksformat(xnthread_name(&thread->threadbase),
+ ksformat(thread->threadbase.name,
XNOBJECT_NAME_LEN - 1, "%s", name);
p = xnthread_host_task(&thread->threadbase);
get_task_struct(p);
@@ -762,19 +762,20 @@ COBALT_SYSCALL(thread_getstat, current,
/* We have to hold the nklock to keep most values consistent. */
stat.cpu = xnsched_cpu(thread->sched);
stat.cprio = xnthread_current_priority(thread);
- xtime = xnthread_get_exectime(thread);
- if (xnthread_sched(thread)->curr == thread)
- xtime += xnstat_exectime_now() - xnthread_get_lastswitch(thread);
+ xtime = xnstat_exectime_get_total(&thread->stat.account);
+ if (thread->sched->curr == thread)
+ xtime += xnstat_exectime_now() -
+ xnstat_exectime_get_last_switch(thread->sched);
stat.xtime = xnclock_ticks_to_ns(&nkclock, xtime);
stat.msw = xnstat_counter_get(&thread->stat.ssw);
stat.csw = xnstat_counter_get(&thread->stat.csw);
stat.xsc = xnstat_counter_get(&thread->stat.xsc);
stat.pf = xnstat_counter_get(&thread->stat.pf);
- stat.status = xnthread_state_flags(thread);
+ stat.status = xnthread_get_state(thread);
stat.timeout = xnthread_get_timeout(thread,
xnclock_read_monotonic(&nkclock));
- strcpy(stat.name, xnthread_name(thread));
- strcpy(stat.personality, xnthread_personality(thread)->name);
+ strcpy(stat.name, thread->name);
+ strcpy(stat.personality, thread->personality->name);
xnlock_put_irqrestore(&nklock, s);
return __xn_safe_copy_to_user(u_stat, &stat, sizeof(stat));
@@ -805,7 +806,7 @@ void cobalt_thread_restrict(void)
struct cobalt_thread *thread = cobalt_current_thread();
trace_cobalt_pthread_restrict(thread->hkey.u_pth,
- xnthread_personality(&thread->threadbase)->name);
+ thread->threadbase.personality->name);
cobalt_pop_personality(&cobalt_personality);
cobalt_set_extref(&thread->extref, NULL, NULL);
}
......
@@ -86,7 +86,7 @@ timer_init(struct cobalt_timer *timer,
* want to deliver a signal when a timer elapses.
*/
xntimer_init(&timer->timerbase, &nkclock, cobalt_timer_handler,
- xnthread_sched(&target->threadbase), XNTIMER_UGRAVITY);
+ target->threadbase.sched, XNTIMER_UGRAVITY);
return target;
}
@@ -347,7 +347,7 @@ static inline int timer_set(struct cobalt_timer *timer, int flags,
* Make the timer affine to the CPU running the thread to be
* signaled.
*/
- xntimer_set_sched(&timer->timerbase, xnthread_sched(&thread->threadbase));
+ xntimer_set_sched(&timer->timerbase, thread->threadbase.sched);
return cobalt_xntimer_settime(&timer->timerbase,
clock_flag(flags, timer->clockid), value);
......
@@ -186,7 +186,7 @@ COBALT_SYSCALL(timerfd_create, lostage,
tfd->clockid = clockid;
curr = xnthread_current();
xntimer_init(&tfd->timer, &nkclock, timerfd_handler,
- curr ? xnthread_sched(curr) : NULL, XNTIMER_UGRAVITY);
+ curr ? curr->sched : NULL, XNTIMER_UGRAVITY);
xnsynch_init(&tfd->readers, XNSYNCH_PRIO | XNSYNCH_NOPIP, NULL);
xnselect_init(&tfd->read_select);
tfd->target = NULL;
......
@@ -565,9 +565,9 @@ static inline int registry_wakeup_sleepers(const char *key)
int cnt = 0;
xnsynch_for_each_sleeper_safe(sleeper, tmp, &register_synch) {
- if (*key == *sleeper->registry.waitkey &&
- strcmp(key, sleeper->registry.waitkey) == 0) {
- sleeper->registry.waitkey = NULL;
+ if (*key == *sleeper->waitkey &&
+ strcmp(key, sleeper->waitkey) == 0) {
+ sleeper->waitkey = NULL;
xnsynch_wakeup_this_sleeper(&register_synch, sleeper);
++cnt;
}
@@ -761,7 +761,7 @@ int xnregistry_bind(const char *key, xnticks_t timeout, int timeout_mode,
}
thread = xnthread_current();
- thread->registry.waitkey = key;
+ thread->waitkey = key;
info = xnsynch_sleep_on(&register_synch, timeout, timeout_mode);
if (info & XNTIMEO) {
ret = -ETIMEDOUT;
......
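
The registry hunks above port the wait-key rendezvous to the flattened
field: a binder records the key it sleeps on, and registration wakes every
sleeper whose key matches. A condensed sketch of the two sides, stitched
from the hunks:

	/* Bind side (xnregistry_bind): publish the key, then sleep. */
	thread->waitkey = key;
	info = xnsynch_sleep_on(&register_synch, timeout, timeout_mode);

	/* Registration side (registry_wakeup_sleepers): match and wake. */
	if (*key == *sleeper->waitkey && strcmp(key, sleeper->waitkey) == 0) {
		sleeper->waitkey = NULL;
		xnsynch_wakeup_this_sleeper(&register_synch, sleeper);
	}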
@@ -315,10 +315,10 @@ static int xnsched_sporadic_declare(struct xnthread *thread,
return -ENOMEM;
xntimer_init(&pss->repl_timer, &nkclock, sporadic_replenish_handler,
- xnthread_sched(thread), XNTIMER_IGRAVITY);
+ thread->sched, XNTIMER_IGRAVITY);
xntimer_set_name(&pss->repl_timer, "pss-replenish");
xntimer_init(&pss->drop_timer, &nkclock, sporadic_drop_handler,
- xnthread_sched(thread), XNTIMER_IGRAVITY);
+ thread->sched, XNTIMER_IGRAVITY);
xntimer_set_name(&pss->drop_timer, "pss-drop");
thread->pss = pss;
......
@@ -120,11 +120,11 @@ static void watchdog_handler(struct xntimer *timer)
if (xnthread_test_state(curr, XNUSER)) {
printk(XENO_WARN "watchdog triggered on CPU #%d -- runaway thread "
"'%s' signaled\n", xnsched_cpu(sched), xnthread_name(curr));
"'%s' signaled\n", xnsched_cpu(sched), curr->name);
xnthread_call_mayday(curr, SIGDEBUG_WATCHDOG);
} else {
printk(XENO_WARN "watchdog triggered on CPU #%d -- runaway thread "
"'%s' canceled\n", xnsched_cpu(sched), xnthread_name(curr));
"'%s' canceled\n", xnsched_cpu(sched), curr->name);
/*
* On behalf of an IRQ handler, xnthread_cancel()
* would go half way cancelling the preempted
@@ -324,7 +324,7 @@ void ___xnsched_lock(struct xnsched *sched)
{
struct xnthread *curr = sched->curr;
- if (xnthread_lock_count(curr)++ == 0) {
+ if (curr->lock_count++ == 0) {
sched->lflags |= XNINLOCK;
xnthread_set_state(curr, XNLOCK);
}
@@ -335,10 +335,10 @@ void ___xnsched_unlock(struct xnsched *sched)
{
struct xnthread *curr = sched->curr;
- if (!XENO_ASSERT(COBALT, xnthread_lock_count(curr) > 0))
+ if (!XENO_ASSERT(COBALT, curr->lock_count > 0))
return;
- if (--xnthread_lock_count(curr) == 0) {
+ if (--curr->lock_count == 0) {
xnthread_clear_state(curr, XNLOCK);
xnthread_clear_info(curr, XNLBALERT);
sched->lflags &= ~XNINLOCK;
@@ -351,7 +351,7 @@ void ___xnsched_unlock_fully(struct xnsched *sched)
{
struct xnthread *curr = sched->curr;
- xnthread_lock_count(curr) = 0;
+ curr->lock_count = 0;
xnthread_clear_state(curr, XNLOCK);
xnthread_clear_info(curr, XNLBALERT);
sched->lflags &= ~XNINLOCK;
@@ -808,7 +808,13 @@ int __xnsched_run(struct xnsched *sched)
xnlock_get_irqsave(&nklock, s);
curr = sched->curr;
- xntrace_pid(xnthread_host_pid(curr), xnthread_current_priority(curr));
+ /*
+ * CAUTION: xnthread_host_task(curr) may be unsynced and even
+ * stale if curr = &rootcb, since the task logged by
+ * leave_root() may not still be the current one. Use
+ * "current" for disambiguating.
+ */
+ xntrace_pid(current->pid, xnthread_current_priority(curr));
reschedule:
switched = 0;
if (!test_resched(sched))
@@ -873,14 +879,14 @@ reschedule:
*/
curr = sched->curr;
xnthread_switch_fpu(sched);
- xntrace_pid(xnthread_host_pid(curr), xnthread_current_priority(curr));
+ xntrace_pid(current->pid, xnthread_current_priority(curr));
out:
if (switched &&
xnsched_maybe_resched_after_unlocked_switch(sched))
goto reschedule;
- if (xnthread_lock_count(curr))
+ if (curr->lock_count)
sched->lflags |= XNINLOCK;
xnlock_put_irqrestore(&nklock, s);
@@ -932,7 +938,7 @@ struct vfile_schedlist_data {
char personality[XNOBJECT_NAME_LEN];
int cprio;
xnticks_t timeout;
- unsigned long state;
+ int state;
};
static struct xnvfile_snapshot_ops vfile_schedlist_ops;
@@ -976,7 +982,7 @@ static int vfile_schedlist_next(struct xnvfile_snapshot_iterator *it,
p->pid = xnthread_host_pid(thread);
memcpy(p->name, thread->name, sizeof(p->name));
p->cprio = thread->cprio;
- p->state = xnthread_state_flags(thread);
+ p->state = xnthread_get_state(thread);
knamecpy(p->sched_class, thread->sched_class->name);
knamecpy(p->personality, thread->personality->name);
period = xnthread_get_period(thread);
@@ -1073,7 +1079,7 @@ struct vfile_schedstat_priv {
struct vfile_schedstat_data {
int cpu;
pid_t pid;
- unsigned long state;
+ int state;
char name[XNOBJECT_NAME_LEN];
unsigned long ssw;
unsigned long csw;
@@ -1140,7 +1146,7 @@ static int vfile_schedstat_next(struct xnvfile_snapshot_iterator *it,
p->cpu = xnsched_cpu(sched);
p->pid = xnthread_host_pid(thread);
memcpy(p->name, thread->name, sizeof(p->name));
- p->state = xnthread_state_flags(thread);
+ p->state = xnthread_get_state(thread);
p->ssw = xnstat_counter_get(&thread->stat.ssw);
p->csw = xnstat_counter_get(&thread->stat.csw);
p->xsc = xnstat_counter_get(&thread->stat.xsc);
@@ -1219,7 +1225,7 @@ static int vfile_schedstat_show(struct xnvfile_snapshot_iterator *it,
p->account_period, NULL);
}
xnvfile_printf(it,
"%3u %-6d %-10lu %-10lu %-10lu %-4lu %.8lx %3u.%u"
"%3u %-6d %-10lu %-10lu %-10lu %-4lu %.8x %3u.%u"
" %s\n",
p->cpu, p->pid, p->ssw, p->csw, p->xsc, p->pf, p->state,
usage / 10, usage % 10, p->name);
@@ -1236,7 +1242,7 @@ static int vfile_schedacct_show(struct xnvfile_snapshot_iterator *it,
if (p == NULL)
return 0;
xnvfile_printf(it, "%u %d %lu %lu %lu %lu %.8lx %Lu %Lu %Lu %s %s %d %Lu\n",
xnvfile_printf(it, "%u %d %lu %lu %lu %lu %.8x %Lu %Lu %Lu %s %s %d %Lu\n",
p->cpu, p->pid, p->ssw, p->csw, p->xsc, p->pf, p->state,
xnclock_ticks_to_ns(&nkclock, p->account_period),
xnclock_ticks_to_ns(&nkclock, p->exectime_period),
......
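
The sched.c vfile hunks above track the state snapshot shrinking from
unsigned long to int, so the %.8lx conversions become %.8x in the same
hunks: passing an int where the printf family expects a long-sized argument
is undefined behavior on LP64 targets. A standalone illustration of the
rule, with a hypothetical state value:

	#include <stdio.h>

	int main(void)
	{
		int state = 0x8842;	/* int-sized state bits, hypothetical */
		printf("%.8x\n", state);	/* width matches the argument */
		/* printf("%.8lx\n", state) would now be a format mismatch */
		return 0;
	}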
@@ -346,7 +346,7 @@ int xnsynch_acquire(struct xnsynch *synch, xnticks_t timeout,
XENO_BUGON(COBALT, (synch->status & XNSYNCH_OWNER) == 0);
thread = xnthread_current();
- threadh = xnthread_handle(thread);
+ threadh = thread->handle;
lockp = xnsynch_fastlock(synch);
trace_cobalt_synch_acquire(synch, thread);
redo:
@@ -354,7 +354,7 @@ redo:
if (likely(fastlock == XN_NO_HANDLE)) {
if (xnthread_test_state(thread, XNWEAK))
- xnthread_inc_rescnt(thread);
+ thread->res_count++;
xnthread_clear_info(thread, XNRMID | XNTIMEO | XNBREAK);
return 0;
}
@@ -461,7 +461,7 @@ block:
}
grab:
if (xnthread_test_state(thread, XNWEAK))
- xnthread_inc_rescnt(thread);
+ thread->res_count++;
if (xnsynch_pended_p(synch))
threadh = xnsynch_fast_set_claimed(threadh, 1);
@@ -534,7 +534,7 @@ static struct xnthread *transfer_ownership(struct xnsynch *synch,
if (synch->status & XNSYNCH_CLAIMED)
clear_boost(synch, lastowner);
- nextownerh = xnsynch_fast_set_claimed(xnthread_handle(nextowner),
+ nextownerh = xnsynch_fast_set_claimed(nextowner->handle,
xnsynch_pended_p(synch));
atomic_set(lockp, nextownerh);
@@ -584,15 +584,15 @@ struct xnthread *xnsynch_release(struct xnsynch *synch,
trace_cobalt_synch_release(synch);
if (unlikely(xnthread_test_state(thread, XNWEAK))) {
- if (xnthread_get_rescnt(thread) == 0)
+ if (thread->res_count == 0)
xnthread_signal(thread, SIGDEBUG,
SIGDEBUG_RESCNT_IMBALANCE);
else
- xnthread_dec_rescnt(thread);
+ thread->res_count--;
}
lockp = xnsynch_fastlock(synch);
- threadh = xnthread_handle(thread);
+ threadh = thread->handle;
if (likely(xnsynch_fast_release(lockp, threadh)))
return NULL;
......
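
Condensed from the synch.c hunks above, the resource-count protocol for
XNWEAK threads after the rename (hrescnt becomes res_count, and the
inc/dec/get accessor macros go away); a sketch of the accounting logic
only, not the full xnsynch implementation:

	/* Acquisition paths (fast or slow) count each held resource. */
	if (xnthread_test_state(thread, XNWEAK))
		thread->res_count++;

	/* Release flags an unlock without a matching lock. */
	if (unlikely(xnthread_test_state(thread, XNWEAK))) {
		if (thread->res_count == 0)
			xnthread_signal(thread, SIGDEBUG,
					SIGDEBUG_RESCNT_IMBALANCE);
		else
			thread->res_count--;
	}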
@@ -173,20 +173,19 @@ int __xnthread_init(struct xnthread *thread,
thread->sched = sched;
thread->state = flags;
thread->info = 0;
- thread->schedlck = 0;
+ thread->lock_count = 0;
thread->rrperiod = XN_INFINITE;
thread->wchan = NULL;
thread->wwake = NULL;
thread->wcontext = NULL;
- thread->hrescnt = 0;
- thread->registry.handle = XN_NO_HANDLE;
- thread->registry.waitkey = NULL;
+ thread->res_count = 0;
+ thread->handle = XN_NO_HANDLE;
+ thread->waitkey = NULL;
memset(&thread->stat, 0, sizeof(thread->stat));
thread->selector = NULL;
INIT_LIST_HEAD(&thread->claimq);
xnsynch_init(&thread->join_synch, XNSYNCH_FIFO, NULL);
/* These will be filled by xnthread_start() */
thread->imode = 0;
thread->entry = NULL;
thread->cookie = NULL;
@@ -201,9 +200,7 @@ int __xnthread_init(struct xnthread *thread,
xntimer_set_name(&thread->ptimer, thread->name);
xntimer_set_priority(&thread->ptimer, XNTIMER_HIPRIO);
- thread->init_class = sched_class;
thread->base_class = NULL; /* xnsched_set_policy() will set it. */
- thread->init_schedparam = *sched_param;
ret = xnsched_init_thread(thread);
if (ret)
goto err_out;
@@ -261,10 +258,10 @@ void xnthread_init_root_tcb(struct xnthread *thread)
void xnthread_deregister(struct xnthread *thread)
{
- if (thread->registry.handle != XN_NO_HANDLE)
- xnregistry_remove(thread->registry.handle);
+ if (thread->handle != XN_NO_HANDLE)
+ xnregistry_remove(thread->handle);
- thread->registry.handle = XN_NO_HANDLE;
+ thread->handle = XN_NO_HANDLE;
}
char *xnthread_format_status(unsigned long status, char *buf, int size)
@@ -349,7 +346,7 @@ xnticks_t xnthread_get_period(struct xnthread *thread)
if (xntimer_running_p(&thread->ptimer))
period = xntimer_interval(&thread->ptimer);
else if (xnthread_test_state(thread,XNRRB))
- period = xnthread_time_slice(thread);
+ period = thread->rrperiod;
return period;
}
@@ -644,7 +641,6 @@ int xnthread_start(struct xnthread *thread,