Commit 41848412 authored by Philippe Gerum
Browse files

evl: rename evl_current_thread() to evl_current(), matching inband kernel naming



Signed-off-by: Philippe Gerum <rpm@xenomai.org>
parent c31b0680
......@@ -18,7 +18,7 @@ void __evl_commit_monitor_ceiling(void);
static inline void evl_commit_monitor_ceiling(void)
{
struct evl_thread *curr = evl_current_thread();
struct evl_thread *curr = evl_current();
if (curr->u_window->pp_pending != EVL_NO_HANDLE)
__evl_commit_monitor_ceiling();
......
......@@ -173,7 +173,7 @@ void __evl_test_cancel(struct evl_thread *curr);
void evl_discard_thread(struct evl_thread *thread);
static inline struct evl_thread *evl_current_thread(void)
/*
 * Return the EVL thread descriptor bound to the current task, as
 * recorded in the per-task Dovetail out-of-band state. Returns NULL
 * when the caller is not attached to the EVL core (callers such as
 * do_oob_syscall() and evl_detach_self() check for this).
 */
static inline struct evl_thread *evl_current(void)
{
return dovetail_current_state()->thread;
}
......@@ -184,9 +184,9 @@ struct evl_rq *evl_thread_rq(struct evl_thread *thread)
return thread->rq;
}
static inline struct evl_rq *evl_current_thread_rq(void)
static inline struct evl_rq *evl_current_rq(void)
{
return evl_thread_rq(evl_current_thread());
return evl_thread_rq(evl_current());
}
static inline
......@@ -197,7 +197,7 @@ struct evl_thread *evl_thread_from_task(struct task_struct *p)
static inline void evl_test_cancel(void)
{
struct evl_thread *curr = evl_current_thread();
struct evl_thread *curr = evl_current();
if (curr && (curr->info & T_CANCELD))
__evl_test_cancel(curr);
......@@ -334,7 +334,7 @@ static inline void evl_cancel_kthread(struct evl_kthread *kthread)
/*
 * Non-zero when a cancellation request is pending for the current EVL
 * kthread (T_CANCELD set in the info bits). NOTE(review): assumes the
 * caller is an EVL kthread, so evl_current() is non-NULL here.
 */
static inline int evl_kthread_should_stop(void)
{
return evl_current()->info & T_CANCELD;
}
void evl_set_kthread_priority(struct evl_kthread *thread,
......
......@@ -322,7 +322,7 @@ TRACE_EVENT(evl_sleep_on,
),
TP_fast_assign(
__entry->pid = evl_get_inband_pid(evl_current_thread());
__entry->pid = evl_get_inband_pid(evl_current());
__entry->timeout = timeout;
__entry->timeout_mode = timeout_mode;
__entry->wchan = wchan;
......
......@@ -415,7 +415,7 @@ static long restart_clock_delay(struct restart_block *param)
static int clock_delay(struct evl_clock *clock,
struct evl_clock_delayreq __user *u_req)
{
struct evl_thread *curr = evl_current_thread();
struct evl_thread *curr = evl_current();
struct evl_clock_delayreq req;
struct restart_block *restart;
struct timespec remain;
......
......@@ -91,7 +91,7 @@ int evl_signal_monitor_targeted(struct evl_thread *target, int monfd)
void __evl_commit_monitor_ceiling(void) /* nklock held, irqs off, OOB */
{
struct evl_thread *curr = evl_current_thread();
struct evl_thread *curr = evl_current();
struct evl_monitor *gate;
/*
......@@ -181,7 +181,7 @@ static int __enter_monitor(struct evl_monitor *gate)
/* nklock held, irqs off */
static int enter_monitor(struct evl_monitor *gate)
{
struct evl_thread *curr = evl_current_thread();
struct evl_thread *curr = evl_current();
if (gate->type == EVL_MONITOR_EV)
return -EINVAL;
......@@ -210,7 +210,7 @@ static void __exit_monitor(struct evl_monitor *gate,
/* nklock held, irqs off */
static int exit_monitor(struct evl_monitor *gate)
{
struct evl_thread *curr = evl_current_thread();
struct evl_thread *curr = evl_current();
struct evl_monitor_state *state = gate->state;
struct evl_monitor *event, *n;
......@@ -255,7 +255,7 @@ static int wait_monitor(struct evl_monitor *event,
struct evl_monitor_waitreq *req,
__s32 *r_op_ret)
{
struct evl_thread *curr = evl_current_thread();
struct evl_thread *curr = evl_current();
int ret = 0, op_ret = 0, info;
struct evl_monitor *gate;
struct evl_file *sfilp;
......
......@@ -367,7 +367,7 @@ EXPORT_SYMBOL_GPL(evl_destroy_mutex);
int evl_trylock_mutex(struct evl_mutex *mutex)
{
struct evl_thread *curr = evl_current_thread();
struct evl_thread *curr = evl_current();
atomic_t *lockp = mutex->fastlock;
fundle_t h;
......@@ -391,7 +391,7 @@ EXPORT_SYMBOL_GPL(evl_trylock_mutex);
int evl_lock_mutex_timeout(struct evl_mutex *mutex, ktime_t timeout,
enum evl_tmode timeout_mode)
{
struct evl_thread *curr = evl_current_thread(), *owner;
struct evl_thread *curr = evl_current(), *owner;
atomic_t *lockp = mutex->fastlock;
fundle_t currh, h, oldh;
unsigned long flags;
......@@ -586,7 +586,7 @@ static void transfer_ownership(struct evl_mutex *mutex,
void __evl_unlock_mutex(struct evl_mutex *mutex)
{
struct evl_thread *curr = evl_current_thread();
struct evl_thread *curr = evl_current();
unsigned long flags;
fundle_t currh, h;
atomic_t *lockp;
......@@ -713,7 +713,7 @@ void evl_reorder_mutex_wait(struct evl_thread *thread)
void evl_commit_mutex_ceiling(struct evl_mutex *mutex)
{
struct evl_thread *curr = evl_current_thread();
struct evl_thread *curr = evl_current();
fundle_t oldh, h;
atomic_t *lockp;
......
......@@ -285,7 +285,7 @@ static int collect_events(struct event_poller *poller,
struct evl_poll_event __user *u_ev,
int maxevents, bool do_poll)
{
struct evl_thread *curr = evl_current_thread();
struct evl_thread *curr = evl_current();
struct evl_poll_watchpoint *wpt, *table;
int ret, n, nr, count = 0, ready;
struct evl_poll_event ev;
......@@ -375,7 +375,7 @@ static int collect_events(struct event_poller *poller,
static inline void clear_wait(void)
{
struct evl_thread *curr = evl_current_thread();
struct evl_thread *curr = evl_current();
struct evl_poll_watchpoint *wpt;
unsigned long flags;
int n;
......
......@@ -905,7 +905,7 @@ static int yield_inband(void) /* OOB only */
int evl_sched_yield(void)
{
struct evl_thread *curr = evl_current_thread();
struct evl_thread *curr = evl_current();
oob_context_only();
......
......@@ -33,7 +33,7 @@ struct sem_wait_data {
static int acquire_sem(struct evl_sem *sem,
struct evl_sem_waitreq *req)
{
struct evl_thread *curr = evl_current_thread();
struct evl_thread *curr = evl_current();
struct evl_sem_state *state = sem->state;
struct sem_wait_data wda;
enum evl_tmode tmode;
......
......@@ -100,7 +100,7 @@ static int do_oob_syscall(struct irq_stage *stage, struct pt_regs *regs)
goto do_inband;
nr = oob_syscall_nr(regs);
curr = evl_current_thread();
curr = evl_current();
if (curr == NULL || !cap_raised(current_cap(), CAP_SYS_NICE)) {
if (EVL_DEBUG(CORE))
printk(EVL_WARNING
......@@ -167,7 +167,7 @@ static int do_oob_syscall(struct irq_stage *stage, struct pt_regs *regs)
static int do_inband_syscall(struct irq_stage *stage, struct pt_regs *regs)
{
struct evl_thread *curr = evl_current_thread(); /* Always valid. */
struct evl_thread *curr = evl_current(); /* Always valid. */
struct task_struct *p;
unsigned int nr;
int ret;
......
......@@ -268,7 +268,7 @@ static void do_cleanup_current(struct evl_thread *curr)
static void cleanup_current_thread(void)
{
struct oob_thread_state *p = dovetail_current_state();
struct evl_thread *curr = evl_current_thread();
struct evl_thread *curr = evl_current();
/*
* We are called for exiting kernel and user threads over the
......@@ -285,12 +285,12 @@ static void cleanup_current_thread(void)
if (waitqueue_active(&join_all))
wake_up(&join_all);
p->thread = NULL; /* evl_current_thread() <- NULL */
p->thread = NULL; /* evl_current() <- NULL */
}
static void put_current_thread(void)
{
struct evl_thread *curr = evl_current_thread();
struct evl_thread *curr = evl_current();
cleanup_current_thread();
evl_put_element(&curr->element);
......@@ -466,7 +466,7 @@ void evl_sleep_on(ktime_t timeout, enum evl_tmode timeout_mode,
struct evl_clock *clock,
struct evl_wait_channel *wchan)
{
struct evl_thread *curr = evl_current_thread();
struct evl_thread *curr = evl_current();
unsigned long oldstate, flags;
struct evl_rq *rq;
......@@ -704,7 +704,7 @@ static void inband_task_wakeup(struct irq_work *work)
void evl_switch_inband(int cause)
{
struct evl_thread *curr = evl_current_thread();
struct evl_thread *curr = evl_current();
struct task_struct *p = current;
struct kernel_siginfo si;
int cpu __maybe_unused;
......@@ -799,7 +799,7 @@ int evl_switch_oob(void)
inband_context_only();
curr = evl_current_thread();
curr = evl_current();
if (curr == NULL)
return -EPERM;
......@@ -910,7 +910,7 @@ EXPORT_SYMBOL_GPL(evl_get_thread_period);
ktime_t evl_delay_thread(ktime_t timeout, enum evl_tmode timeout_mode,
struct evl_clock *clock)
{
struct evl_thread *curr = evl_current_thread();
struct evl_thread *curr = evl_current();
unsigned long flags;
ktime_t rem = 0;
......@@ -950,7 +950,7 @@ EXPORT_SYMBOL_GPL(evl_sleep);
int evl_set_thread_period(struct evl_clock *clock,
ktime_t idate, ktime_t period)
{
struct evl_thread *curr = evl_current_thread();
struct evl_thread *curr = evl_current();
unsigned long flags;
int ret = 0;
......@@ -1000,7 +1000,7 @@ int evl_wait_thread_period(unsigned long *overruns_r)
if (!EVL_ASSERT(CORE, !evl_cannot_block()))
return -EPERM;
curr = evl_current_thread();
curr = evl_current();
xnlock_get_irqsave(&nklock, flags);
......@@ -1070,7 +1070,7 @@ void evl_cancel_thread(struct evl_thread *thread)
}
check_self_cancel:
if (evl_current_thread() == thread) {
if (evl_current() == thread) {
xnlock_put_irqrestore(&nklock, flags);
evl_test_cancel();
/*
......@@ -1104,7 +1104,7 @@ EXPORT_SYMBOL_GPL(evl_cancel_thread);
int evl_detach_self(void)
{
if (evl_current_thread() == NULL)
if (evl_current() == NULL)
return -EPERM;
put_current_thread();
......@@ -1150,7 +1150,7 @@ static void wait_for_rcu_grace_period(struct pid *pid)
int evl_join_thread(struct evl_thread *thread, bool uninterruptible)
{
struct evl_thread *curr = evl_current_thread();
struct evl_thread *curr = evl_current();
bool switched = false;
unsigned long flags;
struct pid *pid;
......@@ -1621,7 +1621,7 @@ int evl_killall(int mask)
inband_context_only();
if (evl_current_thread())
if (evl_current())
return -EPERM;
/*
......@@ -1688,7 +1688,7 @@ void handle_oob_trap(unsigned int trapnr, struct pt_regs *regs)
oob_context_only();
curr = evl_current_thread();
curr = evl_current();
trace_evl_thread_fault(trapnr, regs);
#if defined(CONFIG_EVENLESS_DEBUG_CORE) || defined(CONFIG_EVENLESS_DEBUG_USER)
......@@ -1720,7 +1720,7 @@ void handle_oob_trap(unsigned int trapnr, struct pt_regs *regs)
void handle_oob_mayday(struct pt_regs *regs)
{
struct evl_thread *curr = evl_current_thread();
struct evl_thread *curr = evl_current();
if (EVL_WARN_ON(CORE, !(curr->state & T_USER)))
return;
......@@ -1967,7 +1967,7 @@ static void handle_sigwake_event(struct task_struct *p)
static void handle_cleanup_event(struct mm_struct *mm)
{
struct evl_thread *curr = evl_current_thread();
struct evl_thread *curr = evl_current();
/*
* Detect an EVL thread running exec(), i.e. still attached to
......@@ -2113,7 +2113,7 @@ static int get_sched_attrs(struct evl_thread *thread,
static int update_state_bits(struct evl_thread *thread,
__u32 mask, bool set)
{
struct evl_thread *curr = evl_current_thread();
struct evl_thread *curr = evl_current();
unsigned long flags;
if (curr != thread)
......@@ -2292,7 +2292,7 @@ thread_factory_build(struct evl_factory *fac, const char *name,
struct evl_thread *curr;
int ret;
if (evl_current_thread())
if (evl_current())
return ERR_PTR(-EBUSY);
curr = kzalloc(sizeof(*curr), GFP_KERNEL);
......@@ -2356,7 +2356,7 @@ static void thread_factory_dispose(struct evl_element *e)
* state.
*/
if (!(thread->state & T_ZOMBIE)) {
if (EVL_WARN_ON(CORE, evl_current_thread() != thread))
if (EVL_WARN_ON(CORE, evl_current() != thread))
return;
cleanup_current_thread();
}
......
......@@ -430,7 +430,7 @@ unsigned long evl_get_timer_overruns(struct evl_timer *timer)
* Hide overruns due to the most recent ptracing session from
* the caller.
*/
thread = evl_current_thread();
thread = evl_current();
if (thread->local_info & T_HICCUP)
return 0;
......
......@@ -69,7 +69,7 @@ static int set_timerfd(struct evl_timerfd *timerfd,
get_timer_value(&timerfd->timer, &sreq->ovalue);
xnlock_get_irqsave(&nklock, flags);
evl_set_timer_rq(&timerfd->timer, evl_current_thread_rq());
evl_set_timer_rq(&timerfd->timer, evl_current_rq());
xnlock_put_irqrestore(&nklock, flags);
return set_timer_value(&timerfd->timer, &sreq->value);
......
......@@ -38,7 +38,7 @@ EXPORT_SYMBOL_GPL(evl_destroy_wait);
int evl_wait_timeout(struct evl_wait_queue *wq, ktime_t timeout,
enum evl_tmode timeout_mode)
{
struct evl_thread *curr = evl_current_thread();
struct evl_thread *curr = evl_current();
unsigned long flags;
int ret;
......
......@@ -342,7 +342,7 @@ static void inbound_signal_input(struct xbuf_ring *ring)
static int inbound_wait_output(struct xbuf_ring *ring, size_t len)
{
struct evl_xbuf *xbuf = container_of(ring, struct evl_xbuf, ibnd.ring);
struct evl_thread *curr = evl_current_thread();
struct evl_thread *curr = evl_current();
struct xbuf_wait_data wait;
wait.len = len;
......@@ -452,7 +452,7 @@ static long xbuf_oob_ioctl(struct file *filp,
static int outbound_wait_input(struct xbuf_ring *ring, size_t len)
{
struct evl_xbuf *xbuf = container_of(ring, struct evl_xbuf, obnd.ring);
struct evl_thread *curr = evl_current_thread();
struct evl_thread *curr = evl_current();
struct xbuf_wait_data wait;
wait.len = len;
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment