Commit 7143dad3 authored by Philippe Gerum

evl: introduce synchronous breakpoint support



Synchronous breakpoints keep a ptrace-stepped thread synchronized
with its sibling threads from the same process which keep running
in the background, as follows:

- as soon as a ptracer (e.g. gdb) regains control over a thread which
  just hit a breakpoint or received SIGINT, sibling threads from the
  same process which run out-of-band are immediately frozen.

- all sibling threads which have been frozen are set to wait on a
  common barrier before they can be released. Such release happens
  once all of them have joined the barrier in out-of-band context,
  after the (single-)stepped thread has resumed (a user-space model
  of these semantics is sketched below).
Signed-off-by: Philippe Gerum <rpm@xenomai.org>
parent 15cbcccc
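As a rough user-space model of the freeze/release semantics described in the
message above, using POSIX threads: all names are invented, and unlike the EVL
core this model does not wait for every sibling to have checked in before
opening the barrier.

/* Illustrative model only, not EVL code. Build with: cc -pthread model.c */
#include <pthread.h>
#include <stdbool.h>

struct ptsync_barrier {
	pthread_mutex_t lock;
	pthread_cond_t release;
	bool frozen;		/* Set while the ptracer holds a thread. */
};

static struct ptsync_barrier barrier = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false,
};

/* The ptracer regained control: freeze the siblings. */
static void ptsync_freeze(struct ptsync_barrier *b)
{
	pthread_mutex_lock(&b->lock);
	b->frozen = true;
	pthread_mutex_unlock(&b->lock);
}

/* Siblings pass through here on their way to getting scheduled. */
static void ptsync_checkin(struct ptsync_barrier *b)
{
	pthread_mutex_lock(&b->lock);
	while (b->frozen)	/* Park until the barrier opens. */
		pthread_cond_wait(&b->release, &b->lock);
	pthread_mutex_unlock(&b->lock);
}

/* The stepped thread resumed: open the barrier for everybody. */
static void ptsync_release(struct ptsync_barrier *b)
{
	pthread_mutex_lock(&b->lock);
	b->frozen = false;
	pthread_cond_broadcast(&b->release);
	pthread_mutex_unlock(&b->lock);
}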
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _EVL_DOVETAIL_MM_INFO_H
#define _EVL_DOVETAIL_MM_INFO_H
#include <asm-generic/evl/mm_info.h>
#endif /* !_EVL_DOVETAIL_MM_INFO_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_EVL_MM_INFO_H
#define _ASM_GENERIC_EVL_MM_INFO_H
#ifdef CONFIG_EVL
#include <linux/list.h>
#define EVL_MM_PTSYNC_BIT 0
#define EVL_MM_ACTIVE_BIT 30
#define EVL_MM_INIT_BIT 31
struct evl_wait_queue;
struct oob_mm_state {
unsigned long flags; /* Guaranteed zero initially. */
struct list_head ptrace_sync;
struct evl_wait_queue *ptsync_barrier;
};
#else
struct oob_mm_state { };
#endif /* !CONFIG_EVL */
#endif /* !_ASM_GENERIC_EVL_MM_INFO_H */
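oob_mm->flags starts out zeroed and is meant for the atomic bitop API; a rough
sketch of the two idioms this commit relies on later (helper names invented):

#include <linux/bitops.h>

/* Illustrative sketch, not from the commit. Lockless test on the
 * scheduling fast path (see pick_next_thread() below). */
static inline bool ptsync_requested(struct oob_mm_state *oob_mm)
{
	return test_bit(EVL_MM_PTSYNC_BIT, &oob_mm->flags);
}

/* One-shot initialization, true for the first caller only
 * (see control_open() below). */
static inline bool oob_mm_init_once(struct oob_mm_state *oob_mm)
{
	return !test_and_set_bit(EVL_MM_INIT_BIT, &oob_mm->flags);
}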
@@ -6,7 +6,7 @@
#ifndef _EVL_LOCK_H
#define _EVL_LOCK_H
#include <linux/irq_pipeline.h>
#include <linux/spinlock.h>
/*
* The spinlock API used in the EVL core, which preserves Dovetail's
......
@@ -10,6 +10,7 @@
#include <linux/percpu.h>
#include <linux/list.h>
#include <linux/irq_pipeline.h>
+#include <evl/lock.h>
#include <evl/thread.h>
#include <evl/sched/queue.h>
@@ -281,6 +282,8 @@ void evl_migrate_thread(struct evl_thread *thread,
#endif /* !CONFIG_SMP */
+void evl_start_ptsync(struct evl_thread *stopper);
#define for_each_evl_cpu(cpu) \
for_each_online_cpu(cpu) \
if (is_evl_cpu(cpu))
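For illustration, a hypothetical walker built on this iterator; the trailing
if () clause simply filters the online CPU set down to the CPUs EVL runs on:

/* Illustrative only: log every CPU the EVL core may schedule on. */
static void log_evl_cpus(void)
{
	int cpu;

	for_each_evl_cpu(cpu)
		pr_info("CPU%d runs EVL\n", cpu);
}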
......
@@ -27,9 +27,19 @@
#include <uapi/evl/sched.h>
#include <asm/evl/thread.h>
-#define EVL_THREAD_BLOCK_BITS (T_SUSP|T_PEND|T_DELAY|T_WAIT|T_DORMANT|T_INBAND|T_HALT)
+/* All bits which may cause an EVL thread to block in oob context. */
+#define EVL_THREAD_BLOCK_BITS (T_SUSP|T_PEND|T_DELAY|T_WAIT|T_DORMANT|T_INBAND|T_HALT|T_PTSYNC)
+/* Information bits an EVL thread may receive from a blocking op. */
+#define EVL_THREAD_INFO_MASK (T_RMID|T_TIMEO|T_BREAK|T_WAKEN|T_ROBBED|T_KICKED|T_BCAST)
+/*
+ * These are special internal values of SIGDEBUG causes which are
+ * never sent to user-space, but specifically handled by
+ * evl_switch_inband().
+ */
+#define SIGDEBUG_NONE 0
+#define SIGDEBUG_TRAP -1
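A condensed sketch of how evl_switch_inband() (last hunk of this commit) acts
on these internal cause values; the helper name is invented and the T_PTSIG
case is left out:

/* Sketch (invented helper): should switching in-band for @cause
 * notify user-space with SIGDEBUG? SIGDEBUG_TRAP starts the ptsync
 * sequence instead of notifying. */
static bool sigdebug_should_notify(struct evl_thread *curr, int cause)
{
	if (cause == SIGDEBUG_TRAP)
		return false;
	/* SIGDEBUG_NONE (0) fails the 'cause > SIGDEBUG_NONE' test too. */
	return (curr->state & T_USER) && cause > SIGDEBUG_NONE;
}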
struct evl_thread;
struct evl_rq;
struct evl_sched_class;
@@ -54,7 +64,7 @@ struct evl_thread {
evl_spinlock_t lock;
/*
-* Shared data, covered by ->lock.
+* Shared thread-specific data, covered by ->lock.
*/
struct evl_rq *rq;
struct evl_sched_class *base_class;
@@ -84,11 +94,18 @@ struct evl_thread {
struct evl_timer rtimer; /* Resource timer */
struct evl_timer ptimer; /* Periodic timer */
-ktime_t rrperiod; /* Round-robin period (ns) */
+ktime_t rrperiod; /* Round-robin period (ns) */
/*
-* Shared data, covered by both thread->lock AND
-* thread->rq->lock.
+* Shared scheduler-specific data covered by both thread->lock
+* AND thread->rq->lock. For such data, the first lock
+* protects against the thread moving to a different rq; it
+* may be omitted if the target cannot be subject to such
+* migration (i.e. @thread == evl_this_rq()->curr, which
+* implies that we are out-of-band and thus cannot trigger
+* evl_migrate_thread()). The second one serializes with the
+* scheduler core and must ALWAYS be taken for accessing this
+* data.
*/
__u32 state;
__u32 info;
@@ -105,7 +122,8 @@ struct evl_thread {
struct list_head next; /* in evl_thread_list */
/*
-* Thread-local data the owner may modify locklessly.
+* Thread-local data only the owner may modify, therefore it
+* may do so locklessly.
*/
struct dovetail_altsched_context altsched;
__u32 local_info;
@@ -136,6 +154,8 @@ struct evl_thread {
struct completion exited;
kernel_cap_t raised_cap;
struct list_head kill_next;
+struct oob_mm_state *oob_mm; /* Mostly RO. */
+struct list_head ptsync_next; /* covered by oob_mm->lock. */
char *name;
};
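Taken together, the comments above imply a double-locking pattern for the
state/info fields; a rough sketch under those rules (helper invented), where
taking thread->lock first pins the thread to its current rq:

/* Sketch (invented helper): update thread->state for a thread which
 * may be migrating between runqueues. */
static void set_state_bits(struct evl_thread *thread, __u32 bits)
{
	unsigned long flags;

	evl_spin_lock_irqsave(&thread->lock, flags);	/* Blocks migration. */
	evl_spin_lock(&thread->rq->lock); /* Serializes with the scheduler. */
	thread->state |= bits;
	evl_spin_unlock(&thread->rq->lock);
	evl_spin_unlock_irqrestore(&thread->lock, flags);
}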
@@ -270,7 +290,8 @@ void evl_get_thread_state(struct evl_thread *thread,
int evl_detach_self(void);
-void evl_kick_thread(struct evl_thread *thread);
+void evl_kick_thread(struct evl_thread *thread,
+		     int info);
void evl_demote_thread(struct evl_thread *thread);
@@ -343,4 +364,6 @@ void evl_set_kthread_priority(struct evl_kthread *thread,
pid_t evl_get_inband_pid(struct evl_thread *thread);
+int activate_oob_mm_state(struct oob_mm_state *p);
#endif /* !_EVL_THREAD_H */
@@ -487,7 +487,7 @@ DEFINE_EVENT(curr_thread_event, evl_watchdog_signal,
TP_ARGS(curr)
);
-DEFINE_EVENT(curr_thread_event, evl_switching_oob,
+DEFINE_EVENT(curr_thread_event, evl_switch_oob,
TP_PROTO(struct evl_thread *curr),
TP_ARGS(curr)
);
@@ -499,12 +499,18 @@ DEFINE_EVENT(curr_thread_event, evl_switched_oob,
#define evl_print_switch_cause(cause) \
__print_symbolic(cause, \
-{ SIGDEBUG_NONE, "undefined" }, \
-{ SIGDEBUG_MIGRATE_SIGNAL, "signal" }, \
-{ SIGDEBUG_MIGRATE_SYSCALL, "syscall" }, \
-{ SIGDEBUG_MIGRATE_FAULT, "fault" })
-TRACE_EVENT(evl_switching_inband,
+{ SIGDEBUG_TRAP, "breakpoint trap" }, \
+{ SIGDEBUG_NONE, "undefined" }, \
+{ SIGDEBUG_MIGRATE_SIGNAL, "signal" }, \
+{ SIGDEBUG_MIGRATE_SYSCALL, "syscall" }, \
+{ SIGDEBUG_MIGRATE_FAULT, "fault" }, \
+{ SIGDEBUG_MIGRATE_PRIOINV, "priority inversion" }, \
+{ SIGDEBUG_WATCHDOG, "watchdog" }, \
+{ SIGDEBUG_MUTEX_IMBALANCE, "mutex imbalance" }, \
+{ SIGDEBUG_MUTEX_SLEEP, "mutex sleep" }, \
+{ SIGDEBUG_STAGE_LOCKED, "stage exclusion" } )
+TRACE_EVENT(evl_switch_inband,
TP_PROTO(int cause),
TP_ARGS(cause),
......
@@ -10,7 +10,7 @@
#include <linux/types.h>
#include <uapi/evl/sched.h>
-#define EVL_ABI_LEVEL 15
+#define EVL_ABI_LEVEL 16
#define EVL_CONTROL_DEV "/dev/evl/control"
......
@@ -25,7 +25,6 @@
((sigdebug_code(si) & 0xffff0000) == sigdebug_marker)
/* Possible values of sigdebug_cause() */
-#define SIGDEBUG_NONE 0
#define SIGDEBUG_MIGRATE_SIGNAL 1
#define SIGDEBUG_MIGRATE_SYSCALL 2
#define SIGDEBUG_MIGRATE_FAULT 3
......
@@ -15,45 +15,49 @@
/* State flags (shared) */
-#define T_SUSP 0x00000001 /*< Suspended */
-#define T_PEND 0x00000002 /*< Blocked on a wait_queue/mutex */
-#define T_DELAY 0x00000004 /*< Delayed/timed */
-#define T_WAIT 0x00000008 /*< Periodic wait */
-#define T_READY 0x00000010 /*< Ready to run (in rq) */
-#define T_DORMANT 0x00000020 /*< Not started yet */
-#define T_ZOMBIE 0x00000040 /*< Dead, waiting for disposal */
-#define T_INBAND 0x00000080 /*< Running in-band */
-#define T_HALT 0x00000100 /*< Halted */
-#define T_BOOST 0x00000200 /*< PI/PP boost undergoing */
-#define T_SSTEP 0x00000400 /*< Single-stepped by debugger */
-#define T_RRB 0x00000800 /*< Undergoes round-robin scheduling */
-#define T_ROOT 0x00001000 /*< Root thread (in-band kernel placeholder) */
-#define T_WEAK 0x00002000 /*< Weak scheduling (non real-time) */
-#define T_USER 0x00004000 /*< Userland thread */
-#define T_WOSS 0x00008000 /*< Warn on stage switch (SIGDEBUG) */
-#define T_WOLI 0x00010000 /*< Warn on locking inconsistency (SIGDEBUG) */
-#define T_WOSX 0x00020000 /*< Warn on stage exclusion (SIGDEBUG) */
+#define T_SUSP 0x00000001 /* Suspended */
+#define T_PEND 0x00000002 /* Blocked on a wait_queue/mutex */
+#define T_DELAY 0x00000004 /* Delayed/timed */
+#define T_WAIT 0x00000008 /* Periodic wait */
+#define T_READY 0x00000010 /* Ready to run (in rq) */
+#define T_DORMANT 0x00000020 /* Not started yet */
+#define T_ZOMBIE 0x00000040 /* Dead, waiting for disposal */
+#define T_INBAND 0x00000080 /* Running in-band */
+#define T_HALT 0x00000100 /* Halted */
+#define T_BOOST 0x00000200 /* PI/PP boost undergoing */
+#define T_PTSYNC 0x00000400 /* Synchronizing on ptrace event */
+#define T_RRB 0x00000800 /* Undergoes round-robin scheduling */
+#define T_ROOT 0x00001000 /* Root thread (in-band kernel placeholder) */
+#define T_WEAK 0x00002000 /* Weak scheduling (in-band) */
+#define T_USER 0x00004000 /* Userland thread */
+#define T_WOSS 0x00008000 /* Warn on stage switch (SIGDEBUG) */
+#define T_WOLI 0x00010000 /* Warn on locking inconsistency (SIGDEBUG) */
+#define T_WOSX 0x00020000 /* Warn on stage exclusion (SIGDEBUG) */
+#define T_PTRACE 0x00040000 /* Stopped on ptrace event */
/* Information flags (shared) */
-#define T_TIMEO 0x00000001 /*< Woken up due to a timeout condition */
-#define T_RMID 0x00000002 /*< Pending on a removed resource */
-#define T_BREAK 0x00000004 /*< Forcibly awaken from a wait state */
-#define T_KICKED 0x00000008 /*< Forced out of OOB context */
-#define T_WAKEN 0x00000010 /*< Thread waken up upon resource availability */
-#define T_ROBBED 0x00000020 /*< Robbed from resource ownership */
-#define T_CANCELD 0x00000040 /*< Cancellation request is pending */
-#define T_PIALERT 0x00000080 /*< Priority inversion alert (SIGDEBUG sent) */
-#define T_SCHEDP 0x00000100 /*< schedparam propagation is pending */
-#define T_BCAST 0x00000200 /*< Woken up upon resource broadcast */
-#define T_SIGNAL 0x00000400 /*< Event monitor signaled */
-#define T_SXALERT 0x00000800 /*< Stage exclusion alert (SIGDEBUG sent) */
+#define T_TIMEO 0x00000001 /* Woken up due to a timeout condition */
+#define T_RMID 0x00000002 /* Pending on a removed resource */
+#define T_BREAK 0x00000004 /* Forcibly awaken from a wait state */
+#define T_KICKED 0x00000008 /* Forced out of OOB context */
+#define T_WAKEN 0x00000010 /* Thread waken up upon resource availability */
+#define T_ROBBED 0x00000020 /* Robbed from resource ownership */
+#define T_CANCELD 0x00000040 /* Cancellation request is pending */
+#define T_PIALERT 0x00000080 /* Priority inversion alert (SIGDEBUG sent) */
+#define T_SCHEDP 0x00000100 /* Schedparam propagation is pending */
+#define T_BCAST 0x00000200 /* Woken up upon resource broadcast */
+#define T_SIGNAL 0x00000400 /* Event monitor signaled */
+#define T_SXALERT 0x00000800 /* Stage exclusion alert (SIGDEBUG sent) */
+#define T_PTSIG 0x00001000 /* Ptrace signal is pending */
+#define T_PTSTOP 0x00002000 /* Ptrace stop is ongoing */
+#define T_PTJOIN 0x00004000 /* Ptracee should join ptsync barrier */
/* Local information flags (private to current thread) */
-#define T_SYSRST 0x00000001 /*< Thread awaiting syscall restart after signal */
-#define T_HICCUP 0x00000002 /*< Just left from ptracing - timings wrecked */
-#define T_INFAULT 0x00000004 /*< In fault handling */
+#define T_SYSRST 0x00000001 /* Thread awaiting syscall restart after signal */
+#define T_IGNOVR 0x00000002 /* Overrun detection temporarily disabled */
+#define T_INFAULT 0x00000004 /* In fault handling */
/*
* Must follow strictly the declaration order of the state flags
@@ -67,14 +71,14 @@
* 'U' -> Dormant
* 'Z' -> Zombie
* 'X' -> Running in-band
-* 'H' -> Held in emergency
+* 'H' -> Halted
* 'b' -> Priority boost undergoing
-* 'T' -> Ptraced and stopped
+* '#' -> Ptrace sync ongoing
* 'r' -> Undergoes round-robin
-* 'g' -> Warned on stage switch (SIGDEBUG)
-* 'G' -> Warned on locking inconsistency (SIGDEBUG)
+* 't' -> Warned on stage switch (T_WOSS -> SIGDEBUG)
+* 'T' -> Stopped on ptrace event
*/
#define EVL_THREAD_STATE_LABELS "SWDpRUZXHbTr...12"
#define EVL_THREAD_STATE_LABELS "SWDpRUZXHb#r...t..T"
struct evl_user_window {
__u32 state;
......
@@ -10,6 +10,7 @@
#include <evl/memory.h>
#include <evl/thread.h>
#include <evl/factory.h>
+#include <evl/flag.h>
#include <evl/tick.h>
#include <evl/sched.h>
#include <evl/control.h>
@@ -251,6 +252,34 @@ static long control_common_ioctl(struct file *filp, unsigned int cmd,
return ret;
}
static int control_open(struct inode *inode, struct file *filp)
{
struct oob_mm_state *oob_mm = dovetail_mm_state();
int ret = 0;
/*
* Opening the control device is a strong hint that we are
* about to host EVL threads in the current process, so it
* makes sense to allocate the resources we will need to
* maintain them here. The in-band kernel has no way to figure
* out when initializing the oob context for a new mm might be
* relevant, so this has to be done on demand, based on
* information only EVL has. This is the reason why the Dovetail
* interface defines no initialization call for the oob_mm state:
* the in-band kernel would not know when to call it.
*/
if (!oob_mm) /* Userland only. */
return -EPERM;
/* The control device might be opened multiple times. */
if (!test_and_set_bit(EVL_MM_INIT_BIT, &oob_mm->flags))
ret = activate_oob_mm_state(oob_mm);
return ret;
}
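From user-space, this activation is triggered simply by opening the control
device; libevl normally does so when a process attaches to the EVL core,
roughly like this (error handling elided):

#include <fcntl.h>

/* Illustrative user-space side: the first open of the control device
 * makes the kernel call activate_oob_mm_state() for this mm. */
static int attach_evl_mm(void)
{
	return open("/dev/evl/control", O_RDWR);	/* EVL_CONTROL_DEV */
}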
static long control_oob_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
@@ -291,6 +320,7 @@ static int control_mmap(struct file *filp, struct vm_area_struct *vma)
}
static const struct file_operations control_fops = {
+.open = control_open,
.oob_ioctl = control_oob_ioctl,
.unlocked_ioctl = control_ioctl,
.mmap = control_mmap,
......
@@ -19,6 +19,7 @@
#include <linux/uaccess.h>
#include <linux/hashtable.h>
#include <linux/stringhash.h>
+#include <linux/dovetail.h>
#include <evl/assert.h>
#include <evl/file.h>
#include <evl/control.h>
@@ -83,11 +84,12 @@ void evl_destroy_element(struct evl_element *e)
void evl_get_element(struct evl_element *e)
{
unsigned long flags;
+int old_refs;
raw_spin_lock_irqsave(&e->ref_lock, flags);
-EVL_WARN_ON(CORE, e->refs == 0);
-e->refs++;
+old_refs = e->refs++;
raw_spin_unlock_irqrestore(&e->ref_lock, flags);
+EVL_WARN_ON(CORE, old_refs == 0);
}
int evl_open_element(struct inode *inode, struct file *filp)
@@ -268,6 +270,7 @@ static struct device *create_device(dev_t rdev, struct evl_factory *fac,
dev->groups = fac->attrs;
dev->release = release_device;
dev_set_drvdata(dev, drvdata);
ret = dev_set_name(dev, "%s", name);
if (ret)
goto fail;
......
@@ -23,6 +23,7 @@
#include <evl/tick.h>
#include <evl/monitor.h>
#include <evl/mutex.h>
+#include <evl/flag.h>
#include <uapi/evl/signal.h>
#include <trace/events/evl.h>
@@ -728,8 +729,7 @@ static inline void set_next_running(struct evl_rq *rq,
evl_stop_timer(&rq->rrbtimer);
}
/* rq->curr->lock + rq->lock held, irqs off. */
-static struct evl_thread *pick_next_thread(struct evl_rq *rq)
+static struct evl_thread *__pick_next_thread(struct evl_rq *rq)
{
struct evl_sched_class *sched_class;
struct evl_thread *curr = rq->curr;
@@ -763,15 +763,51 @@ static struct evl_thread *pick_next_thread(struct evl_rq *rq)
*/
for_each_evl_sched_class(sched_class) {
next = sched_class->sched_pick(rq);
-if (likely(next)) {
-set_next_running(rq, next);
+if (likely(next))
return next;
-}
}
return NULL; /* NOT REACHED (idle class). */
}
/* rq->curr->lock + rq->lock held, irqs off. */
static struct evl_thread *pick_next_thread(struct evl_rq *rq)
{
struct oob_mm_state *oob_mm;
struct evl_thread *next;
for (;;) {
next = __pick_next_thread(rq);
oob_mm = next->oob_mm;
if (unlikely(!oob_mm)) /* Includes the root thread. */
break;
/*
* Obey any pending request for a ptsync freeze.
* Either we freeze @next before a sigwake event lifts
* T_PTSYNC, setting T_PTSTOP, or after it, in which case
* T_PTSTOP is already set and we don't need to
* raise T_PTSYNC. The basic assumption is that we
* should get SIGSTOP/SIGTRAP for any thread involved.
*/
if (likely(!test_bit(EVL_MM_PTSYNC_BIT, &oob_mm->flags)))
break; /* Fast and most likely path. */
if (next->info & (T_PTSTOP|T_PTSIG|T_KICKED))
break;
/*
* NOTE: We hold next->rq->lock by construction, so
* changing next->state is ok despite that we don't
* hold next->lock. This properly serializes with
* evl_kick_thread() which might raise T_PTSTOP.
*/
next->state |= T_PTSYNC;
next->state &= ~T_READY;
}
set_next_running(rq, next);
return next;
}
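Freezing here parks @next by raising T_PTSYNC, one of the blocking bits of
EVL_THREAD_BLOCK_BITS, and clearing T_READY so the pick loop retries without
it. Releasing a frozen sibling then amounts to lifting that blocking bit
again; the actual release path is not shown in this excerpt, but assuming it
reuses evl_release_thread() as resume_oob_task() below does for T_INBAND, it
would boil down to:

/* Sketch (invented helper, assumption stated above): let one sibling
 * frozen by pick_next_thread() run again once the barrier opens. */
static void ptsync_release_one(struct evl_thread *thread)
{
	evl_release_thread(thread, T_PTSYNC, 0);
}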
static inline void prepare_rq_switch(struct evl_rq *this_rq,
struct evl_thread *next)
{
@@ -924,10 +960,44 @@ void __evl_schedule(void) /* oob or oob stalled (CPU migration-safe) */
}
EXPORT_SYMBOL_GPL(__evl_schedule);
/* this_rq->lock held, oob stage stalled. */
static void start_ptsync_locked(struct evl_thread *stopper,
struct evl_rq *this_rq)
{
struct oob_mm_state *oob_mm = stopper->oob_mm;
if (!test_and_set_bit(EVL_MM_PTSYNC_BIT, &oob_mm->flags)) {
#ifdef CONFIG_SMP
cpumask_copy(&this_rq->resched_cpus, &evl_oob_cpus);
cpumask_clear_cpu(raw_smp_processor_id(), &this_rq->resched_cpus);
#endif
evl_set_self_resched(this_rq);
}
}
void evl_start_ptsync(struct evl_thread *stopper)
{
struct evl_rq *this_rq;
unsigned long flags;
if (EVL_WARN_ON(CORE, !(stopper->state & T_USER)))
return;
flags = oob_irq_save();
this_rq = this_evl_rq();
evl_spin_lock(&this_rq->lock);
start_ptsync_locked(stopper, this_rq);
evl_spin_unlock_irqrestore(&this_rq->lock, flags);
}
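Note that evl_start_ptsync() freezes nothing by itself: start_ptsync_locked()
raises EVL_MM_PTSYNC_BIT for the process, then kicks every other EVL CPU
through the rescheduling path so that pick_next_thread() above observes the
freeze request. A hypothetical caller, consistent with the T_USER check:

/* Sketch (invented caller): the ptrace glue freezes the out-of-band
 * siblings of a thread the debugger just stopped. */
static void freeze_siblings_on_ptrace_stop(struct evl_thread *stopped)
{
	if (stopped->state & T_USER) /* evl_start_ptsync() warns otherwise. */
		evl_start_ptsync(stopped);
}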
void resume_oob_task(struct task_struct *p) /* inband, oob stage stalled */
{
struct evl_thread *thread = evl_thread_from_task(p);
+/*
+ * If T_PTSTOP is set, pick_next_thread() is not allowed to
+ * freeze @thread while in flight to the out-of-band stage.
+ */
if (check_cpu_affinity(p))
evl_release_thread(thread, T_INBAND, 0);
@@ -936,20 +1006,20 @@ void resume_oob_task(struct task_struct *p) /* inband, oob stage stalled */
int evl_switch_oob(void)
{
-struct evl_thread *curr = evl_current();
struct task_struct *p = current;
+struct evl_thread *curr;
+unsigned long flags;
int ret;
inband_context_only();
+curr = evl_current();
if (curr == NULL)
return -EPERM;
if (signal_pending(p))
return -ERESTARTSYS;
-trace_evl_switching_oob(curr);
+trace_evl_switch_oob(curr);
evl_clear_sync_uwindow(curr, T_INBAND);
@@ -968,21 +1038,23 @@
*/
oob_context_only();
finish_rq_switch_from_inband();
evl_test_cancel();
trace_evl_switched_oob(curr);
/*
-* Recheck pending signals once again. As we block task
-* wakeups during the stage transition and handle_sigwake_event()
-* ignores signals until T_INBAND is cleared, any signal in
-* between is just silently queued up to here.
+* Since handle_sigwake_event()->evl_kick_thread() won't set
+* T_KICKED unless T_INBAND is cleared, a signal received
+* during the stage transition process might have gone
+* unnoticed. Recheck for signals here and raise T_KICKED if
+* some are pending, so that we switch back in-band asap for
+* handling them.
*/
if (signal_pending(p)) {
-evl_switch_inband(!(curr->state & T_SSTEP) ?
-SIGDEBUG_MIGRATE_SIGNAL:
-SIGDEBUG_NONE);
-return -ERESTARTSYS;
+evl_spin_lock_irqsave(&curr->rq->lock, flags);
+curr->info |= T_KICKED;
+evl_spin_unlock_irqrestore(&curr->rq->lock, flags);
}
return 0;
@@ -994,39 +1066,65 @@ void evl_switch_inband(int cause)
struct evl_thread *curr = evl_current();
struct task_struct *p = current;
struct kernel_siginfo si;
-struct evl_rq *rq;
+struct evl_rq *this_rq;
+bool notify;
oob_context_only();
-trace_evl_switching_inband(cause);
+trace_evl_switch_inband(cause);
/*
* This is the only location where we may assert T_INBAND for
* a thread. Basic assumption: switching to the inband stage
* only applies to the current thread running out-of-band on
-* this CPU.
-*
-* CAVEAT: dovetail_leave_oob() must run _before_ the in-band
-* kernel is allowed to take interrupts again, so that
-* try_to_wake_up() does not block the wake up request for the
-* switching thread as a result of testing task_is_off_stage().
+* this CPU. See caveat about dovetail_leave_oob() below.
*/
oob_irq_disable();
irq_work_queue(&curr->inband_work);
evl_spin_lock(&curr->lock);
-rq = curr->rq;
-evl_spin_lock(&rq->lock);
+this_rq = curr->rq;
+evl_spin_lock(&this_rq->lock);
if (curr->state & T_READY) {
evl_dequeue_thread(curr);
curr->state &= ~T_READY;
}
+curr->info &= ~EVL_THREAD_INFO_MASK;
curr->state |= T_INBAND;
curr->local_info &= ~T_SYSRST;
-evl_set_resched(rq);
-evl_spin_unlock(&rq->lock);
+notify = curr->state & T_USER && cause > SIGDEBUG_NONE;
+/*
+ * If we are initiating the ptsync sequence on breakpoint or
+ * SIGSTOP/SIGINT is pending, do not send SIGDEBUG since
+ * switching in-band is ok.
+ */
+if (cause == SIGDEBUG_TRAP) {
+curr->info |= T_PTSTOP;
+curr->info &= ~T_PTJOIN;
+start_ptsync_locked(curr, this_rq);
+} else if (curr->info & T_PTSIG) {
+curr->info &= ~T_PTSIG;
+notify = false;
+}
curr->