/*
 * SPDX-License-Identifier: GPL-2.0
 *
 * Derived from Xenomai Cobalt (http://git.xenomai.org/xenomai-3.git/)
 * Copyright (C) 2001, 2019 Philippe Gerum  <rpm@xenomai.org>
 */

#include <evenless/sched.h>
#include <evenless/wait.h>
#include <evenless/thread.h>
#include <evenless/clock.h>
#include <uapi/evenless/signal.h>
#include <trace/events/evenless.h>

/*
 * Set up @wq for use: record the backing clock and behavior flags,
 * start with an empty list of sleepers, and wire the wait channel
 * handlers the scheduler core uses to abort or reorder waiters.
 */
void evl_init_wait(struct evl_wait_queue *wq,
		struct evl_clock *clock, int flags)
{
	wq->clock = clock;
	wq->flags = flags;
	wq->wchan.abort_wait = evl_abort_wait;
	wq->wchan.reorder_wait = evl_reorder_wait;
	raw_spin_lock_init(&wq->wchan.lock);
	INIT_LIST_HEAD(&wq->wait_list);
}
EXPORT_SYMBOL_GPL(evl_init_wait);

/*
 * Tear down a wait queue: unblock every sleeper with T_RMID
 * ("resource removed") as the wakeup reason, then call the scheduler
 * so the released threads may resume right away.
 */
void evl_destroy_wait(struct evl_wait_queue *wq)
{
	evl_flush_wait(wq, T_RMID);
	evl_schedule();
}
EXPORT_SYMBOL_GPL(evl_destroy_wait);

34
/* nklock held, irqs off */
35
36
void evl_add_wait_queue(struct evl_wait_queue *wq, ktime_t timeout,
			enum evl_tmode timeout_mode)
37
{
38
	struct evl_thread *curr = evl_current();
39
40

	trace_evl_wait(wq);
41
42
43
44
45
46
47
48
49
50
51

	if (IS_ENABLED(CONFIG_EVENLESS_DEBUG_MUTEX_SLEEP) &&
		atomic_read(&curr->inband_disable_count) &&
		(curr->state & T_WARN))
		evl_signal_thread(curr, SIGDEBUG, SIGDEBUG_MUTEX_SLEEP);

	if (!(wq->flags & EVL_WAIT_PRIO))
		list_add_tail(&curr->wait_next, &wq->wait_list);
	else
		list_add_priff(curr, &wq->wait_list, wprio, wait_next);

52
	evl_sleep_on(timeout, timeout_mode, wq->clock, &wq->wchan);
53
}
54
EXPORT_SYMBOL_GPL(evl_add_wait_queue);
55

56
/* nklock held, irqs off */
57
58
59
60
61
62
63
64
65
66
67
struct evl_thread *evl_wake_up(struct evl_wait_queue *wq,
			struct evl_thread *waiter)
{
	trace_evl_wait_wakeup(wq);

	if (list_empty(&wq->wait_list))
		waiter = NULL;
	else {
		if (waiter == NULL)
			waiter = list_first_entry(&wq->wait_list,
						struct evl_thread, wait_next);
68
		evl_wakeup_thread(waiter, T_PEND, 0);
69
70
71
72
73
74
	}

	return waiter;
}
EXPORT_SYMBOL_GPL(evl_wake_up);

75
76
/* nklock held, irqs off */
void evl_flush_wait_locked(struct evl_wait_queue *wq, int reason)
77
78
79
80
81
{
	struct evl_thread *waiter, *tmp;

	trace_evl_wait_flush(wq);

82
83
	list_for_each_entry_safe(waiter, tmp, &wq->wait_list, wait_next)
		evl_wakeup_thread(waiter, T_PEND, reason);
84
85
}
EXPORT_SYMBOL_GPL(evl_flush_wait_locked);
86

87
88
89
90
91
92
void evl_flush_wait(struct evl_wait_queue *wq, int reason)
{
	unsigned long flags;

	xnlock_get_irqsave(&nklock, flags);
	evl_flush_wait_locked(wq, reason);
93
94
95
96
97
	xnlock_put_irqrestore(&nklock, flags);
}
EXPORT_SYMBOL_GPL(evl_flush_wait);

/* nklock held, irqs off */
/*
 * Wait channel handler invoked when @thread stops waiting before
 * being woken up normally: simply unlink it from the wait list.
 * @wchan is unused here since the thread holds its own list linkage.
 */
void evl_abort_wait(struct evl_thread *thread,
		struct evl_wait_channel *wchan)
{
	list_del(&thread->wait_next);
}
EXPORT_SYMBOL_GPL(evl_abort_wait);
/* Map a generic wait channel back to its enclosing wait queue. */
static inline struct evl_wait_queue *
wchan_to_wait_queue(struct evl_wait_channel *wchan)
{
	return container_of(wchan, struct evl_wait_queue, wchan);
}
/* nklock held, irqs off */
void evl_reorder_wait(struct evl_thread *thread)
{
	struct evl_wait_queue *wq = wchan_to_wait_queue(thread->wchan);

	/* FIFO queues keep their insertion order; nothing to do. */
	if (!(wq->flags & EVL_WAIT_PRIO))
		return;

	/* Re-queue @thread according to its current wait priority. */
	list_del(&thread->wait_next);
	list_add_priff(thread, &wq->wait_list, wprio, wait_next);
}
EXPORT_SYMBOL_GPL(evl_reorder_wait);