xenomai / xenomai4 / linux-evl · Commits

Commit aae2ae01, authored Feb 23, 2019 by Philippe Gerum

evl/poll: drop watchpoints on fd closure

Signed-off-by: Philippe Gerum <rpm@xenomai.org>

parent c99f8d23

Changes: 4 files
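In short: every EVL fd now carries a list of the poll watchpoints attached to it (evl_fd->poll_nodes), and the in-band close and replace paths walk that list to detach the watchers before the descriptor goes away. Accordingly, the poller core stops caching a per-entry file reference: the former event_poller/poll_node pair is renamed poll_group/poll_item, and items keep only the fd number, taking a transient reference via evl_get_file()/evl_put_file() whenever the backing file must be inspected.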
include/evenless/file.h

@@ -10,12 +10,14 @@
 #include <linux/spinlock.h>
 #include <linux/atomic.h>
 #include <linux/rbtree.h>
+#include <linux/list.h>
 #include <linux/completion.h>
 #include <linux/irq_work.h>

 struct file;
 struct files_struct;
 struct evl_element;
+struct evl_poll_node;

 struct evl_file {
 	struct file *filp;
@@ -29,6 +31,7 @@ struct evl_fd {
 	struct evl_file *efilp;
 	struct files_struct *files;
 	struct rb_node rb;
+	struct list_head poll_nodes;	/* poll_item->node */
 };

 struct evl_file_binding {
@@ -58,6 +61,11 @@ void evl_put_file(struct evl_file *efilp) /* OOB */
 	__evl_put_file(efilp);
 }

+struct evl_file *evl_watch_fd(unsigned int fd,
+			struct evl_poll_node *node);
+
+void evl_ignore_fd(struct evl_poll_node *node);
+
 int evl_init_files(void);

 void evl_cleanup_files(void);
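The two new entry points are meant to be used as a pair by the poll core: evl_watch_fd() resolves an fd to its evl_file, takes a reference on it and links the caller's evl_poll_node into that fd's poll_nodes list; evl_ignore_fd() undoes the linkage. A minimal caller-side sketch follows; my_watchpoint and the two helpers are hypothetical, only the evl_* calls come from the patch:

#include <linux/errno.h>
#include <evenless/file.h>
#include <evenless/poll.h>

/* Hypothetical wrapper, for illustration only. */
struct my_watchpoint {
	struct evl_file *efilp;		/* reference held while watching */
	struct evl_poll_node node;	/* linked into evl_fd->poll_nodes */
};

static int attach_watchpoint(struct my_watchpoint *wpt, unsigned int fd)
{
	/* Takes a file reference and links wpt->node into the
	 * target fd's poll_nodes list, under fdt_lock. */
	wpt->efilp = evl_watch_fd(fd, &wpt->node);

	return wpt->efilp ? 0 : -EBADF;
}

static void detach_watchpoint(struct my_watchpoint *wpt)
{
	/* Unlink first, then drop the file reference. */
	evl_ignore_fd(&wpt->node);
	evl_put_file(wpt->efilp);
}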
include/evenless/poll.h

@@ -26,6 +26,10 @@ struct evl_poll_head {
 	hard_spinlock_t lock;
 };

+struct evl_poll_node {
+	struct list_head next;	/* in evl_fd->poll_nodes */
+};
+
 static inline
 void evl_init_poll_head(struct evl_poll_head *head)
 {
@@ -64,4 +68,6 @@ evl_clear_poll_events(struct evl_poll_head *head,
 void evl_drop_poll_table(struct evl_thread *thread);

+void evl_drop_watchpoints(struct list_head *drop_list);
+
 #endif /* !_EVENLESS_POLL_H */
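evl_drop_watchpoints() receives the poll_nodes list of a closing fd and must detach and wake every watchpoint chained to it; its body lies outside the hunks shown in this excerpt. Below is a hedged sketch of the likely shape, relying only on what the patch does show: evl_poll_watchpoint embeds an evl_poll_node (see kernel/evenless/poll.c further down), waiters are signaled through evl_raise_flag_nosched(), and POLLFREE is reserved by add_item() as the "file went away" event:

/* Speculative sketch, NOT the actual implementation. */
static void drop_watchpoints_sketch(struct list_head *drop_list)
{
	struct evl_poll_watchpoint *wpt;
	struct evl_poll_node *node;

	list_for_each_entry(node, drop_list, next) {
		/* Each node is embedded in a watchpoint. */
		wpt = container_of(node, struct evl_poll_watchpoint, node);
		/* Tell the waiter its file is going away. */
		wpt->events_received |= POLLFREE;
		evl_raise_flag_nosched(wpt->flag); /* caller reschedules later */
	}
}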
kernel/evenless/file.c

@@ -12,10 +12,12 @@
 #include <linux/err.h>
 #include <linux/completion.h>
 #include <linux/irq_work.h>
+#include <linux/spinlock.h>

 #include <evenless/file.h>
 #include <evenless/memory.h>
 #include <evenless/assert.h>
 #include <evenless/sched.h>
+#include <evenless/poll.h>

 static struct rb_root fd_tree = RB_ROOT;
@@ -44,7 +46,7 @@ static inline bool lean_right(struct evl_fd *lh, struct evl_fd *rh)
 	return lh->files > rh->files;
 }

-static inline int index_sfd(struct evl_fd *sfd, struct file *filp)
+static inline int index_efd(struct evl_fd *efd, struct file *filp)
 {
 	struct rb_node **rbp, *parent = NULL;
 	struct evl_fd *tmp;
@@ -53,113 +55,126 @@ static inline int index_sfd(struct evl_fd *sfd, struct file *filp)
 	while (*rbp) {
 		tmp = rb_entry(*rbp, struct evl_fd, rb);
 		parent = *rbp;
-		if (lean_left(sfd, tmp))
+		if (lean_left(efd, tmp))
 			rbp = &(*rbp)->rb_left;
-		else if (lean_right(sfd, tmp))
+		else if (lean_right(efd, tmp))
 			rbp = &(*rbp)->rb_right;
 		else
 			return -EEXIST;
 	}

-	rb_link_node(&sfd->rb, parent, rbp);
-	rb_insert_color(&sfd->rb, &fd_tree);
+	rb_link_node(&efd->rb, parent, rbp);
+	rb_insert_color(&efd->rb, &fd_tree);

 	return 0;
 }

-static inline struct evl_fd *lookup_sfd(unsigned int fd,
+static inline struct evl_fd *lookup_efd(unsigned int fd,
 					struct files_struct *files)
 {
-	struct evl_fd *sfd, tmp;
+	struct evl_fd *efd, tmp;
 	struct rb_node *rb;

 	tmp.fd = fd;
 	tmp.files = files;
 	rb = fd_tree.rb_node;
 	while (rb) {
-		sfd = rb_entry(rb, struct evl_fd, rb);
-		if (lean_left(&tmp, sfd))
+		efd = rb_entry(rb, struct evl_fd, rb);
+		if (lean_left(&tmp, efd))
 			rb = rb->rb_left;
-		else if (lean_right(&tmp, sfd))
+		else if (lean_right(&tmp, efd))
 			rb = rb->rb_right;
 		else
-			return sfd;
+			return efd;
 	}

 	return NULL;
 }

-static inline struct evl_fd *unindex_sfd(unsigned int fd,
+static inline struct evl_fd *unindex_efd(unsigned int fd,
 					struct files_struct *files)
 {
-	struct evl_fd *sfd = lookup_sfd(fd, files);
+	struct evl_fd *efd = lookup_efd(fd, files);

-	if (sfd)
-		rb_erase(&sfd->rb, &fd_tree);
+	if (efd)
+		rb_erase(&efd->rb, &fd_tree);

-	return sfd;
+	return efd;
 }

 /* in-band, caller may hold files->file_lock */
 void install_inband_fd(unsigned int fd, struct file *filp,
 		struct files_struct *files)
 {
-	struct evl_fd *sfd;
 	unsigned long flags;
+	struct evl_fd *efd;
 	int ret = -ENOMEM;

 	if (filp->oob_data == NULL)
 		return;

-	sfd = evl_alloc(sizeof(struct evl_fd));
-	if (sfd) {
-		sfd->fd = fd;
-		sfd->files = files;
-		sfd->efilp = filp->oob_data;
+	efd = evl_alloc(sizeof(struct evl_fd));
+	if (efd) {
+		efd->fd = fd;
+		efd->files = files;
+		efd->efilp = filp->oob_data;
+		INIT_LIST_HEAD(&efd->poll_nodes);
 		raw_spin_lock_irqsave(&fdt_lock, flags);
-		ret = index_sfd(sfd, filp);
+		ret = index_efd(efd, filp);
 		raw_spin_unlock_irqrestore(&fdt_lock, flags);
 	}

 	EVL_WARN_ON(CORE, ret);
 }

+/* fdt_lock held, irqs off. CAUTION: resched required on exit. */
+static void drop_watchpoints(struct evl_fd *efd)
+{
+	if (!list_empty(&efd->poll_nodes))
+		evl_drop_watchpoints(&efd->poll_nodes);
+}
+
 /* in-band, caller holds files->file_lock */
 void uninstall_inband_fd(unsigned int fd, struct file *filp,
 			struct files_struct *files)
 {
-	struct evl_fd *sfd;
 	unsigned long flags;
+	struct evl_fd *efd;

 	if (filp->oob_data == NULL)
 		return;

 	raw_spin_lock_irqsave(&fdt_lock, flags);
-	sfd = unindex_sfd(fd, files);
+	efd = unindex_efd(fd, files);
+	if (efd)
+		drop_watchpoints(efd);
 	raw_spin_unlock_irqrestore(&fdt_lock, flags);
+	evl_schedule();

-	if (sfd)
-		evl_free(sfd);
+	if (efd)
+		evl_free(efd);
 }

 /* in-band, caller holds files->file_lock */
 void replace_inband_fd(unsigned int fd, struct file *filp,
 		struct files_struct *files)
 {
-	struct evl_fd *sfd;
 	unsigned long flags;
+	struct evl_fd *efd;

 	if (filp->oob_data == NULL)
 		return;

 	raw_spin_lock_irqsave(&fdt_lock, flags);

-	sfd = lookup_sfd(fd, files);
-	if (sfd) {
-		sfd->efilp = filp->oob_data;
+	efd = lookup_efd(fd, files);
+	if (efd) {
+		drop_watchpoints(efd);
+		efd->efilp = filp->oob_data;
 		raw_spin_unlock_irqrestore(&fdt_lock, flags);
+		evl_schedule();
 		return;
 	}
@@ -168,16 +183,16 @@ void replace_inband_fd(unsigned int fd, struct file *filp,
 	install_inband_fd(fd, filp, files);
 }

-struct evl_file *evl_get_file(unsigned int fd) /* OOB */
+struct evl_file *evl_get_file(unsigned int fd)
 {
 	struct evl_file *efilp = NULL;
-	struct evl_fd *sfd;
 	unsigned long flags;
+	struct evl_fd *efd;

 	raw_spin_lock_irqsave(&fdt_lock, flags);
-	sfd = lookup_sfd(fd, current->files);
-	if (sfd) {
-		efilp = sfd->efilp;
+	efd = lookup_efd(fd, current->files);
+	if (efd) {
+		efilp = efd->efilp;
 		evl_get_fileref(efilp);
 	}
 	raw_spin_unlock_irqrestore(&fdt_lock, flags);
@@ -199,6 +214,34 @@ void __evl_put_file(struct evl_file *efilp)
 	irq_work_queue(&efilp->oob_work);
 }

+struct evl_file *evl_watch_fd(unsigned int fd,
+			struct evl_poll_node *node)
+{
+	struct evl_file *efilp = NULL;
+	unsigned long flags;
+	struct evl_fd *efd;
+
+	raw_spin_lock_irqsave(&fdt_lock, flags);
+	efd = lookup_efd(fd, current->files);
+	if (efd) {
+		efilp = efd->efilp;
+		evl_get_fileref(efilp);
+		list_add(&node->next, &efd->poll_nodes);
+	}
+	raw_spin_unlock_irqrestore(&fdt_lock, flags);
+
+	return efilp;
+}
+
+void evl_ignore_fd(struct evl_poll_node *node)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&fdt_lock, flags);
+	list_del(&node->next);
+	raw_spin_unlock_irqrestore(&fdt_lock, flags);
+}
+
 /**
  * evl_open_file - Open new file with OOB capabilities
  *
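Note the ordering constraint the new close path obeys: drop_watchpoints() runs with fdt_lock held and hard irqs off, so waiters can only be flagged with the _nosched variants at that point; the evl_schedule() calls added right after the unlock in uninstall_inband_fd() and replace_inband_fd() are what actually let the woken threads run. That is what the CAUTION comment above drop_watchpoints() refers to.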
kernel/evenless/poll.c

@@ -10,6 +10,7 @@
 #include <linux/rbtree.h>
 #include <linux/poll.h>
 #include <linux/module.h>
+#include <linux/spinlock.h>
 #include <evenless/file.h>
 #include <evenless/thread.h>
 #include <evenless/memory.h>
@@ -19,21 +20,27 @@
 #include <evenless/mutex.h>
 #include <asm/evenless/syscall.h>

-struct event_poller {
-	struct rb_root node_index; /* struct poll_node */
-	struct list_head node_list;  /* struct poll_node */
+struct poll_group {
+	struct rb_root item_index; /* struct poll_item */
+	struct list_head item_list;  /* struct poll_item */
 	struct list_head waiter_list; /* struct poll_waiter */
 	hard_spinlock_t wait_lock;
 	struct evl_file efile;
-	struct evl_kmutex lock;
-	int nodenr;
+	struct evl_kmutex item_lock;
+	int nr_items;
 	unsigned int generation;
 };

-struct poll_node {
+struct poll_item {
 	unsigned int fd;
-	struct evl_file *efilp;
 	int events_polled;
-	struct rb_node rb;	/* in poller->node_index */
-	struct list_head next;	/* in poller->node_list */
+	struct rb_node rb;	/* in group->item_index */
+	struct list_head next;	/* in group->item_list */
+};
+
+struct poll_waiter {
+	struct evl_flag flag;
+	struct list_head next;
 };

 /*
@@ -41,14 +48,16 @@ struct poll_node {
  * files not elements, so that we can monitor any type of EVL files.
  */
 struct evl_poll_watchpoint {
-	struct poll_node node;
+	unsigned int fd;
+	int events_polled;
 	int events_received;
 	struct oob_poll_wait wait;
 	struct evl_flag *flag;
 	struct evl_poll_head *head;
+	struct evl_poll_node node;
 };

-/* Maximum nesting depth (poller watching poller(s) */
+/* Maximum nesting depth (poll group watching other group(s)) */
 #define POLLER_NEST_MAX  4

 static const struct file_operations poll_fops;
@@ -60,8 +69,9 @@ void evl_poll_watch(struct evl_poll_head *head,
 	unsigned long flags;

 	wpt = container_of(wait, struct evl_poll_watchpoint, wait);
-	wpt->head = head;
 	/* Add to driver's poll head. */
 	raw_spin_lock_irqsave(&head->lock, flags);
+	wpt->head = head;
+	wpt->events_received = 0;
 	list_add(&wait->next, &head->watchpoints);
 	raw_spin_unlock_irqrestore(&head->lock, flags);
@@ -78,7 +88,7 @@ void __evl_signal_poll_events(struct evl_poll_head *head,
 	raw_spin_lock_irqsave(&head->lock, flags);

 	list_for_each_entry(wpt, &head->watchpoints, wait.next) {
-		ready = events & wpt->node.events_polled;
+		ready = events & wpt->events_polled;
 		if (ready) {
 			wpt->events_received |= ready;
 			evl_raise_flag_nosched(wpt->flag);
@@ -116,208 +126,245 @@ void evl_drop_poll_table(struct evl_thread *thread)
 }

-static inline int __index_node(struct rb_root *root,
-			struct poll_node *node)
+static inline int index_item(struct rb_root *root,
+			struct poll_item *item)
 {
 	struct rb_node **rbp, *parent = NULL;
-	struct poll_node *tmp;
+	struct poll_item *tmp;

 	rbp = &root->rb_node;
 	while (*rbp) {
-		tmp = rb_entry(*rbp, struct poll_node, rb);
+		tmp = rb_entry(*rbp, struct poll_item, rb);
 		parent = *rbp;
-		if (node->fd < tmp->fd)
+		if (item->fd < tmp->fd)
 			rbp = &(*rbp)->rb_left;
-		else if (node->fd > tmp->fd)
+		else if (item->fd > tmp->fd)
 			rbp = &(*rbp)->rb_right;
 		else
 			return -EEXIST;
 	}

-	rb_link_node(&node->rb, parent, rbp);
-	rb_insert_color(&node->rb, root);
+	rb_link_node(&item->rb, parent, rbp);
+	rb_insert_color(&item->rb, root);

 	return 0;
 }

-static inline void new_generation(struct event_poller *poller)
+static inline void new_generation(struct poll_group *group)
 {
-	if (++poller->generation == 0) /* Keep zero for init state. */
-		poller->generation = 1;
+	if (++group->generation == 0) /* Keep zero for init state. */
+		group->generation = 1;
 }

-static int check_no_loop_deeper(struct event_poller *origin,
-				struct poll_node *node,
+static int check_no_loop_deeper(struct poll_group *origin,
+				struct poll_item *item,
 				int depth)
 {
-	struct event_poller *poller;
-	struct poll_node *_node;
+	struct poll_group *group;
+	struct poll_item *_item;
+	struct evl_file *efilp;
 	struct file *filp;
 	int ret = 0;

 	if (depth >= POLLER_NEST_MAX)
 		return -EINVAL;

-	filp = node->efilp->filp;
-	if (filp->f_op != &poll_fops)
+	efilp = evl_get_file(item->fd);
+	if (efilp == NULL)
 		return 0;

-	poller = filp->private_data;
-	if (poller == origin)
-		return -EINVAL;
+	filp = efilp->filp;
+	if (filp->f_op != &poll_fops)
+		goto out;
+
+	group = filp->private_data;
+	if (group == origin) {
+		ret = -EINVAL;
+		goto out;
+	}

-	evl_lock_kmutex(&poller->lock);
+	evl_lock_kmutex(&group->item_lock);

-	list_for_each_entry(_node, &poller->node_list, next) {
-		ret = check_no_loop_deeper(origin, _node, depth + 1);
+	list_for_each_entry(_item, &group->item_list, next) {
+		ret = check_no_loop_deeper(origin, _item, depth + 1);
 		if (ret)
 			break;
 	}

-	evl_unlock_kmutex(&poller->lock);
+	evl_unlock_kmutex(&group->item_lock);
+out:
+	evl_put_file(efilp);

 	return ret;
 }

-static int check_no_loop(struct event_poller *poller,
-			struct poll_node *node)
+static int check_no_loop(struct poll_group *group,
+			struct poll_item *item)
 {
-	return check_no_loop_deeper(poller, node, 0);
+	return check_no_loop_deeper(group, item, 0);
 }

-static int add_node(struct file *filp, struct event_poller *poller,
+static int add_item(struct file *filp, struct poll_group *group,
 		struct evl_poll_ctlreq *creq)
 {
-	struct poll_node *node;
-	int ret;
+	struct poll_item *item;
+	struct evl_file *efilp;
+	int ret, events;
+
+	events = creq->events & ~POLLFREE;
+	if (events == 0)
+		return -EINVAL;

-	node = evl_alloc(sizeof(*node));
-	if (node == NULL)
+	item = evl_alloc(sizeof(*item));
+	if (item == NULL)
 		return -ENOMEM;

-	node->fd = creq->fd;
-	node->events_polled = creq->events;
-	node->efilp = evl_get_file(creq->fd);
-	if (node->efilp == NULL) {
-		ret = -EBADF;
+	item->fd = creq->fd;
+	item->events_polled = events;
+
+	efilp = evl_get_file(creq->fd);
+	if (efilp == NULL) {
+		ret = -EINVAL;
 		goto fail_get;
 	}

-	evl_lock_kmutex(&poller->lock);
+	evl_lock_kmutex(&group->item_lock);

 	/* Check for cyclic deps. */
-	ret = check_no_loop(poller, node);
+	ret = check_no_loop(group, item);
 	if (ret)
 		goto fail_add;

-	ret = __index_node(&poller->node_index, node);
+	ret = index_item(&group->item_index, item);
 	if (ret)
 		goto fail_add;

-	list_add(&node->next, &poller->node_list);
-	poller->nodenr++;
-	new_generation(poller);
+	list_add(&item->next, &group->item_list);
+	group->nr_items++;
+	new_generation(group);

-	evl_unlock_kmutex(&poller->lock);
+	evl_unlock_kmutex(&group->item_lock);
+	evl_put_file(efilp);

 	return 0;

 fail_add:
-	evl_unlock_kmutex(&poller->lock);
-	evl_put_file(node->efilp);
+	evl_unlock_kmutex(&group->item_lock);
+	evl_put_file(efilp);
 fail_get:
-	evl_free(node);
+	evl_free(item);

 	return ret;
 }

-static struct poll_node *lookup_node(struct rb_root *root,
+static struct poll_item *lookup_item(struct rb_root *root,
 				unsigned int fd)
 {
-	struct poll_node *node;
+	struct poll_item *item;
 	struct rb_node *rb;

 	rb = root->rb_node;
 	while (rb) {
-		node = rb_entry(rb, struct poll_node, rb);
-		if (fd < node->fd)
+		item = rb_entry(rb, struct poll_item, rb);
+		if (fd < item->fd)
 			rb = rb->rb_left;
-		else if (fd > node->fd)
+		else if (fd > item->fd)
 			rb = rb->rb_right;
 		else
-			return node;
+			return item;
 	}

 	return NULL;
 }

-static void __del_node(struct poll_node *node)
-{
-	evl_put_file(node->efilp);
-	evl_free(node);
-}
-
-static int del_node(struct event_poller *poller,
+static int del_item(struct poll_group *group,
 		struct evl_poll_ctlreq *creq)
 {
-	struct poll_node *node;
+	struct poll_item *item;

-	evl_lock_kmutex(&poller->lock);
+	evl_lock_kmutex(&group->item_lock);

-	node = lookup_node(&poller->node_index, creq->fd);
-	if (node == NULL) {
-		evl_unlock_kmutex(&poller->lock);
-		return -EBADF;
+	item = lookup_item(&group->item_index, creq->fd);
+	if (item == NULL) {
+		evl_unlock_kmutex(&group->item_lock);
+		return -ENOENT;
 	}

-	rb_erase(&node->rb, &poller->node_index);
-	list_del(&node->next);
-	poller->nodenr--;
-	new_generation(poller);
+	rb_erase(&item->rb, &group->item_index);
+	list_del(&item->next);
+	group->nr_items--;
+	new_generation(group);

-	evl_unlock_kmutex(&poller->lock);
+	evl_unlock_kmutex(&group->item_lock);

-	__del_node(node);
+	evl_free(item);

 	return 0;
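With the cached poll_item->efilp gone, any code needing the watched file brackets its access between evl_get_file() and evl_put_file(), so a concurrent close cannot leave a stale pointer behind. A small sketch of that discipline; probe_evl_fd() is a hypothetical helper, the pattern itself is the one add_item() and check_no_loop_deeper() follow above:

#include <linux/errno.h>
#include <evenless/file.h>

static int probe_evl_fd(unsigned int fd)
{
	struct evl_file *efilp;

	efilp = evl_get_file(fd);	/* takes a reference, or NULL */
	if (efilp == NULL)
		return -EINVAL;	/* not (or no longer) an EVL file */

	/* efilp->filp may be dereferenced safely here: the
	 * reference pins the file until we put it back. */

	evl_put_file(efilp);	/* drop the transient reference */

	return 0;
}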