Commit aae2ae01 authored by Philippe Gerum

evl/poll: drop watchpoints on fd closure


Signed-off-by: Philippe Gerum <rpm@xenomai.org>
parent c99f8d23
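
The diff below pairs a new fd-watch registration API (evl_watch_fd()/evl_ignore_fd()) with the in-band fd table, so that closing a file descriptor eagerly drops any watchpoints still attached to it. A minimal sketch of how the new helpers are meant to pair up from a caller's standpoint; the wait step and the error code are illustrative, not part of this commit:

	struct evl_poll_node node;
	struct evl_file *efilp;

	/* Take a file reference, link @node into the fd's poll_nodes. */
	efilp = evl_watch_fd(fd, &node);
	if (efilp == NULL)
		return -EBADF;

	/* ... arm the watchpoint and wait for events ... */

	/* Unlink @node, then drop the file reference. */
	evl_ignore_fd(&node);
	evl_put_file(efilp);

If the fd is closed while still being watched, uninstall_inband_fd() runs drop_watchpoints(), which hands the pending list to evl_drop_watchpoints() for cleanup.
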
@@ -10,12 +10,14 @@
 #include <linux/spinlock.h>
 #include <linux/atomic.h>
 #include <linux/rbtree.h>
+#include <linux/list.h>
 #include <linux/completion.h>
 #include <linux/irq_work.h>
 
 struct file;
 struct files_struct;
 struct evl_element;
+struct evl_poll_node;
 
 struct evl_file {
 	struct file *filp;
@@ -29,6 +31,7 @@ struct evl_fd {
 	struct evl_file *efilp;
 	struct files_struct *files;
 	struct rb_node rb;
+	struct list_head poll_nodes;	/* poll_item->node */
 };
 
 struct evl_file_binding {
@@ -58,6 +61,11 @@ void evl_put_file(struct evl_file *efilp) /* OOB */
 	__evl_put_file(efilp);
 }
 
+struct evl_file *evl_watch_fd(unsigned int fd,
+			struct evl_poll_node *node);
+
+void evl_ignore_fd(struct evl_poll_node *node);
+
 int evl_init_files(void);
 
 void evl_cleanup_files(void);
@@ -26,6 +26,10 @@ struct evl_poll_head {
 	hard_spinlock_t lock;
 };
 
+struct evl_poll_node {
+	struct list_head next;	/* in evl_fd->poll_nodes */
+};
+
 static inline
 void evl_init_poll_head(struct evl_poll_head *head)
 {
@@ -64,4 +68,6 @@ evl_clear_poll_events(struct evl_poll_head *head,
 
 void evl_drop_poll_table(struct evl_thread *thread);
 
+void evl_drop_watchpoints(struct list_head *drop_list);
+
 #endif /* !_EVENLESS_POLL_H */
@@ -12,10 +12,12 @@
 #include <linux/err.h>
 #include <linux/completion.h>
 #include <linux/irq_work.h>
+#include <linux/spinlock.h>
 
 #include <evenless/file.h>
 #include <evenless/memory.h>
 #include <evenless/assert.h>
 #include <evenless/sched.h>
+#include <evenless/poll.h>
 
 static struct rb_root fd_tree = RB_ROOT;
@@ -44,7 +46,7 @@ static inline bool lean_right(struct evl_fd *lh, struct evl_fd *rh)
 	return lh->files > rh->files;
 }
 
-static inline int index_sfd(struct evl_fd *sfd, struct file *filp)
+static inline int index_efd(struct evl_fd *efd, struct file *filp)
 {
 	struct rb_node **rbp, *parent = NULL;
 	struct evl_fd *tmp;
@@ -53,113 +55,126 @@ static inline int index_sfd(struct evl_fd *sfd, struct file *filp)
 	while (*rbp) {
 		tmp = rb_entry(*rbp, struct evl_fd, rb);
 		parent = *rbp;
-		if (lean_left(sfd, tmp))
+		if (lean_left(efd, tmp))
 			rbp = &(*rbp)->rb_left;
-		else if (lean_right(sfd, tmp))
+		else if (lean_right(efd, tmp))
 			rbp = &(*rbp)->rb_right;
 		else
 			return -EEXIST;
 	}
 
-	rb_link_node(&sfd->rb, parent, rbp);
-	rb_insert_color(&sfd->rb, &fd_tree);
+	rb_link_node(&efd->rb, parent, rbp);
+	rb_insert_color(&efd->rb, &fd_tree);
 
 	return 0;
 }
 
 static inline
-struct evl_fd *lookup_sfd(unsigned int fd,
+struct evl_fd *lookup_efd(unsigned int fd,
 			struct files_struct *files)
 {
-	struct evl_fd *sfd, tmp;
+	struct evl_fd *efd, tmp;
 	struct rb_node *rb;
 
 	tmp.fd = fd;
 	tmp.files = files;
 	rb = fd_tree.rb_node;
 	while (rb) {
-		sfd = rb_entry(rb, struct evl_fd, rb);
-		if (lean_left(&tmp, sfd))
+		efd = rb_entry(rb, struct evl_fd, rb);
+		if (lean_left(&tmp, efd))
 			rb = rb->rb_left;
-		else if (lean_right(&tmp, sfd))
+		else if (lean_right(&tmp, efd))
 			rb = rb->rb_right;
 		else
-			return sfd;
+			return efd;
 	}
 
 	return NULL;
 }
 
 static inline
-struct evl_fd *unindex_sfd(unsigned int fd,
+struct evl_fd *unindex_efd(unsigned int fd,
 			struct files_struct *files)
 {
-	struct evl_fd *sfd = lookup_sfd(fd, files);
+	struct evl_fd *efd = lookup_efd(fd, files);
 
-	if (sfd)
-		rb_erase(&sfd->rb, &fd_tree);
+	if (efd)
+		rb_erase(&efd->rb, &fd_tree);
 
-	return sfd;
+	return efd;
 }
 
 /* in-band, caller may hold files->file_lock */
 void install_inband_fd(unsigned int fd, struct file *filp,
 		struct files_struct *files)
 {
-	struct evl_fd *sfd;
 	unsigned long flags;
+	struct evl_fd *efd;
 	int ret = -ENOMEM;
 
 	if (filp->oob_data == NULL)
 		return;
 
-	sfd = evl_alloc(sizeof(struct evl_fd));
-	if (sfd) {
-		sfd->fd = fd;
-		sfd->files = files;
-		sfd->efilp = filp->oob_data;
+	efd = evl_alloc(sizeof(struct evl_fd));
+	if (efd) {
+		efd->fd = fd;
+		efd->files = files;
+		efd->efilp = filp->oob_data;
+		INIT_LIST_HEAD(&efd->poll_nodes);
 		raw_spin_lock_irqsave(&fdt_lock, flags);
-		ret = index_sfd(sfd, filp);
+		ret = index_efd(efd, filp);
 		raw_spin_unlock_irqrestore(&fdt_lock, flags);
 	}
 
 	EVL_WARN_ON(CORE, ret);
 }
 
+/* fdt_lock held, irqs off. CAUTION: resched required on exit. */
+static void drop_watchpoints(struct evl_fd *efd)
+{
+	if (!list_empty(&efd->poll_nodes))
+		evl_drop_watchpoints(&efd->poll_nodes);
+}
+
 /* in-band, caller holds files->file_lock */
 void uninstall_inband_fd(unsigned int fd, struct file *filp,
 			struct files_struct *files)
 {
-	struct evl_fd *sfd;
 	unsigned long flags;
+	struct evl_fd *efd;
 
 	if (filp->oob_data == NULL)
 		return;
 
 	raw_spin_lock_irqsave(&fdt_lock, flags);
-	sfd = unindex_sfd(fd, files);
+	efd = unindex_efd(fd, files);
+	if (efd)
+		drop_watchpoints(efd);
 	raw_spin_unlock_irqrestore(&fdt_lock, flags);
+	evl_schedule();
 
-	if (sfd)
-		evl_free(sfd);
+	if (efd)
+		evl_free(efd);
 }
 
 /* in-band, caller holds files->file_lock */
 void replace_inband_fd(unsigned int fd, struct file *filp,
 		struct files_struct *files)
 {
-	struct evl_fd *sfd;
 	unsigned long flags;
+	struct evl_fd *efd;
 
 	if (filp->oob_data == NULL)
 		return;
 
 	raw_spin_lock_irqsave(&fdt_lock, flags);
 
-	sfd = lookup_sfd(fd, files);
-	if (sfd) {
-		sfd->efilp = filp->oob_data;
+	efd = lookup_efd(fd, files);
+	if (efd) {
+		drop_watchpoints(efd);
+		efd->efilp = filp->oob_data;
 		raw_spin_unlock_irqrestore(&fdt_lock, flags);
+		evl_schedule();
 		return;
 	}
@@ -168,16 +183,16 @@ void replace_inband_fd(unsigned int fd, struct file *filp,
 	install_inband_fd(fd, filp, files);
 }
 
-struct evl_file *evl_get_file(unsigned int fd) /* OOB */
+struct evl_file *evl_get_file(unsigned int fd)
 {
 	struct evl_file *efilp = NULL;
-	struct evl_fd *sfd;
 	unsigned long flags;
+	struct evl_fd *efd;
 
 	raw_spin_lock_irqsave(&fdt_lock, flags);
-	sfd = lookup_sfd(fd, current->files);
-	if (sfd) {
-		efilp = sfd->efilp;
+	efd = lookup_efd(fd, current->files);
+	if (efd) {
+		efilp = efd->efilp;
 		evl_get_fileref(efilp);
 	}
 	raw_spin_unlock_irqrestore(&fdt_lock, flags);
@@ -199,6 +214,34 @@ void __evl_put_file(struct evl_file *efilp)
 	irq_work_queue(&efilp->oob_work);
 }
 
+struct evl_file *evl_watch_fd(unsigned int fd,
+			struct evl_poll_node *node)
+{
+	struct evl_file *efilp = NULL;
+	unsigned long flags;
+	struct evl_fd *efd;
+
+	raw_spin_lock_irqsave(&fdt_lock, flags);
+	efd = lookup_efd(fd, current->files);
+	if (efd) {
+		efilp = efd->efilp;
+		evl_get_fileref(efilp);
+		list_add(&node->next, &efd->poll_nodes);
+	}
+	raw_spin_unlock_irqrestore(&fdt_lock, flags);
+
+	return efilp;
+}
+
+void evl_ignore_fd(struct evl_poll_node *node)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&fdt_lock, flags);
+	list_del(&node->next);
+	raw_spin_unlock_irqrestore(&fdt_lock, flags);
+}
+
 /**
  * evl_open_file - Open new file with OOB capabilities
  *
@@ -10,6 +10,7 @@
 #include <linux/rbtree.h>
 #include <linux/poll.h>
 #include <linux/module.h>
+#include <linux/spinlock.h>
 #include <evenless/file.h>
 #include <evenless/thread.h>
 #include <evenless/memory.h>
@@ -19,21 +20,27 @@
 #include <evenless/mutex.h>
 #include <asm/evenless/syscall.h>
 
-struct event_poller {
-	struct rb_root node_index;	/* struct poll_node */
-	struct list_head node_list;	/* struct poll_node */
+struct poll_group {
+	struct rb_root item_index;	/* struct poll_item */
+	struct list_head item_list;	/* struct poll_item */
+	struct list_head waiter_list;	/* struct poll_waiter */
+	hard_spinlock_t wait_lock;
 	struct evl_file efile;
-	struct evl_kmutex lock;
-	int nodenr;
+	struct evl_kmutex item_lock;
+	int nr_items;
 	unsigned int generation;
 };
 
-struct poll_node {
+struct poll_item {
 	unsigned int fd;
-	struct evl_file *efilp;
 	int events_polled;
-	struct rb_node rb;	/* in poller->node_index */
-	struct list_head next;	/* in poller->node_list */
+	struct rb_node rb;	/* in group->item_index */
+	struct list_head next;	/* in group->item_list */
 };
 
+struct poll_waiter {
+	struct evl_flag flag;
+	struct list_head next;
+};
+
 /*
@@ -41,14 +48,16 @@ struct poll_node {
  * files not elements, so that we can monitor any type of EVL files.
  */
 struct evl_poll_watchpoint {
-	struct poll_node node;
+	unsigned int fd;
+	int events_polled;
 	int events_received;
 	struct oob_poll_wait wait;
 	struct evl_flag *flag;
 	struct evl_poll_head *head;
+	struct evl_poll_node node;
 };
 
-/* Maximum nesting depth (poller watching poller(s) */
+/* Maximum nesting depth (poll group watching other group(s)) */
 #define POLLER_NEST_MAX 4
 
 static const struct file_operations poll_fops;
@@ -60,8 +69,9 @@ void evl_poll_watch(struct evl_poll_head *head,
 	unsigned long flags;
 
 	wpt = container_of(wait, struct evl_poll_watchpoint, wait);
-	wpt->head = head;
+	/* Add to driver's poll head. */
 	raw_spin_lock_irqsave(&head->lock, flags);
+	wpt->head = head;
 	wpt->events_received = 0;
 	list_add(&wait->next, &head->watchpoints);
 	raw_spin_unlock_irqrestore(&head->lock, flags);
@@ -78,7 +88,7 @@ void __evl_signal_poll_events(struct evl_poll_head *head,
 	raw_spin_lock_irqsave(&head->lock, flags);
 
 	list_for_each_entry(wpt, &head->watchpoints, wait.next) {
-		ready = events & wpt->node.events_polled;
+		ready = events & wpt->events_polled;
 		if (ready) {
 			wpt->events_received |= ready;
 			evl_raise_flag_nosched(wpt->flag);
@@ -116,208 +126,245 @@ void evl_drop_poll_table(struct evl_thread *thread)
 }
 
 static inline
-int __index_node(struct rb_root *root, struct poll_node *node)
+int index_item(struct rb_root *root, struct poll_item *item)
 {
 	struct rb_node **rbp, *parent = NULL;
-	struct poll_node *tmp;
+	struct poll_item *tmp;
 
 	rbp = &root->rb_node;
 	while (*rbp) {
-		tmp = rb_entry(*rbp, struct poll_node, rb);
+		tmp = rb_entry(*rbp, struct poll_item, rb);
 		parent = *rbp;
-		if (node->fd < tmp->fd)
+		if (item->fd < tmp->fd)
 			rbp = &(*rbp)->rb_left;
-		else if (node->fd > tmp->fd)
+		else if (item->fd > tmp->fd)
 			rbp = &(*rbp)->rb_right;
 		else
 			return -EEXIST;
 	}
 
-	rb_link_node(&node->rb, parent, rbp);
-	rb_insert_color(&node->rb, root);
+	rb_link_node(&item->rb, parent, rbp);
+	rb_insert_color(&item->rb, root);
 
 	return 0;
 }
 
-static inline void new_generation(struct event_poller *poller)
+static inline void new_generation(struct poll_group *group)
 {
-	if (++poller->generation == 0) /* Keep zero for init state. */
-		poller->generation = 1;
+	if (++group->generation == 0) /* Keep zero for init state. */
+		group->generation = 1;
 }
 
-static int check_no_loop_deeper(struct event_poller *origin,
-				struct poll_node *node,
+static int check_no_loop_deeper(struct poll_group *origin,
+				struct poll_item *item,
 				int depth)
 {
-	struct event_poller *poller;
-	struct poll_node *_node;
+	struct poll_group *group;
+	struct poll_item *_item;
+	struct evl_file *efilp;
 	struct file *filp;
 	int ret = 0;
 
 	if (depth >= POLLER_NEST_MAX)
 		return -EINVAL;
 
-	filp = node->efilp->filp;
-	if (filp->f_op != &poll_fops)
+	efilp = evl_get_file(item->fd);
+	if (efilp == NULL)
 		return 0;
 
-	poller = filp->private_data;
-	if (poller == origin)
-		return -EINVAL;
+	filp = efilp->filp;
+	if (filp->f_op != &poll_fops)
+		goto out;
+
+	group = filp->private_data;
+	if (group == origin) {
+		ret = -EINVAL;
+		goto out;
+	}
 
-	evl_lock_kmutex(&poller->lock);
+	evl_lock_kmutex(&group->item_lock);
 
-	list_for_each_entry(_node, &poller->node_list, next) {
-		ret = check_no_loop_deeper(origin, _node, depth + 1);
+	list_for_each_entry(_item, &group->item_list, next) {
+		ret = check_no_loop_deeper(origin, _item, depth + 1);
 		if (ret)
 			break;
 	}
 
-	evl_unlock_kmutex(&poller->lock);
+	evl_unlock_kmutex(&group->item_lock);
+out:
+	evl_put_file(efilp);
 
 	return ret;
 }
 
-static int check_no_loop(struct event_poller *poller,
-			struct poll_node *node)
+static int check_no_loop(struct poll_group *group,
+			struct poll_item *item)
 {
-	return check_no_loop_deeper(poller, node, 0);
+	return check_no_loop_deeper(group, item, 0);
 }
 
-static int add_node(struct file *filp, struct event_poller *poller,
+static int add_item(struct file *filp, struct poll_group *group,
 		struct evl_poll_ctlreq *creq)
 {
-	struct poll_node *node;
-	int ret;
+	struct poll_item *item;
+	struct evl_file *efilp;
+	int ret, events;
+
+	events = creq->events & ~POLLFREE;
+	if (events == 0)
+		return -EINVAL;
 
-	node = evl_alloc(sizeof(*node));
-	if (node == NULL)
+	item = evl_alloc(sizeof(*item));
+	if (item == NULL)
 		return -ENOMEM;
 
-	node->fd = creq->fd;
-	node->events_polled = creq->events;
-	node->efilp = evl_get_file(creq->fd);
-	if (node->efilp == NULL) {
-		ret = -EBADF;
+	item->fd = creq->fd;
+	item->events_polled = events;
+	efilp = evl_get_file(creq->fd);
+	if (efilp == NULL) {
+		ret = -EINVAL;
 		goto fail_get;
 	}
 
-	evl_lock_kmutex(&poller->lock);
+	evl_lock_kmutex(&group->item_lock);
 
 	/* Check for cyclic deps. */
-	ret = check_no_loop(poller, node);
+	ret = check_no_loop(group, item);
 	if (ret)
 		goto fail_add;
 
-	ret = __index_node(&poller->node_index, node);
+	ret = index_item(&group->item_index, item);
 	if (ret)
 		goto fail_add;
 
-	list_add(&node->next, &poller->node_list);
-	poller->nodenr++;
-	new_generation(poller);
+	list_add(&item->next, &group->item_list);
+	group->nr_items++;
+	new_generation(group);
 
-	evl_unlock_kmutex(&poller->lock);
+	evl_unlock_kmutex(&group->item_lock);
+	evl_put_file(efilp);
 
 	return 0;
 
 fail_add:
-	evl_unlock_kmutex(&poller->lock);
-	evl_put_file(node->efilp);
+	evl_unlock_kmutex(&group->item_lock);
+	evl_put_file(efilp);
fail_get:
-	evl_free(node);
+	evl_free(item);
 
 	return ret;
 }
 
-static struct poll_node *
-lookup_node(struct rb_root *root, unsigned int fd)
+static struct poll_item *
+lookup_item(struct rb_root *root, unsigned int fd)
 {
-	struct poll_node *node;
+	struct poll_item *item;
 	struct rb_node *rb;
 
 	rb = root->rb_node;
 	while (rb) {
-		node = rb_entry(rb, struct poll_node, rb);
-		if (fd < node->fd)
+		item = rb_entry(rb, struct poll_item, rb);
+		if (fd < item->fd)
 			rb = rb->rb_left;
-		else if (fd > node->fd)
+		else if (fd > item->fd)
 			rb = rb->rb_right;
 		else
-			return node;
+			return item;
 	}
 
 	return NULL;
 }
 
-static void __del_node(struct poll_node *node)
-{
-	evl_put_file(node->efilp);
-	evl_free(node);
-}
-
-static int del_node(struct event_poller *poller,
+static int del_item(struct poll_group *group,
 		struct evl_poll_ctlreq *creq)
 {
-	struct poll_node *node;
+	struct poll_item *item;
 
-	evl_lock_kmutex(&poller->lock);
+	evl_lock_kmutex(&group->item_lock);
 
-	node = lookup_node(&poller->node_index, creq->fd);
-	if (node == NULL) {
-		evl_unlock_kmutex(&poller->lock);
-		return -EBADF;
+	item = lookup_item(&group->item_index, creq->fd);
+	if (item == NULL) {
+		evl_unlock_kmutex(&group->item_lock);
+		return -ENOENT;
 	}
 
-	rb_erase(&node->rb, &poller->node_index);
-	list_del(&node->next);
-	poller->nodenr--;
-	new_generation(poller);
+	rb_erase(&item->rb, &group->item_index);
+	list_del(&item->next);
+	group->nr_items--;
+	new_generation(group);
 
-	evl_unlock_kmutex(&poller->lock);
+	evl_unlock_kmutex(&group->item_lock);
 
-	__del_node(node);
+	evl_free(item);
 
 	return 0;