Commit c8e9e166 authored by Philippe Gerum

cobalt/rtdm: introduce mmap() support

The new ->mmap() handler is introduced in the device operation
descriptor. This service is restricted to secondary mode; a caller
running in primary mode is switched to secondary mode automatically
when the service is invoked from an application.

The prototype is:

int mmap_handler(struct rtdm_fd *fd, struct vm_area_struct *vma);

The semantics are identical to those of the corresponding handler in
the regular Linux file operations; the only difference is that the
handler receives an RTDM file descriptor (struct rtdm_fd) instead of
a regular struct file pointer. A valid vma descriptor covering the
target user address space is passed to the handler, so common mapping
operations may be performed on it.

Two new convenience routines are provided for mapping a chunk of
kernel memory and a physical I/O address range, namely
rtdm_mmap_kmem() and rtdm_mmap_iomem() respectively. Both take the
vma descriptor received by the ->mmap() handler, along with the
virtual or physical start address of the memory range to map into the
target user-space address range.
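A sketch of how these helpers might be used from a driver's ->mmap()
handler; the buffer and base address below are hypothetical:

/* Hypothetical: demo_kbuf points at a kmalloc'ed, page-aligned buffer
 * allocated at device setup time. */
static void *demo_kbuf;

static int demo_mmap_ram(struct rtdm_fd *fd, struct vm_area_struct *vma)
{
	/* Map the kernel buffer over the whole target range. */
	return rtdm_mmap_kmem(vma, demo_kbuf);
}

static int demo_mmap_regs(struct rtdm_fd *fd, struct vm_area_struct *vma)
{
	/* Map a physical I/O range; the base address is made up. */
	return rtdm_mmap_iomem(vma, 0xfe000000);
}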

The existing rtdm_mmap_to_user() and rtdm_iomap_to_user() calls are
kept unmodified. The ->mmap() introduction has no impact on existing
drivers currently relying on these calls.
parent 225f242a
@@ -87,7 +87,7 @@ enum rtdm_selecttype;
* @{
*/
/** Version of struct rtdm_device */
#define RTDM_DEVICE_STRUCT_VER 6
#define RTDM_DEVICE_STRUCT_VER 7
/** Version of struct rtdm_dev_context */
#define RTDM_CONTEXT_STRUCT_VER 4
@@ -414,13 +414,19 @@ void rtdm_toseq_init(rtdm_toseq_t *timeout_seq, nanosecs_rel_t timeout);
* variable updated by the real-time core will hold the information
* required to leave the atomic section properly.
*
* @note Atomic sections may be nested.
* @note Atomic sections may be nested. The caller is allowed to sleep
* on a blocking Xenomai service from primary mode within an atomic
* section delimited by cobalt_atomic_enter/cobalt_atomic_leave calls.
* Conversely, sleeping on a regular Linux kernel service while
* holding such a lock is NOT valid.
*
* @note Since the strongest lock is acquired by this service, it can
* be used to synchronize real-time and non-real-time contexts.
*
* @warning This service is not portable to the Mercury core, and
* should be restricted to Cobalt-specific use cases.
* should be restricted to Cobalt-specific use cases, mainly for the
* purpose of porting existing dual-kernel drivers which still depend
* on the obsolete RTDM_EXECUTE_ATOMICALLY() construct.
*/
#define cobalt_atomic_enter(context) \
do { \
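As a usage sketch (not part of this diff), a driver could combine the
atomic section with a blocking Xenomai service exactly as the note
above allows; the state structure and event names are hypothetical:

#include <rtdm/driver.h>	/* assumed Xenomai 3 RTDM driver header */

struct demo_state {		/* hypothetical driver state */
	rtdm_event_t ready_event;
	int data_ready;
};

static int demo_wait_for_data(struct demo_state *s)
{
	spl_t context;
	int ret = 0;

	cobalt_atomic_enter(context);
	/* Sleeping on a blocking Xenomai service is valid here when
	 * called from primary mode... */
	while (s->data_ready == 0) {
		ret = rtdm_event_wait(&s->ready_event);
		if (ret)
			break;
	}
	/* ...whereas calling a regular Linux kernel service while
	 * holding this lock would not be. */
	cobalt_atomic_leave(context);

	return ret;
}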
@@ -471,6 +477,8 @@ void rtdm_toseq_init(rtdm_toseq_t *timeout_seq, nanosecs_rel_t timeout);
*
* @deprecated This construct will be phased out in Xenomai
* 3.0. Please use rtdm_waitqueue services instead.
*
* @see cobalt_atomic_enter().
*/
#ifdef DOXYGEN_CPP /* Beautify doxygen output */
#define RTDM_EXECUTE_ATOMICALLY(code_block) \
@@ -1160,12 +1168,18 @@ int rtdm_mmap_to_user(struct rtdm_fd *fd,
int prot, void **pptr,
struct vm_operations_struct *vm_ops,
void *vm_private_data);
int rtdm_iomap_to_user(struct rtdm_fd *fd,
phys_addr_t src_addr, size_t len,
int prot, void **pptr,
struct vm_operations_struct *vm_ops,
void *vm_private_data);
int rtdm_munmap(struct rtdm_fd *fd, void *ptr, size_t len);
int rtdm_mmap_kmem(struct vm_area_struct *vma, void *va);
int rtdm_mmap_iomem(struct vm_area_struct *vma, phys_addr_t pa);
int rtdm_munmap(void *ptr, size_t len);
static inline int rtdm_read_user_ok(struct rtdm_fd *fd,
const void __user *ptr, size_t size)
......
@@ -25,7 +25,9 @@
#include <linux/socket.h>
#include <cobalt/kernel/tree.h>
struct vm_area_struct;
struct rtdm_fd;
struct _rtdm_mmap_request;
struct xnselector;
struct xnsys_ppd;
@@ -124,9 +126,13 @@ struct rtdm_fd_ops {
rtdm_fd_recvmsg_t *recvmsg_nrt;
rtdm_fd_sendmsg_t *sendmsg_rt;
rtdm_fd_sendmsg_t *sendmsg_nrt;
int (*select_bind)(struct rtdm_fd *fd, struct xnselector *selector,
unsigned int type, unsigned int index);
int (*select_bind)(struct rtdm_fd *fd,
struct xnselector *selector,
unsigned int type,
unsigned int index);
void (*close)(struct rtdm_fd *fd);
int (*mmap)(struct rtdm_fd *fd,
struct vm_area_struct *vma);
};
struct rtdm_fd {
@@ -161,22 +167,26 @@ void rtdm_fd_put(struct rtdm_fd *fd);
void rtdm_fd_unlock(struct rtdm_fd *fd);
int rtdm_fd_ioctl(struct xnsys_ppd *p, int fd, unsigned int request, ...);
int rtdm_fd_ioctl(struct xnsys_ppd *p, int ufd, unsigned int request, ...);
ssize_t rtdm_fd_read(struct xnsys_ppd *p, int fd,
ssize_t rtdm_fd_read(struct xnsys_ppd *p, int ufd,
void __user *buf, size_t size);
ssize_t rtdm_fd_write(struct xnsys_ppd *p, int fd,
ssize_t rtdm_fd_write(struct xnsys_ppd *p, int ufd,
const void __user *buf, size_t size);
int rtdm_fd_close(struct xnsys_ppd *p, int fd, unsigned int magic);
int rtdm_fd_close(struct xnsys_ppd *p, int ufd, unsigned int magic);
ssize_t rtdm_fd_recvmsg(struct xnsys_ppd *p, int fd,
ssize_t rtdm_fd_recvmsg(struct xnsys_ppd *p, int ufd,
struct msghdr *msg, int flags);
ssize_t rtdm_fd_sendmsg(struct xnsys_ppd *p, int fd,
ssize_t rtdm_fd_sendmsg(struct xnsys_ppd *p, int ufd,
const struct msghdr *msg, int flags);
int rtdm_fd_mmap(struct xnsys_ppd *p, int ufd,
struct _rtdm_mmap_request *rma,
void * __user *u_addrp);
int rtdm_fd_valid_p(int ufd);
int rtdm_fd_select_bind(int ufd, struct xnselector *selector,
......
@@ -29,5 +29,6 @@
#define sc_rtdm_write 5
#define sc_rtdm_recvmsg 6
#define sc_rtdm_sendmsg 7
#define sc_rtdm_mmap 8
#endif /* !_COBALT_UAPI_RTDM_SYSCALL_H */
@@ -192,6 +192,14 @@ struct _rtdm_setsockaddr_args {
#define _RTIOC_SHUTDOWN _IOW(RTIOC_TYPE_COMMON, 0x28, \
int)
/* Internally used for mmap() */
struct _rtdm_mmap_request {
size_t length;
off_t offset;
int prot;
int flags;
};
#ifndef RTDM_NO_DEFAULT_USER_API
static inline ssize_t rt_dev_recv(int fd, void *buf, size_t len, int flags)
......
@@ -33,18 +33,18 @@
#define RTDM_DEVICE_MAGIC 0x82846877
#define SET_DEFAULT_OP(device, operation) \
(device).operation##_rt = (void *)rtdm_no_support; \
(device).operation##_nrt = (void *)rtdm_no_support
#define SET_DEFAULT_OP_IF_NULL(device, operation) \
if (!(device).operation##_rt) \
(device).operation##_rt = (void *)rtdm_no_support; \
if (!(device).operation##_nrt) \
(device).operation##_nrt = (void *)rtdm_no_support
#define ANY_HANDLER(device, operation) \
((device).operation##_rt || (device).operation##_nrt)
#define SET_DEFAULT_DUAL_OP_IF_NULL(device, operation, handler) \
if ((device).operation##_rt == NULL) \
(device).operation##_rt = \
(__typeof__((device).operation##_rt))handler; \
if ((device).operation##_nrt == NULL) \
(device).operation##_nrt = \
(__typeof__((device).operation##_nrt))handler;
#define SET_DEFAULT_OP_IF_NULL(device, operation, handler) \
if ((device).operation == NULL) \
(device).operation = \
(__typeof__((device).operation))handler;
struct list_head rtdm_named_devices; /* hash table */
struct rb_root rtdm_protocol_devices;
@@ -56,18 +56,21 @@ int rtdm_initialised = 0;
extern void __rt_dev_close(struct rtdm_fd *fd);
int rtdm_no_support(void)
static int enosys(void)
{
return -ENOSYS;
}
int rtdm_select_bind_no_support(struct rtdm_fd *fd,
struct xnselector *selector,
unsigned int type, unsigned int index)
static int ebadf(void)
{
return -EBADF;
}
static int enodev(void)
{
return -ENODEV;
}
static inline unsigned long long get_proto_id(int pf, int type)
{
unsigned long long llpf = (unsigned)pf;
@@ -186,7 +189,7 @@ int rtdm_dev_register(struct rtdm_device *device)
printk(XENO_ERR "missing open handler for RTDM device\n");
return -EINVAL;
}
device->socket = (typeof(device->socket))rtdm_no_support;
device->socket = (typeof(device->socket))enosys;
break;
case RTDM_PROTOCOL_DEVICE:
@@ -195,7 +198,7 @@ int rtdm_dev_register(struct rtdm_device *device)
printk(XENO_ERR "missing socket handler for RTDM device\n");
return -EINVAL;
}
device->open = (typeof(device->open))rtdm_no_support;
device->open = (typeof(device->open))enosys;
break;
default:
@@ -204,20 +207,20 @@ int rtdm_dev_register(struct rtdm_device *device)
/* Sanity check: non-RT close handler?
* (Always required for forced cleanup) */
if (!device->ops.close) {
if (device->ops.close == NULL) {
printk(XENO_ERR "missing close handler for RTDM device\n");
return -EINVAL;
}
device->reserved.close = device->ops.close;
device->ops.close = __rt_dev_close;
SET_DEFAULT_OP_IF_NULL(device->ops, ioctl);
SET_DEFAULT_OP_IF_NULL(device->ops, read);
SET_DEFAULT_OP_IF_NULL(device->ops, write);
SET_DEFAULT_OP_IF_NULL(device->ops, recvmsg);
SET_DEFAULT_OP_IF_NULL(device->ops, sendmsg);
if (!device->ops.select_bind)
device->ops.select_bind = rtdm_select_bind_no_support;
SET_DEFAULT_DUAL_OP_IF_NULL(device->ops, ioctl, enosys);
SET_DEFAULT_DUAL_OP_IF_NULL(device->ops, read, enosys);
SET_DEFAULT_DUAL_OP_IF_NULL(device->ops, write, enosys);
SET_DEFAULT_DUAL_OP_IF_NULL(device->ops, recvmsg, enosys);
SET_DEFAULT_DUAL_OP_IF_NULL(device->ops, sendmsg, enosys);
SET_DEFAULT_OP_IF_NULL(device->ops, select_bind, ebadf);
SET_DEFAULT_OP_IF_NULL(device->ops, mmap, enodev);
atomic_set(&device->reserved.refcount, 0);
device->reserved.exclusive_context = NULL;
......
@@ -1438,96 +1438,160 @@ void rtdm_nrtsig_pend(rtdm_nrtsig_t *nrt_sig);
* @{
*/
struct rtdm_mmap_data {
struct mmap_tramp_data {
struct rtdm_fd *fd;
int (*mmap_handler)(struct rtdm_fd *fd,
struct vm_area_struct *vma);
unsigned long
(*unmapped_area_handler)(struct file *filp,
struct mmap_tramp_data *tramp_data,
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags);
};
struct mmap_helper_data {
void *src_vaddr;
phys_addr_t src_paddr;
struct vm_operations_struct *vm_ops;
void *vm_private_data;
struct mmap_tramp_data tramp_data;
};
static int rtdm_mmap_buffer(struct file *filp, struct vm_area_struct *vma)
static int mmap_kmem_helper(struct vm_area_struct *vma, void *va)
{
struct rtdm_mmap_data *mmap_data = filp->private_data;
unsigned long vaddr, maddr, size;
unsigned long vaddr, maddr, len;
phys_addr_t paddr;
int ret;
vma->vm_ops = mmap_data->vm_ops;
vma->vm_private_data = mmap_data->vm_private_data;
vaddr = (unsigned long)mmap_data->src_vaddr;
paddr = mmap_data->src_paddr;
if (paddr == 0) /* kmalloc memory? */
paddr = __pa(vaddr);
vaddr = (unsigned long)va;
paddr = __pa(vaddr);
maddr = vma->vm_start;
size = vma->vm_end - vma->vm_start;
len = vma->vm_end - vma->vm_start;
#ifdef CONFIG_MMU
/* Catch vmalloc memory (vaddr is 0 for I/O mapping) */
if ((vaddr >= VMALLOC_START) && (vaddr < VMALLOC_END)) {
unsigned long mapped_size = 0;
if (!XENO_ASSERT(RTDM, vaddr == PAGE_ALIGN(vaddr)))
return -EINVAL;
if (!XENO_ASSERT(RTDM, vaddr == PAGE_ALIGN(vaddr)))
return -EINVAL;
if (!XENO_ASSERT(RTDM, (size % PAGE_SIZE) == 0))
#ifdef CONFIG_MMU
/* Catch vmalloc memory */
if (vaddr >= VMALLOC_START && vaddr < VMALLOC_END) {
if (!XENO_ASSERT(RTDM, (len & ~PAGE_MASK) == 0))
return -EINVAL;
while (mapped_size < size) {
while (len >= PAGE_SIZE) {
if (xnheap_remap_vm_page(vma, maddr, vaddr))
return -EAGAIN;
maddr += PAGE_SIZE;
vaddr += PAGE_SIZE;
mapped_size += PAGE_SIZE;
len -= PAGE_SIZE;
}
if (xnarch_machdesc.prefault)
xnarch_machdesc.prefault(vma);
ret = 0;
} else
return 0;
}
#else
vma->vm_pgoff = paddr >> PAGE_SHIFT;
#endif /* CONFIG_MMU */
if (mmap_data->src_paddr)
ret = xnheap_remap_io_page_range(filp, vma, maddr, paddr,
size, PAGE_SHARED);
else
ret = xnheap_remap_kmem_page_range(vma, maddr, paddr,
size, PAGE_SHARED);
if (xnarch_machdesc.prefault && ret == 0)
ret = xnheap_remap_kmem_page_range(vma, maddr, paddr,
len, PAGE_SHARED);
if (ret)
return ret;
if (xnarch_machdesc.prefault)
xnarch_machdesc.prefault(vma);
return 0;
}
static int mmap_iomem_helper(struct vm_area_struct *vma, phys_addr_t pa)
{
unsigned long maddr, len;
maddr = vma->vm_start;
len = vma->vm_end - vma->vm_start;
#ifndef CONFIG_MMU
vma->vm_pgoff = pa >> PAGE_SHIFT;
#endif /* CONFIG_MMU */
return xnheap_remap_io_page_range(vma, maddr, pa,
len, PAGE_SHARED);
}
static int mmap_buffer_helper(struct rtdm_fd *fd, struct vm_area_struct *vma)
{
struct mmap_tramp_data *tramp_data = vma->vm_private_data;
struct mmap_helper_data *helper_data;
int ret;
helper_data = container_of(tramp_data, struct mmap_helper_data, tramp_data);
vma->vm_ops = helper_data->vm_ops;
vma->vm_private_data = helper_data->vm_private_data;
if (helper_data->src_paddr)
ret = mmap_iomem_helper(vma, helper_data->src_paddr);
else
ret = mmap_kmem_helper(vma, helper_data->src_vaddr);
return ret;
}
static int mmap_trampoline(struct file *filp, struct vm_area_struct *vma)
{
struct mmap_tramp_data *tramp_data = filp->private_data;
vma->vm_private_data = tramp_data;
return tramp_data->mmap_handler(tramp_data->fd, vma);
}
#ifndef CONFIG_MMU
static unsigned long rtdm_unmapped_area(struct file *file,
unsigned long addr,
unsigned long len,
unsigned long pgoff,
unsigned long flags)
static
unsigned long unmapped_area_helper(struct file *filp,
struct mmap_tramp_data *tramp_data,
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags)
{
struct rtdm_mmap_data *mmap_data = file->private_data;
struct mmap_helper_data *helper_data;
unsigned long pa;
pa = mmap_data->src_paddr;
if (pa == 0)
pa = __pa(mmap_data->src_vaddr);
helper_data = container_of(tramp_data, struct mmap_helper_data, tramp_data);
pa = helper_data->src_paddr;
if (pa)
return (unsigned long)__va(pa);
return (unsigned long)mmap_data->src_vaddr;
}
static unsigned long
unmapped_area_trampoline(struct file *filp,
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags)
{
struct mmap_tramp_data *tramp_data = filp->private_data;
if (tramp_data->unmapped_area_handler == NULL)
return -ENOSYS; /* We don't know. */
return pa;
return tramp_data->unmapped_area_handler(filp, tramp_data, addr,
len, pgoff, flags);
}
#else
#define rtdm_unmapped_area NULL
#define unmapped_area_helper NULL
#define unmapped_area_trampoline NULL
#endif
static struct file_operations rtdm_mmap_fops = {
.mmap = rtdm_mmap_buffer,
.get_unmapped_area = rtdm_unmapped_area
static struct file_operations mmap_fops = {
.mmap = mmap_trampoline,
.get_unmapped_area = unmapped_area_trampoline
};
static int rtdm_do_mmap(struct rtdm_fd *fd,
struct rtdm_mmap_data *mmap_data,
size_t len, int prot, void **pptr)
static int rtdm_mmap(struct mmap_tramp_data *tramp_data,
size_t len, off_t offset, int prot, int flags,
void **pptr)
{
const struct file_operations *old_fops;
unsigned long u_addr;
@@ -1542,14 +1606,10 @@ static int rtdm_do_mmap(struct rtdm_fd *fd,
return PTR_ERR(filp);
old_fops = filp->f_op;
filp->f_op = &rtdm_mmap_fops;
filp->f_op = &mmap_fops;
old_priv_data = filp->private_data;
filp->private_data = mmap_data;
u_addr = vm_mmap(filp, (unsigned long)*pptr, len, prot,
MAP_SHARED, 0);
filp->private_data = tramp_data;
u_addr = vm_mmap(filp, (unsigned long)*pptr, len, prot, flags, offset);
filp->f_op = (typeof(filp->f_op))old_fops;
filp->private_data = old_priv_data;
@@ -1563,6 +1623,18 @@ static int rtdm_do_mmap(struct rtdm_fd *fd,
return 0;
}
int __rtdm_mmap_from_fdop(struct rtdm_fd *fd, size_t len, off_t offset,
int prot, int flags, void *__user *pptr)
{
struct mmap_tramp_data tramp_data = {
.fd = fd,
.mmap_handler = fd->ops->mmap,
.unmapped_area_handler = NULL,
};
return rtdm_mmap(&tramp_data, len, offset, prot, flags, pptr);
}
/**
* Map a kernel memory range into the address space of the user.
*
@@ -1574,9 +1646,9 @@ static int rtdm_do_mmap(struct rtdm_fd *fd,
* either PROT_READ or PROT_READ|PROT_WRITE
* @param[in,out] pptr Address of a pointer containing the desired user
* address or NULL on entry and the finally assigned address on return
* @param[in] vm_ops vm_operations to be executed on the vma_area of the
* @param[in] vm_ops vm_operations to be executed on the vm_area of the
* user memory range or NULL
* @param[in] vm_private_data Private data to be stored in the vma_area,
* @param[in] vm_private_data Private data to be stored in the vm_area,
* primarily useful for vm_operation handlers
*
* @return 0 on success, otherwise (most common values):
@@ -1597,16 +1669,17 @@ static int rtdm_do_mmap(struct rtdm_fd *fd,
* vmalloc(). To map physical I/O memory to user-space use
* rtdm_iomap_to_user() instead.
*
* @note RTDM supports two models for unmapping the user memory range again.
* One is explicit unmapping via rtdm_munmap(), either performed when the
* user requests it via an IOCTL etc. or when the related device is closed.
* The other is automatic unmapping, triggered by the user invoking standard
* munmap() or by the termination of the related process. To track release of
* the mapping and therefore relinquishment of the referenced physical memory,
* the caller of rtdm_mmap_to_user() can pass a vm_operations_struct on
* invocation, defining a close handler for the vm_area. See Linux
* documentaion (e.g. Linux Device Drivers book) on virtual memory management
* for details.
* @note RTDM supports two models for unmapping the memory area:
* - manual unmapping via rtdm_munmap(), which may be issued from a
* driver in response to an IOCTL call, or via a regular munmap()
* call from the application.
* - automatic unmapping, triggered by the termination of the process
* which owns the mapping.
* To track the number of references pending on the resource mapped,
* the driver can pass the address of a close handler for the vm_area
* considered, in the @a vm_ops descriptor. See the relevant Linux
* kernel programming documentation (e.g. Linux Device Drivers book)
* on virtual memory management for details.
*
* @coretags{secondary-only}
*/
@@ -1616,16 +1689,20 @@ int rtdm_mmap_to_user(struct rtdm_fd *fd,
struct vm_operations_struct *vm_ops,
void *vm_private_data)
{
struct rtdm_mmap_data mmap_data = {
struct mmap_helper_data helper_data = {
.tramp_data = {
.fd = fd,
.mmap_handler = mmap_buffer_helper,
.unmapped_area_handler = unmapped_area_helper,
},
.src_vaddr = src_addr,
.src_paddr = 0,
.vm_ops = vm_ops,
.vm_private_data = vm_private_data
};
return rtdm_do_mmap(fd, &mmap_data, len, prot, pptr);
return rtdm_mmap(&helper_data.tramp_data, len, 0, prot, MAP_SHARED, pptr);
}
EXPORT_SYMBOL_GPL(rtdm_mmap_to_user);
/**
@@ -1639,9 +1716,9 @@ EXPORT_SYMBOL_GPL(rtdm_mmap_to_user);
* either PROT_READ or PROT_READ|PROT_WRITE
* @param[in,out] pptr Address of a pointer containing the desired user
* address or NULL on entry and the finally assigned address on return
* @param[in] vm_ops vm_operations to be executed on the vma_area of the
* @param[in] vm_ops vm_operations to be executed on the vm_area of the
* user memory range or NULL
* @param[in] vm_private_data Private data to be stored in the vma_area,
* @param[in] vm_private_data Private data to be stored in the vm_area,
* primarily useful for vm_operation handlers
*
* @return 0 on success, otherwise (most common values):
@@ -1658,16 +1735,17 @@ EXPORT_SYMBOL_GPL(rtdm_mmap_to_user);
* - -EPERM @e may be returned if an illegal invocation environment is
* detected.
*
* @note RTDM supports two models for unmapping the user memory range again.
* One is explicit unmapping via rtdm_munmap(), either performed when the
* user requests it via an IOCTL etc. or when the related device is closed.
* The other is automatic unmapping, triggered by the user invoking standard
* munmap() or by the termination of the related process. To track release of
* the mapping and therefore relinquishment of the referenced physical memory,
* the caller of rtdm_iomap_to_user() can pass a vm_operations_struct on
* invocation, defining a close handler for the vm_area. See Linux
* documentaion (e.g. Linux Device Drivers book) on virtual memory management
* for details.
* @note RTDM supports two models for unmapping the memory area:
* - manual unmapping via rtdm_munmap(), which may be issued from a
* driver in response to an IOCTL call, or via a regular munmap()
* call from the application.
* - automatic unmapping, triggered by the termination of the process
* which owns the mapping.
* To track the number of references pending on the resource mapped,
* the driver can pass the address of a close handler for the vm_area
* considered, in the @a vm_ops descriptor. See the relevant Linux
* kernel programming documentation (e.g. Linux Device Drivers book)
* on virtual memory management for details.
*
* @coretags{secondary-only}
*/
@@ -1677,23 +1755,72 @@ int rtdm_iomap_to_user(struct rtdm_fd *fd,
struct vm_operations_struct *vm_ops,
void *vm_private_data)
{
struct rtdm_mmap_data mmap_data = {
struct mmap_helper_data helper_data = {
.tramp_data = {
.fd = fd,
.mmap_handler = mmap_buffer_helper,
.unmapped_area_handler = unmapped_area_helper,
},
.src_vaddr = NULL,
.src_paddr = src_addr,
.vm_ops = vm_ops,
.vm_private_data = vm_private_data
};
return rtdm_do_mmap(fd, &mmap_data, len, prot, pptr);
return rtdm_mmap(&helper_data.tramp_data, len, 0, prot, MAP_SHARED, pptr);
}
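
Putting the pieces together, a driver adopting this commit would plug
its handler into the device operation descriptor roughly as follows;
the handler names are hypothetical and unrelated members are elided:

/* Hypothetical wiring of the new ->mmap() handler. */
static int demo_mmap(struct rtdm_fd *fd, struct vm_area_struct *vma);
static void demo_close(struct rtdm_fd *fd);

static struct rtdm_device demo_device = {
	.struct_version = RTDM_DEVICE_STRUCT_VER,	/* now 7 */
	/* ... other members elided ... */
	.ops = {
		.close = demo_close,	/* always required */
		/* If .mmap were left NULL, rtdm_dev_register()
		 * would substitute a stub returning -ENODEV. */
		.mmap = demo_mmap,
	},
};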