Commit 0888f326 authored by Gilles Chanteperdrix's avatar Gilles Chanteperdrix Committed by Philippe Gerum
Browse files

rtnet: rtskb_pool changes

An rtskb_pool now has lock/unlock operations, called for the first dequeued
buffer and the last queued one. While at it, the struct rtskb_pool is
introduced and used everywhere a pool was previously used.

rtskb_module_pool_init creates an rtskb_pool whose lock/unlock operations
lock a module (the module from which rtskb_module_pool_init was called).

The socket skb_pool locks/unlocks the socket's rtdm_fd.

Driver pools are not converted yet.
parent c643a7bc
......@@ -54,7 +54,7 @@ MODULE_PARM_DESC(rtcap_rtskbs, "Number of real-time socket buffers per "
static rtdm_nrtsig_t cap_signal;
static struct rtskb_queue cap_queue;
static struct rtskb_queue cap_pool;
static struct rtskb_pool cap_pool;
static struct tap_device_t {
struct net_device *tap_dev;
......@@ -69,13 +69,10 @@ static struct tap_device_t {
void rtcap_rx_hook(struct rtskb *rtskb)
{
if ((rtskb->cap_comp_skb = rtskb_dequeue(&cap_pool)) == 0) {
if ((rtskb->cap_comp_skb = rtskb_pool_dequeue(&cap_pool)) == 0) {
tap_device[rtskb->rtdev->ifindex].tap_dev_stats.rx_dropped++;
return;
}
#ifdef CONFIG_XENO_DRIVERS_NET_CHECKED
cap_pool.pool_balance--;
#endif
if (cap_queue.first == NULL)
cap_queue.first = rtskb;
......@@ -97,13 +94,10 @@ int rtcap_xmit_hook(struct rtskb *rtskb, struct rtnet_device *rtdev)
rtdm_lockctx_t context;
if ((rtskb->cap_comp_skb = rtskb_dequeue(&cap_pool)) == 0) {
if ((rtskb->cap_comp_skb = rtskb_pool_dequeue(&cap_pool)) == 0) {
tap_dev->tap_dev_stats.rx_dropped++;
return tap_dev->orig_xmit(rtskb, rtdev);
}
#ifdef CONFIG_XENO_DRIVERS_NET_CHECKED
cap_pool.pool_balance--;
#endif
rtskb->cap_next = NULL;
rtskb->cap_start = rtskb->data;
......@@ -156,10 +150,7 @@ void rtcap_kfree_rtskb(struct rtskb *rtskb)
rtdm_lock_put_irqrestore(&rtcap_lock, context);
rtskb_queue_tail(comp_skb->pool, comp_skb);
#ifdef CONFIG_XENO_DRIVERS_NET_CHECKED
comp_skb->pool->pool_balance++;
#endif
rtskb_pool_queue_tail(comp_skb->pool, comp_skb);
return;
}
......@@ -167,10 +158,7 @@ void rtcap_kfree_rtskb(struct rtskb *rtskb)
rtdm_lock_put_irqrestore(&rtcap_lock, context);
rtskb->chain_end = rtskb;
rtskb_queue_tail(rtskb->pool, rtskb);
#ifdef CONFIG_XENO_DRIVERS_NET_CHECKED
rtskb->pool->pool_balance++;
#endif
rtskb_pool_queue_tail(rtskb->pool, rtskb);
}
......@@ -510,7 +498,7 @@ int __init rtcap_init(void)
goto error2;
}
if (rtskb_pool_init(&cap_pool, rtcap_rtskbs * devices) <
if (rtskb_module_pool_init(&cap_pool, rtcap_rtskbs * devices) <
rtcap_rtskbs * devices) {
rtskb_pool_release(&cap_pool);
ret = -ENOMEM;
......
......@@ -71,7 +71,7 @@ static unsigned int proxy_rtskbs = DEFAULT_PROXY_RTSKBS;
module_param(proxy_rtskbs, uint, 0444);
MODULE_PARM_DESC(proxy_rtskbs, "Number of realtime socket buffers in proxy pool");
static struct rtskb_queue rtskb_pool;
static struct rtskb_pool rtskb_pool;
static struct rtskb_queue tx_queue;
static struct rtskb_queue rx_queue;
......@@ -361,7 +361,7 @@ static int __init rtnetproxy_init_module(void)
#endif
/* Initialize the proxy's rtskb pool (JK) */
if (rtskb_pool_init(&rtskb_pool, proxy_rtskbs) < proxy_rtskbs) {
if (rtskb_module_pool_init(&rtskb_pool, proxy_rtskbs) < proxy_rtskbs) {
err = -ENOMEM;
goto err1;
}
......
......@@ -48,10 +48,10 @@
enum rtnet_link_state {
__RTNET_LINK_STATE_XOFF = 0,
__RTNET_LINK_STATE_START,
__RTNET_LINK_STATE_PRESENT,
__RTNET_LINK_STATE_NOCARRIER,
__RTNET_LINK_STATE_XOFF = 0,
__RTNET_LINK_STATE_START,
__RTNET_LINK_STATE_PRESENT,
__RTNET_LINK_STATE_NOCARRIER,
};
/***
......@@ -143,8 +143,8 @@ struct rtnet_device {
unsigned int (*get_mtu)(struct rtnet_device *rtdev,
unsigned int priority);
int (*do_ioctl)(struct rtnet_device *rtdev,
unsigned int request, void * cmd);
int (*do_ioctl)(struct rtnet_device *rtdev,
unsigned int request, void * cmd);
struct net_device_stats *(*get_stats)(struct rtnet_device *rtdev);
/* DMA pre-mapping hooks */
......@@ -199,12 +199,13 @@ struct rtnet_device *rtdev_get_loopback(void);
static inline void rtdev_reference(struct rtnet_device *rtdev)
{
smp_mb__before_atomic();
atomic_inc(&rtdev->refcount);
}
static inline void rtdev_dereference(struct rtnet_device *rtdev)
{
smp_mb__before_atomic_dec();
smp_mb__before_atomic();
atomic_dec(&rtdev->refcount);
}
......
......@@ -41,7 +41,7 @@ struct rtmac_priv {
int (*orig_start_xmit)(struct rtskb *skb, struct rtnet_device *dev);
struct net_device *vnic;
struct net_device_stats vnic_stats;
struct rtskb_queue vnic_skb_pool;
struct rtskb_pool vnic_skb_pool;
unsigned int vnic_max_mtu;
u8 disc_priv[0] __attribute__ ((aligned(16)));
......@@ -77,13 +77,17 @@ struct rtmac_disc {
struct rtnet_ioctls ioctls;
struct rtmac_proc_entry *proc_entries;
struct module *owner;
};
int rtmac_disc_attach(struct rtnet_device *rtdev, struct rtmac_disc *disc);
int rtmac_disc_detach(struct rtnet_device *rtdev);
int rtmac_disc_register(struct rtmac_disc *disc);
int __rtmac_disc_register(struct rtmac_disc *disc, struct module *module);
#define rtmac_disc_register(disc) __rtmac_disc_register(disc, THIS_MODULE)
void rtmac_disc_deregister(struct rtmac_disc *disc);
#ifdef CONFIG_PROC_FS
......
......@@ -39,7 +39,7 @@ int rtmac_vnic_xmit(struct sk_buff *skb, struct net_device *dev);
void rtmac_vnic_set_max_mtu(struct rtnet_device *rtdev, unsigned int max_mtu);
int rtmac_vnic_add(struct rtnet_device *rtdev, vnic_xmit_handler vnic_xmit);
void rtmac_vnic_unregister(struct rtnet_device *rtdev);
int rtmac_vnic_unregister(struct rtnet_device *rtdev);
static inline void rtmac_vnic_cleanup(struct rtnet_device *rtdev)
{
......
......@@ -113,11 +113,11 @@ struct tdma_priv {
#ifdef ALIGN_RTOS_TASK
__u8 __align[(ALIGN_RTOS_TASK -
((sizeof(unsigned int) +
sizeof(struct rtnet_device *) +
sizeof(struct rtdm_device)
) & (ALIGN_RTOS_TASK-1))
) & (ALIGN_RTOS_TASK-1)];
((sizeof(unsigned int) +
sizeof(struct rtnet_device *) +
sizeof(struct rtdm_device)
) & (ALIGN_RTOS_TASK-1))
) & (ALIGN_RTOS_TASK-1)];
#endif
rtdm_task_t worker_task;
rtdm_event_t worker_wakeup;
......@@ -145,7 +145,7 @@ struct tdma_priv {
rtdm_lock_t lock;
#ifdef CONFIG_XENO_DRIVERS_NET_TDMA_MASTER
struct rtskb_queue cal_rtskb_pool;
struct rtskb_pool cal_rtskb_pool;
u64 cycle_period;
u64 backup_sync_inc;
#endif
......@@ -162,7 +162,7 @@ extern struct rtmac_disc tdma_disc;
struct tdma_job *entry; \
rtdm_printk("%s:%d - ", __FUNCTION__, __LINE__); \
list_for_each_entry(entry, &tdma->first_job->entry, entry) \
rtdm_printk("%d ", entry->id); \
rtdm_printk("%d ", entry->id); \
rtdm_printk("\n"); \
} while (0)
......
......@@ -41,7 +41,7 @@
struct rtsocket {
unsigned short protocol;
struct rtskb_queue skb_pool;
struct rtskb_pool skb_pool;
unsigned int pool_size;
struct mutex pool_nrt_lock;
......@@ -92,7 +92,7 @@ static inline struct rtdm_fd *rt_socket_fd(struct rtsocket *sock)
rtdm_fd_unlock(rt_socket_fd(sock))
int rt_socket_init(struct rtdm_fd *fd, unsigned short protocol);
int rt_socket_cleanup(struct rtdm_fd *fd);
void rt_socket_cleanup(struct rtdm_fd *fd);
int rt_socket_common_ioctl(struct rtdm_fd *fd, int request, void *arg);
int rt_socket_if_ioctl(struct rtdm_fd *fd, int request, void *arg);
int rt_socket_select_bind(struct rtdm_fd *fd,
......@@ -100,7 +100,7 @@ int rt_socket_select_bind(struct rtdm_fd *fd,
enum rtdm_selecttype type,
unsigned fd_index);
int rt_bare_socket_init(struct rtsocket *sock, unsigned short protocol,
int rt_bare_socket_init(struct rtdm_fd *fd, unsigned short protocol,
unsigned int priority, unsigned int pool_size);
static inline void rt_bare_socket_cleanup(struct rtsocket *sock)
......
......@@ -158,7 +158,7 @@ struct rtskb {
struct rtskb *chain_end; /* marks the end of a rtskb chain starting
with this very rtskb */
struct rtskb_queue *pool; /* owning pool */
struct rtskb_pool *pool; /* owning pool */
unsigned int priority; /* bit 0..15: prio, 16..31: user-defined */
......@@ -214,8 +214,6 @@ struct rtskb {
#ifdef CONFIG_XENO_DRIVERS_NET_CHECKED
unsigned char *buf_end;
int chain_len;
#endif
#ifdef CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP
......@@ -234,9 +232,18 @@ struct rtskb_queue {
struct rtskb *first;
struct rtskb *last;
rtdm_lock_t lock;
#ifdef CONFIG_XENO_DRIVERS_NET_CHECKED
int pool_balance;
#endif
};
struct rtskb_pool_lock_ops {
int (*trylock)(void *cookie);
void (*unlock)(void *cookie);
};
struct rtskb_pool {
struct rtskb_queue queue;
const struct rtskb_pool_lock_ops *lock_ops;
unsigned lock_count;
void *lock_cookie;
};
#define QUEUE_MAX_PRIO 0
......@@ -278,7 +285,11 @@ extern void rtskb_over_panic(struct rtskb *skb, int len, void *here);
extern void rtskb_under_panic(struct rtskb *skb, int len, void *here);
#endif
extern struct rtskb *alloc_rtskb(unsigned int size, struct rtskb_queue *pool);
extern struct rtskb *rtskb_pool_dequeue(struct rtskb_pool *pool);
extern void rtskb_pool_queue_tail(struct rtskb_pool *pool, struct rtskb *skb);
extern struct rtskb *alloc_rtskb(unsigned int size, struct rtskb_pool *pool);
#define dev_alloc_rtskb(len, pool) alloc_rtskb(len, pool)
extern void kfree_rtskb(struct rtskb *skb);
......@@ -700,44 +711,29 @@ static inline dma_addr_t rtskb_data_dma_addr(struct rtskb *rtskb,
return rtskb->buf_dma_addr + rtskb->data - rtskb->buf_start + offset;
}
extern struct rtskb_queue global_pool;
extern struct rtskb_pool global_pool;
extern unsigned int rtskb_pool_init(struct rtskb_queue *pool,
unsigned int initial_size);
extern unsigned int rtskb_pool_init_rt(struct rtskb_queue *pool,
unsigned int initial_size);
extern void __rtskb_pool_release(struct rtskb_queue *pool);
extern void __rtskb_pool_release_rt(struct rtskb_queue *pool);
extern unsigned int rtskb_pool_init(struct rtskb_pool *pool,
unsigned int initial_size,
const struct rtskb_pool_lock_ops *lock_ops,
void *lock_cookie);
#ifdef CONFIG_XENO_DRIVERS_NET_CHECKED
#define rtskb_pool_release(pool) \
do { \
RTNET_ASSERT((pool)->pool_balance == 0, \
rtdm_printk("pool: %p\n", (pool));); \
__rtskb_pool_release((pool)); \
} while (0)
#define rtskb_pool_release_rt(pool) \
do { \
RTNET_ASSERT((pool)->pool_balance == 0, \
rtdm_printk("pool: %p\n", (pool));); \
__rtskb_pool_release_rt((pool)); \
} while (0)
#else
#define rtskb_pool_release __rtskb_pool_release
#define rtskb_pool_release_rt __rtskb_pool_release_rt
#endif
extern unsigned int __rtskb_module_pool_init(struct rtskb_pool *pool,
unsigned int initial_size,
struct module *module);
#define rtskb_module_pool_init(pool, size) \
__rtskb_module_pool_init(pool, size, THIS_MODULE)
extern int rtskb_pool_release(struct rtskb_pool *pool);
extern unsigned int rtskb_pool_extend(struct rtskb_queue *pool,
extern unsigned int rtskb_pool_extend(struct rtskb_pool *pool,
unsigned int add_rtskbs);
extern unsigned int rtskb_pool_extend_rt(struct rtskb_queue *pool,
unsigned int add_rtskbs);
extern unsigned int rtskb_pool_shrink(struct rtskb_queue *pool,
extern unsigned int rtskb_pool_shrink(struct rtskb_pool *pool,
unsigned int rem_rtskbs);
extern unsigned int rtskb_pool_shrink_rt(struct rtskb_queue *pool,
unsigned int rem_rtskbs);
extern int rtskb_acquire(struct rtskb *rtskb, struct rtskb_queue *comp_pool);
extern int rtskb_acquire(struct rtskb *rtskb, struct rtskb_pool *comp_pool);
extern struct rtskb* rtskb_clone(struct rtskb *rtskb,
struct rtskb_queue *pool);
struct rtskb_pool *pool);
extern int rtskb_pools_init(void);
extern void rtskb_pools_release(void);
......
/* rtwlan.h
*
* This file is a rtnet adaption from ieee80211/ieee80211.h used by the
* This file is a rtnet adaption from ieee80211/ieee80211.h used by the
* rt2x00-2.0.0-b3 sourceforge project
*
* Merged with mainline ieee80211.h in Aug 2004. Original ieee802_11
......@@ -17,7 +17,7 @@
* <jketreno@linux.intel.com>
* Copyright (c) 2004-2005, Intel Corporation
*
* Adaption to rtnet
* Adaption to rtnet
* Copyright (c) 2006, Daniel Gregorek <dxg@gmx.de>
*
* This program is free software; you can redistribute it and/or modify
......@@ -75,7 +75,7 @@
/* management */
#define IEEE80211_STYPE_ASSOC_REQ 0x0000
#define IEEE80211_STYPE_ASSOC_RESP 0x0010
#define IEEE80211_STYPE_ASSOC_RESP 0x0010
#define IEEE80211_STYPE_REASSOC_REQ 0x0020
#define IEEE80211_STYPE_REASSOC_RESP 0x0030
#define IEEE80211_STYPE_PROBE_REQ 0x0040
......@@ -112,12 +112,12 @@
#define WLAN_FC_GET_TYPE(fc) ((fc) & IEEE80211_FCTL_FTYPE)
#define WLAN_FC_GET_STYPE(fc) ((fc) & IEEE80211_FCTL_STYPE)
#define IEEE80211_DSSS_RATE_1MB 0x02
#define IEEE80211_DSSS_RATE_2MB 0x04
#define IEEE80211_DSSS_RATE_5MB 0x0B
#define IEEE80211_DSSS_RATE_11MB 0x16
#define IEEE80211_OFDM_RATE_6MB 0x0C
#define IEEE80211_OFDM_RATE_9MB 0x12
#define IEEE80211_DSSS_RATE_1MB 0x02
#define IEEE80211_DSSS_RATE_2MB 0x04
#define IEEE80211_DSSS_RATE_5MB 0x0B
#define IEEE80211_DSSS_RATE_11MB 0x16
#define IEEE80211_OFDM_RATE_6MB 0x0C
#define IEEE80211_OFDM_RATE_9MB 0x12
#define IEEE80211_OFDM_RATE_12MB 0x18
#define IEEE80211_OFDM_RATE_18MB 0x24
#define IEEE80211_OFDM_RATE_24MB 0x30
......@@ -154,7 +154,7 @@ struct rtwlan_device {
struct rtwlan_stats stats;
struct rtskb_queue skb_pool;
struct rtskb_pool skb_pool;
int mode;
......
......@@ -48,9 +48,9 @@ struct rtpacket_type {
int (*handler)(struct rtskb *, struct rtpacket_type *);
int (*err_handler)(struct rtskb *, struct rtnet_device *,
struct rtpacket_type *);
bool (*trylock)(struct rtpacket_type *)
void (*unlock)(struct rtpacket_type *)
struct rtpacket_type *);
bool (*trylock)(struct rtpacket_type *);
void (*unlock)(struct rtpacket_type *);
};
......@@ -59,7 +59,7 @@ int rtdev_remove_pack(struct rtpacket_type *pt);
static inline bool rtdev_lock_pack(struct rtpacket_type *pt)
{
++pt->recount;
++pt->refcount;
return true;
}
......
......@@ -87,7 +87,8 @@ static struct {
struct rtsocket socket;
} icmp_socket_container;
#define icmp_socket icmp_socket_container.socket
#define icmp_fd (&icmp_socket_container.dummy.fd)
#define icmp_socket ((struct rtsocket *)rtdm_fd_to_private(icmp_fd))
void rt_icmp_queue_echo_request(struct rt_proc_call *call)
......@@ -133,7 +134,7 @@ void rt_icmp_cleanup_echo_requests(void)
}
/* purge any pending ICMP fragments */
rt_ip_frag_invalidate_socket(&icmp_socket);
rt_ip_frag_invalidate_socket(icmp_socket);
}
......@@ -194,9 +195,12 @@ static void rt_icmp_send_reply(struct icmp_bxm *icmp_param, struct rtskb *skb)
skb->rtdev->local_ip) != 0)
return;
err = rt_ip_build_xmit(&icmp_socket, rt_icmp_glue_reply_bits, icmp_param,
rt_socket_reference(icmp_socket);
err = rt_ip_build_xmit(icmp_socket, rt_icmp_glue_reply_bits, icmp_param,
sizeof(struct icmphdr) + icmp_param->data_len,
&rt, MSG_DONTWAIT);
if (err)
rt_socket_dereference(icmp_socket);
rtdev_dereference(rt.rtdev);
......@@ -320,9 +324,13 @@ static int rt_icmp_send_request(u32 daddr, struct icmp_bxm *icmp_param)
size = icmp_param->head_len + icmp_param->data_len;
if (size + 20 /* ip header */ > rt.rtdev->get_mtu(rt.rtdev, RT_ICMP_PRIO))
err = -EMSGSIZE;
else
err = rt_ip_build_xmit(&icmp_socket, rt_icmp_glue_request_bits,
else {
rt_socket_reference(icmp_socket);
err = rt_ip_build_xmit(icmp_socket, rt_icmp_glue_request_bits,
icmp_param, size, &rt, MSG_DONTWAIT);
if (err)
rt_socket_dereference(icmp_socket);
}
rtdev_dereference(rt.rtdev);
......@@ -342,7 +350,7 @@ int rt_icmp_send_echo(u32 daddr, u16 id, u16 sequence, size_t msg_size)
/* first purge any potentially pending ICMP fragments */
rt_ip_frag_invalidate_socket(&icmp_socket);
rt_ip_frag_invalidate_socket(icmp_socket);
icmp_param.head.icmph.type = ICMP_ECHO;
icmp_param.head.icmph.code = 0;
......@@ -440,7 +448,7 @@ struct rtsocket *rt_icmp_dest_socket(struct rtskb *skb)
{
/* Note that the socket's refcount is not used by this protocol.
* The socket returned here is static and not part of the global pool. */
return &icmp_socket;
return icmp_socket;
}
......@@ -520,12 +528,13 @@ void __init rt_icmp_init(void)
unsigned int skbs;
skbs = rt_bare_socket_init(&icmp_socket, IPPROTO_ICMP, RT_ICMP_PRIO,
ICMP_REPLY_POOL_SIZE);
skbs = rt_bare_socket_init(icmp_fd, IPPROTO_ICMP, RT_ICMP_PRIO,
ICMP_REPLY_POOL_SIZE);
if (skbs < ICMP_REPLY_POOL_SIZE)
printk("RTnet: allocated only %d icmp rtskbs\n", skbs);
icmp_socket.prot.inet.tos = 0;
icmp_socket->prot.inet.tos = 0;
icmp_fd->refs = 1;
rt_inet_add_protocol(&icmp_protocol);
}
......@@ -539,5 +548,5 @@ void rt_icmp_release(void)
{
rt_icmp_cleanup_echo_requests();
rt_inet_del_protocol(&icmp_protocol);
rt_bare_socket_cleanup(&icmp_socket);
rt_bare_socket_cleanup(icmp_socket);
}
......@@ -158,9 +158,6 @@ static struct rtskb *add_to_collector(struct rtskb *skb, unsigned int offset, in
/* Extend the chain */
first_skb->chain_end = skb;
#ifdef CONFIG_XENO_DRIVERS_NET_CHECKED
first_skb->chain_len++;
#endif
/* Sanity check: unordered fragments are not allowed! */
if (offset != p_coll->buf_size) {
......@@ -190,9 +187,9 @@ static struct rtskb *add_to_collector(struct rtskb *skb, unsigned int offset, in
#ifdef CONFIG_XENO_DRIVERS_NET_ADDON_PROXY
if (rt_ip_fallback_handler) {
__rtskb_push(skb, iph->ihl*4);
rt_ip_fallback_handler(skb);
return NULL;
__rtskb_push(skb, iph->ihl*4);
rt_ip_fallback_handler(skb);
return NULL;
}
#endif
......
......@@ -175,7 +175,13 @@ struct rt_tcp_dispatched_packet_send_cmd {
MODULE_LICENSE("GPL");
static struct tcp_socket rst_socket;
static struct {
struct rtdm_dev_context dummy;
struct tcp_socket rst_socket;
} rst_socket_container;
#define rst_fd (&rst_socket_container.dummy.fd)
#define rst_socket (*(struct tcp_socket *)rtdm_private_to_fd(rst_fd))
static u32 tcp_auto_port_start = 1024;
static u32 tcp_auto_port_mask = ~(RT_TCP_SOCKETS-1);
......@@ -837,6 +843,7 @@ static struct rtsocket *rt_tcp_dest_socket(struct rtskb *skb)
rst_socket.sync.ack_seq = rt_tcp_compute_ack_seq(th, data_len);
if (rt_ip_route_output(&rst_socket.rt, daddr, saddr) == 0) {
rt_socket_reference(&rst_socket.sock);
rt_tcp_send(&rst_socket, TCP_FLAG_ACK|TCP_FLAG_RST);
rtdev_dereference(rst_socket.rt.rtdev);
}
......@@ -985,6 +992,7 @@ static void rt_tcp_rcv(struct rtskb *skb)
if (rt_ip_route_output(&rst_socket.rt, rst_socket.daddr,
rst_socket.saddr) == 0) {
rt_socket_reference(&rst_socket.sock);
rt_tcp_send(&rst_socket, TCP_FLAG_RST|TCP_FLAG_ACK);
rtdev_dereference(rst_socket.rt.rtdev);
}
......@@ -1347,7 +1355,7 @@ static void rt_tcp_socket_destruct(struct tcp_socket* ts)
/***
* rt_tcp_close
*/
static int rt_tcp_close(struct rtdm_fd *fd)
static void rt_tcp_close(struct rtdm_fd *fd)
{
struct tcp_socket* ts = rtdm_fd_to_private(fd);
struct rt_tcp_dispatched_packet_send_cmd send_cmd;
......@@ -1403,7 +1411,7 @@ static int rt_tcp_close(struct rtdm_fd *fd)
rt_tcp_socket_destruct(ts);
return rt_socket_cleanup(fd);
rt_socket_cleanup(fd);
}
/***
......@@ -2255,11 +2263,12 @@ int __init rt_tcp_init(void)
INIT_HLIST_HEAD(&port_hash[i]);
/* Perform essential initialization of the RST|ACK socket */
skbs = rt_bare_socket_init(&rst_socket.sock, IPPROTO_TCP, RT_TCP_RST_PRIO,
skbs = rt_bare_socket_init(rst_fd, IPPROTO_TCP, RT_TCP_RST_PRIO,
RT_TCP_RST_POOL_SIZE);
if (skbs < RT_TCP_RST_POOL_SIZE)
printk("rttcp: allocated only %d RST|ACK rtskbs\n", skbs);
rst_socket.sock.prot.inet.tos = 0;
rst_fd->refs = 1;
rtdm_lock_init(&rst_socket.socket_lock);
/*
......
This diff is collapsed.
......@@ -38,7 +38,7 @@ MODULE_LICENSE("GPL");
static int rt_packet_rcv(struct rtskb *skb, struct rtpacket_type *pt)
{
struct rtsocket *sock = container_of(pt, struct rtsocket,
prot.packet.packet_type);
prot.packet.packet_type);
int ifindex = sock->prot.packet.ifindex;
void (*callback_func)(struct rtdm_fd *, void *);
void *callback_arg;
......@@ -46,23 +46,23 @@ static int rt_packet_rcv(struct rtskb *skb, struct rtpacket_type *pt)
if (unlikely((ifindex != 0) && (ifindex != skb->rtdev->ifindex)))
return -EUNATCH;
return -EUNATCH;
if (rt_socket_reference(sock) < 0)
return -EUNATCH;
return -EUNATCH;
#ifdef CONFIG_XENO_DRIVERS_NET_ETH_P_ALL
if (pt->type == htons(ETH_P_ALL)) {
struct rtskb *clone_skb = rtskb_clone(skb, &sock->skb_pool);
if (clone_skb == NULL)