Commit b5ecfc98 authored by Philippe Gerum

cobalt: make core handle type 32/64bit-neutral

There is no point in using 64bit-wide Cobalt handles internally on
64bit platforms. On the contrary, it makes sense to keep them as 32bit
values, so that we may use them as atomic operands on either side of a
64bit (kernel) <-> 32bit (user) ABI.

While we are at it, boilerplate/atomic.h is cleaned up to disambiguate
the regular and long variants of the atomic get/set operations.
parent 87db01f5
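As a side note on the sizing argument, a 32bit handle wrapped in the new atomic_t keeps the same size and layout whether it is compiled into a 64bit kernel or a 32bit application, while atomic_long_t does not. The minimal standalone sketch below only prints the sizes; every name in it is a local stand-in for the real definitions, not part of this commit.

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins mirroring the types this commit settles on. */
typedef uint32_t xnhandle_t;               /* Cobalt handles are now __u32 everywhere */
typedef struct { int v; } atomic_t;        /* 4 bytes under both ABIs */
typedef struct { long v; } atomic_long_t;  /* 8 bytes kernel-side, 4 in 32bit userland */

int main(void)
{
        printf("xnhandle_t: %zu bytes\n", sizeof(xnhandle_t));
        printf("atomic_t: %zu bytes, atomic_long_t: %zu bytes\n",
               sizeof(atomic_t), sizeof(atomic_long_t));
        return 0;
}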
......@@ -24,6 +24,7 @@
#include <string.h>
#include <boilerplate/signal.h>
#include <boilerplate/compiler.h>
#include <boilerplate/atomic.h>
extern struct timespec __init_date;
......@@ -35,14 +36,14 @@ struct cleanup_block;
struct name_generator {
const char *radix;
int length;
int serial;
atomic_t serial;
};
#define DEFINE_NAME_GENERATOR(__name, __radix, __type, __member) \
struct name_generator __name = { \
.radix = __radix, \
.length = sizeof ((__type *)0)->__member, \
.serial = 1, \
.serial = ATOMIC_INIT(1), \
}
#define ONE_BILLION 1000000000
......
......@@ -16,44 +16,50 @@
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
*/
#ifndef _NOCORE_ATOMIC_H
#define _NOCORE_ATOMIC_H
#ifndef _BOILERPLATE_ATOMIC_H
#define _BOILERPLATE_ATOMIC_H
#include <xeno_config.h>
typedef struct {
unsigned long v;
} atomic_long_t;
typedef struct { int v; } atomic_t;
#ifndef cpu_relax
#define cpu_relax() __sync_synchronize()
#endif
typedef struct { long v; } atomic_long_t;
#ifndef atomic_long_read
#define atomic_long_read(p) ((p)->v)
#endif
#define ATOMIC_INIT(__n) { (__n) }
#ifndef atomic_long_set
#define atomic_long_set(p, i) ((p)->v = i)
#endif
static inline long atomic_long_read(const atomic_long_t *ptr)
{
return ptr->v;
}
static inline void atomic_long_set(atomic_long_t *ptr, long v)
{
ptr->v = v;
}
static inline int atomic_read(const atomic_t *ptr)
{
return ptr->v;
}
#ifndef atomic_long_cmpxchg
#define atomic_long_cmpxchg(p, o, n) \
__sync_val_compare_and_swap(&(p)->v, \
(typeof((p)->v))(o), \
(typeof((p)->v))(n))
static inline void atomic_set(atomic_t *ptr, long v)
{
ptr->v = v;
}
#ifndef atomic_cmpxchg
#define atomic_cmpxchg(__ptr, __old, __new) \
__sync_val_compare_and_swap(&(__ptr)->v, __old, __new)
#endif
#ifndef atomic_sub_fetch
#define atomic_sub_fetch(v, n) __sync_sub_and_fetch(&(v), n)
#define atomic_sub_fetch(__ptr, __n) \
__sync_sub_and_fetch(&(__ptr)->v, __n)
#endif
#ifndef atomic_add_fetch
#define atomic_add_fetch(v, n) __sync_add_and_fetch(&(v), n)
#endif
#ifndef atomic_cmp_swap
#define atomic_cmp_swap(ptr, old, new) __sync_val_compare_and_swap(ptr, old, new)
#define atomic_add_fetch(__ptr, __n) \
__sync_add_and_fetch(&(__ptr)->v, __n)
#endif
#ifdef CONFIG_SMP
......@@ -76,4 +82,8 @@ typedef struct {
#define barrier() __asm__ __volatile__("": : :"memory")
#endif /* _NOCORE_ATOMIC_H */
#ifndef cpu_relax
#define cpu_relax() __sync_synchronize()
#endif
#endif /* _BOILERPLATE_ATOMIC_H */
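For reference, the sketch below shows how the disambiguated API reads once the header is in place: the unsuffixed accessors operate on 32bit atomic_t words, the _long variants on long-sized ones. The typedefs and macros are reduced copies of the definitions added above, so the example builds standalone with GCC; the counters are made up.

#include <stdio.h>

typedef struct { int v; } atomic_t;
typedef struct { long v; } atomic_long_t;

#define ATOMIC_INIT(__n)  { (__n) }

static inline int atomic_read(const atomic_t *ptr) { return ptr->v; }
static inline void atomic_set(atomic_t *ptr, long v) { ptr->v = v; }
static inline long atomic_long_read(const atomic_long_t *ptr) { return ptr->v; }
static inline void atomic_long_set(atomic_long_t *ptr, long v) { ptr->v = v; }

#define atomic_cmpxchg(__ptr, __old, __new) \
        __sync_val_compare_and_swap(&(__ptr)->v, __old, __new)
#define atomic_add_fetch(__ptr, __n)  __sync_add_and_fetch(&(__ptr)->v, __n)

static atomic_t counter = ATOMIC_INIT(1);       /* 32bit operand */
static atomic_long_t big = ATOMIC_INIT(0);      /* long-sized operand */

int main(void)
{
        atomic_add_fetch(&counter, 1);
        int old = atomic_cmpxchg(&counter, 2, 3);   /* returns the previous value */
        atomic_long_set(&big, 1L << 20);
        printf("old=%d now=%d big=%ld\n",
               old, atomic_read(&counter), atomic_long_read(&big));
        return 0;
}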
......@@ -1082,7 +1082,7 @@ void rtdm_sem_destroy(rtdm_sem_t *sem);
typedef struct rtdm_mutex {
struct xnsynch synch_base;
atomic_long_t fastlock;
atomic_t fastlock;
} rtdm_mutex_t;
void rtdm_mutex_init(rtdm_mutex_t *mutex);
......
......@@ -56,7 +56,7 @@ struct xnsynch {
unsigned long status; /** Status word */
struct list_head pendq; /** Pending threads */
struct xnthread *owner; /** Thread which owns the resource */
atomic_long_t *fastlock; /** Pointer to fast lock word */
atomic_t *fastlock; /** Pointer to fast lock word */
void (*cleanup)(struct xnsynch *synch); /* Cleanup handler */
};
......@@ -130,7 +130,7 @@ static inline void xnsynch_detect_claimed_relax(struct xnthread *owner)
#endif /* !XENO_DEBUG(USER) */
void xnsynch_init(struct xnsynch *synch, int flags,
atomic_long_t *fastlock);
atomic_t *fastlock);
#define xnsynch_destroy(synch) xnsynch_flush(synch, XNRMID)
......
......@@ -39,19 +39,19 @@
do { (handle) &= ~(bits); } while (0)
/* Fast lock API */
static inline int xnsynch_fast_owner_check(atomic_long_t *fastlock,
xnhandle_t ownerh)
static inline int
xnsynch_fast_owner_check(atomic_t *fastlock, xnhandle_t ownerh)
{
return (xnhandle_mask_spare(atomic_long_read(fastlock)) == ownerh) ?
return (xnhandle_mask_spare(atomic_read(fastlock)) == ownerh) ?
0 : -EPERM;
}
static inline int xnsynch_fast_acquire(atomic_long_t *fastlock,
xnhandle_t new_ownerh)
static inline
int xnsynch_fast_acquire(atomic_t *fastlock, xnhandle_t new_ownerh)
{
xnhandle_t h;
h = atomic_long_cmpxchg(fastlock, XN_NO_HANDLE, new_ownerh);
h = atomic_cmpxchg(fastlock, XN_NO_HANDLE, new_ownerh);
if (h != XN_NO_HANDLE) {
if (xnhandle_mask_spare(h) == new_ownerh)
return -EBUSY;
......@@ -62,11 +62,11 @@ static inline int xnsynch_fast_acquire(atomic_long_t *fastlock,
return 0;
}
static inline int xnsynch_fast_release(atomic_long_t *fastlock,
xnhandle_t cur_ownerh)
static inline
int xnsynch_fast_release(atomic_t *fastlock, xnhandle_t cur_ownerh)
{
return (atomic_long_cmpxchg(fastlock, cur_ownerh, XN_NO_HANDLE) ==
cur_ownerh);
return atomic_cmpxchg(fastlock, cur_ownerh, XN_NO_HANDLE)
== cur_ownerh;
}
#endif /* !_COBALT_UAPI_KERNEL_SYNCH_H */
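The helpers above implement the uncontended lock path purely with a 32bit compare-and-swap: acquire by swapping the caller's handle over XN_NO_HANDLE, release by swapping it back. The standalone sketch below chains them the way a caller would; the definitions are trimmed copies of this hunk (spare-bit masking left out) and the handle value is invented for the example.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t xnhandle_t;
typedef struct { int v; } atomic_t;
#define XN_NO_HANDLE ((xnhandle_t)0)
#define atomic_cmpxchg(__ptr, __old, __new) \
        __sync_val_compare_and_swap(&(__ptr)->v, __old, __new)

static inline int fast_acquire(atomic_t *fastlock, xnhandle_t ownerh)
{
        xnhandle_t h = atomic_cmpxchg(fastlock, XN_NO_HANDLE, ownerh);

        if (h == XN_NO_HANDLE)
                return 0;                       /* grabbed without a syscall */
        return h == ownerh ? -EBUSY : -EAGAIN;  /* simplified: no spare bits */
}

static inline int fast_release(atomic_t *fastlock, xnhandle_t ownerh)
{
        return atomic_cmpxchg(fastlock, ownerh, XN_NO_HANDLE) == ownerh;
}

int main(void)
{
        atomic_t lock = { XN_NO_HANDLE };
        xnhandle_t me = 0x42;                   /* hypothetical thread handle */

        if (fast_acquire(&lock, me) == 0 && fast_release(&lock, me))
                puts("uncontended lock/unlock done entirely in user space");
        return 0;
}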
......@@ -29,7 +29,7 @@ typedef __u64 xntime_t; /* ns */
typedef __s64 xnstime_t;
typedef unsigned long xnhandle_t;
typedef __u32 xnhandle_t;
#define XN_NO_HANDLE ((xnhandle_t)0)
......
......@@ -19,7 +19,7 @@
#define _COBALT_UAPI_MONITOR_H
struct cobalt_monitor_data {
atomic_long_t owner;
atomic_t owner;
unsigned long flags;
#define COBALT_MONITOR_GRANTED 0x01
#define COBALT_MONITOR_DRAINED 0x02
......
......@@ -21,7 +21,7 @@
#define COBALT_MUTEX_MAGIC 0x86860303
struct mutex_dat {
atomic_long_t owner;
atomic_t owner;
unsigned long flags;
#define COBALT_MUTEX_COND_SIGNAL 0x00000001
#define COBALT_MUTEX_ERRORCHECK 0x00000002
......
......@@ -33,7 +33,7 @@
/**
* @fn void xnsynch_init(struct xnsynch *synch, int flags,
* atomic_long_t *fastlock)
* atomic_t *fastlock)
*
* @brief Initialize a synchronization object.
*
......@@ -78,7 +78,7 @@
*
* @coretags{task-unrestricted}
*/
void xnsynch_init(struct xnsynch *synch, int flags, atomic_long_t *fastlock)
void xnsynch_init(struct xnsynch *synch, int flags, atomic_t *fastlock)
{
if (flags & XNSYNCH_PIP)
flags |= XNSYNCH_PRIO | XNSYNCH_OWNER; /* Obviously... */
......@@ -92,7 +92,7 @@ void xnsynch_init(struct xnsynch *synch, int flags, atomic_long_t *fastlock)
if (flags & XNSYNCH_OWNER) {
BUG_ON(fastlock == NULL);
synch->fastlock = fastlock;
atomic_long_set(fastlock, XN_NO_HANDLE);
atomic_set(fastlock, XN_NO_HANDLE);
} else
synch->fastlock = NULL;
}
......@@ -338,7 +338,7 @@ int xnsynch_acquire(struct xnsynch *synch, xnticks_t timeout,
{
xnhandle_t threadh, fastlock, old;
struct xnthread *thread, *owner;
atomic_long_t *lockp;
atomic_t *lockp;
spl_t s;
primary_mode_only();
......@@ -350,7 +350,7 @@ int xnsynch_acquire(struct xnsynch *synch, xnticks_t timeout,
lockp = xnsynch_fastlock(synch);
trace_cobalt_synch_acquire(synch, thread);
redo:
fastlock = atomic_long_cmpxchg(lockp, XN_NO_HANDLE, threadh);
fastlock = atomic_cmpxchg(lockp, XN_NO_HANDLE, threadh);
if (likely(fastlock == XN_NO_HANDLE)) {
if (xnthread_test_state(thread, XNWEAK))
......@@ -369,13 +369,13 @@ redo:
* set, start with cmpxchg directly.
*/
if (xnsynch_fast_is_claimed(fastlock)) {
old = atomic_long_read(lockp);
old = atomic_read(lockp);
goto test_no_owner;
}
do {
old = atomic_long_cmpxchg(lockp, fastlock,
xnsynch_fast_set_claimed(fastlock, 1));
old = atomic_cmpxchg(lockp, fastlock,
xnsynch_fast_set_claimed(fastlock, 1));
if (likely(old == fastlock))
break;
test_no_owner:
......@@ -467,7 +467,7 @@ block:
threadh = xnsynch_fast_set_claimed(threadh, 1);
/* Set new ownership for this mutex. */
atomic_long_set(lockp, threadh);
atomic_set(lockp, threadh);
out:
xnlock_put_irqrestore(&nklock, s);
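The cmpxchg loop above (entered through the redo/test_no_owner labels) marks the lock word as claimed so the owner is forced to release through the kernel, and the winning waiter is later re-stamped into the word with atomic_set() as shown in this hunk. The standalone model below reproduces only the claim loop; the flag bit chosen here is an assumption made for the example, since the real bit assignment belongs to the handle definitions, not to this hunk.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t xnhandle_t;
typedef struct { int v; } atomic_t;
#define XN_NO_HANDLE    ((xnhandle_t)0)
#define CLAIM_BIT       ((xnhandle_t)1 << 31)   /* hypothetical claim flag */
#define atomic_read(__ptr) ((__ptr)->v)
#define atomic_cmpxchg(__ptr, __old, __new) \
        __sync_val_compare_and_swap(&(__ptr)->v, __old, __new)

static xnhandle_t set_claimed(xnhandle_t h) { return h | CLAIM_BIT; }
static int is_claimed(xnhandle_t h) { return (h & CLAIM_BIT) != 0; }

/* Flag the lock word so the owner must go through the kernel to release.
 * Returns the last value observed: the owner handle once the flag is in,
 * or XN_NO_HANDLE if the lock was dropped meanwhile (the caller would
 * then retry the fast acquire path). */
static xnhandle_t claim_lock_word(atomic_t *lockp, xnhandle_t seen)
{
        xnhandle_t old;

        if (is_claimed(seen))
                return seen;            /* someone already flagged it */

        do {
                old = atomic_cmpxchg(lockp, seen, set_claimed(seen));
                if (old == seen)
                        break;          /* flag installed atomically */
                seen = old;             /* owner or flag changed, retry */
        } while (old != XN_NO_HANDLE);

        return old;
}

int main(void)
{
        atomic_t lock = { 0x42 };       /* pretend handle 0x42 holds the lock */

        claim_lock_word(&lock, 0x42);
        printf("lock word: %#x, claimed: %d\n",
               (unsigned)atomic_read(&lock), is_claimed(atomic_read(&lock)));
        return 0;
}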
......@@ -509,7 +509,7 @@ static struct xnthread *transfer_ownership(struct xnsynch *synch,
{
struct xnthread *nextowner;
xnhandle_t nextownerh;
atomic_long_t *lockp;
atomic_t *lockp;
spl_t s;
xnlock_get_irqsave(&nklock, s);
......@@ -518,7 +518,7 @@ static struct xnthread *transfer_ownership(struct xnsynch *synch,
if (list_empty(&synch->pendq)) {
synch->owner = NULL;
atomic_long_set(lockp, XN_NO_HANDLE);
atomic_set(lockp, XN_NO_HANDLE);
xnlock_put_irqrestore(&nklock, s);
return NULL;
}
......@@ -536,7 +536,7 @@ static struct xnthread *transfer_ownership(struct xnsynch *synch,
nextownerh = xnsynch_fast_set_claimed(xnthread_handle(nextowner),
xnsynch_pended_p(synch));
atomic_long_set(lockp, nextownerh);
atomic_set(lockp, nextownerh);
xnlock_put_irqrestore(&nklock, s);
......@@ -576,8 +576,8 @@ static struct xnthread *transfer_ownership(struct xnsynch *synch,
struct xnthread *xnsynch_release(struct xnsynch *synch,
struct xnthread *thread)
{
atomic_long_t *lockp;
xnhandle_t threadh;
atomic_t *lockp;
XENO_BUGON(COBALT, (synch->status & XNSYNCH_OWNER) == 0);
......
......@@ -385,7 +385,7 @@ DECLARE_EVENT_CLASS(cobalt_posix_sem,
TP_fast_assign(
__entry->handle = handle;
),
TP_printk("sem=%#lx", __entry->handle)
TP_printk("sem=%#x", __entry->handle)
);
DEFINE_EVENT(cobalt_posix_sem, cobalt_psem_wait,
......@@ -434,7 +434,7 @@ TRACE_EVENT(cobalt_psem_getvalue,
__entry->handle = handle;
__entry->value = value;
),
TP_printk("sem=%#lx value=%d", __entry->handle, __entry->value)
TP_printk("sem=%#x value=%d", __entry->handle, __entry->value)
);
#define cobalt_print_sem_flags(__flags) \
......@@ -463,7 +463,7 @@ TRACE_EVENT(cobalt_psem_init,
__entry->flags = flags;
__entry->value = value;
),
TP_printk("sem=%#lx(%s) flags=%#x(%s) value=%u",
TP_printk("sem=%#x(%s) flags=%#x(%s) value=%u",
__entry->handle,
__get_str(name),
__entry->flags,
......@@ -527,7 +527,7 @@ TRACE_EVENT(cobalt_psem_open,
__entry->value = 0;
}
),
TP_printk("named_sem=%#lx=(%s) oflags=%#x(%s) mode=%o value=%u",
TP_printk("named_sem=%#x=(%s) oflags=%#x(%s) mode=%o value=%u",
__entry->handle, __get_str(name),
__entry->oflags, cobalt_print_oflags(__entry->oflags),
__entry->mode, __entry->value)
......
......@@ -198,7 +198,7 @@ char *generate_name(char *buf, const char *radix,
strncpy(buf, radix, len);
buf[len] = '\0';
} else {
tag = atomic_add_fetch(ngen->serial, 1);
tag = atomic_add_fetch(&ngen->serial, 1);
snprintf(buf, len, "%s@%d", ngen->radix, tag);
}
......
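Since the serial counter is now an atomic_t, concurrent callers of generate_name() get distinct tags without taking a lock. The sketch below is a reduced, standalone model of the DEFINE_NAME_GENERATOR / generate_name pair; the radix and the buffer handling are simplified for the example.

#include <stdio.h>

typedef struct { int v; } atomic_t;
#define ATOMIC_INIT(__n)  { (__n) }
#define atomic_add_fetch(__ptr, __n)  __sync_add_and_fetch(&(__ptr)->v, __n)

struct name_generator {
        const char *radix;
        atomic_t serial;
};

static struct name_generator task_ngen = {
        .radix = "task",                /* illustrative radix */
        .serial = ATOMIC_INIT(1),
};

static char *generate_name(char *buf, size_t len, struct name_generator *ngen)
{
        int tag = atomic_add_fetch(&ngen->serial, 1);   /* unique even if raced */

        snprintf(buf, len, "%s@%d", ngen->radix, tag);
        return buf;
}

int main(void)
{
        char name[32];

        puts(generate_name(name, sizeof(name), &task_ngen));    /* task@2 */
        puts(generate_name(name, sizeof(name), &task_ngen));    /* task@3 */
        return 0;
}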
......@@ -66,8 +66,8 @@ static inline void __cobalt_set_tsd(xnhandle_t current,
{
struct xnthread_user_window *window;
current = (current != XN_NO_HANDLE ? current : (xnhandle_t)(0));
pthread_setspecific(cobalt_current_key, (void *)current);
current = (current != XN_NO_HANDLE ? current : (xnhandle_t)0);
pthread_setspecific(cobalt_current_key, (void *)(uintptr_t)current);
window = (void *)(cobalt_sem_heap[0] + u_winoff);
pthread_setspecific(cobalt_current_window_key, window);
......
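Casting through uintptr_t is what lets the 32bit handle travel through the pointer-sized TSD slot without truncation or sign-extension surprises on LP64. The standalone demo below shows the round trip with plain pthread keys; the key and the handle value are made up for the example.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t xnhandle_t;
#define XN_NO_HANDLE ((xnhandle_t)0)

static pthread_key_t current_key;       /* stand-in for cobalt_current_key */

static void set_current(xnhandle_t h)
{
        /* widen to uintptr_t first, then to void *, as in __cobalt_set_tsd() */
        pthread_setspecific(current_key, (void *)(uintptr_t)h);
}

static xnhandle_t get_current(void)
{
        void *val = pthread_getspecific(current_key);

        return (xnhandle_t)(uintptr_t)val ?: XN_NO_HANDLE;
}

int main(void)
{
        pthread_key_create(&current_key, NULL);
        set_current(0x2a);                      /* hypothetical handle */
        printf("current handle: %#x\n", get_current());
        pthread_key_delete(current_key);
        return 0;
}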
......@@ -18,6 +18,7 @@
#ifndef _LIB_COBALT_CURRENT_H
#define _LIB_COBALT_CURRENT_H
#include <stdint.h>
#include <pthread.h>
#include <cobalt/uapi/thread.h>
#include <xeno_config.h>
......@@ -61,7 +62,7 @@ static inline xnhandle_t cobalt_get_current(void)
{
void *val = pthread_getspecific(cobalt_current_key);
return (xnhandle_t)val ?: cobalt_get_current_slow();
return (xnhandle_t)(uintptr_t)val ?: cobalt_get_current_slow();
}
/* syscall-free, but unreliable in TSD destructor context */
......@@ -69,7 +70,7 @@ static inline xnhandle_t cobalt_get_current_fast(void)
{
void *val = pthread_getspecific(cobalt_current_key);
return (xnhandle_t)val ?: XN_NO_HANDLE;
return (xnhandle_t)(uintptr_t)val ?: XN_NO_HANDLE;
}
static inline unsigned long cobalt_get_current_mode(void)
......
......@@ -33,7 +33,7 @@ static inline struct mutex_dat *mutex_get_datp(struct cobalt_mutex_shadow *shado
return shadow->dat;
}
static inline atomic_long_t *mutex_get_ownerp(struct cobalt_mutex_shadow *shadow)
static inline atomic_t *mutex_get_ownerp(struct cobalt_mutex_shadow *shadow)
{
return &mutex_get_datp(shadow)->owner;
}
......
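With the owner word shrunk to a 32bit atomic_t shared verbatim with the kernel, the library can cmpxchg the caller's handle into it directly on the uncontended path. The sketch below is a reduced, standalone model of that fast trylock; the layout is trimmed, the handles are invented, and where the real code falls back to a syscall the model merely reports -EBUSY.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t xnhandle_t;
typedef struct { int v; } atomic_t;
#define XN_NO_HANDLE ((xnhandle_t)0)
#define atomic_cmpxchg(__ptr, __old, __new) \
        __sync_val_compare_and_swap(&(__ptr)->v, __old, __new)

struct mutex_dat {
        atomic_t owner;         /* same size and offset for kernel and userland */
        unsigned long flags;
};

static int fast_mutex_trylock(struct mutex_dat *dat, xnhandle_t me)
{
        xnhandle_t h = atomic_cmpxchg(&dat->owner, XN_NO_HANDLE, me);

        if (h == XN_NO_HANDLE)
                return 0;       /* acquired without entering the kernel */
        return -EBUSY;          /* real code would take the slow syscall path */
}

int main(void)
{
        struct mutex_dat dat = { .owner = { XN_NO_HANDLE } };

        printf("first try: %d\n", fast_mutex_trylock(&dat, 0x51));
        printf("second try: %d\n", fast_mutex_trylock(&dat, 0x52));
        return 0;
}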
......@@ -435,9 +435,9 @@ int rt_print_init(size_t buffer_size, const char *buffer_name)
do {
bitmap = old_bitmap;
j = __builtin_ffsl(bitmap) - 1;
old_bitmap = atomic_long_cmpxchg(&pool_bitmap[i],
bitmap,
bitmap & ~(1UL << j));
old_bitmap = atomic_cmpxchg(&pool_bitmap[i],
bitmap,
bitmap & ~(1UL << j));
} while (old_bitmap != bitmap && old_bitmap);
j += i * __WORDSIZE;
} while (!old_bitmap);
......@@ -549,9 +549,9 @@ static void cleanup_buffer(struct print_buffer *buffer)
old_bitmap = atomic_long_read(&pool_bitmap[i]);
do {
bitmap = old_bitmap;
old_bitmap = atomic_long_cmpxchg(&pool_bitmap[i],
bitmap,
bitmap | (1UL << j));
old_bitmap = atomic_cmpxchg(&pool_bitmap[i],
bitmap,
bitmap | (1UL << j));
} while (old_bitmap != bitmap);
return;
......
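The pool bitmap is claimed and released with lock-free cmpxchg loops, which now go through the unsuffixed 32bit helpers. The standalone model below reduces the pool to a single word to show the pattern; the real code iterates over an array of words and folds the word index into the slot number.

#include <stdio.h>

typedef struct { int v; } atomic_t;
#define atomic_read(__ptr) ((__ptr)->v)
#define atomic_cmpxchg(__ptr, __old, __new) \
        __sync_val_compare_and_swap(&(__ptr)->v, __old, __new)

static atomic_t pool_bitmap = { 0x7 };  /* three free slots: bits 0..2 */

static int claim_slot(void)
{
        unsigned int bitmap, old_bitmap = atomic_read(&pool_bitmap);
        int j;

        do {
                if (old_bitmap == 0)
                        return -1;                      /* pool exhausted */
                bitmap = old_bitmap;
                j = __builtin_ffs(bitmap) - 1;          /* lowest free bit */
                old_bitmap = atomic_cmpxchg(&pool_bitmap, bitmap,
                                            bitmap & ~(1U << j));
        } while (old_bitmap != bitmap);                 /* raced, retry */

        return j;
}

static void release_slot(int j)
{
        unsigned int bitmap, old_bitmap = atomic_read(&pool_bitmap);

        do {
                bitmap = old_bitmap;
                old_bitmap = atomic_cmpxchg(&pool_bitmap, bitmap,
                                            bitmap | (1U << j));
        } while (old_bitmap != bitmap);
}

int main(void)
{
        int slot = claim_slot();

        printf("claimed slot %d\n", slot);
        release_slot(slot);
        printf("bitmap restored to %#x\n", (unsigned)atomic_read(&pool_bitmap));
        return 0;
}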
......@@ -201,7 +201,7 @@ COBALT_IMPL(int, sem_post, (sem_t *sem))
old = value;
new = value + 1;
value = atomic_long_cmpxchg(&datp->value, old, new);
value = atomic_cmpxchg(&datp->value, old, new);
if (value < 0)
goto do_syscall;
} while (value != old);
......@@ -262,7 +262,7 @@ COBALT_IMPL(int, sem_trywait, (sem_t *sem))
old = value;
new = value - 1;
value = atomic_long_cmpxchg(&datp->value, old, new);
value = atomic_cmpxchg(&datp->value, old, new);
if (value <= 0)
goto eagain;
} while (value != old);
......
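The semaphore fast paths follow the same pattern: speculatively adjust the shared count with a cmpxchg loop and enter the kernel only when waiters are involved. The standalone model below mirrors the sem_post loop above with a reduced sem_dat layout; the negative-count-means-waiters convention is taken from the code shown here.

#include <stdio.h>

typedef struct { int v; } atomic_t;
#define atomic_read(__ptr) ((__ptr)->v)
#define atomic_cmpxchg(__ptr, __old, __new) \
        __sync_val_compare_and_swap(&(__ptr)->v, __old, __new)

struct sem_dat {
        atomic_t value;         /* negative when threads are blocked on the sem */
};

static int fast_sem_post(struct sem_dat *datp)
{
        int value = atomic_read(&datp->value), old, new;

        do {
                old = value;
                new = value + 1;
                value = atomic_cmpxchg(&datp->value, old, new);
                if (value < 0)
                        return 1;       /* waiters: real code issues the syscall */
        } while (value != old);

        return 0;                       /* posted entirely in user space */
}

int main(void)
{
        struct sem_dat sem = { .value = { 0 } };

        fast_sem_post(&sem);
        printf("value is now %d\n", atomic_read(&sem.value));
        return 0;
}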