Commit 892ad5ac authored by Linus Torvalds

Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Ingo Molnar:
 "The main changes in this cycle were:

   - Add CONFIG_REFCOUNT_FULL to make the 'full' (robustness-checked)
     refcount_t implementation optional, with an unchecked variant of
     slightly lower runtime overhead as the alternative. (Kees Cook)

     The lighter-weight variant is the default; the two variants use the
     same API (see the usage sketch after this message). Having the
     lighter variant available was a precondition set by some maintainers
     for merging refcount_t cleanups.

   - Add lockdep support for rtmutexes (Peter Zijlstra)

   - liblockdep fixes and improvements (Sasha Levin, Ben Hutchings)

   - ... misc fixes and improvements"
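
A minimal sketch of that shared API (an illustrative get/put pattern, not
code from this merge; "struct foo" and its helpers are hypothetical):

	#include <linux/refcount.h>
	#include <linux/slab.h>

	struct foo {
		refcount_t refs;
		/* ... payload ... */
	};

	static struct foo *foo_get(struct foo *f)
	{
		/* Take a reference only if the count has not already hit zero. */
		return refcount_inc_not_zero(&f->refs) ? f : NULL;
	}

	static void foo_put(struct foo *f)
	{
		/* Drop a reference; free the object on the final put. */
		if (refcount_dec_and_test(&f->refs))
			kfree(f);
	}

The same calls compile against either variant; only the amount of checking
behind them differs.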

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (30 commits)
  locking/refcount: Remove the half-implemented refcount_sub() API
  locking/refcount: Create unchecked atomic_t implementation
  locking/rtmutex: Don't initialize lockdep when not required
  locking/selftest: Add RT-mutex support
  locking/selftest: Remove the bad unlock ordering test
  rt_mutex: Add lockdep annotations
  MAINTAINERS: Claim atomic*_t maintainership
  locking/x86: Remove the unused atomic_inc_short() method
  tools/lib/lockdep: Remove private kernel headers
  tools/lib/lockdep: Hide liblockdep output from test results
  tools/lib/lockdep: Add dummy current_gfp_context()
  tools/include: Add IS_ERR_OR_NULL to err.h
  tools/lib/lockdep: Add empty __is_[module,kernel]_percpu_address
  tools/lib/lockdep: Include err.h
  tools/include: Add (mostly) empty include/linux/sched/mm.h
  tools/lib/lockdep: Use LDFLAGS
  tools/lib/lockdep: Remove double-quotes from soname
  tools/lib/lockdep: Fix object file paths used in an out-of-tree build
  tools/lib/lockdep: Fix compilation for 4.11
  tools/lib/lockdep: Don't mix fd-based and stream IO
  ...
parents 162b246e 5d6dec6f
@@ -2322,6 +2322,15 @@ F: Documentation/devicetree/bindings/input/atmel,maxtouch.txt
F: drivers/input/touchscreen/atmel_mxt_ts.c
F: include/linux/platform_data/atmel_mxt_ts.h
ATOMIC INFRASTRUCTURE
M: Will Deacon <will.deacon@arm.com>
M: Peter Zijlstra <peterz@infradead.org>
R: Boqun Feng <boqun.feng@gmail.com>
L: linux-kernel@vger.kernel.org
S: Maintained
F: arch/*/include/asm/atomic*.h
F: include/*/atomic*.h
ATTO EXPRESSSAS SAS/SATA RAID SCSI DRIVER
M: Bradley Grove <linuxdrivers@attotech.com>
L: linux-scsi@vger.kernel.org
@@ -7555,7 +7564,7 @@ S: Maintained
F: drivers/ata/sata_promise.*
LIBLOCKDEP
M: Sasha Levin <sasha.levin@oracle.com>
M: Sasha Levin <alexander.levin@verizon.com>
S: Maintained
F: tools/lib/lockdep/
...
@@ -867,4 +867,13 @@ config STRICT_MODULE_RWX
config ARCH_WANT_RELAX_ORDER
bool
config REFCOUNT_FULL
bool "Perform full reference count validation at the expense of speed"
help
Enabling this switches the refcounting infrastructure from a fast
unchecked atomic_t implementation to a fully state checked
implementation, which can be (slightly) slower but provides protections
against various use-after-free conditions that can be used in
security flaw exploits.
source "kernel/gcov/Kconfig"
@@ -24,8 +24,7 @@
* has an opportunity to return -EFAULT to the user if needed.
* The 64-bit routines just return a "long long" with the value,
* since they are only used from kernel space and don't expect to fault.
* Support for 16-bit ops is included in the framework but we don't provide
* any (x86_64 has an atomic_inc_short(), so we might want to some day).
* Support for 16-bit ops is included in the framework but we don't provide any.
*
* Note that the caller is advised to issue a suitable L1 or L2
* prefetch on the address being manipulated to avoid extra stalls.
...
@@ -246,19 +246,6 @@ static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
return c;
}
/**
* atomic_inc_short - increment of a short integer
* @v: pointer to type short int
*
* Atomically adds 1 to @v
* Returns the new value of @v
*/
static __always_inline short int atomic_inc_short(short int *v)
{
asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v));
return *v;
}
#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
#else
...
@@ -41,6 +41,7 @@ static inline unsigned int refcount_read(const refcount_t *r)
return atomic_read(&r->refs);
}
#ifdef CONFIG_REFCOUNT_FULL
extern __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r);
extern void refcount_add(unsigned int i, refcount_t *r);
@@ -48,10 +49,45 @@ extern __must_check bool refcount_inc_not_zero(refcount_t *r);
extern void refcount_inc(refcount_t *r);
extern __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r);
extern void refcount_sub(unsigned int i, refcount_t *r);
extern __must_check bool refcount_dec_and_test(refcount_t *r);
extern void refcount_dec(refcount_t *r);
#else
static inline __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r)
{
return atomic_add_unless(&r->refs, i, 0);
}
static inline void refcount_add(unsigned int i, refcount_t *r)
{
atomic_add(i, &r->refs);
}
static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
{
return atomic_add_unless(&r->refs, 1, 0);
}
static inline void refcount_inc(refcount_t *r)
{
atomic_inc(&r->refs);
}
static inline __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r)
{
return atomic_sub_and_test(i, &r->refs);
}
static inline __must_check bool refcount_dec_and_test(refcount_t *r)
{
return atomic_dec_and_test(&r->refs);
}
static inline void refcount_dec(refcount_t *r)
{
atomic_dec(&r->refs);
}
#endif /* CONFIG_REFCOUNT_FULL */
extern __must_check bool refcount_dec_if_one(refcount_t *r);
extern __must_check bool refcount_dec_not_one(refcount_t *r);
...
@@ -37,6 +37,9 @@ struct rt_mutex {
int line;
void *magic;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
};
struct rt_mutex_waiter;
@@ -58,19 +61,33 @@ struct hrtimer_sleeper;
#ifdef CONFIG_DEBUG_RT_MUTEXES
# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
, .name = #mutexname, .file = __FILE__, .line = __LINE__
# define rt_mutex_init(mutex) __rt_mutex_init(mutex, __func__)
# define rt_mutex_init(mutex) \
do { \
static struct lock_class_key __key; \
__rt_mutex_init(mutex, __func__, &__key); \
} while (0)
extern void rt_mutex_debug_task_free(struct task_struct *tsk);
#else
# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL)
# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL, NULL)
# define rt_mutex_debug_task_free(t) do { } while (0)
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \
, .dep_map = { .name = #mutexname }
#else
#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)
#endif
#define __RT_MUTEX_INITIALIZER(mutexname) \
{ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
, .waiters = RB_ROOT \
, .owner = NULL \
__DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
__DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
__DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)}
#define DEFINE_RT_MUTEX(mutexname) \
struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
@@ -86,7 +103,7 @@ static inline int rt_mutex_is_locked(struct rt_mutex *lock)
return lock->owner != NULL;
}
extern void __rt_mutex_init(struct rt_mutex *lock, const char *name);
extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key);
extern void rt_mutex_destroy(struct rt_mutex *lock);
extern void rt_mutex_lock(struct rt_mutex *lock);
...
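
The lock_class_key threaded through rt_mutex_init() above follows the usual
lockdep idiom: each static rt_mutex_init() call site gets its own key, hence
its own lockdep class, while locks initialized from the same site share one.
A rough sketch (the surrounding structures are hypothetical):

	rt_mutex_init(&dev->lock_a);	/* init site #1 -> lockdep class #1 */
	rt_mutex_init(&dev->lock_b);	/* init site #2 -> lockdep class #2 */

	for (i = 0; i < NR_LOCKS; i++)	/* one site: all NR_LOCKS locks share a class */
		rt_mutex_init(&locks[i]);

This is also why __rt_mutex_init() now takes an explicit key: a caller such
as the selftest code further down passes one key to several locks on purpose,
to force them into the same class.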
@@ -166,12 +166,16 @@ void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
memset(waiter, 0x22, sizeof(*waiter));
}
void debug_rt_mutex_init(struct rt_mutex *lock, const char *name)
void debug_rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key)
{
/*
* Make sure we are not reinitializing a held lock:
*/
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
lock->name = name;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
}
@@ -11,7 +11,7 @@
extern void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
extern void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter);
extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name);
extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key);
extern void debug_rt_mutex_lock(struct rt_mutex *lock);
extern void debug_rt_mutex_unlock(struct rt_mutex *lock);
extern void debug_rt_mutex_proxy_lock(struct rt_mutex *lock,
...
@@ -1481,6 +1481,7 @@ void __sched rt_mutex_lock(struct rt_mutex *lock)
{
might_sleep();
mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
@@ -1496,9 +1497,16 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
*/
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
{
int ret;
might_sleep();
return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
if (ret)
mutex_release(&lock->dep_map, 1, _RET_IP_);
return ret;
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
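
The shape of this change repeats for every sleeping acquire in the patch:
report the acquisition to lockdep up front, then retract it if the lock was
never actually taken. Schematically (a sketch with a hypothetical locking
backend):

	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);	/* about to hold it */
	ret = backend_lock_interruptible(lock);		/* may fail, e.g. -EINTR */
	if (ret)
		mutex_release(&lock->dep_map, 1, _RET_IP_);	/* never got it: undo */
	return ret;

Trylocks invert the order: rt_mutex_trylock() below annotates only after the
attempt has succeeded, passing the trylock flag to mutex_acquire().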
@@ -1526,11 +1534,18 @@ int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
int
rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
{
int ret;
might_sleep();
return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
ret = rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
RT_MUTEX_MIN_CHAINWALK,
rt_mutex_slowlock);
if (ret)
mutex_release(&lock->dep_map, 1, _RET_IP_);
return ret;
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
@@ -1547,10 +1562,16 @@ EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
*/
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
int ret;
if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
return 0;
return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
if (ret)
mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
return ret;
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);
@@ -1561,6 +1582,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_trylock);
*/
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
mutex_release(&lock->dep_map, 1, _RET_IP_);
rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);
@@ -1620,7 +1642,6 @@ void rt_mutex_destroy(struct rt_mutex *lock)
lock->magic = NULL;
#endif
}
EXPORT_SYMBOL_GPL(rt_mutex_destroy);
/**
@@ -1632,14 +1653,16 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
*
* Initializing a locked rt lock is not allowed
*/
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
void __rt_mutex_init(struct rt_mutex *lock, const char *name,
struct lock_class_key *key)
{
lock->owner = NULL;
raw_spin_lock_init(&lock->wait_lock);
lock->waiters = RB_ROOT;
lock->waiters_leftmost = NULL;
debug_rt_mutex_init(lock, name);
if (name && key)
debug_rt_mutex_init(lock, name, key);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);
@@ -1660,7 +1683,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
struct task_struct *proxy_owner)
{
__rt_mutex_init(lock, NULL);
__rt_mutex_init(lock, NULL, NULL);
debug_rt_mutex_proxy_lock(lock, proxy_owner);
rt_mutex_set_owner(lock, proxy_owner);
}
...
@@ -17,7 +17,7 @@
#define debug_rt_mutex_proxy_lock(l,p) do { } while (0)
#define debug_rt_mutex_proxy_unlock(l) do { } while (0)
#define debug_rt_mutex_unlock(l) do { } while (0)
#define debug_rt_mutex_init(m, n) do { } while (0)
#define debug_rt_mutex_init(m, n, k) do { } while (0)
#define debug_rt_mutex_deadlock(d, a ,l) do { } while (0)
#define debug_rt_mutex_print_deadlock(w) do { } while (0)
#define debug_rt_mutex_reset_waiter(w) do { } while (0)
...
@@ -1052,6 +1052,7 @@ config DEBUG_LOCK_ALLOC
depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
select DEBUG_SPINLOCK
select DEBUG_MUTEXES
select DEBUG_RT_MUTEXES if RT_MUTEXES
select LOCKDEP
help
This feature will check whether any held lock (spinlock, rwlock,
@@ -1067,6 +1068,7 @@ config PROVE_LOCKING
select LOCKDEP
select DEBUG_SPINLOCK
select DEBUG_MUTEXES
select DEBUG_RT_MUTEXES if RT_MUTEXES
select DEBUG_LOCK_ALLOC
select TRACE_IRQFLAGS
default n
@@ -1121,6 +1123,7 @@ config LOCK_STAT
select LOCKDEP
select DEBUG_SPINLOCK
select DEBUG_MUTEXES
select DEBUG_RT_MUTEXES if RT_MUTEXES
select DEBUG_LOCK_ALLOC
default n
help
...
#undef LOCK
#define LOCK RTL
#undef UNLOCK
#define UNLOCK RTU
#undef RLOCK
#undef WLOCK
#undef INIT
#define INIT RTI
@@ -21,6 +21,7 @@
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/irqflags.h>
#include <linux/rtmutex.h>
/*
* Change this to 1 if you want to see the failure printouts:
@@ -46,6 +47,7 @@ __setup("debug_locks_verbose=", setup_debug_locks_verbose);
#define LOCKTYPE_MUTEX 0x4
#define LOCKTYPE_RWSEM 0x8
#define LOCKTYPE_WW 0x10
#define LOCKTYPE_RTMUTEX 0x20
static struct ww_acquire_ctx t, t2;
static struct ww_mutex o, o2, o3;
@@ -74,6 +76,15 @@ static DECLARE_RWSEM(rwsem_B);
static DECLARE_RWSEM(rwsem_C);
static DECLARE_RWSEM(rwsem_D);
#ifdef CONFIG_RT_MUTEXES
static DEFINE_RT_MUTEX(rtmutex_A);
static DEFINE_RT_MUTEX(rtmutex_B);
static DEFINE_RT_MUTEX(rtmutex_C);
static DEFINE_RT_MUTEX(rtmutex_D);
#endif
/*
* Locks that we initialize dynamically as well so that
* e.g. X1 and X2 become two instances of the same class,
@@ -108,6 +119,17 @@ static DECLARE_RWSEM(rwsem_Y2);
static DECLARE_RWSEM(rwsem_Z1);
static DECLARE_RWSEM(rwsem_Z2);
#ifdef CONFIG_RT_MUTEXES
static DEFINE_RT_MUTEX(rtmutex_X1);
static DEFINE_RT_MUTEX(rtmutex_X2);
static DEFINE_RT_MUTEX(rtmutex_Y1);
static DEFINE_RT_MUTEX(rtmutex_Y2);
static DEFINE_RT_MUTEX(rtmutex_Z1);
static DEFINE_RT_MUTEX(rtmutex_Z2);
#endif
/*
* non-inlined runtime initializers, to let separate locks share
* the same lock-class:
@@ -129,6 +151,17 @@ INIT_CLASS_FUNC(Z)
static void init_shared_classes(void)
{
#ifdef CONFIG_RT_MUTEXES
static struct lock_class_key rt_X, rt_Y, rt_Z;
__rt_mutex_init(&rtmutex_X1, __func__, &rt_X);
__rt_mutex_init(&rtmutex_X2, __func__, &rt_X);
__rt_mutex_init(&rtmutex_Y1, __func__, &rt_Y);
__rt_mutex_init(&rtmutex_Y2, __func__, &rt_Y);
__rt_mutex_init(&rtmutex_Z1, __func__, &rt_Z);
__rt_mutex_init(&rtmutex_Z2, __func__, &rt_Z);
#endif
init_class_X(&lock_X1, &rwlock_X1, &mutex_X1, &rwsem_X1);
init_class_X(&lock_X2, &rwlock_X2, &mutex_X2, &rwsem_X2);
@@ -193,6 +226,10 @@ static void init_shared_classes(void)
#define MU(x) mutex_unlock(&mutex_##x)
#define MI(x) mutex_init(&mutex_##x)
#define RTL(x) rt_mutex_lock(&rtmutex_##x)
#define RTU(x) rt_mutex_unlock(&rtmutex_##x)
#define RTI(x) rt_mutex_init(&rtmutex_##x)
#define WSL(x) down_write(&rwsem_##x)
#define WSU(x) up_write(&rwsem_##x)
@@ -264,6 +301,11 @@ GENERATE_TESTCASE(AA_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(AA_rsem)
#ifdef CONFIG_RT_MUTEXES
#include "locking-selftest-rtmutex.h"
GENERATE_TESTCASE(AA_rtmutex);
#endif
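
The per-type headers work by remapping the template macros before each
re-inclusion, and GENERATE_TESTCASE() then stamps out one test function per
lock type. Roughly (a simplified sketch of the existing machinery):

	/* lib/locking-selftest.c: */
	#define GENERATE_TESTCASE(name) \
		static void name(void) { E(); }

	/*
	 * With locking-selftest-rtmutex.h included, LOCK()/UNLOCK() map to
	 * RTL()/RTU(), i.e. rt_mutex_lock()/rt_mutex_unlock(), so the AA
	 * (recursive acquire) template expands conceptually to:
	 */
	static void AA_rtmutex(void)
	{
		rt_mutex_lock(&rtmutex_X1);
		rt_mutex_lock(&rtmutex_X2);	/* X1/X2 share a class: lockdep must flag this */
	}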
#undef E
/*
@@ -345,6 +387,11 @@ GENERATE_TESTCASE(ABBA_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(ABBA_rsem)
#ifdef CONFIG_RT_MUTEXES
#include "locking-selftest-rtmutex.h"
GENERATE_TESTCASE(ABBA_rtmutex);
#endif
#undef E
/*
@@ -373,6 +420,11 @@ GENERATE_TESTCASE(ABBCCA_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(ABBCCA_rsem)
#ifdef CONFIG_RT_MUTEXES
#include "locking-selftest-rtmutex.h"
GENERATE_TESTCASE(ABBCCA_rtmutex);
#endif
#undef E
/*
@@ -401,6 +453,11 @@ GENERATE_TESTCASE(ABCABC_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(ABCABC_rsem)
#ifdef CONFIG_RT_MUTEXES
#include "locking-selftest-rtmutex.h"
GENERATE_TESTCASE(ABCABC_rtmutex);
#endif
#undef E
/*
@@ -430,6 +487,11 @@ GENERATE_TESTCASE(ABBCCDDA_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(ABBCCDDA_rsem)
#ifdef CONFIG_RT_MUTEXES
#include "locking-selftest-rtmutex.h"
GENERATE_TESTCASE(ABBCCDDA_rtmutex);
#endif
#undef E
/*
@@ -458,6 +520,11 @@ GENERATE_TESTCASE(ABCDBDDA_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(ABCDBDDA_rsem)
#ifdef CONFIG_RT_MUTEXES
#include "locking-selftest-rtmutex.h"
GENERATE_TESTCASE(ABCDBDDA_rtmutex);
#endif
#undef E
/*
@@ -486,6 +553,11 @@ GENERATE_TESTCASE(ABCDBCDA_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(ABCDBCDA_rsem)
#ifdef CONFIG_RT_MUTEXES
#include "locking-selftest-rtmutex.h"
GENERATE_TESTCASE(ABCDBCDA_rtmutex);
#endif
#undef E
/*
@@ -513,33 +585,10 @@ GENERATE_TESTCASE(double_unlock_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(double_unlock_rsem)
#undef E
/*
* Bad unlock ordering:
*/
#define E() \
\
LOCK(A); \
LOCK(B); \
UNLOCK(A); /* fail */ \
UNLOCK(B);
/*
* 6 testcases:
*/
#include "locking-selftest-spin.h"
GENERATE_TESTCASE(bad_unlock_order_spin)
#include "locking-selftest-wlock.h"
GENERATE_TESTCASE(bad_unlock_order_wlock)
#include "locking-selftest-rlock.h"
GENERATE_TESTCASE(bad_unlock_order_rlock)
#include "locking-selftest-mutex.h"
GENERATE_TESTCASE(bad_unlock_order_mutex)
#include "locking-selftest-wsem.h"
GENERATE_TESTCASE(bad_unlock_order_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(bad_unlock_order_rsem)
#ifdef CONFIG_RT_MUTEXES
#include "locking-selftest-rtmutex.h"
GENERATE_TESTCASE(double_unlock_rtmutex);
#endif
#undef E
@@ -567,6 +616,11 @@ GENERATE_TESTCASE(init_held_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(init_held_rsem)
#ifdef CONFIG_RT_MUTEXES
#include "locking-selftest-rtmutex.h"
GENERATE_TESTCASE(init_held_rtmutex);
#endif
#undef E
/*
@@ -916,6 +970,9 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
# define I_MUTEX(x) lockdep_reset_lock(&mutex_##x.dep_map)
# define I_RWSEM(x) lockdep_reset_lock(&rwsem_##x.dep_map)
# define I_WW(x) lockdep_reset_lock(&x.dep_map)
#ifdef CONFIG_RT_MUTEXES
# define I_RTMUTEX(x) lockdep_reset_lock(&rtmutex_##x.dep_map)
#endif
#else
# define I_SPINLOCK(x)
# define I_RWLOCK(x)
@@ -924,12 +981,23 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
# define I_WW(x)
#endif
#ifndef I_RTMUTEX
# define I_RTMUTEX(x)
#endif
#ifdef CONFIG_RT_MUTEXES
#define I2_RTMUTEX(x) rt_mutex_init(&rtmutex_##x)
#else
#define I2_RTMUTEX(x)
#endif
#define I1(x) \
do { \
I_SPINLOCK(x); \
I_RWLOCK(x); \
I_MUTEX(x); \
I_RWSEM(x); \
I_RTMUTEX(x); \
} while (0)
#define I2(x) \
@@ -938,6 +1006,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
rwlock_init(&rwlock_##x); \
mutex_init(&mutex_##x); \
init_rwsem(&rwsem_##x); \
I2_RTMUTEX(x); \
} while (0)
static void reset_locks(void)
@@ -1013,6 +1082,12 @@ static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
reset_locks();
}
#ifdef CONFIG_RT_MUTEXES
#define dotest_rt(fn, e, m) dotest((fn), (e), (m))
#else
#define dotest_rt(fn, e, m)
#endif
static inline void print_testname(const char *testname)
{
printk("%33s:", testname);
@@ -1050,6 +1125,7 @@ static inline void print_testname(const char *testname)
dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX); \
dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM); \
dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM); \
dotest_rt(name##_rtmutex, FAILURE, LOCKTYPE_RTMUTEX); \
pr_cont("\n");
#define DO_TESTCASE_6_SUCCESS(desc, name) \
@@ -1060,6 +1136,7 @@ static inline void print_testname(const char *testname)
dotest(name##_mutex, SUCCESS, LOCKTYPE_MUTEX); \
dotest(name##_wsem, SUCCESS, LOCKTYPE_RWSEM); \
dotest(name##_rsem, SUCCESS, LOCKTYPE_RWSEM); \
dotest_rt(name##_rtmutex, SUCCESS, LOCKTYPE_RTMUTEX); \
pr_cont("\n");
/*
@@ -1073,6 +1150,7 @@ static inline void print_testname(const char *testname)
dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX); \
dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM); \
dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM); \
dotest_rt(name##_rtmutex, FAILURE, LOCKTYPE_RTMUTEX); \
pr_cont("\n");
#define DO_TESTCASE_2I(desc, name, nr) \
@@ -1825,7 +1903,6 @@ void locking_selftest(void)
DO_TESTCASE_6R("A-B-C-D-B-C-D-A deadlock", ABCDBCDA);
DO_TESTCASE_6("double unlock", double_unlock);