
Commit af9f006

Sebastian Andrzej Siewior authored and Peter Zijlstra committed
locking/rtmutex: Avoid unconditional slowpath for DEBUG_RT_MUTEXES
With DEBUG_RT_MUTEXES enabled the fast-path rt_mutex_cmpxchg_acquire()
always fails and all lock operations take the slow path.

Provide a new helper inline rt_mutex_try_acquire() which maps to
rt_mutex_cmpxchg_acquire() in the non-debug case. For the debug case
it invokes rt_mutex_slowtrylock() which can acquire a non-contended
rtmutex under full debug coverage.

Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
Parent: 28bc55f
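Why the fast path always fails under debug: with CONFIG_DEBUG_RT_MUTEXES=y, rtmutex.c compiles a debug variant of the cmpxchg helpers that unconditionally fails, forcing every acquisition through the instrumented slow path. A minimal sketch of that debug-side stub, paraphrased from the #else branch that the second hunk below patches (the exact body is not shown in this diff):

/*
 * CONFIG_DEBUG_RT_MUTEXES=y variant: never succeed, so every lock
 * operation falls through to the fully debug-checked slow path.
 */
static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock,
                                                     struct task_struct *old,
                                                     struct task_struct *new)
{
        return false;
}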

File tree: 2 files changed, 21 insertions(+), 2 deletions(-)

kernel/locking/rtmutex.c

Lines changed: 20 additions & 1 deletion
@@ -218,6 +218,11 @@ static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock,
 	return try_cmpxchg_acquire(&lock->owner, &old, new);
 }
 
+static __always_inline bool rt_mutex_try_acquire(struct rt_mutex_base *lock)
+{
+	return rt_mutex_cmpxchg_acquire(lock, NULL, current);
+}
+
 static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock,
 						     struct task_struct *old,
 						     struct task_struct *new)
@@ -297,6 +302,20 @@ static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock,
 
 }
 
+static int __sched rt_mutex_slowtrylock(struct rt_mutex_base *lock);
+
+static __always_inline bool rt_mutex_try_acquire(struct rt_mutex_base *lock)
+{
+	/*
+	 * With debug enabled rt_mutex_cmpxchg trylock() will always fail.
+	 *
+	 * Avoid unconditionally taking the slow path by using
+	 * rt_mutex_slowtrylock() which is covered by the debug code and can
+	 * acquire a non-contended rtmutex.
+	 */
+	return rt_mutex_slowtrylock(lock);
+}
+
 static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock,
 						     struct task_struct *old,
 						     struct task_struct *new)
@@ -1755,7 +1774,7 @@ static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
 static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock,
 					   unsigned int state)
 {
-	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
+	if (likely(rt_mutex_try_acquire(lock)))
 		return 0;
 
 	return rt_mutex_slowlock(lock, NULL, state);
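For orientation, rt_mutex_slowtrylock() is declared above and defined later in kernel/locking/rtmutex.c. A hedged sketch of its shape, paraphrased from the surrounding file (the exact body is outside this diff): it rejects an owned lock immediately, then attempts the acquisition under lock->wait_lock so the debug machinery runs, which is why it can still take a non-contended rtmutex without entering rt_mutex_slowlock():

/* Approximate sketch; the real definition lives elsewhere in rtmutex.c. */
static int __sched rt_mutex_slowtrylock(struct rt_mutex_base *lock)
{
        unsigned long flags;
        int ret;

        /* An owned lock is contended; the trylock fails without locking. */
        if (rt_mutex_owner(lock))
                return 0;

        /* Serialize against waiters; the debug checks run under wait_lock. */
        raw_spin_lock_irqsave(&lock->wait_lock, flags);
        ret = __rt_mutex_slowtrylock(lock);
        raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

        return ret;
}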

kernel/locking/ww_rt_mutex.c

Lines changed: 1 addition & 1 deletion
@@ -62,7 +62,7 @@ __ww_rt_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx,
 	}
 	mutex_acquire_nest(&rtm->dep_map, 0, 0, nest_lock, ip);
 
-	if (likely(rt_mutex_cmpxchg_acquire(&rtm->rtmutex, NULL, current))) {
+	if (likely(rt_mutex_try_acquire(&rtm->rtmutex))) {
 		if (ww_ctx)
 			ww_mutex_set_context_fastpath(lock, ww_ctx);
 		return 0;
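A caller-side sketch of the path this hunk affects: ww_rt_mutex.c is the PREEMPT_RT backend of ww_mutex, so an uncontended ww_mutex_lock() now succeeds via rt_mutex_try_acquire() even with DEBUG_RT_MUTEXES enabled. demo() and demo_ww_class are hypothetical names; the calls are the standard ww_mutex interface:

#include <linux/ww_mutex.h>

static DEFINE_WD_CLASS(demo_ww_class);  /* hypothetical demo class */

static void demo(struct ww_mutex *m)
{
        struct ww_acquire_ctx ctx;

        ww_acquire_init(&ctx, &demo_ww_class);
        /*
         * Uncontended case: with this commit the lock is taken via
         * rt_mutex_try_acquire() instead of unconditionally entering
         * the slow path when DEBUG_RT_MUTEXES=y.
         */
        if (ww_mutex_lock(m, &ctx) == 0)
                ww_mutex_unlock(m);
        ww_acquire_fini(&ctx);
}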
