// SPDX-License-Identifier: GPL-2.0-only
/*
* kernel/locking/mutex.c
*
* Mutexes: blocking mutual exclusion locks
*
* Started by Ingo Molnar:
*
* Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
*
* Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
* David Howells for suggestions and improvements.
*
* - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
* from the -rt tree, where it was originally implemented for rtmutexes
* by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
* and Sven Dietrich.)
*
* Also see Documentation/locking/mutex-design.rst.
*/
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>
#include <linux/hung_task.h>
#define CREATE_TRACE_POINTS
#include <trace/events/lock.h>
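
/*
 * The sleeping-lock implementation below is only built for non-PREEMPT_RT
 * kernels; with PREEMPT_RT, struct mutex is based on an rtmutex instead,
 * hence the CONFIG_PREEMPT_RT guard.
 */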
#ifndef CONFIG_PREEMPT_RT
#include "mutex.h"
#ifdef CONFIG_DEBUG_MUTEXES
# define MUTEX_WARN_ON(cond) DEBUG_LOCKS_WARN_ON(cond)
#else
# define MUTEX_WARN_ON(cond)
#endif
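
/*
 * Initialize the mutex to the unlocked state: no owner and no flag bits in
 * ->owner, an empty wait list, and (with CONFIG_MUTEX_SPIN_ON_OWNER) an
 * unlocked optimistic-spin queue. debug_mutex_init() sets up the debugging
 * and lockdep state when those options are enabled.
 */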
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);
	raw_spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);
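
/*
 * lock->owner packs the owning task_struct pointer together with a small
 * set of state flags in the pointer's low bits (task_struct is aligned well
 * past the flag bits, so they never collide). __owner_task() and
 * __owner_flags() split a snapshot of that word back into its two parts;
 * the MUTEX_FLAG_* bits themselves are defined in mutex.h.
 */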
static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

bool mutex_is_locked(struct mutex *lock)
{
	return __mutex_owner(lock) != NULL;
}
EXPORT_SYMBOL(mutex_is_locked);

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}
/*
 * Return the current owner of @lock as an unsigned long. Do not use the
 * return value as a pointer directly: by the time the caller inspects it
 * the owner may already have released the lock or exited, so treat it as
 * an opaque cookie (for comparison or reporting only).
 */
unsigned long mutex_get_owner(struct mutex *lock)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	return (unsigned long)__owner_task(owner);
}
/*
 * Common trylock helper. When @handoff is true and the lock is currently
 * owned, request that the owner hand the lock to the top waiter on unlock
 * instead of simply failing.
 *
 * Returns: __mutex_owner(lock) on failure or NULL on success.
 */
static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long flags = __owner_flags(owner);
		unsigned long task = owner & ~MUTEX_FLAGS;

		if (task) {
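			/*
			 * Already owned. We can only take the lock if a
			 * handoff has been completed for us: PICKUP is set
			 * and we are the designated new owner.
			 */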
			if (flags & MUTEX_FLAG_PICKUP) {
				if (task != curr)
					break;

				flags &= ~MUTEX_FLAG_PICKUP;
			} else if (handoff) {
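				/*
				 * Owned by someone else but the caller wants
				 * a handoff: ask the unlock path to pass the
				 * lock to the top waiter by setting HANDOFF,
				 * unless a request is already pending.
				 */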
				if (flags & MUTEX_FLAG_HANDOFF)
					break;

				flags |= MUTEX_FLAG_HANDOFF;
			} else {
				break;
			}
		} else {
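			/*
			 * Unowned: try to install ourselves as the owner
			 * while preserving whatever waiter flags are
			 * already set.
			 */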
			MUTEX_WARN_ON(flags & (MUTEX_FLAG_HANDOFF | MUTEX_FLAG_PICKUP));
			task = curr;
		}

		if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task |