// SPDX-License-Identifier: GPL-2.0
/*
* SLUB: A slab allocator with low overhead percpu array caches and mostly
* lockless freeing of objects to slabs in the slowpath.
*
* The allocator synchronizes using local_trylock for the percpu array caches
* (sheaves) in the fastpath, and cmpxchg_double (or a bit spinlock) for
* slowpath freeing.
* Uses a centralized lock to manage a pool of partial slabs.
*
* (C) 2007 SGI, Christoph Lameter
* (C) 2011 Linux Foundation, Christoph Lameter
* (C) 2025 SUSE, Vlastimil Babka
*/
#include <linux/mm.h>
#include <linux/swap.h> /* mm_account_reclaimed_pages() */
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
#include <linux/swab.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include "slab.h"
#include <linux/vmalloc.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kasan.h>
#include <linux/node.h>
#include <linux/kmsan.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/stackdepot.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/kfence.h>
#include <linux/memory.h>
#include <linux/math64.h>
#include <linux/fault-inject.h>
#include <linux/kmemleak.h>
#include <linux/stacktrace.h>
#include <linux/prefetch.h>
#include <linux/memcontrol.h>
#include <linux/random.h>
#include <linux/prandom.h>
#include <kunit/test.h>
#include <kunit/test-bug.h>
#include <linux/sort.h>
#include <linux/irq_work.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
#include <trace/events/kmem.h>
#include "internal.h"
/*
* Lock order:
* 0. cpu_hotplug_lock
* 1. slab_mutex (Global Mutex)
* 2a. kmem_cache->cpu_sheaves->lock (Local trylock)
* 2b. node->barn->lock (Spinlock)
* 2c. node->list_lock (Spinlock)
* 3. slab_lock(slab) (Only on some arches)
* 4. object_map_lock (Only for debugging)
*
* slab_mutex
*
* The role of the slab_mutex is to protect the list of all the slabs
* and to synchronize major metadata changes to slab cache structures.
* Also synchronizes memory hotplug callbacks.
*
* slab_lock
*
* The slab_lock is a wrapper around the page lock, thus it is a bit
* spinlock.
*
* The slab_lock is only used on arches that do not have the ability
* to do a cmpxchg_double. It only protects:
*
* A. slab->freelist -> List of free objects in a slab
* B. slab->inuse -> Number of objects in use
* C. slab->objects -> Number of objects in slab
* D. slab->frozen -> frozen state
*
* SL_partial slabs
*
* Slabs on node partial list have at least one free object. A limited number
* of slabs on the list can be fully free (slab->inuse == 0), until we start
* discarding them. These slabs are marked with SL_partial, and the flag is
* cleared while removing them, usually to grab their freelist afterwards.
* This clearing also exempts them from list management. Please see
* __slab_free() for more details.
*
* Full slabs
*
* For caches without debugging enabled, full slabs (slab->inuse ==
* slab->objects and slab->freelist == NULL) are not placed on any list.
* The __slab_free() freeing the first object from such a slab will place
* it on the partial list. Caches with debugging enabled place such slab
* on the full list and use different allocation and freeing paths.
*
* Frozen slabs
*
* If a slab is frozen then it is exempt from list management. It is used to
* indicate a slab that has failed consistency checks and thus cannot be
* allocated from anymore - it is also marked as full. Any previously
* allocated objects will be simply leaked upon freeing instead of attempting
* to modify the potentially corrupted freelist and metadata.
*
* To sum up, the current scheme is:
* - node partial slab: SL_partial && !full && !frozen
* - taken off partial list: !SL_partial && !full && !frozen
* - full slab, not on any list: !SL_partial && full && !frozen
* - frozen due to inconsistency: !SL_partial && full && frozen
*
* node->list_lock (spinlock)
*
* The list_lock protects the partial and full lists on each node and
* the partial slab counter. While it is held, no slabs may be added to or
* removed from the lists, nor may the number of partial slabs be modified.
* (Note that the total number of slabs is an atomic value that may be
* modified without taking the list lock).
*
* The list_lock is a centralized lock and thus we avoid taking it as
* much as possible. As long as SLUB does not have to handle partial
* slabs, operations can continue without any centralized lock.
*
* For debug caches, all allocations are forced to go through a list_lock
* protected region to serialize against concurrent validation.
*
* cpu_sheaves->lock (local_trylock)
*
* This lock protects fastpath operations on the percpu sheaves. On !RT it
* only disables preemption and does no atomic operations. As long as the main
* or spare sheaf can handle the allocation or free, there is no other
* overhead.
*
* node->barn->lock (spinlock)
*
* This lock protects operations on the per-NUMA-node barn. The barn can
* quickly serve an empty or full sheaf if one is available, avoiding a more
* expensive refill or flush operation.
*
* Lockless freeing
*
* Objects may have to be freed to their slabs when they are from a remote
* node (where we want to avoid filling local sheaves with remote objects)
* or when there are too many full sheaves. On architectures supporting
* cmpxchg_double this is done by a lockless update of the slab's freelist and
* counters, otherwise slab_lock is taken. The list_lock only needs to be
* taken if it's the first free to a full slab, or when a slab becomes empty
* after the free.
*
* irq, preemption, migration considerations
*
* Interrupts are disabled as part of list_lock or barn lock operations, or
* around the slab_lock operation, in order to make the slab allocator safe
* to use in the context of an irq.
* Preemption is disabled as part of local_trylock operations.
* kmalloc_nolock() and kfree_nolock() are safe in NMI context but see
* their limitations.
*
* SLUB assigns two object arrays called sheaves for caching allocations and
* frees on each cpu, with a NUMA node shared barn for balancing between cpus.
* Allocations and frees are primarily served from these sheaves.
*
* Slabs with free elements are kept on a partial list and during regular
* operations no list for full slabs is used. If an object in a full slab is
* freed then the slab will show up again on the partial lists.
* We track full slabs for debugging purposes though because otherwise we
* cannot scan all objects.
*
* Slabs are freed when they become empty. Teardown and setup are minimal, so
* we rely on the page allocator's per cpu caches for fast frees and allocs.
*
* SLAB_DEBUG_FLAGS	Slab requires special handling due to the debug
*			options set. This moves slab handling out of
*			the fast path and disables lockless freelists.
*/
/**
* enum slab_flags - How the slab flags bits are used.
* @SL_locked: Is locked with slab_lock()
* @SL_partial: On the per-node partial list
* @SL_pfmemalloc: Was allocated from PF_MEMALLOC reserves
*
* The slab flags share space with the page flags but some bits have
* different interpretations. The high bits are used for information
* like zone/node/section.
*/
enum slab_flags {
SL_locked = PG_locked,
SL_partial = PG_workingset, /* Historical reasons for this bit */
SL_pfmemalloc = PG_active, /* Historical reasons for this bit */
};
#ifndef CONFIG_SLUB_TINY
#define __fastpath_inline __always_inline
#else
#define __fastpath_inline
#endif
#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
#endif /* CONFIG_SLUB_DEBUG */
#ifdef CONFIG_NUMA
static DEFINE_STATIC_KEY_FALSE(strict_numa);
#endif
/* Structure holding parameters for get_from_partial() call chain */
struct partial_context {
gfp_t flags;
unsigned int orig_size;
};
/* Structure holding parameters for get_partial_node_bulk() */
struct partial_bulk_context {
gfp_t flags;
unsigned int min_objects;
unsigned int max_objects;
struct list_head slabs;
};
static inline bool kmem_cache_debug(struct kmem_cache *s)
{
return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS);
}
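/*
 * With SLAB_RED_ZONE the first usable object byte is preceded by
 * s->red_left_pad bytes of left red zone; skip it to get the object start.
 */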
void *fixup_red_left(struct kmem_cache *s, void *p)
{
if (kmem_cache_debug_flags(s, SLAB_RED_ZONE))
p += s->red_left_pad;
return p;
}
/*
* Issues still to be resolved:
*
* - Support PAGE_ALLOC_DEBUG. Should be easy to do.
*
* - Variable sizing of the per node arrays
*/
/* Enable to log cmpxchg failures */
#undef SLUB_DEBUG_CMPXCHG
#ifndef CONFIG_SLUB_TINY
/*
* Minimum number of partial slabs. These will be left on the partial
* lists even if they are empty. kmem_cache_shrink may reclaim them.
*/
#define MIN_PARTIAL 5
/*
* Maximum number of desirable partial slabs.
* The existence of more partial slabs makes kmem_cache_shrink
* sort the partial list by the number of objects in use.
*/
#define MAX_PARTIAL 10
#else
#define MIN_PARTIAL 0
#define MAX_PARTIAL 0
#endif
#define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
SLAB_POISON | SLAB_STORE_USER)
/*
* These debug flags cannot use CMPXCHG because there might be consistency
* issues when checking or reading debug information
*/
#define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
SLAB_TRACE)
/*
* Debugging flags that require metadata to be stored in the slab. These get
* disabled when slab_debug=O is used and a cache's min order increases with
* metadata.
*/
#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#define OO_SHIFT 16
#define OO_MASK ((1 << OO_SHIFT) - 1)
#define MAX_OBJS_PER_PAGE 32767 /* since slab.objects is u15 */
/* Internal SLUB flags */
/* Poison object */
#define __OBJECT_POISON __SLAB_FLAG_BIT(_SLAB_OBJECT_POISON)
/* Use cmpxchg_double */
#ifdef system_has_freelist_aba
#define __CMPXCHG_DOUBLE __SLAB_FLAG_BIT(_SLAB_CMPXCHG_DOUBLE)
#else
#define __CMPXCHG_DOUBLE __SLAB_FLAG_UNUSED
#endif
/*
* Tracking user of a slab.
*/
#define TRACK_ADDRS_COUNT 16
struct track {
unsigned long addr; /* Called from address */
#ifdef CONFIG_STACKDEPOT
depot_stack_handle_t handle;
#endif
int cpu; /* Was running on cpu */
int pid; /* Pid context */
unsigned long when; /* When did the operation occur */
};
enum track_item { TRACK_ALLOC, TRACK_FREE };
#ifdef SLAB_SUPPORTS_SYSFS
static int sysfs_slab_add(struct kmem_cache *);
#else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
#endif
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
static void debugfs_slab_add(struct kmem_cache *);
#else
static inline void debugfs_slab_add(struct kmem_cache *s) { }
#endif
enum add_mode {
ADD_TO_HEAD,
ADD_TO_TAIL,
};
enum stat_item {
ALLOC_FASTPATH, /* Allocation from percpu sheaves */
ALLOC_SLOWPATH, /* Allocation from partial or new slab */
FREE_RCU_SHEAF, /* Free to rcu_free sheaf */
FREE_RCU_SHEAF_FAIL, /* Failed to free to a rcu_free sheaf */
FREE_FASTPATH, /* Free to percpu sheaves */
FREE_SLOWPATH, /* Free to a slab */
FREE_ADD_PARTIAL, /* Freeing moves slab to partial list */
FREE_REMOVE_PARTIAL, /* Freeing removes last object */
ALLOC_SLAB, /* New slab acquired from page allocator */
ALLOC_NODE_MISMATCH, /* Requested node different from cpu sheaf */
FREE_SLAB, /* Slab freed to the page allocator */
ORDER_FALLBACK, /* Number of times fallback was necessary */
CMPXCHG_DOUBLE_FAIL, /* Failures of slab freelist update */
SHEAF_FLUSH, /* Objects flushed from a sheaf */
SHEAF_REFILL, /* Objects refilled to a sheaf */
SHEAF_ALLOC, /* Allocation of an empty sheaf */
SHEAF_FREE, /* Freeing of an empty sheaf */
BARN_GET, /* Got full sheaf from barn */
BARN_GET_FAIL, /* Failed to get full sheaf from barn */
BARN_PUT, /* Put full sheaf to barn */
BARN_PUT_FAIL, /* Failed to put full sheaf to barn */
SHEAF_PREFILL_FAST, /* Sheaf prefill grabbed the spare sheaf */
SHEAF_PREFILL_SLOW, /* Sheaf prefill found no spare sheaf */
SHEAF_PREFILL_OVERSIZE, /* Allocation of oversize sheaf for prefill */
SHEAF_RETURN_FAST, /* Sheaf return reattached spare sheaf */
SHEAF_RETURN_SLOW, /* Sheaf return could not reattach spare */
NR_SLUB_STAT_ITEMS
};
#ifdef CONFIG_SLUB_STATS
struct kmem_cache_stats {
unsigned int stat[NR_SLUB_STAT_ITEMS];
};
#endif
static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
/*
* The rmw is racy on a preemptible kernel but this is acceptable, so
* avoid this_cpu_add()'s irq-disable overhead.
*/
raw_cpu_inc(s->cpu_stats->stat[si]);
#endif
}
static inline
void stat_add(const struct kmem_cache *s, enum stat_item si, int v)
{
#ifdef CONFIG_SLUB_STATS
raw_cpu_add(s->cpu_stats->stat[si], v);
#endif
}
#define MAX_FULL_SHEAVES 10
#define MAX_EMPTY_SHEAVES 10
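/*
 * The barn is a per-NUMA-node store of a bounded number of full and empty
 * sheaves, used for balancing sheaves between the cpus of that node.
 * Protected by barn->lock.
 */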
struct node_barn {
spinlock_t lock;
struct list_head sheaves_full;
struct list_head sheaves_empty;
unsigned int nr_full;
unsigned int nr_empty;
};
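/*
 * A sheaf is an array of cached object pointers (@objects) used for percpu
 * caching of allocations and frees; @size is the number of objects currently
 * held. Prefilled sheaves track their own @capacity.
 */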
struct slab_sheaf {
union {
struct rcu_head rcu_head;
struct list_head barn_list;
/* only used for prefilled sheaves */
struct {
unsigned int capacity;
bool pfmemalloc;
};
};
struct kmem_cache *cache;
unsigned int size;
int node; /* only used for rcu_sheaf */
void *objects[];
};
struct slub_percpu_sheaves {
local_trylock_t lock;
struct slab_sheaf *main; /* never NULL when unlocked */
struct slab_sheaf *spare; /* empty or full, may be NULL */
struct slab_sheaf *rcu_free; /* for batching kfree_rcu() */
};
/*
* The slab lists for all objects.
*/
struct kmem_cache_node {
spinlock_t list_lock;
unsigned long nr_partial;
struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
atomic_long_t nr_slabs;
atomic_long_t total_objects;
struct list_head full;
#endif
struct node_barn *barn;
};
static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
return s->node[node];
}
/*
* Get the barn of the current cpu's closest memory node. It may not exist on
* systems that have memoryless nodes but lack CONFIG_HAVE_MEMORYLESS_NODES.
*/
static inline struct node_barn *get_barn(struct kmem_cache *s)
{
struct kmem_cache_node *n = get_node(s, numa_mem_id());
if (!n)
return NULL;
return n->barn;
}
/*
* Iterator over all nodes. The body will be executed for each node that has
* a kmem_cache_node structure allocated (which is true for all online nodes)
*/
#define for_each_kmem_cache_node(__s, __node, __n) \
for (__node = 0; __node < nr_node_ids; __node++) \
if ((__n = get_node(__s, __node)))
/*
* Tracks for which NUMA nodes we have kmem_cache_nodes allocated.
* Corresponds to node_state[N_MEMORY], but can temporarily
* differ during memory hotplug/hotremove operations.
* Protected by slab_mutex.
*/
static nodemask_t slab_nodes;
/*
* Workqueue used for flushing cpu and kfree_rcu sheaves.
*/
static struct workqueue_struct *flushwq;
struct slub_flush_work {
struct work_struct work;
struct kmem_cache *s;
bool skip;
};
static DEFINE_MUTEX(flush_lock);
static DEFINE_PER_CPU(struct slub_flush_work, slub_flush);
/********************************************************************
* Core slab cache functions
*******************************************************************/
/*
* Returns freelist pointer (ptr). With hardening, this is obfuscated
* with an XOR of the address where the pointer is held and a per-cache
* random number.
*/
static inline freeptr_t freelist_ptr_encode(const struct kmem_cache *s,
void *ptr, unsigned long ptr_addr)
{
unsigned long encoded;
#ifdef CONFIG_SLAB_FREELIST_HARDENED
encoded = (unsigned long)ptr ^ s->random ^ swab(ptr_addr);
#else
encoded = (unsigned long)ptr;
#endif
return (freeptr_t){.v = encoded};
}
static inline void *freelist_ptr_decode(const struct kmem_cache *s,
freeptr_t ptr, unsigned long ptr_addr)
{
void *decoded;
#ifdef CONFIG_SLAB_FREELIST_HARDENED
decoded = (void *)(ptr.v ^ s->random ^ swab(ptr_addr));
#else
decoded = (void *)ptr.v;
#endif
return decoded;
}
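/*
 * Since XOR is its own inverse, decoding with the same s->random and
 * ptr_addr recovers the original pointer:
 *	freelist_ptr_decode(s, freelist_ptr_encode(s, ptr, addr), addr) == ptr
 * The byte-swapped address is mixed in so that the high, mostly-constant
 * bits of the storage address perturb the low bits of the encoded value.
 */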
static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
unsigned long ptr_addr;
freeptr_t p;
object = kasan_reset_tag(object);
ptr_addr = (unsigned long)object + s->offset;
p = *(freeptr_t *)(ptr_addr);
return freelist_ptr_decode(s, p, ptr_addr);
}
static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
unsigned long freeptr_addr = (unsigned long)object + s->offset;
#ifdef CONFIG_SLAB_FREELIST_HARDENED
BUG_ON(object == fp); /* naive detection of double free or corruption */
#endif
freeptr_addr = (unsigned long)kasan_reset_tag((void *)freeptr_addr);
*(freeptr_t *)freeptr_addr = freelist_ptr_encode(s, fp, freeptr_addr);
}
/*
* See comment in calculate_sizes().
*/
static inline bool freeptr_outside_object(struct kmem_cache *s)
{
return s->offset >= s->inuse;
}
/*
* Return the offset of the end of the info block: s->inuse plus the size of
* the free pointer when the free pointer is stored outside the object.
*/
static inline unsigned int get_info_end(struct kmem_cache *s)
{
if (freeptr_outside_object(s))
return s->inuse + sizeof(void *);
else
return s->inuse;
}
/* Loop over all objects in a slab */
#define for_each_object(__p, __s, __addr, __objects) \
for (__p = fixup_red_left(__s, __addr); \
__p < (__addr) + (__objects) * (__s)->size; \
__p += (__s)->size)
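/*
 * Typical use, with addr = slab_address(slab):
 *	for_each_object(p, s, addr, slab->objects)
 *		...
 */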
static inline unsigned int order_objects(unsigned int order, unsigned int size)
{
return ((unsigned int)PAGE_SIZE << order) / size;
}
static inline struct kmem_cache_order_objects oo_make(unsigned int order,
unsigned int size)
{
struct kmem_cache_order_objects x = {
(order << OO_SHIFT) + order_objects(order, size)
};
return x;
}
static inline unsigned int oo_order(struct kmem_cache_order_objects x)
{
return x.x >> OO_SHIFT;
}
static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
{
return x.x & OO_MASK;
}
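/*
 * Example (assuming PAGE_SIZE == 4096): for order = 1 and size = 256,
 * order_objects() yields 8192 / 256 = 32, and oo_make() packs this as
 * (1 << OO_SHIFT) + 32. oo_order() and oo_objects() extract 1 and 32 back.
 */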
/*
* If network-based swap is enabled, slub must keep track of whether a slab's
* memory was allocated from pfmemalloc reserves.
*/
static inline bool slab_test_pfmemalloc(const struct slab *slab)
{
return test_bit(SL_pfmemalloc, &slab->flags.f);
}
static inline void slab_set_pfmemalloc(struct slab *slab)
{
set_bit(SL_pfmemalloc, &slab->flags.f);
}
static inline void __slab_clear_pfmemalloc(struct slab *slab)
{
__clear_bit(SL_pfmemalloc, &slab->flags.f);
}
/*
* Per slab locking using the pagelock
*/
static __always_inline void slab_lock(struct slab *slab)
{
bit_spin_lock(SL_locked, &slab->flags.f);
}
static __always_inline void slab_unlock(struct slab *slab)
{
bit_spin_unlock(SL_locked, &slab->flags.f);
}
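/*
 * Update slab->freelist and counters to the new values if they still match
 * the old ones, either with a double-word cmpxchg (fast) or under the
 * slab_lock bit spinlock (slow). Return true on success.
 */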
static inline bool
__update_freelist_fast(struct slab *slab, struct freelist_counters *old,
struct freelist_counters *new)
{
#ifdef system_has_freelist_aba
return try_cmpxchg_freelist(&slab->freelist_counters,
&old->freelist_counters,
new->freelist_counters);
#else
return false;
#endif
}
static inline bool
__update_freelist_slow(struct slab *slab, struct freelist_counters *old,
struct freelist_counters *new)
{
bool ret = false;
slab_lock(slab);
if (slab->freelist == old->freelist &&
slab->counters == old->counters) {
slab->freelist = new->freelist;
/* prevent tearing for the read in get_partial_node_bulk() */
WRITE_ONCE(slab->counters, new->counters);
ret = true;
}
slab_unlock(slab);
return ret;
}
/*
* Interrupts must be disabled (for the fallback code to work right), typically
* by an _irqsave() lock variant. On PREEMPT_RT the preempt_disable(), which is
* part of bit_spin_lock(), is sufficient because the policy is not to allow any
* allocation/free operation in hardirq context. Therefore nothing can
* interrupt the operation.
*/
static inline bool __slab_update_freelist(struct kmem_cache *s, struct slab *slab,
struct freelist_counters *old, struct freelist_counters *new, const char *n)
{
bool ret;
if (!IS_ENABLED(CONFIG_PREEMPT_RT))
lockdep_assert_irqs_disabled();
if (s->flags & __CMPXCHG_DOUBLE)
ret = __update_freelist_fast(slab, old, new);
else
ret = __update_freelist_slow(slab, old, new);
if (likely(ret))
return true;
cpu_relax();
stat(s, CMPXCHG_DOUBLE_FAIL);
#ifdef SLUB_DEBUG_CMPXCHG
pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif
return false;
}
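/*
 * Variant of __slab_update_freelist() that may be called with interrupts
 * enabled; the slow (bit spinlock) path disables them itself.
 */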
static inline bool slab_update_freelist(struct kmem_cache *s, struct slab *slab,
struct freelist_counters *old, struct freelist_counters *new, const char *n)
{
bool ret;
if (s->flags & __CMPXCHG_DOUBLE) {
ret = __update_freelist_fast(slab, old, new);
} else {
unsigned long flags;
local_irq_save(flags);
ret = __update_freelist_slow(slab, old, new);
local_irq_restore(flags);
}
if (likely(ret))
return true;
cpu_relax();
stat(s, CMPXCHG_DOUBLE_FAIL);
#ifdef SLUB_DEBUG_CMPXCHG
pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif
return false;
}
/*
* kmalloc caches have fixed sizes (mostly powers of 2), and the kmalloc() API
* family rounds the real request size up to one of them, so the allocated
* area can be larger than what was requested. Save the original request size
* in the metadata area, for better debugging and sanity checks.
*/
static inline void set_orig_size(struct kmem_cache *s,
void *object, unsigned long orig_size)
{
void *p = kasan_reset_tag(object);
if (!slub_debug_orig_size(s))
return;
p += get_info_end(s);
p += sizeof(struct track) * 2;
*(unsigned long *)p = orig_size;
}
static inline unsigned long get_orig_size(struct kmem_cache *s, void *object)
{
void *p = kasan_reset_tag(object);
if (is_kfence_address(object))
return kfence_ksize(object);
if (!slub_debug_orig_size(s))
return s->object_size;
p += get_info_end(s);
p += sizeof(struct track) * 2;
return *(unsigned long *)p;
}
#ifdef CONFIG_SLAB_OBJ_EXT
/*
* Check if memory cgroup or memory allocation profiling is enabled.
* If either is enabled, SLUB tries to reduce the memory overhead of
* accounting slab objects. If neither is enabled when this function is
* called, the optimization is simply skipped to avoid affecting caches
* that do not need slabobj_ext metadata.
*
* However, this can skip the optimization for caches that are created so
* early that memory cgroups or memory allocation profiling are not yet
* initialized, even though they are used later.
*/
static inline bool need_slab_obj_exts(struct kmem_cache *s)
{
if (s->flags & SLAB_NO_OBJ_EXT)
return false;
if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT))
return true;
if (mem_alloc_profiling_enabled())
return true;
return false;
}
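/*
 * The slabobj_ext vector can be stored in the unused space at the end of the
 * slab itself, past the object area: obj_exts_offset_in_slab() gives its
 * (aligned) offset from slab_address(slab), obj_exts_size_in_slab() its size,
 * and obj_exts_fit_within_slab_leftover() checks whether it fits.
 */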
static inline unsigned int obj_exts_size_in_slab(struct slab *slab)
{
return sizeof(struct slabobj_ext) * slab->objects;
}
static inline unsigned long obj_exts_offset_in_slab(struct kmem_cache *s,
struct slab *slab)
{
unsigned long objext_offset;
objext_offset = s->size * slab->objects;
objext_offset = ALIGN(objext_offset, sizeof(struct slabobj_ext));
return objext_offset;
}
static inline bool obj_exts_fit_within_slab_leftover(struct kmem_cache *s,
struct slab *slab)
{
unsigned long objext_offset = obj_exts_offset_in_slab(s, slab);
unsigned long objext_size = obj_exts_size_in_slab(slab);
return objext_offset + objext_size <= slab_size(slab);
}
static inline bool obj_exts_in_slab(struct kmem_cache *s, struct slab *slab)
{
unsigned long obj_exts;
unsigned long start;
unsigned long end;
obj_exts = slab_obj_exts(slab);
if (!obj_exts)
return false;
start = (unsigned long)slab_address(slab);
end = start + slab_size(slab);
return (obj_exts >= start) && (obj_exts < end);
}
#else
static inline bool need_slab_obj_exts(struct kmem_cache *s)
{
return false;
}
static inline unsigned int obj_exts_size_in_slab(struct slab *slab)
{
return 0;
}
static inline unsigned long obj_exts_offset_in_slab(struct kmem_cache *s,
struct slab *slab)
{
return 0;
}
static inline bool obj_exts_fit_within_slab_leftover(struct kmem_cache *s,
struct slab *slab)
{
return false;
}
static inline bool obj_exts_in_slab(struct kmem_cache *s, struct slab *slab)
{
return false;
}
#endif
#if defined(CONFIG_SLAB_OBJ_EXT) && defined(CONFIG_64BIT)
static bool obj_exts_in_object(struct kmem_cache *s, struct slab *slab)
{
/*
* Note we cannot rely on the SLAB_OBJ_EXT_IN_OBJ flag alone here and need to
* check the stride. A cache can have SLAB_OBJ_EXT_IN_OBJ set, but placing
* obj_exts in the slab's leftover space is preferred, and whether that is
* possible depends on the particular slab's size.
*/
return obj_exts_in_slab(s, slab) &&
(slab_get_stride(slab) == s->size);
}
static unsigned int obj_exts_offset_in_object(struct kmem_cache *s)
{
unsigned int offset = get_info_end(s);
if (kmem_cache_debug_flags(s, SLAB_STORE_USER))
offset += sizeof(struct track) * 2;
if (slub_debug_orig_size(s))
offset += sizeof(unsigned long);
offset += kasan_metadata_size(s, false);
return offset;
}
#else
static inline bool obj_exts_in_object(struct kmem_cache *s, struct slab *slab)
{
return false;
}
static inline unsigned int obj_exts_offset_in_object(struct kmem_cache *s)
{
return 0;
}
#endif
#ifdef CONFIG_SLUB_DEBUG
/*
* For debugging context when we want to check if the struct slab pointer
* appears to be valid.
*/
static inline bool validate_slab_ptr(struct slab *slab)
{
return PageSlab(slab_page(slab));
}
static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
static DEFINE_SPINLOCK(object_map_lock);
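/* Mark all objects currently on the slab's freelist in the given bitmap. */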
static void __fill_map(unsigned long *obj_map, struct kmem_cache *s,
struct slab *slab)
{
void *addr = slab_address(slab);
void *p;
bitmap_zero(obj_map, slab->objects);
for (p = slab->freelist; p; p = get_freepointer(s, p))
set_bit(__obj_to_index(s, addr, p), obj_map);
}
#if IS_ENABLED(CONFIG_KUNIT)
static bool slab_add_kunit_errors(void)
{
struct kunit_resource *resource;
if (!kunit_get_current_test())
return false;
resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
if (!resource)
return false;
(*(int *)resource->data)++;
kunit_put_resource(resource);
return true;
}
bool slab_in_kunit_test(void)
{
struct kunit_resource *resource;
if (!kunit_get_current_test())
return false;
resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
if (!resource)
return false;
kunit_put_resource(resource);
return true;
}
#else
static inline bool slab_add_kunit_errors(void) { return false; }
#endif
static inline unsigned int size_from_object(struct kmem_cache *s)
{
if (s->flags & SLAB_RED_ZONE)
return s->size - s->red_left_pad;
return s->size;
}
static inline void *restore_red_left(struct kmem_cache *s, void *p)
{
if (s->flags & SLAB_RED_ZONE)
p -= s->red_left_pad;
return p;
}
/*
* Debug settings:
*/
#if defined(CONFIG_SLUB_DEBUG_ON)
static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
#else
static slab_flags_t slub_debug;
#endif
static const char *slub_debug_string __ro_after_init;
static int disable_higher_order_debug;
/*
* Object debugging
*/
/* Verify that a pointer has an address that is valid within a slab page */
static inline int check_valid_pointer(struct kmem_cache *s,
struct slab *slab, void *object)
{
void *base;
if (!object)
return 1;
base = slab_address(slab);
object = kasan_reset_tag(object);
object = restore_red_left(s, object);
if (object < base || object >= base + slab->objects * s->size ||
(object - base) % s->size) {
return 0;
}
return 1;
}
static void print_section(char *level, char *text, u8 *addr,
unsigned int length)
{
metadata_access_enable();
print_hex_dump(level, text, DUMP_PREFIX_ADDRESS,
16, 1, kasan_reset_tag((void *)addr), length, 1);
metadata_access_disable();
}
static struct track *get_track(struct kmem_cache *s, void *object,
enum track_item alloc)
{
struct track *p;
p = object + get_info_end(s);
return kasan_reset_tag(p + alloc);
}
#ifdef CONFIG_STACKDEPOT
static noinline depot_stack_handle_t set_track_prepare(gfp_t gfp_flags)
{
depot_stack_handle_t handle;
unsigned long entries[TRACK_ADDRS_COUNT];
unsigned int nr_entries;
nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
handle = stack_depot_save(entries, nr_entries, gfp_flags);
return handle;
}
#else
static inline depot_stack_handle_t set_track_prepare(gfp_t gfp_flags)
{
return 0;
}
#endif
static void set_track_update(struct kmem_cache *s, void *object,
enum track_item alloc, unsigned long addr,
depot_stack_handle_t handle)
{
struct track *p = get_track(s, object, alloc);
#ifdef CONFIG_STACKDEPOT
p->handle = handle;
#endif
p->addr = addr;
p->cpu = raw_smp_processor_id();
p->pid = current->pid;
p->when = jiffies;
}
static __always_inline void set_track(struct kmem_cache *s, void *object,
enum track_item alloc, unsigned long addr, gfp_t gfp_flags)
{
depot_stack_handle_t handle = set_track_prepare(gfp_flags);
set_track_update(s, object, alloc, addr, handle);
}
static void init_tracking(struct kmem_cache *s, void *object)
{
struct track *p;
if (!(s->flags & SLAB_STORE_USER))
return;
p = get_track(s, object, TRACK_ALLOC);
memset(p, 0, 2*sizeof(struct track));
}
static void print_track(const char *s, struct track *t, unsigned long pr_time)
{
depot_stack_handle_t handle __maybe_unused;
if (!t->addr)
return;
pr_err("%s in %pS age=%lu cpu=%u pid=%d\n",
s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
#ifdef CONFIG_STACKDEPOT
handle = READ_ONCE(t->handle);
if (handle)
stack_depot_print(handle);
else
pr_err("object allocation/free stack trace missing\n");
#endif
}
void print_tracking(struct kmem_cache *s, void *object)
{
unsigned long pr_time = jiffies;
if (!(s->flags & SLAB_STORE_USER))
return;
print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time);
print_track("Freed", get_track(s, object, TRACK_FREE), pr_time);
}
static void print_slab_info(const struct slab *slab)
{
pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%pGp\n",
slab, slab->objects, slab->inuse, slab->freelist,
&slab->flags.f);
}
void skip_orig_size_check(struct kmem_cache *s, const void *object)
{
set_orig_size(s, (void *)object, s->object_size);
}
static void __slab_bug(struct kmem_cache *s, const char *fmt, va_list argsp)
{
struct va_format vaf;
va_list args;
va_copy(args, argsp);
vaf.fmt = fmt;
vaf.va = &args;
pr_err("=============================================================================\n");
pr_err("BUG %s (%s): %pV\n", s ? s->name : "<unknown>", print_tainted(), &vaf);
pr_err("-----------------------------------------------------------------------------\n\n");
va_end(args);
}
static void slab_bug(struct kmem_cache *s, const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
__slab_bug(s, fmt, args);
va_end(args);
}
__printf(2, 3)
static void slab_fix(struct kmem_cache *s, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
if (slab_add_kunit_errors())
return;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
pr_err("FIX %s: %pV\n", s->name, &vaf);
va_end(args);
}
static void print_trailer(struct kmem_cache *s, struct slab *slab, u8 *p)
{
unsigned int off; /* Offset of last byte */
u8 *addr = slab_address(slab);
print_tracking(s, p);
print_slab_info(slab);
pr_err("Object 0x%p @offset=%tu fp=0x%p\n\n",
p, p - addr, get_freepointer(s, p));
if (s->flags & SLAB_RED_ZONE)
print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
s->red_left_pad);
else if (p > addr + 16)
print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
print_section(KERN_ERR, "Object ", p,
min_t(unsigned int, s->object_size, PAGE_SIZE));
if (s->flags & SLAB_RED_ZONE)
print_section(KERN_ERR, "Redzone ", p + s->object_size,
s->inuse - s->object_size);
off = get_info_end(s);
if (s->flags & SLAB_STORE_USER)
off += 2 * sizeof(struct track);
if (slub_debug_orig_size(s))
off += sizeof(unsigned long);
off += kasan_metadata_size(s, false);
if (obj_exts_in_object(s, slab))
off += sizeof(struct slabobj_ext);
if (off != size_from_object(s))
/* Beginning of the filler is the free pointer */
print_section(KERN_ERR, "Padding ", p + off,
size_from_object(s) - off);
}
static void object_err(struct kmem_cache *s, struct slab *slab,
u8 *object, const char *reason)
{
if (slab_add_kunit_errors())
return;
slab_bug(s, reason);
if (!object || !check_valid_pointer(s, slab, object)) {
print_slab_info(slab);
pr_err("Invalid pointer 0x%p\n", object);
} else {
print_trailer(s, slab, object);
}
add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
WARN_ON(1);
}
static void __slab_err(struct slab *slab)
{
if (slab_in_kunit_test())
return;
print_slab_info(slab);
add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
WARN_ON(1);
}
static __printf(3, 4) void slab_err(struct kmem_cache *s, struct slab *slab,
const char *fmt, ...)
{
va_list args;
if (slab_add_kunit_errors())
return;
va_start(args, fmt);
__slab_bug(s, fmt, args);
va_end(args);
__slab_err(slab);
}
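/*
 * Fill the object's red zones and poison pattern; @val is SLUB_RED_ACTIVE
 * for objects being allocated and SLUB_RED_INACTIVE for free objects.
 */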
static void init_object(struct kmem_cache *s, void *object, u8 val)
{
u8 *p = kasan_reset_tag(object);
unsigned int poison_size = s->object_size;
if (s->flags & SLAB_RED_ZONE) {
/*
* Here and below, avoid overwriting the KMSAN shadow. Keeping
* the shadow makes it possible to distinguish uninit-value
* from use-after-free.
*/
memset_no_sanitize_memory(p - s->red_left_pad, val,
s->red_left_pad);
if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
/*
* Redzone the extra space kmalloc allocated beyond the
* requested size, and limit the poison size to the
* original request size accordingly.
*/
poison_size = get_orig_size(s, object);
}
}
if (s->flags & __OBJECT_POISON) {
memset_no_sanitize_memory(p, POISON_FREE, poison_size - 1);
memset_no_sanitize_memory(p + poison_size - 1, POISON_END, 1);
}
if (s->flags & SLAB_RED_ZONE)
memset_no_sanitize_memory(p + poison_size, val,
s->inuse - poison_size);
}
static void restore_bytes(struct kmem_cache *s, const char *message, u8 data,
void *from, void *to)
{
slab_fix(s, "Restoring %s 0x%p-0x%p=0x%x", message, from, to - 1, data);
memset(from, data, to - from);
}
#ifdef CONFIG_KMSAN
#define pad_check_attributes noinline __no_kmsan_checks
#else
#define pad_check_attributes
#endif
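/*
 * Verify that @bytes bytes starting at @start all contain @value. On
 * mismatch, report the corruption, restore the expected contents and
 * return 0; return 1 if the range is intact.
 */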
static pad_check_attributes int
check_bytes_and_report(struct kmem_cache *s, struct slab *slab,
u8 *object, const char *what, u8 *start, unsigned int value,
unsigned int bytes, bool slab_obj_print)
{
u8 *fault;
u8 *end;
u8 *addr = slab_address(slab);
metadata_access_enable();
fault = memchr_inv(kasan_reset_tag(start), value, bytes);
metadata_access_disable();
if (!fault)
return 1;
end = start + bytes;
while (end > fault && end[-1] == value)
end--;
if (slab_add_kunit_errors())
goto skip_bug_print;
pr_err("[%s overwritten] 0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
what, fault, end - 1, fault - addr, fault[0], value);
if (slab_obj_print)
object_err(s, slab, object, "Object corrupt");
skip_bug_print:
restore_bytes(s, what, value, fault, end);
return 0;
}
/*
* Object field layout:
*
* [Left redzone padding] (if SLAB_RED_ZONE)
* - Field size: s->red_left_pad
* - Immediately precedes each object when SLAB_RED_ZONE is set.
* - Filled with 0xbb (SLUB_RED_INACTIVE) for inactive objects and
* 0xcc (SLUB_RED_ACTIVE) for objects in use when SLAB_RED_ZONE.
*
* [Object bytes] (object address starts here)
* - Field size: s->object_size
* - Object payload bytes.
* - If the freepointer may overlap the object, it is stored inside
* the object (typically near the middle).
* - Poisoning uses 0x6b (POISON_FREE) and the last byte is
* 0xa5 (POISON_END) when __OBJECT_POISON is enabled.
*
* [Word-align padding] (right redzone when SLAB_RED_ZONE is set)
* - Field size: s->inuse - s->object_size
* - If redzoning is enabled and ALIGN(size, sizeof(void *)) adds no
* padding, explicitly extend by one word so the right redzone is
* non-empty.
* - Filled with 0xbb (SLUB_RED_INACTIVE) for inactive objects and
* 0xcc (SLUB_RED_ACTIVE) for objects in use when SLAB_RED_ZONE.
*
* [Metadata starts at object + s->inuse]
* - A. freelist pointer (if freeptr_outside_object)
* - B. alloc tracking (SLAB_STORE_USER)
* - C. free tracking (SLAB_STORE_USER)
* - D. original request size (SLAB_KMALLOC && SLAB_STORE_USER)
* - E. KASAN metadata (if enabled)
*
* [Mandatory padding] (if CONFIG_SLUB_DEBUG && SLAB_RED_ZONE)
* - One mandatory debug word to guarantee a minimum poisoned gap
* between metadata and the next object, independent of alignment.
* - Filled with 0x5a (POISON_INUSE) when SLAB_POISON is set.
* [Final alignment padding]
* - Bytes added by ALIGN(size, s->align) to reach s->size.
* - When the padding is large enough, it can be used to store
* struct slabobj_ext for accounting metadata (obj_exts_in_object()).
* - The remaining bytes (if any) are filled with 0x5a (POISON_INUSE)
* when SLAB_POISON is set.
*
* Notes:
* - Redzones are filled by init_object() with SLUB_RED_ACTIVE/INACTIVE.
* - Object contents are poisoned with POISON_FREE/END when __OBJECT_POISON.
* - The trailing padding is pre-filled with POISON_INUSE by
* setup_slab_debug() when SLAB_POISON is set, and is validated by
* check_pad_bytes().
* - The first object pointer is slab_address(slab) +
* (s->red_left_pad if redzoning); subsequent objects are reached by
* adding s->size each time.
*
* If a slab cache flag relies on specific metadata to exist at a fixed
* offset, the flag must be included in SLAB_NEVER_MERGE to prevent merging.
* Otherwise, the cache would misbehave as s->object_size and s->inuse are
* adjusted during cache merging (see __kmem_cache_alias()).
*/
static int check_pad_bytes(struct kmem_cache *s, struct slab *slab, u8 *p)
{
unsigned long off = get_info_end(s); /* The end of info */
if (s->flags & SLAB_STORE_USER) {
/* We also have user information there */
off += 2 * sizeof(struct track);
if (s->flags & SLAB_KMALLOC)
off += sizeof(unsigned long);
}
off += kasan_metadata_size(s, false);
if (obj_exts_in_object(s, slab))
off += sizeof(struct slabobj_ext);
if (size_from_object(s) == off)
return 1;
return check_bytes_and_report(s, slab, p, "Object padding",
p + off, POISON_INUSE, size_from_object(s) - off, true);
}
/* Check the pad bytes at the end of a slab page */
static pad_check_attributes void
slab_pad_check(struct kmem_cache *s, struct slab *slab)
{
u8 *start;
u8 *fault;
u8 *end;
u8 *pad;
int length;
int remainder;
if (!(s->flags & SLAB_POISON))
return;
start = slab_address(slab);
length = slab_size(slab);
end = start + length;
if (obj_exts_in_slab(s, slab) && !obj_exts_in_object(s, slab)) {
remainder = length;
remainder -= obj_exts_offset_in_slab(s, slab);
remainder -= obj_exts_size_in_slab(slab);
} else {
remainder = length % s->size;
}
if (!remainder)
return;
pad = end - remainder;
metadata_access_enable();
fault = memchr_inv(kasan_reset_tag(pad), POISON_INUSE, remainder);
metadata_access_disable();
if (!fault)
return;
while (end > fault && end[-1] == POISON_INUSE)
end--;
slab_bug(s, "Padding overwritten. 0x%p-0x%p @offset=%tu",
fault, end - 1, fault - start);
print_section(KERN_ERR, "Padding ", pad, remainder);
__slab_err(slab);
restore_bytes(s, "slab padding", POISON_INUSE, fault, end);
}
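/*
 * Check a single object's red zones, poison and free pointer; @val is
 * SLUB_RED_ACTIVE when the object is expected to be allocated and
 * SLUB_RED_INACTIVE when it is expected to be free. Returns 1 if intact.
 */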
static int check_object(struct kmem_cache *s, struct slab *slab,
void *object, u8 val)
{
u8 *p = object;
u8 *endobject = object + s->object_size;
unsigned int orig_size, kasan_meta_size;
int ret = 1;
if (s->flags & SLAB_RED_ZONE) {
if (!check_bytes_and_report(s, slab, object, "Left Redzone",
object - s->red_left_pad, val, s->red_left_pad, ret))
ret = 0;
if (!check_bytes_and_report(s, slab, object, "Right Redzone",
endobject, val, s->inuse - s->object_size, ret))
ret = 0;
if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
orig_size = get_orig_size(s, object);
if (s->object_size > orig_size &&
!check_bytes_and_report(s, slab, object,
"kmalloc Redzone", p + orig_size,
val, s->object_size - orig_size, ret)) {
ret = 0;
}
}
} else {
if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
if (!check_bytes_and_report(s, slab, p, "Alignment padding",
endobject, POISON_INUSE,
s->inuse - s->object_size, ret))
ret = 0;
}
}
if (s->flags & SLAB_POISON) {
if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON)) {
/*
* KASAN can save its free meta data inside of the
* object at offset 0. Thus, skip checking the part of
* the redzone that overlaps with the meta data.
*/
kasan_meta_size = kasan_metadata_size(s, true);
if (kasan_meta_size < s->object_size - 1 &&
!check_bytes_and_report(s, slab, p, "Poison",
p + kasan_meta_size, POISON_FREE,
s->object_size - kasan_meta_size - 1, ret))
ret = 0;
if (kasan_meta_size < s->object_size &&
!check_bytes_and_report(s, slab, p, "End Poison",
p + s->object_size - 1, POISON_END, 1, ret))
ret = 0;
}
/*
* check_pad_bytes cleans up on its own.
*/
if (!check_pad_bytes(s, slab, p))
ret = 0;
}
/*
* Cannot check freepointer while object is allocated if
* object and freepointer overlap.
*/
if ((freeptr_outside_object(s) || val != SLUB_RED_ACTIVE) &&
!check_valid_pointer(s, slab, get_freepointer(s, p))) {
object_err(s, slab, p, "Freepointer corrupt");
/*
* No choice but to zap it and thus lose the remainder
* of the free objects in this slab. May cause
* another error because the object count is now wrong.
*/
set_freepointer(s, p, NULL);
ret = 0;
}
return ret;
}
/*
* Checks if the slab state looks sane. Assumes the struct slab pointer
* was either obtained in a way that ensures it's valid, or validated
* by validate_slab_ptr()
*/
static int check_slab(struct kmem_cache *s, struct slab *slab)
{
int maxobj;
maxobj = order_objects(slab_order(slab), s->size);
if (slab->objects > maxobj) {
slab_err(s, slab, "objects %u > max %u",
slab->objects, maxobj);
return 0;
}
if (slab->inuse > slab->objects) {
slab_err(s, slab, "inuse %u > max %u",
slab->inuse, slab->objects);
return 0;
}
if (slab->frozen) {
slab_err(s, slab, "Slab disabled since SLUB metadata consistency check failed");
return 0;
}
/* Slab_pad_check fixes things up after itself */
slab_pad_check(s, slab);
return 1;
}
/*
* Determine if a certain object in a slab is on the freelist. Must hold the
* slab lock to guarantee that the chains are in a consistent state.
*/
static bool on_freelist(struct kmem_cache *s, struct slab *slab, void *search)
{
int nr = 0;
void *fp;
void *object = NULL;
int max_objects;
fp = slab->freelist;
while (fp && nr <= slab->objects) {
if (fp == search)
return true;
if (!check_valid_pointer(s, slab, fp)) {
if (object) {
object_err(s, slab, object,
"Freechain corrupt");
set_freepointer(s, object, NULL);
break;
} else {
slab_err(s, slab, "Freepointer corrupt");
slab->freelist = NULL;
slab->inuse = slab->objects;
slab_fix(s, "Freelist cleared");
return false;
}
}
object = fp;
fp = get_freepointer(s, object);
nr++;
}
if (nr > slab->objects) {
slab_err(s, slab, "Freelist cycle detected");
slab->freelist = NULL;
slab->inuse = slab->objects;
slab_fix(s, "Freelist cleared");
return false;
}
max_objects = order_objects(slab_order(slab), s->size);
if (max_objects > MAX_OBJS_PER_PAGE)
max_objects = MAX_OBJS_PER_PAGE;
if (slab->objects != max_objects) {
slab_err(s, slab, "Wrong number of objects. Found %d but should be %d",
slab->objects, max_objects);
slab->objects = max_objects;
slab_fix(s, "Number of objects adjusted");
}
if (slab->inuse != slab->objects - nr) {
slab_err(s, slab, "Wrong object count. Counter is %d but counted were %d",
slab->inuse, slab->objects - nr);
slab->inuse = slab->objects - nr;
slab_fix(s, "Object count adjusted");
}
return search == NULL;
}
static void trace(struct kmem_cache *s, struct slab *slab, void *object,
int alloc)
{
if (s->flags & SLAB_TRACE) {
pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
s->name,
alloc ? "alloc" : "free",
object, slab->inuse,
slab->freelist);
if (!alloc)
print_section(KERN_INFO, "Object ", (void *)object,
s->object_size);
dump_stack();
}
}
/*
* Tracking of fully allocated slabs for debugging purposes.
*/
static void add_full(struct kmem_cache *s,
struct kmem_cache_node *n, struct slab *slab)
{
if (!(s->flags & SLAB_STORE_USER))
return;
lockdep_assert_held(&n->list_lock);
list_add(&slab->slab_list, &n->full);
}
static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct slab *slab)
{
if (!(s->flags & SLAB_STORE_USER))
return;
lockdep_assert_held(&n->list_lock);
list_del(&slab->slab_list);
}
static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
{
return atomic_long_read(&n->nr_slabs);
}
static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
{
struct kmem_cache_node *n = get_node(s, node);
atomic_long_inc(&n->nr_slabs);
atomic_long_add(objects, &n->total_objects);
}
static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
{
struct kmem_cache_node *n = get_node(s, node);
atomic_long_dec(&n->nr_slabs);
atomic_long_sub(objects, &n->total_objects);
}
/* Object debug checks for alloc/free paths */
static void setup_object_debug(struct kmem_cache *s, void *object)
{
if (!kmem_cache_debug_flags(s, SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))
return;
init_object(s, object, SLUB_RED_INACTIVE);
init_tracking(s, object);
}
static
void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr)
{
if (!kmem_cache_debug_flags(s, SLAB_POISON))
return;
metadata_access_enable();
memset(kasan_reset_tag(addr), POISON_INUSE, slab_size(slab));
metadata_access_disable();
}
static inline int alloc_consistency_checks(struct kmem_cache *s,
struct slab *slab, void *object)
{
if (!check_slab(s, slab))
return 0;
if (!check_valid_pointer(s, slab, object)) {
object_err(s, slab, object, "Freelist Pointer check fails");
return 0;
}
if (!check_object(s, slab, object, SLUB_RED_INACTIVE))
return 0;
return 1;
}
static noinline bool alloc_debug_processing(struct kmem_cache *s,
struct slab *slab, void *object, int orig_size)
{
if (s->flags & SLAB_CONSISTENCY_CHECKS) {
if (!alloc_consistency_checks(s, slab, object))
goto bad;
}
/* Success. Perform special debug activities for allocs */
trace(s, slab, object, 1);
set_orig_size(s, object, orig_size);
init_object(s, object, SLUB_RED_ACTIVE);
return true;
bad:
/*
* Let's do the best we can to avoid issues in the future. Marking all
* objects as used avoids touching the remaining objects.
*/
slab_fix(s, "Marking all objects used");
slab->inuse = slab->objects;
slab->freelist = NULL;
slab->frozen = 1; /* mark consistency-failed slab as frozen */
return false;
}
static inline int free_consistency_checks(struct kmem_cache *s,
struct slab *slab, void *object, unsigned long addr)
{
if (!check_valid_pointer(s, slab, object)) {
slab_err(s, slab, "Invalid object pointer 0x%p", object);
return 0;
}
if (on_freelist(s, slab, object)) {
object_err(s, slab, object, "Object already free");
return 0;
}
if (!check_object(s, slab, object, SLUB_RED_ACTIVE))
return 0;
if (unlikely(s != slab->slab_cache)) {
if (!slab->slab_cache) {
slab_err(NULL, slab, "No slab cache for object 0x%p",
object);
} else {
object_err(s, slab, object,
"page slab pointer corrupt.");
}
return 0;
}
return 1;
}
/*
* Parse a block of slab_debug options. Blocks are delimited by ';'
*
* @str: start of block
* @flags: returns parsed flags, or DEBUG_DEFAULT_FLAGS if none specified
* @slabs: return start of list of slabs, or NULL when there's no list
* @init: assume this is initial parsing and not per-kmem-create parsing
*
* returns the start of next block if there's any, or NULL
*/
static const char *
parse_slub_debug_flags(const char *str, slab_flags_t *flags, const char **slabs, bool init)
{
bool higher_order_disable = false;
/* Skip any completely empty blocks */
while (*str && *str == ';')
str++;
if