// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/kernel/softirq.c
*
* Copyright (C) 1992 Linus Torvalds
*
* Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/local_lock.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/wait_bit.h>
#include <linux/workqueue.h>
#include <asm/softirq_stack.h>
#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
/*
- No shared variables, all the data are CPU local.
- If a softirq needs serialization, let it serialize itself
by its own spinlocks.
- Even if softirq is serialized, only the local cpu is marked for
  execution. Hence, we get a sort of weak cpu binding.
  Though it is still not clear whether this will result in better
  locality or not.
Examples:
- NET RX softirq. It is multithreaded and does not require
any global serialization.
- NET TX softirq. It kicks software netdevice queues, hence
it is logically serialized per device, but this serialization
is invisible to common code.
- Tasklets: serialized wrt itself.
*/
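/*
 * Illustrative sketch, not actual code from this section: "only the local
 * cpu is marked for execution" above means that raising a softirq merely
 * sets a bit in the raising CPU's own pending mask, roughly:
 *
 *	void raise_softirq_irqoff(unsigned int nr)
 *	{
 *		__raise_softirq_irqoff(nr);	// or_softirq_pending(1UL << nr) on this CPU
 *		if (!in_interrupt())		// not nested in hard/soft irq context
 *			wakeup_softirqd();	// let ksoftirqd run it soon
 *		// otherwise irq_exit() will process the pending softirqs
 *	}
 *
 * The real implementation lives further down in this file; the sketch only
 * illustrates the local-CPU nature of the marking.
 */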
#ifndef __ARCH_IRQ_STAT
DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
#endif
static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
const char * const softirq_to_name[NR_SOFTIRQS] = {
"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
"TASKLET", "SCHED", "HRTIMER", "RCU"
};
/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so we let the scheduler balance
 * the softirq load for us.
*/
static void wakeup_softirqd(void)
{
/* Interrupts are disabled: no need to stop preemption */
struct task_struct *tsk = __this_cpu_read(ksoftirqd);
if (tsk)
wake_up_process(tsk);
}
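/*
 * Illustrative sketch, not actual code at this point: the softirq
 * processing loop further down in this file bounds its own runtime and
 * then defers to ksoftirqd instead of looping forever; its tail looks
 * roughly like this (variable names may differ between kernel versions):
 *
 *	pending = local_softirq_pending();
 *	if (pending) {
 *		if (time_before(jiffies, end) && !need_resched() &&
 *		    --max_restart)
 *			goto restart;		// run another batch inline
 *
 *		wakeup_softirqd();		// hand the rest to the scheduler
 *	}
 */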
#ifdef CONFIG_TRACE_IRQFLAGS
DEFINE_PER_CPU(int, hardirqs_enabled);
DEFINE_PER_CPU(int, hardirq_context);
EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
#endif
/*
* SOFTIRQ_OFFSET usage:
*
* On !RT kernels 'count' is the preempt counter, on RT kernels this applies
 * to a per CPU counter and to task::softirq_disable_cnt.
*
* - count is changed by SOFTIRQ_OFFSET on entering or leaving softirq
* processing.
*
* - count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
* on local_bh_disable or local_bh_enable.
*
* This lets us distinguish between whether we are currently processing
* softirq and whether we just have bh disabled.
*/
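/*
 * Hedged example (not code from this file): on !RT kernels, with
 * SOFTIRQ_OFFSET = 1 << SOFTIRQ_SHIFT and SOFTIRQ_DISABLE_OFFSET twice
 * that (see <linux/preempt.h>), the two states look like:
 *
 *	local_bh_disable();	// count += SOFTIRQ_DISABLE_OFFSET:
 *				// in_softirq() is true,
 *				// in_serving_softirq() is false
 *	local_bh_enable();	// count -= SOFTIRQ_DISABLE_OFFSET
 *
 * While softirq handlers actually run, the softirq core adds
 * SOFTIRQ_OFFSET instead, so in_serving_softirq() (which tests the
 * SOFTIRQ_OFFSET bit) becomes true as well.
 */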
#ifdef CONFIG_PREEMPT_RT
/*
 * RT accounts for BH disabled sections in task::softirq_disable_cnt and
* also in per CPU softirq_ctrl::cnt. This is necessary to allow tasks in a
* softirq disabled section to be preempted.
*
* The per task counter is used for softirq_count(), in_softirq() and
 * in_serving_softirq() because these counts are only valid when the task
* holding softirq_ctrl::lock is running.
*
 * The per CPU counter prevents pointless wakeups of ksoftirqd in case the
 * task which is in a softirq disabled section is preempted or blocks.
*/
struct softirq_ctrl {
local_lock_t lock;
int cnt;
};
static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = {
.lock = INIT_LOCAL_LOCK(softirq_ctrl.lock),
};
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key bh_lock_key;
struct lockdep_map bh_lock_map = {
.name = "local_bh",
.key = &bh_lock_key,
.wait_type_outer = LD_WAIT_FREE,
.wait_type_inner = LD_WAIT_CONFIG, /* PREEMPT_RT makes BH preemptible. */
.lock_type = LD_LOCK_PERCPU,
};
EXPORT_SYMBOL_GPL(bh_lock_map);
#endif
/**
* local_bh_blocked() - Check for idle whether BH processing is blocked
*
 * Returns false if the per CPU softirq_ctrl::cnt is 0, otherwise true.
*
* This is invoked from the idle task to guard against false positive
* softirq pending warnings, which would happen when the task which holds
* softirq_ctrl::lock was the only running task on the CPU and blocks on
* some other lock.
*/
bool local_bh_blocked(void)
{
return __this_cpu_read(softirq_ctrl.cnt) != 0;
}
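/*
 * Hedged usage sketch (the exact caller and message live in the tick/idle
 * code, e.g. kernel/time/tick-sched.c, and differ between kernel versions):
 *
 *	if (local_softirq_pending() && !local_bh_blocked())
 *		pr_warn("softirq work is pending while stopping the tick\n");
 *
 * i.e. the "softirq pending" warning is suppressed when the current
 * softirq_ctrl::lock holder is merely blocked on another lock.
 */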
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
unsigned long flags;
int newcnt;
WARN_ON_ONCE(in_hardirq());
lock_map_acquire_read(&bh_lock_map);
/* First entry