| author | Linus Torvalds <torvalds@linux-foundation.org> | 2025-09-30 10:35:11 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2025-09-30 10:35:11 -0700 |
| commit | 6c7340a7a8d2b6ecad1ad108f6daa73ba1dc082f (patch) | |
| tree | e3381cf64f5094a266547552b0a9945012566bfa | |
| parent | 755fa5b4fb36627796af19932a432d343220ec63 (diff) | |
| parent | 45b7f780739a3145aeef24d2dfa02517a6c82ed6 (diff) | |
Merge tag 'sched-core-2025-09-26' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:
"Core scheduler changes:
- Make migrate_{en,dis}able() inline, to improve performance
(Menglong Dong)
- Move SDTL_INIT() functions out-of-line (Peter Zijlstra)
- Unify the SCHED_{SMT,CLUSTER,MC} Kconfig (Peter Zijlstra)

Fair scheduling:
- Defer throttling to when tasks exit to user-space, to reduce the
chance & impact of throttle-preemption with held locks and other
resources (Aaron Lu, Valentin Schneider)
- Get rid of sched_domains_curr_level hack for tl->cpumask(), as the
warning was getting triggered on certain topologies (Peter
Zijlstra)

Misc cleanups & fixes:
- Header cleanups (Menglong Dong)
- Fix race in push_dl_task() (Harshit Agarwal)"
* tag 'sched-core-2025-09-26' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
sched: Fix some typos in include/linux/preempt.h
sched: Make migrate_{en,dis}able() inline
rcu: Replace preempt.h with sched.h in include/linux/rcupdate.h
arch: Add the macro COMPILE_OFFSETS to all the asm-offsets.c
sched/fair: Do not balance task to a throttled cfs_rq
sched/fair: Do not special case tasks in throttled hierarchy
sched/fair: update_cfs_group() for throttled cfs_rqs
sched/fair: Propagate load for throttled cfs_rq
sched/fair: Get rid of throttled_lb_pair()
sched/fair: Task based throttle time accounting
sched/fair: Switch to task based throttle model
sched/fair: Implement throttle task work and related helpers
sched/fair: Add related data structure for task based throttle
sched: Unify the SCHED_{SMT,CLUSTER,MC} Kconfig
sched: Move SDTL_INIT() functions out-of-line
sched/fair: Get rid of sched_domains_curr_level hack for tl->cpumask()
sched/deadline: Fix race in push_dl_task()
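
The task-based throttle commits listed above change the shape of CFS bandwidth throttling: instead of dequeueing an entire throttled cfs_rq at throttle time, each task in the throttled hierarchy is taken off the runqueue on its own way back to user space, so it is never throttled while holding kernel locks or other resources. A rough sketch of that shape using the kernel's existing task_work API; the task_struct field and helpers shown here (sched_throttle_work, task_is_throttled(), dequeue_throttled()) are illustrative placeholders, not the merged code:

```c
/*
 * Illustrative sketch only, not the merged implementation.
 * init_task_work()/task_work_add() and TWA_RESUME are the real
 * <linux/task_work.h> API; the field and helper names are placeholders.
 */
#include <linux/sched.h>
#include <linux/task_work.h>

static void throttle_work_fn(struct callback_head *work)
{
	struct task_struct *p = container_of(work, struct task_struct,
					     sched_throttle_work);

	/*
	 * Runs on the exit-to-user path, so the task cannot be holding
	 * kernel locks or other resources when it is finally dequeued.
	 */
	if (task_is_throttled(p))
		dequeue_throttled(p);
}

static void defer_throttle(struct task_struct *p)
{
	init_task_work(&p->sched_throttle_work, throttle_work_fn);
	/* TWA_RESUME: invoke the callback when @p next returns to user space. */
	task_work_add(p, &p->sched_throttle_work, TWA_RESUME);
}
```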
49 files changed, 686 insertions, 496 deletions
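
In the diff below, the first hunk teaches the top-level Kbuild to generate a second offsets header, include/generated/rq-offsets.h, through the same filechk_offsets machinery used for asm-offsets.h, and every asm-offsets.c gains a `#define COMPILE_OFFSETS`, which appears intended to let headers detect an offsets-generation build and avoid depending on headers that have not been generated yet. A sketch of the pattern such an offsets file follows (the member shown is illustrative; the real kernel/sched/rq-offsets.c may export different constants):

```c
/*
 * Sketch of the asm-offsets pattern: this file is compiled only to
 * assembly, and filechk_offsets turns the DEFINE() markers into
 * #defines in include/generated/rq-offsets.h.
 */
#define COMPILE_OFFSETS

#include <linux/kbuild.h>	/* DEFINE() */
#include <linux/stddef.h>	/* offsetof() */

#include "sched.h"		/* struct rq (kernel/sched/sched.h) */

int main(void)
{
	/* Illustrative member; the merged file may emit different offsets. */
	DEFINE(RQ_nr_running, offsetof(struct rq, nr_running));
	return 0;
}
```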
@@ -34,13 +34,24 @@ arch/$(SRCARCH)/kernel/asm-offsets.s: $(timeconst-file) $(bounds-file)
 $(offsets-file): arch/$(SRCARCH)/kernel/asm-offsets.s FORCE
 	$(call filechk,offsets,__ASM_OFFSETS_H__)

+# Generate rq-offsets.h
+
+rq-offsets-file := include/generated/rq-offsets.h
+
+targets += kernel/sched/rq-offsets.s
+
+kernel/sched/rq-offsets.s: $(offsets-file)
+
+$(rq-offsets-file): kernel/sched/rq-offsets.s FORCE
+	$(call filechk,offsets,__RQ_OFFSETS_H__)
+
 # Check for missing system calls

 quiet_cmd_syscalls = CALL    $<
       cmd_syscalls = $(CONFIG_SHELL) $< $(CC) $(c_flags) $(missing_syscalls_flags)

 PHONY += missing-syscalls
-missing-syscalls: scripts/checksyscalls.sh $(offsets-file)
+missing-syscalls: scripts/checksyscalls.sh $(rq-offsets-file)
 	$(call cmd,syscalls)

 # Check the manual modification of atomic headers
diff --git a/arch/Kconfig b/arch/Kconfig
index 97642c08a124..2162276995a0 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -41,6 +41,44 @@ config HOTPLUG_SMT
 config SMT_NUM_THREADS_DYNAMIC
 	bool

+config ARCH_SUPPORTS_SCHED_SMT
+	bool
+
+config ARCH_SUPPORTS_SCHED_CLUSTER
+	bool
+
+config ARCH_SUPPORTS_SCHED_MC
+	bool
+
+config SCHED_SMT
+	bool "SMT (Hyperthreading) scheduler support"
+	depends on ARCH_SUPPORTS_SCHED_SMT
+	default y
+	help
+	  Improves the CPU scheduler's decision making when dealing with
+	  MultiThreading at a cost of slightly increased overhead in some
+	  places. If unsure say N here.
+
+config SCHED_CLUSTER
+	bool "Cluster scheduler support"
+	depends on ARCH_SUPPORTS_SCHED_CLUSTER
+	default y
+	help
+	  Cluster scheduler support improves the CPU scheduler's decision
+	  making when dealing with machines that have clusters of CPUs.
+	  Cluster usually means a couple of CPUs which are placed closely
+	  by sharing mid-level caches, last-level cache tags or internal
+	  busses.
+
+config SCHED_MC
+	bool "Multi-Core Cache (MC) scheduler support"
+	depends on ARCH_SUPPORTS_SCHED_MC
+	default y
+	help
+	  Multi-core scheduler support improves the CPU scheduler's decision
+	  making when dealing with multi-core CPU chips at a cost of slightly
+	  increased overhead in some places. If unsure say N here.
+
 # Selected by HOTPLUG_CORE_SYNC_DEAD or HOTPLUG_CORE_SYNC_FULL
 config HOTPLUG_CORE_SYNC
 	bool
diff --git a/arch/alpha/kernel/asm-offsets.c b/arch/alpha/kernel/asm-offsets.c
index e9dad60b147f..1ebb05890499 100644
--- a/arch/alpha/kernel/asm-offsets.c
+++ b/arch/alpha/kernel/asm-offsets.c
@@ -4,6 +4,7 @@
  * This code generates raw asm output which is post-processed to extract
  * and format the required data.
  */
+#define COMPILE_OFFSETS

 #include <linux/types.h>
 #include <linux/stddef.h>
diff --git a/arch/arc/kernel/asm-offsets.c b/arch/arc/kernel/asm-offsets.c
index f77deb799175..2978da85fcb6 100644
--- a/arch/arc/kernel/asm-offsets.c
+++ b/arch/arc/kernel/asm-offsets.c
@@ -2,6 +2,7 @@
 /*
  * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
  */
+#define COMPILE_OFFSETS

 #include <linux/sched.h>
 #include <linux/mm.h>
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 36ab8625be72..358057001859 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -941,28 +941,14 @@ config IRQSTACKS
 config ARM_CPU_TOPOLOGY
 	bool "Support cpu topology definition"
 	depends on SMP && CPU_V7
+	select ARCH_SUPPORTS_SCHED_MC
+	select ARCH_SUPPORTS_SCHED_SMT
 	default y
 	help
 	  Support ARM cpu topology definition. The MPIDR register defines
 	  affinity between processors which is then used to describe the cpu
 	  topology of an ARM System.

-config SCHED_MC
-	bool "Multi-core scheduler support"
-	depends on ARM_CPU_TOPOLOGY
-	help
-	  Multi-core scheduler support improves the CPU scheduler's decision
-	  making when dealing with multi-core CPU chips at a cost of slightly
-	  increased overhead in some places. If unsure say N here.
-
-config SCHED_SMT
-	bool "SMT scheduler support"
-	depends on ARM_CPU_TOPOLOGY
-	help
-	  Improves the CPU scheduler's decision making when dealing with
-	  MultiThreading at a cost of slightly increased overhead in some
-	  places. If unsure say N here.
-
 config HAVE_ARM_SCU
 	bool
 	help
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 123f4a8ef446..2101938d27fc 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -7,6 +7,8 @@
  * This code generates raw asm output which is post-processed to extract
  * and format the required data.
  */
+#define COMPILE_OFFSETS
+
 #include <linux/compiler.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index f828781aa840..616b702f10d4 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -108,6 +108,9 @@ config ARM64
 	select ARCH_SUPPORTS_PER_VMA_LOCK
 	select ARCH_SUPPORTS_HUGE_PFNMAP if TRANSPARENT_HUGEPAGE
 	select ARCH_SUPPORTS_RT
+	select ARCH_SUPPORTS_SCHED_SMT
+	select ARCH_SUPPORTS_SCHED_CLUSTER
+	select ARCH_SUPPORTS_SCHED_MC
 	select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
 	select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT
 	select ARCH_WANT_DEFAULT_BPF_JIT
@@ -1507,29 +1510,6 @@ config CPU_LITTLE_ENDIAN

 endchoice

-config SCHED_MC
-	bool "Multi-core scheduler support"
-	help
-	  Multi-core scheduler support improves the CPU scheduler's decision
-	  making when dealing with multi-core CPU chips at a cost of slightly
-	  increased overhead in some places. If unsure say N here.
-
-config SCHED_CLUSTER
-	bool "Cluster scheduler support"
-	help
-	  Cluster scheduler support improves the CPU scheduler's decision
-	  making when dealing with machines that have clusters of CPUs.
-	  Cluster usually means a couple of CPUs which are placed closely
-	  by sharing mid-level caches, last-level cache tags or internal
-	  busses.
-
-config SCHED_SMT
-	bool "SMT scheduler support"
-	help
-	  Improves the CPU scheduler's decision making when dealing with
-	  MultiThreading at a cost of slightly increased overhead in some
-	  places. If unsure say N here.
-
 config NR_CPUS
 	int "Maximum number of CPUs (2-4096)"
 	range 2 4096
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 30d4bbe68661..b6367ff3a49c 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -6,6 +6,7 @@
  * 2001-2002 Keith Owens
  * Copyright (C) 2012 ARM Ltd.
  */
+#define COMPILE_OFFSETS

 #include <linux/arm_sdei.h>
 #include <linux/sched.h>
diff --git a/arch/csky/kernel/asm-offsets.c b/arch/csky/kernel/asm-offsets.c
index d1e903579473..5525c8e7e1d9 100644
--- a/arch/csky/kernel/asm-offsets.c
+++ b/arch/csky/kernel/asm-offsets.c
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+#define COMPILE_OFFSETS

 #include <linux/sched.h>
 #include <linux/kernel_stat.h>
diff --git a/arch/hexagon/kernel/asm-offsets.c b/arch/hexagon/kernel/asm-offsets.c
index 03a7063f9456..50eea9fa6f13 100644
--- a/arch/hexagon/kernel/asm-offsets.c
+++ b/arch/hexagon/kernel/asm-offsets.c
@@ -8,6 +8,7 @@
  *
  * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
  */
+#define COMPILE_OFFSETS

 #include <linux/compat.h>
 #include <linux/types.h>
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index 0631a6b11281..1f08941fdea4 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -70,6 +70,8 @@ config LOONGARCH
 	select ARCH_SUPPORTS_MSEAL_SYSTEM_MAPPINGS
 	select ARCH_SUPPORTS_NUMA_BALANCING
 	select ARCH_SUPPORTS_RT
+	select ARCH_SUPPORTS_SCHED_SMT if SMP
+	select ARCH_SUPPORTS_SCHED_MC if SMP
 	select ARCH_USE_BUILTIN_BSWAP
 	select ARCH_USE_CMPXCHG_LOCKREF
 	select ARCH_USE_MEMTEST
@@ -452,23 +454,6 @@ config EFI_STUB
 	  This kernel feature allows the kernel to be loaded directly by
 	  EFI firmware without the use of a bootloader.

-config SCHED_SMT
-	bool "SMT scheduler support"
-	depends on SMP
-	default y
-	help
-	  Improves scheduler's performance when there are multiple
-	  threads in one physical core.
-
-config SCHED_MC
-	bool "Multi-core scheduler support"
-	depends on SMP
-	default y
-	help
-	  Multi-core scheduler support improves the CPU scheduler's decision
-	  making when dealing with multi-core CPU chips at a cost of slightly
-	  increased overhead in some places.
-
 config SMP
 	bool "Multi-Processing support"
 	help
diff --git a/arch/loongarch/kernel/asm-offsets.c b/arch/loongarch/kernel/asm-offsets.c
index db1e4bb26b6a..3017c7157600 100644
--- a/arch/loongarch/kernel/asm-offsets.c
+++ b/arch/loongarch/kernel/asm-offsets.c
@@ -4,6 +4,8 @@
  *
  * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
  */
+#define COMPILE_OFFSETS
+
 #include <linux/types.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
diff --git a/arch/m68k/kernel/asm-offsets.c b/arch/m68k/kernel/asm-offsets.c
index 906d73230537..67a1990f9d74 100644
--- a/arch/m68k/kernel/asm-offsets.c
+++ b/arch/m68k/kernel/asm-offsets.c
@@ -9,6 +9,7 @@
  * #defines from the assembly-language output.
  */

+#define COMPILE_OFFSETS
 #define ASM_OFFSETS_C

 #include <linux/stddef.h>
diff --git a/arch/microblaze/kernel/asm-offsets.c b/arch/microblaze/kernel/asm-offsets.c
index 104c3ac5f30c..b4b67d58e7f6 100644
--- a/arch/microblaze/kernel/asm-offsets.c
+++ b/arch/microblaze/kernel/asm-offsets.c
@@ -7,6 +7,7 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  */
+#define COMPILE_OFFSETS

 #include <linux/init.h>
 #include <linux/stddef.h>
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index caf508f6e9ec..447b2fcaa316 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2223,7 +2223,7 @@ config MIPS_MT_SMP
 	select SMP
 	select SMP_UP
 	select SYS_SUPPORTS_SMP
-	select SYS_SUPPORTS_SCHED_SMT
+	select ARCH_SUPPORTS_SCHED_SMT
 	select MIPS_PERF_SHARED_TC_COUNTERS
 	help
 	  This is a kernel model which is known as SMVP. This is supported
@@ -2235,18 +2235,6 @@
 config MIPS_MT
 	bool

-config SCHED_SMT
-	bool "SMT (multithreading) scheduler support"
-	depends on SYS_SUPPORTS_SCHED_SMT
-	default n
-	help
-	  SMT scheduler support improves the CPU scheduler's decision making
-	  when dealing with MIPS MT enabled cores at a cost of slightly
-	  increased overhead in some places. If unsure say N here.
-
-config SYS_SUPPORTS_SCHED_SMT
-	bool
-
 config SYS_SUPPORTS_MULTITHREADING
 	bool

@@ -2318,7 +2306,7 @@ config MIPS_CPS
 	select HOTPLUG_CORE_SYNC_DEAD if HOTPLUG_CPU
 	select SYNC_R4K if (CEVT_R4K || CSRC_R4K)
 	select SYS_SUPPORTS_HOTPLUG_CPU
-	select SYS_SUPPORTS_SCHED_SMT if CPU_MIPSR6
+	select ARCH_SUPPORTS_SCHED_SMT if CPU_MIPSR6
 	select SYS_SUPPORTS_SMP
 	select WEAK_ORDERING
 	select GENERIC_IRQ_MIGRATION if HOTPLUG_CPU
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 1e29efcba46e..5debd9a3854a 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -9,6 +9,8 @@
  * Kevin Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
  * Copyright (C) 2000 MIPS Technologies, Inc.
  */
+#define COMPILE_OFFSETS
+
 #include <linux/compat.h>
 #include <linux/types.h>
 #include <linux/sched.h>
diff --git a/arch/nios2/kernel/asm-offsets.c b/arch/nios2/kernel/asm-offsets.c
index e3d9b7b6fb48..88190b503ce5 100644
--- a/arch/nios2/kernel/asm-offsets.c
+++ b/arch/nios2/kernel/asm-offsets.c
@@ -2,6 +2,7 @@
 /*
  * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
  */
+#define COMPILE_OFFSETS

 #include <linux/stddef.h>
 #include <linux/sched.h>
diff --git a/arch/openrisc/kernel/asm-offsets.c b/arch/openrisc/kernel/asm-offsets.c
index 710651d5aaae..3cc826f2216b 100644
--- a/arch/openrisc/kernel/asm-offsets.c
+++ b/arch/openrisc/kernel/asm-offsets.c
@@ -18,6 +18,7 @@
  * compile this file to assembler, and then extract the
  * #defines from the assembly-language output.
  */
+#define COMPILE_OFFSETS

 #include <linux/signal.h>
 #include <linux/sched.h>
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 2efa4b08b7b8..0940c162f1f7 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -44,6 +44,7 @@ config PARISC
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_ARCH_TOPOLOGY if SMP
+	select ARCH_SUPPORTS_SCHED_MC if SMP && PA8X00
 	select GENERIC_CPU_DEVICES if !SMP
 	select GENERIC_LIB_DEVMEM_IS_ALLOWED
 	select SYSCTL_ARCH_UNALIGN_ALLOW
@@ -319,14 +320,6 @@ config SMP
 	  If you don't know what to do here, say N.

-config SCHED_MC
-	bool "Multi-core scheduler support"
-	depends on GENERIC_ARCH_TOPOLOGY && PA8X00
-	help
-	  Multi-core scheduler support improves the CPU scheduler's decision
-	  making when dealing with multi-core CPU chips at a cost of slightly
-	  increased overhead in some places. If unsure say N here.
-
 config IRQSTACKS
 	bool "Use separate kernel stacks when processing interrupts"
 	default y
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index 757816a7bd4b..9abfe65492c6 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -13,6 +13,7 @@
  * Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
  * Copyright (C) 2003 James Bottomley <jejb at parisc-linux.org>
  */
+#define COMPILE_OFFSETS

 #include <linux/types.h>
 #include <linux/sched.h>
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 325c1171894d..12fb3da59700 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -170,6 +170,9 @@ config PPC
 	select ARCH_STACKWALK
 	select ARCH_SUPPORTS_ATOMIC_RMW
 	select ARCH_SUPPORTS_DEBUG_PAGEALLOC if PPC_BOOK3S || PPC_8xx
+	select ARCH_SUPPORTS_SCHED_MC if SMP
+	select ARCH_SUPPORTS_SCHED_SMT if PPC64 && SMP
+	select SCHED_MC if ARCH_SUPPORTS_SCHED_MC
 	select ARCH_USE_BUILTIN_BSWAP
 	select ARCH_USE_CMPXCHG_LOCKREF if PPC64
 	select ARCH_USE_MEMTEST
@@ -965,14 +968,6 @@ config PPC_PROT_SAO_LPAR
 config PPC_COPRO_BASE
 	bool

-config SCHED_SMT
-	bool "SMT (Hyperthreading) scheduler support"
-	depends on PPC64 && SMP
-	help
-	  SMT scheduler support improves the CPU scheduler's decision making
-	  when dealing with POWER5 cpus at a cost of slightly increased
-	  overhead in some places. If unsure say N here.
-
 config PPC_DENORMALISATION
 	bool "PowerPC denormalisation exception handling"
 	depends on PPC_BOOK3S_64
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index da15b5efe807..f19ca44512d1 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -131,6 +131,8 @@ static inline int cpu_to_coregroup_id(int cpu)
 #ifdef CONFIG_SMP
 #include <asm/cputable.h>

+struct cpumask *cpu_coregroup_mask(int cpu);
+
 #ifdef CONFIG_PPC64
 #include <asm/smp.h>
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index b3048f6d3822..a4bc80b30410 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -8,6 +8,7 @@
  * compile this file to assembler, and then extract the
  * #defines from the assembly-language output.
  */
+#define COMPILE_OFFSETS

 #include <linux/compat.h>
 #include <linux/signal.h>
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index f59e4b9cc207..68edb66c2964 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -1028,19 +1028,19 @@ static int powerpc_shared_proc_flags(void)
  * We can't just pass cpu_l2_cache_mask() directly because
  * returns a non-const pointer and the compiler barfs on that.
  */
-static const struct cpumask *shared_cache_mask(int cpu)
+static const struct cpumask *tl_cache_mask(struct sched_domain_topology_level *tl, int cpu)
 {
 	return per_cpu(cpu_l2_cache_map, cpu);
 }

 #ifdef CONFIG_SCHED_SMT
-static const struct cpumask *smallcore_smt_mask(int cpu)
+static const struct cpumask *tl_smallcore_smt_mask(struct sched_domain_topology_level *tl, int cpu)
 {
 	return cpu_smallcore_mask(cpu);
 }
 #endif

-static struct cpumask *cpu_coregroup_mask(int cpu)
+struct cpumask *cpu_coregroup_mask(int cpu)
 {
 	return per_cpu(cpu_coregroup_map, cpu);
 }
@@ -1054,11 +1054,6 @@ static bool has_coregroup_support(void)
 	return coregroup_enabled;
 }

-static const struct cpumask *cpu_mc_mask(int cpu)
-{
-	return cpu_coregroup_mask(cpu);
-}
-
 static int __init init_big_cores(void)
 {
 	int cpu;
@@ -1448,7 +1443,7 @@ static bool update_mask_by_l2(int cpu, cpumask_var_t *mask)
 		return false;
 	}

-	cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu));
+	cpumask_and(*mask, cpu_online_mask, cpu_node_mask(cpu));

 	/* Update l2-cache mask with all the CPUs that are part of submask */
 	or_cpumasks_related(cpu, cpu, submask_fn, cpu_l2_cache_mask);
@@ -1538,7 +1533,7 @@ static void update_coregroup_mask(int cpu, cpumask_var_t *mask)
 		return;
 	}

-	cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu));
+	cpumask_and(*mask, cpu_online_mask, cpu_node_mask(cpu));

 	/* Update coregroup mask with all the CPUs that are part of submask */
 	or_cpumasks_related(cpu, cpu, submask_fn, cpu_coregroup_mask);
@@ -1601,7 +1596,7 @@ static void add_cpu_to_masks(int cpu)

 	/* If chip_id is -1; limit the cpu_core_mask to within PKG */
 	if (chip_id == -1)
-		cpumask_and(mask, mask, cpu_cpu_mask(cpu));
+		cpumask_and(mask, mask, cpu_node_mask(cpu));

 	for_each_cpu(i, mask) {
 		if (chip_id == cpu_to_chip_id(i)) {
@@ -1701,22 +1696,22 @@ static void __init build_sched_topology(void)
 	if (has_big_cores) {
 		pr_info("Big cores detected but using small core scheduling\n");
 		powerpc_topology[i++] =
-			SDTL_INIT(smallcore_smt_mask, powerpc_smt_flags, SMT);
+			SDTL_INIT(tl_smallcore_smt_mask, powerpc_smt_flags, SMT);
 	} else {
-		powerpc_topology[i++] = SDTL_INIT(cpu_smt_mask, powerpc_smt_flags, SMT);
+		powerpc_topology[i++] = SDTL_INIT(tl_smt_mask, powerpc_smt_flags, SMT);
 	}
 #endif
 	if (shared_caches) {
 		powerpc_topology[i++] =
-			SDTL_INIT(shared_cache_mask, powerpc_shared_cache_flags, CACHE);
+			SDTL_INIT(tl_cache_mask, powerpc_shared_cache_flags, CACHE);
 	}

 	if (has_coregroup_support()) {
 		powerpc_topology[i++] =
-			SDTL_INIT(cpu_mc_mask, powerpc_shared_proc_flags, MC);
+			SDTL_INIT(tl_mc_mask, powerpc_shared_proc_flags, MC);
 	}

-	powerpc_topology[i++] = SDTL_INIT(cpu_cpu_mask, powerpc_shared_proc_flags, PKG);
+	powerpc_topology[i++] = SDTL_INIT(tl_pkg_mask, powerpc_shared_proc_flags, PKG);

 	/* There must be one trailing NULL entry left. */
 	BUG_ON(i >= ARRAY_SIZE(powerpc_topology) - 1);
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 28d00744cbb5..613ab57cda74 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -74,6 +74,7 @@ config RISCV
 	select ARCH_SUPPORTS_PER_VMA_LOCK if MMU
 	select ARCH_SUPPORTS_RT
 	select ARCH_SUPPORTS_SHADOW_CALL_STACK if HAVE_SHADOW_CALL_STACK
+	select ARCH_SUPPORTS_SCHED_MC if SMP
 	select ARCH_USE_CMPXCHG_LOCKREF if 64BIT
 	select ARCH_USE_MEMTEST
 	select ARCH_USE_QUEUED_RWLOCKS
@@ -455,14 +456,6 @@ config SMP
 	  If you don't know what to do here, say N.

-config SCHED_MC
-	bool "Multi-core scheduler support"
-	depends on SMP
-	help
-	  Multi-core scheduler support improves the CPU scheduler's decision
-	  making when dealing with multi-core CPU chips at a cost of slightly
-	  increased overhead in some places. If unsure say N here.
-
 config NR_CPUS
