diff options
Diffstat (limited to 'arch/loongarch/kernel')
27 files changed, 235 insertions, 132 deletions
diff --git a/arch/loongarch/kernel/Makefile.syscalls b/arch/loongarch/kernel/Makefile.syscalls index cd46c2b69c7f..06f160502537 100644 --- a/arch/loongarch/kernel/Makefile.syscalls +++ b/arch/loongarch/kernel/Makefile.syscalls @@ -1,5 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 -# No special ABIs on loongarch so far -syscall_abis_32 += -syscall_abis_64 += +syscall_abis_32 += memfd_secret +syscall_abis_64 += memfd_secret diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c index 1367ca759468..058f0dbe8e8f 100644 --- a/arch/loongarch/kernel/acpi.c +++ b/arch/loongarch/kernel/acpi.c @@ -385,3 +385,12 @@ int acpi_unmap_cpu(int cpu) EXPORT_SYMBOL(acpi_unmap_cpu); #endif /* CONFIG_ACPI_HOTPLUG_CPU */ + +int acpi_get_cpu_uid(unsigned int cpu, u32 *uid) +{ + if (cpu >= nr_cpu_ids) + return -EINVAL; + *uid = acpi_core_pic[cpu_logical_map(cpu)].processor_id; + return 0; +} +EXPORT_SYMBOL_GPL(acpi_get_cpu_uid); diff --git a/arch/loongarch/kernel/asm-offsets.c b/arch/loongarch/kernel/asm-offsets.c index 3017c7157600..2cc953f113ac 100644 --- a/arch/loongarch/kernel/asm-offsets.c +++ b/arch/loongarch/kernel/asm-offsets.c @@ -16,6 +16,7 @@ #include <asm/ptrace.h> #include <asm/processor.h> #include <asm/ftrace.h> +#include <asm/sigframe.h> #include <vdso/datapage.h> static void __used output_ptreg_defines(void) @@ -220,6 +221,7 @@ static void __used output_sc_defines(void) COMMENT("Linux sigcontext offsets."); OFFSET(SC_REGS, sigcontext, sc_regs); OFFSET(SC_PC, sigcontext, sc_pc); + OFFSET(RT_SIGFRAME_SC, rt_sigframe, rs_uctx.uc_mcontext); BLANK(); } diff --git a/arch/loongarch/kernel/cpu-probe.c b/arch/loongarch/kernel/cpu-probe.c index 08a227034042..74d31f260dfd 100644 --- a/arch/loongarch/kernel/cpu-probe.c +++ b/arch/loongarch/kernel/cpu-probe.c @@ -7,6 +7,7 @@ #include <linux/init.h> #include <linux/kernel.h> #include <linux/ptrace.h> +#include <linux/cpu.h> #include <linux/smp.h> #include <linux/stddef.h> #include <linux/export.h> @@ -177,6 +178,14 @@ 
static void cpu_probe_common(struct cpuinfo_loongarch *c) c->options |= LOONGARCH_CPU_LAM; elf_hwcap |= HWCAP_LOONGARCH_LAM; } + if (config & CPUCFG2_LAM_BH) { + c->options |= LOONGARCH_CPU_LAM_BH; + elf_hwcap |= HWCAP_LOONGARCH_LAM_BH; + } + if (config & CPUCFG2_SCQ) { + c->options |= LOONGARCH_CPU_SCQ; + elf_hwcap |= HWCAP_LOONGARCH_SCQ; + } if (config & CPUCFG2_FP) { c->options |= LOONGARCH_CPU_FPU; elf_hwcap |= HWCAP_LOONGARCH_FPU; @@ -398,3 +407,9 @@ void cpu_probe(void) cpu_report(); } + +ssize_t cpu_show_spectre_v1(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "Mitigation: __user pointer sanitization\n"); +} diff --git a/arch/loongarch/kernel/efi.c b/arch/loongarch/kernel/efi.c index 52c21c895318..69dd83f8082f 100644 --- a/arch/loongarch/kernel/efi.c +++ b/arch/loongarch/kernel/efi.c @@ -18,7 +18,7 @@ #include <linux/kobject.h> #include <linux/memblock.h> #include <linux/reboot.h> -#include <linux/screen_info.h> +#include <linux/sysfb.h> #include <linux/uaccess.h> #include <asm/early_ioremap.h> @@ -72,30 +72,31 @@ bool efi_poweroff_required(void) (acpi_gbl_reduced_hardware || acpi_no_s5); } -unsigned long __initdata screen_info_table = EFI_INVALID_TABLE_ADDR; +unsigned long __initdata primary_display_table = EFI_INVALID_TABLE_ADDR; #if defined(CONFIG_SYSFB) || defined(CONFIG_EFI_EARLYCON) -struct screen_info screen_info __section(".data"); -EXPORT_SYMBOL_GPL(screen_info); +struct sysfb_display_info sysfb_primary_display __section(".data"); +EXPORT_SYMBOL_GPL(sysfb_primary_display); #endif -static void __init init_screen_info(void) +static void __init init_primary_display(void) { - struct screen_info *si; + struct sysfb_display_info *dpy; - if (screen_info_table == EFI_INVALID_TABLE_ADDR) + if (primary_display_table == EFI_INVALID_TABLE_ADDR) return; - si = early_memremap(screen_info_table, sizeof(*si)); - if (!si) { - pr_err("Could not map screen_info config table\n"); + dpy = 
early_memremap(primary_display_table, sizeof(*dpy)); + if (!dpy) { + pr_err("Could not map primary_display config table\n"); return; } - screen_info = *si; - memset(si, 0, sizeof(*si)); - early_memunmap(si, sizeof(*si)); + sysfb_primary_display = *dpy; + memset(dpy, 0, sizeof(*dpy)); + early_memunmap(dpy, sizeof(*dpy)); - memblock_reserve(__screen_info_lfb_base(&screen_info), screen_info.lfb_size); + memblock_reserve(__screen_info_lfb_base(&sysfb_primary_display.screen), + sysfb_primary_display.screen.lfb_size); } void __init efi_init(void) @@ -129,7 +130,7 @@ void __init efi_init(void) set_bit(EFI_CONFIG_TABLES, &efi.flags); if (IS_ENABLED(CONFIG_EFI_EARLYCON) || IS_ENABLED(CONFIG_SYSFB)) - init_screen_info(); + init_primary_display(); if (boot_memmap == EFI_INVALID_TABLE_ADDR) return; diff --git a/arch/loongarch/kernel/env.c b/arch/loongarch/kernel/env.c index 841206fde3ab..652456768b55 100644 --- a/arch/loongarch/kernel/env.c +++ b/arch/loongarch/kernel/env.c @@ -42,16 +42,15 @@ static int __init init_cpu_fullname(void) int cpu, ret; char *cpuname; const char *model; - struct device_node *root; /* Parsing cpuname from DTS model property */ - root = of_find_node_by_path("/"); - ret = of_property_read_string(root, "model", &model); + ret = of_property_read_string(of_root, "model", &model); if (ret == 0) { cpuname = kstrdup(model, GFP_KERNEL); + if (!cpuname) + return -ENOMEM; loongson_sysconf.cpuname = strsep(&cpuname, " "); } - of_node_put(root); if (loongson_sysconf.cpuname && !strncmp(loongson_sysconf.cpuname, "Loongson", 8)) { for (cpu = 0; cpu < NR_CPUS; cpu++) diff --git a/arch/loongarch/kernel/fpu.S b/arch/loongarch/kernel/fpu.S index f225dcc5b530..bf7d6b8bf600 100644 --- a/arch/loongarch/kernel/fpu.S +++ b/arch/loongarch/kernel/fpu.S @@ -97,7 +97,7 @@ .endm #ifdef CONFIG_32BIT - .macro sc_save_fcc thread tmp0 tmp1 + .macro sc_save_fcc base tmp0 tmp1 movcf2gr \tmp0, $fcc0 move \tmp1, \tmp0 movcf2gr \tmp0, $fcc1 @@ -106,7 +106,7 @@ bstrins.w \tmp1, \tmp0, 
23, 16 movcf2gr \tmp0, $fcc3 bstrins.w \tmp1, \tmp0, 31, 24 - EX st.w \tmp1, \thread, THREAD_FCC + EX st.w \tmp1, \base, 0 movcf2gr \tmp0, $fcc4 move \tmp1, \tmp0 movcf2gr \tmp0, $fcc5 @@ -115,11 +115,11 @@ bstrins.w \tmp1, \tmp0, 23, 16 movcf2gr \tmp0, $fcc7 bstrins.w \tmp1, \tmp0, 31, 24 - EX st.w \tmp1, \thread, (THREAD_FCC + 4) + EX st.w \tmp1, \base, 4 .endm - .macro sc_restore_fcc thread tmp0 tmp1 - EX ld.w \tmp0, \thread, THREAD_FCC + .macro sc_restore_fcc base tmp0 tmp1 + EX ld.w \tmp0, \base, 0 bstrpick.w \tmp1, \tmp0, 7, 0 movgr2cf $fcc0, \tmp1 bstrpick.w \tmp1, \tmp0, 15, 8 @@ -128,7 +128,7 @@ movgr2cf $fcc2, \tmp1 bstrpick.w \tmp1, \tmp0, 31, 24 movgr2cf $fcc3, \tmp1 - EX ld.w \tmp0, \thread, (THREAD_FCC + 4) + EX ld.w \tmp0, \base, 4 bstrpick.w \tmp1, \tmp0, 7, 0 movgr2cf $fcc4, \tmp1 bstrpick.w \tmp1, \tmp0, 15, 8 diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S index 7f288e89573b..4eed7bc312a8 100644 --- a/arch/loongarch/kernel/head.S +++ b/arch/loongarch/kernel/head.S @@ -126,14 +126,6 @@ SYM_CODE_START(smpboot_entry) LONG_LI t1, CSR_STFILL csrxchg t0, t1, LOONGARCH_CSR_IMPCTL1 #endif - /* Enable PG */ - li.w t0, 0xb0 # PLV=0, IE=0, PG=1 - csrwr t0, LOONGARCH_CSR_CRMD - li.w t0, 0x04 # PLV=0, PIE=1, PWE=0 - csrwr t0, LOONGARCH_CSR_PRMD - li.w t0, 0x00 # FPE=0, SXE=0, ASXE=0, BTE=0 - csrwr t0, LOONGARCH_CSR_EUEN - la.pcrel t0, cpuboot_data ld.d sp, t0, CPU_BOOT_STACK ld.d tp, t0, CPU_BOOT_TINFO diff --git a/arch/loongarch/kernel/image-vars.h b/arch/loongarch/kernel/image-vars.h index 41ddcf56d21c..e557ebd46c2b 100644 --- a/arch/loongarch/kernel/image-vars.h +++ b/arch/loongarch/kernel/image-vars.h @@ -12,7 +12,7 @@ __efistub_kernel_entry = kernel_entry; __efistub_kernel_asize = kernel_asize; __efistub_kernel_fsize = kernel_fsize; #if defined(CONFIG_EFI_EARLYCON) || defined(CONFIG_SYSFB) -__efistub_screen_info = screen_info; +__efistub_sysfb_primary_display = sysfb_primary_display; #endif #endif diff --git 
a/arch/loongarch/kernel/inst.c b/arch/loongarch/kernel/inst.c index bf037f0c6b26..0b9228b7c13a 100644 --- a/arch/loongarch/kernel/inst.c +++ b/arch/loongarch/kernel/inst.c @@ -209,6 +209,9 @@ int larch_insn_write(void *addr, u32 insn) int ret; unsigned long flags = 0; + if ((unsigned long)addr & 3) + return -EINVAL; + raw_spin_lock_irqsave(&patch_lock, flags); ret = copy_to_kernel_nofault(addr, &insn, LOONGARCH_INSN_SIZE); raw_spin_unlock_irqrestore(&patch_lock, flags); @@ -221,9 +224,6 @@ int larch_insn_patch_text(void *addr, u32 insn) int ret; u32 *tp = addr; - if ((unsigned long)tp & 3) - return -EINVAL; - ret = larch_insn_write(tp, insn); if (!ret) flush_icache_range((unsigned long)tp, @@ -246,32 +246,51 @@ static int text_copy_cb(void *data) if (smp_processor_id() == copy->cpu) { ret = copy_to_kernel_nofault(copy->dst, copy->src, copy->len); - if (ret) + if (ret) { pr_err("%s: operation failed\n", __func__); + return ret; + } } flush_icache_range((unsigned long)copy->dst, (unsigned long)copy->dst + copy->len); - return ret; + return 0; } int larch_insn_text_copy(void *dst, void *src, size_t len) { int ret = 0; + int err = 0; size_t start, end; struct insn_copy copy = { .dst = dst, .src = src, .len = len, - .cpu = smp_processor_id(), + .cpu = raw_smp_processor_id(), }; + /* + * Ensure copy.cpu won't be hot removed before stop_machine. + * If it is removed nobody will really update the text. 
+ */ + lockdep_assert_cpus_held(); + start = round_down((size_t)dst, PAGE_SIZE); end = round_up((size_t)dst + len, PAGE_SIZE); - set_memory_rw(start, (end - start) / PAGE_SIZE); - ret = stop_machine(text_copy_cb, &copy, cpu_online_mask); - set_memory_rox(start, (end - start) / PAGE_SIZE); + err = set_memory_rw(start, (end - start) / PAGE_SIZE); + if (err) { + pr_info("%s: set_memory_rw() failed\n", __func__); + return err; + } + + ret = stop_machine_cpuslocked(text_copy_cb, &copy, cpu_online_mask); + + err = set_memory_rox(start, (end - start) / PAGE_SIZE); + if (err) { + pr_info("%s: set_memory_rox() failed\n", __func__); + return err; + } return ret; } diff --git a/arch/loongarch/kernel/irq.c b/arch/loongarch/kernel/irq.c index 80946cafaec1..7bf68a7a5f4b 100644 --- a/arch/loongarch/kernel/irq.c +++ b/arch/loongarch/kernel/irq.c @@ -11,6 +11,7 @@ #include <linux/irqchip.h> #include <linux/kernel_stat.h> #include <linux/proc_fs.h> +#include <linux/minmax.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/seq_file.h> @@ -99,6 +100,11 @@ int __init arch_probe_nr_irqs(void) return NR_IRQS_LEGACY; } +unsigned int arch_dynirq_lower_bound(unsigned int from) +{ + return MAX(from, NR_IRQS_LEGACY); +} + void __init init_IRQ(void) { int i; diff --git a/arch/loongarch/kernel/jump_label.c b/arch/loongarch/kernel/jump_label.c index 31891214b767..24a3f4d8540c 100644 --- a/arch/loongarch/kernel/jump_label.c +++ b/arch/loongarch/kernel/jump_label.c @@ -6,9 +6,10 @@ */ #include <linux/kernel.h> #include <linux/jump_label.h> +#include <asm/cacheflush.h> #include <asm/inst.h> -void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type) +bool arch_jump_label_transform_queue(struct jump_entry *entry, enum jump_label_type type) { u32 insn; void *addr = (void *)jump_entry_code(entry); @@ -18,5 +19,12 @@ void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_ty else insn = larch_insn_gen_nop(); - larch_insn_patch_text(addr, insn); + 
larch_insn_write(addr, insn); + + return true; +} + +void arch_jump_label_transform_apply(void) +{ + flush_icache_all(); } diff --git a/arch/loongarch/kernel/kgdb.c b/arch/loongarch/kernel/kgdb.c index 7be5b4c0c900..17664a6043b1 100644 --- a/arch/loongarch/kernel/kgdb.c +++ b/arch/loongarch/kernel/kgdb.c @@ -697,7 +697,7 @@ void kgdb_arch_late(void) continue; breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL, NULL); - if (IS_ERR((void * __force)breakinfo[i].pev)) { + if (IS_ERR_PCPU(breakinfo[i].pev)) { pr_err("kgdb: Could not allocate hw breakpoints.\n"); breakinfo[i].pev = NULL; return; diff --git a/arch/loongarch/kernel/machine_kexec_file.c b/arch/loongarch/kernel/machine_kexec_file.c index fb57026f5f25..5584b798ba46 100644 --- a/arch/loongarch/kernel/machine_kexec_file.c +++ b/arch/loongarch/kernel/machine_kexec_file.c @@ -68,7 +68,7 @@ static int prepare_elf_headers(void **addr, unsigned long *sz) for_each_mem_range(i, &start, &end) nr_ranges++; - cmem = kmalloc(struct_size(cmem, ranges, nr_ranges), GFP_KERNEL); + cmem = kmalloc_flex(*cmem, ranges, nr_ranges); if (!cmem) return -ENOMEM; diff --git a/arch/loongarch/kernel/paravirt.c b/arch/loongarch/kernel/paravirt.c index b1b51f920b23..10821cce554c 100644 --- a/arch/loongarch/kernel/paravirt.c +++ b/arch/loongarch/kernel/paravirt.c @@ -6,20 +6,13 @@ #include <linux/kvm_para.h> #include <linux/reboot.h> #include <linux/static_call.h> +#include <linux/sched/cputime.h> #include <asm/paravirt.h> static int has_steal_clock; -struct static_key paravirt_steal_enabled; -struct static_key paravirt_steal_rq_enabled; -static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64); +DEFINE_STATIC_KEY_FALSE(virt_preempt_key); DEFINE_STATIC_KEY_FALSE(virt_spin_lock_key); - -static u64 native_steal_clock(int cpu) -{ - return 0; -} - -DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock); +DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64); static bool steal_acc = true; @@ -308,6 +301,9 @@ int 
__init pv_time_init(void) pr_err("Failed to install cpu hotplug callbacks\n"); return r; } + + if (kvm_para_has_feature(KVM_FEATURE_PREEMPT)) + static_branch_enable(&virt_preempt_key); #endif static_call_update(pv_steal_clock, paravt_steal_clock); @@ -318,7 +314,10 @@ int __init pv_time_init(void) static_key_slow_inc(&paravirt_steal_rq_enabled); #endif - pr_info("Using paravirt steal-time\n"); + if (static_key_enabled(&virt_preempt_key)) + pr_info("Using paravirt steal-time with preempt enabled\n"); + else + pr_info("Using paravirt steal-time with preempt disabled\n"); return 0; } diff --git a/arch/loongarch/kernel/perf_event.c b/arch/loongarch/kernel/perf_event.c index 9d257c8519c9..e34a6fb33e11 100644 --- a/arch/loongarch/kernel/perf_event.c +++ b/arch/loongarch/kernel/perf_event.c @@ -626,6 +626,18 @@ static const struct loongarch_perf_event *loongarch_pmu_map_cache_event(u64 conf return pev; } +static inline bool loongarch_pmu_event_requires_counter(const struct perf_event *event) +{ + switch (event->attr.type) { + case PERF_TYPE_HARDWARE: + case PERF_TYPE_HW_CACHE: + case PERF_TYPE_RAW: + return true; + default: + return false; + } +} + static int validate_group(struct perf_event *event) { struct cpu_hw_events fake_cpuc; @@ -633,15 +645,18 @@ static int validate_group(struct perf_event *event) memset(&fake_cpuc, 0, sizeof(fake_cpuc)); - if (loongarch_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0) + if (loongarch_pmu_event_requires_counter(leader) && + loongarch_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0) return -EINVAL; for_each_sibling_event(sibling, leader) { - if (loongarch_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0) + if (loongarch_pmu_event_requires_counter(sibling) && + loongarch_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0) return -EINVAL; } - if (loongarch_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0) + if (loongarch_pmu_event_requires_counter(event) && + loongarch_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0) return -EINVAL; return 0; } diff 
--git a/arch/loongarch/kernel/proc.c b/arch/loongarch/kernel/proc.c index a8800d20e11b..d4ce5b585453 100644 --- a/arch/loongarch/kernel/proc.c +++ b/arch/loongarch/kernel/proc.c @@ -50,32 +50,51 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, "Address Sizes\t\t: %d bits physical, %d bits virtual\n", cpu_pabits + 1, cpu_vabits + 1); - seq_printf(m, "ISA\t\t\t:"); + seq_puts(m, "ISA\t\t\t:"); if (isa & LOONGARCH_CPU_ISA_LA32R) - seq_printf(m, " loongarch32r"); + seq_puts(m, " loongarch32r"); if (isa & LOONGARCH_CPU_ISA_LA32S) - seq_printf(m, " loongarch32s"); + seq_puts(m, " loongarch32s"); if (isa & LOONGARCH_CPU_ISA_LA64) - seq_printf(m, " loongarch64"); - seq_printf(m, "\n"); + seq_puts(m, " loongarch64"); + seq_puts(m, "\n"); - seq_printf(m, "Features\t\t:"); - if (cpu_has_cpucfg) seq_printf(m, " cpucfg"); - if (cpu_has_lam) seq_printf(m, " lam"); - if (cpu_has_ual) seq_printf(m, " ual"); - if (cpu_has_fpu) seq_printf(m, " fpu"); - if (cpu_has_lsx) seq_printf(m, " lsx"); - if (cpu_has_lasx) seq_printf(m, " lasx"); - if (cpu_has_crc32) seq_printf(m, " crc32"); - if (cpu_has_complex) seq_printf(m, " complex"); - if (cpu_has_crypto) seq_printf(m, " crypto"); - if (cpu_has_ptw) seq_printf(m, " ptw"); - if (cpu_has_lspw) seq_printf(m, " lspw"); - if (cpu_has_lvz) seq_printf(m, " lvz"); - if (cpu_has_lbt_x86) seq_printf(m, " lbt_x86"); - if (cpu_has_lbt_arm) seq_printf(m, " lbt_arm"); - if (cpu_has_lbt_mips) seq_printf(m, " lbt_mips"); - seq_printf(m, "\n"); + seq_puts(m, "Features\t\t:"); + if (cpu_has_cpucfg) + seq_puts(m, " cpucfg"); + if (cpu_has_lam) + seq_puts(m, " lam"); + if (cpu_has_lam_bh) + seq_puts(m, " lam_bh"); + if (cpu_has_scq) + seq_puts(m, " scq"); + if (cpu_has_ual) + seq_puts(m, " ual"); + if (cpu_has_fpu) + seq_puts(m, " fpu"); + if (cpu_has_lsx) + seq_puts(m, " lsx"); + if (cpu_has_lasx) + seq_puts(m, " lasx"); + if (cpu_has_crc32) + seq_puts(m, " crc32"); + if (cpu_has_complex) + seq_puts(m, " complex"); + if 
(cpu_has_crypto) + seq_puts(m, " crypto"); + if (cpu_has_ptw) + seq_puts(m, " ptw"); + if (cpu_has_lspw) + seq_puts(m, " lspw"); + if (cpu_has_lvz) + seq_puts(m, " lvz"); + if (cpu_has_lbt_x86) + seq_puts(m, " lbt_x86"); + if (cpu_has_lbt_arm) + seq_puts(m, " lbt_arm"); + if (cpu_has_lbt_mips) + seq_puts(m, " lbt_mips"); + seq_puts(m, "\n"); seq_printf(m, "Hardware Watchpoint\t: %s", str_yes_no(cpu_has_watch)); if (cpu_has_watch) { @@ -83,7 +102,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) cpu_data[n].watch_ireg_count, cpu_data[n].watch_dreg_count); } - seq_printf(m, "\n\n"); + seq_puts(m, "\n\n"); return 0; } diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c index 4ac1c3086152..5505fc355e1b 100644 --- a/arch/loongarch/kernel/process.c +++ b/arch/loongarch/kernel/process.c @@ -52,6 +52,7 @@ #include <asm/switch_to.h> #include <asm/unwind.h> #include <asm/vdso.h> +#include <asm/vdso/vdso.h> #ifdef CONFIG_STACKPROTECTOR #include <linux/stackprotector.h> @@ -135,6 +136,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) return 0; } + dst->thread.fpu.fcsr = src->thread.fpu.fcsr; + if (!used_math()) memcpy(dst, src, offsetof(struct task_struct, thread.fpu.fpr)); else diff --git a/arch/loongarch/kernel/relocate.c b/arch/loongarch/kernel/relocate.c index 82aa3f035927..16f6a9b39659 100644 --- a/arch/loongarch/kernel/relocate.c +++ b/arch/loongarch/kernel/relocate.c @@ -128,24 +128,28 @@ static inline __init unsigned long get_random_boot(void) static int __init nokaslr(char *p) { - pr_info("KASLR is disabled.\n"); - - return 0; /* Print a notice and silence the boot warning */ + return 0; /* Just silence the boot warning */ } early_param("nokaslr", nokaslr); +#define KASLR_DISABLED_MESSAGE "KASLR is disabled by %s in %s cmdline.\n" + static inline __init bool kaslr_disabled(void) { char *str; const char *builtin_cmdline = CONFIG_CMDLINE; str = strstr(builtin_cmdline, "nokaslr"); - if (str == 
builtin_cmdline || (str > builtin_cmdline && *(str - 1) == ' ')) + if (str == builtin_cmdline || (str > builtin_cmdline && *(str - 1) == ' ')) { + pr_info(KASLR_DISABLED_MESSAGE, "\'nokaslr\'", "built-in"); return true; + } str = strstr(boot_command_line, "nokaslr"); - if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' ')) + if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' ')) { + pr_info(KASLR_DISABLED_MESSAGE, "\'nokaslr\'", "bootloader"); return true; + } #ifdef CONFIG_HIBERNATION str = strstr(builtin_cmdline, "nohibernate"); @@ -165,17 +169,23 @@ static inline __init bool kaslr_disabled(void) return false; str = strstr(builtin_cmdline, "resume="); - if (str == builtin_cmdline || (str > builtin_cmdline && *(str - 1) == ' ')) + if (str == builtin_cmdline || (str > builtin_cmdline && *(str - 1) == ' ')) { + pr_info(KASLR_DISABLED_MESSAGE, "\'resume=\'", "built-in"); return true; + } str = strstr(boot_command_line, "resume="); - if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' ')) + if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' ')) { + pr_info(KASLR_DISABLED_MESSAGE, "\'resume=\'", "bootloader"); return true; + } #endif str = strstr(boot_command_line, "kexec_file"); - if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' ')) + if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' ')) { + pr_info(KASLR_DISABLED_MESSAGE, "\'kexec_file\'", "bootloader"); return true; + } return false; } diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c index 20cb6f306456..839b23edee87 100644 --- a/arch/loongarch/kernel/setup.c +++ b/arch/loongarch/kernel/setup.c @@ -402,14 +402,6 @@ static void __init arch_mem_init(char **cmdline_p) check_kernel_sections_mem(); - /* - * In order to reduce the possibility of kernel panic when failed to - * get IO TLB memory under CONFIG_SWIOTLB, it is better to allocate - * low 
memory as small as possible before swiotlb_init(), so make - * sparse_init() using top-down allocation. - */ - memblock_set_bottom_up(false); - sparse_init(); memblock_set_bottom_up(true); swiotlb_init(true, SWIOTLB_VERBOSE); @@ -421,6 +413,7 @@ static void __init arch_mem_init(char **cmdline_p) PFN_UP(__pa_symbol(&__nosave_end))); memblock_dump_all(); + memblock_set_bottom_up(false); early_memtest(PFN_PHYS(ARCH_PFN_OFFSET), PFN_PHYS(max_low_pfn)); } @@ -477,7 +470,7 @@ static int __init add_legacy_isa_io(struct fwnode_handle *fwnode, unsigned long vaddr; struct logic_pio_hwaddr *range; - range = kzalloc(sizeof(*range), GFP_ATOMIC); + range = kzalloc_obj(*range, GFP_ATOMIC); if (!range) return -ENOMEM; @@ -621,8 +614,6 @@ void __init setup_arch(char **cmdline_p) prefill_possible_map(); #endif - paging_init(); - #ifdef CONFIG_KASAN kasan_init(); #endif diff --git a/arch/loongarch/kernel/signal.c b/arch/loongarch/kernel/signal.c index c9f7ca778364..d4151d2fb82e 100644 --- a/arch/loongarch/kernel/signal.c +++ b/arch/loongarch/kernel/signal.c @@ -35,6 +35,7 @@ #include <asm/cpu-features.h> #include <asm/fpu.h> #include <asm/lbt.h> +#include <asm/sigframe.h> #include <asm/ucontext.h> #include <asm/vdso.h> @@ -51,11 +52,6 @@ #define lock_lbt_owner() ({ preempt_disable(); pagefault_disable(); }) #define unlock_lbt_owner() ({ pagefault_enable(); preempt_enable(); }) -struct rt_sigframe { - struct siginfo rs_info; - struct ucontext rs_uctx; -}; - struct _ctx_layout { struct sctx_info *addr; unsigned int size; diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c index 8b2fcb3fb874..64a048f1b880 100644 --- a/arch/loongarch/kernel/smp.c +++ b/arch/loongarch/kernel/smp.c @@ -365,16 +365,29 @@ void __init loongson_smp_setup(void) void __init loongson_prepare_cpus(unsigned int max_cpus) { int i = 0; + int threads_per_core = 0; parse_acpi_topology(); cpu_data[0].global_id = cpu_logical_map(0); + if (!pptt_enabled) + threads_per_core = 1; + else { + 
for_each_possible_cpu(i) { + if (cpu_to_node(i) != 0) + continue; + if (cpus_are_siblings(0, i)) + threads_per_core++; + } + } + for (i = 0; i < loongson_sysconf.nr_cpus; i++) { set_cpu_present(i, true); csr_mail_send(0, __cpu_logical_map[i], 0); } per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; + cpu_smt_set_num_threads(threads_per_core, threads_per_core); } /* diff --git a/arch/loongarch/kernel/syscall.c b/arch/loongarch/kernel/syscall.c index 1249d82c1cd0..94c1c3b5b0b5 100644 --- a/arch/loongarch/kernel/syscall.c +++ b/arch/loongarch/kernel/syscall.c @@ -9,6 +9,7 @@ #include <linux/entry-common.h> #include <linux/errno.h> #include <linux/linkage.h> +#include <linux/nospec.h> #include <linux/objtool.h> #include <linux/randomize_kstack.h> #include <linux/syscalls.h> @@ -74,21 +75,10 @@ void noinstr __no_stack_protector do_syscall(struct pt_regs *regs) add_random_kstack_offset(); if (nr < NR_syscalls) { - syscall_fn = sys_call_table[nr]; + syscall_fn = sys_call_table[array_index_nospec(nr, NR_syscalls)]; regs->regs[4] = syscall_fn(regs->orig_a0, regs->regs[5], regs->regs[6], regs->regs[7], regs->regs[8], regs->regs[9]); } - /* - * This value will get limited by KSTACK_OFFSET_MAX(), which is 10 - * bits. The actual entropy will be further reduced by the compiler - * when applying stack alignment constraints: 16-bytes (i.e. 4-bits) - * aligned, which will remove the 4 low bits from any entropy chosen - * here. - * - * The resulting 6 bits of entropy is seen in SP[9:4]. - */ - choose_r |
