Unverified commit a3788a37, authored by openeuler-ci-bot, committed by Gitee

!1665 arm64: Add framework to turn an IPI as NMI

Merge Pull Request from: @ci-robot 
 
PR sync from: Ruan Jinjie <ruanjinjie@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/EHN6ENC7DMIVST6HGFD53IQMXC7J6VJF/ 
arm64: Add framework to turn an IPI as NMI

Sumit Garg (7):
  arm64: Add framework to turn IPI as NMI
  irqchip/gic-v3: Enable support for SGIs to act as NMIs
  arm64: smp: Assign and setup an IPI as NMI
  nmi: backtrace: Allow runtime arch specific override
  arm64: ipi_nmi: Add support for NMI backtrace
  kgdb: Expose default CPUs roundup fallback mechanism
  arm64: kgdb: Roundup cpus using IPI as NMI

Xiongfeng Wang (1):
  arm64: ipi_nmi: fix compile error when CONFIG_KGDB is disabled


-- 
2.34.1
 
https://gitee.com/openeuler/kernel/issues/I7R4EN 
 
Link: https://gitee.com/openeuler/kernel/pulls/1665 

Reviewed-by: Wei Li <liwei391@huawei.com> 
Reviewed-by: Xiongfeng Wang <wangxiongfeng2@huawei.com> 
Signed-off-by: Wei Li <liwei391@huawei.com> 
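
Overview (editor's note, not part of the merged patches): the series adds a framework that requests a spare GICv3 SGI as a per-CPU pseudo-NMI when the GIC driver supports it (on arm64 this requires CONFIG_ARM64_PSEUDO_NMI and booting with irqchip.gicv3_pseudo_nmi=1), then wires the NMI backtrace and kgdb CPU-roundup paths to it, with a runtime fallback to regular IPIs. A hypothetical consumer of the new <asm/nmi.h> interface might look like the sketch below; everything other than the arm64_supports_nmi()/arm64_send_nmi() API is illustrative.

#include <linux/cpumask.h>
#include <linux/smp.h>

#include <asm/nmi.h>

/*
 * Hypothetical example (not in this PR): interrupt every other online
 * CPU with the NMI IPI when one is available. This mirrors what the
 * new arm64 kgdb_roundup_cpus() in the diff below does.
 */
static void example_nmi_all_other_cpus(void)
{
	struct cpumask mask;

	/* False if request_percpu_nmi() failed at boot. */
	if (!arm64_supports_nmi())
		return;

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(raw_smp_processor_id(), &mask);
	if (!cpumask_empty(&mask))
		arm64_send_nmi(&mask);
}
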
@@ -32,7 +32,7 @@ void init_IRQ(void);
 #ifdef CONFIG_SMP
 #include <linux/cpumask.h>
-extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
+extern bool arch_trigger_cpumask_backtrace(const cpumask_t *mask,
 					   bool exclude_self);
 #define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
 #endif
......
@@ -850,7 +850,8 @@ static void raise_nmi(cpumask_t *mask)
 	__ipi_send_mask(ipi_desc[IPI_CPU_BACKTRACE], mask);
 }
 
-void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
+bool arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
 {
 	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_nmi);
+	return true;
 }
@@ -6,6 +6,12 @@
 
 #include <asm-generic/irq.h>
 
+#ifdef CONFIG_SMP
+extern bool arch_trigger_cpumask_backtrace(const cpumask_t *mask,
+					   bool exclude_self);
+#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
+#endif
+
 struct pt_regs;
 
 int set_handle_irq(void (*handle_irq)(struct pt_regs *));
......
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_NMI_H
+#define __ASM_NMI_H
+
+#ifndef __ASSEMBLER__
+
+#include <linux/cpumask.h>
+
+extern bool arm64_supports_nmi(void);
+extern void arm64_send_nmi(cpumask_t *mask);
+
+void set_smp_dynamic_ipi(int ipi);
+void dynamic_ipi_setup(int cpu);
+void dynamic_ipi_teardown(int cpu);
+
+#endif /* !__ASSEMBLER__ */
+#endif
@@ -34,7 +34,7 @@ obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
 			   cpufeature.o alternative.o cacheinfo.o \
 			   smp.o smp_spin_table.o topology.o smccc-call.o \
 			   syscall.o proton-pack.o idreg-override.o idle.o \
-			   patching.o
+			   patching.o ipi_nmi.o
 obj-$(CONFIG_COMPAT)			+= sys32.o signal32.o	\
 					   sys_compat.o
......
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * NMI support for IPIs
+ *
+ * Copyright (C) 2020 Linaro Limited
+ * Author: Sumit Garg <sumit.garg@linaro.org>
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kgdb.h>
+#include <linux/nmi.h>
+#include <linux/smp.h>
+
+#include <asm/nmi.h>
+
+static struct irq_desc *ipi_nmi_desc __read_mostly;
+static int ipi_nmi_id __read_mostly;
+
+bool arm64_supports_nmi(void)
+{
+	if (ipi_nmi_desc)
+		return true;
+
+	return false;
+}
+
+void arm64_send_nmi(cpumask_t *mask)
+{
+	if (WARN_ON_ONCE(!ipi_nmi_desc))
+		return;
+
+	__ipi_send_mask(ipi_nmi_desc, mask);
+}
+
+bool arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
+{
+	if (!ipi_nmi_desc)
+		return false;
+
+	nmi_trigger_cpumask_backtrace(mask, exclude_self, arm64_send_nmi);
+
+	return true;
+}
+
+static irqreturn_t ipi_nmi_handler(int irq, void *data)
+{
+	irqreturn_t ret = IRQ_NONE;
+
+	if (nmi_cpu_backtrace(get_irq_regs()))
+		ret = IRQ_HANDLED;
+
+#ifdef CONFIG_KGDB
+	if (!kgdb_nmicallback(smp_processor_id(), get_irq_regs()))
+		ret = IRQ_HANDLED;
+#endif
+
+	return ret;
+}
+
+void dynamic_ipi_setup(int cpu)
+{
+	if (!ipi_nmi_desc)
+		return;
+
+	if (!prepare_percpu_nmi(ipi_nmi_id))
+		enable_percpu_nmi(ipi_nmi_id, IRQ_TYPE_NONE);
+}
+
+void dynamic_ipi_teardown(int cpu)
+{
+	if (!ipi_nmi_desc)
+		return;
+
+	disable_percpu_nmi(ipi_nmi_id);
+	teardown_percpu_nmi(ipi_nmi_id);
+}
+
+void __init set_smp_dynamic_ipi(int ipi)
+{
+	if (!request_percpu_nmi(ipi, ipi_nmi_handler, "IPI", &cpu_number)) {
+		ipi_nmi_desc = irq_to_desc(ipi);
+		ipi_nmi_id = ipi;
+	}
+}
@@ -17,6 +17,7 @@
 #include <asm/debug-monitors.h>
 #include <asm/insn.h>
+#include <asm/nmi.h>
 #include <asm/patching.h>
 #include <asm/traps.h>
@@ -356,3 +357,20 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
 	return aarch64_insn_write((void *)bpt->bpt_addr,
 				  *(u32 *)bpt->saved_instr);
 }
+
+void kgdb_roundup_cpus(void)
+{
+	struct cpumask mask;
+
+	if (!arm64_supports_nmi()) {
+		kgdb_smp_call_nmi_hook();
+		return;
+	}
+
+	cpumask_copy(&mask, cpu_online_mask);
+	cpumask_clear_cpu(raw_smp_processor_id(), &mask);
+	if (cpumask_empty(&mask))
+		return;
+
+	arm64_send_nmi(&mask);
+}
@@ -43,6 +43,7 @@
 #include <asm/daifflags.h>
 #include <asm/kvm_mmu.h>
 #include <asm/mmu_context.h>
+#include <asm/nmi.h>
 #include <asm/numa.h>
 #include <asm/processor.h>
 #include <asm/smp_plat.h>
@@ -939,6 +940,8 @@ static void ipi_setup(int cpu)
 	for (i = 0; i < nr_ipi; i++)
 		enable_percpu_irq(ipi_irq_base + i, 0);
+
+	dynamic_ipi_setup(cpu);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -951,6 +954,8 @@ static void ipi_teardown(int cpu)
 	for (i = 0; i < nr_ipi; i++)
 		disable_percpu_irq(ipi_irq_base + i);
+
+	dynamic_ipi_teardown(cpu);
 }
 #endif
@@ -972,6 +977,9 @@ void __init set_smp_ipi_range(int ipi_base, int n)
 		irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
 	}
 
+	if (n > nr_ipi)
+		set_smp_dynamic_ipi(ipi_base + nr_ipi);
+
 	ipi_irq_base = ipi_base;
 
 	/* Setup the boot CPU immediately */
......
@@ -77,7 +77,7 @@ extern int cp0_fdc_irq;
 
 extern int get_c0_fdc_int(void);
 
-void arch_trigger_cpumask_backtrace(const struct cpumask *mask,
+bool arch_trigger_cpumask_backtrace(const struct cpumask *mask,
 				    bool exclude_self);
 #define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
......
@@ -750,9 +750,10 @@ static void raise_backtrace(cpumask_t *mask)
 	}
 }
 
-void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
+bool arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
 {
 	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
+	return true;
 }
 
 int mips_get_process_fp_mode(struct task_struct *task)
......
@@ -12,7 +12,7 @@ static inline void watchdog_nmi_set_timeout_pct(u64 pct) {}
 #endif
 
 #ifdef CONFIG_NMI_IPI
-extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
+extern bool arch_trigger_cpumask_backtrace(const cpumask_t *mask,
 					   bool exclude_self);
 #define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
 #endif
......
@@ -221,8 +221,9 @@ static void raise_backtrace_ipi(cpumask_t *mask)
 	}
 }
 
-void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
+bool arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
 {
 	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace_ipi);
+	return true;
 }
 #endif /* defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI) */
@@ -87,7 +87,7 @@ static inline unsigned long get_softint(void)
 	return retval;
 }
 
-void arch_trigger_cpumask_backtrace(const struct cpumask *mask,
+bool arch_trigger_cpumask_backtrace(const struct cpumask *mask,
 				    bool exclude_self);
 #define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
......
@@ -236,7 +236,7 @@ static void __global_reg_poll(struct global_reg_snapshot *gp)
 	}
 }
 
-void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
+bool arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
 {
 	struct thread_info *tp = current_thread_info();
 	struct pt_regs *regs = get_irq_regs();
@@ -291,6 +291,8 @@ void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
 
 	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
 	spin_unlock_irqrestore(&global_cpu_snapshot_lock, flags);
+
+	return true;
 }
 
 #ifdef CONFIG_MAGIC_SYSRQ
......
@@ -43,6 +43,7 @@ extern void init_ISA_irqs(void);
 extern void __init init_IRQ(void);
 
 #ifdef CONFIG_X86_LOCAL_APIC
-void arch_trigger_cpumask_backtrace(const struct cpumask *mask,
+bool arch_trigger_cpumask_backtrace(const struct cpumask *mask,
 				    bool exclude_self);
 #define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
......
@@ -34,10 +34,11 @@ static void nmi_raise_cpu_backtrace(cpumask_t *mask)
 	apic->send_IPI_mask(mask, NMI_VECTOR);
 }
 
-void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
+bool arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
 {
 	nmi_trigger_cpumask_backtrace(mask, exclude_self,
 				      nmi_raise_cpu_backtrace);
+	return true;
 }
 
 static int nmi_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs)
......
@@ -524,6 +524,7 @@ static u32 gic_get_ppi_index(struct irq_data *d)
 static int gic_irq_nmi_setup(struct irq_data *d)
 {
 	struct irq_desc *desc = irq_to_desc(d->irq);
+	u32 idx;
 
 	if (!gic_supports_nmi())
 		return -EINVAL;
@@ -541,16 +542,22 @@ static int gic_irq_nmi_setup(struct irq_data *d)
 		return -EINVAL;
 
 	/* desc lock should already be held */
-	if (gic_irq_in_rdist(d)) {
-		u32 idx = gic_get_ppi_index(d);
+	switch (get_intid_range(d)) {
+	case SGI_RANGE:
+		break;
+	case PPI_RANGE:
+	case EPPI_RANGE:
+		idx = gic_get_ppi_index(d);
 
 		/* Setting up PPI as NMI, only switch handler for first NMI */
 		if (!refcount_inc_not_zero(&ppi_nmi_refs[idx])) {
 			refcount_set(&ppi_nmi_refs[idx], 1);
 			desc->handle_irq = handle_percpu_devid_fasteoi_nmi;
 		}
-	} else {
+		break;
+	default:
 		desc->handle_irq = handle_fasteoi_nmi;
+		break;
 	}
 
 	gic_irq_set_prio(d, GICD_INT_NMI_PRI);
@@ -561,6 +568,7 @@ static int gic_irq_nmi_setup(struct irq_data *d)
 static void gic_irq_nmi_teardown(struct irq_data *d)
 {
 	struct irq_desc *desc = irq_to_desc(d->irq);
+	u32 idx;
 
 	if (WARN_ON(!gic_supports_nmi()))
 		return;
@@ -578,14 +586,20 @@ static void gic_irq_nmi_teardown(struct irq_data *d)
 		return;
 
 	/* desc lock should already be held */
-	if (gic_irq_in_rdist(d)) {
-		u32 idx = gic_get_ppi_index(d);
+	switch (get_intid_range(d)) {
+	case SGI_RANGE:
+		break;
+	case PPI_RANGE:
+	case EPPI_RANGE:
+		idx = gic_get_ppi_index(d);
 
 		/* Tearing down NMI, only switch handler for last NMI */
 		if (refcount_dec_and_test(&ppi_nmi_refs[idx]))
 			desc->handle_irq = handle_percpu_devid_irq;
-	} else {
+		break;
+	default:
 		desc->handle_irq = handle_fasteoi_irq;
+		break;
 	}
 
 	gic_irq_set_prio(d, GICD_INT_DEF_PRI);
@@ -1976,6 +1990,7 @@ static int __init gic_init_bases(phys_addr_t dist_phys_base,
 	gic_dist_init();
 	gic_cpu_init();
+	gic_enable_nmi_support();
 	gic_smp_init();
 	gic_cpu_pm_init();
@@ -1988,8 +2003,6 @@ static int __init gic_init_bases(phys_addr_t dist_phys_base,
 		gicv2m_init(handle, gic_data.domain);
 	}
 
-	gic_enable_nmi_support();
-
 	return 0;
 
 out_free:
......
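
The last two irq-gic-v3.c hunks move gic_enable_nmi_support() ahead of gic_smp_init(). An editor's sketch of the resulting probe order (condensed from the surrounding code, trimmed to the calls relevant here) shows why: set_smp_ipi_range(), called from gic_smp_init(), may now request an SGI as an NMI, so NMI support must already be initialized.

/* Sketch of the reordered gic_init_bases() tail, for orientation only. */
static int __init gic_init_bases_sketch(void)
{
	gic_dist_init();
	gic_cpu_init();
	gic_enable_nmi_support();	/* moved up: must precede gic_smp_init() */
	gic_smp_init();			/* may call set_smp_dynamic_ipi() via
					 * set_smp_ipi_range() */
	gic_cpu_pm_init();

	return 0;
}
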
@@ -199,6 +199,18 @@ kgdb_arch_handle_qxfer_pkt(char *remcom_in_buffer,
 extern void kgdb_call_nmi_hook(void *ignored);
 
+/**
+ * kgdb_smp_call_nmi_hook - Provide default fallback mechanism to
+ *			    round up CPUs
+ *
+ * If you're using the default implementation of kgdb_roundup_cpus(),
+ * this function will be called. If an arch detects at runtime that it
+ * cannot support NMI-based roundup, it can fall back to the default
+ * mechanism through this API.
+ */
+extern void kgdb_smp_call_nmi_hook(void);
+
 /**
  * kgdb_roundup_cpus - Get other CPUs into a holding pattern
  *
......
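
The intended override pattern for the new hook, sketched here with hypothetical arch helpers (the concrete arm64 version appears in the kgdb.c hunk above):

/* Sketch: an arch override that keeps the IPI roundup as runtime fallback. */
void kgdb_roundup_cpus(void)
{
	if (!arch_has_nmi_ipi()) {		/* hypothetical capability check */
		kgdb_smp_call_nmi_hook();	/* default IPI-based roundup */
		return;
	}

	arch_send_nmi_ipi_to_others();		/* hypothetical NMI send */
}
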
@@ -145,26 +145,22 @@ static inline void touch_nmi_watchdog(void)
 #ifdef arch_trigger_cpumask_backtrace
 static inline bool trigger_all_cpu_backtrace(void)
 {
-	arch_trigger_cpumask_backtrace(cpu_online_mask, false);
-	return true;
+	return arch_trigger_cpumask_backtrace(cpu_online_mask, false);
 }
 
 static inline bool trigger_allbutself_cpu_backtrace(void)
 {
-	arch_trigger_cpumask_backtrace(cpu_online_mask, true);
-	return true;
+	return arch_trigger_cpumask_backtrace(cpu_online_mask, true);
 }
 
 static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
 {
-	arch_trigger_cpumask_backtrace(mask, false);
-	return true;
+	return arch_trigger_cpumask_backtrace(mask, false);
 }
 
 static inline bool trigger_single_cpu_backtrace(int cpu)
 {
-	arch_trigger_cpumask_backtrace(cpumask_of(cpu), false);
-	return true;
+	return arch_trigger_cpumask_backtrace(cpumask_of(cpu), false);
 }
 
 /* generic implementation */
......
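
With the bool return now propagated through the trigger_*() wrappers, callers can tell when the arch could not raise a backtrace (for example, arm64 without a usable NMI IPI) and react. A hypothetical caller:

#include <linux/nmi.h>
#include <linux/printk.h>

/* Hypothetical example (not in this PR). */
static void example_backtrace_cpu(int cpu)
{
	if (!trigger_single_cpu_backtrace(cpu))
		pr_info("CPU%d: no arch support for NMI backtrace\n", cpu);
}
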
@@ -238,7 +238,7 @@ NOKPROBE_SYMBOL(kgdb_call_nmi_hook);
 static DEFINE_PER_CPU(call_single_data_t, kgdb_roundup_csd) =
 	CSD_INIT(kgdb_call_nmi_hook, NULL);
 
-void __weak kgdb_roundup_cpus(void)
+void kgdb_smp_call_nmi_hook(void)
 {
 	call_single_data_t *csd;
 	int this_cpu = raw_smp_processor_id();
@@ -269,6 +269,12 @@ void __weak kgdb_roundup_cpus(void)
 		kgdb_info[cpu].rounding_up = false;
 	}
 }
+NOKPROBE_SYMBOL(kgdb_smp_call_nmi_hook);
+
+void __weak kgdb_roundup_cpus(void)
+{
+	kgdb_smp_call_nmi_hook();
+}
 NOKPROBE_SYMBOL(kgdb_roundup_cpus);
 #endif
......