smp.c 19.0 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0-only
L
Linus Torvalds 已提交
2 3 4 5 6
/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 */
R
Russell King 已提交
7
#include <linux/module.h>
L
Linus Torvalds 已提交
8 9 10
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
11
#include <linux/sched/mm.h>
12
#include <linux/sched/hotplug.h>
13
#include <linux/sched/task_stack.h>
L
Linus Torvalds 已提交
14 15 16 17 18
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
A
Alexey Dobriyan 已提交
19
#include <linux/err.h>
L
Linus Torvalds 已提交
20 21
#include <linux/cpu.h>
#include <linux/seq_file.h>
R
Russell King 已提交
22
#include <linux/irq.h>
23
#include <linux/nmi.h>
24 25
#include <linux/percpu.h>
#include <linux/clockchips.h>
26
#include <linux/completion.h>
27
#include <linux/cpufreq.h>
28
#include <linux/irq_work.h>
29
#include <linux/kernel_stat.h>
L
Linus Torvalds 已提交
30

A
Arun Sharma 已提交
31
#include <linux/atomic.h>
32
#include <asm/bugs.h>
33
#include <asm/smp.h>
L
Linus Torvalds 已提交
34 35
#include <asm/cacheflush.h>
#include <asm/cpu.h>
36
#include <asm/cputype.h>
37
#include <asm/exception.h>
38
#include <asm/idmap.h>
39
#include <asm/topology.h>
40
#include <asm/mmu_context.h>
41
#include <asm/procinfo.h>
L
Linus Torvalds 已提交
42
#include <asm/processor.h>
43
#include <asm/sections.h>
L
Linus Torvalds 已提交
44 45
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
46
#include <asm/smp_plat.h>
47
#include <asm/virt.h>
48
#include <asm/mach/arch.h>
49
#include <asm/mpu.h>
L
Linus Torvalds 已提交
50

N
Nicolas Pitre 已提交
51 52
#include <trace/events/ipi.h>

53 54 55 56 57 58 59
/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;

L
Linus Torvalds 已提交
60
/*
 * IPI message numbers.  The first NR_IPI entries are backed by real
 * per-cpu interrupts and are traced via trace_ipi_*; IPI_CPU_BACKTRACE
 * shares slot NR_IPI and is raised directly (see raise_nmi()).
 */
enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_TIMER,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_IRQ_WORK,
	IPI_COMPLETION,
	NR_IPI,
	/*
	 * CPU_BACKTRACE is special and not included in NR_IPI
	 * or tracable with trace_ipi_*
	 */
	IPI_CPU_BACKTRACE = NR_IPI,
	/*
	 * SGI8-15 can be reserved by secure firmware, and thus may
	 * not be usable by the kernel. Please keep the above limited
	 * to at most 8 entries.
	 */
	MAX_IPI
};

82 83 84 85 86 87
static int ipi_irq_base __read_mostly;
static int nr_ipi __read_mostly = NR_IPI;
static struct irq_desc *ipi_desc[MAX_IPI] __read_mostly;

static void ipi_setup(int cpu);

88 89
static DECLARE_COMPLETION(cpu_running);

90
static struct smp_operations smp_ops __ro_after_init;
91

92
/*
 * Install the platform's SMP operations.  Called once during early
 * boot by machine code; a NULL pointer is ignored.
 */
void __init smp_set_ops(const struct smp_operations *ops)
{
	if (ops)
		smp_ops = *ops;
}

98 99
/*
 * Return @pgd in the form the secondary startup code expects: a PFN
 * under LPAE (where the physical address may not fit in 32 bits),
 * otherwise the physical address itself.
 */
static unsigned long get_arch_pgd(pgd_t *pgd)
{
#ifdef CONFIG_ARM_LPAE
	return __phys_to_pfn(virt_to_phys(pgd));
#else
	return virt_to_phys(pgd);
#endif
}

107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130
#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
/* Allocate @cpu's processor vtable (for BP hardening) if not already done. */
static int secondary_biglittle_prepare(unsigned int cpu)
{
	if (cpu_vtable[cpu])
		return 0;

	cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL);
	return cpu_vtable[cpu] ? 0 : -ENOMEM;
}

/* Point this CPU's vtable at the proc_info matching its own MIDR. */
static void secondary_biglittle_init(void)
{
	init_proc_vtable(lookup_processor(read_cpuid_id())->proc);
}
#else
static int secondary_biglittle_prepare(unsigned int cpu)
{
	return 0;
}

static void secondary_biglittle_init(void)
{
}
#endif

131
int __cpu_up(unsigned int cpu, struct task_struct *idle)
L
Linus Torvalds 已提交
132 133 134
{
	int ret;

135 136 137
	if (!smp_ops.smp_boot_secondary)
		return -ENOSYS;

138 139 140 141
	ret = secondary_biglittle_prepare(cpu);
	if (ret)
		return ret;

142 143 144 145
	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
A
Al Viro 已提交
146
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
147
#ifdef CONFIG_ARM_MPU
148
	secondary_data.mpu_rgn_info = &mpu_rgn_info;
149 150
#endif

151
#ifdef CONFIG_MMU
152
	secondary_data.pgdir = virt_to_phys(idmap_pgd);
153
	secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
154
#endif
155
	secondary_data.task = idle;
156
	sync_cache_w(&secondary_data);
157

L
Linus Torvalds 已提交
158 159 160
	/*
	 * Now bring the CPU into our world.
	 */
161
	ret = smp_ops.smp_boot_secondary(cpu, idle);
162 163 164 165 166
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
167 168
		wait_for_completion_timeout(&cpu_running,
						 msecs_to_jiffies(1000));
169

170 171
		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
172
			ret = -EIO;
173 174 175
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
176 177 178
	}


179
	memset(&secondary_data, 0, sizeof(secondary_data));
L
Linus Torvalds 已提交
180 181 182
	return ret;
}

183
/* platform specific SMP operations */
184
void __init smp_init_cpus(void)
185 186 187 188 189
{
	if (smp_ops.smp_init_cpus)
		smp_ops.smp_init_cpus();
}

190 191 192 193 194
int platform_can_secondary_boot(void)
{
	return !!smp_ops.smp_boot_secondary;
}

195 196 197 198 199 200 201 202 203 204
/* Non-zero when CPU hotplug is usable: requires a platform cpu_kill hook. */
int platform_can_cpu_hotplug(void)
{
	int supported = 0;

#ifdef CONFIG_HOTPLUG_CPU
	supported = smp_ops.cpu_kill != NULL;
#endif

	return supported;
}

205
#ifdef CONFIG_HOTPLUG_CPU
/* Cut power/clocks to dead @cpu via the platform hook; default "success". */
static int platform_cpu_kill(unsigned int cpu)
{
	if (smp_ops.cpu_kill)
		return smp_ops.cpu_kill(cpu);
	return 1;
}

213
static int platform_cpu_disable(unsigned int cpu)
214 215 216 217
{
	if (smp_ops.cpu_disable)
		return smp_ops.cpu_disable(cpu);

218 219 220 221 222 223 224 225 226 227 228 229
	return 0;
}

/*
 * Whether @cpu can be hot-unplugged.  A cpu_die hook is mandatory;
 * the platform may then decide per CPU via cpu_can_disable, otherwise
 * everything but CPU0 is allowed.
 */
int platform_can_hotplug_cpu(unsigned int cpu)
{
	/* cpu_die must be specified to support hotplug */
	if (!smp_ops.cpu_die)
		return 0;

	if (smp_ops.cpu_can_disable)
		return smp_ops.cpu_can_disable(cpu);

	/*
	 * By default, allow disabling all CPUs except the first one,
	 * since this is special on a lot of platforms, e.g. because
	 * of clock tick interrupts.
	 */
	return cpu != 0;
}
237

238 239 240 241 242 243 244 245 246 247 248
/* Disable the per-cpu IPI interrupts on this CPU before it goes offline. */
static void ipi_teardown(int cpu)
{
	int irq;

	if (WARN_ON_ONCE(!ipi_irq_base))
		return;

	for (irq = ipi_irq_base; irq < ipi_irq_base + nr_ipi; irq++)
		disable_percpu_irq(irq);
}

249 250 251
/*
 * __cpu_disable runs on the processor to be shutdown.
 */
252
int __cpu_disable(void)
253 254 255 256
{
	unsigned int cpu = smp_processor_id();
	int ret;

257
	ret = platform_cpu_disable(cpu);
258 259 260
	if (ret)
		return ret;

261 262 263 264
#ifdef CONFIG_GENERIC_ARCH_TOPOLOGY
	remove_cpu_topology(cpu);
#endif

265 266 267 268
	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
269
	set_cpu_online(cpu, false);
270
	ipi_teardown(cpu);
271 272 273 274

	/*
	 * OK - migrate IRQs away from this CPU
	 */
275
	irq_migrate_all_off_this_cpu();
276 277 278 279

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
280 281 282
	 *
	 * Caches are flushed to the Level of Unification Inner Shareable
	 * to write-back dirty lines to unified caches shared by all CPUs.
283
	 */
284
	flush_cache_louis();
285 286 287 288 289 290 291 292 293
	local_flush_tlb_all();

	return 0;
}

/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	/* Wait up to 5 seconds for the dying CPU to report death. */
	if (!cpu_wait_death(cpu, 5)) {
		pr_err("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	pr_debug("CPU%u: shutdown\n", cpu);

	clear_tasks_mm_cpumask(cpu);
	/*
	 * platform_cpu_kill() is generally expected to do the powering off
	 * and/or cutting of clocks to the dying CPU.  Optionally, this may
	 * be done by the CPU which is dying in preference to supporting
	 * this call, but that means there is _no_ synchronisation between
	 * the requesting CPU and the dying CPU actually losing power.
	 */
	if (!platform_cpu_kill(cpu))
		pr_err("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
322
void __noreturn arch_cpu_idle_dead(void)
323 324 325 326 327
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

328 329
	local_irq_disable();

330 331 332 333 334 335 336 337 338 339 340 341 342
	/*
	 * Flush the data out of the L1 cache for this CPU.  This must be
	 * before the completion to ensure that data is safely written out
	 * before platform_cpu_kill() gets called - which may disable
	 * *this* CPU and power down its cache.
	 */
	flush_cache_louis();

	/*
	 * Tell __cpu_die() that this CPU is now safe to dispose of.  Once
	 * this returns, power and/or clocks can be removed at any point
	 * from this CPU and its cache by platform_cpu_kill().
	 */
343
	(void)cpu_report_death();
344

345
	/*
346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363
	 * Ensure that the cache lines associated with that completion are
	 * written out.  This covers the case where _this_ CPU is doing the
	 * powering down, to ensure that the completion is visible to the
	 * CPU waiting for this one.
	 */
	flush_cache_louis();

	/*
	 * The actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific.  This may remove power, or it may simply spin.
	 *
	 * Platforms are generally expected *NOT* to return from this call,
	 * although there are some which do because they have no way to
	 * power down the CPU.  These platforms are the _only_ reason we
	 * have a return path which uses the fragment of assembly below.
	 *
	 * The return path should not be used for platforms which can
	 * power off the CPU.
364
	 */
365 366
	if (smp_ops.cpu_die)
		smp_ops.cpu_die(cpu);
367

368 369 370
	pr_warn("CPU%u: smp_ops.cpu_die() returned, trying to resuscitate\n",
		cpu);

371 372 373 374 375 376
	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */
	__asm__("mov	sp, %0\n"
377
	"	mov	fp, #0\n"
378
	"	mov	r0, %1\n"
379 380
	"	b	secondary_start_kernel"
		:
381 382 383
		: "r" (task_stack_page(current) + THREAD_SIZE - 8),
		  "r" (current)
		: "r0");
384 385

	unreachable();
386 387 388
}
#endif /* CONFIG_HOTPLUG_CPU */

389 390 391 392
/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
393
static void smp_store_cpu_info(unsigned int cpuid)
394 395 396 397
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;
398
	cpu_info->cpuid = read_cpuid_id();
399 400

	store_cpu_topology(cpuid);
401
	check_cpu_icache_size(cpuid);
402 403
}

404 405 406 407 408 409
/* Publish @cur as this CPU's current task via the TPIDRURO register. */
static void set_current(struct task_struct *cur)
{
	/* Set TPIDRURO */
	asm("mcr p15, 0, %0, c13, c0, 3" :: "r"(cur) : "memory");
}

410 411 412 413
/*
 * This is the secondary CPU boot entry.  We're using this CPUs
 * idle thread stack, but a set of temporary page tables.
 */
414
asmlinkage void secondary_start_kernel(struct task_struct *task)
415 416
{
	struct mm_struct *mm = &init_mm;
417 418
	unsigned int cpu;

419 420
	set_current(task);

421 422
	secondary_biglittle_init();

423 424 425 426 427
	/*
	 * The identity mapping is uncached (strongly ordered), so
	 * switch away from it before attempting any exclusive accesses.
	 */
	cpu_switch_mm(mm->pgd, mm);
428
	local_flush_bp_all();
429 430
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();
431 432 433 434 435

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
436
	cpu = smp_processor_id();
V
Vegard Nossum 已提交
437
	mmgrab(mm);
438
	current->active_mm = mm;
439
	cpumask_set_cpu(cpu, mm_cpumask(mm));
440

441 442
	cpu_init();

443 444 445
#ifndef CONFIG_MMU
	setup_vectors_base();
#endif
446
	pr_debug("CPU%u: Booted secondary processor\n", cpu);
447

448
	trace_hardirqs_off();
449 450 451 452

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
453 454
	if (smp_ops.smp_secondary_init)
		smp_ops.smp_secondary_init(cpu);
455

456
	notify_cpu_starting(cpu);
457

458 459
	ipi_setup(cpu);

460 461 462 463 464
	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
465 466
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
467
	 * before we continue - which happens after __cpu_up returns.
468
	 */
469
	set_cpu_online(cpu, true);
470 471 472

	check_other_bugs();

473
	complete(&cpu_running);
474 475 476

	local_irq_enable();
	local_fiq_enable();
477
	local_abt_enable();
478

479 480 481
	/*
	 * OK, it's off to the idle thread for us
	 */
482
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
483 484
}

L
Linus Torvalds 已提交
485 486
/* Report aggregate BogoMIPS once all CPUs are up, then sanity-check HYP. */
void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);

	hyp_mode_check();
}

void __init smp_prepare_boot_cpu(void)
{
	/* Point the boot CPU's per-cpu offset register at its own area. */
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}

507
/* Prepare for SMP boot: clamp @max_cpus, set up topology and present map. */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = num_possible_cpus();

	init_cpu_topology();

	smp_store_cpu_info(smp_processor_id());

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;
	if (ncores > 1 && max_cpus) {
		/*
		 * Initialise the present map, which describes the set of CPUs
		 * actually populated at the present time. A platform should
		 * re-initialize the map in the platforms smp_prepare_cpus()
		 * if present != possible (e.g. physical hotplug).
		 */
		init_cpu_present(cpu_possible_mask);

		/*
		 * Initialise the SCU if there are more than one CPU
		 * and let them know where to start.
		 */
		if (smp_ops.smp_prepare_cpus)
			smp_ops.smp_prepare_cpus(max_cpus);
	}
}

N
Nicolas Pitre 已提交
538
/* Human-readable IPI names shown in /proc/interrupts, indexed by IPI number */
static const char *ipi_types[NR_IPI] __tracepoint_string = {
	[IPI_WAKEUP]		= "CPU wakeup interrupts",
	[IPI_TIMER]		= "Timer broadcast interrupts",
	[IPI_RESCHEDULE]	= "Rescheduling interrupts",
	[IPI_CALL_FUNC]		= "Function call interrupts",
	[IPI_CPU_STOP]		= "CPU stop interrupts",
	[IPI_IRQ_WORK]		= "IRQ work interrupts",
	[IPI_COMPLETION]	= "completion interrupts",
};

M
Marc Zyngier 已提交
548
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);
N
Nicolas Pitre 已提交
549

550
void show_ipi_list(struct seq_file *p, int prec)
L
Linus Torvalds 已提交
551
{
552
	unsigned int cpu, i;
L
Linus Torvalds 已提交
553

554
	for (i = 0; i < NR_IPI; i++) {
555 556 557
		if (!ipi_desc[i])
			continue;

558
		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);
L
Linus Torvalds 已提交
559

560
		for_each_online_cpu(cpu)
561
			seq_printf(p, "%10u ", irq_desc_kstat_cpu(ipi_desc[i], cpu));
L
Linus Torvalds 已提交
562

563 564
		seq_printf(p, " %s\n", ipi_types[i]);
	}
L
Linus Torvalds 已提交
565 566
}

N
Nicolas Pitre 已提交
567 568 569 570 571 572 573 574 575 576 577 578
/* Cross-call entry points used by the generic smp_call_function code. */
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_WAKEUP);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	/* Only self-IPI when the irqchip actually supports it. */
	if (arch_irq_work_has_interrupt())
		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

590
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
/* Relay a broadcast clockevent tick to the CPUs in @mask via IPI_TIMER. */
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#endif
596

597
static DEFINE_RAW_SPINLOCK(stop_lock);
L
Linus Torvalds 已提交
598 599 600 601 602 603

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
604 605
	local_fiq_disable();

T
Thomas Gleixner 已提交
606
	if (system_state <= SYSTEM_RUNNING) {
607
		raw_spin_lock(&stop_lock);
R
Russell King 已提交
608
		pr_crit("CPU%u: stopping\n", cpu);
609
		dump_stack();
610
		raw_spin_unlock(&stop_lock);
611
	}
L
Linus Torvalds 已提交
612

613
	set_cpu_online(cpu, false);
L
Linus Torvalds 已提交
614

615
	while (1) {
L
Linus Torvalds 已提交
616
		cpu_relax();
617 618
		wfe();
	}
L
Linus Torvalds 已提交
619 620
}

621 622 623 624 625 626 627 628 629 630 631 632 633
/* Per-cpu completion signalled when that CPU receives IPI_COMPLETION */
static DEFINE_PER_CPU(struct completion *, cpu_completion);

/*
 * Arm @completion for @cpu and return the IPI number the caller must
 * raise on that CPU to fire it.
 */
int register_ipi_completion(struct completion *completion, int cpu)
{
	per_cpu(cpu_completion, cpu) = completion;
	return IPI_COMPLETION;
}

/* IPI_COMPLETION handler: complete whatever was registered for this CPU. */
static void ipi_complete(unsigned int cpu)
{
	complete(per_cpu(cpu_completion, cpu));
}

L
Linus Torvalds 已提交
634 635 636
/*
 * Main handler for inter-processor interrupts
 */
637
static void do_handle_IPI(int ipinr)
L
Linus Torvalds 已提交
638 639 640
{
	unsigned int cpu = smp_processor_id();

641
	if ((unsigned)ipinr < NR_IPI)
642
		trace_ipi_entry(ipi_types[ipinr]);
L
Linus Torvalds 已提交
643

644
	switch (ipinr) {
645 646 647
	case IPI_WAKEUP:
		break;

648
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
649
	case IPI_TIMER:
650
		tick_receive_broadcast();
651
		break;
652
#endif
L
Linus Torvalds 已提交
653

654
	case IPI_RESCHEDULE:
655
		scheduler_ipi();
656
		break;
L
Linus Torvalds 已提交
657

658 659 660
	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;
L
Linus Torvalds 已提交
661

662 663 664
	case IPI_CPU_STOP:
		ipi_cpu_stop(cpu);
		break;
L
Linus Torvalds 已提交
665

666 667 668 669 670 671
#ifdef CONFIG_IRQ_WORK
	case IPI_IRQ_WORK:
		irq_work_run();
		break;
#endif

672 673 674 675
	case IPI_COMPLETION:
		ipi_complete(cpu);
		break;

676
	case IPI_CPU_BACKTRACE:
J
John Ogness 已提交
677
		printk_deferred_enter();
678
		nmi_cpu_backtrace(get_irq_regs());
J
John Ogness 已提交
679
		printk_deferred_exit();
680 681
		break;

682
	default:
R
Russell King 已提交
683 684
		pr_crit("CPU%u: Unknown IPI message 0x%x\n",
		        cpu, ipinr);
685
		break;
L
Linus Torvalds 已提交
686
	}
N
Nicolas Pitre 已提交
687 688

	if ((unsigned)ipinr < NR_IPI)
689
		trace_ipi_exit(ipi_types[ipinr]);
690 691 692 693 694 695 696 697 698 699 700
}

/* Legacy version, should go away once all irqchips have been converted */
void handle_IPI(int ipinr, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();
	do_handle_IPI(ipinr);
	irq_exit();

	set_irq_regs(old_regs);
}

704 705 706 707 708 709
/* Per-cpu IRQ handler: translate the Linux IRQ back into an IPI number. */
static irqreturn_t ipi_handler(int irq, void *data)
{
	int ipinr = irq - ipi_irq_base;

	do_handle_IPI(ipinr);
	return IRQ_HANDLED;
}

M
Marc Zyngier 已提交
710
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
711
{
712
	trace_ipi_raise(target, ipi_types[ipinr]);
M
Marc Zyngier 已提交
713
	__ipi_send_mask(ipi_desc[ipinr], target);
714 715 716 717 718 719
}

/* Enable the per-cpu IPI interrupts on the calling CPU. */
static void ipi_setup(int cpu)
{
	int i;

	/* Nothing to enable until set_smp_ipi_range() has run. */
	if (WARN_ON_ONCE(!ipi_irq_base))
		return;

	for (i = 0; i < nr_ipi; i++)
		enable_percpu_irq(ipi_irq_base + i, 0);
}

/*
 * Claim the block of @n per-cpu IRQs starting at @ipi_base for IPI use,
 * then enable them on the boot CPU.  Called once by the irqchip driver.
 */
void __init set_smp_ipi_range(int ipi_base, int n)
{
	int i;

	WARN_ON(n < MAX_IPI);
	nr_ipi = min(n, MAX_IPI);

	for (i = 0; i < nr_ipi; i++) {
		int err;

		err = request_percpu_irq(ipi_base + i, ipi_handler,
					 "IPI", &irq_stat);
		WARN_ON(err);

		ipi_desc[i] = irq_to_desc(ipi_base + i);
		/* Hide IPIs from the regular /proc/interrupts listing. */
		irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
	}

	/* Publish the base last; ipi_setup()/teardown key off it. */
	ipi_irq_base = ipi_base;

	/* Setup the boot CPU immediately */
	ipi_setup(smp_processor_id());
}

751
void arch_smp_send_reschedule(int cpu)
L
Linus Torvalds 已提交
752
{
R
Russell King 已提交
753
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
L
Linus Torvalds 已提交
754 755 756 757
}

void smp_send_stop(void)
{
758
	unsigned long timeout;
759
	struct cpumask mask;
L
Linus Torvalds 已提交
760

761 762
	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
763 764
	if (!cpumask_empty(&mask))
		smp_cross_call(&mask, IPI_CPU_STOP);
765

766 767 768 769
	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);
770

771
	if (num_online_cpus() > 1)
772
		pr_warn("SMP: failed to stop secondary CPUs\n");
773 774
}

775 776 777 778 779 780
/* In case panic() and panic() called at the same time on CPU1 and CPU2,
 * and CPU 1 calls panic_smp_self_stop() before crash_smp_send_stop()
 * CPU1 can't receive the ipi irqs from CPU2, CPU1 will be always online,
 * kdump fails. So split out the panic_smp_self_stop() and add
 * set_cpu_online(smp_processor_id(), false).
 */
781
void __noreturn panic_smp_self_stop(void)
782 783 784 785 786 787 788 789
{
	pr_debug("CPU %u will stop doing anything useful since another CPU has paniced\n",
	         smp_processor_id());
	set_cpu_online(smp_processor_id(), false);
	while (1)
		cpu_relax();
}

790 791 792 793 794 795 796 797 798 799 800
#ifdef CONFIG_CPU_FREQ

/*
 * Re-scale loops_per_jiffy on CPU frequency transitions so udelay()
 * stays roughly calibrated.  Reference values are captured the first
 * time a transition is seen for a policy's CPUs.
 */
static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
static unsigned long global_l_p_j_ref;
static unsigned long global_l_p_j_ref_freq;

static int cpufreq_callback(struct notifier_block *nb,
					unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpumask *cpus = freq->policy->cpus;
	int cpu, first = cpumask_first(cpus);
	unsigned int lpj;

	/* Nothing to do if delay loops are frequency-invariant. */
	if (freq->flags & CPUFREQ_CONST_LOOPS)
		return NOTIFY_OK;

	if (!per_cpu(l_p_j_ref, first)) {
		for_each_cpu(cpu, cpus) {
			per_cpu(l_p_j_ref, cpu) =
				per_cpu(cpu_data, cpu).loops_per_jiffy;
			per_cpu(l_p_j_ref_freq, cpu) = freq->old;
		}

		if (!global_l_p_j_ref) {
			global_l_p_j_ref = loops_per_jiffy;
			global_l_p_j_ref_freq = freq->old;
		}
	}

	/*
	 * Scale up before a frequency increase and down after a decrease,
	 * so udelay() never under-delays during the transition window.
	 */
	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
						global_l_p_j_ref_freq,
						freq->new);

		lpj = cpufreq_scale(per_cpu(l_p_j_ref, first),
				    per_cpu(l_p_j_ref_freq, first), freq->new);
		for_each_cpu(cpu, cpus)
			per_cpu(cpu_data, cpu).loops_per_jiffy = lpj;
	}
	return NOTIFY_OK;
}

static struct notifier_block cpufreq_notifier = {
	.notifier_call  = cpufreq_callback,
};

static int __init register_cpufreq_notifier(void)
{
	return cpufreq_register_notifier(&cpufreq_notifier,
						CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

#endif
847 848 849

/* Raise the backtrace pseudo-NMI directly; it is not a traced IPI. */
static void raise_nmi(cpumask_t *mask)
{
	__ipi_send_mask(ipi_desc[IPI_CPU_BACKTRACE], mask);
}

853
/* Ask the CPUs in @mask to dump a backtrace; returns true (supported). */
bool arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_nmi);
	return true;
}