/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 * Copyright (C) 2013  Imagination Technologies Ltd.
 */
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/nmi.h>
#include <linux/personality.h>
#include <linux/prctl.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>

#include <asm/abi.h>
#include <asm/asm.h>
#include <asm/dsemul.h>
#include <asm/dsp.h>
#include <asm/exec.h>
#include <asm/fpu.h>
#include <asm/inst.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/isadep.h>
#include <asm/msa.h>
#include <asm/mips-cps.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/reg.h>
#include <asm/stacktrace.h>

#ifdef CONFIG_HOTPLUG_CPU
void __noreturn arch_cpu_idle_dead(void)
{
	play_dead();
}
#endif

asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);

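/*
 * Called at execve() time to reset the register state for the new
 * userland image: revoke coprocessor access (ST0_CUx) & 64-bit FPU
 * register mode (ST0_FR), drop back to user mode (KU_USER) & discard
 * any FPU/MSA/DSP state left over from the old image.
 */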
void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
{
	unsigned long status;

	/* New thread loses kernel privileges. */
	status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_CU2|ST0_FR|KU_MASK);
	status |= KU_USER;
	regs->cp0_status = status;
	lose_fpu(0);
	clear_thread_flag(TIF_MSA_CTX_LIVE);
	clear_used_math();
#ifdef CONFIG_MIPS_FP_SUPPORT
	atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE);
#endif
	init_dsp();
	regs->cp0_epc = pc;
	regs->regs[29] = sp;
}

void exit_thread(struct task_struct *tsk)
{
	/*
	 * User threads may have allocated a delay slot emulation frame.
	 * If so, clean up that allocation.
	 */
	if (!(current->flags & PF_KTHREAD))
		dsemul_thread_cleanup(tsk);
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	/*
	 * Save any process state which is live in hardware registers to the
	 * parent context prior to duplication. This prevents the new child
	 * state becoming stale if the parent is preempted before copy_thread()
	 * gets a chance to save the parent's live hardware registers to the
	 * child context.
	 */
	preempt_disable();

	if (is_msa_enabled())
		save_msa(current);
	else if (is_fpu_owner())
		_save_fp(current);

	save_dsp(current);

	preempt_enable();

	*dst = *src;
	return 0;
}

/*
 * Copy architecture-specific thread state
 */
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
	unsigned long clone_flags = args->flags;
	unsigned long usp = args->stack;
	unsigned long tls = args->tls;
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *childregs, *regs = current_pt_regs();
	unsigned long childksp;

	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;

	/* set up new TSS. */
	childregs = (struct pt_regs *) childksp - 1;
	/*  Put the stack after the struct pt_regs.  */
	childksp = (unsigned long) childregs;
	p->thread.cp0_status = (read_c0_status() & ~(ST0_CU2|ST0_CU1)) | ST0_KERNEL_CUMASK;
	if (unlikely(args->fn)) {
		/* kernel thread */
		unsigned long status = p->thread.cp0_status;
		memset(childregs, 0, sizeof(struct pt_regs));
		p->thread.reg16 = (unsigned long)args->fn;
		p->thread.reg17 = (unsigned long)args->fn_arg;
		p->thread.reg29 = childksp;
		p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
#if defined(CONFIG_CPU_R3000)
		status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
			 ((status & (ST0_KUC | ST0_IEC)) << 2);
#else
		status |= ST0_EXL;
#endif
		childregs->cp0_status = status;
		return 0;
	}

	/* user thread */
	*childregs = *regs;
	childregs->regs[7] = 0; /* Clear error flag */
	childregs->regs[2] = 0; /* Child gets zero as return value */
	if (usp)
		childregs->regs[29] = usp;

	p->thread.reg29 = (unsigned long) childregs;
	p->thread.reg31 = (unsigned long) ret_from_fork;

	/*
	 * New tasks lose permission to use the fpu. This accelerates context
	 * switching for most programs since they don't use the fpu.
	 */
	childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);

	clear_tsk_thread_flag(p, TIF_USEDFPU);
	clear_tsk_thread_flag(p, TIF_USEDMSA);
	clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);

#ifdef CONFIG_MIPS_MT_FPAFF
	clear_tsk_thread_flag(p, TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */

#ifdef CONFIG_MIPS_FP_SUPPORT
	atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE);
#endif

	if (clone_flags & CLONE_SETTLS)
		ti->tp_value = tls;

	return 0;
}

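/*
 * In the kernel-thread case of copy_thread() above, reg16/reg17 are
 * the callee-saved $s0/$s1: ret_from_kernel_thread restores them
 * across the first context switch & then effectively calls
 * fn(fn_arg) - roughly "move a0, s1; jal s0" after schedule_tail()
 * has run (see the low-level entry code).
 */
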
#ifdef CONFIG_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

struct mips_frame_info {
	void		*func;
	unsigned long	func_size;
	int		frame_size;
	int		pc_offset;
};

#define J_TARGET(pc,target)	\
		(((unsigned long)(pc) & 0xf0000000) | ((target) << 2))

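/*
 * Worked example for J_TARGET(): a j/jal instruction carries a 26-bit
 * instr_index. For such an instruction at pc 0x80400020 with target
 * 0x0100000, the macro keeps the top four bits of pc & pastes in the
 * shifted index: (0x80400020 & 0xf0000000) | (0x0100000 << 2)
 * = 0x80400000.
 */
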
static inline int is_jr_ra_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * jr16 ra
	 * jr ra
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		if (ip->mm16_r5_format.opcode == mm_pool16c_op &&
		    ip->mm16_r5_format.rt == mm_jr16_op &&
		    ip->mm16_r5_format.imm == 31)
			return 1;
		return 0;
	}

	if (ip->r_format.opcode == mm_pool32a_op &&
	    ip->r_format.func == mm_pool32axf_op &&
	    ((ip->u_format.uimmediate >> 6) & GENMASK(9, 0)) == mm_jalr_op &&
	    ip->r_format.rt == 31)
		return 1;
	return 0;
#else
	if (ip->r_format.opcode == spec_op &&
	    ip->r_format.func == jr_op &&
	    ip->r_format.rs == 31)
		return 1;
	return 0;
#endif
}

static inline int is_ra_save_ins(union mips_instruction *ip, int *poff)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * swsp ra,offset
	 * swm16 reglist,offset(sp)
	 * swm32 reglist,offset(sp)
	 * sw32 ra,offset(sp)
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is way more fun...
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		switch (ip->mm16_r5_format.opcode) {
		case mm_swsp16_op:
			if (ip->mm16_r5_format.rt != 31)
				return 0;

			*poff = ip->mm16_r5_format.imm;
			*poff = (*poff << 2) / sizeof(ulong);
			return 1;

		case mm_pool16c_op:
			switch (ip->mm16_m_format.func) {
			case mm_swm16_op:
				*poff = ip->mm16_m_format.imm;
				*poff += 1 + ip->mm16_m_format.rlist;
				*poff = (*poff << 2) / sizeof(ulong);
				return 1;

			default:
				return 0;
			}

		default:
			return 0;
		}
	}

	switch (ip->i_format.opcode) {
	case mm_sw32_op:
		if (ip->i_format.rs != 29)
			return 0;
		if (ip->i_format.rt != 31)
			return 0;

		*poff = ip->i_format.simmediate / sizeof(ulong);
		return 1;

	case mm_pool32b_op:
		switch (ip->mm_m_format.func) {
		case mm_swm32_func:
			if (ip->mm_m_format.rd < 0x10)
				return 0;
			if (ip->mm_m_format.base != 29)
				return 0;

			*poff = ip->mm_m_format.simmediate;
			*poff += (ip->mm_m_format.rd & 0xf) * sizeof(u32);
			*poff /= sizeof(ulong);
			return 1;
		default:
			return 0;
		}

	default:
		return 0;
	}
#else
	/* sw / sd $ra, offset($sp) */
	if ((ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
		ip->i_format.rs == 29 && ip->i_format.rt == 31) {
		*poff = ip->i_format.simmediate / sizeof(ulong);
		return 1;
	}
#ifdef CONFIG_CPU_LOONGSON64
	if ((ip->loongson3_lswc2_format.opcode == swc2_op) &&
		      (ip->loongson3_lswc2_format.ls == 1) &&
		      (ip->loongson3_lswc2_format.fr == 0) &&
		      (ip->loongson3_lswc2_format.base == 29)) {
		if (ip->loongson3_lswc2_format.rt == 31) {
			*poff = ip->loongson3_lswc2_format.offset << 1;
			return 1;
		}
		if (ip->loongson3_lswc2_format.rq == 31) {
			*poff = (ip->loongson3_lswc2_format.offset << 1) + 1;
			return 1;
		}
	}
#endif
	return 0;
#endif
}

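/*
 * Worked example for the classic-MIPS path of is_ra_save_ins():
 * "sw ra,28(sp)" has opcode sw_op, rs = 29 ($sp), rt = 31 ($ra) &
 * simmediate = 28, so *poff becomes 28 / sizeof(ulong) = 7 on a
 * 32-bit kernel, i.e. the saved $ra sits seven words above the
 * adjusted stack pointer.
 */
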
static inline int is_jump_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * jr16,jrc,jalr16,jalrs16
	 * jal
	 * jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is kind of more fun...
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		if ((ip->mm16_r5_format.opcode == mm_pool16c_op &&
		    (ip->mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op))
			return 1;
		return 0;
	}

	if (ip->j_format.opcode == mm_j32_op)
		return 1;
	if (ip->j_format.opcode == mm_jal32_op)
		return 1;
	if (ip->r_format.opcode != mm_pool32a_op ||
			ip->r_format.func != mm_pool32axf_op)
		return 0;
	return ((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op;
#else
	if (ip->j_format.opcode == j_op)
		return 1;
	if (ip->j_format.opcode == jal_op)
		return 1;
	if (ip->r_format.opcode != spec_op)
		return 0;
	return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
#endif
}

static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size)
{
#ifdef CONFIG_CPU_MICROMIPS
	unsigned short tmp;

	/*
	 * addiusp -imm
	 * addius5 sp,-imm
	 * addiu32 sp,sp,-imm
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is not more fun...
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		if (ip->mm16_r3_format.opcode == mm_pool16d_op &&
		    ip->mm16_r3_format.simmediate & mm_addiusp_func) {
			tmp = ip->mm_b0_format.simmediate >> 1;
			tmp = ((tmp & 0x1ff) ^ 0x100) - 0x100;
			if ((tmp + 2) < 4) /* 0x0,0x1,0x1fe,0x1ff are special */
				tmp ^= 0x100;
			*frame_size = -(signed short)(tmp << 2);
			return 1;
		}
		if (ip->mm16_r5_format.opcode == mm_pool16d_op &&
		    ip->mm16_r5_format.rt == 29) {
			tmp = ip->mm16_r5_format.imm >> 1;
			*frame_size = -(signed short)(tmp & 0xf);
			return 1;
		}
		return 0;
	}

	if (ip->mm_i_format.opcode == mm_addiu32_op &&
	    ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29) {
		*frame_size = -ip->i_format.simmediate;
		return 1;
	}
#else
	/* addiu/daddiu sp,sp,-imm */
	if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
		return 0;

	if (ip->i_format.opcode == addiu_op ||
	    ip->i_format.opcode == daddiu_op) {
		*frame_size = -ip->i_format.simmediate;
		return 1;
	}
#endif
	return 0;
}

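/*
 * Worked example for the classic-MIPS path of is_sp_move_ins(): the
 * prologue instruction "addiu sp,sp,-32" has rs = rt = 29 ($sp) &
 * simmediate = -32, so *frame_size is set to -(-32) = 32 bytes.
 */
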
static int get_frame_info(struct mips_frame_info *info)
{
	bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
	union mips_instruction insn, *ip, *ip_end;
	unsigned int last_insn_size = 0;
	bool saw_jump = false;

	info->pc_offset = -1;
	info->frame_size = 0;

	ip = (void *)msk_isa16_mode((ulong)info->func);
	if (!ip)
		goto err;

	ip_end = (void *)ip + (info->func_size ? info->func_size : 512);

	while (ip < ip_end) {
		ip = (void *)ip + last_insn_size;

		if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
			insn.word = ip->halfword[0] << 16;
			last_insn_size = 2;
		} else if (is_mmips) {
			insn.word = ip->halfword[0] << 16 | ip->halfword[1];
			last_insn_size = 4;
		} else {
			insn.word = ip->word;
			last_insn_size = 4;
		}

		if (is_jr_ra_ins(ip)) {
			break;
		} else if (!info->frame_size) {
			is_sp_move_ins(&insn, &info->frame_size);
			continue;
		} else if (!saw_jump && is_jump_ins(ip)) {
			/*
			 * If we see a jump instruction, we are finished
			 * with the frame save.
			 *
			 * Some functions can have a shortcut return at
			 * the beginning of the function, so don't start
			 * looking for a jump instruction until we see the
			 * frame setup.
			 *
			 * The RA save instruction can get put into the
			 * delay slot of the jump instruction, so look
			 * at the next instruction, too.
			 */
			saw_jump = true;
			continue;
		}
		if (info->pc_offset == -1 &&
		    is_ra_save_ins(&insn, &info->pc_offset))
			break;
		if (saw_jump)
			break;
	}
	if (info->frame_size && info->pc_offset >= 0) /* nested */
		return 0;
	if (info->pc_offset < 0) /* leaf */
		return 1;
	/* prologue seems bogus... */
err:
	return -1;
}

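/*
 * Putting the scanners above together on a 32-bit kernel, a typical
 * non-leaf prologue such as
 *
 *	addiu	sp,sp,-32
 *	sw	ra,28(sp)
 *
 * yields frame_size = 32 & pc_offset = 7, whereas a leaf function
 * that never saves $ra leaves pc_offset at -1 & makes
 * get_frame_info() return 1.
 */
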
static struct mips_frame_info schedule_mfi __read_mostly;

#ifdef CONFIG_KALLSYMS
static unsigned long get___schedule_addr(void)
{
	return kallsyms_lookup_name("__schedule");
}
#else
static unsigned long get___schedule_addr(void)
{
	union mips_instruction *ip = (void *)schedule;
	int max_insns = 8;
	int i;

	for (i = 0; i < max_insns; i++, ip++) {
		if (ip->j_format.opcode == j_op)
			return J_TARGET(ip, ip->j_format.target);
	}
	return 0;
}
#endif

static int __init frame_info_init(void)
{
	unsigned long size = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long ofs;
#endif
	unsigned long addr;

	addr = get___schedule_addr();
	if (!addr)
		addr = (unsigned long)schedule;

#ifdef CONFIG_KALLSYMS
	kallsyms_lookup_size_offset(addr, &size, &ofs);
#endif
	schedule_mfi.func = (void *)addr;
	schedule_mfi.func_size = size;

	get_frame_info(&schedule_mfi);

	/*
	 * Without schedule() frame info, results given by
	 * thread_saved_pc() and __get_wchan() are not reliable.
	 */
	if (schedule_mfi.pc_offset < 0)
		printk("Can't analyze schedule() prologue at %p\n", schedule);

	return 0;
}

arch_initcall(frame_info_init);

/*
 * Return saved PC of a blocked thread.
 */
static unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;

	/* Newborn processes are a special case */
	if (t->reg31 == (unsigned long) ret_from_fork)
		return t->reg31;
	if (schedule_mfi.pc_offset < 0)
		return 0;
	return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
}


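/*
 * thread_saved_pc() above relies on a blocked task having stopped
 * inside __schedule(): t->reg29 is its saved $sp, so indexing it by
 * schedule_mfi.pc_offset reads back the $ra slot that __schedule()'s
 * prologue stored - the address the task will resume at.
 */
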
#ifdef CONFIG_KALLSYMS
/* generic stack unwinding function */
unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
					      unsigned long *sp,
					      unsigned long pc,
					      unsigned long *ra)
{
	unsigned long low, high, irq_stack_high;
	struct mips_frame_info info;
	unsigned long size, ofs;
	struct pt_regs *regs;
	int leaf;

	if (!stack_page)
		return 0;

	/*
	 * IRQ stacks start at IRQ_STACK_START
	 * task stacks at THREAD_SIZE - 32
	 */
	low = stack_page;
	if (!preemptible() && on_irq_stack(raw_smp_processor_id(), *sp)) {
		high = stack_page + IRQ_STACK_START;
		irq_stack_high = high;
	} else {
		high = stack_page + THREAD_SIZE - 32;
		irq_stack_high = 0;
	}

	/*
	 * If we reached the top of the interrupt stack, start unwinding
	 * the interrupted task stack.
	 */
	if (unlikely(*sp == irq_stack_high)) {
		unsigned long task_sp = *(unsigned long *)*sp;

		/*
		 * Check that the pointer saved in the IRQ stack head points to
		 * something within the stack of the current task
		 */
		if (!object_is_on_stack((void *)task_sp))
			return 0;

		/*
		 * Follow the pointer to the task's kernel stack frame where
		 * the interrupted state was saved.
		 */
		regs = (struct pt_regs *)task_sp;
		pc = regs->cp0_epc;
		if (!user_mode(regs) && __kernel_text_address(pc)) {
			*sp = regs->regs[29];
			*ra = regs->regs[31];
			return pc;
		}
		return 0;
	}
	if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
		return 0;
	/*
	 * Return ra if an exception occurred at the first instruction
	 */
	if (unlikely(ofs == 0)) {
		pc = *ra;
		*ra = 0;
		return pc;
	}

	info.func = (void *)(pc - ofs);
	info.func_size = ofs;	/* analyze from start to ofs */
	leaf = get_frame_info(&info);
	if (leaf < 0)
		return 0;

	if (*sp < low || *sp + info.frame_size > high)
		return 0;

	if (leaf)
		/*
		 * In some extreme cases, get_frame_info() can
		 * wrongly consider a nested function to be a leaf
		 * one. In those cases, avoid always returning the
		 * same value.
		 */
		pc = pc != *ra ? *ra : 0;
	else
		pc = ((unsigned long *)(*sp))[info.pc_offset];

	*sp += info.frame_size;
	*ra = 0;
	return __kernel_text_address(pc) ? pc : 0;
}
EXPORT_SYMBOL(unwind_stack_by_address);

/* used by show_backtrace() */
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
			   unsigned long pc, unsigned long *ra)
{
	unsigned long stack_page = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		if (on_irq_stack(cpu, *sp)) {
			stack_page = (unsigned long)irq_stack[cpu];
			break;
		}
	}

	if (!stack_page)
		stack_page = (unsigned long)task_stack_page(task);

	return unwind_stack_by_address(stack_page, sp, pc, ra);
}
#endif

/*
 * __get_wchan - a maintenance nightmare^W^Wpain in the ass ...
 */
unsigned long __get_wchan(struct task_struct *task)
{
	unsigned long pc = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long sp;
	unsigned long ra = 0;
#endif

	if (!task_stack_page(task))
		goto out;

	pc = thread_saved_pc(task);

#ifdef CONFIG_KALLSYMS
	sp = task->thread.reg29 + schedule_mfi.frame_size;

	while (in_sched_functions(pc))
		pc = unwind_stack(task, &sp, pc, &ra);
#endif

out:
	return pc;
}

unsigned long mips_stack_top(void)
{
	unsigned long top = TASK_SIZE & PAGE_MASK;

	if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) {
		/* One page for branch delay slot "emulation" */
		top -= PAGE_SIZE;
	}

	/* Space for the VDSO, data page & GIC user page */
	top -= PAGE_ALIGN(current->thread.abi->vdso->size);
	top -= PAGE_SIZE;
	top -= mips_gic_present() ? PAGE_SIZE : 0;

	/* Space for cache colour alignment */
	if (cpu_has_dc_aliases)
		top -= shm_align_mask + 1;

	/* Space to randomize the VDSO base */
	if (current->flags & PF_RANDOMIZE)
		top -= VDSO_RANDOMIZE_SIZE;

	return top;
}

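/*
 * Illustrative mips_stack_top() layout, assuming 4 KiB pages, FP
 * support, a one-page VDSO image, a GIC user page & neither cache
 * aliasing nor randomization: the usable stack tops out four pages
 * below TASK_SIZE (delay slot page + VDSO + data page + GIC page).
 */
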
/*
 * Don't forget that the stack pointer must be aligned on an 8-byte
 * boundary for the 32-bit ABIs and on a 16-byte boundary for the
 * 64-bit ABIs.
 */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_u32_below(PAGE_SIZE);

	return sp & ALMASK;
}

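/*
 * For example, with sp = 0x7fff8000 & a random offset of 0x1234,
 * arch_align_stack() returns (0x7fff8000 - 0x1234) & ALMASK =
 * 0x7fff6dc8 on a 32-bit kernel (ALMASK = ~7), preserving the
 * required 8-byte alignment.
 */
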
static struct cpumask backtrace_csd_busy;

static void handle_backtrace(void *info)
{
	nmi_cpu_backtrace(get_irq_regs());
	cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
}

static DEFINE_PER_CPU(call_single_data_t, backtrace_csd) =
	CSD_INIT(handle_backtrace, NULL);

static void raise_backtrace(cpumask_t *mask)
{
	call_single_data_t *csd;
	int cpu;

	for_each_cpu(cpu, mask) {
		/*
		 * If we previously sent an IPI to the target CPU & it hasn't
		 * cleared its bit in the busy cpumask then it didn't handle
		 * our previous IPI & it's not safe for us to reuse the
		 * call_single_data_t.
		 */
		if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
			pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
				cpu);
			continue;
		}

		csd = &per_cpu(backtrace_csd, cpu);
		smp_call_function_single_async(cpu, csd);
	}
}

bool arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
	return true;
}

int mips_get_process_fp_mode(struct task_struct *task)
{
	int value = 0;

	if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS))
		value |= PR_FP_MODE_FR;
	if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS))
		value |= PR_FP_MODE_FRE;

	return value;
}

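/*
 * mips_get_process_fp_mode() above & mips_set_process_fp_mode() below
 * back the generic prctl(PR_GET_FP_MODE)/prctl(PR_SET_FP_MODE) calls,
 * letting an o32 process switch between the FR=0, FR=1 & hybrid (FRE)
 * FP register models at runtime.
 */
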
static long prepare_for_fp_mode_switch(void *unused)
{
	/*
	 * This is icky, but we use this to simply ensure that all CPUs have
	 * context switched, regardless of whether they were previously running
	 * kernel or user code. This ensures that no CPU that a mode-switching
	 * program may execute on keeps its FPU enabled (& in the old mode)
	 * throughout the mode switch.
	 */
	return 0;
}

int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
{
	const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
	struct task_struct *t;
	struct cpumask process_cpus;
	int cpu;

	/* If nothing to change, return right away, successfully.  */
	if (value == mips_get_process_fp_mode(task))
		return 0;

	/* Only accept a mode change if 64-bit FP enabled for o32.  */
	if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
		return -EOPNOTSUPP;

	/* And only for o32 tasks.  */
	if (IS_ENABLED(CONFIG_64BIT) && !test_thread_flag(TIF_32BIT_REGS))
		return -EOPNOTSUPP;

	/* Check the value is valid */
	if (value & ~known_bits)
		return -EOPNOTSUPP;

	/* Setting FRE without FR is not supported.  */
	if ((value & (PR_FP_MODE_FR | PR_FP_MODE_FRE)) == PR_FP_MODE_FRE)
		return -EOPNOTSUPP;

	/* Avoid inadvertently triggering emulation */
	if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
	    !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
		return -EOPNOTSUPP;
	if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre)
		return -EOPNOTSUPP;

	/* FR = 0 not supported in MIPS R6 */
	if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
		return -EOPNOTSUPP;

	/* Indicate the new FP mode in each thread */
	for_each_thread(task, t) {
		/* Update desired FP register width */
		if (value & PR_FP_MODE_FR) {
			clear_tsk_thread_flag(t, TIF_32BIT_FPREGS);
		} else {
			set_tsk_thread_flag(t, TIF_32BIT_FPREGS);
			clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE);
		}

		/* Update desired FP single layout */
		if (value & PR_FP_MODE_FRE)
			set_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
		else
			clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
	}

	/*
	 * We need to ensure that all threads in the process have switched mode
	 * before returning, in order to allow userland to not worry about
	 * races. We can do this by forcing all CPUs that any thread in the
	 * process may be running on to schedule something else - in this case
	 * prepare_for_fp_mode_switch().
	 *
	 * We begin by generating a mask of all CPUs that any thread in the
	 * process may be running on.
	 */
	cpumask_clear(&process_cpus);
	for_each_thread(task, t)
		cpumask_set_cpu(task_cpu(t), &process_cpus);

	/*
	 * Now we schedule prepare_for_fp_mode_switch() on each of those CPUs.
	 *
	 * The CPUs may have rescheduled already since we switched mode or
	 * generated the cpumask, but that doesn't matter. If the task in this
	 * process is scheduled out then our scheduling
	 * prepare_for_fp_mode_switch() will simply be redundant. If it's
	 * scheduled in then it will already have picked up the new FP mode
	 * whilst doing so.
	 */
	cpus_read_lock();
	for_each_cpu_and(cpu, &process_cpus, cpu_online_mask)
		work_on_cpu(cpu, prepare_for_fp_mode_switch, NULL);
	cpus_read_unlock();

	return 0;
}

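/*
 * mips_dump_regs32()/mips_dump_regs64() below flatten a struct pt_regs
 * into the MIPS32_EF_* and MIPS64_EF_* register layouts consumed by
 * the ptrace regset & ELF core dump code; k0/k1 are dumped as zero
 * since they are kernel scratch registers & carry no user state.
 */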
#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
void mips_dump_regs32(u32 *uregs, const struct pt_regs *regs)
{
	unsigned int i;

	for (i = MIPS32_EF_R1; i <= MIPS32_EF_R31; i++) {
		/* k0/k1 are copied as zero. */
		if (i == MIPS32_EF_R26 || i == MIPS32_EF_R27)
			uregs[i] = 0;
		else
			uregs[i] = regs->regs[i - MIPS32_EF_R0];
	}

	uregs[MIPS32_EF_LO] = regs->lo;
	uregs[MIPS32_EF_HI] = regs->hi;
	uregs[MIPS32_EF_CP0_EPC] = regs->cp0_epc;
	uregs[MIPS32_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
	uregs[MIPS32_EF_CP0_STATUS] = regs->cp0_status;
	uregs[MIPS32_EF_CP0_CAUSE] = regs->cp0_cause;
}
#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */

#ifdef CONFIG_64BIT
void mips_dump_regs64(u64 *uregs, const struct pt_regs *regs)
{
	unsigned int i;

	for (i = MIPS64_EF_R1; i <= MIPS64_EF_R31; i++) {
		/* k0/k1 are copied as zero. */
		if (i == MIPS64_EF_R26 || i == MIPS64_EF_R27)
			uregs[i] = 0;
		else
			uregs[i] = regs->regs[i - MIPS64_EF_R0];
	}

	uregs[MIPS64_EF_LO] = regs->lo;
	uregs[MIPS64_EF_HI] = regs->hi;
	uregs[MIPS64_EF_CP0_EPC] = regs->cp0_epc;
	uregs[MIPS64_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
	uregs[MIPS64_EF_CP0_STATUS] = regs->cp0_status;
	uregs[MIPS64_EF_CP0_CAUSE] = regs->cp0_cause;
}
#endif /* CONFIG_64BIT */