// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 SiFive
 */

#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/stop_machine.h>
#include <asm/kprobes.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/patch.h>

struct patch_insn {
17 18 19 20 21 22
	void *addr;
	u32 insn;
	atomic_t cpu_count;
};

#ifdef CONFIG_MMU
static void *patch_map(void *addr, int fixmap)
24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39
{
	uintptr_t uintaddr = (uintptr_t) addr;
	struct page *page;

	if (core_kernel_text(uintaddr))
		page = phys_to_page(__pa_symbol(addr));
	else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
		page = vmalloc_to_page(addr);
	else
		return addr;

	BUG_ON(!page);

	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
					 (uintaddr & ~PAGE_MASK));
}
40
NOKPROBE_SYMBOL(patch_map);
/* Tear down the temporary fixmap alias established by patch_map(). */
static void patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}
NOKPROBE_SYMBOL(patch_unmap);
/*
 * Copy @len bytes of @insn into kernel text at @addr through a writable
 * fixmap alias.  Returns 0 on success or the error from the kernel-space
 * write.
 */
static int patch_insn_write(void *addr, const void *insn, size_t len)
{
	bool crosses_page = (((uintptr_t)addr & ~PAGE_MASK) + len) > PAGE_SIZE;
	void *waddr;
	int ret;

	/*
	 * Callers must already hold text_mutex, so no additional locking is
	 * taken here; the assertion enforces that contract across cores.
	 */
	lockdep_assert_held(&text_mutex);

	/* A write straddling a page boundary needs a second fixmap slot. */
	if (crosses_page)
		patch_map(addr + len, FIX_TEXT_POKE1);

	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = probe_kernel_write(waddr, insn, len);

	patch_unmap(FIX_TEXT_POKE0);

	if (crosses_page)
		patch_unmap(FIX_TEXT_POKE1);

	return ret;
}
NOKPROBE_SYMBOL(patch_insn_write);
#else
/* !CONFIG_MMU: text is directly writable, so no fixmap aliasing needed. */
static int patch_insn_write(void *addr, const void *insn, size_t len)
{
	return probe_kernel_write(addr, insn, len);
}
NOKPROBE_SYMBOL(patch_insn_write);
#endif /* CONFIG_MMU */
/*
 * Write @len bytes of instructions at @addr and flush the icache for the
 * patched range.  "nosync" means other harts are not serialized here; the
 * caller is responsible for any cross-CPU coordination.
 */
int patch_text_nosync(void *addr, const void *insns, size_t len)
{
	int ret = patch_insn_write(addr, insns, len);

	if (!ret)
		flush_icache_range((uintptr_t)addr, (uintptr_t)addr + len);

	return ret;
}
NOKPROBE_SYMBOL(patch_text_nosync);
static int patch_text_cb(void *data)
99
{
100
	struct patch_insn *patch = data;
101 102 103 104
	int ret = 0;

	if (atomic_inc_return(&patch->cpu_count) == 1) {
		ret =
105
		    patch_text_nosync(patch->addr, &patch->insn,
106 107 108 109 110 111 112 113 114 115
					    GET_INSN_LENGTH(patch->insn));
		atomic_inc(&patch->cpu_count);
	} else {
		while (atomic_read(&patch->cpu_count) <= num_online_cpus())
			cpu_relax();
		smp_mb();
	}

	return ret;
}
116
NOKPROBE_SYMBOL(patch_text_cb);
/*
 * Patch a single (possibly compressed) instruction at @addr on a live
 * system, using stop_machine() so no other hart runs while the kernel
 * text is being modified.  Returns 0 on success or a negative error.
 */
int patch_text(void *addr, u32 insn)
{
	struct patch_insn ctx = {
		.addr = addr,
		.insn = insn,
		.cpu_count = ATOMIC_INIT(0),
	};

	return stop_machine_cpuslocked(patch_text_cb, &ctx, cpu_online_mask);
}
NOKPROBE_SYMBOL(patch_text);