forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-10 23fa18eaa71266feff7ba8d83022d9e1cc83c65a
kernel/arch/arm64/kernel/insn.c
@@ -1,20 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2013 Huawei Ltd.
  * Author: Jiang Liu <liuj97@gmail.com>
  *
  * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 #include <linux/bitops.h>
 #include <linux/bug.h>
@@ -32,12 +21,13 @@
 #include <asm/fixmap.h>
 #include <asm/insn.h>
 #include <asm/kprobes.h>
+#include <asm/sections.h>
 
 #define AARCH64_INSN_SF_BIT     BIT(31)
 #define AARCH64_INSN_N_BIT      BIT(22)
 #define AARCH64_INSN_LSL_12     BIT(22)
 
-static int aarch64_insn_encoding_class[] = {
+static const int aarch64_insn_encoding_class[] = {
         AARCH64_INSN_CLS_UNKNOWN,
         AARCH64_INSN_CLS_UNKNOWN,
         AARCH64_INSN_CLS_UNKNOWN,
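
Note: the table is now const, so it lands in read-only data; it is never
written after build. It is indexed by bits [28:25] of an instruction, as
aarch64_get_insn_class() in the next hunk shows. A worked example (the NOP
encoding 0xd503201f and the entry value are taken from the full upstream
table, which this hunk truncates):

    u32 insn = 0xd503201f;          /* NOP */
    int idx  = (insn >> 25) & 0xf;  /* bits [28:25] = 0b1010 = 0xa */
    /* entry 0xa is AARCH64_INSN_CLS_BR_SYS, so NOP is classified as a
     * branch/exception-generating/system instruction. */
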
@@ -61,21 +51,27 @@
         return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
 }
 
-/* NOP is an alias of HINT */
-bool __kprobes aarch64_insn_is_nop(u32 insn)
+bool __kprobes aarch64_insn_is_steppable_hint(u32 insn)
 {
         if (!aarch64_insn_is_hint(insn))
                 return false;
 
         switch (insn & 0xFE0) {
-        case AARCH64_INSN_HINT_YIELD:
-        case AARCH64_INSN_HINT_WFE:
-        case AARCH64_INSN_HINT_WFI:
-        case AARCH64_INSN_HINT_SEV:
-        case AARCH64_INSN_HINT_SEVL:
-                return false;
-        default:
+        case AARCH64_INSN_HINT_XPACLRI:
+        case AARCH64_INSN_HINT_PACIA_1716:
+        case AARCH64_INSN_HINT_PACIB_1716:
+        case AARCH64_INSN_HINT_PACIAZ:
+        case AARCH64_INSN_HINT_PACIASP:
+        case AARCH64_INSN_HINT_PACIBZ:
+        case AARCH64_INSN_HINT_PACIBSP:
+        case AARCH64_INSN_HINT_BTI:
+        case AARCH64_INSN_HINT_BTIC:
+        case AARCH64_INSN_HINT_BTIJ:
+        case AARCH64_INSN_HINT_BTIJC:
+        case AARCH64_INSN_HINT_NOP:
                 return true;
+        default:
+                return false;
         }
 }
 
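
Note: this inverts the old deny-list into an allow-list. The removed
aarch64_insn_is_nop() rejected hints with side effects (YIELD, WFE, WFI,
SEV, SEVL) and accepted everything else; the new helper accepts only hints
known to be safe to single-step out of line (the PAC, BTI and NOP
encodings) and rejects anything unrecognised, which is the safer default
for kprobes. A minimal sketch of a caller, assuming a kprobes-style
decision helper (the name can_single_step() is hypothetical):

    static bool can_single_step(u32 insn)
    {
            if (aarch64_insn_is_hint(insn))
                    return aarch64_insn_is_steppable_hint(insn);
            /* ... checks for other instruction classes ... */
            return true;
    }
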
@@ -89,16 +85,29 @@
 
 static DEFINE_RAW_SPINLOCK(patch_lock);
 
+static bool is_exit_text(unsigned long addr)
+{
+        /* discarded with init text/data */
+        return system_state < SYSTEM_RUNNING &&
+                addr >= (unsigned long)__exittext_begin &&
+                addr < (unsigned long)__exittext_end;
+}
+
+static bool is_image_text(unsigned long addr)
+{
+        return core_kernel_text(addr) || is_exit_text(addr);
+}
+
 static void __kprobes *patch_map(void *addr, int fixmap)
 {
         unsigned long uintaddr = (uintptr_t) addr;
-        bool module = !core_kernel_text(uintaddr);
+        bool image = is_image_text(uintaddr);
         struct page *page;
 
-        if (module && IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
-                page = vmalloc_to_page(addr);
-        else if (!module)
+        if (image)
                 page = phys_to_page(__pa_symbol(addr));
+        else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
+                page = vmalloc_to_page(addr);
         else
                 return addr;
 
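
Note: patch_map() now distinguishes three cases: image text (core kernel
text, plus .exit.text while it still exists during boot) is backed by the
kernel image mapping, so phys_to_page(__pa_symbol()) applies; module text
lives in vmalloc space, so vmalloc_to_page() is needed when
CONFIG_STRICT_MODULE_RWX maps it read-only; otherwise the address is
writable as-is. The unchanged tail of the function (outside this hunk)
maps the resolved page through a fixmap slot, roughly:

    BUG_ON(!page);
    return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
                    (uintaddr & ~PAGE_MASK));
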
@@ -120,7 +129,7 @@
         int ret;
         __le32 val;
 
-        ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
+        ret = copy_from_kernel_nofault(&val, addr, AARCH64_INSN_SIZE);
         if (!ret)
                 *insnp = le32_to_cpu(val);
 
@@ -136,7 +145,7 @@
         raw_spin_lock_irqsave(&patch_lock, flags);
         waddr = patch_map(addr, FIX_TEXT_POKE0);
 
-        ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);
+        ret = copy_to_kernel_nofault(waddr, &insn, AARCH64_INSN_SIZE);
 
         patch_unmap(FIX_TEXT_POKE0);
         raw_spin_unlock_irqrestore(&patch_lock, flags);
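
Note: probe_kernel_read() and probe_kernel_write() were renamed to
copy_from_kernel_nofault() and copy_to_kernel_nofault() in v5.8; the
semantics are unchanged: copy with page faults disabled, returning a
negative error instead of oopsing on a bad address. For reference, the
prototypes from <linux/uaccess.h>:

    long copy_from_kernel_nofault(void *dst, const void *src, size_t size);
    long copy_to_kernel_nofault(void *dst, const void *src, size_t size);
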
@@ -161,7 +170,7 @@
 
 bool __kprobes aarch64_insn_is_branch(u32 insn)
 {
-        /* b, bl, cb*, tb*, b.cond, br, blr */
+        /* b, bl, cb*, tb*, ret*, b.cond, br*, blr* */
 
         return aarch64_insn_is_b(insn) ||
                aarch64_insn_is_bl(insn) ||
@@ -170,8 +179,11 @@
                aarch64_insn_is_tbz(insn) ||
                aarch64_insn_is_tbnz(insn) ||
                aarch64_insn_is_ret(insn) ||
+               aarch64_insn_is_ret_auth(insn) ||
                aarch64_insn_is_br(insn) ||
+               aarch64_insn_is_br_auth(insn) ||
                aarch64_insn_is_blr(insn) ||
+               aarch64_insn_is_blr_auth(insn) ||
                aarch64_insn_is_bcond(insn);
 }
 
@@ -204,8 +216,8 @@
         int i, ret = 0;
         struct aarch64_insn_patch *pp = arg;
 
-        /* The first CPU becomes master */
-        if (atomic_inc_return(&pp->cpu_count) == 1) {
+        /* The last CPU becomes master */
+        if (atomic_inc_return(&pp->cpu_count) == num_online_cpus()) {
                 for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
                         ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
                                                              pp->new_insns[i]);
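
Note: this callback runs on every online CPU under stop_machine(), with
interrupts disabled. Electing the last CPU to arrive as the patching
master guarantees that every other CPU is already parked inside the
callback before any instruction is rewritten; under the old first-arrival
rule the master could begin patching while a late CPU was still making its
way into the handler. The other side of the rendezvous (the unchanged else
branch, outside this hunk) spins until the master's extra increment
signals completion, then executes isb() to resynchronise the instruction
stream; roughly:

    } else {
            while (atomic_read(&pp->cpu_count) <= num_online_cpus())
                    cpu_relax();
            isb();
    }
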
@@ -571,7 +583,7 @@
                                              offset >> 2);
 }
 
-u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
+u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_cr_op op)
 {
         return aarch64_insn_get_hint_value() | op;
 }
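
Note: only the parameter type changes here; the enum was renamed upstream
(aarch64_insn_hint_op to aarch64_insn_hint_cr_op) when the BTI hint
encodings were added. The encoding itself is untouched: a HINT instruction
is the base HINT opcode with the CRm:op2 field (bits [11:5]) OR'd in. The
NOP generator elsewhere in this file builds directly on it, as upstream
defines it:

    u32 __kprobes aarch64_insn_gen_nop(void)
    {
            return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
    }
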
@@ -1280,6 +1292,48 @@
 }
 
 /*
+ * MOV (register) is architecturally an alias of ORR (shifted register) where
+ * MOV <*d>, <*m> is equivalent to ORR <*d>, <*ZR>, <*m>
+ */
+u32 aarch64_insn_gen_move_reg(enum aarch64_insn_register dst,
+                              enum aarch64_insn_register src,
+                              enum aarch64_insn_variant variant)
+{
+        return aarch64_insn_gen_logical_shifted_reg(dst, AARCH64_INSN_REG_ZR,
+                                                    src, 0, variant,
+                                                    AARCH64_INSN_LOGIC_ORR);
+}
+
+u32 aarch64_insn_gen_adr(unsigned long pc, unsigned long addr,
+                         enum aarch64_insn_register reg,
+                         enum aarch64_insn_adr_type type)
+{
+        u32 insn;
+        s32 offset;
+
+        switch (type) {
+        case AARCH64_INSN_ADR_TYPE_ADR:
+                insn = aarch64_insn_get_adr_value();
+                offset = addr - pc;
+                break;
+        case AARCH64_INSN_ADR_TYPE_ADRP:
+                insn = aarch64_insn_get_adrp_value();
+                offset = (addr - ALIGN_DOWN(pc, SZ_4K)) >> 12;
+                break;
+        default:
+                pr_err("%s: unknown adr encoding %d\n", __func__, type);
+                return AARCH64_BREAK_FAULT;
+        }
+
+        if (offset < -SZ_1M || offset >= SZ_1M)
+                return AARCH64_BREAK_FAULT;
+
+        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, reg);
+
+        return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn, offset);
+}
+
+/*
  * Decode the imm field of a branch, and return the byte offset as a
  * signed value (so it can be used when computing a new branch
  * target).
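
Note: a worked example for the new aarch64_insn_gen_adr() helper. For ADR
the offset is a signed byte distance from pc, so the +/-1 MiB guard matches
the instruction's 21-bit immediate directly; for ADRP the offset is counted
in 4 KiB pages relative to pc's page, so the same 21-bit field reaches
+/-4 GiB. (For the MOV helper above, aarch64_insn_gen_move_reg with
AARCH64_INSN_REG_0, AARCH64_INSN_REG_1 and AARCH64_INSN_VARIANT_64BIT
should encode 0xaa0103e0, i.e. mov x0, x1 alias orr x0, xzr, x1.) A sketch
with made-up addresses:

    /* hypothetical addresses, chosen only to illustrate the arithmetic */
    unsigned long pc   = 0xffff000010001abc;
    unsigned long addr = 0xffff000010003008;

    /* ADR: offset = addr - pc = 0x154c bytes, well within +/-1 MiB */
    u32 adr = aarch64_insn_gen_adr(pc, addr, AARCH64_INSN_REG_0,
                                   AARCH64_INSN_ADR_TYPE_ADR);

    /* ADRP: ALIGN_DOWN(pc, SZ_4K) = 0xffff000010001000, so
     * offset = 0x2008 >> 12 = 2 pages; the low 12 bits of addr (0x008)
     * must come from a following ADD or load/store immediate. */
    u32 adrp = aarch64_insn_gen_adr(pc, addr, AARCH64_INSN_REG_0,
                                    AARCH64_INSN_ADR_TYPE_ADRP);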