Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/arch/x86/lib/insn.c
29269 views
1
// SPDX-License-Identifier: GPL-2.0-or-later
2
/*
3
* x86 instruction analysis
4
*
5
* Copyright (C) IBM Corporation, 2002, 2004, 2009
6
*/
7
8
#include <linux/kernel.h>
9
#ifdef __KERNEL__
10
#include <linux/string.h>
11
#else
12
#include <string.h>
13
#endif
14
#include <asm/inat.h> /* __ignore_sync_check__ */
15
#include <asm/insn.h> /* __ignore_sync_check__ */
16
#include <linux/unaligned.h> /* __ignore_sync_check__ */
17
18
#include <linux/errno.h>
19
#include <linux/kconfig.h>
20
21
#include <asm/emulate_prefix.h> /* __ignore_sync_check__ */
22
23
/*
 * leXX_to_cpu() - convert a little-endian value of sizeof(t) bytes to CPU
 * byte order. Evaluates to the converted value; any unsupported size is
 * rejected at compile time via BUILD_BUG().
 */
#define leXX_to_cpu(t, r)						\
({									\
	__typeof__(t) v;						\
	switch (sizeof(t)) {						\
	case 4: v = le32_to_cpu(r); break;				\
	case 2: v = le16_to_cpu(r); break;				\
	case 1:	v = r; break;						\
	default:							\
		BUILD_BUG(); break;					\
	}								\
	v;								\
})

/* Verify next sizeof(t) bytes can be on the same instruction */
#define validate_next(t, insn, n)	\
	((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)

/* Unchecked fetch: read a t at next_byte and advance past it. */
#define __get_next(t, insn)	\
	({ t r = get_unaligned((t *)(insn)->next_byte); (insn)->next_byte += sizeof(t); leXX_to_cpu(t, r); })

/* Unchecked peek: read the t located n t-sized elements ahead; no advance. */
#define __peek_nbyte_next(t, insn, n)	\
	({ t r = get_unaligned((t *)(insn)->next_byte + n); leXX_to_cpu(t, r); })

/*
 * Bounds-checked fetch/peek. NOTE: on buffer overrun these jump to an
 * err_out label that must exist in the calling function.
 */
#define get_next(t, insn)	\
	({ if (unlikely(!validate_next(t, insn, 0))) goto err_out; __get_next(t, insn); })

#define peek_nbyte_next(t, insn, n)	\
	({ if (unlikely(!validate_next(t, insn, n))) goto err_out; __peek_nbyte_next(t, insn, n); })

#define peek_next(t, insn)	peek_nbyte_next(t, insn, 0)
53
54
/**
55
* insn_init() - initialize struct insn
56
* @insn: &struct insn to be initialized
57
* @kaddr: address (in kernel memory) of instruction (or copy thereof)
58
* @buf_len: length of the insn buffer at @kaddr
59
* @x86_64: !0 for 64-bit kernel or 64-bit app
60
*/
61
void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
62
{
63
/*
64
* Instructions longer than MAX_INSN_SIZE (15 bytes) are invalid
65
* even if the input buffer is long enough to hold them.
66
*/
67
if (buf_len > MAX_INSN_SIZE)
68
buf_len = MAX_INSN_SIZE;
69
70
memset(insn, 0, sizeof(*insn));
71
insn->kaddr = kaddr;
72
insn->end_kaddr = kaddr + buf_len;
73
insn->next_byte = kaddr;
74
insn->x86_64 = x86_64;
75
insn->opnd_bytes = 4;
76
if (x86_64)
77
insn->addr_bytes = 8;
78
else
79
insn->addr_bytes = 4;
80
}
81
82
/* Magic byte sequences hypervisors use to mark instructions for emulation. */
static const insn_byte_t xen_prefix[] = { __XEN_EMULATE_PREFIX };
static const insn_byte_t kvm_prefix[] = { __KVM_EMULATE_PREFIX };

/*
 * __insn_get_emulate_prefix() - try to consume one emulate-prefix sequence.
 * @insn:	&struct insn being decoded
 * @prefix:	candidate byte sequence
 * @len:	length of @prefix in bytes
 *
 * Returns 1 and advances @insn->next_byte past the prefix if all @len bytes
 * match; returns 0 otherwise (including when the buffer is too short —
 * peek_nbyte_next() jumps to err_out on overrun).
 */
static int __insn_get_emulate_prefix(struct insn *insn,
				     const insn_byte_t *prefix, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		if (peek_nbyte_next(insn_byte_t, insn, i) != prefix[i])
			goto err_out;
	}

	insn->emulate_prefix_size = len;
	insn->next_byte += len;

	return 1;

err_out:
	return 0;
}
103
104
static void insn_get_emulate_prefix(struct insn *insn)
105
{
106
if (__insn_get_emulate_prefix(insn, xen_prefix, sizeof(xen_prefix)))
107
return;
108
109
__insn_get_emulate_prefix(insn, kvm_prefix, sizeof(kvm_prefix));
110
}
111
112
/**
 * insn_get_prefixes - scan x86 instruction prefix bytes
 * @insn: &struct insn containing instruction
 *
 * Populates the @insn->prefixes bitmap, and updates @insn->next_byte
 * to point to the (first) opcode. No effect if @insn->prefixes.got
 * is already set.
 *
 * Returns:
 * 0: on success
 * < 0: on error
 */
int insn_get_prefixes(struct insn *insn)
{
	struct insn_field *prefixes = &insn->prefixes;
	insn_attr_t attr;
	insn_byte_t b, lb;
	int i, nb;

	if (prefixes->got)
		return 0;

	insn_get_emulate_prefix(insn);

	nb = 0;
	lb = 0;
	b = peek_next(insn_byte_t, insn);
	attr = inat_get_opcode_attribute(b);
	while (inat_is_legacy_prefix(attr)) {
		/* Skip if same prefix */
		for (i = 0; i < nb; i++)
			if (prefixes->bytes[i] == b)
				goto found;
		if (nb == 4)
			/* Invalid instruction */
			break;
		prefixes->bytes[nb++] = b;
		if (inat_is_address_size_prefix(attr)) {
			/* address size switches 2/4 or 4/8 (XOR toggles both ways) */
			if (insn->x86_64)
				insn->addr_bytes ^= 12;
			else
				insn->addr_bytes ^= 6;
		} else if (inat_is_operand_size_prefix(attr)) {
			/* operand size switches 2/4 */
			insn->opnd_bytes ^= 6;
		}
found:
		prefixes->nbytes++;
		insn->next_byte++;
		lb = b;
		b = peek_next(insn_byte_t, insn);
		attr = inat_get_opcode_attribute(b);
	}
	/* Set the last prefix: bytes[3] is reserved for the last-seen prefix */
	if (lb && lb != insn->prefixes.bytes[3]) {
		if (unlikely(insn->prefixes.bytes[3])) {
			/* Swap the last prefix */
			b = insn->prefixes.bytes[3];
			for (i = 0; i < nb; i++)
				if (prefixes->bytes[i] == lb)
					insn_set_byte(prefixes, i, b);
		}
		insn_set_byte(&insn->prefixes, 3, lb);
	}

	/* Decode REX prefix */
	if (insn->x86_64) {
		b = peek_next(insn_byte_t, insn);
		attr = inat_get_opcode_attribute(b);
		if (inat_is_rex_prefix(attr)) {
			insn_field_set(&insn->rex_prefix, b, 1);
			insn->next_byte++;
			if (X86_REX_W(b))
				/* REX.W overrides opnd_size */
				insn->opnd_bytes = 8;
		} else if (inat_is_rex2_prefix(attr)) {
			/* REX2 (APX) is a two-byte prefix; the second byte holds the payload */
			insn_set_byte(&insn->rex_prefix, 0, b);
			b = peek_nbyte_next(insn_byte_t, insn, 1);
			insn_set_byte(&insn->rex_prefix, 1, b);
			insn->rex_prefix.nbytes = 2;
			insn->next_byte += 2;
			if (X86_REX_W(b))
				/* REX.W overrides opnd_size */
				insn->opnd_bytes = 8;
			insn->rex_prefix.got = 1;
			/* REX2 cannot be followed by a VEX/XOP prefix */
			goto vex_end;
		}
	}
	insn->rex_prefix.got = 1;

	/* Decode VEX/XOP prefix */
	b = peek_next(insn_byte_t, insn);
	if (inat_is_vex_prefix(attr) || inat_is_xop_prefix(attr)) {
		insn_byte_t b2 = peek_nbyte_next(insn_byte_t, insn, 1);

		if (inat_is_xop_prefix(attr) && X86_MODRM_REG(b2) == 0) {
			/* Grp1A.0 is always POP Ev */
			goto vex_end;
		} else if (!insn->x86_64) {
			/*
			 * In 32-bits mode, if the [7:6] bits (mod bits of
			 * ModRM) on the second byte are not 11b, it is
			 * LDS or LES or BOUND.
			 */
			if (X86_MODRM_MOD(b2) != 3)
				goto vex_end;
		}
		insn_set_byte(&insn->vex_prefix, 0, b);
		insn_set_byte(&insn->vex_prefix, 1, b2);
		if (inat_is_evex_prefix(attr)) {
			/* EVEX is a 4-byte prefix */
			b2 = peek_nbyte_next(insn_byte_t, insn, 2);
			insn_set_byte(&insn->vex_prefix, 2, b2);
			b2 = peek_nbyte_next(insn_byte_t, insn, 3);
			insn_set_byte(&insn->vex_prefix, 3, b2);
			insn->vex_prefix.nbytes = 4;
			insn->next_byte += 4;
			if (insn->x86_64 && X86_VEX_W(b2))
				/* VEX.W overrides opnd_size */
				insn->opnd_bytes = 8;
		} else if (inat_is_vex3_prefix(attr) || inat_is_xop_prefix(attr)) {
			b2 = peek_nbyte_next(insn_byte_t, insn, 2);
			insn_set_byte(&insn->vex_prefix, 2, b2);
			insn->vex_prefix.nbytes = 3;
			insn->next_byte += 3;
			if (insn->x86_64 && X86_VEX_W(b2))
				/* VEX.W/XOP.W overrides opnd_size */
				insn->opnd_bytes = 8;
		} else {
			/*
			 * For VEX2, fake VEX3-like byte#2.
			 * Makes it easier to decode vex.W, vex.vvvv,
			 * vex.L and vex.pp. Masking with 0x7f sets vex.W == 0.
			 */
			insn_set_byte(&insn->vex_prefix, 2, b2 & 0x7f);
			insn->vex_prefix.nbytes = 2;
			insn->next_byte += 2;
		}
	}
vex_end:
	insn->vex_prefix.got = 1;

	prefixes->got = 1;

	return 0;

err_out:
	return -ENODATA;
}
261
262
/**
 * insn_get_opcode - collect opcode(s)
 * @insn: &struct insn containing instruction
 *
 * Populates @insn->opcode, updates @insn->next_byte to point past the
 * opcode byte(s), and sets @insn->attr (except for groups).
 * If necessary, first collects any preceding (prefix) bytes.
 * Sets @insn->opcode.value = opcode1. No effect if @insn->opcode.got
 * is already 1.
 *
 * Returns:
 * 0: on success
 * < 0: on error
 */
int insn_get_opcode(struct insn *insn)
{
	struct insn_field *opcode = &insn->opcode;
	int pfx_id, ret;
	insn_byte_t op;

	if (opcode->got)
		return 0;

	ret = insn_get_prefixes(insn);
	if (ret)
		return ret;

	/* Get first opcode */
	op = get_next(insn_byte_t, insn);
	insn_set_byte(opcode, 0, op);
	opcode->nbytes = 1;

	/* Check if there is VEX/XOP prefix or not */
	if (insn_is_avx_or_xop(insn)) {
		insn_byte_t m, p;

		/* XOP prefix has different encoding */
		if (unlikely(avx_insn_is_xop(insn))) {
			m = insn_xop_map_bits(insn);
			insn->attr = inat_get_xop_attribute(op, m);
			if (!inat_accept_xop(insn->attr)) {
				/* Not a valid XOP instruction */
				insn->attr = 0;
				return -EINVAL;
			}
			/* XOP has only 1 byte for opcode */
			goto end;
		}

		m = insn_vex_m_bits(insn);
		p = insn_vex_p_bits(insn);
		insn->attr = inat_get_avx_attribute(op, m, p);
		/* SCALABLE EVEX uses p bits to encode operand size */
		if (inat_evex_scalable(insn->attr) && !insn_vex_w_bit(insn) &&
		    p == INAT_PFX_OPNDSZ)
			insn->opnd_bytes = 2;
		if ((inat_must_evex(insn->attr) && !insn_is_evex(insn)) ||
		    (!inat_accept_vex(insn->attr) &&
		     !inat_is_group(insn->attr))) {
			/* This instruction is bad */
			insn->attr = 0;
			return -EINVAL;
		}
		/* VEX has only 1 byte for opcode */
		goto end;
	}

	/* Check if there is REX2 prefix or not */
	if (insn_is_rex2(insn)) {
		if (insn_rex2_m_bit(insn)) {
			/* map 1 is escape 0x0f */
			insn_attr_t esc_attr = inat_get_opcode_attribute(0x0f);

			pfx_id = insn_last_prefix_id(insn);
			insn->attr = inat_get_escape_attribute(op, pfx_id, esc_attr);
		} else {
			insn->attr = inat_get_opcode_attribute(op);
		}
		goto end;
	}

	insn->attr = inat_get_opcode_attribute(op);
	if (insn->x86_64 && inat_is_invalid64(insn->attr)) {
		/* This instruction is invalid, like UD2. Stop decoding. */
		insn->attr &= INAT_INV64;
	}

	/* Follow 0x0f / 0x0f38 / 0x0f3a escape sequences to multi-byte opcodes */
	while (inat_is_escape(insn->attr)) {
		/* Get escaped opcode */
		op = get_next(insn_byte_t, insn);
		opcode->bytes[opcode->nbytes++] = op;
		pfx_id = insn_last_prefix_id(insn);
		insn->attr = inat_get_escape_attribute(op, pfx_id, insn->attr);
	}

	if (inat_must_vex(insn->attr)) {
		/* This instruction is bad */
		insn->attr = 0;
		return -EINVAL;
	}

end:
	opcode->got = 1;
	return 0;

err_out:
	return -ENODATA;
}
369
370
/**
 * insn_get_modrm - collect ModRM byte, if any
 * @insn: &struct insn containing instruction
 *
 * Populates @insn->modrm and updates @insn->next_byte to point past the
 * ModRM byte, if any. If necessary, first collects the preceding bytes
 * (prefixes and opcode(s)). No effect if @insn->modrm.got is already 1.
 *
 * Returns:
 * 0: on success
 * < 0: on error
 */
int insn_get_modrm(struct insn *insn)
{
	struct insn_field *modrm = &insn->modrm;
	insn_byte_t pfx_id, mod;
	int ret;

	if (modrm->got)
		return 0;

	ret = insn_get_opcode(insn);
	if (ret)
		return ret;

	if (inat_has_modrm(insn->attr)) {
		mod = get_next(insn_byte_t, insn);
		insn_field_set(modrm, mod, 1);
		if (inat_is_group(insn->attr)) {
			/* For group opcodes, ModRM.reg selects the actual insn */
			pfx_id = insn_last_prefix_id(insn);
			insn->attr = inat_get_group_attribute(mod, pfx_id,
							      insn->attr);
			if (insn_is_avx_or_xop(insn) && !inat_accept_vex(insn->attr) &&
			    !inat_accept_xop(insn->attr)) {
				/* Bad insn */
				insn->attr = 0;
				return -EINVAL;
			}
		}
	}

	if (insn->x86_64 && inat_is_force64(insn->attr))
		insn->opnd_bytes = 8;

	modrm->got = 1;
	return 0;

err_out:
	return -ENODATA;
}
420
421
422
/**
423
* insn_rip_relative() - Does instruction use RIP-relative addressing mode?
424
* @insn: &struct insn containing instruction
425
*
426
* If necessary, first collects the instruction up to and including the
427
* ModRM byte. No effect if @insn->x86_64 is 0.
428
*/
429
int insn_rip_relative(struct insn *insn)
430
{
431
struct insn_field *modrm = &insn->modrm;
432
int ret;
433
434
if (!insn->x86_64)
435
return 0;
436
437
ret = insn_get_modrm(insn);
438
if (ret)
439
return 0;
440
/*
441
* For rip-relative instructions, the mod field (top 2 bits)
442
* is zero and the r/m field (bottom 3 bits) is 0x5.
443
*/
444
return (modrm->nbytes && (modrm->bytes[0] & 0xc7) == 0x5);
445
}
446
447
/**
 * insn_get_sib() - Get the SIB byte of instruction
 * @insn: &struct insn containing instruction
 *
 * If necessary, first collects the instruction up to and including the
 * ModRM byte.
 *
 * Returns:
 * 0: if decoding succeeded
 * < 0: otherwise.
 */
int insn_get_sib(struct insn *insn)
{
	insn_byte_t modrm;
	int ret;

	if (insn->sib.got)
		return 0;

	ret = insn_get_modrm(insn);
	if (ret)
		return ret;

	if (insn->modrm.nbytes) {
		modrm = insn->modrm.bytes[0];
		/* SIB exists only for 32/64-bit addressing, mod != 11b, r/m == 100b */
		if (insn->addr_bytes != 2 &&
		    X86_MODRM_MOD(modrm) != 3 && X86_MODRM_RM(modrm) == 4) {
			insn_field_set(&insn->sib,
				       get_next(insn_byte_t, insn), 1);
		}
	}
	insn->sib.got = 1;

	return 0;

err_out:
	return -ENODATA;
}
485
486
487
/**
 * insn_get_displacement() - Get the displacement of instruction
 * @insn: &struct insn containing instruction
 *
 * If necessary, first collects the instruction up to and including the
 * SIB byte.
 * Displacement value is sign-expanded.
 *
 * Returns:
 * 0: if decoding succeeded
 * < 0: otherwise.
 */
int insn_get_displacement(struct insn *insn)
{
	insn_byte_t mod, rm, base;
	int ret;

	if (insn->displacement.got)
		return 0;

	ret = insn_get_sib(insn);
	if (ret)
		return ret;

	if (insn->modrm.nbytes) {
		/*
		 * Interpreting the modrm byte:
		 * mod = 00 - no displacement fields (exceptions below)
		 * mod = 01 - 1-byte displacement field
		 * mod = 10 - displacement field is 4 bytes, or 2 bytes if
		 * 	address size = 2 (0x67 prefix in 32-bit mode)
		 * mod = 11 - no memory operand
		 *
		 * If address size = 2...
		 * mod = 00, r/m = 110 - displacement field is 2 bytes
		 *
		 * If address size != 2...
		 * mod != 11, r/m = 100 - SIB byte exists
		 * mod = 00, SIB base = 101 - displacement field is 4 bytes
		 * mod = 00, r/m = 101 - rip-relative addressing, displacement
		 * 	field is 4 bytes
		 */
		mod = X86_MODRM_MOD(insn->modrm.value);
		rm = X86_MODRM_RM(insn->modrm.value);
		base = X86_SIB_BASE(insn->sib.value);
		if (mod == 3)
			goto out;
		if (mod == 1) {
			/* disp8: sign-extended by the signed char fetch */
			insn_field_set(&insn->displacement,
				       get_next(signed char, insn), 1);
		} else if (insn->addr_bytes == 2) {
			if ((mod == 0 && rm == 6) || mod == 2) {
				insn_field_set(&insn->displacement,
					       get_next(short, insn), 2);
			}
		} else {
			if ((mod == 0 && rm == 5) || mod == 2 ||
			    (mod == 0 && base == 5)) {
				insn_field_set(&insn->displacement,
					       get_next(int, insn), 4);
			}
		}
	}
out:
	insn->displacement.got = 1;
	return 0;

err_out:
	return -ENODATA;
}
557
558
/* Decode moffset16/32/64. Return 0 if failed */
static int __get_moffset(struct insn *insn)
{
	/* Size of the memory offset follows the effective address size. */
	switch (insn->addr_bytes) {
	case 2:
		insn_field_set(&insn->moffset1, get_next(short, insn), 2);
		break;
	case 4:
		insn_field_set(&insn->moffset1, get_next(int, insn), 4);
		break;
	case 8:
		/* 64-bit offset is split across moffset1 (low) and moffset2 (high) */
		insn_field_set(&insn->moffset1, get_next(int, insn), 4);
		insn_field_set(&insn->moffset2, get_next(int, insn), 4);
		break;
	default:	/* opnd_bytes must be modified manually */
		goto err_out;
	}
	insn->moffset1.got = insn->moffset2.got = 1;

	return 1;

err_out:
	return 0;
}
582
583
/* Decode imm v32(Iz). Return 0 if failed */
static int __get_immv32(struct insn *insn)
{
	switch (insn->opnd_bytes) {
	case 2:
		insn_field_set(&insn->immediate, get_next(short, insn), 2);
		break;
	case 4:
	case 8:
		/* Iz immediates are at most 32 bits even with 64-bit operands */
		insn_field_set(&insn->immediate, get_next(int, insn), 4);
		break;
	default:	/* opnd_bytes must be modified manually */
		goto err_out;
	}

	return 1;

err_out:
	return 0;
}
603
604
/* Decode imm v64(Iv/Ov), Return 0 if failed */
static int __get_immv(struct insn *insn)
{
	switch (insn->opnd_bytes) {
	case 2:
		insn_field_set(&insn->immediate1, get_next(short, insn), 2);
		break;
	case 4:
		insn_field_set(&insn->immediate1, get_next(int, insn), 4);
		/* NOTE(review): looks redundant — insn_field_set() above already sets nbytes */
		insn->immediate1.nbytes = 4;
		break;
	case 8:
		/* 64-bit immediate is split across immediate1 (low) and immediate2 (high) */
		insn_field_set(&insn->immediate1, get_next(int, insn), 4);
		insn_field_set(&insn->immediate2, get_next(int, insn), 4);
		break;
	default:	/* opnd_bytes must be modified manually */
		goto err_out;
	}
	insn->immediate1.got = insn->immediate2.got = 1;

	return 1;
err_out:
	return 0;
}
628
629
/* Decode ptr16:16/32(Ap). Return 0 if failed */
static int __get_immptr(struct insn *insn)
{
	switch (insn->opnd_bytes) {
	case 2:
		insn_field_set(&insn->immediate1, get_next(short, insn), 2);
		break;
	case 4:
		insn_field_set(&insn->immediate1, get_next(int, insn), 4);
		break;
	case 8:
		/* ptr16:64 does not exist (no segment) */
		return 0;
	default:	/* opnd_bytes must be modified manually */
		goto err_out;
	}
	/* The trailing 16-bit segment selector goes into immediate2 */
	insn_field_set(&insn->immediate2, get_next(unsigned short, insn), 2);
	insn->immediate1.got = insn->immediate2.got = 1;

	return 1;
err_out:
	return 0;
}
652
653
/**
 * insn_get_immediate() - Get the immediate in an instruction
 * @insn: &struct insn containing instruction
 *
 * If necessary, first collects the instruction up to and including the
 * displacement bytes.
 * Basically, most of immediates are sign-expanded. Unsigned-value can be
 * computed by bit masking with ((1 << (nbytes * 8)) - 1)
 *
 * Returns:
 * 0: on success
 * < 0: on error
 */
int insn_get_immediate(struct insn *insn)
{
	int ret;

	if (insn->immediate.got)
		return 0;

	ret = insn_get_displacement(insn);
	if (ret)
		return ret;

	/* Memory-offset form (e.g. MOV AL, moffs8) has no ordinary immediate */
	if (inat_has_moffset(insn->attr)) {
		if (!__get_moffset(insn))
			goto err_out;
		goto done;
	}

	if (!inat_has_immediate(insn->attr))
		goto done;

	switch (inat_immediate_size(insn->attr)) {
	case INAT_IMM_BYTE:
		insn_field_set(&insn->immediate, get_next(signed char, insn), 1);
		break;
	case INAT_IMM_WORD:
		insn_field_set(&insn->immediate, get_next(short, insn), 2);
		break;
	case INAT_IMM_DWORD:
		insn_field_set(&insn->immediate, get_next(int, insn), 4);
		break;
	case INAT_IMM_QWORD:
		/* 64-bit immediate split across immediate1 (low) and immediate2 (high) */
		insn_field_set(&insn->immediate1, get_next(int, insn), 4);
		insn_field_set(&insn->immediate2, get_next(int, insn), 4);
		break;
	case INAT_IMM_PTR:
		if (!__get_immptr(insn))
			goto err_out;
		break;
	case INAT_IMM_VWORD32:
		if (!__get_immv32(insn))
			goto err_out;
		break;
	case INAT_IMM_VWORD:
		if (!__get_immv(insn))
			goto err_out;
		break;
	default:
		/* Here, insn must have an immediate, but failed */
		goto err_out;
	}
	if (inat_has_second_immediate(insn->attr)) {
		/* Second immediate is always one byte (e.g. ENTER imm16, imm8) */
		insn_field_set(&insn->immediate2, get_next(signed char, insn), 1);
	}
done:
	insn->immediate.got = 1;
	return 0;

err_out:
	return -ENODATA;
}
726
727
/**
728
* insn_get_length() - Get the length of instruction
729
* @insn: &struct insn containing instruction
730
*
731
* If necessary, first collects the instruction up to and including the
732
* immediates bytes.
733
*
734
* Returns:
735
* - 0 on success
736
* - < 0 on error
737
*/
738
int insn_get_length(struct insn *insn)
739
{
740
int ret;
741
742
if (insn->length)
743
return 0;
744
745
ret = insn_get_immediate(insn);
746
if (ret)
747
return ret;
748
749
insn->length = (unsigned char)((unsigned long)insn->next_byte
750
- (unsigned long)insn->kaddr);
751
752
return 0;
753
}
754
755
/* Ensure this instruction is decoded completely */
756
static inline int insn_complete(struct insn *insn)
757
{
758
return insn->opcode.got && insn->modrm.got && insn->sib.got &&
759
insn->displacement.got && insn->immediate.got;
760
}
761
762
/**
763
* insn_decode() - Decode an x86 instruction
764
* @insn: &struct insn to be initialized
765
* @kaddr: address (in kernel memory) of instruction (or copy thereof)
766
* @buf_len: length of the insn buffer at @kaddr
767
* @m: insn mode, see enum insn_mode
768
*
769
* Returns:
770
* 0: if decoding succeeded
771
* < 0: otherwise.
772
*/
773
int insn_decode(struct insn *insn, const void *kaddr, int buf_len, enum insn_mode m)
774
{
775
int ret;
776
777
/* #define INSN_MODE_KERN -1 __ignore_sync_check__ mode is only valid in the kernel */
778
779
if (m == INSN_MODE_KERN)
780
insn_init(insn, kaddr, buf_len, IS_ENABLED(CONFIG_X86_64));
781
else
782
insn_init(insn, kaddr, buf_len, m == INSN_MODE_64);
783
784
ret = insn_get_length(insn);
785
if (ret)
786
return ret;
787
788
if (insn_complete(insn))
789
return 0;
790
791
return -EINVAL;
792
}
793
794