GitHub Repository: torvalds/linux
Path: blob/master/arch/riscv/kvm/vcpu_insn.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 * Copyright (c) 2022 Ventana Micro Systems Inc.
 */

#include <linux/bitops.h>
#include <linux/kvm_host.h>

#include <asm/cpufeature.h>
#include <asm/insn.h>

struct insn_func {
	unsigned long mask;
	unsigned long match;
	/*
	 * Possible return values are as follows:
	 * 1) Returns < 0 for error case
	 * 2) Returns 0 for exit to user-space
	 * 3) Returns 1 to continue with next sepc
	 * 4) Returns 2 to continue with same sepc
	 * 5) Returns 3 to inject illegal instruction trap and continue
	 * 6) Returns 4 to inject virtual instruction trap and continue
	 *
	 * Use enum kvm_insn_return for return values
	 */
	int (*func)(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn);
};
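
/*
 * For quick reference, the numbering above lines up with the named
 * constants used throughout this file (enum kvm_insn_return, presumably
 * declared in asm/kvm_vcpu_insn.h):
 *
 *	KVM_INSN_EXIT_TO_USER_SPACE == 0
 *	KVM_INSN_CONTINUE_NEXT_SEPC == 1
 *	KVM_INSN_CONTINUE_SAME_SEPC == 2
 *	KVM_INSN_ILLEGAL_TRAP       == 3
 *	KVM_INSN_VIRTUAL_TRAP       == 4
 */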

static int truly_illegal_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      ulong insn)
{
	struct kvm_cpu_trap utrap = { 0 };

	/* Redirect trap to Guest VCPU */
	utrap.sepc = vcpu->arch.guest_context.sepc;
	utrap.scause = EXC_INST_ILLEGAL;
	utrap.stval = insn;
	utrap.htval = 0;
	utrap.htinst = 0;
	kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);

	return 1;
}

static int truly_virtual_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      ulong insn)
{
	struct kvm_cpu_trap utrap = { 0 };

	/* Redirect trap to Guest VCPU */
	utrap.sepc = vcpu->arch.guest_context.sepc;
	utrap.scause = EXC_VIRTUAL_INST_FAULT;
	utrap.stval = insn;
	utrap.htval = 0;
	utrap.htinst = 0;
	kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);

	return 1;
}
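
/*
 * Note: both helpers return 1 (continue run-loop) because the trap has
 * been forwarded into the guest. In effect, kvm_riscv_vcpu_trap_redirect()
 * loads the trap details into the guest's virtual-supervisor trap CSRs and
 * points the guest PC at its trap vector, so the host has nothing left to
 * emulate.
 */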

/**
 * kvm_riscv_vcpu_wfi -- Emulate wait for interrupt (WFI) behaviour
 *
 * @vcpu: The VCPU pointer
 */
void kvm_riscv_vcpu_wfi(struct kvm_vcpu *vcpu)
{
	if (!kvm_arch_vcpu_runnable(vcpu)) {
		kvm_vcpu_srcu_read_unlock(vcpu);
		kvm_vcpu_halt(vcpu);
		kvm_vcpu_srcu_read_lock(vcpu);
	}
}
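
/*
 * The SRCU read lock is dropped around kvm_vcpu_halt() because halting
 * can block until the vCPU becomes runnable again; holding kvm->srcu
 * across that would stall writers such as memslot updates. This is a
 * general KVM pattern rather than anything specific to this file.
 */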

static int wfi_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn)
{
	vcpu->stat.wfi_exit_stat++;
	kvm_riscv_vcpu_wfi(vcpu);
	return KVM_INSN_CONTINUE_NEXT_SEPC;
}

static int wrs_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn)
{
	vcpu->stat.wrs_exit_stat++;
	kvm_vcpu_on_spin(vcpu, vcpu->arch.guest_context.sstatus & SR_SPP);
	return KVM_INSN_CONTINUE_NEXT_SEPC;
}
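
/*
 * wrs_insn() covers the Zawrs wait-on-reservation-set instructions.
 * Rather than actually stalling the hart, the guest's wait is treated as
 * a spin hint: kvm_vcpu_on_spin() may do a directed yield to another vCPU
 * of the same VM, and the SR_SPP test tells KVM whether the guest was
 * executing in its own kernel (S) mode at the time.
 */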

struct csr_func {
	unsigned int base;
	unsigned int count;
	/*
	 * Possible return values are the same as for the "func" callback
	 * in "struct insn_func".
	 */
	int (*func)(struct kvm_vcpu *vcpu, unsigned int csr_num,
		    unsigned long *val, unsigned long new_val,
		    unsigned long wr_mask);
};

static int seed_csr_rmw(struct kvm_vcpu *vcpu, unsigned int csr_num,
			unsigned long *val, unsigned long new_val,
			unsigned long wr_mask)
{
	if (!riscv_isa_extension_available(vcpu->arch.isa, ZKR))
		return KVM_INSN_ILLEGAL_TRAP;

	return KVM_INSN_EXIT_TO_USER_SPACE;
}
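
/*
 * The seed CSR is the Zkr entropy-source interface. If the guest's ISA
 * configuration does not include Zkr, an access must look like an illegal
 * instruction; otherwise the access is punted to userspace, which is
 * responsible for supplying (or refusing) entropy.
 */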

static const struct csr_func csr_funcs[] = {
	KVM_RISCV_VCPU_AIA_CSR_FUNCS
	KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS
	{ .base = CSR_SEED, .count = 1, .func = seed_csr_rmw },
};

/**
 * kvm_riscv_vcpu_csr_return -- Handle CSR read/write after user space
 *				emulation or in-kernel emulation
 *
 * @vcpu: The VCPU pointer
 * @run: The VCPU run struct containing the CSR data
 *
 * Returns > 0 upon failure and 0 upon success
 */
int kvm_riscv_vcpu_csr_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	ulong insn;

	if (vcpu->arch.csr_decode.return_handled)
		return 0;
	vcpu->arch.csr_decode.return_handled = 1;

	/* Update destination register for CSR reads */
	insn = vcpu->arch.csr_decode.insn;
	if ((insn >> SH_RD) & MASK_RX)
		SET_RD(insn, &vcpu->arch.guest_context,
		       run->riscv_csr.ret_value);

	/* Move to next instruction */
	vcpu->arch.guest_context.sepc += INSN_LEN(insn);

	return 0;
}
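
/*
 * Userspace side of KVM_EXIT_RISCV_CSR, as a minimal sketch (the
 * vmm_csr_read()/vmm_csr_write() helpers are hypothetical, not part of
 * any real VMM):
 *
 *	if (run->exit_reason == KVM_EXIT_RISCV_CSR) {
 *		ulong old = vmm_csr_read(run->riscv_csr.csr_num);
 *
 *		run->riscv_csr.ret_value = old;
 *		vmm_csr_write(run->riscv_csr.csr_num,
 *			      (old & ~run->riscv_csr.write_mask) |
 *			      (run->riscv_csr.new_value &
 *			       run->riscv_csr.write_mask));
 *	}
 *
 * On the next KVM_RUN, kvm_riscv_vcpu_csr_return() above copies
 * ret_value into rd (if any) and advances sepc past the CSR instruction.
 */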

static int csr_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn)
{
	int i, rc = KVM_INSN_ILLEGAL_TRAP;
	unsigned int csr_num = insn >> SH_RS2;
	unsigned int rs1_num = (insn >> SH_RS1) & MASK_RX;
	ulong rs1_val = GET_RS1(insn, &vcpu->arch.guest_context);
	const struct csr_func *tcfn, *cfn = NULL;
	ulong val = 0, wr_mask = 0, new_val = 0;

	/* Decode the CSR instruction */
	switch (GET_FUNCT3(insn)) {
	case GET_FUNCT3(INSN_MATCH_CSRRW):
		wr_mask = -1UL;
		new_val = rs1_val;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRS):
		wr_mask = rs1_val;
		new_val = -1UL;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRC):
		wr_mask = rs1_val;
		new_val = 0;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRWI):
		wr_mask = -1UL;
		new_val = rs1_num;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRSI):
		wr_mask = rs1_num;
		new_val = -1UL;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRCI):
		wr_mask = rs1_num;
		new_val = 0;
		break;
	default:
		return rc;
	}
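
	/*
	 * The wr_mask/new_val pair encodes all six CSR forms as one
	 * read-modify-write; the value written back is
	 *
	 *	(old & ~wr_mask) | (new_val & wr_mask)
	 *
	 * so CSRRW replaces the CSR outright, CSRRS/CSRRC set/clear the
	 * bits given in rs1, and the *I forms reuse the rs1 register-number
	 * field as a 5-bit zero-extended immediate (hence rs1_num above).
	 */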

	/* Save instruction decode info */
	vcpu->arch.csr_decode.insn = insn;
	vcpu->arch.csr_decode.return_handled = 0;

	/* Update CSR details in kvm_run struct */
	run->riscv_csr.csr_num = csr_num;
	run->riscv_csr.new_value = new_val;
	run->riscv_csr.write_mask = wr_mask;
	run->riscv_csr.ret_value = 0;

	/* Find in-kernel CSR function */
	for (i = 0; i < ARRAY_SIZE(csr_funcs); i++) {
		tcfn = &csr_funcs[i];
		if ((tcfn->base <= csr_num) &&
		    (csr_num < (tcfn->base + tcfn->count))) {
			cfn = tcfn;
			break;
		}
	}

	/* First try in-kernel CSR emulation */
	if (cfn && cfn->func) {
		rc = cfn->func(vcpu, csr_num, &val, new_val, wr_mask);
		if (rc > KVM_INSN_EXIT_TO_USER_SPACE) {
			if (rc == KVM_INSN_CONTINUE_NEXT_SEPC) {
				run->riscv_csr.ret_value = val;
				vcpu->stat.csr_exit_kernel++;
				kvm_riscv_vcpu_csr_return(vcpu, run);
				rc = KVM_INSN_CONTINUE_SAME_SEPC;
			}
			return rc;
		}
	}

	/* Exit to user-space for CSR emulation */
	if (rc <= KVM_INSN_EXIT_TO_USER_SPACE) {
		vcpu->stat.csr_exit_user++;
		run->exit_reason = KVM_EXIT_RISCV_CSR;
	}

	return rc;
}

static const struct insn_func system_opcode_funcs[] = {
	{
		.mask = INSN_MASK_CSRRW,
		.match = INSN_MATCH_CSRRW,
		.func = csr_insn,
	},
	{
		.mask = INSN_MASK_CSRRS,
		.match = INSN_MATCH_CSRRS,
		.func = csr_insn,
	},
	{
		.mask = INSN_MASK_CSRRC,
		.match = INSN_MATCH_CSRRC,
		.func = csr_insn,
	},
	{
		.mask = INSN_MASK_CSRRWI,
		.match = INSN_MATCH_CSRRWI,
		.func = csr_insn,
	},
	{
		.mask = INSN_MASK_CSRRSI,
		.match = INSN_MATCH_CSRRSI,
		.func = csr_insn,
	},
	{
		.mask = INSN_MASK_CSRRCI,
		.match = INSN_MATCH_CSRRCI,
		.func = csr_insn,
	},
	{
		.mask = INSN_MASK_WFI,
		.match = INSN_MATCH_WFI,
		.func = wfi_insn,
	},
	{
		.mask = INSN_MASK_WRS,
		.match = INSN_MATCH_WRS,
		.func = wrs_insn,
	},
};
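
/*
 * Dispatch note: an instruction is handled by the first entry whose
 * masked bits match, i.e. (insn & mask) == match. All six CSR entries
 * funnel into csr_insn(), which re-decodes the exact form from funct3.
 */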

static int system_opcode_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      ulong insn)
{
	int i, rc = KVM_INSN_ILLEGAL_TRAP;
	const struct insn_func *ifn;

	for (i = 0; i < ARRAY_SIZE(system_opcode_funcs); i++) {
		ifn = &system_opcode_funcs[i];
		if ((insn & ifn->mask) == ifn->match) {
			rc = ifn->func(vcpu, run, insn);
			break;
		}
	}

	switch (rc) {
	case KVM_INSN_ILLEGAL_TRAP:
		return truly_illegal_insn(vcpu, run, insn);
	case KVM_INSN_VIRTUAL_TRAP:
		return truly_virtual_insn(vcpu, run, insn);
	case KVM_INSN_CONTINUE_NEXT_SEPC:
		vcpu->arch.guest_context.sepc += INSN_LEN(insn);
		break;
	default:
		break;
	}

	return (rc <= 0) ? rc : 1;
}
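
/*
 * The final conversion collapses enum kvm_insn_return into the run-loop
 * convention used by kvm_riscv_vcpu_virtual_insn(): negative values
 * propagate as errors, 0 exits to user-space, and every "continue"
 * variant becomes 1 (sepc having already been advanced where needed).
 */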

/**
 * kvm_riscv_vcpu_virtual_insn -- Handle virtual instruction trap
 *
 * @vcpu: The VCPU pointer
 * @run: The VCPU run struct containing the mmio data
 * @trap: Trap details
 *
 * Returns > 0 to continue run-loop
 * Returns 0 to exit run-loop and handle in user-space.
 * Returns < 0 to report failure and exit run-loop
 */
int kvm_riscv_vcpu_virtual_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
				struct kvm_cpu_trap *trap)
{
	unsigned long insn = trap->stval;
	struct kvm_cpu_trap utrap = { 0 };
	struct kvm_cpu_context *ct;

	if (unlikely(INSN_IS_16BIT(insn))) {
		if (insn == 0) {
			ct = &vcpu->arch.guest_context;
			insn = kvm_riscv_vcpu_unpriv_read(vcpu, true,
							  ct->sepc,
							  &utrap);
			if (utrap.scause) {
				utrap.sepc = ct->sepc;
				kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
				return 1;
			}
		}
		if (INSN_IS_16BIT(insn))
			return truly_illegal_insn(vcpu, run, insn);
	}

	switch ((insn & INSN_OPCODE_MASK) >> INSN_OPCODE_SHIFT) {
	case INSN_OPCODE_SYSTEM:
		return system_opcode_insn(vcpu, run, insn);
	default:
		return truly_illegal_insn(vcpu, run, insn);
	}
}
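
/*
 * Note on the insn == 0 case above: stval may legitimately be zero when
 * the hardware did not capture the trapping instruction, so it is re-read
 * from guest memory at sepc via kvm_riscv_vcpu_unpriv_read() (the "true"
 * argument presumably selecting instruction-fetch, i.e. HLVX-style,
 * access). If that read itself faults, the fault is simply redirected
 * back to the guest.
 */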

/**
 * kvm_riscv_vcpu_mmio_load -- Emulate MMIO load instruction
 *
 * @vcpu: The VCPU pointer
 * @run: The VCPU run struct containing the mmio data
 * @fault_addr: Guest physical address to load
 * @htinst: Transformed encoding of the load instruction
 *
 * Returns > 0 to continue run-loop
 * Returns 0 to exit run-loop and handle in user-space.
 * Returns < 0 to report failure and exit run-loop
 */
int kvm_riscv_vcpu_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run,
			     unsigned long fault_addr,
			     unsigned long htinst)
{
	u8 data_buf[8];
	unsigned long insn;
	int shift = 0, len = 0, insn_len = 0;
	struct kvm_cpu_trap utrap = { 0 };
	struct kvm_cpu_context *ct = &vcpu->arch.guest_context;

	/* Determine trapped instruction */
	if (htinst & 0x1) {
		/*
		 * Bit[0] == 1 implies trapped instruction value is
		 * transformed instruction or custom instruction.
		 */
		insn = htinst | INSN_16BIT_MASK;
		insn_len = (htinst & BIT(1)) ? INSN_LEN(insn) : 2;
	} else {
		/*
		 * Bit[0] == 0 implies trapped instruction value is
		 * zero or special value.
		 */
		insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
						  &utrap);
		if (utrap.scause) {
			/* Redirect trap if we failed to read instruction */
			utrap.sepc = ct->sepc;
			kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
			return 1;
		}
		insn_len = INSN_LEN(insn);
	}
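
	/*
	 * htinst recap (hypervisor extension): when bit[0] is set, htinst
	 * holds a "transformed" standard encoding of the trapping
	 * instruction, and OR-ing in INSN_16BIT_MASK restores the two low
	 * opcode bits so it decodes as a 32-bit encoding. Bit[1] clear
	 * means the original instruction was compressed, so sepc must
	 * later advance by only 2 bytes.
	 */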

	/* Decode length of MMIO and shift */
	if ((insn & INSN_MASK_LW) == INSN_MATCH_LW) {
		len = 4;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LB) == INSN_MATCH_LB) {
		len = 1;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LBU) == INSN_MATCH_LBU) {
		len = 1;
		shift = 8 * (sizeof(ulong) - len);
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_LD) == INSN_MATCH_LD) {
		len = 8;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LWU) == INSN_MATCH_LWU) {
		len = 4;
#endif
	} else if ((insn & INSN_MASK_LH) == INSN_MATCH_LH) {
		len = 2;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LHU) == INSN_MATCH_LHU) {
		len = 2;
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_C_LD) == INSN_MATCH_C_LD) {
		len = 8;
		shift = 8 * (sizeof(ulong) - len);
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_LDSP) == INSN_MATCH_C_LDSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 8;
		shift = 8 * (sizeof(ulong) - len);
#endif
	} else if ((insn & INSN_MASK_C_LW) == INSN_MATCH_C_LW) {
		len = 4;
		shift = 8 * (sizeof(ulong) - len);
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_LWSP) == INSN_MATCH_C_LWSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 4;
		shift = 8 * (sizeof(ulong) - len);
	} else {
		return -EOPNOTSUPP;
	}
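
	/*
	 * shift = 8 * (sizeof(ulong) - len) pairs with the
	 * "<< shift >> shift" in kvm_riscv_vcpu_mmio_return() below to
	 * narrow the loaded value to the access width before it is written
	 * to rd. For the compressed forms, rewriting insn as
	 * RVC_RS2S(insn) << SH_RD re-encodes the destination register in
	 * the regular rd position so SET_RD() can be reused.
	 */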

	/* Fault address should be aligned to length of MMIO */
	if (fault_addr & (len - 1))
		return -EIO;

	/* Save instruction decode info */
	vcpu->arch.mmio_decode.insn = insn;
	vcpu->arch.mmio_decode.insn_len = insn_len;
	vcpu->arch.mmio_decode.shift = shift;
	vcpu->arch.mmio_decode.len = len;
	vcpu->arch.mmio_decode.return_handled = 0;

	/* Update MMIO details in kvm_run struct */
	run->mmio.is_write = false;
	run->mmio.phys_addr = fault_addr;
	run->mmio.len = len;

	/* Try to handle MMIO access in the kernel */
	if (!kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_addr, len, data_buf)) {
		/* Successfully handled MMIO access in the kernel so resume */
		memcpy(run->mmio.data, data_buf, len);
		vcpu->stat.mmio_exit_kernel++;
		kvm_riscv_vcpu_mmio_return(vcpu, run);
		return 1;
	}

	/* Exit to userspace for MMIO emulation */
	vcpu->stat.mmio_exit_user++;
	run->exit_reason = KVM_EXIT_MMIO;

	return 0;
}
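
/*
 * In both MMIO paths, in-kernel devices registered on KVM_MMIO_BUS get
 * first refusal via kvm_io_bus_read()/kvm_io_bus_write(). Only on a miss
 * does the vCPU exit with KVM_EXIT_MMIO; userspace then emulates the
 * access, fills run->mmio.data for loads, and the saved mmio_decode state
 * lets kvm_riscv_vcpu_mmio_return() complete the instruction on re-entry.
 */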

/**
 * kvm_riscv_vcpu_mmio_store -- Emulate MMIO store instruction
 *
 * @vcpu: The VCPU pointer
 * @run: The VCPU run struct containing the mmio data
 * @fault_addr: Guest physical address to store
 * @htinst: Transformed encoding of the store instruction
 *
 * Returns > 0 to continue run-loop
 * Returns 0 to exit run-loop and handle in user-space.
 * Returns < 0 to report failure and exit run-loop
 */
int kvm_riscv_vcpu_mmio_store(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      unsigned long fault_addr,
			      unsigned long htinst)
{
	u8 data8;
	u16 data16;
	u32 data32;
	u64 data64;
	ulong data;
	unsigned long insn;
	int len = 0, insn_len = 0;
	struct kvm_cpu_trap utrap = { 0 };
	struct kvm_cpu_context *ct = &vcpu->arch.guest_context;

	/* Determine trapped instruction */
	if (htinst & 0x1) {
		/*
		 * Bit[0] == 1 implies trapped instruction value is
		 * transformed instruction or custom instruction.
		 */
		insn = htinst | INSN_16BIT_MASK;
		insn_len = (htinst & BIT(1)) ? INSN_LEN(insn) : 2;
	} else {
		/*
		 * Bit[0] == 0 implies trapped instruction value is
		 * zero or special value.
		 */
		insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
						  &utrap);
		if (utrap.scause) {
			/* Redirect trap if we failed to read instruction */
			utrap.sepc = ct->sepc;
			kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
			return 1;
		}
		insn_len = INSN_LEN(insn);
	}

	data = GET_RS2(insn, &vcpu->arch.guest_context);
	data8 = data16 = data32 = data64 = data;

	if ((insn & INSN_MASK_SW) == INSN_MATCH_SW) {
		len = 4;
	} else if ((insn & INSN_MASK_SB) == INSN_MATCH_SB) {
		len = 1;
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_SD) == INSN_MATCH_SD) {
		len = 8;
#endif
	} else if ((insn & INSN_MASK_SH) == INSN_MATCH_SH) {
		len = 2;
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_C_SD) == INSN_MATCH_C_SD) {
		len = 8;
		data64 = GET_RS2S(insn, &vcpu->arch.guest_context);
	} else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 8;
		data64 = GET_RS2C(insn, &vcpu->arch.guest_context);
#endif
	} else if ((insn & INSN_MASK_C_SW) == INSN_MATCH_C_SW) {
		len = 4;
		data32 = GET_RS2S(insn, &vcpu->arch.guest_context);
	} else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 4;
		data32 = GET_RS2C(insn, &vcpu->arch.guest_context);
	} else {
		return -EOPNOTSUPP;
	}
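
	/*
	 * The chained assignment above pre-truncates rs2's value to every
	 * access width, and the decode then picks the matching copy. For
	 * the compressed forms, GET_RS2S() reads the rs2' field (registers
	 * x8-x15) and GET_RS2C() the rs2 field of the stack-pointer
	 * relative encodings, so no insn rewrite is needed on the store
	 * side.
	 */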

	/* Fault address should be aligned to length of MMIO */
	if (fault_addr & (len - 1))
		return -EIO;

	/* Save instruction decode info */
	vcpu->arch.mmio_decode.insn = insn;
	vcpu->arch.mmio_decode.insn_len = insn_len;
	vcpu->arch.mmio_decode.shift = 0;
	vcpu->arch.mmio_decode.len = len;
	vcpu->arch.mmio_decode.return_handled = 0;

	/* Copy data to kvm_run instance */
	switch (len) {
	case 1:
		*((u8 *)run->mmio.data) = data8;
		break;
	case 2:
		*((u16 *)run->mmio.data) = data16;
		break;
	case 4:
		*((u32 *)run->mmio.data) = data32;
		break;
	case 8:
		*((u64 *)run->mmio.data) = data64;
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* Update MMIO details in kvm_run struct */
	run->mmio.is_write = true;
	run->mmio.phys_addr = fault_addr;
	run->mmio.len = len;

	/* Try to handle MMIO access in the kernel */
	if (!kvm_io_bus_write(vcpu, KVM_MMIO_BUS,
			      fault_addr, len, run->mmio.data)) {
		/* Successfully handled MMIO access in the kernel so resume */
		vcpu->stat.mmio_exit_kernel++;
		kvm_riscv_vcpu_mmio_return(vcpu, run);
		return 1;
	}

	/* Exit to userspace for MMIO emulation */
	vcpu->stat.mmio_exit_user++;
	run->exit_reason = KVM_EXIT_MMIO;

	return 0;
}

/**
 * kvm_riscv_vcpu_mmio_return -- Handle MMIO loads after user space emulation
 *				 or in-kernel IO emulation
 *
 * @vcpu: The VCPU pointer
 * @run: The VCPU run struct containing the mmio data
 */
int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	u8 data8;
	u16 data16;
	u32 data32;
	u64 data64;
	ulong insn;
	int len, shift;

	if (vcpu->arch.mmio_decode.return_handled)
		return 0;

	vcpu->arch.mmio_decode.return_handled = 1;
	insn = vcpu->arch.mmio_decode.insn;

	if (run->mmio.is_write)
		goto done;
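
	/*
	 * Stores have nothing to write back to the guest, so they skip
	 * straight to advancing sepc. The return_handled flag above guards
	 * against the completion running twice, e.g. once on the in-kernel
	 * fast path and once more from the run-loop on the next KVM_RUN,
	 * which would otherwise double-advance sepc.
	 */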

	len = vcpu->arch.mmio_decode.len;
	shift = vcpu->arch.mmio_decode.shift;

	switch (len) {
	case 1:
		data8 = *((u8 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
		       (ulong)data8 << shift >> shift);
		break;
	case 2:
		data16 = *((u16 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
		       (ulong)data16 << shift >> shift);
		break;
	case 4:
		data32 = *((u32 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
		       (ulong)data32 << shift >> shift);
		break;
	case 8:
		data64 = *((u64 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
		       (ulong)data64 << shift >> shift);
		break;
	default:
		return -EOPNOTSUPP;
	}

done:
	/* Move to next instruction */
	vcpu->arch.guest_context.sepc += vcpu->arch.mmio_decode.insn_len;

	return 0;
}