// SPDX-License-Identifier: GPL-2.0-only
/*
 * GICv3 ITS emulation
 *
 * Copyright (C) 2015,2016 ARM Ltd.
 * Author: Andre Przywara <[email protected]>
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/uaccess.h>
#include <linux/list_sort.h>

#include <linux/irqchip/arm-gic-v3.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"
#include "vgic-mmio.h"

static struct kvm_device_ops kvm_arm_vgic_its_ops;

static int vgic_its_save_tables_v0(struct vgic_its *its);
static int vgic_its_restore_tables_v0(struct vgic_its *its);
static int vgic_its_commit_v0(struct vgic_its *its);
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
			     struct kvm_vcpu *filter_vcpu, bool needs_inv);
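
/*
 * Typed accessors for single table entries in guest RAM. The table type
 * suffix (cte/dte/ite) selects the entry size of the current ABI, which
 * is sanity-checked against the caller's buffer before the guest access.
 * Illustrative use (hypothetical values):
 *
 *	u64 dte;
 *	int ret = vgic_its_read_entry_lock(its, gpa, &dte, dte);
 */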
#define vgic_its_read_entry_lock(i, g, valp, t) \
	({ \
		int __sz = vgic_its_get_abi(i)->t##_esz; \
		struct kvm *__k = (i)->dev->kvm; \
		int __ret; \
 \
		BUILD_BUG_ON(NR_ITS_ABIS == 1 && \
			     sizeof(*(valp)) != ABI_0_ESZ); \
		if (NR_ITS_ABIS > 1 && \
		    KVM_BUG_ON(__sz != sizeof(*(valp)), __k)) \
			__ret = -EINVAL; \
		else \
			__ret = kvm_read_guest_lock(__k, (g), \
						    valp, __sz); \
		__ret; \
	})

#define vgic_its_write_entry_lock(i, g, val, t) \
	({ \
		int __sz = vgic_its_get_abi(i)->t##_esz; \
		struct kvm *__k = (i)->dev->kvm; \
		typeof(val) __v = (val); \
		int __ret; \
 \
		BUILD_BUG_ON(NR_ITS_ABIS == 1 && \
			     sizeof(__v) != ABI_0_ESZ); \
		if (NR_ITS_ABIS > 1 && \
		    KVM_BUG_ON(__sz != sizeof(__v), __k)) \
			__ret = -EINVAL; \
		else \
			__ret = vgic_write_guest_lock(__k, (g), \
						      &__v, __sz); \
		__ret; \
	})

/*
 * Creates a new (reference to a) struct vgic_irq for a given LPI.
 * If this LPI is already mapped on another ITS, we increase its refcount
 * and return a pointer to the existing structure.
 * If this is a "new" LPI, we allocate and initialize a new struct vgic_irq.
 * This function returns a pointer to the _unlocked_ structure.
 */
static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
				     struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq = vgic_get_irq(kvm, intid), *oldirq;
	int ret;

	/* In this case there is no put, since we keep the reference. */
	if (irq)
		return irq;

	irq = kzalloc(sizeof(struct vgic_irq), GFP_KERNEL_ACCOUNT);
	if (!irq)
		return ERR_PTR(-ENOMEM);

	ret = xa_reserve(&dist->lpi_xa, intid, GFP_KERNEL_ACCOUNT);
	if (ret) {
		kfree(irq);
		return ERR_PTR(ret);
	}

	INIT_LIST_HEAD(&irq->ap_list);
	raw_spin_lock_init(&irq->irq_lock);

	irq->config = VGIC_CONFIG_EDGE;
	refcount_set(&irq->refcount, 1);
	irq->intid = intid;
	irq->target_vcpu = vcpu;
	irq->group = 1;

	xa_lock(&dist->lpi_xa);

	/*
	 * There could be a race with another vgic_add_lpi(), so we need to
	 * check that we don't add a second list entry with the same LPI.
	 */
	oldirq = xa_load(&dist->lpi_xa, intid);
	if (vgic_try_get_irq_ref(oldirq)) {
		/* Someone was faster with adding this LPI, let's use that. */
		kfree(irq);
		irq = oldirq;

		goto out_unlock;
	}

	ret = xa_err(__xa_store(&dist->lpi_xa, intid, irq, 0));
	if (ret) {
		xa_release(&dist->lpi_xa, intid);
		kfree(irq);
	}

out_unlock:
	xa_unlock(&dist->lpi_xa);

	if (ret)
		return ERR_PTR(ret);

	/*
	 * We "cache" the configuration table entries in our struct vgic_irq's.
	 * However we only have those structs for mapped IRQs, so we read in
	 * the respective config data from memory here upon mapping the LPI.
	 *
	 * Should any of these fail, behave as if we couldn't create the LPI
	 * by dropping the refcount and returning the error.
	 */
	ret = update_lpi_config(kvm, irq, NULL, false);
	if (ret) {
		vgic_put_irq(kvm, irq);
		return ERR_PTR(ret);
	}

	ret = vgic_v3_lpi_sync_pending_status(kvm, irq);
	if (ret) {
		vgic_put_irq(kvm, irq);
		return ERR_PTR(ret);
	}

	return irq;
}

/**
 * struct vgic_its_abi - ITS abi ops and settings
 * @cte_esz: collection table entry size
 * @dte_esz: device table entry size
 * @ite_esz: interrupt translation table entry size
 * @save_tables: save the ITS tables into guest RAM
 * @restore_tables: restore the ITS internal structs from tables
 *	stored in guest RAM
 * @commit: initialize the registers which expose the ABI settings,
 *	especially the entry sizes
 */
struct vgic_its_abi {
	int cte_esz;
	int dte_esz;
	int ite_esz;
	int (*save_tables)(struct vgic_its *its);
	int (*restore_tables)(struct vgic_its *its);
	int (*commit)(struct vgic_its *its);
};

#define ABI_0_ESZ	8
#define ESZ_MAX		ABI_0_ESZ

static const struct vgic_its_abi its_table_abi_versions[] = {
	[0] = {
		.cte_esz = ABI_0_ESZ,
		.dte_esz = ABI_0_ESZ,
		.ite_esz = ABI_0_ESZ,
		.save_tables = vgic_its_save_tables_v0,
		.restore_tables = vgic_its_restore_tables_v0,
		.commit = vgic_its_commit_v0,
	},
};

#define NR_ITS_ABIS	ARRAY_SIZE(its_table_abi_versions)
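
/*
 * The ABI revision in use is exposed to the guest in GITS_IIDR.Revision;
 * userspace can also pick an older revision by writing GITS_IIDR through
 * the device attribute interface (see vgic_mmio_uaccess_write_its_iidr()
 * below).
 */
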
inline const struct vgic_its_abi *vgic_its_get_abi(struct vgic_its *its)
{
	return &its_table_abi_versions[its->abi_rev];
}

static int vgic_its_set_abi(struct vgic_its *its, u32 rev)
{
	const struct vgic_its_abi *abi;

	its->abi_rev = rev;
	abi = vgic_its_get_abi(its);
	return abi->commit(its);
}

/*
 * Finds and returns a device in the device table for an ITS.
 * Must be called with the its_lock mutex held.
 */
static struct its_device *find_its_device(struct vgic_its *its, u32 device_id)
{
	struct its_device *device;

	list_for_each_entry(device, &its->device_list, dev_list)
		if (device_id == device->device_id)
			return device;

	return NULL;
}

/*
 * Finds and returns an interrupt translation table entry (ITTE) for a given
 * Device ID/Event ID pair on an ITS.
 * Must be called with the its_lock mutex held.
 */
static struct its_ite *find_ite(struct vgic_its *its, u32 device_id,
				u32 event_id)
{
	struct its_device *device;
	struct its_ite *ite;

	device = find_its_device(its, device_id);
	if (device == NULL)
		return NULL;

	list_for_each_entry(ite, &device->itt_head, ite_list)
		if (ite->event_id == event_id)
			return ite;

	return NULL;
}

/* To be used as an iterator this macro misses the enclosing parentheses */
#define for_each_lpi_its(dev, ite, its) \
	list_for_each_entry(dev, &(its)->device_list, dev_list) \
		list_for_each_entry(ite, &(dev)->itt_head, ite_list)

#define GIC_LPI_OFFSET 8192

#define VITS_TYPER_IDBITS		16
#define VITS_MAX_EVENTID		(BIT(VITS_TYPER_IDBITS) - 1)
#define VITS_TYPER_DEVBITS		16
#define VITS_MAX_DEVID			(BIT(VITS_TYPER_DEVBITS) - 1)
#define VITS_DTE_MAX_DEVID_OFFSET	(BIT(14) - 1)
#define VITS_ITE_MAX_EVENTID_OFFSET	(BIT(16) - 1)

/*
 * Finds and returns a collection in the ITS collection table.
 * Must be called with the its_lock mutex held.
 */
static struct its_collection *find_collection(struct vgic_its *its, int coll_id)
{
	struct its_collection *collection;

	list_for_each_entry(collection, &its->collection_list, coll_list) {
		if (coll_id == collection->collection_id)
			return collection;
	}

	return NULL;
}

#define LPI_PROP_ENABLE_BIT(p)	((p) & LPI_PROP_ENABLED)
#define LPI_PROP_PRIORITY(p)	((p) & 0xfc)

/*
 * Reads the configuration data for a given LPI from guest memory and
 * updates the fields in struct vgic_irq.
 * If filter_vcpu is not NULL, applies only if the IRQ is targeting this
 * VCPU. Unconditionally applies if filter_vcpu is NULL.
 */
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
			     struct kvm_vcpu *filter_vcpu, bool needs_inv)
{
	u64 propbase = GICR_PROPBASER_ADDRESS(kvm->arch.vgic.propbaser);
	u8 prop;
	int ret;
	unsigned long flags;

	ret = kvm_read_guest_lock(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
				  &prop, 1);

	if (ret)
		return ret;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	if (!filter_vcpu || filter_vcpu == irq->target_vcpu) {
		irq->priority = LPI_PROP_PRIORITY(prop);
		irq->enabled = LPI_PROP_ENABLE_BIT(prop);

		if (!irq->hw) {
			vgic_queue_irq_unlock(kvm, irq, flags);
			return 0;
		}
	}

	if (irq->hw)
		ret = its_prop_update_vlpi(irq->host_irq, prop, needs_inv);

	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	return ret;
}

static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
{
	struct its_vlpi_map map;
	int ret;

	guard(raw_spinlock_irqsave)(&irq->irq_lock);
	irq->target_vcpu = vcpu;

	if (!irq->hw)
		return 0;

	ret = its_get_vlpi(irq->host_irq, &map);
	if (ret)
		return ret;

	if (map.vpe)
		atomic_dec(&map.vpe->vlpi_count);

	map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
	atomic_inc(&map.vpe->vlpi_count);
	return its_map_vlpi(irq->host_irq, &map);
}
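
/*
 * For a hardware-forwarded LPI (irq->hw), retargeting means remapping the
 * backing vLPI onto the new vcpu's vPE; the vlpi_count accounting above
 * is used elsewhere in the GICv4 code to track how many vLPIs currently
 * target a given vPE.
 */
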
static struct kvm_vcpu *collection_to_vcpu(struct kvm *kvm,
					   struct its_collection *col)
{
	return kvm_get_vcpu_by_id(kvm, col->target_addr);
}

/*
 * Promotes the ITS view of affinity of an ITTE (which redistributor this
 * LPI is targeting) to the VGIC's view, which deals with target VCPUs.
 * Needs to be called whenever either the collection for an LPI has
 * changed or the collection itself got retargeted.
 */
static void update_affinity_ite(struct kvm *kvm, struct its_ite *ite)
{
	struct kvm_vcpu *vcpu;

	if (!its_is_collection_mapped(ite->collection))
		return;

	vcpu = collection_to_vcpu(kvm, ite->collection);
	update_affinity(ite->irq, vcpu);
}

/*
 * Updates the target VCPU for every LPI targeting this collection.
 * Must be called with the its_lock mutex held.
 */
static void update_affinity_collection(struct kvm *kvm, struct vgic_its *its,
				       struct its_collection *coll)
{
	struct its_device *device;
	struct its_ite *ite;

	for_each_lpi_its(device, ite, its) {
		if (ite->collection != coll)
			continue;

		update_affinity_ite(kvm, ite);
	}
}

static u32 max_lpis_propbaser(u64 propbaser)
{
	int nr_idbits = (propbaser & 0x1f) + 1;

	return 1U << min(nr_idbits, INTERRUPT_ID_BITS_ITS);
}
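
/*
 * Worked example: a guest setting IDbits = 15 in GICR_PROPBASER asks for
 * 16 interrupt ID bits, so max_lpis_propbaser() returns 65536 and valid
 * LPI INTIDs span 8192..65535. The result is capped at
 * INTERRUPT_ID_BITS_ITS (16 at the time of writing), matching the
 * VITS_TYPER_IDBITS advertised in GITS_TYPER.
 */
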
/*
 * Sync the pending table pending bit of LPIs targeting @vcpu
 * with our own data structures. This relies on the LPI being
 * mapped before.
 */
static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
{
	gpa_t pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long intid, flags;
	struct vgic_irq *irq;
	int last_byte_offset = -1;
	int ret = 0;
	u8 pendmask;

	xa_for_each(&dist->lpi_xa, intid, irq) {
		int byte_offset, bit_nr;

		byte_offset = intid / BITS_PER_BYTE;
		bit_nr = intid % BITS_PER_BYTE;

		/*
		 * For contiguously allocated LPIs chances are we just read
		 * this very same byte in the last iteration. Reuse that.
		 */
		if (byte_offset != last_byte_offset) {
			ret = kvm_read_guest_lock(vcpu->kvm,
						  pendbase + byte_offset,
						  &pendmask, 1);
			if (ret)
				return ret;

			last_byte_offset = byte_offset;
		}

		irq = vgic_get_irq(vcpu->kvm, intid);
		if (!irq)
			continue;

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq->target_vcpu == vcpu)
			irq->pending_latch = pendmask & (1U << bit_nr);
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}

	return ret;
}

static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm,
					      struct vgic_its *its,
					      gpa_t addr, unsigned int len)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 reg = GITS_TYPER_PLPIS;

	/*
	 * We use linear CPU numbers for redistributor addressing,
	 * so GITS_TYPER.PTA is 0.
	 * Also we force all PROPBASER registers to be the same, so
	 * CommonLPIAff is 0 as well.
	 * To avoid memory waste in the guest, we keep the number of IDBits and
	 * DevBits low - at least for the time being.
	 */
	reg |= GIC_ENCODE_SZ(VITS_TYPER_DEVBITS, 5) << GITS_TYPER_DEVBITS_SHIFT;
	reg |= GIC_ENCODE_SZ(VITS_TYPER_IDBITS, 5) << GITS_TYPER_IDBITS_SHIFT;
	reg |= GIC_ENCODE_SZ(abi->ite_esz, 4) << GITS_TYPER_ITT_ENTRY_SIZE_SHIFT;

	return extract_bytes(reg, addr & 7, len);
}

static unsigned long vgic_mmio_read_its_iidr(struct kvm *kvm,
					     struct vgic_its *its,
					     gpa_t addr, unsigned int len)
{
	u32 val;

	val = (its->abi_rev << GITS_IIDR_REV_SHIFT) & GITS_IIDR_REV_MASK;
	val |= (PRODUCT_ID_KVM << GITS_IIDR_PRODUCTID_SHIFT) | IMPLEMENTER_ARM;
	return val;
}

static int vgic_mmio_uaccess_write_its_iidr(struct kvm *kvm,
					    struct vgic_its *its,
					    gpa_t addr, unsigned int len,
					    unsigned long val)
{
	u32 rev = GITS_IIDR_REV(val);

	if (rev >= NR_ITS_ABIS)
		return -EINVAL;
	return vgic_its_set_abi(its, rev);
}

static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	switch (addr & 0xffff) {
	case GITS_PIDR0:
		return 0x92;	/* part number, bits[7:0] */
	case GITS_PIDR1:
		return 0xb4;	/* part number, bits[11:8] */
	case GITS_PIDR2:
		return GIC_PIDR2_ARCH_GICv3 | 0x0b;
	case GITS_PIDR4:
		return 0x40;	/* This is a 64K software visible page */
	/* The following are the ID registers for (any) GIC. */
	case GITS_CIDR0:
		return 0x0d;
	case GITS_CIDR1:
		return 0xf0;
	case GITS_CIDR2:
		return 0x05;
	case GITS_CIDR3:
		return 0xb1;
	}

	return 0;
}

static struct vgic_its *__vgic_doorbell_to_its(struct kvm *kvm, gpa_t db)
{
	struct kvm_io_device *kvm_io_dev;
	struct vgic_io_device *iodev;

	kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, db);
	if (!kvm_io_dev)
		return ERR_PTR(-EINVAL);

	if (kvm_io_dev->ops != &kvm_io_gic_ops)
		return ERR_PTR(-EINVAL);

	iodev = container_of(kvm_io_dev, struct vgic_io_device, dev);
	if (iodev->iodev_type != IODEV_ITS)
		return ERR_PTR(-EINVAL);

	return iodev->its;
}

static unsigned long vgic_its_cache_key(u32 devid, u32 eventid)
{
	return (((unsigned long)devid) << VITS_TYPER_IDBITS) | eventid;
}
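
/*
 * With VITS_TYPER_IDBITS == 16 the cache key simply concatenates the two
 * IDs, e.g. devid 0x12 / eventid 0x34 yields key 0x120034. Since device
 * and event IDs are bounded by VITS_MAX_DEVID/VITS_MAX_EVENTID, distinct
 * pairs can never collide.
 */
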
static struct vgic_irq *vgic_its_check_cache(struct kvm *kvm, phys_addr_t db,
					     u32 devid, u32 eventid)
{
	unsigned long cache_key = vgic_its_cache_key(devid, eventid);
	struct vgic_its *its;
	struct vgic_irq *irq;

	if (devid > VITS_MAX_DEVID || eventid > VITS_MAX_EVENTID)
		return NULL;

	its = __vgic_doorbell_to_its(kvm, db);
	if (IS_ERR(its))
		return NULL;

	rcu_read_lock();

	irq = xa_load(&its->translation_cache, cache_key);
	if (!vgic_try_get_irq_ref(irq))
		irq = NULL;

	rcu_read_unlock();

	return irq;
}

static void vgic_its_cache_translation(struct kvm *kvm, struct vgic_its *its,
				       u32 devid, u32 eventid,
				       struct vgic_irq *irq)
{
	unsigned long cache_key = vgic_its_cache_key(devid, eventid);
	struct vgic_irq *old;

	/* Do not cache a directly injected interrupt */
	if (irq->hw)
		return;

	/*
	 * The irq refcount is guaranteed to be nonzero while holding the
	 * its_lock, as the ITE (and the reference it holds) cannot be freed.
	 */
	lockdep_assert_held(&its->its_lock);
	vgic_get_irq_ref(irq);

	old = xa_store(&its->translation_cache, cache_key, irq, GFP_KERNEL_ACCOUNT);

	/*
	 * Put the reference taken on @irq if the store fails. Intentionally do
	 * not return the error as the translation cache is best effort.
	 */
	if (xa_is_err(old)) {
		vgic_put_irq(kvm, irq);
		return;
	}

	/*
	 * We could have raced with another CPU caching the same
	 * translation behind our back, ensure we don't leak a
	 * reference if that is the case.
	 */
	if (old)
		vgic_put_irq(kvm, old);
}

static void vgic_its_invalidate_cache(struct vgic_its *its)
{
	struct kvm *kvm = its->dev->kvm;
	struct vgic_irq *irq;
	unsigned long idx;

	xa_for_each(&its->translation_cache, idx, irq) {
		xa_erase(&its->translation_cache, idx);
		vgic_put_irq(kvm, irq);
	}
}

void vgic_its_invalidate_all_caches(struct kvm *kvm)
{
	struct kvm_device *dev;
	struct vgic_its *its;

	rcu_read_lock();

	list_for_each_entry_rcu(dev, &kvm->devices, vm_node) {
		if (dev->ops != &kvm_arm_vgic_its_ops)
			continue;

		its = dev->private;
		vgic_its_invalidate_cache(its);
	}

	rcu_read_unlock();
}
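
/*
 * Translation cache lifecycle: entries are installed on a successful
 * translation in vgic_its_resolve_lpi() below, looked up lock-free under
 * RCU on the MSI fast path, and dropped wholesale whenever a command may
 * change the result of a translation (DISCARD, MAPD, MAPC, MOVI, MOVALL)
 * or the ITS gets disabled.
 */
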
int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
			 u32 devid, u32 eventid, struct vgic_irq **irq)
{
	struct kvm_vcpu *vcpu;
	struct its_ite *ite;

	if (!its->enabled)
		return -EBUSY;

	ite = find_ite(its, devid, eventid);
	if (!ite || !its_is_collection_mapped(ite->collection))
		return E_ITS_INT_UNMAPPED_INTERRUPT;

	vcpu = collection_to_vcpu(kvm, ite->collection);
	if (!vcpu)
		return E_ITS_INT_UNMAPPED_INTERRUPT;

	if (!vgic_lpis_enabled(vcpu))
		return -EBUSY;

	vgic_its_cache_translation(kvm, its, devid, eventid, ite->irq);

	*irq = ite->irq;
	return 0;
}

struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi)
{
	u64 address;

	if (!vgic_has_its(kvm))
		return ERR_PTR(-ENODEV);

	if (!(msi->flags & KVM_MSI_VALID_DEVID))
		return ERR_PTR(-EINVAL);

	address = (u64)msi->address_hi << 32 | msi->address_lo;

	return __vgic_doorbell_to_its(kvm, address);
}

/*
 * Find the target VCPU and the LPI number for a given devid/eventid pair
 * and make this IRQ pending, possibly injecting it.
 * Must be called with the its_lock mutex held.
 * Returns 0 on success, a positive error value for any ITS mapping
 * related errors and negative error values for generic errors.
 */
static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
				u32 devid, u32 eventid)
{
	struct vgic_irq *irq = NULL;
	unsigned long flags;
	int err;

	err = vgic_its_resolve_lpi(kvm, its, devid, eventid, &irq);
	if (err)
		return err;

	if (irq->hw)
		return irq_set_irqchip_state(irq->host_irq,
					     IRQCHIP_STATE_PENDING, true);

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	irq->pending_latch = true;
	vgic_queue_irq_unlock(kvm, irq, flags);

	return 0;
}

int vgic_its_inject_cached_translation(struct kvm *kvm, struct kvm_msi *msi)
{
	struct vgic_irq *irq;
	unsigned long flags;
	phys_addr_t db;

	db = (u64)msi->address_hi << 32 | msi->address_lo;
	irq = vgic_its_check_cache(kvm, db, msi->devid, msi->data);
	if (!irq)
		return -EWOULDBLOCK;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	irq->pending_latch = true;
	vgic_queue_irq_unlock(kvm, irq, flags);
	vgic_put_irq(kvm, irq);

	return 0;
}

/*
 * Queries the KVM IO bus framework to get the ITS pointer from the given
 * doorbell address.
 * We then call vgic_its_trigger_msi() with the decoded data.
 * According to the KVM_SIGNAL_MSI API description, this returns 1 on success.
 */
int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
{
	struct vgic_its *its;
	int ret;

	if (!vgic_its_inject_cached_translation(kvm, msi))
		return 1;

	its = vgic_msi_to_its(kvm, msi);
	if (IS_ERR(its))
		return PTR_ERR(its);

	mutex_lock(&its->its_lock);
	ret = vgic_its_trigger_msi(kvm, its, msi->devid, msi->data);
	mutex_unlock(&its->its_lock);

	if (ret < 0)
		return ret;

	/*
	 * KVM_SIGNAL_MSI demands a return value > 0 for success and 0
	 * if the guest has blocked the MSI. So we map any LPI mapping
	 * related error to that.
	 */
	if (ret)
		return 0;
	else
		return 1;
}
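
/*
 * MSI injection is thus two-tiered: the cached path above resolves the
 * doorbell and (devid, eventid) pair without taking any mutex, while a
 * cache miss falls back to the full ITT walk under its_lock, which in
 * turn repopulates the cache for the next interrupt.
 */
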
/* Requires the its_lock to be held. */
static void its_free_ite(struct kvm *kvm, struct its_ite *ite)
{
	struct vgic_irq *irq = ite->irq;
	list_del(&ite->ite_list);

	/* This put matches the get in vgic_add_lpi. */
	if (irq) {
		scoped_guard(raw_spinlock_irqsave, &irq->irq_lock) {
			if (irq->hw)
				its_unmap_vlpi(ite->irq->host_irq);

			irq->hw = false;
		}

		vgic_put_irq(kvm, ite->irq);
	}

	kfree(ite);
}

static u64 its_cmd_mask_field(u64 *its_cmd, int word, int shift, int size)
{
	return (le64_to_cpu(its_cmd[word]) >> shift) & (BIT_ULL(size) - 1);
}

#define its_cmd_get_command(cmd)	its_cmd_mask_field(cmd, 0,  0,  8)
#define its_cmd_get_deviceid(cmd)	its_cmd_mask_field(cmd, 0, 32, 32)
#define its_cmd_get_size(cmd)		(its_cmd_mask_field(cmd, 1,  0,  5) + 1)
#define its_cmd_get_id(cmd)		its_cmd_mask_field(cmd, 1,  0, 32)
#define its_cmd_get_physical_id(cmd)	its_cmd_mask_field(cmd, 1, 32, 32)
#define its_cmd_get_collection(cmd)	its_cmd_mask_field(cmd, 2,  0, 16)
#define its_cmd_get_ittaddr(cmd)	(its_cmd_mask_field(cmd, 2,  8, 44) << 8)
#define its_cmd_get_target_addr(cmd)	its_cmd_mask_field(cmd, 2, 16, 32)
#define its_cmd_get_validbit(cmd)	its_cmd_mask_field(cmd, 2, 63,  1)
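
/*
 * Every ITS command is four little-endian 64-bit words. A MAPTI command,
 * for example, carries the opcode in word 0 bits [7:0], the device ID in
 * word 0 bits [63:32], the event ID in word 1 bits [31:0], the physical
 * INTID in word 1 bits [63:32] and the collection ID in word 2 bits
 * [15:0] - exactly the fields the accessors above extract.
 */
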
/*
 * The DISCARD command frees an Interrupt Translation Table Entry (ITTE).
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its,
				       u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_ite *ite;

	ite = find_ite(its, device_id, event_id);
	if (ite && its_is_collection_mapped(ite->collection)) {
		struct its_device *device = find_its_device(its, device_id);
		int ite_esz = vgic_its_get_abi(its)->ite_esz;
		gpa_t gpa = device->itt_addr + ite->event_id * ite_esz;
		/*
		 * Though the spec talks about removing the pending state, we
		 * don't bother here since we clear the ITTE anyway and the
		 * pending state is a property of the ITTE struct.
		 */
		vgic_its_invalidate_cache(its);

		its_free_ite(kvm, ite);

		return vgic_its_write_entry_lock(its, gpa, 0ULL, ite);
	}

	return E_ITS_DISCARD_UNMAPPED_INTERRUPT;
}

/*
 * The MOVI command moves an ITTE to a different collection.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct kvm_vcpu *vcpu;
	struct its_ite *ite;
	struct its_collection *collection;

	ite = find_ite(its, device_id, event_id);
	if (!ite)
		return E_ITS_MOVI_UNMAPPED_INTERRUPT;

	if (!its_is_collection_mapped(ite->collection))
		return E_ITS_MOVI_UNMAPPED_COLLECTION;

	collection = find_collection(its, coll_id);
	if (!its_is_collection_mapped(collection))
		return E_ITS_MOVI_UNMAPPED_COLLECTION;

	ite->collection = collection;
	vcpu = collection_to_vcpu(kvm, collection);

	vgic_its_invalidate_cache(its);

	return update_affinity(ite->irq, vcpu);
}

static bool __is_visible_gfn_locked(struct vgic_its *its, gpa_t gpa)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int idx;
	bool ret;

	idx = srcu_read_lock(&its->dev->kvm->srcu);
	ret = kvm_is_visible_gfn(its->dev->kvm, gfn);
	srcu_read_unlock(&its->dev->kvm->srcu, idx);
	return ret;
}

/*
 * Check whether an ID can be stored into the corresponding guest table.
 * For a direct table this is pretty easy, but gets a bit nasty for
 * indirect tables. We check whether the resulting guest physical address
 * is actually valid (covered by a memslot and guest accessible).
 * For this we have to read the respective first level entry.
 */
static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
			      gpa_t *eaddr)
{
	int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
	u64 indirect_ptr, type = GITS_BASER_TYPE(baser);
	phys_addr_t base = GITS_BASER_ADDR_48_to_52(baser);
	int esz = GITS_BASER_ENTRY_SIZE(baser);
	int index;

	switch (type) {
	case GITS_BASER_TYPE_DEVICE:
		if (id > VITS_MAX_DEVID)
			return false;
		break;
	case GITS_BASER_TYPE_COLLECTION:
		/* as GITS_TYPER.CIL == 0, ITS supports 16-bit collection ID */
		if (id >= BIT_ULL(16))
			return false;
		break;
	default:
		return false;
	}

	if (!(baser & GITS_BASER_INDIRECT)) {
		phys_addr_t addr;

		if (id >= (l1_tbl_size / esz))
			return false;

		addr = base + id * esz;

		if (eaddr)
			*eaddr = addr;

		return __is_visible_gfn_locked(its, addr);
	}

	/* calculate and check the index into the 1st level */
	index = id / (SZ_64K / esz);
	if (index >= (l1_tbl_size / sizeof(u64)))
		return false;

	/* Each 1st level entry is represented by a 64-bit value. */
	if (kvm_read_guest_lock(its->dev->kvm,
				base + index * sizeof(indirect_ptr),
				&indirect_ptr, sizeof(indirect_ptr)))
		return false;

	indirect_ptr = le64_to_cpu(indirect_ptr);

	/* check the valid bit of the first level entry */
	if (!(indirect_ptr & BIT_ULL(63)))
		return false;

	/* Mask the guest physical address and calculate the frame number. */
	indirect_ptr &= GENMASK_ULL(51, 16);

	/* Find the address of the actual entry */
	index = id % (SZ_64K / esz);
	indirect_ptr += index * esz;

	if (eaddr)
		*eaddr = indirect_ptr;

	return __is_visible_gfn_locked(its, indirect_ptr);
}
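
/*
 * Worked example for the indirect case: with 8-byte entries, each 64K
 * second-level page holds 8192 of them, so device ID 20000 selects
 * first-level slot 20000 / 8192 = 2; if the valid bit [63] of that
 * 64-bit entry is set, the entry itself lives at offset
 * (20000 % 8192) * 8 = 0x7100 within the second-level page.
 */
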
/*
 * Check whether an event ID can be stored in the corresponding Interrupt
 * Translation Table, which starts at device->itt_addr.
 */
static bool vgic_its_check_event_id(struct vgic_its *its, struct its_device *device,
				    u32 event_id)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	int ite_esz = abi->ite_esz;
	gpa_t gpa;

	/* max table size is: BIT_ULL(device->num_eventid_bits) * ite_esz */
	if (event_id >= BIT_ULL(device->num_eventid_bits))
		return false;

	gpa = device->itt_addr + event_id * ite_esz;
	return __is_visible_gfn_locked(its, gpa);
}

/*
 * Add a new collection into the ITS collection table.
 * Returns 0 on success, and a negative error value for generic errors.
 */
static int vgic_its_alloc_collection(struct vgic_its *its,
				     struct its_collection **colp,
				     u32 coll_id)
{
	struct its_collection *collection;

	collection = kzalloc(sizeof(*collection), GFP_KERNEL_ACCOUNT);
	if (!collection)
		return -ENOMEM;

	collection->collection_id = coll_id;
	collection->target_addr = COLLECTION_NOT_MAPPED;

	list_add_tail(&collection->coll_list, &its->collection_list);
	*colp = collection;

	return 0;
}

static void vgic_its_free_collection(struct vgic_its *its, u32 coll_id)
{
	struct its_collection *collection;
	struct its_device *device;
	struct its_ite *ite;

	/*
	 * Clearing the mapping for that collection ID removes the
	 * entry from the list. If there wasn't any before, we can
	 * go home early.
	 */
	collection = find_collection(its, coll_id);
	if (!collection)
		return;

	for_each_lpi_its(device, ite, its)
		if (ite->collection &&
		    ite->collection->collection_id == coll_id)
			ite->collection = NULL;

	list_del(&collection->coll_list);
	kfree(collection);
}

/* Must be called with its_lock mutex held */
static struct its_ite *vgic_its_alloc_ite(struct its_device *device,
					  struct its_collection *collection,
					  u32 event_id)
{
	struct its_ite *ite;

	ite = kzalloc(sizeof(*ite), GFP_KERNEL_ACCOUNT);
	if (!ite)
		return ERR_PTR(-ENOMEM);

	ite->event_id = event_id;
	ite->collection = collection;

	list_add_tail(&ite->ite_list, &device->itt_head);
	return ite;
}

/*
 * The MAPTI and MAPI commands map LPIs to ITTEs.
 * Must be called with its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct its_ite *ite;
	struct kvm_vcpu *vcpu = NULL;
	struct its_device *device;
	struct its_collection *collection, *new_coll = NULL;
	struct vgic_irq *irq;
	int lpi_nr;

	device = find_its_device(its, device_id);
	if (!device)
		return E_ITS_MAPTI_UNMAPPED_DEVICE;

	if (!vgic_its_check_event_id(its, device, event_id))
		return E_ITS_MAPTI_ID_OOR;

	if (its_cmd_get_command(its_cmd) == GITS_CMD_MAPTI)
		lpi_nr = its_cmd_get_physical_id(its_cmd);
	else
		lpi_nr = event_id;
	if (lpi_nr < GIC_LPI_OFFSET ||
	    lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser))
		return E_ITS_MAPTI_PHYSICALID_OOR;

	/* If there is an existing mapping, behavior is UNPREDICTABLE. */
	if (find_ite(its, device_id, event_id))
		return 0;

	collection = find_collection(its, coll_id);
	if (!collection) {
		int ret;

		if (!vgic_its_check_id(its, its->baser_coll_table, coll_id, NULL))
			return E_ITS_MAPC_COLLECTION_OOR;

		ret = vgic_its_alloc_collection(its, &collection, coll_id);
		if (ret)
			return ret;
		new_coll = collection;
	}

	ite = vgic_its_alloc_ite(device, collection, event_id);
	if (IS_ERR(ite)) {
		if (new_coll)
			vgic_its_free_collection(its, coll_id);
		return PTR_ERR(ite);
	}

	if (its_is_collection_mapped(collection))
		vcpu = collection_to_vcpu(kvm, collection);

	irq = vgic_add_lpi(kvm, lpi_nr, vcpu);
	if (IS_ERR(irq)) {
		if (new_coll)
			vgic_its_free_collection(its, coll_id);
		its_free_ite(kvm, ite);
		return PTR_ERR(irq);
	}
	ite->irq = irq;

	return 0;
}

/* Requires the its_lock to be held. */
static void vgic_its_free_device(struct kvm *kvm, struct vgic_its *its,
				 struct its_device *device)
{
	struct its_ite *ite, *temp;

	/*
	 * The spec says that unmapping a device with still valid
	 * ITTEs associated is UNPREDICTABLE. We remove all ITTEs,
	 * since we cannot leave the memory unreferenced.
	 */
	list_for_each_entry_safe(ite, temp, &device->itt_head, ite_list)
		its_free_ite(kvm, ite);

	vgic_its_invalidate_cache(its);

	list_del(&device->dev_list);
	kfree(device);
}

/* its lock must be held */
static void vgic_its_free_device_list(struct kvm *kvm, struct vgic_its *its)
{
	struct its_device *cur, *temp;

	list_for_each_entry_safe(cur, temp, &its->device_list, dev_list)
		vgic_its_free_device(kvm, its, cur);
}

/* its lock must be held */
static void vgic_its_free_collection_list(struct kvm *kvm, struct vgic_its *its)
{
	struct its_collection *cur, *temp;

	list_for_each_entry_safe(cur, temp, &its->collection_list, coll_list)
		vgic_its_free_collection(its, cur->collection_id);
}

/* Must be called with its_lock mutex held */
static struct its_device *vgic_its_alloc_device(struct vgic_its *its,
						u32 device_id, gpa_t itt_addr,
						u8 num_eventid_bits)
{
	struct its_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL_ACCOUNT);
	if (!device)
		return ERR_PTR(-ENOMEM);

	device->device_id = device_id;
	device->itt_addr = itt_addr;
	device->num_eventid_bits = num_eventid_bits;
	INIT_LIST_HEAD(&device->itt_head);

	list_add_tail(&device->dev_list, &its->device_list);
	return device;
}

/*
 * MAPD maps or unmaps a device ID to Interrupt Translation Tables (ITTs).
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	bool valid = its_cmd_get_validbit(its_cmd);
	u8 num_eventid_bits = its_cmd_get_size(its_cmd);
	gpa_t itt_addr = its_cmd_get_ittaddr(its_cmd);
	struct its_device *device;
	gpa_t gpa;

	if (!vgic_its_check_id(its, its->baser_device_table, device_id, &gpa))
		return E_ITS_MAPD_DEVICE_OOR;

	if (valid && num_eventid_bits > VITS_TYPER_IDBITS)
		return E_ITS_MAPD_ITTSIZE_OOR;

	device = find_its_device(its, device_id);

	/*
	 * The spec says that calling MAPD on an already mapped device
	 * invalidates all cached data for this device. We implement this
	 * by removing the mapping and re-establishing it.
	 */
	if (device)
		vgic_its_free_device(kvm, its, device);

	/*
	 * The spec does not say whether unmapping a not-mapped device
	 * is an error, so we are done in any case.
	 */
	if (!valid)
		return vgic_its_write_entry_lock(its, gpa, 0ULL, dte);

	device = vgic_its_alloc_device(its, device_id, itt_addr,
				       num_eventid_bits);

	return PTR_ERR_OR_ZERO(device);
}

/*
 * The MAPC command maps collection IDs to redistributors.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u16 coll_id;
	struct its_collection *collection;
	bool valid;

	valid = its_cmd_get_validbit(its_cmd);
	coll_id = its_cmd_get_collection(its_cmd);

	if (!valid) {
		vgic_its_free_collection(its, coll_id);
		vgic_its_invalidate_cache(its);
	} else {
		struct kvm_vcpu *vcpu;

		vcpu = kvm_get_vcpu_by_id(kvm, its_cmd_get_target_addr(its_cmd));
		if (!vcpu)
			return E_ITS_MAPC_PROCNUM_OOR;

		collection = find_collection(its, coll_id);

		if (!collection) {
			int ret;

			if (!vgic_its_check_id(its, its->baser_coll_table,
					       coll_id, NULL))
				return E_ITS_MAPC_COLLECTION_OOR;

			ret = vgic_its_alloc_collection(its, &collection,
							coll_id);
			if (ret)
				return ret;
			collection->target_addr = vcpu->vcpu_id;
		} else {
			collection->target_addr = vcpu->vcpu_id;
			update_affinity_collection(kvm, its, collection);
		}
	}

	return 0;
}

/*
 * The CLEAR command removes the pending state for a particular LPI.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_clear(struct kvm *kvm, struct vgic_its *its,
				     u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_ite *ite;

	ite = find_ite(its, device_id, event_id);
	if (!ite)
		return E_ITS_CLEAR_UNMAPPED_INTERRUPT;

	ite->irq->pending_latch = false;

	if (ite->irq->hw)
		return irq_set_irqchip_state(ite->irq->host_irq,
					     IRQCHIP_STATE_PENDING, false);

	return 0;
}

int vgic_its_inv_lpi(struct kvm *kvm, struct vgic_irq *irq)
{
	return update_lpi_config(kvm, irq, NULL, true);
}

/*
 * The INV command syncs the configuration bits from the memory table.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_ite *ite;

	ite = find_ite(its, device_id, event_id);
	if (!ite)
		return E_ITS_INV_UNMAPPED_INTERRUPT;

	return vgic_its_inv_lpi(kvm, ite->irq);
}

/**
 * vgic_its_invall - invalidate all LPIs targeting a given vcpu
 * @vcpu: the vcpu for which the RD is targeted by an invalidation
 *
 * Contrary to the INVALL command, this targets a RD instead of a
 * collection, and we don't need to hold the its_lock, since no ITS is
 * involved here.
 */
int vgic_its_invall(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq;
	unsigned long intid;

	xa_for_each(&dist->lpi_xa, intid, irq) {
		irq = vgic_get_irq(kvm, intid);
		if (!irq)
			continue;

		update_lpi_config(kvm, irq, vcpu, false);
		vgic_put_irq(kvm, irq);
	}

	if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.its_vm)
		its_invall_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe);

	return 0;
}

/*
 * The INVALL command requests flushing of all IRQ data in this collection.
 * Find the VCPU mapped to that collection, then iterate over the VM's list
 * of mapped LPIs and update the configuration for each IRQ which targets
 * the specified vcpu. The configuration will be read from the in-memory
 * configuration table.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
				      u64 *its_cmd)
{
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct its_collection *collection;
	struct kvm_vcpu *vcpu;

	collection = find_collection(its, coll_id);
	if (!its_is_collection_mapped(collection))
		return E_ITS_INVALL_UNMAPPED_COLLECTION;

	vcpu = collection_to_vcpu(kvm, collection);
	vgic_its_invall(vcpu);

	return 0;
}

/*
 * The MOVALL command moves the pending state of all IRQs targeting one
 * redistributor to another. We don't hold the pending state in the VCPUs,
 * but in the IRQs instead, so there is really not much to do for us here.
 * However the spec says that no IRQ must target the old redistributor
 * afterwards, so we make sure that no LPI is using the associated target_vcpu.
 * This command affects all LPIs in the system that target that redistributor.
 */
static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
				      u64 *its_cmd)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu1, *vcpu2;
	struct vgic_irq *irq;
	unsigned long intid;

	/* We advertise GITS_TYPER.PTA==0, making the address the vcpu ID */
	vcpu1 = kvm_get_vcpu_by_id(kvm, its_cmd_get_target_addr(its_cmd));
	vcpu2 = kvm_get_vcpu_by_id(kvm, its_cmd_mask_field(its_cmd, 3, 16, 32));

	if (!vcpu1 || !vcpu2)
		return E_ITS_MOVALL_PROCNUM_OOR;

	if (vcpu1 == vcpu2)
		return 0;

	xa_for_each(&dist->lpi_xa, intid, irq) {
		irq = vgic_get_irq(kvm, intid);
		if (!irq)
			continue;

		update_affinity(irq, vcpu2);

		vgic_put_irq(kvm, irq);
	}

	vgic_its_invalidate_cache(its);

	return 0;
}

/*
 * The INT command injects the LPI associated with that DevID/EvID pair.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_int(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	u32 msi_data = its_cmd_get_id(its_cmd);
	u64 msi_devid = its_cmd_get_deviceid(its_cmd);

	return vgic_its_trigger_msi(kvm, its, msi_devid, msi_data);
}

/*
 * This function is called with the its_cmd lock held, but the ITS data
 * structure lock dropped.
 */
static int vgic_its_handle_command(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	int ret = -ENODEV;

	mutex_lock(&its->its_lock);
	switch (its_cmd_get_command(its_cmd)) {
	case GITS_CMD_MAPD:
		ret = vgic_its_cmd_handle_mapd(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPC:
		ret = vgic_its_cmd_handle_mapc(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPI:
		ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPTI:
		ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
		break;
	case GITS_CMD_MOVI:
		ret = vgic_its_cmd_handle_movi(kvm, its, its_cmd);
		break;
	case GITS_CMD_DISCARD:
		ret = vgic_its_cmd_handle_discard(kvm, its, its_cmd);
		break;
	case GITS_CMD_CLEAR:
		ret = vgic_its_cmd_handle_clear(kvm, its, its_cmd);
		break;
	case GITS_CMD_MOVALL:
		ret = vgic_its_cmd_handle_movall(kvm, its, its_cmd);
		break;
	case GITS_CMD_INT:
		ret = vgic_its_cmd_handle_int(kvm, its, its_cmd);
		break;
	case GITS_CMD_INV:
		ret = vgic_its_cmd_handle_inv(kvm, its, its_cmd);
		break;
	case GITS_CMD_INVALL:
		ret = vgic_its_cmd_handle_invall(kvm, its, its_cmd);
		break;
	case GITS_CMD_SYNC:
		/* we ignore this command: we are in sync all of the time */
		ret = 0;
		break;
	}
	mutex_unlock(&its->its_lock);

	return ret;
}

static u64 vgic_sanitise_its_baser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GITS_BASER_SHAREABILITY_MASK,
				  GITS_BASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GITS_BASER_INNER_CACHEABILITY_MASK,
				  GITS_BASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GITS_BASER_OUTER_CACHEABILITY_MASK,
				  GITS_BASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	/* We support only one (ITS) page size: 64K */
	reg = (reg & ~GITS_BASER_PAGE_SIZE_MASK) | GITS_BASER_PAGE_SIZE_64K;

	return reg;
}

static u64 vgic_sanitise_its_cbaser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GITS_CBASER_SHAREABILITY_MASK,
				  GITS_CBASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GITS_CBASER_INNER_CACHEABILITY_MASK,
				  GITS_CBASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GITS_CBASER_OUTER_CACHEABILITY_MASK,
				  GITS_CBASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	/* Sanitise the physical address to be 64k aligned. */
	reg &= ~GENMASK_ULL(15, 12);

	return reg;
}

static unsigned long vgic_mmio_read_its_cbaser(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	return extract_bytes(its->cbaser, addr & 7, len);
}

static void vgic_mmio_write_its_cbaser(struct kvm *kvm, struct vgic_its *its,
				       gpa_t addr, unsigned int len,
				       unsigned long val)
{
	/* When GITS_CTLR.Enable is 1, this register is RO. */
	if (its->enabled)
		return;

	mutex_lock(&its->cmd_lock);
	its->cbaser = update_64bit_reg(its->cbaser, addr & 7, len, val);
	its->cbaser = vgic_sanitise_its_cbaser(its->cbaser);
	its->creadr = 0;
	/*
	 * CWRITER is architecturally UNKNOWN on reset, but we need to reset
	 * it to CREADR to make sure we start with an empty command buffer.
	 */
	its->cwriter = its->creadr;
	mutex_unlock(&its->cmd_lock);
}

#define ITS_CMD_BUFFER_SIZE(baser)	((((baser) & 0xff) + 1) << 12)
#define ITS_CMD_SIZE			32
#define ITS_CMD_OFFSET(reg)		((reg) & GENMASK(19, 5))
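
/*
 * The queue size is encoded in CBASER as (Size + 1) 4K pages, giving at
 * most 256 pages = 1MB, i.e. 32768 of the 32-byte commands; that is why
 * ITS_CMD_OFFSET() only keeps bits [19:5] of CREADR/CWRITER.
 */
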
/* Must be called with the cmd_lock held. */
static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
{
	gpa_t cbaser;
	u64 cmd_buf[4];

	/* Commands are only processed when the ITS is enabled. */
	if (!its->enabled)
		return;

	cbaser = GITS_CBASER_ADDRESS(its->cbaser);

	while (its->cwriter != its->creadr) {
		int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr,
					      cmd_buf, ITS_CMD_SIZE);
		/*
		 * If kvm_read_guest_lock() fails, this could be due to the
		 * guest programming a bogus value in CBASER or something else
		 * going wrong from which we cannot easily recover.
		 * According to section 6.3.2 in the GICv3 spec we can just
		 * ignore that command then.
		 */
		if (!ret)
			vgic_its_handle_command(kvm, its, cmd_buf);

		its->creadr += ITS_CMD_SIZE;
		if (its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser))
			its->creadr = 0;
	}
}

/*
 * By writing to CWRITER the guest announces new commands to be processed.
 * To avoid any races in the first place, we take the its_cmd lock, which
 * protects our ring buffer variables, so that there is only one user
 * per ITS handling commands at a given time.
 */
static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
					gpa_t addr, unsigned int len,
					unsigned long val)
{
	u64 reg;

	if (!its)
		return;

	mutex_lock(&its->cmd_lock);

	reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
	reg = ITS_CMD_OFFSET(reg);
	if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
		mutex_unlock(&its->cmd_lock);
		return;
	}
	its->cwriter = reg;

	vgic_its_process_commands(kvm, its);

	mutex_unlock(&its->cmd_lock);
}

static unsigned long vgic_mmio_read_its_cwriter(struct kvm *kvm,
						struct vgic_its *its,
						gpa_t addr, unsigned int len)
{
	return extract_bytes(its->cwriter, addr & 0x7, len);
}

static unsigned long vgic_mmio_read_its_creadr(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	return extract_bytes(its->creadr, addr & 0x7, len);
}

static int vgic_mmio_uaccess_write_its_creadr(struct kvm *kvm,
					      struct vgic_its *its,
					      gpa_t addr, unsigned int len,
					      unsigned long val)
{
	u32 cmd_offset;
	int ret = 0;

	mutex_lock(&its->cmd_lock);

	if (its->enabled) {
		ret = -EBUSY;
		goto out;
	}

	cmd_offset = ITS_CMD_OFFSET(val);
	if (cmd_offset >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
		ret = -EINVAL;
		goto out;
	}

	its->creadr = cmd_offset;
out:
	mutex_unlock(&its->cmd_lock);
	return ret;
}

#define BASER_INDEX(addr) (((addr) / sizeof(u64)) & 0x7)
static unsigned long vgic_mmio_read_its_baser(struct kvm *kvm,
					      struct vgic_its *its,
					      gpa_t addr, unsigned int len)
{
	u64 reg;

	switch (BASER_INDEX(addr)) {
	case 0:
		reg = its->baser_device_table;
		break;
	case 1:
		reg = its->baser_coll_table;
		break;
	default:
		reg = 0;
		break;
	}

	return extract_bytes(reg, addr & 7, len);
}

#define GITS_BASER_RO_MASK	(GENMASK_ULL(52, 48) | GENMASK_ULL(58, 56))
static void vgic_mmio_write_its_baser(struct kvm *kvm,
				      struct vgic_its *its,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 entry_size, table_type;
	u64 reg, *regptr, clearbits = 0;

	/* When GITS_CTLR.Enable is 1, we ignore write accesses. */
	if (its->enabled)
		return;

	switch (BASER_INDEX(addr)) {
	case 0:
		regptr = &its->baser_device_table;
		entry_size = abi->dte_esz;
		table_type = GITS_BASER_TYPE_DEVICE;
		break;
	case 1:
		regptr = &its->baser_coll_table;
		entry_size = abi->cte_esz;
		table_type = GITS_BASER_TYPE_COLLECTION;
		clearbits = GITS_BASER_INDIRECT;
		break;
	default:
		return;
	}

	reg = update_64bit_reg(*regptr, addr & 7, len, val);
	reg &= ~GITS_BASER_RO_MASK;
	reg &= ~clearbits;

	reg |= (entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT;
	reg |= table_type << GITS_BASER_TYPE_SHIFT;
	reg = vgic_sanitise_its_baser(reg);

	*regptr = reg;

	if (!(reg & GITS_BASER_VALID)) {
		/* Take the its_lock to prevent a race with a save/restore */
		mutex_lock(&its->its_lock);
		switch (table_type) {
		case GITS_BASER_TYPE_DEVICE:
			vgic_its_free_device_list(kvm, its);
			break;
		case GITS_BASER_TYPE_COLLECTION:
			vgic_its_free_collection_list(kvm, its);
			break;
		}
		mutex_unlock(&its->its_lock);
	}
}

static unsigned long vgic_mmio_read_its_ctlr(struct kvm *vcpu,
					     struct vgic_its *its,
					     gpa_t addr, unsigned int len)
{
	u32 reg = 0;

	mutex_lock(&its->cmd_lock);
	if (its->creadr == its->cwriter)
		reg |= GITS_CTLR_QUIESCENT;
	if (its->enabled)
		reg |= GITS_CTLR_ENABLE;
	mutex_unlock(&its->cmd_lock);

	return reg;
}

static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	mutex_lock(&its->cmd_lock);

	/*
	 * It is UNPREDICTABLE to enable the ITS if any of the CBASER or
	 * device/collection BASER are invalid
	 */
	if (!its->enabled && (val & GITS_CTLR_ENABLE) &&
	    (!(its->baser_device_table & GITS_BASER_VALID) ||
	     !(its->baser_coll_table & GITS_BASER_VALID) ||
	     !(its->cbaser & GITS_CBASER_VALID)))
		goto out;

	its->enabled = !!(val & GITS_CTLR_ENABLE);
	if (!its->enabled)
		vgic_its_invalidate_cache(its);

	/*
	 * Try to process any pending commands. This function bails out early
	 * if the ITS is disabled or no commands have been queued.
	 */
	vgic_its_process_commands(kvm, its);

out:
	mutex_unlock(&its->cmd_lock);
}

#define REGISTER_ITS_DESC(off, rd, wr, length, acc) \
{ \
	.reg_offset = off, \
	.len = length, \
	.access_flags = acc, \
	.its_read = rd, \
	.its_write = wr, \
}

#define REGISTER_ITS_DESC_UACCESS(off, rd, wr, uwr, length, acc) \
{ \
	.reg_offset = off, \
	.len = length, \
	.access_flags = acc, \
	.its_read = rd, \
	.its_write = wr, \
	.uaccess_its_write = uwr, \
}

static void its_mmio_write_wi(struct kvm *kvm, struct vgic_its *its,
			      gpa_t addr, unsigned int len, unsigned long val)
{
	/* Ignore */
}

static struct vgic_register_region its_registers[] = {
	REGISTER_ITS_DESC(GITS_CTLR,
		vgic_mmio_read_its_ctlr, vgic_mmio_write_its_ctlr, 4,
		VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC_UACCESS(GITS_IIDR,
		vgic_mmio_read_its_iidr, its_mmio_write_wi,
		vgic_mmio_uaccess_write_its_iidr, 4,
		VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_TYPER,
		vgic_mmio_read_its_typer, its_mmio_write_wi, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_CBASER,
		vgic_mmio_read_its_cbaser, vgic_mmio_write_its_cbaser, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_CWRITER,
		vgic_mmio_read_its_cwriter, vgic_mmio_write_its_cwriter, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC_UACCESS(GITS_CREADR,
		vgic_mmio_read_its_creadr, its_mmio_write_wi,
		vgic_mmio_uaccess_write_its_creadr, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_BASER,
		vgic_mmio_read_its_baser, vgic_mmio_write_its_baser, 0x40,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_IDREGS_BASE,
		vgic_mmio_read_its_idregs, its_mmio_write_wi, 0x30,
		VGIC_ACCESS_32bit),
};
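
/*
 * These entries back the guest-visible GITS register frame: the 0x40
 * bytes at GITS_BASER cover the eight GITS_BASER<n> slots (only two of
 * which are wired up above), and the 0x30 bytes at GITS_IDREGS_BASE are
 * served by vgic_mmio_read_its_idregs().
 */
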
/* This is called on setting the LPI enable bit in the redistributor. */
void vgic_enable_lpis(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.vgic_cpu.pendbaser & GICR_PENDBASER_PTZ))
		its_sync_lpi_pending_table(vcpu);
}

static int vgic_register_its_iodev(struct kvm *kvm, struct vgic_its *its,
				   u64 addr)
{
	struct vgic_io_device *iodev = &its->iodev;
	int ret;

	mutex_lock(&kvm->slots_lock);
	if (!IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
		ret = -EBUSY;
		goto out;
	}

	its->vgic_its_base = addr;
	iodev->regions = its_registers;
	iodev->nr_regions = ARRAY_SIZE(its_registers);
	kvm_iodevice_init(&iodev->dev, &kvm_io_gic_ops);

	iodev->base_addr = its->vgic_its_base;
	iodev->iodev_type = IODEV_ITS;
	iodev->its = its;
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, iodev->base_addr,
				      KVM_VGIC_V3_ITS_SIZE, &iodev->dev);
out:
	mutex_unlock(&kvm->slots_lock);

	return ret;
}

#define INITIAL_BASER_VALUE \
	(GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb) | \
	 GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, SameAsInner) | \
	 GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable) | \
	 GITS_BASER_PAGE_SIZE_64K)

#define INITIAL_PROPBASER_VALUE \
	(GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb) | \
	 GIC_BASER_CACHEABILITY(GICR_PROPBASER, OUTER, SameAsInner) | \
	 GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable))

static int vgic_its_create(struct kvm_device *dev, u32 type)
{
	int ret;
	struct vgic_its *its;

	if (type != KVM_DEV_TYPE_ARM_VGIC_ITS)
		return -ENODEV;

	its = kzalloc(sizeof(struct vgic_its), GFP_KERNEL_ACCOUNT);
	if (!its)
		return -ENOMEM;

	mutex_lock(&dev->kvm->arch.config_lock);

	if (vgic_initialized(dev->kvm)) {
		ret = vgic_v4_init(dev->kvm);
		if (ret < 0) {
			mutex_unlock(&dev->kvm->arch.config_lock);
			kfree(its);
			return ret;
		}
	}

	mutex_init(&its->its_lock);
	mutex_init(&its->cmd_lock);

	/* Yep, even more trickery for lock ordering... */
#ifdef CONFIG_LOCKDEP
	mutex_lock(&its->cmd_lock);
	mutex_lock(&its->its_lock);
	mutex_unlock(&its->its_lock);
	mutex_unlock(&its->cmd_lock);
#endif

	its->vgic_its_base = VGIC_ADDR_UNDEF;

	INIT_LIST_HEAD(&its->device_list);
	INIT_LIST_HEAD(&its->collection_list);
	xa_init(&its->translation_cache);

	dev->kvm->arch.vgic.msis_require_devid = true;
	dev->kvm->arch.vgic.has_its = true;
	its->enabled = false;
	its->dev = dev;

	its->baser_device_table = INITIAL_BASER_VALUE |
		((u64)GITS_BASER_TYPE_DEVICE << GITS_BASER_TYPE_SHIFT);
	its->baser_coll_table = INITIAL_BASER_VALUE |
		((u64)GITS_BASER_TYPE_COLLECTION << GITS_BASER_TYPE_SHIFT);
	dev->kvm->arch.vgic.propbaser = INITIAL_PROPBASER_VALUE;

	dev->private = its;

	ret = vgic_its_set_abi(its, NR_ITS_ABIS - 1);

	mutex_unlock(&dev->kvm->arch.config_lock);

	return ret;
}
1902
1903
static void vgic_its_destroy(struct kvm_device *kvm_dev)
1904
{
1905
struct kvm *kvm = kvm_dev->kvm;
1906
struct vgic_its *its = kvm_dev->private;
1907
1908
mutex_lock(&its->its_lock);
1909
1910
vgic_its_debug_destroy(kvm_dev);
1911
1912
vgic_its_free_device_list(kvm, its);
1913
vgic_its_free_collection_list(kvm, its);
1914
vgic_its_invalidate_cache(its);
1915
xa_destroy(&its->translation_cache);
1916
1917
mutex_unlock(&its->its_lock);
1918
kfree(its);
1919
kfree(kvm_dev);/* alloc by kvm_ioctl_create_device, free by .destroy */
1920
}
1921
1922
static int vgic_its_has_attr_regs(struct kvm_device *dev,
1923
struct kvm_device_attr *attr)
1924
{
1925
const struct vgic_register_region *region;
1926
gpa_t offset = attr->attr;
1927
int align;
1928
1929
align = (offset < GITS_TYPER) || (offset >= GITS_PIDR4) ? 0x3 : 0x7;
1930
1931
if (offset & align)
1932
return -EINVAL;
1933
1934
region = vgic_find_mmio_region(its_registers,
1935
ARRAY_SIZE(its_registers),
1936
offset);
1937
if (!region)
1938
return -ENXIO;
1939
1940
return 0;
1941
}
1942
1943
static int vgic_its_attr_regs_access(struct kvm_device *dev,
				     struct kvm_device_attr *attr,
				     u64 *reg, bool is_write)
{
	const struct vgic_register_region *region;
	struct vgic_its *its;
	gpa_t addr, offset;
	unsigned int len;
	int align, ret = 0;

	its = dev->private;
	offset = attr->attr;

	/*
	 * Although the spec supports upper/lower 32-bit accesses to
	 * 64-bit ITS registers, the userspace ABI requires 64-bit
	 * accesses to all 64-bit wide registers. We therefore only
	 * support 32-bit accesses to GITS_CTLR, GITS_IIDR and the GITS
	 * ID registers.
	 */
	if ((offset < GITS_TYPER) || (offset >= GITS_PIDR4))
		align = 0x3;
	else
		align = 0x7;

	if (offset & align)
		return -EINVAL;

	mutex_lock(&dev->kvm->lock);

	if (kvm_trylock_all_vcpus(dev->kvm)) {
		mutex_unlock(&dev->kvm->lock);
		return -EBUSY;
	}

	mutex_lock(&dev->kvm->arch.config_lock);

	if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
		ret = -ENXIO;
		goto out;
	}

	region = vgic_find_mmio_region(its_registers,
				       ARRAY_SIZE(its_registers),
				       offset);
	if (!region) {
		ret = -ENXIO;
		goto out;
	}

	addr = its->vgic_its_base + offset;

	len = region->access_flags & VGIC_ACCESS_64bit ? 8 : 4;

	if (is_write) {
		if (region->uaccess_its_write)
			ret = region->uaccess_its_write(dev->kvm, its, addr,
							len, *reg);
		else
			region->its_write(dev->kvm, its, addr, len, *reg);
	} else {
		*reg = region->its_read(dev->kvm, its, addr, len);
	}
out:
	mutex_unlock(&dev->kvm->arch.config_lock);
	kvm_unlock_all_vcpus(dev->kvm);
	mutex_unlock(&dev->kvm->lock);
	return ret;
}

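/*
 * Editor's note: an illustrative sketch (assumption, not part of this file)
 * of reaching the path above from userspace. For the
 * KVM_DEV_ARM_VGIC_GRP_ITS_REGS group, attr carries the register offset
 * within the ITS frame and addr points to a u64 buffer.
 */
#if 0
static int example_read_gits_ctlr(int its_fd, __u64 *val)
{
	struct kvm_device_attr attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_ITS_REGS,
		.attr  = GITS_CTLR,	/* register offset within the frame */
		.addr  = (__u64)(unsigned long)val,
	};

	return ioctl(its_fd, KVM_GET_DEVICE_ATTR, &attr);
}
#endif
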
static u32 compute_next_devid_offset(struct list_head *h,
				     struct its_device *dev)
{
	struct its_device *next;
	u32 next_offset;

	if (list_is_last(&dev->dev_list, h))
		return 0;
	next = list_next_entry(dev, dev_list);
	next_offset = next->device_id - dev->device_id;

	return min_t(u32, next_offset, VITS_DTE_MAX_DEVID_OFFSET);
}

static u32 compute_next_eventid_offset(struct list_head *h, struct its_ite *ite)
{
	struct its_ite *next;
	u32 next_offset;

	if (list_is_last(&ite->ite_list, h))
		return 0;
	next = list_next_entry(ite, ite_list);
	next_offset = next->event_id - ite->event_id;

	return min_t(u32, next_offset, VITS_ITE_MAX_EVENTID_OFFSET);
}

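/*
 * Editor's note: the "next offset" values above delta-encode the table walk.
 * For example, with device IDs {0, 3, 10} on the (sorted) list,
 * compute_next_devid_offset() returns 3 for device 0, 7 for device 3 and
 * 0 for device 10 (the last entry), each value clamped to
 * VITS_DTE_MAX_DEVID_OFFSET.
 */
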
/**
 * typedef entry_fn_t - Callback called on a table entry restore path
 * @its: its handle
 * @id: id of the entry
 * @entry: pointer to the entry
 * @opaque: pointer to an opaque data
 *
 * Return: < 0 on error, 0 if last element was identified, id offset to next
 * element otherwise
 */
typedef int (*entry_fn_t)(struct vgic_its *its, u32 id, void *entry,
			  void *opaque);

/**
 * scan_its_table - Scan a contiguous table in guest RAM and apply a function
 * to each entry
 *
 * @its: its handle
 * @base: base gpa of the table
 * @size: size of the table in bytes
 * @esz: entry size in bytes
 * @start_id: the ID of the first entry in the table
 * (non zero for 2nd level tables)
 * @fn: function to apply on each entry
 * @opaque: pointer to opaque data
 *
 * Return: < 0 on error, 0 if last element was identified, 1 otherwise
 * (the last element may not be found on second level tables)
 */
static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz,
			  int start_id, entry_fn_t fn, void *opaque)
{
	struct kvm *kvm = its->dev->kvm;
	unsigned long len = size;
	int id = start_id;
	gpa_t gpa = base;
	char entry[ESZ_MAX];
	int ret;

	memset(entry, 0, esz);

	while (true) {
		int next_offset;
		size_t byte_offset;

		ret = kvm_read_guest_lock(kvm, gpa, entry, esz);
		if (ret)
			return ret;

		next_offset = fn(its, id, entry, opaque);
		if (next_offset <= 0)
			return next_offset;

		byte_offset = next_offset * esz;
		if (byte_offset >= len)
			break;

		id += next_offset;
		gpa += byte_offset;
		len -= byte_offset;
	}
	return 1;
}

/*
 * vgic_its_save_ite - Save an interrupt translation entry at @gpa
 */
static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
			     struct its_ite *ite, gpa_t gpa)
{
	u32 next_offset;
	u64 val;

	next_offset = compute_next_eventid_offset(&dev->itt_head, ite);
	val = ((u64)next_offset << KVM_ITS_ITE_NEXT_SHIFT) |
	      ((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) |
	      ite->collection->collection_id;
	val = cpu_to_le64(val);

	return vgic_its_write_entry_lock(its, gpa, val, ite);
}

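/*
 * Editor's note: per the shifts used above, a saved ITE packs the
 * next-EVENTID offset at KVM_ITS_ITE_NEXT_SHIFT, the physical INTID at
 * KVM_ITS_ITE_PINTID_SHIFT and the collection ID (ICID) in the low bits.
 * The exact bit positions are fixed by the KVM ITS save/restore ABI
 * (see Documentation/virt/kvm/devices/arm-vgic-its.rst).
 */
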
/**
 * vgic_its_restore_ite - restore an interrupt translation entry
 *
 * @its: its handle
 * @event_id: id used for indexing
 * @ptr: pointer to the ITE entry
 * @opaque: pointer to the its_device
 */
static int vgic_its_restore_ite(struct vgic_its *its, u32 event_id,
				void *ptr, void *opaque)
{
	struct its_device *dev = opaque;
	struct its_collection *collection;
	struct kvm *kvm = its->dev->kvm;
	struct kvm_vcpu *vcpu = NULL;
	u64 val;
	u64 *p = (u64 *)ptr;
	struct vgic_irq *irq;
	u32 coll_id, lpi_id;
	struct its_ite *ite;
	u32 offset;

	val = *p;

	val = le64_to_cpu(val);

	coll_id = val & KVM_ITS_ITE_ICID_MASK;
	lpi_id = (val & KVM_ITS_ITE_PINTID_MASK) >> KVM_ITS_ITE_PINTID_SHIFT;

	if (!lpi_id)
		return 1; /* invalid entry, no choice but to scan next entry */

	if (lpi_id < VGIC_MIN_LPI)
		return -EINVAL;

	offset = val >> KVM_ITS_ITE_NEXT_SHIFT;
	if (event_id + offset >= BIT_ULL(dev->num_eventid_bits))
		return -EINVAL;

	collection = find_collection(its, coll_id);
	if (!collection)
		return -EINVAL;

	if (!vgic_its_check_event_id(its, dev, event_id))
		return -EINVAL;

	ite = vgic_its_alloc_ite(dev, collection, event_id);
	if (IS_ERR(ite))
		return PTR_ERR(ite);

	if (its_is_collection_mapped(collection))
		vcpu = kvm_get_vcpu_by_id(kvm, collection->target_addr);

	irq = vgic_add_lpi(kvm, lpi_id, vcpu);
	if (IS_ERR(irq)) {
		its_free_ite(kvm, ite);
		return PTR_ERR(irq);
	}
	ite->irq = irq;

	return offset;
}

static int vgic_its_ite_cmp(void *priv, const struct list_head *a,
			    const struct list_head *b)
{
	struct its_ite *itea = container_of(a, struct its_ite, ite_list);
	struct its_ite *iteb = container_of(b, struct its_ite, ite_list);

	if (itea->event_id < iteb->event_id)
		return -1;
	else
		return 1;
}

static int vgic_its_save_itt(struct vgic_its *its, struct its_device *device)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	gpa_t base = device->itt_addr;
	struct its_ite *ite;
	int ret;
	int ite_esz = abi->ite_esz;

	list_sort(NULL, &device->itt_head, vgic_its_ite_cmp);

	list_for_each_entry(ite, &device->itt_head, ite_list) {
		gpa_t gpa = base + ite->event_id * ite_esz;

		/*
		 * If an LPI carries the HW bit, this means that this
		 * interrupt is controlled by GICv4, and we do not
		 * have direct access to that state without GICv4.1.
		 * Let's simply fail the save operation...
		 */
		if (ite->irq->hw && !kvm_vgic_global_state.has_gicv4_1)
			return -EACCES;

		ret = vgic_its_save_ite(its, device, ite, gpa);
		if (ret)
			return ret;
	}
	return 0;
}

/**
 * vgic_its_restore_itt - restore the ITT of a device
 *
 * @its: its handle
 * @dev: device handle
 *
 * Return: 0 on success, < 0 on error
 */
static int vgic_its_restore_itt(struct vgic_its *its, struct its_device *dev)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	gpa_t base = dev->itt_addr;
	int ret;
	int ite_esz = abi->ite_esz;
	size_t max_size = BIT_ULL(dev->num_eventid_bits) * ite_esz;

	ret = scan_its_table(its, base, max_size, ite_esz, 0,
			     vgic_its_restore_ite, dev);

	/* scan_its_table returns +1 if all ITEs are invalid */
	if (ret > 0)
		ret = 0;

	return ret;
}

/**
 * vgic_its_save_dte - Save a device table entry at a given GPA
 *
 * @its: ITS handle
 * @dev: ITS device
 * @ptr: GPA
 */
static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
			     gpa_t ptr)
{
	u64 val, itt_addr_field;
	u32 next_offset;

	itt_addr_field = dev->itt_addr >> 8;
	next_offset = compute_next_devid_offset(&its->device_list, dev);
	val = (1ULL << KVM_ITS_DTE_VALID_SHIFT |
	       ((u64)next_offset << KVM_ITS_DTE_NEXT_SHIFT) |
	       (itt_addr_field << KVM_ITS_DTE_ITTADDR_SHIFT) |
	       (dev->num_eventid_bits - 1));
	val = cpu_to_le64(val);

	return vgic_its_write_entry_lock(its, ptr, val, dte);
}

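/*
 * Editor's note: mirroring the encoding above, a saved DTE carries a valid
 * bit at KVM_ITS_DTE_VALID_SHIFT, the next-DEVID offset at
 * KVM_ITS_DTE_NEXT_SHIFT, the ITT address (pre-shifted right by 8) at
 * KVM_ITS_DTE_ITTADDR_SHIFT and (num_eventid_bits - 1) in the low size
 * field; vgic_its_restore_dte() below undoes exactly this packing.
 */
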
/**
 * vgic_its_restore_dte - restore a device table entry
 *
 * @its: its handle
 * @id: device id the DTE corresponds to
 * @ptr: kernel VA where the 8 byte DTE is located
 * @opaque: unused
 *
 * Return: < 0 on error, 0 if the dte is the last one, id offset to the
 * next dte otherwise
 */
static int vgic_its_restore_dte(struct vgic_its *its, u32 id,
				void *ptr, void *opaque)
{
	struct its_device *dev;
	u64 baser = its->baser_device_table;
	gpa_t itt_addr;
	u8 num_eventid_bits;
	u64 entry = *(u64 *)ptr;
	bool valid;
	u32 offset;
	int ret;

	entry = le64_to_cpu(entry);

	valid = entry >> KVM_ITS_DTE_VALID_SHIFT;
	num_eventid_bits = (entry & KVM_ITS_DTE_SIZE_MASK) + 1;
	itt_addr = ((entry & KVM_ITS_DTE_ITTADDR_MASK)
			>> KVM_ITS_DTE_ITTADDR_SHIFT) << 8;

	if (!valid)
		return 1;

	/* dte entry is valid */
	offset = (entry & KVM_ITS_DTE_NEXT_MASK) >> KVM_ITS_DTE_NEXT_SHIFT;

	if (!vgic_its_check_id(its, baser, id, NULL))
		return -EINVAL;

	dev = vgic_its_alloc_device(its, id, itt_addr, num_eventid_bits);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	ret = vgic_its_restore_itt(its, dev);
	if (ret) {
		vgic_its_free_device(its->dev->kvm, its, dev);
		return ret;
	}

	return offset;
}

static int vgic_its_device_cmp(void *priv, const struct list_head *a,
			       const struct list_head *b)
{
	struct its_device *deva = container_of(a, struct its_device, dev_list);
	struct its_device *devb = container_of(b, struct its_device, dev_list);

	if (deva->device_id < devb->device_id)
		return -1;
	else
		return 1;
}

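/*
 * Editor's note: neither this comparator nor vgic_its_ite_cmp() above ever
 * returns 0, since device IDs are unique within an ITS and event IDs are
 * unique within a device, so list_sort() stability does not matter here.
 */
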
/*
 * vgic_its_save_device_tables - Save the device table and all ITT
 * into guest RAM
 *
 * L1/L2 handling is hidden by vgic_its_check_id() helper which directly
 * returns the GPA of the device entry
 */
static int vgic_its_save_device_tables(struct vgic_its *its)
{
	u64 baser = its->baser_device_table;
	struct its_device *dev;

	if (!(baser & GITS_BASER_VALID))
		return 0;

	list_sort(NULL, &its->device_list, vgic_its_device_cmp);

	list_for_each_entry(dev, &its->device_list, dev_list) {
		int ret;
		gpa_t eaddr;

		if (!vgic_its_check_id(its, baser,
				       dev->device_id, &eaddr))
			return -EINVAL;

		ret = vgic_its_save_itt(its, dev);
		if (ret)
			return ret;

		ret = vgic_its_save_dte(its, dev, eaddr);
		if (ret)
			return ret;
	}
	return 0;
}

/**
 * handle_l1_dte - callback used for L1 device table entries (2 stage case)
 *
 * @its: its handle
 * @id: index of the entry in the L1 table
 * @addr: kernel VA
 * @opaque: unused
 *
 * L1 table entries are scanned by steps of 1 entry
 * Return < 0 if error, 0 if last dte was found when scanning the L2
 * table, +1 otherwise (meaning next L1 entry must be scanned)
 */
static int handle_l1_dte(struct vgic_its *its, u32 id, void *addr,
			 void *opaque)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	int l2_start_id = id * (SZ_64K / abi->dte_esz);
	u64 entry = *(u64 *)addr;
	int dte_esz = abi->dte_esz;
	gpa_t gpa;
	int ret;

	entry = le64_to_cpu(entry);

	if (!(entry & KVM_ITS_L1E_VALID_MASK))
		return 1;

	gpa = entry & KVM_ITS_L1E_ADDR_MASK;

	ret = scan_its_table(its, gpa, SZ_64K, dte_esz,
			     l2_start_id, vgic_its_restore_dte, NULL);

	return ret;
}

/*
 * vgic_its_restore_device_tables - Restore the device table and all ITT
 * from guest RAM to internal data structs
 */
static int vgic_its_restore_device_tables(struct vgic_its *its)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 baser = its->baser_device_table;
	int l1_esz, ret;
	int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
	gpa_t l1_gpa;

	if (!(baser & GITS_BASER_VALID))
		return 0;

	l1_gpa = GITS_BASER_ADDR_48_to_52(baser);

	if (baser & GITS_BASER_INDIRECT) {
		l1_esz = GITS_LVL1_ENTRY_SIZE;
		ret = scan_its_table(its, l1_gpa, l1_tbl_size, l1_esz, 0,
				     handle_l1_dte, NULL);
	} else {
		l1_esz = abi->dte_esz;
		ret = scan_its_table(its, l1_gpa, l1_tbl_size, l1_esz, 0,
				     vgic_its_restore_dte, NULL);
	}

	/* scan_its_table returns +1 if all entries are invalid */
	if (ret > 0)
		ret = 0;

	if (ret < 0)
		vgic_its_free_device_list(its->dev->kvm, its);

	return ret;
}

static int vgic_its_save_cte(struct vgic_its *its,
			     struct its_collection *collection,
			     gpa_t gpa)
{
	u64 val;

	val = (1ULL << KVM_ITS_CTE_VALID_SHIFT |
	       ((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) |
	       collection->collection_id);
	val = cpu_to_le64(val);

	return vgic_its_write_entry_lock(its, gpa, val, cte);
}

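/*
 * Editor's note: per the shifts above, a saved CTE has a valid bit at
 * KVM_ITS_CTE_VALID_SHIFT, the target redistributor (a vcpu_id, or
 * COLLECTION_NOT_MAPPED) at KVM_ITS_CTE_RDBASE_SHIFT and the collection
 * ID in the low bits, which is exactly what vgic_its_restore_cte() below
 * reads back.
 */
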
/*
 * Restore a collection entry into the ITS collection table.
 * Return +1 on success, 0 if the entry was invalid (which should be
 * interpreted as end-of-table), and a negative error value for generic errors.
 */
static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa)
{
	struct its_collection *collection;
	struct kvm *kvm = its->dev->kvm;
	u32 target_addr, coll_id;
	u64 val;
	int ret;

	ret = vgic_its_read_entry_lock(its, gpa, &val, cte);
	if (ret)
		return ret;
	val = le64_to_cpu(val);
	if (!(val & KVM_ITS_CTE_VALID_MASK))
		return 0;

	target_addr = (u32)(val >> KVM_ITS_CTE_RDBASE_SHIFT);
	coll_id = val & KVM_ITS_CTE_ICID_MASK;

	if (target_addr != COLLECTION_NOT_MAPPED &&
	    !kvm_get_vcpu_by_id(kvm, target_addr))
		return -EINVAL;

	collection = find_collection(its, coll_id);
	if (collection)
		return -EEXIST;

	if (!vgic_its_check_id(its, its->baser_coll_table, coll_id, NULL))
		return -EINVAL;

	ret = vgic_its_alloc_collection(its, &collection, coll_id);
	if (ret)
		return ret;
	collection->target_addr = target_addr;
	return 1;
}

/*
 * vgic_its_save_collection_table - Save the collection table into
 * guest RAM
 */
static int vgic_its_save_collection_table(struct vgic_its *its)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 baser = its->baser_coll_table;
	gpa_t gpa = GITS_BASER_ADDR_48_to_52(baser);
	struct its_collection *collection;
	size_t max_size, filled = 0;
	int ret, cte_esz = abi->cte_esz;

	if (!(baser & GITS_BASER_VALID))
		return 0;

	max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;

	list_for_each_entry(collection, &its->collection_list, coll_list) {
		ret = vgic_its_save_cte(its, collection, gpa);
		if (ret)
			return ret;
		gpa += cte_esz;
		filled += cte_esz;
	}

	if (filled == max_size)
		return 0;

	/*
	 * The table is not fully filled; add a last dummy element
	 * with the valid bit unset.
	 */
	return vgic_its_write_entry_lock(its, gpa, 0ULL, cte);
}

/*
 * vgic_its_restore_collection_table - reads the collection table
 * in guest memory and restores the ITS internal state. Requires the
 * BASER registers to be restored before.
 */
static int vgic_its_restore_collection_table(struct vgic_its *its)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 baser = its->baser_coll_table;
	int cte_esz = abi->cte_esz;
	size_t max_size, read = 0;
	gpa_t gpa;
	int ret;

	if (!(baser & GITS_BASER_VALID))
		return 0;

	gpa = GITS_BASER_ADDR_48_to_52(baser);

	max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;

	while (read < max_size) {
		ret = vgic_its_restore_cte(its, gpa);
		if (ret <= 0)
			break;
		gpa += cte_esz;
		read += cte_esz;
	}

	if (ret > 0)
		return 0;

	if (ret < 0)
		vgic_its_free_collection_list(its->dev->kvm, its);

	return ret;
}

/*
 * vgic_its_save_tables_v0 - Save the ITS tables into guest RAM
 * according to v0 ABI
 */
static int vgic_its_save_tables_v0(struct vgic_its *its)
{
	int ret;

	ret = vgic_its_save_device_tables(its);
	if (ret)
		return ret;

	return vgic_its_save_collection_table(its);
}

/*
 * vgic_its_restore_tables_v0 - Restore the ITS tables from guest RAM
 * to internal data structs according to V0 ABI
 */
static int vgic_its_restore_tables_v0(struct vgic_its *its)
{
	int ret;

	ret = vgic_its_restore_collection_table(its);
	if (ret)
		return ret;

	ret = vgic_its_restore_device_tables(its);
	if (ret)
		vgic_its_free_collection_list(its->dev->kvm, its);
	return ret;
}

static int vgic_its_commit_v0(struct vgic_its *its)
{
	const struct vgic_its_abi *abi;

	abi = vgic_its_get_abi(its);
	its->baser_coll_table &= ~GITS_BASER_ENTRY_SIZE_MASK;
	its->baser_device_table &= ~GITS_BASER_ENTRY_SIZE_MASK;

	its->baser_coll_table |= (GIC_ENCODE_SZ(abi->cte_esz, 5)
					<< GITS_BASER_ENTRY_SIZE_SHIFT);

	its->baser_device_table |= (GIC_ENCODE_SZ(abi->dte_esz, 5)
					<< GITS_BASER_ENTRY_SIZE_SHIFT);
	return 0;
}

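/*
 * Editor's note: GITS_BASER.Entry_Size is architecturally "entry size in
 * bytes minus one", which is what GIC_ENCODE_SZ(esz, 5) produces for the
 * 5-bit field: an 8-byte entry is encoded as 7, for example.
 */
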
static void vgic_its_reset(struct kvm *kvm, struct vgic_its *its)
{
	/* We need to keep the ABI specific field values */
	its->baser_coll_table &= ~GITS_BASER_VALID;
	its->baser_device_table &= ~GITS_BASER_VALID;
	its->cbaser = 0;
	its->creadr = 0;
	its->cwriter = 0;
	its->enabled = 0;
	vgic_its_free_device_list(kvm, its);
	vgic_its_free_collection_list(kvm, its);
}

static int vgic_its_has_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_ITS_ADDR_TYPE:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			return 0;
		case KVM_DEV_ARM_ITS_CTRL_RESET:
			return 0;
		case KVM_DEV_ARM_ITS_SAVE_TABLES:
			return 0;
		case KVM_DEV_ARM_ITS_RESTORE_TABLES:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_ITS_REGS:
		return vgic_its_has_attr_regs(dev, attr);
	}
	return -ENXIO;
}

static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	int ret = 0;

	if (attr == KVM_DEV_ARM_VGIC_CTRL_INIT) /* Nothing to do */
		return 0;

	mutex_lock(&kvm->lock);

	if (kvm_trylock_all_vcpus(kvm)) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}

	mutex_lock(&kvm->arch.config_lock);
	mutex_lock(&its->its_lock);

	switch (attr) {
	case KVM_DEV_ARM_ITS_CTRL_RESET:
		vgic_its_reset(kvm, its);
		break;
	case KVM_DEV_ARM_ITS_SAVE_TABLES:
		ret = abi->save_tables(its);
		break;
	case KVM_DEV_ARM_ITS_RESTORE_TABLES:
		ret = abi->restore_tables(its);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	mutex_unlock(&its->its_lock);
	mutex_unlock(&kvm->arch.config_lock);
	kvm_unlock_all_vcpus(kvm);
	mutex_unlock(&kvm->lock);
	return ret;
}

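/*
 * Editor's note: an illustrative userspace sketch (assumption, not part of
 * this file) for triggering the table save handled above; restore works
 * the same way with KVM_DEV_ARM_ITS_RESTORE_TABLES.
 */
#if 0
static int example_save_its_tables(int its_fd)
{
	struct kvm_device_attr attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_CTRL,
		.attr  = KVM_DEV_ARM_ITS_SAVE_TABLES,
	};

	return ioctl(its_fd, KVM_SET_DEVICE_ATTR, &attr);
}
#endif
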
/*
 * kvm_arch_allow_write_without_running_vcpu - allow writing guest memory
 * without the running VCPU when dirty ring is enabled.
 *
 * The running VCPU is required to track dirty guest pages when the dirty
 * ring is enabled. Otherwise, the backup bitmap should be used to track
 * the dirty guest pages. When vgic/its tables are being saved, the backup
 * bitmap is used to track the dirty guest pages, since no VCPU is running
 * during that period.
 */
bool kvm_arch_allow_write_without_running_vcpu(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	return dist->table_write_in_progress;
}

static int vgic_its_set_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	struct vgic_its *its = dev->private;
	int ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		unsigned long type = (unsigned long)attr->attr;
		u64 addr;

		if (type != KVM_VGIC_ITS_ADDR_TYPE)
			return -ENODEV;

		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		ret = vgic_check_iorange(dev->kvm, its->vgic_its_base,
					 addr, SZ_64K, KVM_VGIC_V3_ITS_SIZE);
		if (ret)
			return ret;

		ret = vgic_register_its_iodev(dev->kvm, its, addr);
		if (ret)
			return ret;

		return vgic_its_debug_init(dev);
	}
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		return vgic_its_ctrl(dev->kvm, its, attr->attr);
	case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 reg;

		if (get_user(reg, uaddr))
			return -EFAULT;

		return vgic_its_attr_regs_access(dev, attr, &reg, true);
	}
	}
	return -ENXIO;
}

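/*
 * Editor's note: an illustrative sketch (assumption, not part of this file)
 * of the KVM_DEV_ARM_VGIC_GRP_ADDR path above: userspace passes a pointer
 * to a 64K-aligned guest physical base address for the ITS frame.
 */
#if 0
static int example_set_its_base(int its_fd, __u64 gpa)
{
	struct kvm_device_attr attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_ADDR,
		.attr  = KVM_VGIC_ITS_ADDR_TYPE,
		.addr  = (__u64)(unsigned long)&gpa,
	};

	return ioctl(its_fd, KVM_SET_DEVICE_ATTR, &attr);
}
#endif
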
static int vgic_its_get_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		struct vgic_its *its = dev->private;
		u64 addr = its->vgic_its_base;
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		unsigned long type = (unsigned long)attr->attr;

		if (type != KVM_VGIC_ITS_ADDR_TYPE)
			return -ENODEV;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 reg;
		int ret;

		ret = vgic_its_attr_regs_access(dev, attr, &reg, false);
		if (ret)
			return ret;
		return put_user(reg, uaddr);
	}
	default:
		return -ENXIO;
	}

	return 0;
}

static struct kvm_device_ops kvm_arm_vgic_its_ops = {
	.name = "kvm-arm-vgic-its",
	.create = vgic_its_create,
	.destroy = vgic_its_destroy,
	.set_attr = vgic_its_set_attr,
	.get_attr = vgic_its_get_attr,
	.has_attr = vgic_its_has_attr,
};

int kvm_vgic_register_its_device(void)
{
	return kvm_register_device_ops(&kvm_arm_vgic_its_ops,
				       KVM_DEV_TYPE_ARM_VGIC_ITS);
}