return 0;
}
-static void vgic_its_init_collection(struct vgic_its *its,
- struct its_collection *collection,
+/*
+ * Check whether an ID can be stored into the corresponding guest table.
+ * For a direct table this is pretty easy, but gets a bit nasty for
+ * indirect tables. We check whether the resulting guest physical address
+ * is actually valid (covered by a memslot and guest accessible).
+ * For this we have to read the respective first level entry.
+ */
+static bool vgic_its_check_id(struct vgic_its *its, u64 baser, int id)
+{
+	/* Total size of the first (or only) level table, in bytes. */
+	int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
+	int index;
+	u64 indirect_ptr;
+	gfn_t gfn;
+
+	if (!(baser & GITS_BASER_INDIRECT)) {
+		phys_addr_t addr;
+
+		/* Flat table: the ID indexes the table directly. */
+		if (id >= (l1_tbl_size / GITS_BASER_ENTRY_SIZE(baser)))
+			return false;
+
+		addr = BASER_ADDRESS(baser) + id * GITS_BASER_ENTRY_SIZE(baser);
+		gfn = addr >> PAGE_SHIFT;
+
+		return kvm_is_visible_gfn(its->dev->kvm, gfn);
+	}
+
+	/* calculate and check the index into the 1st level */
+	index = id / (SZ_64K / GITS_BASER_ENTRY_SIZE(baser));
+	if (index >= (l1_tbl_size / sizeof(u64)))
+		return false;
+
+	/* Each 1st level entry is represented by a 64-bit value. */
+	if (kvm_read_guest(its->dev->kvm,
+			   BASER_ADDRESS(baser) + index * sizeof(indirect_ptr),
+			   &indirect_ptr, sizeof(indirect_ptr)))
+		return false;
+
+	/* L1 entries are stored little-endian in guest memory. */
+	indirect_ptr = le64_to_cpu(indirect_ptr);
+
+	/* check the valid bit of the first level entry */
+	if (!(indirect_ptr & BIT_ULL(63)))
+		return false;
+
+	/*
+	 * Mask the guest physical address and calculate the frame number.
+	 * Any address beyond our supported 48 bits of PA will be caught
+	 * by the actual check in the final step.
+	 */
+	indirect_ptr &= GENMASK_ULL(51, 16);
+
+	/* Find the address of the actual entry */
+	index = id % (SZ_64K / GITS_BASER_ENTRY_SIZE(baser));
+	indirect_ptr += index * GITS_BASER_ENTRY_SIZE(baser);
+	gfn = indirect_ptr >> PAGE_SHIFT;
+
+	return kvm_is_visible_gfn(its->dev->kvm, gfn);
+}
+
+/*
+ * Allocates a new collection structure for coll_id, initializes it as
+ * unmapped and adds it to the ITS collection list.
+ * Returns 0 and stores the pointer in *colp on success,
+ * E_ITS_MAPC_COLLECTION_OOR if the ID does not fit into the guest's
+ * collection table, or -ENOMEM on allocation failure.
+ */
+static int vgic_its_alloc_collection(struct vgic_its *its,
+				     struct its_collection **colp,
 u32 coll_id)
{
+	struct its_collection *collection;
+
+	if (!vgic_its_check_id(its, its->baser_coll_table, coll_id))
+		return E_ITS_MAPC_COLLECTION_OOR;
+
+	collection = kzalloc(sizeof(*collection), GFP_KERNEL);
+	/* Bail out before dereferencing a failed allocation. */
+	if (!collection)
+		return -ENOMEM;
+
 collection->collection_id = coll_id;
 collection->target_addr = COLLECTION_NOT_MAPPED;
 list_add_tail(&collection->coll_list, &its->collection_list);
+	*colp = collection;
+
+	return 0;
+}
+
+/*
+ * Unmaps collection coll_id: clears every ITTE reference to it, removes
+ * it from the ITS collection list and frees it. Returns silently when
+ * the ID was never mapped.
+ */
+static void vgic_its_free_collection(struct vgic_its *its, u32 coll_id)
+{
+	struct its_collection *collection;
+	struct its_device *device;
+	struct its_itte *itte;
+
+	/*
+	 * Clearing the mapping for that collection ID removes the
+	 * entry from the list. If there wasn't any before, we can
+	 * go home early.
+	 */
+	collection = find_collection(its, coll_id);
+	if (!collection)
+		return;
+
+	/* Detach all translation entries still pointing at this collection. */
+	for_each_lpi_its(device, itte, its)
+		if (itte->collection &&
+		    itte->collection->collection_id == coll_id)
+			itte->collection = NULL;
+
+	list_del(&collection->coll_list);
+	kfree(collection);
}
/*
* Must be called with its_lock mutex held.
*/
static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
- u64 *its_cmd, u8 subcmd)
+ u64 *its_cmd)
{
u32 device_id = its_cmd_get_deviceid(its_cmd);
u32 event_id = its_cmd_get_id(its_cmd);
if (!device)
return E_ITS_MAPTI_UNMAPPED_DEVICE;
- collection = find_collection(its, coll_id);
- if (!collection) {
- new_coll = kzalloc(sizeof(struct its_collection), GFP_KERNEL);
- if (!new_coll)
- return -ENOMEM;
- }
-
- if (subcmd == GITS_CMD_MAPTI)
+ if (its_cmd_get_command(its_cmd) == GITS_CMD_MAPTI)
lpi_nr = its_cmd_get_physical_id(its_cmd);
else
lpi_nr = event_id;
if (lpi_nr < GIC_LPI_OFFSET ||
- lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser)) {
- kfree(new_coll);
+ lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser))
return E_ITS_MAPTI_PHYSICALID_OOR;
+
+ collection = find_collection(its, coll_id);
+ if (!collection) {
+ int ret = vgic_its_alloc_collection(its, &collection, coll_id);
+ if (ret)
+ return ret;
+ new_coll = collection;
}
itte = find_itte(its, device_id, event_id);
if (!itte) {
itte = kzalloc(sizeof(struct its_itte), GFP_KERNEL);
if (!itte) {
- kfree(new_coll);
+ if (new_coll)
+ vgic_its_free_collection(its, coll_id);
return -ENOMEM;
}
list_add_tail(&itte->itte_list, &device->itt_head);
}
- if (!collection) {
- collection = new_coll;
- vgic_its_init_collection(its, collection, coll_id);
- }
-
itte->collection = collection;
itte->lpi = lpi_nr;
itte->irq = vgic_add_lpi(kvm, lpi_nr);
kfree(device);
}
-/*
- * Check whether a device ID can be stored into the guest device tables.
- * For a direct table this is pretty easy, but gets a bit nasty for
- * indirect tables. We check whether the resulting guest physical address
- * is actually valid (covered by a memslot and guest accessbible).
- * For this we have to read the respective first level entry.
- */
-static bool vgic_its_check_device_id(struct kvm *kvm, struct vgic_its *its,
- int device_id)
-{
- u64 r = its->baser_device_table;
- int l1_tbl_size = GITS_BASER_NR_PAGES(r) * SZ_64K;
- int index;
- u64 indirect_ptr;
- gfn_t gfn;
-
-
- if (!(r & GITS_BASER_INDIRECT)) {
- phys_addr_t addr;
-
- if (device_id >= (l1_tbl_size / GITS_BASER_ENTRY_SIZE(r)))
- return false;
-
- addr = BASER_ADDRESS(r) + device_id * GITS_BASER_ENTRY_SIZE(r);
- gfn = addr >> PAGE_SHIFT;
-
- return kvm_is_visible_gfn(kvm, gfn);
- }
-
- /* calculate and check the index into the 1st level */
- index = device_id / (SZ_64K / GITS_BASER_ENTRY_SIZE(r));
- if (index >= (l1_tbl_size / sizeof(u64)))
- return false;
-
- /* Each 1st level entry is represented by a 64-bit value. */
- if (kvm_read_guest(kvm,
- BASER_ADDRESS(r) + index * sizeof(indirect_ptr),
- &indirect_ptr, sizeof(indirect_ptr)))
- return false;
-
- indirect_ptr = le64_to_cpu(indirect_ptr);
-
- /* check the valid bit of the first level entry */
- if (!(indirect_ptr & BIT_ULL(63)))
- return false;
-
- /*
- * Mask the guest physical address and calculate the frame number.
- * Any address beyond our supported 48 bits of PA will be caught
- * by the actual check in the final step.
- */
- gfn = (indirect_ptr & GENMASK_ULL(51, 16)) >> PAGE_SHIFT;
-
- return kvm_is_visible_gfn(kvm, gfn);
-}
-
/*
* MAPD maps or unmaps a device ID to Interrupt Translation Tables (ITTs).
* Must be called with the its_lock mutex held.
bool valid = its_cmd_get_validbit(its_cmd);
struct its_device *device;
- if (!vgic_its_check_device_id(kvm, its, device_id))
+ if (!vgic_its_check_id(its, its->baser_device_table, device_id))
return E_ITS_MAPD_DEVICE_OOR;
device = find_its_device(its, device_id);
return 0;
}
-static int vgic_its_nr_collection_ids(struct vgic_its *its)
-{
- u64 r = its->baser_coll_table;
-
- return (GITS_BASER_NR_PAGES(r) * SZ_64K) / GITS_BASER_ENTRY_SIZE(r);
-}
-
/*
* The MAPC command maps collection IDs to redistributors.
* Must be called with the its_lock mutex held.
if (target_addr >= atomic_read(&kvm->online_vcpus))
return E_ITS_MAPC_PROCNUM_OOR;
- if (coll_id >= vgic_its_nr_collection_ids(its))
- return E_ITS_MAPC_COLLECTION_OOR;
-
- collection = find_collection(its, coll_id);
-
if (!valid) {
- struct its_device *device;
- struct its_itte *itte;
- /*
- * Clearing the mapping for that collection ID removes the
- * entry from the list. If there wasn't any before, we can
- * go home early.
- */
- if (!collection)
- return 0;
-
- for_each_lpi_its(device, itte, its)
- if (itte->collection &&
- itte->collection->collection_id == coll_id)
- itte->collection = NULL;
-
- list_del(&collection->coll_list);
- kfree(collection);
+ vgic_its_free_collection(its, coll_id);
} else {
+ collection = find_collection(its, coll_id);
+
if (!collection) {
- collection = kzalloc(sizeof(struct its_collection),
- GFP_KERNEL);
- if (!collection)
- return -ENOMEM;
+ int ret;
- vgic_its_init_collection(its, collection, coll_id);
+ ret = vgic_its_alloc_collection(its, &collection,
+ coll_id);
+ if (ret)
+ return ret;
collection->target_addr = target_addr;
} else {
collection->target_addr = target_addr;
static int vgic_its_handle_command(struct kvm *kvm, struct vgic_its *its,
u64 *its_cmd)
{
- u8 cmd = its_cmd_get_command(its_cmd);
int ret = -ENODEV;
mutex_lock(&its->its_lock);
- switch (cmd) {
+ switch (its_cmd_get_command(its_cmd)) {
case GITS_CMD_MAPD:
ret = vgic_its_cmd_handle_mapd(kvm, its, its_cmd);
break;
ret = vgic_its_cmd_handle_mapc(kvm, its, its_cmd);
break;
case GITS_CMD_MAPI:
- ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd, cmd);
+ ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
break;
case GITS_CMD_MAPTI:
- ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd, cmd);
+ ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
break;
case GITS_CMD_MOVI:
ret = vgic_its_cmd_handle_movi(kvm, its, its_cmd);
dev->kvm->arch.vgic.has_its = true;
its->initialized = false;
its->enabled = false;
+ its->dev = dev;
its->baser_device_table = INITIAL_BASER_VALUE |
((u64)GITS_BASER_TYPE_DEVICE << GITS_BASER_TYPE_SHIFT);