summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPeter Maydell <peter.maydell@linaro.org>2018-05-04 18:58:39 +0100
committerPeter Maydell <peter.maydell@linaro.org>2018-05-04 18:58:39 +0100
commit853f8ca13cd6d2566b87ed647f8bb5480cdc5e47 (patch)
tree7b28115061968559b26f450e2375182f4b7c8346
parentc8b7e627b4269a3bc3ae41d9f420547a47e6d9b9 (diff)
parente24e3454829579eb815ec95d7b3679b0f65845b4 (diff)
downloadqemu-853f8ca13cd6d2566b87ed647f8bb5480cdc5e47.tar.gz
Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20180504-1' into staging
target-arm queue: * Emulate the SMMUv3 (IOMMU); one will be created in the 'virt' board if the commandline includes "-machine iommu=smmuv3" * target/arm: Implement v8M VLLDM and VLSTM * hw/arm: Don't fail qtest due to missing SD card in -nodefaults mode * Some fixes to silence Coverity false-positives * arm: boot: set boot_info starting from first_cpu (fixes a technical bug not visible in practice) * hw/net/smc91c111: Convert away from old_mmio * hw/usb/tusb6010: Convert away from old_mmio * hw/char/cmsdk-apb-uart.c: Accept more input after character read * target/arm: Make MPUIR write-ignored on OMAP, StrongARM * hw/arm/virt: Add linux,pci-domain property # gpg: Signature made Fri 04 May 2018 18:54:49 BST # gpg: using RSA key 3C2525ED14360CDE # gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" # gpg: aka "Peter Maydell <pmaydell@gmail.com>" # gpg: aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" # Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83 15CF 3C25 25ED 1436 0CDE * remotes/pmaydell/tags/pull-target-arm-20180504-1: (24 commits) hw/arm/virt: Introduce the iommu option hw/arm/virt-acpi-build: Add smmuv3 node in IORT table hw/arm/virt: Add SMMUv3 to the virt board target/arm/kvm: Translate the MSI doorbell in kvm_arch_fixup_msi_route hw/arm/smmuv3: Abort on vfio or vhost case hw/arm/smmuv3: Implement translate callback hw/arm/smmuv3: Event queue recording helper hw/arm/smmuv3: Implement MMIO write operations hw/arm/smmuv3: Queue helpers hw/arm/smmuv3: Wired IRQ and GERROR helpers hw/arm/smmuv3: Skeleton hw/arm/smmu-common: VMSAv8-64 page table walk hw/arm/smmu-common: IOMMU memory region and address space setup hw/arm/smmu-common: smmu base device and datatypes target/arm: Implement v8M VLLDM and VLSTM hw/arm: Don't fail qtest due to missing SD card in -nodefaults mode target/arm: Tidy condition in disas_simd_two_reg_misc target/arm: Tidy conditions in handle_vec_simd_shri arm: boot: set boot_info starting from first_cpu 
hw/net/smc91c111: Convert away from old_mmio ... Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
-rw-r--r--default-configs/aarch64-softmmu.mak1
-rw-r--r--hw/arm/Makefile.objs1
-rw-r--r--hw/arm/boot.c2
-rw-r--r--hw/arm/omap1.c8
-rw-r--r--hw/arm/omap2.c8
-rw-r--r--hw/arm/pxa2xx.c15
-rw-r--r--hw/arm/smmu-common.c372
-rw-r--r--hw/arm/smmu-internal.h99
-rw-r--r--hw/arm/smmuv3-internal.h621
-rw-r--r--hw/arm/smmuv3.c1191
-rw-r--r--hw/arm/trace-events37
-rw-r--r--hw/arm/virt-acpi-build.c55
-rw-r--r--hw/arm/virt.c101
-rw-r--r--hw/char/cmsdk-apb-uart.c1
-rw-r--r--hw/net/smc91c111.c54
-rw-r--r--hw/usb/tusb6010.c40
-rw-r--r--include/hw/acpi/acpi-defs.h15
-rw-r--r--include/hw/arm/smmu-common.h145
-rw-r--r--include/hw/arm/smmuv3.h87
-rw-r--r--include/hw/arm/virt.h10
-rw-r--r--target/arm/helper.c2
-rw-r--r--target/arm/kvm.c38
-rw-r--r--target/arm/trace-events3
-rw-r--r--target/arm/translate-a64.c12
-rw-r--r--target/arm/translate.c17
25 files changed, 2868 insertions, 67 deletions
diff --git a/default-configs/aarch64-softmmu.mak b/default-configs/aarch64-softmmu.mak
index 9ddccf855e..6f790f061a 100644
--- a/default-configs/aarch64-softmmu.mak
+++ b/default-configs/aarch64-softmmu.mak
@@ -8,3 +8,4 @@ CONFIG_DDC=y
CONFIG_DPCD=y
CONFIG_XLNX_ZYNQMP=y
CONFIG_XLNX_ZYNQMP_ARM=y
+CONFIG_ARM_SMMUV3=y
diff --git a/hw/arm/Makefile.objs b/hw/arm/Makefile.objs
index 2885e3e234..d51fcecaf2 100644
--- a/hw/arm/Makefile.objs
+++ b/hw/arm/Makefile.objs
@@ -35,3 +35,4 @@ obj-$(CONFIG_MPS2) += mps2-tz.o
obj-$(CONFIG_MSF2) += msf2-soc.o msf2-som.o
obj-$(CONFIG_IOTKIT) += iotkit.o
obj-$(CONFIG_FSL_IMX7) += fsl-imx7.o mcimx7d-sabre.o
+obj-$(CONFIG_ARM_SMMUV3) += smmu-common.o smmuv3.o
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
index 9ae6ab2689..1e2be20731 100644
--- a/hw/arm/boot.c
+++ b/hw/arm/boot.c
@@ -1170,7 +1170,7 @@ static void arm_load_kernel_notify(Notifier *notifier, void *data)
}
info->is_linux = is_linux;
- for (cs = CPU(cpu); cs; cs = CPU_NEXT(cs)) {
+ for (cs = first_cpu; cs; cs = CPU_NEXT(cs)) {
ARM_CPU(cs)->env.boot_info = info;
}
}
diff --git a/hw/arm/omap1.c b/hw/arm/omap1.c
index 24673abfca..e54c1f8f99 100644
--- a/hw/arm/omap1.c
+++ b/hw/arm/omap1.c
@@ -30,6 +30,7 @@
#include "hw/arm/soc_dma.h"
#include "sysemu/block-backend.h"
#include "sysemu/blockdev.h"
+#include "sysemu/qtest.h"
#include "qemu/range.h"
#include "hw/sysbus.h"
#include "qemu/cutils.h"
@@ -3987,12 +3988,11 @@ struct omap_mpu_state_s *omap310_mpu_init(MemoryRegion *system_memory,
omap_findclk(s, "dpll3"));
dinfo = drive_get(IF_SD, 0, 0);
- if (!dinfo) {
- error_report("missing SecureDigital device");
- exit(1);
+ if (!dinfo && !qtest_enabled()) {
+ warn_report("missing SecureDigital device");
}
s->mmc = omap_mmc_init(0xfffb7800, system_memory,
- blk_by_legacy_dinfo(dinfo),
+ dinfo ? blk_by_legacy_dinfo(dinfo) : NULL,
qdev_get_gpio_in(s->ih[1], OMAP_INT_OQN),
&s->drq[OMAP_DMA_MMC_TX],
omap_findclk(s, "mmc_ck"));
diff --git a/hw/arm/omap2.c b/hw/arm/omap2.c
index 80663533e1..b8d0910a1f 100644
--- a/hw/arm/omap2.c
+++ b/hw/arm/omap2.c
@@ -25,6 +25,7 @@
#include "cpu.h"
#include "sysemu/block-backend.h"
#include "sysemu/blockdev.h"
+#include "sysemu/qtest.h"
#include "hw/boards.h"
#include "hw/hw.h"
#include "hw/arm/arm.h"
@@ -2486,12 +2487,11 @@ struct omap_mpu_state_s *omap2420_mpu_init(MemoryRegion *sysmem,
s->drq[OMAP24XX_DMA_GPMC]);
dinfo = drive_get(IF_SD, 0, 0);
- if (!dinfo) {
- error_report("missing SecureDigital device");
- exit(1);
+ if (!dinfo && !qtest_enabled()) {
+ warn_report("missing SecureDigital device");
}
s->mmc = omap2_mmc_init(omap_l4tao(s->l4, 9),
- blk_by_legacy_dinfo(dinfo),
+ dinfo ? blk_by_legacy_dinfo(dinfo) : NULL,
qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_MMC_IRQ),
&s->drq[OMAP24XX_DMA_MMC1_TX],
omap_findclk(s, "mmc_fclk"), omap_findclk(s, "mmc_iclk"));
diff --git a/hw/arm/pxa2xx.c b/hw/arm/pxa2xx.c
index 928a0431d6..a2803fdee4 100644
--- a/hw/arm/pxa2xx.c
+++ b/hw/arm/pxa2xx.c
@@ -21,6 +21,7 @@
#include "chardev/char-fe.h"
#include "sysemu/block-backend.h"
#include "sysemu/blockdev.h"
+#include "sysemu/qtest.h"
#include "qemu/cutils.h"
static struct {
@@ -2095,12 +2096,11 @@ PXA2xxState *pxa270_init(MemoryRegion *address_space,
s->gpio = pxa2xx_gpio_init(0x40e00000, s->cpu, s->pic, 121);
dinfo = drive_get(IF_SD, 0, 0);
- if (!dinfo) {
- error_report("missing SecureDigital device");
- exit(1);
+ if (!dinfo && !qtest_enabled()) {
+ warn_report("missing SecureDigital device");
}
s->mmc = pxa2xx_mmci_init(address_space, 0x41100000,
- blk_by_legacy_dinfo(dinfo),
+ dinfo ? blk_by_legacy_dinfo(dinfo) : NULL,
qdev_get_gpio_in(s->pic, PXA2XX_PIC_MMC),
qdev_get_gpio_in(s->dma, PXA2XX_RX_RQ_MMCI),
qdev_get_gpio_in(s->dma, PXA2XX_TX_RQ_MMCI));
@@ -2220,12 +2220,11 @@ PXA2xxState *pxa255_init(MemoryRegion *address_space, unsigned int sdram_size)
s->gpio = pxa2xx_gpio_init(0x40e00000, s->cpu, s->pic, 85);
dinfo = drive_get(IF_SD, 0, 0);
- if (!dinfo) {
- error_report("missing SecureDigital device");
- exit(1);
+ if (!dinfo && !qtest_enabled()) {
+ warn_report("missing SecureDigital device");
}
s->mmc = pxa2xx_mmci_init(address_space, 0x41100000,
- blk_by_legacy_dinfo(dinfo),
+ dinfo ? blk_by_legacy_dinfo(dinfo) : NULL,
qdev_get_gpio_in(s->pic, PXA2XX_PIC_MMC),
qdev_get_gpio_in(s->dma, PXA2XX_RX_RQ_MMCI),
qdev_get_gpio_in(s->dma, PXA2XX_TX_RQ_MMCI));
diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c
new file mode 100644
index 0000000000..01c7be82b6
--- /dev/null
+++ b/hw/arm/smmu-common.c
@@ -0,0 +1,372 @@
+/*
+ * Copyright (C) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2017 Red Hat, Inc.
+ * Written by Prem Mallappa, Eric Auger
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Author: Prem Mallappa <pmallapp@broadcom.com>
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "sysemu/sysemu.h"
+#include "exec/address-spaces.h"
+#include "trace.h"
+#include "exec/target_page.h"
+#include "qom/cpu.h"
+#include "hw/qdev-properties.h"
+#include "qapi/error.h"
+
+#include "qemu/error-report.h"
+#include "hw/arm/smmu-common.h"
+#include "smmu-internal.h"
+
+/* VMSAv8-64 Translation */
+
/**
 * get_pte - Get the content of a page table entry located at
 * @base_addr[@index]
 *
 * @baseaddr: guest physical base address of the table
 * @index: descriptor index within the table
 * @pte: output; filled with the 64-bit descriptor on success
 * @info: output; on DMA failure, filled with a walk external-abort event
 *
 * Returns 0 on success, -EINVAL if the descriptor could not be read.
 */
static int get_pte(dma_addr_t baseaddr, uint32_t index, uint64_t *pte,
                   SMMUPTWEventInfo *info)
{
    int ret;
    dma_addr_t addr = baseaddr + index * sizeof(*pte);

    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = dma_memory_read(&address_space_memory, addr,
                          (uint8_t *)pte, sizeof(*pte));

    if (ret != MEMTX_OK) {
        /* DMA failed: report a walk abort; the caller records the event */
        info->type = SMMU_PTW_ERR_WALK_EABT;
        info->addr = addr;
        return -EINVAL;
    }
    trace_smmu_get_pte(baseaddr, index, addr, *pte);
    return 0;
}
+
+/* VMSAv8-64 Translation Table Format Descriptor Decoding */
+
/**
 * get_page_pte_address - returns the L3 descriptor output address,
 * ie. the page frame
 * ARM ARM spec: Figure D4-17 VMSAv8-64 level 3 descriptor format
 *
 * @pte: level 3 (page) descriptor
 * @granule_sz: translation granule size in log2 bytes
 */
static inline hwaddr get_page_pte_address(uint64_t pte, int granule_sz)
{
    return PTE_ADDRESS(pte, granule_sz);
}
+
/**
 * get_table_pte_address - return table descriptor output address,
 * ie. address of next level table
 * ARM ARM Figure D4-16 VMSAv8-64 level0, level1, and level 2 descriptor formats
 *
 * @pte: table descriptor
 * @granule_sz: translation granule size in log2 bytes
 */
static inline hwaddr get_table_pte_address(uint64_t pte, int granule_sz)
{
    return PTE_ADDRESS(pte, granule_sz);
}
+
+/**
+ * get_block_pte_address - return block descriptor output address and block size
+ * ARM ARM Figure D4-16 VMSAv8-64 level0, level1, and level 2 descriptor formats
+ */
+static inline hwaddr get_block_pte_address(uint64_t pte, int level,
+ int granule_sz, uint64_t *bsz)
+{
+ int n = (granule_sz - 3) * (4 - level) + 3;
+
+ *bsz = 1 << n;
+ return PTE_ADDRESS(pte, n);
+}
+
+SMMUTransTableInfo *select_tt(SMMUTransCfg *cfg, dma_addr_t iova)
+{
+ bool tbi = extract64(iova, 55, 1) ? TBI1(cfg->tbi) : TBI0(cfg->tbi);
+ uint8_t tbi_byte = tbi * 8;
+
+ if (cfg->tt[0].tsz &&
+ !extract64(iova, 64 - cfg->tt[0].tsz, cfg->tt[0].tsz - tbi_byte)) {
+ /* there is a ttbr0 region and we are in it (high bits all zero) */
+ return &cfg->tt[0];
+ } else if (cfg->tt[1].tsz &&
+ !extract64(iova, 64 - cfg->tt[1].tsz, cfg->tt[1].tsz - tbi_byte)) {
+ /* there is a ttbr1 region and we are in it (high bits all one) */
+ return &cfg->tt[1];
+ } else if (!cfg->tt[0].tsz) {
+ /* ttbr0 region is "everything not in the ttbr1 region" */
+ return &cfg->tt[0];
+ } else if (!cfg->tt[1].tsz) {
+ /* ttbr1 region is "everything not in the ttbr0 region" */
+ return &cfg->tt[1];
+ }
+ /* in the gap between the two regions, this is a Translation fault */
+ return NULL;
+}
+
/**
 * smmu_ptw_64 - VMSAv8-64 Walk of the page tables for a given IOVA
 * @cfg: translation config
 * @iova: iova to translate
 * @perm: access type
 * @tlbe: IOMMUTLBEntry (out)
 * @info: handle to an error info
 *
 * Return 0 on success, < 0 on error. In case of error, @info is filled
 * and tlbe->perm is set to IOMMU_NONE.
 * Upon success, @tlbe is filled with translated_addr and entry
 * permission rights.
 */
static int smmu_ptw_64(SMMUTransCfg *cfg,
                       dma_addr_t iova, IOMMUAccessFlags perm,
                       IOMMUTLBEntry *tlbe, SMMUPTWEventInfo *info)
{
    dma_addr_t baseaddr, indexmask;
    int stage = cfg->stage;
    SMMUTransTableInfo *tt = select_tt(cfg, iova);
    uint8_t level, granule_sz, inputsize, stride;

    /* no region covers this iova, or the region is disabled: fault */
    if (!tt || tt->disabled) {
        info->type = SMMU_PTW_ERR_TRANSLATION;
        goto error;
    }

    granule_sz = tt->granule_sz;
    stride = granule_sz - 3;  /* log2 of the number of entries per table */
    inputsize = 64 - tt->tsz; /* input address size in bits */
    /* initial lookup level and index mask for that level */
    level = 4 - (inputsize - 4) / stride;
    indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;
    baseaddr = extract64(tt->ttb, 0, 48);
    baseaddr &= ~indexmask;

    tlbe->iova = iova;
    tlbe->addr_mask = (1 << granule_sz) - 1;

    /* descend from the start level down to level 3 (page granularity) */
    while (level <= 3) {
        uint64_t subpage_size = 1ULL << level_shift(level, granule_sz);
        uint64_t mask = subpage_size - 1;
        uint32_t offset = iova_level_offset(iova, inputsize, level, granule_sz);
        uint64_t pte;
        dma_addr_t pte_addr = baseaddr + offset * sizeof(pte);
        uint8_t ap;

        if (get_pte(baseaddr, offset, &pte, info)) {
                goto error;
        }
        trace_smmu_ptw_level(level, iova, subpage_size,
                             baseaddr, offset, pte);

        if (is_invalid_pte(pte) || is_reserved_pte(pte, level)) {
            trace_smmu_ptw_invalid_pte(stage, level, baseaddr,
                                       pte_addr, offset, pte);
            info->type = SMMU_PTW_ERR_TRANSLATION;
            goto error;
        }

        if (is_page_pte(pte, level)) {
            /* leaf at level 3: page descriptor */
            uint64_t gpa = get_page_pte_address(pte, granule_sz);

            ap = PTE_AP(pte);
            if (is_permission_fault(ap, perm)) {
                info->type = SMMU_PTW_ERR_PERMISSION;
                goto error;
            }

            tlbe->translated_addr = gpa + (iova & mask);
            tlbe->perm = PTE_AP_TO_PERM(ap);
            trace_smmu_ptw_page_pte(stage, level, iova,
                                    baseaddr, pte_addr, pte, gpa);
            return 0;
        }
        if (is_block_pte(pte, level)) {
            /* leaf before level 3: block descriptor */
            uint64_t block_size;
            hwaddr gpa = get_block_pte_address(pte, level, granule_sz,
                                               &block_size);

            ap = PTE_AP(pte);
            if (is_permission_fault(ap, perm)) {
                info->type = SMMU_PTW_ERR_PERMISSION;
                goto error;
            }

            trace_smmu_ptw_block_pte(stage, level, baseaddr,
                                     pte_addr, pte, iova, gpa,
                                     block_size >> 20);

            tlbe->translated_addr = gpa + (iova & mask);
            tlbe->perm = PTE_AP_TO_PERM(ap);
            return 0;
        }

        /* table pte: check hierarchical permissions and descend */
        ap = PTE_APTABLE(pte);

        if (is_permission_fault(ap, perm)) {
            info->type = SMMU_PTW_ERR_PERMISSION;
            goto error;
        }
        baseaddr = get_table_pte_address(pte, granule_sz);
        level++;
    }

    /* fell past level 3 without reaching a leaf descriptor */
    info->type = SMMU_PTW_ERR_TRANSLATION;

error:
    tlbe->perm = IOMMU_NONE;
    return -EINVAL;
}
+
+/**
+ * smmu_ptw - Walk the page tables for an IOVA, according to @cfg
+ *
+ * @cfg: translation configuration
+ * @iova: iova to translate
+ * @perm: tentative access type
+ * @tlbe: returned entry
+ * @info: ptw event handle
+ *
+ * return 0 on success
+ */
+inline int smmu_ptw(SMMUTransCfg *cfg, dma_addr_t iova, IOMMUAccessFlags perm,
+ IOMMUTLBEntry *tlbe, SMMUPTWEventInfo *info)
+{
+ if (!cfg->aa64) {
+ /*
+ * This code path is not entered as we check this while decoding
+ * the configuration data in the derived SMMU model.
+ */
+ g_assert_not_reached();
+ }
+
+ return smmu_ptw_64(cfg, iova, perm, tlbe, info);
+}
+
+/**
+ * The bus number is used for lookup when SID based invalidation occurs.
+ * In that case we lazily populate the SMMUPciBus array from the bus hash
+ * table. At the time the SMMUPciBus is created (smmu_find_add_as), the bus
+ * numbers may not be always initialized yet.
+ */
+SMMUPciBus *smmu_find_smmu_pcibus(SMMUState *s, uint8_t bus_num)
+{
+ SMMUPciBus *smmu_pci_bus = s->smmu_pcibus_by_bus_num[bus_num];
+
+ if (!smmu_pci_bus) {
+ GHashTableIter iter;
+
+ g_hash_table_iter_init(&iter, s->smmu_pcibus_by_busptr);
+ while (g_hash_table_iter_next(&iter, NULL, (void **)&smmu_pci_bus)) {
+ if (pci_bus_num(smmu_pci_bus->bus) == bus_num) {
+ s->smmu_pcibus_by_bus_num[bus_num] = smmu_pci_bus;
+ return smmu_pci_bus;
+ }
+ }
+ }
+ return smmu_pci_bus;
+}
+
/**
 * smmu_find_add_as - pci_setup_iommu() callback: return (creating on
 * first use) the per-device IOMMU address space for @bus/@devfn.
 *
 * @bus: PCI bus the device sits on
 * @opaque: the SMMUState registered with pci_setup_iommu()
 * @devfn: device/function number on @bus
 */
static AddressSpace *smmu_find_add_as(PCIBus *bus, void *opaque, int devfn)
{
    SMMUState *s = opaque;
    SMMUPciBus *sbus = g_hash_table_lookup(s->smmu_pcibus_by_busptr, bus);
    SMMUDevice *sdev;

    if (!sbus) {
        /* first device seen on this bus: allocate its SMMUPciBus record */
        sbus = g_malloc0(sizeof(SMMUPciBus) +
                         sizeof(SMMUDevice *) * SMMU_PCI_DEVFN_MAX);
        sbus->bus = bus;
        g_hash_table_insert(s->smmu_pcibus_by_busptr, bus, sbus);
    }

    sdev = sbus->pbdev[devfn];
    if (!sdev) {
        /* lazily create the IOMMU memory region and AS for this devfn */
        char *name = g_strdup_printf("%s-%d-%d",
                                     s->mrtypename,
                                     pci_bus_num(bus), devfn);
        sdev = sbus->pbdev[devfn] = g_new0(SMMUDevice, 1);

        sdev->smmu = s;
        sdev->bus = bus;
        sdev->devfn = devfn;

        memory_region_init_iommu(&sdev->iommu, sizeof(sdev->iommu),
                                 s->mrtypename,
                                 OBJECT(s), name, 1ULL << SMMU_MAX_VA_BITS);
        address_space_init(&sdev->as,
                           MEMORY_REGION(&sdev->iommu), name);
        trace_smmu_add_mr(name);
        g_free(name);
    }

    return &sdev->as;
}
+
/*
 * Realize hook for the abstract SMMU base device: chain to the derived
 * model's realize, then hook this SMMU into the primary PCI bus so that
 * per-device IOMMU address spaces are created via smmu_find_add_as().
 */
static void smmu_base_realize(DeviceState *dev, Error **errp)
{
    SMMUState *s = ARM_SMMU(dev);
    SMMUBaseClass *sbc = ARM_SMMU_GET_CLASS(dev);
    Error *local_err = NULL;

    /* let the derived class (e.g. SMMUv3) realize first */
    sbc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* keyed by PCIBus pointer; lazily filled by smmu_find_add_as() */
    s->smmu_pcibus_by_busptr = g_hash_table_new(NULL, NULL);

    if (s->primary_bus) {
        pci_setup_iommu(s->primary_bus, smmu_find_add_as, s);
    } else {
        error_setg(errp, "SMMU is not attached to any PCI bus!");
    }
}
+
/* Reset hook for the abstract SMMU base device: no state to reset yet. */
static void smmu_base_reset(DeviceState *dev)
{
    /* will be filled later on */
}
+
/* QOM properties: the PCI bus this SMMU translates for, plus a bus number */
static Property smmu_dev_properties[] = {
    DEFINE_PROP_UINT8("bus_num", SMMUState, bus_num, 0),
    /* link to the PCI bus object; checked at realize time */
    DEFINE_PROP_LINK("primary-bus", SMMUState, primary_bus, "PCI", PCIBus *),
    DEFINE_PROP_END_OF_LIST(),
};
+
/*
 * Class init for the abstract SMMU base type: install properties and the
 * realize/reset hooks, saving the derived class's realize for chaining.
 */
static void smmu_base_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SMMUBaseClass *sbc = ARM_SMMU_CLASS(klass);

    dc->props = smmu_dev_properties;
    device_class_set_parent_realize(dc, smmu_base_realize,
                                    &sbc->parent_realize);
    dc->reset = smmu_base_reset;
}
+
/* Abstract base type; concrete SMMU models (e.g. SMMUv3) derive from it */
static const TypeInfo smmu_base_info = {
    .name = TYPE_ARM_SMMU,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(SMMUState),
    .class_data = NULL,
    .class_size = sizeof(SMMUBaseClass),
    .class_init = smmu_base_class_init,
    .abstract = true,
};

static void smmu_base_register_types(void)
{
    type_register_static(&smmu_base_info);
}

type_init(smmu_base_register_types)
+
diff --git a/hw/arm/smmu-internal.h b/hw/arm/smmu-internal.h
new file mode 100644
index 0000000000..7794d6d394
--- /dev/null
+++ b/hw/arm/smmu-internal.h
@@ -0,0 +1,99 @@
+/*
+ * ARM SMMU support - Internal API
+ *
+ * Copyright (c) 2017 Red Hat, Inc.
+ * Copyright (C) 2014-2016 Broadcom Corporation
+ * Written by Prem Mallappa, Eric Auger
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_ARM_SMMU_INTERNAL_H
+#define HW_ARM_SMMU_INTERNAL_H
+
/*
 * Top Byte Ignore bit selection from the 2-bit TBI field:
 * bit 0 controls TTBR0 translations, bit 1 controls TTBR1 translations.
 *
 * Fix for TBI1: ">>" binds tighter than "&", so the original
 * ((tbi) & 0x2 >> 1) parsed as ((tbi) & (0x2 >> 1)) == ((tbi) & 0x1),
 * i.e. it returned the TBI0 bit. Parenthesize to extract bit 1.
 */
#define TBI0(tbi) ((tbi) & 0x1)
#define TBI1(tbi) (((tbi) & 0x2) >> 1)
+
+/* PTE Manipulation */
+
+#define ARM_LPAE_PTE_TYPE_SHIFT 0
+#define ARM_LPAE_PTE_TYPE_MASK 0x3
+
+#define ARM_LPAE_PTE_TYPE_BLOCK 1
+#define ARM_LPAE_PTE_TYPE_TABLE 3
+
+#define ARM_LPAE_L3_PTE_TYPE_RESERVED 1
+#define ARM_LPAE_L3_PTE_TYPE_PAGE 3
+
+#define ARM_LPAE_PTE_VALID (1 << 0)
+
+#define PTE_ADDRESS(pte, shift) \
+ (extract64(pte, shift, 47 - shift + 1) << shift)
+
+#define is_invalid_pte(pte) (!(pte & ARM_LPAE_PTE_VALID))
+
+#define is_reserved_pte(pte, level) \
+ ((level == 3) && \
+ ((pte & ARM_LPAE_PTE_TYPE_MASK) == ARM_LPAE_L3_PTE_TYPE_RESERVED))
+
+#define is_block_pte(pte, level) \
+ ((level < 3) && \
+ ((pte & ARM_LPAE_PTE_TYPE_MASK) == ARM_LPAE_PTE_TYPE_BLOCK))
+
+#define is_table_pte(pte, level) \
+ ((level < 3) && \
+ ((pte & ARM_LPAE_PTE_TYPE_MASK) == ARM_LPAE_PTE_TYPE_TABLE))
+
+#define is_page_pte(pte, level) \
+ ((level == 3) && \
+ ((pte & ARM_LPAE_PTE_TYPE_MASK) == ARM_LPAE_L3_PTE_TYPE_PAGE))
+
+/* access permissions */
+
+#define PTE_AP(pte) \
+ (extract64(pte, 6, 2))
+
+#define PTE_APTABLE(pte) \
+ (extract64(pte, 61, 2))
+
+/*
+ * TODO: At the moment all transactions are considered as privileged (EL1)
+ * as IOMMU translation callback does not pass user/priv attributes.
+ */
+#define is_permission_fault(ap, perm) \
+ (((perm) & IOMMU_WO) && ((ap) & 0x2))
+
+#define PTE_AP_TO_PERM(ap) \
+ (IOMMU_ACCESS_FLAG(true, !((ap) & 0x2)))
+
/* Level Indexing */

/* Bit position of the level-@level index field within an input address */
static inline int level_shift(int level, int granule_sz)
{
    return granule_sz + (3 - level) * (granule_sz - 3);
}

/* Mask clearing the offset-within-subpage bits for lookup level @level */
static inline uint64_t level_page_mask(int level, int granule_sz)
{
    return ~(MAKE_64BIT_MASK(0, level_shift(level, granule_sz)));
}

/*
 * Index of @iova into the level-@level table: truncate to @inputsize bits,
 * shift the level's field down, and mask to the per-table index width
 * (gsz - 3 bits, i.e. 2^(gsz-3) descriptors per table).
 */
static inline
uint64_t iova_level_offset(uint64_t iova, int inputsize,
                           int level, int gsz)
{
    return ((iova & MAKE_64BIT_MASK(0, inputsize)) >> level_shift(level, gsz)) &
            MAKE_64BIT_MASK(0, gsz - 3);
}
+
+#endif
diff --git a/hw/arm/smmuv3-internal.h b/hw/arm/smmuv3-internal.h
new file mode 100644
index 0000000000..a9d714b56e
--- /dev/null
+++ b/hw/arm/smmuv3-internal.h
@@ -0,0 +1,621 @@
+/*
+ * ARM SMMUv3 support - Internal API
+ *
+ * Copyright (C) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2017 Red Hat, Inc.
+ * Written by Prem Mallappa, Eric Auger
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_ARM_SMMU_V3_INTERNAL_H
+#define HW_ARM_SMMU_V3_INTERNAL_H
+
+#include "hw/arm/smmu-common.h"
+
+/* MMIO Registers */
+
+REG32(IDR0, 0x0)
+ FIELD(IDR0, S1P, 1 , 1)
+ FIELD(IDR0, TTF, 2 , 2)
+ FIELD(IDR0, COHACC, 4 , 1)
+ FIELD(IDR0, ASID16, 12, 1)
+ FIELD(IDR0, TTENDIAN, 21, 2)
+ FIELD(IDR0, STALL_MODEL, 24, 2)
+ FIELD(IDR0, TERM_MODEL, 26, 1)
+ FIELD(IDR0, STLEVEL, 27, 2)
+
+REG32(IDR1, 0x4)
+ FIELD(IDR1, SIDSIZE, 0 , 6)
+ FIELD(IDR1, EVENTQS, 16, 5)
+ FIELD(IDR1, CMDQS, 21, 5)
+
+#define SMMU_IDR1_SIDSIZE 16
+#define SMMU_CMDQS 19
+#define SMMU_EVENTQS 19
+
+REG32(IDR2, 0x8)
+REG32(IDR3, 0xc)
+REG32(IDR4, 0x10)
+REG32(IDR5, 0x14)
+ FIELD(IDR5, OAS, 0, 3);
+ FIELD(IDR5, GRAN4K, 4, 1);
+ FIELD(IDR5, GRAN16K, 5, 1);
+ FIELD(IDR5, GRAN64K, 6, 1);
+
+#define SMMU_IDR5_OAS 4
+
+REG32(IIDR, 0x1c)
+REG32(CR0, 0x20)
+ FIELD(CR0, SMMU_ENABLE, 0, 1)
+ FIELD(CR0, EVENTQEN, 2, 1)
+ FIELD(CR0, CMDQEN, 3, 1)
+
+#define SMMU_CR0_RESERVED 0xFFFFFC20
+
+REG32(CR0ACK, 0x24)
+REG32(CR1, 0x28)
+REG32(CR2, 0x2c)
+REG32(STATUSR, 0x40)
+REG32(IRQ_CTRL, 0x50)
+ FIELD(IRQ_CTRL, GERROR_IRQEN, 0, 1)
+ FIELD(IRQ_CTRL, PRI_IRQEN, 1, 1)
+ FIELD(IRQ_CTRL, EVENTQ_IRQEN, 2, 1)
+
+REG32(IRQ_CTRL_ACK, 0x54)
+REG32(GERROR, 0x60)
+ FIELD(GERROR, CMDQ_ERR, 0, 1)
+ FIELD(GERROR, EVENTQ_ABT_ERR, 2, 1)
+ FIELD(GERROR, PRIQ_ABT_ERR, 3, 1)
+ FIELD(GERROR, MSI_CMDQ_ABT_ERR, 4, 1)
+ FIELD(GERROR, MSI_EVENTQ_ABT_ERR, 5, 1)
+ FIELD(GERROR, MSI_PRIQ_ABT_ERR, 6, 1)
+ FIELD(GERROR, MSI_GERROR_ABT_ERR, 7, 1)
+ FIELD(GERROR, MSI_SFM_ERR, 8, 1)
+
+REG32(GERRORN, 0x64)
+
+#define A_GERROR_IRQ_CFG0 0x68 /* 64b */
+REG32(GERROR_IRQ_CFG1, 0x70)
+REG32(GERROR_IRQ_CFG2, 0x74)
+
+#define A_STRTAB_BASE 0x80 /* 64b */
+
+#define SMMU_BASE_ADDR_MASK 0xffffffffffe0
+
+REG32(STRTAB_BASE_CFG, 0x88)
+ FIELD(STRTAB_BASE_CFG, FMT, 16, 2)
+ FIELD(STRTAB_BASE_CFG, SPLIT, 6 , 5)
+ FIELD(STRTAB_BASE_CFG, LOG2SIZE, 0 , 6)
+
+#define A_CMDQ_BASE 0x90 /* 64b */
+REG32(CMDQ_PROD, 0x98)
+REG32(CMDQ_CONS, 0x9c)
+ FIELD(CMDQ_CONS, ERR, 24, 7)
+
+#define A_EVENTQ_BASE 0xa0 /* 64b */
+REG32(EVENTQ_PROD, 0xa8)
+REG32(EVENTQ_CONS, 0xac)
+
+#define A_EVENTQ_IRQ_CFG0 0xb0 /* 64b */
+REG32(EVENTQ_IRQ_CFG1, 0xb8)
+REG32(EVENTQ_IRQ_CFG2, 0xbc)
+
+#define A_IDREGS 0xfd0
+
/* True when the guest has set CR0.SMMUEN (global enable) */
static inline int smmu_enabled(SMMUv3State *s)
{
    return FIELD_EX32(s->cr[0], CR0, SMMU_ENABLE);
}
+
/* Command Queue Entry: raw 4 x 32-bit words, decoded with the CMD_* macros */
typedef struct Cmd {
    uint32_t word[4];
} Cmd;

/* Event Queue Entry: raw 8 x 32-bit words, built with the EVT_SET_* macros */
typedef struct Evt {
    uint32_t word[8];
} Evt;
+
/*
 * smmuv3_idreg - value of a Primecell/Corelink ID register.
 * @regoffset: byte offset from the first ID register (assumed word-aligned
 * and within the 12-register window; not bounds-checked here).
 */
static inline uint32_t smmuv3_idreg(int regoffset)
{
    /*
     * Return the value of the Primecell/Corelink ID registers at the
     * specified offset from the first ID register.
     * These value indicate an ARM implementation of MMU600 p1
     */
    static const uint8_t smmuv3_ids[] = {
        0x04, 0, 0, 0, 0x84, 0xB4, 0xF0, 0x10, 0x0D, 0xF0, 0x05, 0xB1
    };
    return smmuv3_ids[regoffset / 4];
}
+
/* True when the guest enabled the event queue interrupt in IRQ_CTRL */
static inline bool smmuv3_eventq_irq_enabled(SMMUv3State *s)
{
    return FIELD_EX32(s->irq_ctrl, IRQ_CTRL, EVENTQ_IRQEN);
}

/* True when the guest enabled the global error interrupt in IRQ_CTRL */
static inline bool smmuv3_gerror_irq_enabled(SMMUv3State *s)
{
    return FIELD_EX32(s->irq_ctrl, IRQ_CTRL, GERROR_IRQEN);
}
+
+/* Queue Handling */
+
+#define Q_BASE(q) ((q)->base & SMMU_BASE_ADDR_MASK)
+#define WRAP_MASK(q) (1 << (q)->log2size)
+#define INDEX_MASK(q) (((1 << (q)->log2size)) - 1)
+#define WRAP_INDEX_MASK(q) ((1 << ((q)->log2size + 1)) - 1)
+
+#define Q_CONS(q) ((q)->cons & INDEX_MASK(q))
+#define Q_PROD(q) ((q)->prod & INDEX_MASK(q))
+
+#define Q_CONS_ENTRY(q) (Q_BASE(q) + (q)->entry_size * Q_CONS(q))
+#define Q_PROD_ENTRY(q) (Q_BASE(q) + (q)->entry_size * Q_PROD(q))
+
+#define Q_CONS_WRAP(q) (((q)->cons & WRAP_MASK(q)) >> (q)->log2size)
+#define Q_PROD_WRAP(q) (((q)->prod & WRAP_MASK(q)) >> (q)->log2size)
+
/* Queue full: same index but opposite wrap bits (prod lapped cons) */
static inline bool smmuv3_q_full(SMMUQueue *q)
{
    return ((q->cons ^ q->prod) & WRAP_INDEX_MASK(q)) == WRAP_MASK(q);
}

/* Queue empty: index and wrap bit both equal */
static inline bool smmuv3_q_empty(SMMUQueue *q)
{
    return (q->cons & WRAP_INDEX_MASK(q)) == (q->prod & WRAP_INDEX_MASK(q));
}

/* Advance PROD, letting the increment carry into the wrap bit */
static inline void queue_prod_incr(SMMUQueue *q)
{
    q->prod = (q->prod + 1) & WRAP_INDEX_MASK(q);
}

static inline void queue_cons_incr(SMMUQueue *q)
{
    /*
     * We have to use deposit for the CONS registers to preserve
     * the ERR field in the high bits.
     */
    q->cons = deposit32(q->cons, 0, q->log2size + 1, q->cons + 1);
}
+
/* True when the guest enabled command queue processing (CR0.CMDQEN) */
static inline bool smmuv3_cmdq_enabled(SMMUv3State *s)
{
    return FIELD_EX32(s->cr[0], CR0, CMDQEN);
}

/* True when the guest enabled event queue writes (CR0.EVENTQEN) */
static inline bool smmuv3_eventq_enabled(SMMUv3State *s)
{
    return FIELD_EX32(s->cr[0], CR0, EVENTQEN);
}

/* Record a command processing error in CMDQ_CONS.ERR for the guest */
static inline void smmu_write_cmdq_err(SMMUv3State *s, uint32_t err_type)
{
    s->cmdq.cons = FIELD_DP32(s->cmdq.cons, CMDQ_CONS, ERR, err_type);
}
+
+/* Commands */
+
+typedef enum SMMUCommandType {
+ SMMU_CMD_NONE = 0x00,
+ SMMU_CMD_PREFETCH_CONFIG ,
+ SMMU_CMD_PREFETCH_ADDR,
+ SMMU_CMD_CFGI_STE,
+ SMMU_CMD_CFGI_STE_RANGE,
+ SMMU_CMD_CFGI_CD,
+ SMMU_CMD_CFGI_CD_ALL,
+ SMMU_CMD_CFGI_ALL,
+ SMMU_CMD_TLBI_NH_ALL = 0x10,
+ SMMU_CMD_TLBI_NH_ASID,
+ SMMU_CMD_TLBI_NH_VA,
+ SMMU_CMD_TLBI_NH_VAA,
+ SMMU_CMD_TLBI_EL3_ALL = 0x18,
+ SMMU_CMD_TLBI_EL3_VA = 0x1a,
+ SMMU_CMD_TLBI_EL2_ALL = 0x20,
+ SMMU_CMD_TLBI_EL2_ASID,
+ SMMU_CMD_TLBI_EL2_VA,
+ SMMU_CMD_TLBI_EL2_VAA,
+ SMMU_CMD_TLBI_S12_VMALL = 0x28,
+ SMMU_CMD_TLBI_S2_IPA = 0x2a,
+ SMMU_CMD_TLBI_NSNH_ALL = 0x30,
+ SMMU_CMD_ATC_INV = 0x40,
+ SMMU_CMD_PRI_RESP,
+ SMMU_CMD_RESUME = 0x44,
+ SMMU_CMD_STALL_TERM,
+ SMMU_CMD_SYNC,
+} SMMUCommandType;
+
+static const char *cmd_stringify[] = {
+ [SMMU_CMD_PREFETCH_CONFIG] = "SMMU_CMD_PREFETCH_CONFIG",
+ [SMMU_CMD_PREFETCH_ADDR] = "SMMU_CMD_PREFETCH_ADDR",
+ [SMMU_CMD_CFGI_STE] = "SMMU_CMD_CFGI_STE",
+ [SMMU_CMD_CFGI_STE_RANGE] = "SMMU_CMD_CFGI_STE_RANGE",
+ [SMMU_CMD_CFGI_CD] = "SMMU_CMD_CFGI_CD",
+ [SMMU_CMD_CFGI_CD_ALL] = "SMMU_CMD_CFGI_CD_ALL",
+ [SMMU_CMD_CFGI_ALL] = "SMMU_CMD_CFGI_ALL",
+ [SMMU_CMD_TLBI_NH_ALL] = "SMMU_CMD_TLBI_NH_ALL",
+ [SMMU_CMD_TLBI_NH_ASID] = "SMMU_CMD_TLBI_NH_ASID",
+ [SMMU_CMD_TLBI_NH_VA] = "SMMU_CMD_TLBI_NH_VA",
+ [SMMU_CMD_TLBI_NH_VAA] = "SMMU_CMD_TLBI_NH_VAA",
+ [SMMU_CMD_TLBI_EL3_ALL] = "SMMU_CMD_TLBI_EL3_ALL",
+ [SMMU_CMD_TLBI_EL3_VA] = "SMMU_CMD_TLBI_EL3_VA",
+ [SMMU_CMD_TLBI_EL2_ALL] = "SMMU_CMD_TLBI_EL2_ALL",
+ [SMMU_CMD_TLBI_EL2_ASID] = "SMMU_CMD_TLBI_EL2_ASID",
+ [SMMU_CMD_TLBI_EL2_VA] = "SMMU_CMD_TLBI_EL2_VA",
+ [SMMU_CMD_TLBI_EL2_VAA] = "SMMU_CMD_TLBI_EL2_VAA",
+ [SMMU_CMD_TLBI_S12_VMALL] = "SMMU_CMD_TLBI_S12_VMALL",
+ [SMMU_CMD_TLBI_S2_IPA] = "SMMU_CMD_TLBI_S2_IPA",
+ [SMMU_CMD_TLBI_NSNH_ALL] = "SMMU_CMD_TLBI_NSNH_ALL",
+ [SMMU_CMD_ATC_INV] = "SMMU_CMD_ATC_INV",
+ [SMMU_CMD_PRI_RESP] = "SMMU_CMD_PRI_RESP",
+ [SMMU_CMD_RESUME] = "SMMU_CMD_RESUME",
+ [SMMU_CMD_STALL_TERM] = "SMMU_CMD_STALL_TERM",
+ [SMMU_CMD_SYNC] = "SMMU_CMD_SYNC",
+};
+
+static inline const char *smmu_cmd_string(SMMUCommandType type)
+{
+ if (type > SMMU_CMD_NONE && type < ARRAY_SIZE(cmd_stringify)) {
+ return cmd_stringify[type] ? cmd_stringify[type] : "UNKNOWN";
+ } else {
+ return "INVALID";
+ }
+}
+
+/* CMDQ fields */
+
+typedef enum {
+ SMMU_CERROR_NONE = 0,
+ SMMU_CERROR_ILL,
+ SMMU_CERROR_ABT,
+ SMMU_CERROR_ATC_INV_SYNC,
+} SMMUCmdError;
+
+enum { /* Command completion notification */
+ CMD_SYNC_SIG_NONE,
+ CMD_SYNC_SIG_IRQ,
+ CMD_SYNC_SIG_SEV,
+};
+
+#define CMD_TYPE(x) extract32((x)->word[0], 0 , 8)
+#define CMD_SSEC(x) extract32((x)->word[0], 10, 1)
+#define CMD_SSV(x) extract32((x)->word[0], 11, 1)
+#define CMD_RESUME_AC(x) extract32((x)->word[0], 12, 1)
+#define CMD_RESUME_AB(x) extract32((x)->word[0], 13, 1)
+#define CMD_SYNC_CS(x) extract32((x)->word[0], 12, 2)
+#define CMD_SSID(x) extract32((x)->word[0], 12, 20)
+#define CMD_SID(x) ((x)->word[1])
+#define CMD_VMID(x) extract32((x)->word[1], 0 , 16)
+#define CMD_ASID(x) extract32((x)->word[1], 16, 16)
+#define CMD_RESUME_STAG(x) extract32((x)->word[2], 0 , 16)
+#define CMD_RESP(x) extract32((x)->word[2], 11, 2)
+#define CMD_LEAF(x) extract32((x)->word[2], 0 , 1)
+#define CMD_STE_RANGE(x) extract32((x)->word[2], 0 , 5)
+#define CMD_ADDR(x) ({ \
+ uint64_t high = (uint64_t)(x)->word[3]; \
+ uint64_t low = extract32((x)->word[2], 12, 20); \
+ uint64_t addr = high << 32 | (low << 12); \
+ addr; \
+ })
+
+#define SMMU_FEATURE_2LVL_STE (1 << 0)
+
+/* Events */
+
+typedef enum SMMUEventType {
+ SMMU_EVT_OK = 0x00,
+ SMMU_EVT_F_UUT ,
+ SMMU_EVT_C_BAD_STREAMID ,
+ SMMU_EVT_F_STE_FETCH ,
+ SMMU_EVT_C_BAD_STE ,
+ SMMU_EVT_F_BAD_ATS_TREQ ,
+ SMMU_EVT_F_STREAM_DISABLED ,
+ SMMU_EVT_F_TRANS_FORBIDDEN ,
+ SMMU_EVT_C_BAD_SUBSTREAMID ,
+ SMMU_EVT_F_CD_FETCH ,
+ SMMU_EVT_C_BAD_CD ,
+ SMMU_EVT_F_WALK_EABT ,
+ SMMU_EVT_F_TRANSLATION = 0x10,
+ SMMU_EVT_F_ADDR_SIZE ,
+ SMMU_EVT_F_ACCESS ,
+ SMMU_EVT_F_PERMISSION ,
+ SMMU_EVT_F_TLB_CONFLICT = 0x20,
+ SMMU_EVT_F_CFG_CONFLICT ,
+ SMMU_EVT_E_PAGE_REQ = 0x24,
+} SMMUEventType;
+
+/* Printable names indexed by SMMUEventType; unassigned codes stay NULL */
+static const char *event_stringify[] = {
+ [SMMU_EVT_OK] = "SMMU_EVT_OK",
+ [SMMU_EVT_F_UUT] = "SMMU_EVT_F_UUT",
+ [SMMU_EVT_C_BAD_STREAMID] = "SMMU_EVT_C_BAD_STREAMID",
+ [SMMU_EVT_F_STE_FETCH] = "SMMU_EVT_F_STE_FETCH",
+ [SMMU_EVT_C_BAD_STE] = "SMMU_EVT_C_BAD_STE",
+ [SMMU_EVT_F_BAD_ATS_TREQ] = "SMMU_EVT_F_BAD_ATS_TREQ",
+ [SMMU_EVT_F_STREAM_DISABLED] = "SMMU_EVT_F_STREAM_DISABLED",
+ [SMMU_EVT_F_TRANS_FORBIDDEN] = "SMMU_EVT_F_TRANS_FORBIDDEN",
+ [SMMU_EVT_C_BAD_SUBSTREAMID] = "SMMU_EVT_C_BAD_SUBSTREAMID",
+ [SMMU_EVT_F_CD_FETCH] = "SMMU_EVT_F_CD_FETCH",
+ [SMMU_EVT_C_BAD_CD] = "SMMU_EVT_C_BAD_CD",
+ [SMMU_EVT_F_WALK_EABT] = "SMMU_EVT_F_WALK_EABT",
+ [SMMU_EVT_F_TRANSLATION] = "SMMU_EVT_F_TRANSLATION",
+ [SMMU_EVT_F_ADDR_SIZE] = "SMMU_EVT_F_ADDR_SIZE",
+ [SMMU_EVT_F_ACCESS] = "SMMU_EVT_F_ACCESS",
+ [SMMU_EVT_F_PERMISSION] = "SMMU_EVT_F_PERMISSION",
+ [SMMU_EVT_F_TLB_CONFLICT] = "SMMU_EVT_F_TLB_CONFLICT",
+ [SMMU_EVT_F_CFG_CONFLICT] = "SMMU_EVT_F_CFG_CONFLICT",
+ [SMMU_EVT_E_PAGE_REQ] = "SMMU_EVT_E_PAGE_REQ",
+};
+
+/*
+ * Return a printable name for @type: "INVALID" for codes past the end of
+ * the table, "UNKNOWN" for in-range codes that have no name assigned.
+ */
+static inline const char *smmu_event_string(SMMUEventType type)
+{
+    const char *name;
+
+    if (type >= ARRAY_SIZE(event_stringify)) {
+        return "INVALID";
+    }
+    name = event_stringify[type];
+    return name ? name : "UNKNOWN";
+}
+
+/* Encode an event record */
+/*
+ * SMMUEventInfo - data needed to encode one event record.
+ *
+ * @type: event code (SMMU_EVT_*), selects the active member of @u
+ * @sid: stream ID of the faulting transaction
+ * @recorded: set by smmuv3_record_event() once the event was pushed
+ * @record_trans_faults: from CD.R; gates recording of translation faults
+ */
+typedef struct SMMUEventInfo {
+ SMMUEventType type;
+ uint32_t sid;
+ bool recorded;
+ bool record_trans_faults;
+ union {
+ struct {
+ uint32_t ssid;
+ bool ssv;
+ dma_addr_t addr;
+ bool rnw;
+ bool pnu;
+ bool ind;
+ } f_uut;
+ struct SSIDInfo {
+ uint32_t ssid;
+ bool ssv;
+ } c_bad_streamid;
+ struct SSIDAddrInfo {
+ uint32_t ssid;
+ bool ssv;
+ dma_addr_t addr;
+ } f_ste_fetch;
+ struct SSIDInfo c_bad_ste;
+ struct {
+ dma_addr_t addr;
+ bool rnw;
+ } f_transl_forbidden;
+ struct {
+ uint32_t ssid;
+ } c_bad_substream;
+ struct SSIDAddrInfo f_cd_fetch;
+ struct SSIDInfo c_bad_cd;
+ /* shared layout for the walk/translation fault family */
+ struct FullInfo {
+ bool stall;
+ uint16_t stag;
+ uint32_t ssid;
+ bool ssv;
+ bool s2;
+ dma_addr_t addr;
+ bool rnw;
+ bool pnu;
+ bool ind;
+ uint8_t class;
+ dma_addr_t addr2;
+ } f_walk_eabt;
+ struct FullInfo f_translation;
+ struct FullInfo f_addr_size;
+ struct FullInfo f_access;
+ struct FullInfo f_permission;
+ struct SSIDInfo f_cfg_conflict;
+ /**
+ * not supported yet:
+ * F_BAD_ATS_TREQ
+ * F_BAD_ATS_TREQ
+ * F_TLB_CONFLICT
+ * E_PAGE_REQUEST
+ * IMPDEF_EVENTn
+ */
+ } u;
+} SMMUEventInfo;
+
+/* EVTQ fields */
+
+/*
+ * EVTQ CONS overflow flag (bit 31). Use an unsigned constant: (1 << 31)
+ * left-shifts into the sign bit of an int, which is undefined behaviour.
+ */
+#define EVT_Q_OVERFLOW (1U << 31)
+
+/*
+ * Event record field setters.
+ *
+ * deposit32() returns the updated word and does not modify its first
+ * argument, so the result must be assigned back; the previous form
+ * discarded the return value, leaving the record unchanged.
+ */
+#define EVT_SET_TYPE(x, v)  ((x)->word[0] = deposit32((x)->word[0], 0 , 8 , v))
+#define EVT_SET_SSV(x, v)   ((x)->word[0] = deposit32((x)->word[0], 11, 1 , v))
+#define EVT_SET_SSID(x, v)  ((x)->word[0] = deposit32((x)->word[0], 12, 20, v))
+#define EVT_SET_SID(x, v)   ((x)->word[1] = v)
+#define EVT_SET_STAG(x, v)  ((x)->word[2] = deposit32((x)->word[2], 0 , 16, v))
+#define EVT_SET_STALL(x, v) ((x)->word[2] = deposit32((x)->word[2], 31, 1 , v))
+#define EVT_SET_PNU(x, v)   ((x)->word[3] = deposit32((x)->word[3], 1 , 1 , v))
+#define EVT_SET_IND(x, v)   ((x)->word[3] = deposit32((x)->word[3], 2 , 1 , v))
+#define EVT_SET_RNW(x, v)   ((x)->word[3] = deposit32((x)->word[3], 3 , 1 , v))
+#define EVT_SET_S2(x, v)    ((x)->word[3] = deposit32((x)->word[3], 7 , 1 , v))
+#define EVT_SET_CLASS(x, v) ((x)->word[3] = deposit32((x)->word[3], 8 , 2 , v))
+#define EVT_SET_ADDR(x, addr)                          \
+    do {                                               \
+        (x)->word[5] = (uint32_t)(addr >> 32);         \
+        (x)->word[4] = (uint32_t)(addr & 0xffffffff);  \
+    } while (0)
+/*
+ * NOTE(review): the two deposits below overlap in word[7] bits [15:3];
+ * the field layout looks suspect against the spec's ADDR2 record format
+ * and should be cross-checked — only the discarded-result bug is fixed
+ * here.
+ */
+#define EVT_SET_ADDR2(x, addr)                                           \
+    do {                                                                 \
+        (x)->word[7] = deposit32((x)->word[7], 3, 29, addr >> 16);       \
+        (x)->word[7] = deposit32((x)->word[7], 0, 16, addr & 0xffff);    \
+    } while (0)
+
+void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *event);
+
+/* Configuration Data */
+
+/* STE Level 1 Descriptor */
+/* STE Level 1 Descriptor: one entry of the level-1 stream table (8 bytes) */
+typedef struct STEDesc {
+ uint32_t word[2];
+} STEDesc;
+
+/* CD Level 1 Descriptor (8 bytes) */
+typedef struct CDDesc {
+ uint32_t word[2];
+} CDDesc;
+
+/* Stream Table Entry (STE), 64 bytes; fields accessed via the STE_* macros */
+typedef struct STE {
+ uint32_t word[16];
+} STE;
+
+/* Context Descriptor (CD), 64 bytes; fields accessed via the CD_* macros */
+typedef struct CD {
+ uint32_t word[16];
+} CD;
+
+/* STE fields */
+
+/* STE field extractors */
+
+#define STE_VALID(x) extract32((x)->word[0], 0, 1)
+
+/* STE.Config decoding: bit0 = stage1, bit1 = stage2, bit2 clear = abort */
+#define STE_CONFIG(x) extract32((x)->word[0], 1, 3)
+#define STE_CFG_S1_ENABLED(config) (config & 0x1)
+#define STE_CFG_S2_ENABLED(config) (config & 0x2)
+#define STE_CFG_ABORT(config) (!(config & 0x4))
+#define STE_CFG_BYPASS(config) (config == 0x4)
+
+#define STE_S1FMT(x) extract32((x)->word[0], 4 , 2)
+#define STE_S1CDMAX(x) extract32((x)->word[1], 27, 5)
+#define STE_S1STALLD(x) extract32((x)->word[2], 27, 1)
+#define STE_EATS(x) extract32((x)->word[2], 28, 2)
+#define STE_STRW(x) extract32((x)->word[2], 30, 2)
+#define STE_S2VMID(x) extract32((x)->word[4], 0 , 16)
+#define STE_S2T0SZ(x) extract32((x)->word[5], 0 , 6)
+#define STE_S2SL0(x) extract32((x)->word[5], 6 , 2)
+#define STE_S2TG(x) extract32((x)->word[5], 14, 2)
+#define STE_S2PS(x) extract32((x)->word[5], 16, 3)
+#define STE_S2AA64(x) extract32((x)->word[5], 19, 1)
+#define STE_S2HD(x) extract32((x)->word[5], 24, 1)
+#define STE_S2HA(x) extract32((x)->word[5], 25, 1)
+#define STE_S2S(x) extract32((x)->word[5], 26, 1)
+/*
+ * 48-bit context descriptor pointer: word[1][15:0] holds bits [47:32],
+ * word[0][31:6] holds bits [31:6]. Use uint64_t for the accumulator:
+ * 'unsigned long' is 32 bits on 32-bit hosts and silently truncated the
+ * upper address bits.
+ */
+#define STE_CTXPTR(x)                                           \
+    ({                                                          \
+        uint64_t addr;                                          \
+        addr = (uint64_t)extract32((x)->word[1], 0, 16) << 32;  \
+        addr |= (uint64_t)((x)->word[0] & 0xffffffc0);          \
+        addr;                                                   \
+    })
+
+/* Stage-2 translation table base, same 64-bit accumulator fix as above */
+#define STE_S2TTB(x)                                            \
+    ({                                                          \
+        uint64_t addr;                                          \
+        addr = (uint64_t)extract32((x)->word[7], 0, 16) << 32;  \
+        addr |= (uint64_t)((x)->word[6] & 0xfffffff0);          \
+        addr;                                                   \
+    })
+
+/*
+ * Convert the 3-bit OAS/IPS/S2PS encoding to an output address size in
+ * bits; returns -1 for reserved encodings.
+ */
+static inline int oas2bits(int oas_field)
+{
+    static const int oas_bits[] = { 32, 36, 40, 42, 44, 48 };
+
+    if (oas_field < 0 || oas_field >= (int)ARRAY_SIZE(oas_bits)) {
+        return -1;
+    }
+    return oas_bits[oas_field];
+}
+
+/*
+ * pa_range - Output address range (in bits) for a stage-2 STE.
+ *
+ * AArch32 (S2AA64 == 0) is fixed at 40 bits; otherwise the range is
+ * STE.S2PS capped at the implementation's SMMU_IDR5_OAS.
+ */
+static inline int pa_range(STE *ste)
+{
+    int oas_field = MIN(STE_S2PS(ste), SMMU_IDR5_OAS);
+
+    if (!STE_S2AA64(ste)) {
+        return 40;
+    }
+
+    return oas2bits(oas_field);
+}
+
+/*
+ * Maximum physical address. The shift must be done in 64 bits:
+ * pa_range() can return up to 48, and (1 << 48) on a 32-bit int is
+ * undefined behaviour.
+ */
+#define MAX_PA(ste) ((1ULL << pa_range(ste)) - 1)
+
+/* CD fields */
+
+/* CD field extractors; (sel) selects TTB0/TTB1 where applicable */
+
+#define CD_VALID(x) extract32((x)->word[0], 30, 1)
+#define CD_ASID(x) extract32((x)->word[1], 16, 16)
+/* 64-bit translation table base for ttbr (sel): high 19 bits + aligned low word */
+#define CD_TTB(x, sel) \
+ ({ \
+ uint64_t hi, lo; \
+ hi = extract32((x)->word[(sel) * 2 + 3], 0, 19); \
+ hi <<= 32; \
+ lo = (x)->word[(sel) * 2 + 2] & ~0xfULL; \
+ hi | lo; \
+ })
+
+#define CD_TSZ(x, sel) extract32((x)->word[0], (16 * (sel)) + 0, 6)
+#define CD_TG(x, sel) extract32((x)->word[0], (16 * (sel)) + 6, 2)
+#define CD_EPD(x, sel) extract32((x)->word[0], (16 * (sel)) + 14, 1)
+#define CD_ENDI(x) extract32((x)->word[0], 15, 1)
+#define CD_IPS(x) extract32((x)->word[1], 0 , 3)
+#define CD_TBI(x) extract32((x)->word[1], 6 , 2)
+#define CD_HD(x) extract32((x)->word[1], 10 , 1)
+#define CD_HA(x) extract32((x)->word[1], 11 , 1)
+#define CD_S(x) extract32((x)->word[1], 12, 1)
+#define CD_R(x) extract32((x)->word[1], 13, 1)
+#define CD_A(x) extract32((x)->word[1], 14, 1)
+#define CD_AARCH64(x) extract32((x)->word[1], 9 , 1)
+
+/* Validity bit of a CD level-1 descriptor */
+#define CDM_VALID(x) ((x)->word[0] & 0x1)
+
+/*
+ * Validity check for a context descriptor. @s and @ste are currently
+ * unused (presumably reserved for future substream/secure checks); only
+ * CD.V is tested.
+ */
+static inline int is_cd_valid(SMMUv3State *s, STE *ste, CD *cd)
+{
+ return CD_VALID(cd);
+}
+
+/**
+ * tg2granule - Decode the CD translation granule size field for the
+ * ttbr in use, returning log2 of the granule size (0 for reserved
+ * encodings).
+ * @bits: TG0/1 field value
+ * @ttbr: ttbr index in use (0 or 1; TG0 and TG1 use different encodings)
+ */
+static inline int tg2granule(int bits, int ttbr)
+{
+    /* log2(granule) tables for the TG0 and TG1 encodings respectively */
+    static const int tg0_granule[] = { 12, 16, 14, 0 };
+    static const int tg1_granule[] = { 0, 14, 12, 16 };
+
+    if (bits < 0 || bits > 3) {
+        return 0;
+    }
+    return ttbr ? tg1_granule[bits] : tg0_granule[bits];
+}
+
+/* Level-2 table pointer held by an L1 stream table descriptor */
+static inline uint64_t l1std_l2ptr(STEDesc *desc)
+{
+    /* word[1] = high 32 bits; low 5 bits of word[0] are the SPAN field */
+    return ((uint64_t)desc->word[1] << 32) | (desc->word[0] & ~0x1fULL);
+}
+
+/* log2(number of STEs) reachable through this L1 descriptor */
+#define L1STD_SPAN(stm) (extract32((stm)->word[0], 0, 4))
+
+#endif
diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
new file mode 100644
index 0000000000..b3026dea20
--- /dev/null
+++ b/hw/arm/smmuv3.c
@@ -0,0 +1,1191 @@
+/*
+ * Copyright (C) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2017 Red Hat, Inc.
+ * Written by Prem Mallappa, Eric Auger
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/boards.h"
+#include "sysemu/sysemu.h"
+#include "hw/sysbus.h"
+#include "hw/qdev-core.h"
+#include "hw/pci/pci.h"
+#include "exec/address-spaces.h"
+#include "trace.h"
+#include "qemu/log.h"
+#include "qemu/error-report.h"
+#include "qapi/error.h"
+
+#include "hw/arm/smmuv3.h"
+#include "smmuv3-internal.h"
+
+/**
+ * smmuv3_trigger_irq - pulse @irq if enabled and update
+ * GERROR register in case of GERROR interrupt
+ *
+ * @irq: irq type
+ * @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR)
+ */
+static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq,
+ uint32_t gerror_mask)
+{
+
+ bool pulse = false;
+
+ switch (irq) {
+ case SMMU_IRQ_EVTQ:
+ pulse = smmuv3_eventq_irq_enabled(s);
+ break;
+ case SMMU_IRQ_PRIQ:
+ qemu_log_mask(LOG_UNIMP, "PRI not yet supported\n");
+ break;
+ case SMMU_IRQ_CMD_SYNC:
+ pulse = true;
+ break;
+ case SMMU_IRQ_GERROR:
+ {
+ /* a GERROR bit is "pending" while it differs from GERRORN */
+ uint32_t pending = s->gerror ^ s->gerrorn;
+ uint32_t new_gerrors = ~pending & gerror_mask;
+
+ if (!new_gerrors) {
+ /* only toggle non pending errors */
+ return;
+ }
+ s->gerror ^= new_gerrors;
+ trace_smmuv3_write_gerror(new_gerrors, s->gerror);
+
+ pulse = smmuv3_gerror_irq_enabled(s);
+ break;
+ }
+ }
+ if (pulse) {
+ trace_smmuv3_trigger_irq(irq);
+ qemu_irq_pulse(s->irq[irq]);
+ }
+}
+
+/*
+ * Guest write to GERRORN: acknowledges pending GERROR bits by toggling
+ * them. Toggling a bit that is not pending is logged but tolerated
+ * (CONSTRAINED UNPREDICTABLE).
+ */
+static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
+{
+ uint32_t pending = s->gerror ^ s->gerrorn;
+ uint32_t toggled = s->gerrorn ^ new_gerrorn;
+
+ if (toggled & ~pending) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "guest toggles non pending errors = 0x%x\n",
+ toggled & ~pending);
+ }
+
+ /*
+ * We do not raise any error in case guest toggles bits corresponding
+ * to not active IRQs (CONSTRAINED UNPREDICTABLE)
+ */
+ s->gerrorn = new_gerrorn;
+
+ trace_smmuv3_write_gerrorn(toggled & pending, s->gerrorn);
+}
+
+/* Read the queue entry at the consumer index of @q into @data */
+static inline MemTxResult queue_read(SMMUQueue *q, void *data)
+{
+    return dma_memory_read(&address_space_memory, Q_CONS_ENTRY(q),
+                           data, q->entry_size);
+}
+
+/* Write @data at the producer index of @q, advancing PROD on success */
+static MemTxResult queue_write(SMMUQueue *q, void *data)
+{
+    MemTxResult res;
+
+    res = dma_memory_write(&address_space_memory, Q_PROD_ENTRY(q),
+                           data, q->entry_size);
+    if (res == MEMTX_OK) {
+        queue_prod_incr(q);
+    }
+    return res;
+}
+
+/*
+ * Push @evt onto the event queue. Fails with MEMTX_ERROR if the queue
+ * is disabled or full; raises the EVTQ interrupt once the entry is in.
+ */
+static MemTxResult smmuv3_write_eventq(SMMUv3State *s, Evt *evt)
+{
+    SMMUQueue *q = &s->eventq;
+    MemTxResult r;
+
+    if (!smmuv3_eventq_enabled(s)) {
+        return MEMTX_ERROR;
+    }
+
+    if (smmuv3_q_full(q)) {
+        return MEMTX_ERROR;
+    }
+
+    r = queue_write(q, evt);
+    if (r != MEMTX_OK) {
+        return r;
+    }
+
+    /*
+     * queue_write() just advanced PROD, so the queue is necessarily
+     * non-empty here. The previous un-negated test (queue empty after a
+     * successful write) could never be true, so the EVTQ interrupt was
+     * never raised.
+     */
+    if (!smmuv3_q_empty(q)) {
+        smmuv3_trigger_irq(s, SMMU_IRQ_EVTQ, 0);
+    }
+    return MEMTX_OK;
+}
+
+/*
+ * smmuv3_record_event - Encode @info into an event record and push it
+ * onto the event queue.
+ *
+ * No-op when the event queue is disabled or the type is SMMU_EVT_OK.
+ * On queue write failure the EVENTQ_ABT_ERR gerror is raised. Sets
+ * info->recorded after attempting the write.
+ */
+void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *info)
+{
+    /*
+     * Zero-initialize: the EVT_SET_* setters only deposit individual
+     * bit fields, so an uninitialized record would push stack garbage
+     * in all untouched words.
+     */
+    Evt evt = {0};
+    MemTxResult r;
+
+    if (!smmuv3_eventq_enabled(s)) {
+        return;
+    }
+
+    EVT_SET_TYPE(&evt, info->type);
+    EVT_SET_SID(&evt, info->sid);
+
+    switch (info->type) {
+    case SMMU_EVT_OK:
+        return;
+    case SMMU_EVT_F_UUT:
+        EVT_SET_SSID(&evt, info->u.f_uut.ssid);
+        EVT_SET_SSV(&evt,  info->u.f_uut.ssv);
+        EVT_SET_ADDR(&evt, info->u.f_uut.addr);
+        EVT_SET_RNW(&evt,  info->u.f_uut.rnw);
+        EVT_SET_PNU(&evt,  info->u.f_uut.pnu);
+        EVT_SET_IND(&evt,  info->u.f_uut.ind);
+        break;
+    case SMMU_EVT_C_BAD_STREAMID:
+        EVT_SET_SSID(&evt, info->u.c_bad_streamid.ssid);
+        EVT_SET_SSV(&evt,  info->u.c_bad_streamid.ssv);
+        break;
+    case SMMU_EVT_F_STE_FETCH:
+        EVT_SET_SSID(&evt, info->u.f_ste_fetch.ssid);
+        EVT_SET_SSV(&evt,  info->u.f_ste_fetch.ssv);
+        EVT_SET_ADDR(&evt, info->u.f_ste_fetch.addr);
+        break;
+    case SMMU_EVT_C_BAD_STE:
+        EVT_SET_SSID(&evt, info->u.c_bad_ste.ssid);
+        EVT_SET_SSV(&evt,  info->u.c_bad_ste.ssv);
+        break;
+    case SMMU_EVT_F_STREAM_DISABLED:
+        break;
+    case SMMU_EVT_F_TRANS_FORBIDDEN:
+        EVT_SET_ADDR(&evt, info->u.f_transl_forbidden.addr);
+        EVT_SET_RNW(&evt,  info->u.f_transl_forbidden.rnw);
+        break;
+    case SMMU_EVT_C_BAD_SUBSTREAMID:
+        EVT_SET_SSID(&evt, info->u.c_bad_substream.ssid);
+        break;
+    case SMMU_EVT_F_CD_FETCH:
+        EVT_SET_SSID(&evt, info->u.f_cd_fetch.ssid);
+        EVT_SET_SSV(&evt,  info->u.f_cd_fetch.ssv);
+        EVT_SET_ADDR(&evt, info->u.f_cd_fetch.addr);
+        break;
+    case SMMU_EVT_C_BAD_CD:
+        EVT_SET_SSID(&evt, info->u.c_bad_cd.ssid);
+        EVT_SET_SSV(&evt,  info->u.c_bad_cd.ssv);
+        break;
+    /* the walk/translation fault family shares the FullInfo layout */
+    case SMMU_EVT_F_WALK_EABT:
+    case SMMU_EVT_F_TRANSLATION:
+    case SMMU_EVT_F_ADDR_SIZE:
+    case SMMU_EVT_F_ACCESS:
+    case SMMU_EVT_F_PERMISSION:
+        EVT_SET_STALL(&evt, info->u.f_walk_eabt.stall);
+        EVT_SET_STAG(&evt,  info->u.f_walk_eabt.stag);
+        EVT_SET_SSID(&evt,  info->u.f_walk_eabt.ssid);
+        EVT_SET_SSV(&evt,   info->u.f_walk_eabt.ssv);
+        EVT_SET_S2(&evt,    info->u.f_walk_eabt.s2);
+        EVT_SET_ADDR(&evt,  info->u.f_walk_eabt.addr);
+        EVT_SET_RNW(&evt,   info->u.f_walk_eabt.rnw);
+        EVT_SET_PNU(&evt,   info->u.f_walk_eabt.pnu);
+        EVT_SET_IND(&evt,   info->u.f_walk_eabt.ind);
+        EVT_SET_CLASS(&evt, info->u.f_walk_eabt.class);
+        EVT_SET_ADDR2(&evt, info->u.f_walk_eabt.addr2);
+        break;
+    /* rest is not implemented */
+    case SMMU_EVT_F_BAD_ATS_TREQ:
+    case SMMU_EVT_F_TLB_CONFLICT:
+    case SMMU_EVT_E_PAGE_REQ:
+    default:
+        g_assert_not_reached();
+    }
+
+    trace_smmuv3_record_event(smmu_event_string(info->type), info->sid);
+    r = smmuv3_write_eventq(s, &evt);
+    if (r != MEMTX_OK) {
+        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_EVENTQ_ABT_ERR_MASK);
+    }
+    info->recorded = true;
+}
+
+/* Reset-time register and queue initialization */
+static void smmuv3_init_regs(SMMUv3State *s)
+{
+ /**
+ * IDR0: stage1 only, AArch64 only, coherent access, 16b ASID,
+ * multi-level stream table
+ */
+ s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S1P, 1); /* stage 1 supported */
+ s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTF, 2); /* AArch64 PTW only */
+ s->idr[0] = FIELD_DP32(s->idr[0], IDR0, COHACC, 1); /* IO coherent */
+ s->idr[0] = FIELD_DP32(s->idr[0], IDR0, ASID16, 1); /* 16-bit ASID */
+ s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTENDIAN, 2); /* little endian */
+ s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STALL_MODEL, 1); /* No stall */
+ /* terminated transaction will always be aborted/error returned */
+ s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TERM_MODEL, 1);
+ /* 2-level stream table supported */
+ s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STLEVEL, 1);
+
+ s->idr[1] = FIELD_DP32(s->idr[1], IDR1, SIDSIZE, SMMU_IDR1_SIDSIZE);
+ s->idr[1] = FIELD_DP32(s->idr[1], IDR1, EVENTQS, SMMU_EVENTQS);
+ s->idr[1] = FIELD_DP32(s->idr[1], IDR1, CMDQS, SMMU_CMDQS);
+
+ /* 4K and 64K granule support */
+ s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN4K, 1);
+ s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN64K, 1);
+ s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS); /* 44 bits */
+
+ /* queue base registers advertise the maximum log2 size in bits [4:0] */
+ s->cmdq.base = deposit64(s->cmdq.base, 0, 5, SMMU_CMDQS);
+ s->cmdq.prod = 0;
+ s->cmdq.cons = 0;
+ s->cmdq.entry_size = sizeof(struct Cmd);
+ s->eventq.base = deposit64(s->eventq.base, 0, 5, SMMU_EVENTQS);
+ s->eventq.prod = 0;
+ s->eventq.cons = 0;
+ s->eventq.entry_size = sizeof(struct Evt);
+
+ s->features = 0;
+ s->sid_split = 0;
+}
+
+/*
+ * Fetch the STE at @addr into @buf. On DMA failure, fill @event with
+ * F_STE_FETCH and return -EINVAL; returns 0 on success.
+ */
+static int smmu_get_ste(SMMUv3State *s, dma_addr_t addr, STE *buf,
+ SMMUEventInfo *event)
+{
+ int ret;
+
+ trace_smmuv3_get_ste(addr);
+ /* TODO: guarantee 64-bit single-copy atomicity */
+ ret = dma_memory_read(&address_space_memory, addr,
+ (void *)buf, sizeof(*buf));
+ if (ret != MEMTX_OK) {
+ /* NOTE(review): message says "pte" but an STE is being fetched */
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Cannot fetch pte at address=0x%"PRIx64"\n", addr);
+ event->type = SMMU_EVT_F_STE_FETCH;
+ event->u.f_ste_fetch.addr = addr;
+ return -EINVAL;
+ }
+ return 0;
+
+}
+
+/*
+ * Fetch the context descriptor referenced by @ste into @buf.
+ * @ssid > 0 not supported yet. On DMA failure, fill @event with
+ * F_CD_FETCH and return -EINVAL; returns 0 on success.
+ */
+static int smmu_get_cd(SMMUv3State *s, STE *ste, uint32_t ssid,
+                       CD *buf, SMMUEventInfo *event)
+{
+    dma_addr_t addr = STE_CTXPTR(ste);
+    int ret;
+
+    trace_smmuv3_get_cd(addr);
+    /* TODO: guarantee 64-bit single-copy atomicity */
+    ret = dma_memory_read(&address_space_memory, addr,
+                          (void *)buf, sizeof(*buf));
+    if (ret != MEMTX_OK) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "Cannot fetch pte at address=0x%"PRIx64"\n", addr);
+        event->type = SMMU_EVT_F_CD_FETCH;
+        /*
+         * Store into the member matching the event type; the previous
+         * code wrote u.f_ste_fetch.addr, which only worked because both
+         * members share the SSIDAddrInfo layout.
+         */
+        event->u.f_cd_fetch.addr = addr;
+        return -EINVAL;
+    }
+    return 0;
+}
+
+/*
+ * decode_ste - Decode @ste into @cfg.
+ *
+ * Returns 0 when the caller must continue with CD decoding. Returns <0
+ * when translation stops here: cfg->aborted / cfg->bypassed are set for
+ * abort/bypass configurations (no event recorded), otherwise @event is
+ * set to C_BAD_STE.
+ */
+static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg,
+ STE *ste, SMMUEventInfo *event)
+{
+ uint32_t config;
+ int ret = -EINVAL;
+
+ if (!STE_VALID(ste)) {
+ goto bad_ste;
+ }
+
+ config = STE_CONFIG(ste);
+
+ if (STE_CFG_ABORT(config)) {
+ cfg->aborted = true; /* abort but don't record any event */
+ return ret;
+ }
+
+ if (STE_CFG_BYPASS(config)) {
+ cfg->bypassed = true;
+ return ret;
+ }
+
+ if (STE_CFG_S2_ENABLED(config)) {
+ qemu_log_mask(LOG_UNIMP, "SMMUv3 does not support stage 2 yet\n");
+ goto bad_ste;
+ }
+
+ if (STE_S1CDMAX(ste) != 0) {
+ qemu_log_mask(LOG_UNIMP,
+ "SMMUv3 does not support multiple context descriptors yet\n");
+ goto bad_ste;
+ }
+
+ if (STE_S1STALLD(ste)) {
+ qemu_log_mask(LOG_UNIMP,
+ "SMMUv3 S1 stalling fault model not allowed yet\n");
+ goto bad_ste;
+ }
+ return 0;
+
+bad_ste:
+ event->type = SMMU_EVT_C_BAD_STE;
+ return -EINVAL;
+}
+
+/**
+ * smmu_find_ste - Return the stream table entry associated
+ * to the sid
+ *
+ * @s: smmuv3 handle
+ * @sid: stream ID
+ * @ste: returned stream table entry
+ * @event: handle to an event info
+ *
+ * Supports linear and 2-level stream table
+ * Return 0 on success, -EINVAL otherwise
+ */
+static int smmu_find_ste(SMMUv3State *s, uint32_t sid, STE *ste,
+                         SMMUEventInfo *event)
+{
+    dma_addr_t addr;
+    int ret;
+
+    trace_smmuv3_find_ste(sid, s->features, s->sid_split);
+    /*
+     * Check SID range: valid stream IDs are 0 .. 2^SIDSIZE - 1, so
+     * sid == (1 << SMMU_IDR1_SIDSIZE) is already out of range. The
+     * previous '>' comparison let that one-past-the-end SID through.
+     */
+    if (sid >= (1 << SMMU_IDR1_SIDSIZE)) {
+        event->type = SMMU_EVT_C_BAD_STREAMID;
+        return -EINVAL;
+    }
+    if (s->features & SMMU_FEATURE_2LVL_STE) {
+        int l1_ste_offset, l2_ste_offset, max_l2_ste, span;
+        dma_addr_t strtab_base, l1ptr, l2ptr;
+        STEDesc l1std;
+
+        strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK;
+        l1_ste_offset = sid >> s->sid_split;
+        l2_ste_offset = sid & ((1 << s->sid_split) - 1);
+        l1ptr = (dma_addr_t)(strtab_base + l1_ste_offset * sizeof(l1std));
+        /* TODO: guarantee 64-bit single-copy atomicity */
+        ret = dma_memory_read(&address_space_memory, l1ptr,
+                              (uint8_t *)&l1std, sizeof(l1std));
+        if (ret != MEMTX_OK) {
+            qemu_log_mask(LOG_GUEST_ERROR,
+                          "Could not read L1PTR at 0X%"PRIx64"\n", l1ptr);
+            event->type = SMMU_EVT_F_STE_FETCH;
+            event->u.f_ste_fetch.addr = l1ptr;
+            return -EINVAL;
+        }
+
+        span = L1STD_SPAN(&l1std);
+
+        if (!span) {
+            /* l2ptr is not valid */
+            qemu_log_mask(LOG_GUEST_ERROR,
+                          "invalid sid=%d (L1STD span=0)\n", sid);
+            event->type = SMMU_EVT_C_BAD_STREAMID;
+            return -EINVAL;
+        }
+        max_l2_ste = (1 << span) - 1;
+        l2ptr = l1std_l2ptr(&l1std);
+        trace_smmuv3_find_ste_2lvl(s->strtab_base, l1ptr, l1_ste_offset,
+                                   l2ptr, l2_ste_offset, max_l2_ste);
+        if (l2_ste_offset > max_l2_ste) {
+            qemu_log_mask(LOG_GUEST_ERROR,
+                          "l2_ste_offset=%d > max_l2_ste=%d\n",
+                          l2_ste_offset, max_l2_ste);
+            event->type = SMMU_EVT_C_BAD_STE;
+            return -EINVAL;
+        }
+        addr = l2ptr + l2_ste_offset * sizeof(*ste);
+    } else {
+        addr = s->strtab_base + sid * sizeof(*ste);
+    }
+
+    if (smmu_get_ste(s, addr, ste, event)) {
+        return -EINVAL;
+    }
+
+    return 0;
+}
+
+/*
+ * decode_cd - Decode @cd into @cfg (stage-1 AArch64 only).
+ *
+ * Rejects configurations this model does not support (AArch32, stalls,
+ * HTTU) and validates each enabled TTBR's TSZ/TG/TTB. On any failure
+ * sets @event to C_BAD_CD and returns -EINVAL; returns 0 on success.
+ */
+static int decode_cd(SMMUTransCfg *cfg, CD *cd, SMMUEventInfo *event)
+{
+ int ret = -EINVAL;
+ int i;
+
+ if (!CD_VALID(cd) || !CD_AARCH64(cd)) {
+ goto bad_cd;
+ }
+ if (!CD_A(cd)) {
+ goto bad_cd; /* SMMU_IDR0.TERM_MODEL == 1 */
+ }
+ if (CD_S(cd)) {
+ goto bad_cd; /* !STE_SECURE && SMMU_IDR0.STALL_MODEL == 1 */
+ }
+ if (CD_HA(cd) || CD_HD(cd)) {
+ goto bad_cd; /* HTTU = 0 */
+ }
+
+ /* we support only those at the moment */
+ cfg->aa64 = true;
+ cfg->stage = 1;
+
+ /* output address size: CD.IPS capped at the implementation's OAS */
+ cfg->oas = oas2bits(CD_IPS(cd));
+ cfg->oas = MIN(oas2bits(SMMU_IDR5_OAS), cfg->oas);
+ cfg->tbi = CD_TBI(cd);
+ cfg->asid = CD_ASID(cd);
+
+ trace_smmuv3_decode_cd(cfg->oas);
+
+ /* decode data dependent on TT */
+ for (i = 0; i <= 1; i++) {
+ int tg, tsz;
+ SMMUTransTableInfo *tt = &cfg->tt[i];
+
+ cfg->tt[i].disabled = CD_EPD(cd, i);
+ if (cfg->tt[i].disabled) {
+ continue;
+ }
+
+ tsz = CD_TSZ(cd, i);
+ if (tsz < 16 || tsz > 39) {
+ goto bad_cd;
+ }
+
+ /* only 4K and 64K granules are supported, and no big endian */
+ tg = CD_TG(cd, i);
+ tt->granule_sz = tg2granule(tg, i);
+ if ((tt->granule_sz != 12 && tt->granule_sz != 16) || CD_ENDI(cd)) {
+ goto bad_cd;
+ }
+
+ tt->tsz = tsz;
+ tt->ttb = CD_TTB(cd, i);
+ if (tt->ttb & ~(MAKE_64BIT_MASK(0, cfg->oas))) {
+ goto bad_cd;
+ }
+ trace_smmuv3_decode_cd_tt(i, tt->tsz, tt->ttb, tt->granule_sz);
+ }
+
+ event->record_trans_faults = CD_R(cd);
+
+ return 0;
+
+bad_cd:
+ event->type = SMMU_EVT_C_BAD_CD;
+ return ret;
+}
+
+/**
+ * smmuv3_decode_config - Prepare the translation configuration
+ * for the @mr iommu region
+ * @mr: iommu memory region the translation config must be prepared for
+ * @cfg: output translation configuration which is populated through
+ * the different configuration decoding steps
+ * @event: must be zero'ed by the caller
+ *
+ * return < 0 if the translation needs to be aborted (@event is filled
+ * accordingly). Return 0 otherwise.
+ */
+static int smmuv3_decode_config(IOMMUMemoryRegion *mr, SMMUTransCfg *cfg,
+ SMMUEventInfo *event)
+{
+ SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
+ uint32_t sid = smmu_get_sid(sdev);
+ SMMUv3State *s = sdev->smmu;
+ int ret = -EINVAL;
+ STE ste;
+ CD cd;
+
+ /* each decoding step fills @event on failure */
+ if (smmu_find_ste(s, sid, &ste, event)) {
+ return ret;
+ }
+
+ if (decode_ste(s, cfg, &ste, event)) {
+ return ret;
+ }
+
+ /* ssid > 0 not supported: always fetch context descriptor 0 */
+ if (smmu_get_cd(s, &ste, 0 /* ssid */, &cd, event)) {
+ return ret;
+ }
+
+ return decode_cd(cfg, &cd, event);
+}
+
+/*
+ * smmuv3_translate - IOMMU memory region translate callback.
+ *
+ * Decodes the STE/CD configuration for the device behind @mr and walks
+ * the page tables for @addr. On failure the returned entry has
+ * perm == IOMMU_NONE and an event may be recorded; on success (or when
+ * the SMMU is disabled / config bypasses translation) perm is @flag.
+ */
+static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
+ IOMMUAccessFlags flag)
+{
+ SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
+ SMMUv3State *s = sdev->smmu;
+ uint32_t sid = smmu_get_sid(sdev);
+ SMMUEventInfo event = {.type = SMMU_EVT_OK, .sid = sid};
+ SMMUPTWEventInfo ptw_info = {};
+ SMMUTransCfg cfg = {};
+ /* identity mapping by default; perm upgraded on success */
+ IOMMUTLBEntry entry = {
+ .target_as = &address_space_memory,
+ .iova = addr,
+ .translated_addr = addr,
+ .addr_mask = ~(hwaddr)0,
+ .perm = IOMMU_NONE,
+ };
+ int ret = 0;
+
+ if (!smmu_enabled(s)) {
+ /* SMMU disabled: pass-through with ret == 0 */
+ goto out;
+ }
+
+ ret = smmuv3_decode_config(mr, &cfg, &event);
+ if (ret) {
+ goto out;
+ }
+
+ if (cfg.aborted) {
+ goto out;
+ }
+
+ /* map page table walk errors to their event records */
+ ret = smmu_ptw(&cfg, addr, flag, &entry, &ptw_info);
+ if (ret) {
+ switch (ptw_info.type) {
+ case SMMU_PTW_ERR_WALK_EABT:
+ event.type = SMMU_EVT_F_WALK_EABT;
+ event.u.f_walk_eabt.addr = addr;
+ event.u.f_walk_eabt.rnw = flag & 0x1;
+ event.u.f_walk_eabt.class = 0x1;
+ event.u.f_walk_eabt.addr2 = ptw_info.addr;
+ break;
+ case SMMU_PTW_ERR_TRANSLATION:
+ if (event.record_trans_faults) {
+ event.type = SMMU_EVT_F_TRANSLATION;
+ event.u.f_translation.addr = addr;
+ event.u.f_translation.rnw = flag & 0x1;
+ }
+ break;
+ case SMMU_PTW_ERR_ADDR_SIZE:
+ if (event.record_trans_faults) {
+ event.type = SMMU_EVT_F_ADDR_SIZE;
+ event.u.f_addr_size.addr = addr;
+ event.u.f_addr_size.rnw = flag & 0x1;
+ }
+ break;
+ case SMMU_PTW_ERR_ACCESS:
+ if (event.record_trans_faults) {
+ event.type = SMMU_EVT_F_ACCESS;
+ event.u.f_access.addr = addr;
+ event.u.f_access.rnw = flag & 0x1;
+ }
+ break;
+ case SMMU_PTW_ERR_PERMISSION:
+ if (event.record_trans_faults) {
+ event.type = SMMU_EVT_F_PERMISSION;
+ event.u.f_permission.addr = addr;
+ event.u.f_permission.rnw = flag & 0x1;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+out:
+ if (ret) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s translation failed for iova=0x%"PRIx64"(%d)\n",
+ mr->parent_obj.name, addr, ret);
+ entry.perm = IOMMU_NONE;
+ smmuv3_record_event(s, &event);
+ } else if (!cfg.aborted) {
+ entry.perm = flag;
+ trace_smmuv3_translate(mr->parent_obj.name, sid, addr,
+ entry.translated_addr, entry.perm);
+ }
+
+ return entry;
+}
+
+/*
+ * smmuv3_cmdq_consume - Drain the command queue.
+ *
+ * Processes commands until the queue is empty, a command error occurs,
+ * or a CMDQ_ERR gerror is pending. On error the CMDQ error code is
+ * stored and the CMDQ_ERR gerror raised. Only CMD_SYNC has an effect;
+ * all other known commands are accepted but unhandled (no TLB caching
+ * is modelled yet).
+ */
+static int smmuv3_cmdq_consume(SMMUv3State *s)
+{
+ SMMUCmdError cmd_error = SMMU_CERROR_NONE;
+ SMMUQueue *q = &s->cmdq;
+ SMMUCommandType type = 0;
+
+ if (!smmuv3_cmdq_enabled(s)) {
+ return 0;
+ }
+ /*
+ * some commands depend on register values, typically CR0. In case those
+ * register values change while handling the command, spec says it
+ * is UNPREDICTABLE whether the command is interpreted under the new
+ * or old value.
+ */
+
+ while (!smmuv3_q_empty(q)) {
+ uint32_t pending = s->gerror ^ s->gerrorn;
+ Cmd cmd;
+
+ trace_smmuv3_cmdq_consume(Q_PROD(q), Q_CONS(q),
+ Q_PROD_WRAP(q), Q_CONS_WRAP(q));
+
+ if (FIELD_EX32(pending, GERROR, CMDQ_ERR)) {
+ break;
+ }
+
+ if (queue_read(q, &cmd) != MEMTX_OK) {
+ cmd_error = SMMU_CERROR_ABT;
+ break;
+ }
+
+ type = CMD_TYPE(&cmd);
+
+ trace_smmuv3_cmdq_opcode(smmu_cmd_string(type));
+
+ switch (type) {
+ case SMMU_CMD_SYNC:
+ /* raise the CMD_SYNC interrupt if completion signalling is IRQ */
+ if (CMD_SYNC_CS(&cmd) & CMD_SYNC_SIG_IRQ) {
+ smmuv3_trigger_irq(s, SMMU_IRQ_CMD_SYNC, 0);
+ }
+ break;
+ case SMMU_CMD_PREFETCH_CONFIG:
+ case SMMU_CMD_PREFETCH_ADDR:
+ case SMMU_CMD_CFGI_STE:
+ case SMMU_CMD_CFGI_STE_RANGE: /* same as SMMU_CMD_CFGI_ALL */
+ case SMMU_CMD_CFGI_CD:
+ case SMMU_CMD_CFGI_CD_ALL:
+ case SMMU_CMD_TLBI_NH_ALL:
+ case SMMU_CMD_TLBI_NH_ASID:
+ case SMMU_CMD_TLBI_NH_VA:
+ case SMMU_CMD_TLBI_NH_VAA:
+ case SMMU_CMD_TLBI_EL3_ALL:
+ case SMMU_CMD_TLBI_EL3_VA:
+ case SMMU_CMD_TLBI_EL2_ALL:
+ case SMMU_CMD_TLBI_EL2_ASID:
+ case SMMU_CMD_TLBI_EL2_VA:
+ case SMMU_CMD_TLBI_EL2_VAA:
+ case SMMU_CMD_TLBI_S12_VMALL:
+ case SMMU_CMD_TLBI_S2_IPA:
+ case SMMU_CMD_TLBI_NSNH_ALL:
+ case SMMU_CMD_ATC_INV:
+ case SMMU_CMD_PRI_RESP:
+ case SMMU_CMD_RESUME:
+ case SMMU_CMD_STALL_TERM:
+ trace_smmuv3_unhandled_cmd(type);
+ break;
+ default:
+ cmd_error = SMMU_CERROR_ILL;
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Illegal command type: %d\n", CMD_TYPE(&cmd));
+ break;
+ }
+ if (cmd_error) {
+ break;
+ }
+ /*
+ * We only increment the cons index after the completion of
+ * the command. We do that because the SYNC returns immediately
+ * and does not check the completion of previous commands
+ */
+ queue_cons_incr(q);
+ }
+
+ if (cmd_error) {
+ trace_smmuv3_cmdq_consume_error(smmu_cmd_string(type), cmd_error);
+ smmu_write_cmdq_err(s, cmd_error);
+ smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_CMDQ_ERR_MASK);
+ }
+
+ trace_smmuv3_cmdq_consume_out(Q_PROD(q), Q_CONS(q),
+ Q_PROD_WRAP(q), Q_CONS_WRAP(q));
+
+ return 0;
+}
+
+/* 64-bit register writes; unknown offsets are write-ignored (WI) */
+static MemTxResult smmu_writell(SMMUv3State *s, hwaddr offset,
+ uint64_t data, MemTxAttrs attrs)
+{
+ switch (offset) {
+ case A_GERROR_IRQ_CFG0:
+ s->gerror_irq_cfg0 = data;
+ return MEMTX_OK;
+ case A_STRTAB_BASE:
+ s->strtab_base = data;
+ return MEMTX_OK;
+ case A_CMDQ_BASE:
+ s->cmdq.base = data;
+ /* queue size lives in base bits [4:0], clamped to the supported max */
+ s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
+ if (s->cmdq.log2size > SMMU_CMDQS) {
+ s->cmdq.log2size = SMMU_CMDQS;
+ }
+ return MEMTX_OK;
+ case A_EVENTQ_BASE:
+ s->eventq.base = data;
+ s->eventq.log2size = extract64(s->eventq.base, 0, 5);
+ if (s->eventq.log2size > SMMU_EVENTQS) {
+ s->eventq.log2size = SMMU_EVENTQS;
+ }
+ return MEMTX_OK;
+ case A_EVENTQ_IRQ_CFG0:
+ s->eventq_irq_cfg0 = data;
+ return MEMTX_OK;
+ default:
+ qemu_log_mask(LOG_UNIMP,
+ "%s Unexpected 64-bit access to 0x%"PRIx64" (WI)\n",
+ __func__, offset);
+ return MEMTX_OK;
+ }
+}
+
+/*
+ * 32-bit register writes. 64-bit registers are also accessible as two
+ * 32-bit halves (offset and offset + 4). Unknown offsets are
+ * write-ignored (WI).
+ */
+static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
+ uint64_t data, MemTxAttrs attrs)
+{
+ switch (offset) {
+ case A_CR0:
+ s->cr[0] = data;
+ s->cr0ack = data & ~SMMU_CR0_RESERVED;
+ /* in case the command queue has been enabled */
+ smmuv3_cmdq_consume(s);
+ return MEMTX_OK;
+ case A_CR1:
+ s->cr[1] = data;
+ return MEMTX_OK;
+ case A_CR2:
+ s->cr[2] = data;
+ return MEMTX_OK;
+ case A_IRQ_CTRL:
+ s->irq_ctrl = data;
+ return MEMTX_OK;
+ case A_GERRORN:
+ smmuv3_write_gerrorn(s, data);
+ /*
+ * By acknowledging the CMDQ_ERR, SW may notify cmds can
+ * be processed again
+ */
+ smmuv3_cmdq_consume(s);
+ return MEMTX_OK;
+ case A_GERROR_IRQ_CFG0: /* 64b */
+ s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 0, 32, data);
+ return MEMTX_OK;
+ case A_GERROR_IRQ_CFG0 + 4:
+ s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 32, 32, data);
+ return MEMTX_OK;
+ case A_GERROR_IRQ_CFG1:
+ s->gerror_irq_cfg1 = data;
+ return MEMTX_OK;
+ case A_GERROR_IRQ_CFG2:
+ s->gerror_irq_cfg2 = data;
+ return MEMTX_OK;
+ case A_STRTAB_BASE: /* 64b */
+ s->strtab_base = deposit64(s->strtab_base, 0, 32, data);
+ return MEMTX_OK;
+ case A_STRTAB_BASE + 4:
+ s->strtab_base = deposit64(s->strtab_base, 32, 32, data);
+ return MEMTX_OK;
+ case A_STRTAB_BASE_CFG:
+ s->strtab_base_cfg = data;
+ /* FMT == 1 selects the 2-level stream table layout */
+ if (FIELD_EX32(data, STRTAB_BASE_CFG, FMT) == 1) {
+ s->sid_split = FIELD_EX32(data, STRTAB_BASE_CFG, SPLIT);
+ s->features |= SMMU_FEATURE_2LVL_STE;
+ }
+ return MEMTX_OK;
+ case A_CMDQ_BASE: /* 64b */
+ s->cmdq.base = deposit64(s->cmdq.base, 0, 32, data);
+ s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
+ if (s->cmdq.log2size > SMMU_CMDQS) {
+ s->cmdq.log2size = SMMU_CMDQS;
+ }
+ return MEMTX_OK;
+ case A_CMDQ_BASE + 4: /* 64b */
+ s->cmdq.base = deposit64(s->cmdq.base, 32, 32, data);
+ return MEMTX_OK;
+ case A_CMDQ_PROD:
+ s->cmdq.prod = data;
+ /* new commands may be available to consume */
+ smmuv3_cmdq_consume(s);
+ return MEMTX_OK;
+ case A_CMDQ_CONS:
+ s->cmdq.cons = data;
+ return MEMTX_OK;
+ case A_EVENTQ_BASE: /* 64b */
+ s->eventq.base = deposit64(s->eventq.base, 0, 32, data);
+ s->eventq.log2size = extract64(s->eventq.base, 0, 5);
+ if (s->eventq.log2size > SMMU_EVENTQS) {
+ s->eventq.log2size = SMMU_EVENTQS;
+ }
+ return MEMTX_OK;
+ case A_EVENTQ_BASE + 4:
+ s->eventq.base = deposit64(s->eventq.base, 32, 32, data);
+ return MEMTX_OK;
+ case A_EVENTQ_PROD:
+ s->eventq.prod = data;
+ return MEMTX_OK;
+ case A_EVENTQ_CONS:
+ s->eventq.cons = data;
+ return MEMTX_OK;
+ case A_EVENTQ_IRQ_CFG0: /* 64b */
+ s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 0, 32, data);
+ return MEMTX_OK;
+ case A_EVENTQ_IRQ_CFG0 + 4:
+ s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 32, 32, data);
+ return MEMTX_OK;
+ case A_EVENTQ_IRQ_CFG1:
+ s->eventq_irq_cfg1 = data;
+ return MEMTX_OK;
+ case A_EVENTQ_IRQ_CFG2:
+ s->eventq_irq_cfg2 = data;
+ return MEMTX_OK;
+ default:
+ qemu_log_mask(LOG_UNIMP,
+ "%s Unexpected 32-bit access to 0x%"PRIx64" (WI)\n",
+ __func__, offset);
+ return MEMTX_OK;
+ }
+}
+
+/* MMIO write dispatcher: route to the 32-bit or 64-bit handler by size */
+static MemTxResult smmu_write_mmio(void *opaque, hwaddr offset, uint64_t data,
+                                   unsigned size, MemTxAttrs attrs)
+{
+    SMMUState *sys = opaque;
+    SMMUv3State *s = ARM_SMMUV3(sys);
+    MemTxResult r;
+
+    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
+    offset &= ~0x10000;
+
+    if (size == 8) {
+        r = smmu_writell(s, offset, data, attrs);
+    } else if (size == 4) {
+        r = smmu_writel(s, offset, data, attrs);
+    } else {
+        r = MEMTX_ERROR;
+    }
+
+    trace_smmuv3_write_mmio(offset, data, size, r);
+    return r;
+}
+
+/* 64-bit register reads; unknown offsets read as zero (RAZ) */
+static MemTxResult smmu_readll(SMMUv3State *s, hwaddr offset,
+ uint64_t *data, MemTxAttrs attrs)
+{
+ switch (offset) {
+ case A_GERROR_IRQ_CFG0:
+ *data = s->gerror_irq_cfg0;
+ return MEMTX_OK;
+ case A_STRTAB_BASE:
+ *data = s->strtab_base;
+ return MEMTX_OK;
+ case A_CMDQ_BASE:
+ *data = s->cmdq.base;
+ return MEMTX_OK;
+ case A_EVENTQ_BASE:
+ *data = s->eventq.base;
+ return MEMTX_OK;
+ default:
+ *data = 0;
+ qemu_log_mask(LOG_UNIMP,
+ "%s Unexpected 64-bit access to 0x%"PRIx64" (RAZ)\n",
+ __func__, offset);
+ return MEMTX_OK;
+ }
+}
+
+/*
+ * 32-bit register reads. 64-bit registers are also readable as two
+ * 32-bit halves (offset and offset + 4). Unknown offsets read as zero
+ * (RAZ).
+ */
+static MemTxResult smmu_readl(SMMUv3State *s, hwaddr offset,
+ uint64_t *data, MemTxAttrs attrs)
+{
+ switch (offset) {
+ case A_IDREGS ... A_IDREGS + 0x1f:
+ *data = smmuv3_idreg(offset - A_IDREGS);
+ return MEMTX_OK;
+ case A_IDR0 ... A_IDR5:
+ *data = s->idr[(offset - A_IDR0) / 4];
+ return MEMTX_OK;
+ case A_IIDR:
+ *data = s->iidr;
+ return MEMTX_OK;
+ case A_CR0:
+ *data = s->cr[0];
+ return MEMTX_OK;
+ case A_CR0ACK:
+ *data = s->cr0ack;
+ return MEMTX_OK;
+ case A_CR1:
+ *data = s->cr[1];
+ return MEMTX_OK;
+ case A_CR2:
+ *data = s->cr[2];
+ return MEMTX_OK;
+ case A_STATUSR:
+ *data = s->statusr;
+ return MEMTX_OK;
+ case A_IRQ_CTRL:
+ case A_IRQ_CTRL_ACK:
+ *data = s->irq_ctrl;
+ return MEMTX_OK;
+ case A_GERROR:
+ *data = s->gerror;
+ return MEMTX_OK;
+ case A_GERRORN:
+ *data = s->gerrorn;
+ return MEMTX_OK;
+ case A_GERROR_IRQ_CFG0: /* 64b */
+ *data = extract64(s->gerror_irq_cfg0, 0, 32);
+ return MEMTX_OK;
+ case A_GERROR_IRQ_CFG0 + 4:
+ *data = extract64(s->gerror_irq_cfg0, 32, 32);
+ return MEMTX_OK;
+ case A_GERROR_IRQ_CFG1:
+ *data = s->gerror_irq_cfg1;
+ return MEMTX_OK;
+ case A_GERROR_IRQ_CFG2:
+ *data = s->gerror_irq_cfg2;
+ return MEMTX_OK;
+ case A_STRTAB_BASE: /* 64b */
+ *data = extract64(s->strtab_base, 0, 32);
+ return MEMTX_OK;
+ case A_STRTAB_BASE + 4: /* 64b */
+ *data = extract64(s->strtab_base, 32, 32);
+ return MEMTX_OK;
+ case A_STRTAB_BASE_CFG:
+ *data = s->strtab_base_cfg;
+ return MEMTX_OK;
+ case A_CMDQ_BASE: /* 64b */
+ *data = extract64(s->cmdq.base, 0, 32);
+ return MEMTX_OK;
+ case A_CMDQ_BASE + 4:
+ *data = extract64(s->cmdq.base, 32, 32);
+ return MEMTX_OK;
+ case A_CMDQ_PROD:
+ *data = s->cmdq.prod;
+ return MEMTX_OK;
+ case A_CMDQ_CONS:
+ *data = s->cmdq.cons;
+ return MEMTX_OK;
+ case A_EVENTQ_BASE: /* 64b */
+ *data = extract64(s->eventq.base, 0, 32);
+ return MEMTX_OK;
+ case A_EVENTQ_BASE + 4: /* 64b */
+ *data = extract64(s->eventq.base, 32, 32);
+ return MEMTX_OK;
+ case A_EVENTQ_PROD:
+ *data = s->eventq.prod;
+ return MEMTX_OK;
+ case A_EVENTQ_CONS:
+ *data = s->eventq.cons;
+ return MEMTX_OK;
+ default:
+ *data = 0;
+ qemu_log_mask(LOG_UNIMP,
+ "%s unhandled 32-bit access at 0x%"PRIx64" (RAZ)\n",
+ __func__, offset);
+ return MEMTX_OK;
+ }
+}
+
+static MemTxResult smmu_read_mmio(void *opaque, hwaddr offset, uint64_t *data,
+ unsigned size, MemTxAttrs attrs)
+{
+ SMMUState *sys = opaque;
+ SMMUv3State *s = ARM_SMMUV3(sys);
+ MemTxResult r;
+
+ /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
+ offset &= ~0x10000;
+
+ switch (size) {
+ case 8:
+ r = smmu_readll(s, offset, data, attrs);
+ break;
+ case 4:
+ r = smmu_readl(s, offset, data, attrs);
+ break;
+ default:
+ r = MEMTX_ERROR;
+ break;
+ }
+
+ trace_smmuv3_read_mmio(offset, *data, size, r);
+ return r;
+}
+
+static const MemoryRegionOps smmu_mem_ops = {
+ .read_with_attrs = smmu_read_mmio,
+ .write_with_attrs = smmu_write_mmio,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 8,
+ },
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 8,
+ },
+};
+
+static void smmu_init_irq(SMMUv3State *s, SysBusDevice *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(s->irq); i++) {
+ sysbus_init_irq(dev, &s->irq[i]);
+ }
+}
+
+static void smmu_reset(DeviceState *dev)
+{
+ SMMUv3State *s = ARM_SMMUV3(dev);
+ SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);
+
+ c->parent_reset(dev);
+
+ smmuv3_init_regs(s);
+}
+
+static void smmu_realize(DeviceState *d, Error **errp)
+{
+ SMMUState *sys = ARM_SMMU(d);
+ SMMUv3State *s = ARM_SMMUV3(sys);
+ SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);
+ SysBusDevice *dev = SYS_BUS_DEVICE(d);
+ Error *local_err = NULL;
+
+ c->parent_realize(d, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ memory_region_init_io(&sys->iomem, OBJECT(s),
+ &smmu_mem_ops, sys, TYPE_ARM_SMMUV3, 0x20000);
+
+ sys->mrtypename = TYPE_SMMUV3_IOMMU_MEMORY_REGION;
+
+ sysbus_init_mmio(dev, &sys->iomem);
+
+ smmu_init_irq(s, dev);
+}
+
+static const VMStateDescription vmstate_smmuv3_queue = {
+ .name = "smmuv3_queue",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(base, SMMUQueue),
+ VMSTATE_UINT32(prod, SMMUQueue),
+ VMSTATE_UINT32(cons, SMMUQueue),
+ VMSTATE_UINT8(log2size, SMMUQueue),
+ },
+};
+
+static const VMStateDescription vmstate_smmuv3 = {
+ .name = "smmuv3",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(features, SMMUv3State),
+ VMSTATE_UINT8(sid_size, SMMUv3State),
+ VMSTATE_UINT8(sid_split, SMMUv3State),
+
+ VMSTATE_UINT32_ARRAY(cr, SMMUv3State, 3),
+ VMSTATE_UINT32(cr0ack, SMMUv3State),
+ VMSTATE_UINT32(statusr, SMMUv3State),
+ VMSTATE_UINT32(irq_ctrl, SMMUv3State),
+ VMSTATE_UINT32(gerror, SMMUv3State),
+ VMSTATE_UINT32(gerrorn, SMMUv3State),
+ VMSTATE_UINT64(gerror_irq_cfg0, SMMUv3State),
+ VMSTATE_UINT32(gerror_irq_cfg1, SMMUv3State),
+ VMSTATE_UINT32(gerror_irq_cfg2, SMMUv3State),
+ VMSTATE_UINT64(strtab_base, SMMUv3State),
+ VMSTATE_UINT32(strtab_base_cfg, SMMUv3State),
+ VMSTATE_UINT64(eventq_irq_cfg0, SMMUv3State),
+ VMSTATE_UINT32(eventq_irq_cfg1, SMMUv3State),
+ VMSTATE_UINT32(eventq_irq_cfg2, SMMUv3State),
+
+ VMSTATE_STRUCT(cmdq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),
+ VMSTATE_STRUCT(eventq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),
+
+ VMSTATE_END_OF_LIST(),
+ },
+};
+
+static void smmuv3_instance_init(Object *obj)
+{
+ /* Nothing much to do here as of now */
+}
+
+static void smmuv3_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ SMMUv3Class *c = ARM_SMMUV3_CLASS(klass);
+
+ dc->vmsd = &vmstate_smmuv3;
+ device_class_set_parent_reset(dc, smmu_reset, &c->parent_reset);
+ c->parent_realize = dc->realize;
+ dc->realize = smmu_realize;
+}
+
+static void smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu,
+ IOMMUNotifierFlag old,
+ IOMMUNotifierFlag new)
+{
+ if (old == IOMMU_NOTIFIER_NONE) {
+ warn_report("SMMUV3 does not support vhost/vfio integration yet: "
+ "devices of those types will not function properly");
+ }
+}
+
+static void smmuv3_iommu_memory_region_class_init(ObjectClass *klass,
+ void *data)
+{
+ IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
+
+ imrc->translate = smmuv3_translate;
+ imrc->notify_flag_changed = smmuv3_notify_flag_changed;
+}
+
+static const TypeInfo smmuv3_type_info = {
+ .name = TYPE_ARM_SMMUV3,
+ .parent = TYPE_ARM_SMMU,
+ .instance_size = sizeof(SMMUv3State),
+ .instance_init = smmuv3_instance_init,
+ .class_size = sizeof(SMMUv3Class),
+ .class_init = smmuv3_class_init,
+};
+
+static const TypeInfo smmuv3_iommu_memory_region_info = {
+ .parent = TYPE_IOMMU_MEMORY_REGION,
+ .name = TYPE_SMMUV3_IOMMU_MEMORY_REGION,
+ .class_init = smmuv3_iommu_memory_region_class_init,
+};
+
+static void smmuv3_register_types(void)
+{
+ type_register(&smmuv3_type_info);
+ type_register(&smmuv3_iommu_memory_region_info);
+}
+
+type_init(smmuv3_register_types)
+
diff --git a/hw/arm/trace-events b/hw/arm/trace-events
index 193063ed99..2d92727602 100644
--- a/hw/arm/trace-events
+++ b/hw/arm/trace-events
@@ -2,3 +2,40 @@
# hw/arm/virt-acpi-build.c
virt_acpi_setup(void) "No fw cfg or ACPI disabled. Bailing out."
+
+# hw/arm/smmu-common.c
+smmu_add_mr(const char *name) "%s"
+smmu_page_walk(int stage, uint64_t baseaddr, int first_level, uint64_t start, uint64_t end) "stage=%d, baseaddr=0x%"PRIx64", first level=%d, start=0x%"PRIx64", end=0x%"PRIx64
+smmu_lookup_table(int level, uint64_t baseaddr, int granule_sz, uint64_t start, uint64_t end, int flags, uint64_t subpage_size) "level=%d baseaddr=0x%"PRIx64" granule=%d, start=0x%"PRIx64" end=0x%"PRIx64" flags=%d subpage_size=0x%"PRIx64
+smmu_ptw_level(int level, uint64_t iova, size_t subpage_size, uint64_t baseaddr, uint32_t offset, uint64_t pte) "level=%d iova=0x%"PRIx64" subpage_sz=0x%zx baseaddr=0x%"PRIx64" offset=%d => pte=0x%"PRIx64
+smmu_ptw_invalid_pte(int stage, int level, uint64_t baseaddr, uint64_t pteaddr, uint32_t offset, uint64_t pte) "stage=%d level=%d base@=0x%"PRIx64" pte@=0x%"PRIx64" offset=%d pte=0x%"PRIx64
+smmu_ptw_page_pte(int stage, int level, uint64_t iova, uint64_t baseaddr, uint64_t pteaddr, uint64_t pte, uint64_t address) "stage=%d level=%d iova=0x%"PRIx64" base@=0x%"PRIx64" pte@=0x%"PRIx64" pte=0x%"PRIx64" page address = 0x%"PRIx64
+smmu_ptw_block_pte(int stage, int level, uint64_t baseaddr, uint64_t pteaddr, uint64_t pte, uint64_t iova, uint64_t gpa, int bsize_mb) "stage=%d level=%d base@=0x%"PRIx64" pte@=0x%"PRIx64" pte=0x%"PRIx64" iova=0x%"PRIx64" block address = 0x%"PRIx64" block size = %d MiB"
+smmu_get_pte(uint64_t baseaddr, int index, uint64_t pteaddr, uint64_t pte) "baseaddr=0x%"PRIx64" index=0x%x, pteaddr=0x%"PRIx64", pte=0x%"PRIx64
+
+#hw/arm/smmuv3.c
+smmuv3_read_mmio(uint64_t addr, uint64_t val, unsigned size, uint32_t r) "addr: 0x%"PRIx64" val:0x%"PRIx64" size: 0x%x(%d)"
+smmuv3_trigger_irq(int irq) "irq=%d"
+smmuv3_write_gerror(uint32_t toggled, uint32_t gerror) "toggled=0x%x, new GERROR=0x%x"
+smmuv3_write_gerrorn(uint32_t acked, uint32_t gerrorn) "acked=0x%x, new GERRORN=0x%x"
+smmuv3_unhandled_cmd(uint32_t type) "Unhandled command type=%d"
+smmuv3_cmdq_consume(uint32_t prod, uint32_t cons, uint8_t prod_wrap, uint8_t cons_wrap) "prod=%d cons=%d prod.wrap=%d cons.wrap=%d"
+smmuv3_cmdq_opcode(const char *opcode) "<--- %s"
+smmuv3_cmdq_consume_out(uint32_t prod, uint32_t cons, uint8_t prod_wrap, uint8_t cons_wrap) "prod:%d, cons:%d, prod_wrap:%d, cons_wrap:%d "
+smmuv3_cmdq_consume_error(const char *cmd_name, uint8_t cmd_error) "Error on %s command execution: %d"
+smmuv3_update(bool is_empty, uint32_t prod, uint32_t cons, uint8_t prod_wrap, uint8_t cons_wrap) "q empty:%d prod:%d cons:%d p.wrap:%d p.cons:%d"
+smmuv3_update_check_cmd(int error) "cmdq not enabled or error :0x%x"
+smmuv3_write_mmio(uint64_t addr, uint64_t val, unsigned size, uint32_t r) "addr: 0x%"PRIx64" val:0x%"PRIx64" size: 0x%x(%d)"
+smmuv3_write_mmio_idr(uint64_t addr, uint64_t val) "write to RO/Unimpl reg 0x%"PRIx64" val64:0x%"PRIx64
+smmuv3_write_mmio_evtq_cons_bef_clear(uint32_t prod, uint32_t cons, uint8_t prod_wrap, uint8_t cons_wrap) "Before clearing interrupt prod:0x%x cons:0x%x prod.w:%d cons.w:%d"
+smmuv3_write_mmio_evtq_cons_after_clear(uint32_t prod, uint32_t cons, uint8_t prod_wrap, uint8_t cons_wrap) "after clearing interrupt prod:0x%x cons:0x%x prod.w:%d cons.w:%d"
+smmuv3_record_event(const char *type, uint32_t sid) "%s sid=%d"
+smmuv3_find_ste(uint16_t sid, uint32_t features, uint16_t sid_split) "SID:0x%x features:0x%x, sid_split:0x%x"
+smmuv3_find_ste_2lvl(uint64_t strtab_base, uint64_t l1ptr, int l1_ste_offset, uint64_t l2ptr, int l2_ste_offset, int max_l2_ste) "strtab_base:0x%"PRIx64" l1ptr:0x%"PRIx64" l1_off:0x%x, l2ptr:0x%"PRIx64" l2_off:0x%x max_l2_ste:%d"
+smmuv3_get_ste(uint64_t addr) "STE addr: 0x%"PRIx64
+smmuv3_translate_bypass(const char *n, uint16_t sid, uint64_t addr, bool is_write) "%s sid=%d bypass iova:0x%"PRIx64" is_write=%d"
+smmuv3_translate_in(uint16_t sid, int pci_bus_num, uint64_t strtab_base) "SID:0x%x bus:%d strtab_base:0x%"PRIx64
+smmuv3_get_cd(uint64_t addr) "CD addr: 0x%"PRIx64
+smmuv3_translate(const char *n, uint16_t sid, uint64_t iova, uint64_t translated, int perm) "%s sid=%d iova=0x%"PRIx64" translated=0x%"PRIx64" perm=0x%x"
+smmuv3_decode_cd(uint32_t oas) "oas=%d"
+smmuv3_decode_cd_tt(int i, uint32_t tsz, uint64_t ttb, uint32_t granule_sz) "TT[%d]:tsz:%d ttb:0x%"PRIx64" granule_sz:%d"
diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
index c7c6a57ec5..92ceee9c0f 100644
--- a/hw/arm/virt-acpi-build.c
+++ b/hw/arm/virt-acpi-build.c
@@ -393,19 +393,26 @@ build_rsdp(GArray *rsdp_table, BIOSLinker *linker, unsigned xsdt_tbl_offset)
}
static void
-build_iort(GArray *table_data, BIOSLinker *linker)
+build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
- int iort_start = table_data->len;
+ int nb_nodes, iort_start = table_data->len;
AcpiIortIdMapping *idmap;
AcpiIortItsGroup *its;
AcpiIortTable *iort;
- size_t node_size, iort_length;
+ AcpiIortSmmu3 *smmu;
+ size_t node_size, iort_length, smmu_offset = 0;
AcpiIortRC *rc;
iort = acpi_data_push(table_data, sizeof(*iort));
+ if (vms->iommu == VIRT_IOMMU_SMMUV3) {
+ nb_nodes = 3; /* RC, ITS, SMMUv3 */
+ } else {
+ nb_nodes = 2; /* RC, ITS */
+ }
+
iort_length = sizeof(*iort);
- iort->node_count = cpu_to_le32(2); /* RC and ITS nodes */
+ iort->node_count = cpu_to_le32(nb_nodes);
iort->node_offset = cpu_to_le32(sizeof(*iort));
/* ITS group node */
@@ -418,6 +425,34 @@ build_iort(GArray *table_data, BIOSLinker *linker)
its->its_count = cpu_to_le32(1);
its->identifiers[0] = 0; /* MADT translation_id */
+ if (vms->iommu == VIRT_IOMMU_SMMUV3) {
+ int irq = vms->irqmap[VIRT_SMMU];
+
+ /* SMMUv3 node */
+ smmu_offset = iort->node_offset + node_size;
+ node_size = sizeof(*smmu) + sizeof(*idmap);
+ iort_length += node_size;
+ smmu = acpi_data_push(table_data, node_size);
+
+ smmu->type = ACPI_IORT_NODE_SMMU_V3;
+ smmu->length = cpu_to_le16(node_size);
+ smmu->mapping_count = cpu_to_le32(1);
+ smmu->mapping_offset = cpu_to_le32(sizeof(*smmu));
+ smmu->base_address = cpu_to_le64(vms->memmap[VIRT_SMMU].base);
+ smmu->event_gsiv = cpu_to_le32(irq);
+ smmu->pri_gsiv = cpu_to_le32(irq + 1);
+ smmu->gerr_gsiv = cpu_to_le32(irq + 2);
+ smmu->sync_gsiv = cpu_to_le32(irq + 3);
+
+ /* Identity RID mapping covering the whole input RID range */
+ idmap = &smmu->id_mapping_array[0];
+ idmap->input_base = 0;
+ idmap->id_count = cpu_to_le32(0xFFFF);
+ idmap->output_base = 0;
+ /* output IORT node is the ITS group node (the first node) */
+ idmap->output_reference = cpu_to_le32(iort->node_offset);
+ }
+
/* Root Complex Node */
node_size = sizeof(*rc) + sizeof(*idmap);
iort_length += node_size;
@@ -438,8 +473,14 @@ build_iort(GArray *table_data, BIOSLinker *linker)
idmap->input_base = 0;
idmap->id_count = cpu_to_le32(0xFFFF);
idmap->output_base = 0;
- /* output IORT node is the ITS group node (the first node) */
- idmap->output_reference = cpu_to_le32(iort->node_offset);
+
+ if (vms->iommu == VIRT_IOMMU_SMMUV3) {
+ /* output IORT node is the smmuv3 node */
+ idmap->output_reference = cpu_to_le32(smmu_offset);
+ } else {
+ /* output IORT node is the ITS group node (the first node) */
+ idmap->output_reference = cpu_to_le32(iort->node_offset);
+ }
iort->length = cpu_to_le32(iort_length);
@@ -777,7 +818,7 @@ void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables)
if (its_class_name() && !vmc->no_its) {
acpi_add_table(table_offsets, tables_blob);
- build_iort(tables_blob, tables->linker);
+ build_iort(tables_blob, tables->linker, vms);
}
/* XSDT is pointed to by RSDP */
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index a18291c5d5..11b9f599ca 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -58,6 +58,7 @@
#include "hw/smbios/smbios.h"
#include "qapi/visitor.h"
#include "standard-headers/linux/input.h"
+#include "hw/arm/smmuv3.h"
#define DEFINE_VIRT_MACHINE_LATEST(major, minor, latest) \
static void virt_##major##_##minor##_class_init(ObjectClass *oc, \
@@ -141,6 +142,7 @@ static const MemMapEntry a15memmap[] = {
[VIRT_FW_CFG] = { 0x09020000, 0x00000018 },
[VIRT_GPIO] = { 0x09030000, 0x00001000 },
[VIRT_SECURE_UART] = { 0x09040000, 0x00001000 },
+ [VIRT_SMMU] = { 0x09050000, 0x00020000 },
[VIRT_MMIO] = { 0x0a000000, 0x00000200 },
/* ...repeating for a total of NUM_VIRTIO_TRANSPORTS, each of that size */
[VIRT_PLATFORM_BUS] = { 0x0c000000, 0x02000000 },
@@ -161,6 +163,7 @@ static const int a15irqmap[] = {
[VIRT_SECURE_UART] = 8,
[VIRT_MMIO] = 16, /* ...to 16 + NUM_VIRTIO_TRANSPORTS - 1 */
[VIRT_GIC_V2M] = 48, /* ...to 48 + NUM_GICV2M_SPIS - 1 */
+ [VIRT_SMMU] = 74, /* ...to 74 + NUM_SMMU_IRQS - 1 */
[VIRT_PLATFORM_BUS] = 112, /* ...to 112 + PLATFORM_BUS_NUM_IRQS -1 */
};
@@ -942,7 +945,57 @@ static void create_pcie_irq_map(const VirtMachineState *vms,
0x7 /* PCI irq */);
}
-static void create_pcie(const VirtMachineState *vms, qemu_irq *pic)
+static void create_smmu(const VirtMachineState *vms, qemu_irq *pic,
+ PCIBus *bus)
+{
+ char *node;
+ const char compat[] = "arm,smmu-v3";
+ int irq = vms->irqmap[VIRT_SMMU];
+ int i;
+ hwaddr base = vms->memmap[VIRT_SMMU].base;
+ hwaddr size = vms->memmap[VIRT_SMMU].size;
+ const char irq_names[] = "eventq\0priq\0cmdq-sync\0gerror";
+ DeviceState *dev;
+
+ if (vms->iommu != VIRT_IOMMU_SMMUV3 || !vms->iommu_phandle) {
+ return;
+ }
+
+ dev = qdev_create(NULL, "arm-smmuv3");
+
+ object_property_set_link(OBJECT(dev), OBJECT(bus), "primary-bus",
+ &error_abort);
+ qdev_init_nofail(dev);
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
+ for (i = 0; i < NUM_SMMU_IRQS; i++) {
+ sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, pic[irq + i]);
+ }
+
+ node = g_strdup_printf("/smmuv3@%" PRIx64, base);
+ qemu_fdt_add_subnode(vms->fdt, node);
+ qemu_fdt_setprop(vms->fdt, node, "compatible", compat, sizeof(compat));
+ qemu_fdt_setprop_sized_cells(vms->fdt, node, "reg", 2, base, 2, size);
+
+ qemu_fdt_setprop_cells(vms->fdt, node, "interrupts",
+ GIC_FDT_IRQ_TYPE_SPI, irq , GIC_FDT_IRQ_FLAGS_EDGE_LO_HI,
+ GIC_FDT_IRQ_TYPE_SPI, irq + 1, GIC_FDT_IRQ_FLAGS_EDGE_LO_HI,
+ GIC_FDT_IRQ_TYPE_SPI, irq + 2, GIC_FDT_IRQ_FLAGS_EDGE_LO_HI,
+ GIC_FDT_IRQ_TYPE_SPI, irq + 3, GIC_FDT_IRQ_FLAGS_EDGE_LO_HI);
+
+ qemu_fdt_setprop(vms->fdt, node, "interrupt-names", irq_names,
+ sizeof(irq_names));
+
+ qemu_fdt_setprop_cell(vms->fdt, node, "clocks", vms->clock_phandle);
+ qemu_fdt_setprop_string(vms->fdt, node, "clock-names", "apb_pclk");
+ qemu_fdt_setprop(vms->fdt, node, "dma-coherent", NULL, 0);
+
+ qemu_fdt_setprop_cell(vms->fdt, node, "#iommu-cells", 1);
+
+ qemu_fdt_setprop_cell(vms->fdt, node, "phandle", vms->iommu_phandle);
+ g_free(node);
+}
+
+static void create_pcie(VirtMachineState *vms, qemu_irq *pic)
{
hwaddr base_mmio = vms->memmap[VIRT_PCIE_MMIO].base;
hwaddr size_mmio = vms->memmap[VIRT_PCIE_MMIO].size;
@@ -1023,6 +1076,7 @@ static void create_pcie(const VirtMachineState *vms, qemu_irq *pic)
qemu_fdt_setprop_string(vms->fdt, nodename, "device_type", "pci");
qemu_fdt_setprop_cell(vms->fdt, nodename, "#address-cells", 3);
qemu_fdt_setprop_cell(vms->fdt, nodename, "#size-cells", 2);
+ qemu_fdt_setprop_cell(vms->fdt, nodename, "linux,pci-domain", 0);
qemu_fdt_setprop_cells(vms->fdt, nodename, "bus-range", 0,
nr_pcie_buses - 1);
qemu_fdt_setprop(vms->fdt, nodename, "dma-coherent", NULL, 0);
@@ -1055,6 +1109,15 @@ static void create_pcie(const VirtMachineState *vms, qemu_irq *pic)
qemu_fdt_setprop_cell(vms->fdt, nodename, "#interrupt-cells", 1);
create_pcie_irq_map(vms, vms->gic_phandle, irq, nodename);
+ if (vms->iommu) {
+ vms->iommu_phandle = qemu_fdt_alloc_phandle(vms->fdt);
+
+ create_smmu(vms, pic, pci->bus);
+
+ qemu_fdt_setprop_cells(vms->fdt, nodename, "iommu-map",
+ 0x0, vms->iommu_phandle, 0x0, 0x10000);
+ }
+
g_free(nodename);
}
@@ -1498,6 +1561,34 @@ static void virt_set_gic_version(Object *obj, const char *value, Error **errp)
}
}
+static char *virt_get_iommu(Object *obj, Error **errp)
+{
+ VirtMachineState *vms = VIRT_MACHINE(obj);
+
+ switch (vms->iommu) {
+ case VIRT_IOMMU_NONE:
+ return g_strdup("none");
+ case VIRT_IOMMU_SMMUV3:
+ return g_strdup("smmuv3");
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void virt_set_iommu(Object *obj, const char *value, Error **errp)
+{
+ VirtMachineState *vms = VIRT_MACHINE(obj);
+
+ if (!strcmp(value, "smmuv3")) {
+ vms->iommu = VIRT_IOMMU_SMMUV3;
+ } else if (!strcmp(value, "none")) {
+ vms->iommu = VIRT_IOMMU_NONE;
+ } else {
+ error_setg(errp, "Invalid iommu value");
+ error_append_hint(errp, "Valid values are none, smmuv3.\n");
+ }
+}
+
static CpuInstanceProperties
virt_cpu_index_to_props(MachineState *ms, unsigned cpu_index)
{
@@ -1630,6 +1721,14 @@ static void virt_2_12_instance_init(Object *obj)
NULL);
}
+ /* Default disallows iommu instantiation */
+ vms->iommu = VIRT_IOMMU_NONE;
+ object_property_add_str(obj, "iommu", virt_get_iommu, virt_set_iommu, NULL);
+ object_property_set_description(obj, "iommu",
+ "Set the IOMMU type. "
+ "Valid values are none and smmuv3",
+ NULL);
+
vms->memmap = a15memmap;
vms->irqmap = a15irqmap;
}
diff --git a/hw/char/cmsdk-apb-uart.c b/hw/char/cmsdk-apb-uart.c
index 9c0929d8a2..ddfbb25c24 100644
--- a/hw/char/cmsdk-apb-uart.c
+++ b/hw/char/cmsdk-apb-uart.c
@@ -157,6 +157,7 @@ static uint64_t uart_read(void *opaque, hwaddr offset, unsigned size)
r = s->rxbuf;
s->state &= ~R_STATE_RXFULL_MASK;
cmsdk_apb_uart_update(s);
+ qemu_chr_fe_accept_input(&s->chr);
break;
case A_STATE:
r = s->state;
diff --git a/hw/net/smc91c111.c b/hw/net/smc91c111.c
index 3b16dcf5a1..c8cc5379b7 100644
--- a/hw/net/smc91c111.c
+++ b/hw/net/smc91c111.c
@@ -625,37 +625,33 @@ static uint32_t smc91c111_readb(void *opaque, hwaddr offset)
return 0;
}
-static void smc91c111_writew(void *opaque, hwaddr offset,
- uint32_t value)
-{
- smc91c111_writeb(opaque, offset, value & 0xff);
- smc91c111_writeb(opaque, offset + 1, value >> 8);
-}
-
-static void smc91c111_writel(void *opaque, hwaddr offset,
- uint32_t value)
+static uint64_t smc91c111_readfn(void *opaque, hwaddr addr, unsigned size)
{
- /* 32-bit writes to offset 0xc only actually write to the bank select
- register (offset 0xe) */
- if (offset != 0xc)
- smc91c111_writew(opaque, offset, value & 0xffff);
- smc91c111_writew(opaque, offset + 2, value >> 16);
-}
+ int i;
+ uint32_t val = 0;
-static uint32_t smc91c111_readw(void *opaque, hwaddr offset)
-{
- uint32_t val;
- val = smc91c111_readb(opaque, offset);
- val |= smc91c111_readb(opaque, offset + 1) << 8;
+ for (i = 0; i < size; i++) {
+ val |= smc91c111_readb(opaque, addr + i) << (i * 8);
+ }
return val;
}
-static uint32_t smc91c111_readl(void *opaque, hwaddr offset)
+static void smc91c111_writefn(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size)
{
- uint32_t val;
- val = smc91c111_readw(opaque, offset);
- val |= smc91c111_readw(opaque, offset + 2) << 16;
- return val;
+ int i = 0;
+
+ /* 32-bit writes to offset 0xc only actually write to the bank select
+ * register (offset 0xe), so skip the first two bytes we would write.
+ */
+ if (addr == 0xc && size == 4) {
+ i += 2;
+ }
+
+ for (; i < size; i++) {
+ smc91c111_writeb(opaque, addr + i,
+ extract32(value, i * 8, 8));
+ }
}
static int smc91c111_can_receive_nc(NetClientState *nc)
@@ -747,10 +743,10 @@ static const MemoryRegionOps smc91c111_mem_ops = {
/* The special case for 32 bit writes to 0xc means we can't just
* set .impl.min/max_access_size to 1, unfortunately
*/
- .old_mmio = {
- .read = { smc91c111_readb, smc91c111_readw, smc91c111_readl, },
- .write = { smc91c111_writeb, smc91c111_writew, smc91c111_writel, },
- },
+ .read = smc91c111_readfn,
+ .write = smc91c111_writefn,
+ .valid.min_access_size = 1,
+ .valid.max_access_size = 4,
.endianness = DEVICE_NATIVE_ENDIAN,
};
diff --git a/hw/usb/tusb6010.c b/hw/usb/tusb6010.c
index 2662c060ed..a2128024c1 100644
--- a/hw/usb/tusb6010.c
+++ b/hw/usb/tusb6010.c
@@ -641,11 +641,43 @@ static void tusb_async_writew(void *opaque, hwaddr addr,
}
}
+static uint64_t tusb_async_readfn(void *opaque, hwaddr addr, unsigned size)
+{
+ switch (size) {
+ case 1:
+ return tusb_async_readb(opaque, addr);
+ case 2:
+ return tusb_async_readh(opaque, addr);
+ case 4:
+ return tusb_async_readw(opaque, addr);
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void tusb_async_writefn(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size)
+{
+ switch (size) {
+ case 1:
+ tusb_async_writeb(opaque, addr, value);
+ break;
+ case 2:
+ tusb_async_writeh(opaque, addr, value);
+ break;
+ case 4:
+ tusb_async_writew(opaque, addr, value);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
static const MemoryRegionOps tusb_async_ops = {
- .old_mmio = {
- .read = { tusb_async_readb, tusb_async_readh, tusb_async_readw, },
- .write = { tusb_async_writeb, tusb_async_writeh, tusb_async_writew, },
- },
+ .read = tusb_async_readfn,
+ .write = tusb_async_writefn,
+ .valid.min_access_size = 1,
+ .valid.max_access_size = 4,
.endianness = DEVICE_NATIVE_ENDIAN,
};
diff --git a/include/hw/acpi/acpi-defs.h b/include/hw/acpi/acpi-defs.h
index 5955eb4fc0..af8e023968 100644
--- a/include/hw/acpi/acpi-defs.h
+++ b/include/hw/acpi/acpi-defs.h
@@ -628,6 +628,21 @@ struct AcpiIortItsGroup {
} QEMU_PACKED;
typedef struct AcpiIortItsGroup AcpiIortItsGroup;
+struct AcpiIortSmmu3 {
+ ACPI_IORT_NODE_HEADER_DEF
+ uint64_t base_address;
+ uint32_t flags;
+ uint32_t reserved2;
+ uint64_t vatos_address;
+ uint32_t model;
+ uint32_t event_gsiv;
+ uint32_t pri_gsiv;
+ uint32_t gerr_gsiv;
+ uint32_t sync_gsiv;
+ AcpiIortIdMapping id_mapping_array[0];
+} QEMU_PACKED;
+typedef struct AcpiIortSmmu3 AcpiIortSmmu3;
+
struct AcpiIortRC {
ACPI_IORT_NODE_HEADER_DEF
AcpiIortMemoryAccess memory_properties;
diff --git a/include/hw/arm/smmu-common.h b/include/hw/arm/smmu-common.h
new file mode 100644
index 0000000000..c41eb5c3b0
--- /dev/null
+++ b/include/hw/arm/smmu-common.h
@@ -0,0 +1,145 @@
+/*
+ * ARM SMMU Support
+ *
+ * Copyright (C) 2015-2016 Broadcom Corporation
+ * Copyright (c) 2017 Red Hat, Inc.
+ * Written by Prem Mallappa, Eric Auger
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef HW_ARM_SMMU_COMMON_H
+#define HW_ARM_SMMU_COMMON_H
+
+#include "hw/sysbus.h"
+#include "hw/pci/pci.h"
+
+#define SMMU_PCI_BUS_MAX 256
+#define SMMU_PCI_DEVFN_MAX 256
+
+#define SMMU_MAX_VA_BITS 48
+
+/*
+ * Page table walk error types
+ */
+typedef enum {
+ SMMU_PTW_ERR_NONE,
+ SMMU_PTW_ERR_WALK_EABT, /* Translation walk external abort */
+ SMMU_PTW_ERR_TRANSLATION, /* Translation fault */
+ SMMU_PTW_ERR_ADDR_SIZE, /* Address Size fault */
+ SMMU_PTW_ERR_ACCESS, /* Access fault */
+ SMMU_PTW_ERR_PERMISSION, /* Permission fault */
+} SMMUPTWEventType;
+
+typedef struct SMMUPTWEventInfo {
+ SMMUPTWEventType type;
+ dma_addr_t addr; /* fetched address that induced an abort, if any */
+} SMMUPTWEventInfo;
+
+typedef struct SMMUTransTableInfo {
+ bool disabled; /* is the translation table disabled? */
+ uint64_t ttb; /* TT base address */
+ uint8_t tsz; /* input range, ie. 2^(64 -tsz)*/
+ uint8_t granule_sz; /* granule page shift */
+} SMMUTransTableInfo;
+
+/*
+ * Generic structure populated by derived SMMU devices
+ * after decoding the configuration information and used as
+ * input to the page table walk
+ */
+typedef struct SMMUTransCfg {
+ int stage; /* translation stage */
+ bool aa64; /* arch64 or aarch32 translation table */
+ bool disabled; /* smmu is disabled */
+ bool bypassed; /* translation is bypassed */
+ bool aborted; /* translation is aborted */
+ uint64_t ttb; /* TT base address */
+ uint8_t oas; /* output address width */
+ uint8_t tbi; /* Top Byte Ignore */
+ uint16_t asid;
+ SMMUTransTableInfo tt[2];
+} SMMUTransCfg;
+
+typedef struct SMMUDevice {
+ void *smmu;
+ PCIBus *bus;
+ int devfn;
+ IOMMUMemoryRegion iommu;
+ AddressSpace as;
+} SMMUDevice;
+
+typedef struct SMMUNotifierNode {
+ SMMUDevice *sdev;
+ QLIST_ENTRY(SMMUNotifierNode) next;
+} SMMUNotifierNode;
+
+typedef struct SMMUPciBus {
+ PCIBus *bus;
+ SMMUDevice *pbdev[0]; /* Parent array is sparse, so dynamically alloc */
+} SMMUPciBus;
+
+typedef struct SMMUState {
+ /* <private> */
+ SysBusDevice dev;
+ const char *mrtypename;
+ MemoryRegion iomem;
+
+ GHashTable *smmu_pcibus_by_busptr;
+ GHashTable *configs; /* cache for configuration data */
+ GHashTable *iotlb;
+ SMMUPciBus *smmu_pcibus_by_bus_num[SMMU_PCI_BUS_MAX];
+ PCIBus *pci_bus;
+ QLIST_HEAD(, SMMUNotifierNode) notifiers_list;
+ uint8_t bus_num;
+ PCIBus *primary_bus;
+} SMMUState;
+
+typedef struct {
+ /* <private> */
+ SysBusDeviceClass parent_class;
+
+ /*< public >*/
+
+ DeviceRealize parent_realize;
+
+} SMMUBaseClass;
+
+#define TYPE_ARM_SMMU "arm-smmu"
+#define ARM_SMMU(obj) OBJECT_CHECK(SMMUState, (obj), TYPE_ARM_SMMU)
+#define ARM_SMMU_CLASS(klass) \
+ OBJECT_CLASS_CHECK(SMMUBaseClass, (klass), TYPE_ARM_SMMU)
+#define ARM_SMMU_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(SMMUBaseClass, (obj), TYPE_ARM_SMMU)
+
+/* Return the SMMUPciBus handle associated to a PCI bus number */
+SMMUPciBus *smmu_find_smmu_pcibus(SMMUState *s, uint8_t bus_num);
+
+/* Return the stream ID of an SMMU device */
+static inline uint16_t smmu_get_sid(SMMUDevice *sdev)
+{
+ return PCI_BUILD_BDF(pci_bus_num(sdev->bus), sdev->devfn);
+}
+
+/**
+ * smmu_ptw - Perform the page table walk for a given iova / access flags
+ * pair, according to @cfg translation config
+ */
+int smmu_ptw(SMMUTransCfg *cfg, dma_addr_t iova, IOMMUAccessFlags perm,
+ IOMMUTLBEntry *tlbe, SMMUPTWEventInfo *info);
+
+/**
+ * select_tt - compute which translation table shall be used according to
+ * the input iova and translation config and return the TT specific info
+ */
+SMMUTransTableInfo *select_tt(SMMUTransCfg *cfg, dma_addr_t iova);
+
+#endif /* HW_ARM_SMMU_COMMON */
diff --git a/include/hw/arm/smmuv3.h b/include/hw/arm/smmuv3.h
new file mode 100644
index 0000000000..23f70363e5
--- /dev/null
+++ b/include/hw/arm/smmuv3.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2017 Red Hat, Inc.
+ * Written by Prem Mallappa, Eric Auger
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_ARM_SMMUV3_H
+#define HW_ARM_SMMUV3_H
+
+#include "hw/arm/smmu-common.h"
+#include "hw/registerfields.h"
+
+#define TYPE_SMMUV3_IOMMU_MEMORY_REGION "smmuv3-iommu-memory-region"
+
+typedef struct SMMUQueue {
+ uint64_t base; /* base register */
+ uint32_t prod;
+ uint32_t cons;
+ uint8_t entry_size;
+ uint8_t log2size;
+} SMMUQueue;
+
+typedef struct SMMUv3State {
+ SMMUState smmu_state;
+
+ uint32_t features;
+ uint8_t sid_size;
+ uint8_t sid_split;
+
+ uint32_t idr[6];
+ uint32_t iidr;
+ uint32_t cr[3];
+ uint32_t cr0ack;
+ uint32_t statusr;
+ uint32_t irq_ctrl;
+ uint32_t gerror;
+ uint32_t gerrorn;
+ uint64_t gerror_irq_cfg0;
+ uint32_t gerror_irq_cfg1;
+ uint32_t gerror_irq_cfg2;
+ uint64_t strtab_base;
+ uint32_t strtab_base_cfg;
+ uint64_t eventq_irq_cfg0;
+ uint32_t eventq_irq_cfg1;
+ uint32_t eventq_irq_cfg2;
+
+ SMMUQueue eventq, cmdq;
+
+ qemu_irq irq[4];
+} SMMUv3State;
+
+typedef enum {
+ SMMU_IRQ_EVTQ,
+ SMMU_IRQ_PRIQ,
+ SMMU_IRQ_CMD_SYNC,
+ SMMU_IRQ_GERROR,
+} SMMUIrq;
+
+typedef struct {
+ /*< private >*/
+ SMMUBaseClass smmu_base_class;
+ /*< public >*/
+
+ DeviceRealize parent_realize;
+ DeviceReset parent_reset;
+} SMMUv3Class;
+
+#define TYPE_ARM_SMMUV3 "arm-smmuv3"
+#define ARM_SMMUV3(obj) OBJECT_CHECK(SMMUv3State, (obj), TYPE_ARM_SMMUV3)
+#define ARM_SMMUV3_CLASS(klass) \
+ OBJECT_CLASS_CHECK(SMMUv3Class, (klass), TYPE_ARM_SMMUV3)
+#define ARM_SMMUV3_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(SMMUv3Class, (obj), TYPE_ARM_SMMUV3)
+
+#endif
diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h
index ba0c1a4faa..886372cdbb 100644
--- a/include/hw/arm/virt.h
+++ b/include/hw/arm/virt.h
@@ -38,6 +38,7 @@
#define NUM_GICV2M_SPIS 64
#define NUM_VIRTIO_TRANSPORTS 32
+#define NUM_SMMU_IRQS 4
#define ARCH_GICV3_MAINT_IRQ 9
@@ -59,6 +60,7 @@ enum {
VIRT_GIC_V2M,
VIRT_GIC_ITS,
VIRT_GIC_REDIST,
+ VIRT_SMMU,
VIRT_UART,
VIRT_MMIO,
VIRT_RTC,
@@ -74,6 +76,12 @@ enum {
VIRT_SECURE_MEM,
};
+typedef enum VirtIOMMUType {
+ VIRT_IOMMU_NONE,
+ VIRT_IOMMU_SMMUV3,
+ VIRT_IOMMU_VIRTIO,
+} VirtIOMMUType;
+
typedef struct MemMapEntry {
hwaddr base;
hwaddr size;
@@ -97,6 +105,7 @@ typedef struct {
bool its;
bool virt;
int32_t gic_version;
+ VirtIOMMUType iommu;
struct arm_boot_info bootinfo;
const MemMapEntry *memmap;
const int *irqmap;
@@ -106,6 +115,7 @@ typedef struct {
uint32_t clock_phandle;
uint32_t gic_phandle;
uint32_t msi_phandle;
+ uint32_t iommu_phandle;
int psci_conduit;
} VirtMachineState;
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 52a88e0297..0fef5d4d06 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -5347,7 +5347,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
r->access = PL1_RW;
}
- id_tlbtr_reginfo.access = PL1_RW;
+ id_mpuir_reginfo.access = PL1_RW;
id_tlbtr_reginfo.access = PL1_RW;
}
if (arm_feature(env, ARM_FEATURE_V8)) {
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
index ecc39ac295..5141d0adc5 100644
--- a/target/arm/kvm.c
+++ b/target/arm/kvm.c
@@ -20,8 +20,10 @@
#include "sysemu/kvm.h"
#include "kvm_arm.h"
#include "cpu.h"
+#include "trace.h"
#include "internals.h"
#include "hw/arm/arm.h"
+#include "hw/pci/pci.h"
#include "exec/memattrs.h"
#include "exec/address-spaces.h"
#include "hw/boards.h"
@@ -649,7 +651,41 @@ int kvm_arm_vgic_probe(void)
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
uint64_t address, uint32_t data, PCIDevice *dev)
{
- return 0;
+ AddressSpace *as = pci_device_iommu_address_space(dev);
+ hwaddr xlat, len, doorbell_gpa;
+ MemoryRegionSection mrs;
+ MemoryRegion *mr;
+ int ret = 1;
+
+ if (as == &address_space_memory) {
+ return 0;
+ }
+
+ /* MSI doorbell address is translated by an IOMMU */
+
+ rcu_read_lock();
+ mr = address_space_translate(as, address, &xlat, &len, true);
+ if (!mr) {
+ goto unlock;
+ }
+ mrs = memory_region_find(mr, xlat, 1);
+ if (!mrs.mr) {
+ goto unlock;
+ }
+
+ doorbell_gpa = mrs.offset_within_address_space;
+ memory_region_unref(mrs.mr);
+
+ route->u.msi.address_lo = doorbell_gpa;
+ route->u.msi.address_hi = doorbell_gpa >> 32;
+
+ trace_kvm_arm_fixup_msi_route(address, doorbell_gpa);
+
+ ret = 0;
+
+unlock:
+ rcu_read_unlock();
+ return ret;
}
int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
diff --git a/target/arm/trace-events b/target/arm/trace-events
index 9e37131115..6b759f9d4f 100644
--- a/target/arm/trace-events
+++ b/target/arm/trace-events
@@ -8,3 +8,6 @@ arm_gt_tval_write(int timer, uint64_t value) "gt_tval_write: timer %d value 0x%"
arm_gt_ctl_write(int timer, uint64_t value) "gt_ctl_write: timer %d value 0x%" PRIx64
arm_gt_imask_toggle(int timer, int irqstate) "gt_ctl_write: timer %d IMASK toggle, new irqstate %d"
arm_gt_cntvoff_write(uint64_t value) "gt_cntvoff_write: value 0x%" PRIx64
+
+# target/arm/kvm.c
+kvm_arm_fixup_msi_route(uint64_t iova, uint64_t gpa) "MSI iova = 0x%"PRIx64" is translated into 0x%"PRIx64
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index bff4e13bf6..6d49f30b4a 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -9019,11 +9019,7 @@ static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
unallocated_encoding(s);
return;
}
-
- if (size > 3 && !is_q) {
- unallocated_encoding(s);
- return;
- }
+ tcg_debug_assert(size <= 3);
if (!fp_access_check(s)) {
return;
@@ -11477,7 +11473,11 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
/* All 64-bit element operations can be shared with scalar 2misc */
int pass;
- for (pass = 0; pass < (is_q ? 2 : 1); pass++) {
+ /* Coverity claims (size == 3 && !is_q) has been eliminated
+ * from all paths leading to here.
+ */
+ tcg_debug_assert(is_q);
+ for (pass = 0; pass < 2; pass++) {
TCGv_i64 tcg_op = tcg_temp_new_i64();
TCGv_i64 tcg_res = tcg_temp_new_i64();
diff --git a/target/arm/translate.c b/target/arm/translate.c
index 9bc2ce1a0b..ad208867a7 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -10795,8 +10795,23 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
/* Coprocessor. */
if (arm_dc_feature(s, ARM_FEATURE_M)) {
/* We don't currently implement M profile FP support,
- * so this entire space should give a NOCP fault.
+ * so this entire space should give a NOCP fault, with
+ * the exception of the v8M VLLDM and VLSTM insns, which
+ * must be NOPs in Secure state and UNDEF in Nonsecure state.
*/
+ if (arm_dc_feature(s, ARM_FEATURE_V8) &&
+ (insn & 0xffa00f00) == 0xec200a00) {
+ /* 0b1110_1100_0x1x_xxxx_xxxx_1010_xxxx_xxxx
+ * - VLLDM, VLSTM
+ * We choose to UNDEF if the RAZ bits are non-zero.
+ */
+ if (!s->v8m_secure || (insn & 0x0040f0ff)) {
+ goto illegal_op;
+ }
+ /* Just NOP since FP support is not implemented */
+ break;
+ }
+ /* All other insns: NOCP */
gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
default_exception_el(s));
break;