Diffstat (limited to 'hw/vfio/common.c')
-rw-r--r--   hw/vfio/common.c   77
1 file changed, 71 insertions(+), 6 deletions(-)
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index f895e3c335..5e84716218 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -544,18 +544,40 @@ static void vfio_listener_region_add(MemoryListener *listener,
llsize = int128_sub(llend, int128_make64(iova));
+ if (memory_region_is_ram_device(section->mr)) {
+ hwaddr pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;
+
+ if ((iova & pgmask) || (int128_get64(llsize) & pgmask)) {
+ error_report("Region 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx
+ " is not aligned to 0x%"HWADDR_PRIx
+ " and cannot be mapped for DMA",
+ section->offset_within_region,
+ int128_getlo(section->size),
+ pgmask + 1);
+ return;
+ }
+ }
+
ret = vfio_dma_map(container, iova, int128_get64(llsize),
vaddr, section->readonly);
if (ret) {
error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
"0x%"HWADDR_PRIx", %p) = %d (%m)",
container, iova, int128_get64(llsize), vaddr, ret);
+ if (memory_region_is_ram_device(section->mr)) {
+ /* Allow unexpected mappings not to be fatal for RAM devices */
+ return;
+ }
goto fail;
}
return;
fail:
+ if (memory_region_is_ram_device(section->mr)) {
+ error_report("failed to vfio_dma_map. pci p2p may not work");
+ return;
+ }
/*
* On the initfn path, store the first error in the container so we
* can gracefully fail. Runtime, there's not much we can do other
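
The check added in this hunk rejects a ram-device section whose IOVA or size is not aligned to the smallest IOMMU page size the host window supports, since such a range cannot be mapped at all. A minimal standalone sketch of the same mask arithmetic (the function name and sample values below are illustrative, not part of the patch):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* The smallest supported page size is the lowest set bit of
     * iova_pgsizes; pgmask selects the offset bits inside that page. */
    static bool dma_range_aligned(uint64_t iova, uint64_t size,
                                  uint64_t iova_pgsizes)
    {
        uint64_t pgmask = (1ULL << __builtin_ctzll(iova_pgsizes)) - 1;

        return !((iova & pgmask) || (size & pgmask));
    }

    int main(void)
    {
        uint64_t pgsizes = 1ULL << 16;   /* host IOMMU supports 64 KiB pages */

        printf("%d\n", dma_range_aligned(0x100000, 0x20000, pgsizes)); /* 1: aligned */
        printf("%d\n", dma_range_aligned(0x101000, 0x20000, pgsizes)); /* 0: IOVA unaligned */
        return 0;
    }
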
@@ -577,6 +599,7 @@ static void vfio_listener_region_del(MemoryListener *listener,
hwaddr iova, end;
Int128 llend, llsize;
int ret;
+ bool try_unmap = true;
if (vfio_listener_skipped_section(section)) {
trace_vfio_listener_region_del_skip(
@@ -629,14 +652,34 @@ static void vfio_listener_region_del(MemoryListener *listener,
trace_vfio_listener_region_del(iova, end);
- ret = vfio_dma_unmap(container, iova, int128_get64(llsize));
- memory_region_unref(section->mr);
- if (ret) {
- error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
- "0x%"HWADDR_PRIx") = %d (%m)",
- container, iova, int128_get64(llsize), ret);
+ if (memory_region_is_ram_device(section->mr)) {
+ hwaddr pgmask;
+ VFIOHostDMAWindow *hostwin;
+ bool hostwin_found = false;
+
+ QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
+ if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
+ hostwin_found = true;
+ break;
+ }
+ }
+ assert(hostwin_found); /* or region_add() would have failed */
+
+ pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;
+ try_unmap = !((iova & pgmask) || (int128_get64(llsize) & pgmask));
}
+ if (try_unmap) {
+ ret = vfio_dma_unmap(container, iova, int128_get64(llsize));
+ if (ret) {
+ error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
+ "0x%"HWADDR_PRIx") = %d (%m)",
+ container, iova, int128_get64(llsize), ret);
+ }
+ }
+
+ memory_region_unref(section->mr);
+
if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
vfio_spapr_remove_window(container,
section->offset_within_address_space);
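
On the removal side the same alignment test decides whether an unmap is attempted at all: a ram-device section that region_add() could not have mapped is simply skipped, while memory_region_unref() now runs unconditionally. A self-contained sketch of that decision (the struct and helper below are illustrative stand-ins, not QEMU's VFIOHostDMAWindow):

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct dma_window {                 /* illustrative stand-in */
        uint64_t min_iova;
        uint64_t max_iova;              /* inclusive upper bound of the window */
        uint64_t iova_pgsizes;          /* bitmap of page sizes the host accepts */
    };

    /* Find the window covering [iova, end]; only attempt the unmap when the
     * range is aligned to that window's smallest page size, mirroring the
     * check region_add() performed when mapping. */
    static bool should_try_unmap(const struct dma_window *wins, size_t nwins,
                                 uint64_t iova, uint64_t end, uint64_t size)
    {
        const struct dma_window *found = NULL;

        for (size_t i = 0; i < nwins; i++) {
            if (wins[i].min_iova <= iova && end <= wins[i].max_iova) {
                found = &wins[i];
                break;
            }
        }
        assert(found); /* region_add() would have rejected the section otherwise */

        uint64_t pgmask = (1ULL << __builtin_ctzll(found->iova_pgsizes)) - 1;
        return !((iova & pgmask) || (size & pgmask));
    }
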
@@ -858,6 +901,13 @@ void vfio_region_finalize(VFIORegion *region)
g_free(region->mmaps);
trace_vfio_region_finalize(region->vbasedev->name, region->nr);
+
+ region->mem = NULL;
+ region->mmaps = NULL;
+ region->nr_mmaps = 0;
+ region->size = 0;
+ region->flags = 0;
+ region->nr = 0;
}
void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled)
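
Clearing the VFIORegion fields after freeing them leaves the structure in a well-defined empty state, so a later caller (or a second finalize) sees NULL pointers and zero sizes instead of dangling values. The pattern in miniature, using an illustrative type rather than QEMU's VFIORegion:

    #include <stdlib.h>

    struct region_sketch {              /* illustrative, not VFIORegion */
        void *mem;
        void *mmaps;
        unsigned nr_mmaps;
        unsigned long size;
    };

    static void region_finalize_sketch(struct region_sketch *r)
    {
        free(r->mmaps);
        free(r->mem);

        /* Reset the bookkeeping so the region reads as empty afterwards. */
        r->mem = NULL;
        r->mmaps = NULL;
        r->nr_mmaps = 0;
        r->size = 0;
    }
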
@@ -1421,6 +1471,21 @@ int vfio_get_dev_region_info(VFIODevice *vbasedev, uint32_t type,
return -ENODEV;
}
+bool vfio_has_region_cap(VFIODevice *vbasedev, int region, uint16_t cap_type)
+{
+ struct vfio_region_info *info = NULL;
+ bool ret = false;
+
+ if (!vfio_get_region_info(vbasedev, region, &info)) {
+ if (vfio_get_region_info_cap(info, cap_type)) {
+ ret = true;
+ }
+ g_free(info);
+ }
+
+ return ret;
+}
+
/*
* Interfaces for IBM EEH (Enhanced Error Handling)
*/
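
The new vfio_has_region_cap() helper collapses the get-info/check-cap/free sequence into a single boolean query. A hedged usage sketch (the caller name is hypothetical; VFIO_REGION_INFO_CAP_MSIX_MAPPABLE is one capability type from <linux/vfio.h> that a vfio-pci caller might probe this way, and VFIODevice comes from hw/vfio/vfio-common.h):

    #include <stdbool.h>
    #include <linux/vfio.h>

    /* Hypothetical caller: ask whether a BAR region advertises the
     * "MSI-X table may be mmapped" capability before enabling mmap. */
    static bool vfio_bar_msix_mappable(VFIODevice *vbasedev, int bar_region)
    {
        return vfio_has_region_cap(vbasedev, bar_region,
                                   VFIO_REGION_INFO_CAP_MSIX_MAPPABLE);
    }
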