author     Paul Durrant <paul.durrant@citrix.com>        2016-08-01 10:16:25 +0100
committer  Stefano Stabellini <sstabellini@kernel.org>   2016-08-12 16:38:30 -0700
commit     b7665c6027c972c23668ee74b878b5c617218514 (patch)
tree       19e37bd685e3a9c46805d2b86a727ea9fd59263a /include
parent     c4f68f0b52f4d84658ee977eb7e44a80857e0b50 (diff)
download   qemu-b7665c6027c972c23668ee74b878b5c617218514.tar.gz
xen: handle inbound migration of VMs without ioreq server pages
VMs created on older versions of Xen will not have been provisioned with
pages to support the creation of non-default ioreq servers. In this case the
ioreq server API is not supported and QEMU's only option is to fall back to
using the default ioreq server pages, as it did prior to commit 3996e85c
("Xen: Use the ioreq-server API when available").

This patch therefore changes the code in xen_common.h to stop considering a
failure of xc_hvm_create_ioreq_server() as a hard failure and instead to
treat it as an indication that the guest is too old to support the ioreq
server API. In that case a boolean is set to revert to the old behaviour,
so that the default ioreq server is used.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Signed-off-by: Stefano Stabellini <sstabellini@kernel.org>
Acked-by: Anthony PERARD <anthony.perard@citrix.com>
Acked-by: Stefano Stabellini <sstabellini@kernel.org>
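As a reading aid for the diff below, here is a condensed, illustrative sketch of the fallback pattern the patch introduces. The libxenctrl calls (xc_hvm_create_ioreq_server(), xc_hvm_get_ioreq_server_info(), xc_get_hvm_param()) and the HVM_PARAM_* / HVM_IOREQSRV_* constants are the ones used in the patch itself; the helper names create_ioreq_server() and get_ioreq_server_info(), the omission of the trace calls, and the stand-alone framing are simplifications that assume the Xen >= 4.5 libxenctrl headers. It is a sketch of the mechanism, not the code as merged.

#include <stdbool.h>
#include <stdio.h>
#include <xenctrl.h>   /* xc_interface, domid_t, ioservid_t, xc_hvm_*; also pulls
                        * in the Xen public headers that define HVM_PARAM_* and
                        * HVM_IOREQSRV_BUFIOREQ_ATOMIC, as xen_common.h relies on. */

/* Set once xc_hvm_create_ioreq_server() fails: the guest predates the
 * ioreq server API, so QEMU reverts to the default ioreq server. */
static bool use_default_ioreq_server;

static void create_ioreq_server(xc_interface *xc, domid_t dom,
                                ioservid_t *ioservid)
{
    if (xc_hvm_create_ioreq_server(xc, dom, HVM_IOREQSRV_BUFIOREQ_ATOMIC,
                                   ioservid) == 0) {
        return;                 /* non-default ioreq server created */
    }

    *ioservid = 0;              /* too-old guest: fall back to the default server */
    use_default_ioreq_server = true;
}

static int get_ioreq_server_info(xc_interface *xc, domid_t dom,
                                 ioservid_t ioservid,
                                 xen_pfn_t *ioreq_pfn,
                                 xen_pfn_t *bufioreq_pfn,
                                 evtchn_port_t *bufioreq_evtchn)
{
    unsigned long param;

    if (!use_default_ioreq_server) {
        /* Normal path: query the non-default ioreq server created above. */
        return xc_hvm_get_ioreq_server_info(xc, dom, ioservid, ioreq_pfn,
                                            bufioreq_pfn, bufioreq_evtchn);
    }

    /* Legacy path: the default server's pages come from HVM params,
     * exactly as QEMU did before the ioreq-server API existed. */
    if (xc_get_hvm_param(xc, dom, HVM_PARAM_IOREQ_PFN, &param) < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_IOREQ_PFN\n");
        return -1;
    }
    *ioreq_pfn = param;

    if (xc_get_hvm_param(xc, dom, HVM_PARAM_BUFIOREQ_PFN, &param) < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_PFN\n");
        return -1;
    }
    *bufioreq_pfn = param;

    if (xc_get_hvm_param(xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN, &param) < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
        return -1;
    }
    *bufioreq_evtchn = param;

    return 0;
}

The same use_default_ioreq_server guard also turns the map/unmap and set-state wrappers into no-ops, as the hunks below show.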
Diffstat (limited to 'include')
-rw-r--r--   include/hw/xen/xen_common.h   125
1 file changed, 90 insertions(+), 35 deletions(-)
diff --git a/include/hw/xen/xen_common.h b/include/hw/xen/xen_common.h
index 640c31e2c1..bd39287b8f 100644
--- a/include/hw/xen/xen_common.h
+++ b/include/hw/xen/xen_common.h
@@ -107,6 +107,44 @@ static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
#endif
+static inline int xen_get_default_ioreq_server_info(xc_interface *xc,
+ domid_t dom,
+ xen_pfn_t *ioreq_pfn,
+ xen_pfn_t *bufioreq_pfn,
+ evtchn_port_t
+ *bufioreq_evtchn)
+{
+ unsigned long param;
+ int rc;
+
+ rc = xc_get_hvm_param(xc, dom, HVM_PARAM_IOREQ_PFN, &param);
+ if (rc < 0) {
+ fprintf(stderr, "failed to get HVM_PARAM_IOREQ_PFN\n");
+ return -1;
+ }
+
+ *ioreq_pfn = param;
+
+ rc = xc_get_hvm_param(xc, dom, HVM_PARAM_BUFIOREQ_PFN, &param);
+ if (rc < 0) {
+ fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_PFN\n");
+ return -1;
+ }
+
+ *bufioreq_pfn = param;
+
+ rc = xc_get_hvm_param(xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN,
+ &param);
+ if (rc < 0) {
+ fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
+ return -1;
+ }
+
+ *bufioreq_evtchn = param;
+
+ return 0;
+}
+
/* Xen before 4.5 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 450
@@ -154,10 +192,9 @@ static inline void xen_unmap_pcidev(xc_interface *xc, domid_t dom,
{
}
-static inline int xen_create_ioreq_server(xc_interface *xc, domid_t dom,
- ioservid_t *ioservid)
+static inline void xen_create_ioreq_server(xc_interface *xc, domid_t dom,
+ ioservid_t *ioservid)
{
- return 0;
}
static inline void xen_destroy_ioreq_server(xc_interface *xc, domid_t dom,
@@ -171,35 +208,8 @@ static inline int xen_get_ioreq_server_info(xc_interface *xc, domid_t dom,
xen_pfn_t *bufioreq_pfn,
evtchn_port_t *bufioreq_evtchn)
{
- unsigned long param;
- int rc;
-
- rc = xc_get_hvm_param(xc, dom, HVM_PARAM_IOREQ_PFN, &param);
- if (rc < 0) {
- fprintf(stderr, "failed to get HVM_PARAM_IOREQ_PFN\n");
- return -1;
- }
-
- *ioreq_pfn = param;
-
- rc = xc_get_hvm_param(xc, dom, HVM_PARAM_BUFIOREQ_PFN, &param);
- if (rc < 0) {
- fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_PFN\n");
- return -1;
- }
-
- *bufioreq_pfn = param;
-
- rc = xc_get_hvm_param(xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN,
- &param);
- if (rc < 0) {
- fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
- return -1;
- }
-
- *bufioreq_evtchn = param;
-
- return 0;
+ return xen_get_default_ioreq_server_info(xc, dom, ioreq_pfn, bufioreq_pfn,
+ bufioreq_evtchn);
}
static inline int xen_set_ioreq_server_state(xc_interface *xc, domid_t dom,
@@ -212,6 +222,8 @@ static inline int xen_set_ioreq_server_state(xc_interface *xc, domid_t dom,
/* Xen 4.5 */
#else
+static bool use_default_ioreq_server;
+
static inline void xen_map_memory_section(xc_interface *xc, domid_t dom,
ioservid_t ioservid,
MemoryRegionSection *section)
@@ -220,6 +232,10 @@ static inline void xen_map_memory_section(xc_interface *xc, domid_t dom,
ram_addr_t size = int128_get64(section->size);
hwaddr end_addr = start_addr + size - 1;
+ if (use_default_ioreq_server) {
+ return;
+ }
+
trace_xen_map_mmio_range(ioservid, start_addr, end_addr);
xc_hvm_map_io_range_to_ioreq_server(xc, dom, ioservid, 1,
start_addr, end_addr);
@@ -233,6 +249,11 @@ static inline void xen_unmap_memory_section(xc_interface *xc, domid_t dom,
ram_addr_t size = int128_get64(section->size);
hwaddr end_addr = start_addr + size - 1;
+ if (use_default_ioreq_server) {
+ return;
+ }
+
+
trace_xen_unmap_mmio_range(ioservid, start_addr, end_addr);
xc_hvm_unmap_io_range_from_ioreq_server(xc, dom, ioservid, 1,
start_addr, end_addr);
@@ -246,6 +267,11 @@ static inline void xen_map_io_section(xc_interface *xc, domid_t dom,
ram_addr_t size = int128_get64(section->size);
hwaddr end_addr = start_addr + size - 1;
+ if (use_default_ioreq_server) {
+ return;
+ }
+
+
trace_xen_map_portio_range(ioservid, start_addr, end_addr);
xc_hvm_map_io_range_to_ioreq_server(xc, dom, ioservid, 0,
start_addr, end_addr);
@@ -259,6 +285,10 @@ static inline void xen_unmap_io_section(xc_interface *xc, domid_t dom,
ram_addr_t size = int128_get64(section->size);
hwaddr end_addr = start_addr + size - 1;
+ if (use_default_ioreq_server) {
+ return;
+ }
+
trace_xen_unmap_portio_range(ioservid, start_addr, end_addr);
xc_hvm_unmap_io_range_from_ioreq_server(xc, dom, ioservid, 0,
start_addr, end_addr);
@@ -268,6 +298,10 @@ static inline void xen_map_pcidev(xc_interface *xc, domid_t dom,
ioservid_t ioservid,
PCIDevice *pci_dev)
{
+ if (use_default_ioreq_server) {
+ return;
+ }
+
trace_xen_map_pcidev(ioservid, pci_bus_num(pci_dev->bus),
PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
xc_hvm_map_pcidev_to_ioreq_server(xc, dom, ioservid,
@@ -280,6 +314,10 @@ static inline void xen_unmap_pcidev(xc_interface *xc, domid_t dom,
ioservid_t ioservid,
PCIDevice *pci_dev)
{
+ if (use_default_ioreq_server) {
+ return;
+ }
+
trace_xen_unmap_pcidev(ioservid, pci_bus_num(pci_dev->bus),
PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
xc_hvm_unmap_pcidev_from_ioreq_server(xc, dom, ioservid,
@@ -288,22 +326,29 @@ static inline void xen_unmap_pcidev(xc_interface *xc, domid_t dom,
PCI_FUNC(pci_dev->devfn));
}
-static inline int xen_create_ioreq_server(xc_interface *xc, domid_t dom,
- ioservid_t *ioservid)
+static inline void xen_create_ioreq_server(xc_interface *xc, domid_t dom,
+ ioservid_t *ioservid)
{
int rc = xc_hvm_create_ioreq_server(xc, dom, HVM_IOREQSRV_BUFIOREQ_ATOMIC,
ioservid);
if (rc == 0) {
trace_xen_ioreq_server_create(*ioservid);
+ return;
}
- return rc;
+ *ioservid = 0;
+ use_default_ioreq_server = true;
+ trace_xen_default_ioreq_server();
}
static inline void xen_destroy_ioreq_server(xc_interface *xc, domid_t dom,
ioservid_t ioservid)
{
+ if (use_default_ioreq_server) {
+ return;
+ }
+
trace_xen_ioreq_server_destroy(ioservid);
xc_hvm_destroy_ioreq_server(xc, dom, ioservid);
}
@@ -314,6 +359,12 @@ static inline int xen_get_ioreq_server_info(xc_interface *xc, domid_t dom,
xen_pfn_t *bufioreq_pfn,
evtchn_port_t *bufioreq_evtchn)
{
+ if (use_default_ioreq_server) {
+ return xen_get_default_ioreq_server_info(xc, dom, ioreq_pfn,
+ bufioreq_pfn,
+ bufioreq_evtchn);
+ }
+
return xc_hvm_get_ioreq_server_info(xc, dom, ioservid,
ioreq_pfn, bufioreq_pfn,
bufioreq_evtchn);
@@ -323,6 +374,10 @@ static inline int xen_set_ioreq_server_state(xc_interface *xc, domid_t dom,
ioservid_t ioservid,
bool enable)
{
+ if (use_default_ioreq_server) {
+ return 0;
+ }
+
trace_xen_ioreq_server_state(ioservid, enable);
return xc_hvm_set_ioreq_server_state(xc, dom, ioservid, enable);
}