author     Peter Maydell <peter.maydell@linaro.org>   2017-06-07 18:24:08 +0100
committer  Peter Maydell <peter.maydell@linaro.org>   2017-06-07 18:24:08 +0100
commit     bbfa326fc8028e275eddf8c9965c2a1b59405b2e
tree       18462ee41801d922e941ee50e1e4a54a96324464
parent     64175afc695c0672876fbbfc31b299c86d562cb4
parent     ac06724a715864942e2b5e28f92d5d5421f0a0b0
Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging
* virtio-scsi use-after-free fix (Fam)
* SMM fixes and improvements for TCG (myself, Mihail)
* irqchip and AddressSpaceDispatch cleanups and fixes (Peter)
* Coverity fix (Stefano)
* NBD cleanups and fixes (Vladimir, Eric, myself)
* RTC accuracy improvements and code cleanups (Guangrong+Yunfang)
* socket error reporting improvement (Daniel)
* GDB XML description for SSE registers (Abdallah)
* kvmclock update fix (Denis)
* SMM memory savings (Gonglei)
* -cpu 486 fix (myself)
* various bugfixes (Roman, Peter, myself, Thomas)
* rtc-test improvement (Guangrong)
* migration throttling fix (Felipe)
* create docs/ subdirectories (myself)

# gpg: Signature made Wed 07 Jun 2017 17:22:07 BST
# gpg:                using RSA key 0xBFFBD25F78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>"
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>"
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4 E2F7 7E15 100C CD36 69B1
#      Subkey fingerprint: F133 3857 4B66 2389 866C 7682 BFFB D25F 78C7 AE83

* remotes/bonzini/tags/for-upstream: (31 commits)
  docs: create config/, devel/ and spin/ subdirectories
  cpus: reset throttle_thread_scheduled after sleep
  kvm: don't register smram_listener when smm is off
  nbd: make it thread-safe, fix qcow2 over nbd
  target/i386: Add GDB XML description for SSE registers
  i386/kvm: do not zero out segment flags if segment is unusable or not present
  edu: fix memory leak on msi_broken platforms
  linuxboot_dma: compile for i486
  kvmclock: update system_time_msr address forcibly
  nbd: Fully initialize client in case of failed negotiation
  sockets: improve error reporting if UNIX socket path is too long
  i386: fix read/write cr with icount option
  target/i386: use multiple CPU AddressSpaces
  target/i386: enable A20 automatically in system management mode
  virtio-scsi: Unset hotplug handler when unrealize
  exec: simplify phys_page_find() params
  nbd/client.c: use errp instead of LOG
  nbd: add errp to read_sync, write_sync and drop_sync
  nbd: add errp parameter to nbd_wr_syncv()
  nbd: read_sync and friends: return 0 on success
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'target')
-rw-r--r--   target/i386/arch_memory_mapping.c | 18
-rw-r--r--   target/i386/cpu.c                  | 23
-rw-r--r--   target/i386/cpu.h                  | 20
-rw-r--r--   target/i386/helper.c               | 96
-rw-r--r--   target/i386/kvm.c                  | 36
-rw-r--r--   target/i386/machine.c              |  4
-rw-r--r--   target/i386/smm_helper.c           | 18
-rw-r--r--   target/i386/translate.c            | 12
8 files changed, 125 insertions, 102 deletions
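Two of the target/i386 items in this pull ("use multiple CPU AddressSpaces" and "enable A20 automatically in system management mode") share one pattern that recurs throughout the hunks below: the TCG x86 CPU now owns two address spaces (index 0 for plain system memory, index 1 for the per-CPU root with SMRAM overlaid at higher priority), physical accesses pick the index from the transaction attributes, and the A20 mask is treated as fully open while in SMM. A condensed sketch of that pattern, simplified from the cpu.h and helper.c changes below (realize-time wiring and error handling omitted):

/* Sketch only: condensed from the patches in this merge. */
static inline int x86_asidx_from_attrs(CPUState *cs, MemTxAttrs attrs)
{
    return !!attrs.secure;   /* cpu_get_mem_attrs() sets .secure while in SMM */
}

static inline int32_t x86_get_a20_mask(CPUX86State *env)
{
    /* In SMM the A20 line is ignored, so SMRAM addresses are never masked. */
    return (env->hflags & HF_SMM_MASK) ? -1 : env->a20_mask;
}

uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr)
{
    CPUX86State *env = &X86_CPU(cs)->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_get_address_space(cs, x86_asidx_from_attrs(cs, attrs));

    /* SMM code transparently reads SMRAM; normal code sees system memory. */
    return address_space_ldq(as, addr, attrs, NULL);
}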
diff --git a/target/i386/arch_memory_mapping.c b/target/i386/arch_memory_mapping.c
index 826aee597b..647cff2829 100644
--- a/target/i386/arch_memory_mapping.c
+++ b/target/i386/arch_memory_mapping.c
@@ -272,25 +272,27 @@ void x86_cpu_get_memory_mapping(CPUState *cs, MemoryMappingList *list,
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
+ int32_t a20_mask;
if (!cpu_paging_enabled(cs)) {
/* paging is disabled */
return;
}
+ a20_mask = x86_get_a20_mask(env);
if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
if (env->hflags & HF_LMA_MASK) {
if (env->cr[4] & CR4_LA57_MASK) {
hwaddr pml5e_addr;
- pml5e_addr = (env->cr[3] & PLM4_ADDR_MASK) & env->a20_mask;
- walk_pml5e(list, cs->as, pml5e_addr, env->a20_mask);
+ pml5e_addr = (env->cr[3] & PLM4_ADDR_MASK) & a20_mask;
+ walk_pml5e(list, cs->as, pml5e_addr, a20_mask);
} else {
hwaddr pml4e_addr;
- pml4e_addr = (env->cr[3] & PLM4_ADDR_MASK) & env->a20_mask;
- walk_pml4e(list, cs->as, pml4e_addr, env->a20_mask,
+ pml4e_addr = (env->cr[3] & PLM4_ADDR_MASK) & a20_mask;
+ walk_pml4e(list, cs->as, pml4e_addr, a20_mask,
0xffffULL << 48);
}
} else
@@ -298,16 +300,16 @@ void x86_cpu_get_memory_mapping(CPUState *cs, MemoryMappingList *list,
{
hwaddr pdpe_addr;
- pdpe_addr = (env->cr[3] & ~0x1f) & env->a20_mask;
- walk_pdpe2(list, cs->as, pdpe_addr, env->a20_mask);
+ pdpe_addr = (env->cr[3] & ~0x1f) & a20_mask;
+ walk_pdpe2(list, cs->as, pdpe_addr, a20_mask);
}
} else {
hwaddr pde_addr;
bool pse;
- pde_addr = (env->cr[3] & ~0xfff) & env->a20_mask;
+ pde_addr = (env->cr[3] & ~0xfff) & a20_mask;
pse = !!(env->cr[4] & CR4_PSE_MASK);
- walk_pde2(list, cs->as, pde_addr, env->a20_mask, pse);
+ walk_pde2(list, cs->as, pde_addr, a20_mask, pse);
}
}
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index ffb5267162..b2b1d20cee 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -3239,7 +3239,7 @@ static void x86_cpu_machine_done(Notifier *n, void *unused)
cpu->smram = g_new(MemoryRegion, 1);
memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
smram, 0, 1ull << 32);
- memory_region_set_enabled(cpu->smram, false);
+ memory_region_set_enabled(cpu->smram, true);
memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
}
}
@@ -3619,7 +3619,9 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
#ifndef CONFIG_USER_ONLY
if (tcg_enabled()) {
- AddressSpace *newas = g_new(AddressSpace, 1);
+ AddressSpace *as_normal = address_space_init_shareable(cs->memory,
+ "cpu-memory");
+ AddressSpace *as_smm = g_new(AddressSpace, 1);
cpu->cpu_as_mem = g_new(MemoryRegion, 1);
cpu->cpu_as_root = g_new(MemoryRegion, 1);
@@ -3635,9 +3637,11 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
get_system_memory(), 0, ~0ull);
memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
memory_region_set_enabled(cpu->cpu_as_mem, true);
- address_space_init(newas, cpu->cpu_as_root, "CPU");
- cs->num_ases = 1;
- cpu_address_space_init(cs, newas, 0);
+ address_space_init(as_smm, cpu->cpu_as_root, "CPU");
+
+ cs->num_ases = 2;
+ cpu_address_space_init(cs, as_normal, 0);
+ cpu_address_space_init(cs, as_smm, 1);
/* ... SMRAM with higher priority, linked from /machine/smram. */
cpu->machine_done.notify = x86_cpu_machine_done;
@@ -4053,6 +4057,7 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
#ifdef CONFIG_USER_ONLY
cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
+ cc->asidx_from_attrs = x86_asidx_from_attrs;
cc->get_memory_mapping = x86_cpu_get_memory_mapping;
cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
cc->write_elf64_note = x86_cpu_write_elf64_note;
@@ -4063,11 +4068,11 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
#endif
cc->gdb_arch_name = x86_gdb_arch_name;
#ifdef TARGET_X86_64
- cc->gdb_core_xml_file = "i386-64bit-core.xml";
- cc->gdb_num_core_regs = 40;
+ cc->gdb_core_xml_file = "i386-64bit.xml";
+ cc->gdb_num_core_regs = 57;
#else
- cc->gdb_core_xml_file = "i386-32bit-core.xml";
- cc->gdb_num_core_regs = 32;
+ cc->gdb_core_xml_file = "i386-32bit.xml";
+ cc->gdb_num_core_regs = 41;
#endif
#ifndef CONFIG_USER_ONLY
cc->debug_excp_handler = breakpoint_handler;
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index cfe825f0a4..de0551f775 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -1451,6 +1451,16 @@ int x86_cpu_handle_mmu_fault(CPUState *cpu, vaddr addr,
void x86_cpu_set_a20(X86CPU *cpu, int a20_state);
#ifndef CONFIG_USER_ONLY
+static inline int x86_asidx_from_attrs(CPUState *cs, MemTxAttrs attrs)
+{
+ return !!attrs.secure;
+}
+
+static inline AddressSpace *cpu_addressspace(CPUState *cs, MemTxAttrs attrs)
+{
+ return cpu_get_address_space(cs, cpu_asidx_from_attrs(cs, attrs));
+}
+
uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr);
uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr);
uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr);
@@ -1625,6 +1635,15 @@ static inline MemTxAttrs cpu_get_mem_attrs(CPUX86State *env)
return ((MemTxAttrs) { .secure = (env->hflags & HF_SMM_MASK) != 0 });
}
+static inline int32_t x86_get_a20_mask(CPUX86State *env)
+{
+ if (env->hflags & HF_SMM_MASK) {
+ return -1;
+ } else {
+ return env->a20_mask;
+ }
+}
+
/* fpu_helper.c */
void cpu_set_mxcsr(CPUX86State *env, uint32_t val);
void cpu_set_fpuc(CPUX86State *env, uint16_t val);
@@ -1644,7 +1663,6 @@ void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw);
/* smm_helper.c */
void do_smm_enter(X86CPU *cpu);
-void cpu_smm_update(X86CPU *cpu);
/* apic.c */
void cpu_report_tpr_access(CPUX86State *env, TPRAccess access);
diff --git a/target/i386/helper.c b/target/i386/helper.c
index ee7eff2f6f..ef0505949a 100644
--- a/target/i386/helper.c
+++ b/target/i386/helper.c
@@ -724,6 +724,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
uint64_t ptep, pte;
+ int32_t a20_mask;
target_ulong pde_addr, pte_addr;
int error_code = 0;
int is_dirty, prot, page_size, is_write, is_user;
@@ -739,6 +740,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
#endif
is_write = is_write1 & 1;
+ a20_mask = x86_get_a20_mask(env);
if (!(env->cr[0] & CR0_PG_MASK)) {
pte = addr;
#ifdef TARGET_X86_64
@@ -777,7 +779,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
if (la57) {
pml5e_addr = ((env->cr[3] & ~0xfff) +
- (((addr >> 48) & 0x1ff) << 3)) & env->a20_mask;
+ (((addr >> 48) & 0x1ff) << 3)) & a20_mask;
pml5e = x86_ldq_phys(cs, pml5e_addr);
if (!(pml5e & PG_PRESENT_MASK)) {
goto do_fault;
@@ -796,7 +798,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
}
pml4e_addr = ((pml5e & PG_ADDRESS_MASK) +
- (((addr >> 39) & 0x1ff) << 3)) & env->a20_mask;
+ (((addr >> 39) & 0x1ff) << 3)) & a20_mask;
pml4e = x86_ldq_phys(cs, pml4e_addr);
if (!(pml4e & PG_PRESENT_MASK)) {
goto do_fault;
@@ -810,7 +812,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
}
ptep &= pml4e ^ PG_NX_MASK;
pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
- env->a20_mask;
+ a20_mask;
pdpe = x86_ldq_phys(cs, pdpe_addr);
if (!(pdpe & PG_PRESENT_MASK)) {
goto do_fault;
@@ -835,7 +837,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
{
/* XXX: load them when cr3 is loaded ? */
pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
- env->a20_mask;
+ a20_mask;
pdpe = x86_ldq_phys(cs, pdpe_addr);
if (!(pdpe & PG_PRESENT_MASK)) {
goto do_fault;
@@ -848,7 +850,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
}
pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
- env->a20_mask;
+ a20_mask;
pde = x86_ldq_phys(cs, pde_addr);
if (!(pde & PG_PRESENT_MASK)) {
goto do_fault;
@@ -870,7 +872,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
x86_stl_phys_notdirty(cs, pde_addr, pde);
}
pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
- env->a20_mask;
+ a20_mask;
pte = x86_ldq_phys(cs, pte_addr);
if (!(pte & PG_PRESENT_MASK)) {
goto do_fault;
@@ -886,7 +888,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
/* page directory entry */
pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
- env->a20_mask;
+ a20_mask;
pde = x86_ldl_phys(cs, pde_addr);
if (!(pde & PG_PRESENT_MASK)) {
goto do_fault;
@@ -913,7 +915,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
/* page directory entry */
pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
- env->a20_mask;
+ a20_mask;
pte = x86_ldl_phys(cs, pte_addr);
if (!(pte & PG_PRESENT_MASK)) {
goto do_fault;
@@ -992,7 +994,7 @@ do_check_protect_pse36:
}
do_mapping:
- pte = pte & env->a20_mask;
+ pte = pte & a20_mask;
/* align to page_size */
pte &= PG_ADDRESS_MASK & ~(page_size - 1);
@@ -1039,11 +1041,13 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
CPUX86State *env = &cpu->env;
target_ulong pde_addr, pte_addr;
uint64_t pte;
+ int32_t a20_mask;
uint32_t page_offset;
int page_size;
+ a20_mask = x86_get_a20_mask(env);
if (!(env->cr[0] & CR0_PG_MASK)) {
- pte = addr & env->a20_mask;
+ pte = addr & a20_mask;
page_size = 4096;
} else if (env->cr[4] & CR4_PAE_MASK) {
target_ulong pdpe_addr;
@@ -1064,7 +1068,7 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
if (la57) {
pml5e_addr = ((env->cr[3] & ~0xfff) +
- (((addr >> 48) & 0x1ff) << 3)) & env->a20_mask;
+ (((addr >> 48) & 0x1ff) << 3)) & a20_mask;
pml5e = x86_ldq_phys(cs, pml5e_addr);
if (!(pml5e & PG_PRESENT_MASK)) {
return -1;
@@ -1074,13 +1078,13 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
}
pml4e_addr = ((pml5e & PG_ADDRESS_MASK) +
- (((addr >> 39) & 0x1ff) << 3)) & env->a20_mask;
+ (((addr >> 39) & 0x1ff) << 3)) & a20_mask;
pml4e = x86_ldq_phys(cs, pml4e_addr);
if (!(pml4e & PG_PRESENT_MASK)) {
return -1;
}
pdpe_addr = ((pml4e & PG_ADDRESS_MASK) +
- (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
+ (((addr >> 30) & 0x1ff) << 3)) & a20_mask;
pdpe = x86_ldq_phys(cs, pdpe_addr);
if (!(pdpe & PG_PRESENT_MASK)) {
return -1;
@@ -1095,14 +1099,14 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
#endif
{
pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
- env->a20_mask;
+ a20_mask;
pdpe = x86_ldq_phys(cs, pdpe_addr);
if (!(pdpe & PG_PRESENT_MASK))
return -1;
}
pde_addr = ((pdpe & PG_ADDRESS_MASK) +
- (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
+ (((addr >> 21) & 0x1ff) << 3)) & a20_mask;
pde = x86_ldq_phys(cs, pde_addr);
if (!(pde & PG_PRESENT_MASK)) {
return -1;
@@ -1114,7 +1118,7 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
} else {
/* 4 KB page */
pte_addr = ((pde & PG_ADDRESS_MASK) +
- (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
+ (((addr >> 12) & 0x1ff) << 3)) & a20_mask;
page_size = 4096;
pte = x86_ldq_phys(cs, pte_addr);
}
@@ -1125,7 +1129,7 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
uint32_t pde;
/* page directory entry */
- pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
+ pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & a20_mask;
pde = x86_ldl_phys(cs, pde_addr);
if (!(pde & PG_PRESENT_MASK))
return -1;
@@ -1134,14 +1138,14 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
page_size = 4096 * 1024;
} else {
/* page directory entry */
- pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
+ pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & a20_mask;
pte = x86_ldl_phys(cs, pte_addr);
if (!(pte & PG_PRESENT_MASK)) {
return -1;
}
page_size = 4096;
}
- pte = pte & env->a20_mask;
+ pte = pte & a20_mask;
}
#ifdef TARGET_X86_64
@@ -1399,89 +1403,89 @@ uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
+ MemTxAttrs attrs = cpu_get_mem_attrs(env);
+ AddressSpace *as = cpu_addressspace(cs, attrs);
- return address_space_ldub(cs->as, addr,
- cpu_get_mem_attrs(env),
- NULL);
+ return address_space_ldub(as, addr, attrs, NULL);
}
uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
+ MemTxAttrs attrs = cpu_get_mem_attrs(env);
+ AddressSpace *as = cpu_addressspace(cs, attrs);
- return address_space_lduw(cs->as, addr,
- cpu_get_mem_attrs(env),
- NULL);
+ return address_space_lduw(as, addr, attrs, NULL);
}
uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
+ MemTxAttrs attrs = cpu_get_mem_attrs(env);
+ AddressSpace *as = cpu_addressspace(cs, attrs);
- return address_space_ldl(cs->as, addr,
- cpu_get_mem_attrs(env),
- NULL);
+ return address_space_ldl(as, addr, attrs, NULL);
}
uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
+ MemTxAttrs attrs = cpu_get_mem_attrs(env);
+ AddressSpace *as = cpu_addressspace(cs, attrs);
- return address_space_ldq(cs->as, addr,
- cpu_get_mem_attrs(env),
- NULL);
+ return address_space_ldq(as, addr, attrs, NULL);
}
void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
+ MemTxAttrs attrs = cpu_get_mem_attrs(env);
+ AddressSpace *as = cpu_addressspace(cs, attrs);
- address_space_stb(cs->as, addr, val,
- cpu_get_mem_attrs(env),
- NULL);
+ address_space_stb(as, addr, val, attrs, NULL);
}
void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
+ MemTxAttrs attrs = cpu_get_mem_attrs(env);
+ AddressSpace *as = cpu_addressspace(cs, attrs);
- address_space_stl_notdirty(cs->as, addr, val,
- cpu_get_mem_attrs(env),
- NULL);
+ address_space_stl_notdirty(as, addr, val, attrs, NULL);
}
void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
+ MemTxAttrs attrs = cpu_get_mem_attrs(env);
+ AddressSpace *as = cpu_addressspace(cs, attrs);
- address_space_stw(cs->as, addr, val,
- cpu_get_mem_attrs(env),
- NULL);
+ address_space_stw(as, addr, val, attrs, NULL);
}
void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
+ MemTxAttrs attrs = cpu_get_mem_attrs(env);
+ AddressSpace *as = cpu_addressspace(cs, attrs);
- address_space_stl(cs->as, addr, val,
- cpu_get_mem_attrs(env),
- NULL);
+ address_space_stl(as, addr, val, attrs, NULL);
}
void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
+ MemTxAttrs attrs = cpu_get_mem_attrs(env);
+ AddressSpace *as = cpu_addressspace(cs, attrs);
- address_space_stq(cs->as, addr, val,
- cpu_get_mem_attrs(env),
- NULL);
+ address_space_stq(as, addr, val, attrs, NULL);
}
#endif
diff --git a/target/i386/kvm.c b/target/i386/kvm.c
index 49b6115eae..ee36502789 100644
--- a/target/i386/kvm.c
+++ b/target/i386/kvm.c
@@ -43,6 +43,7 @@
#include "standard-headers/asm-x86/hyperv.h"
#include "hw/pci/pci.h"
#include "hw/pci/msi.h"
+#include "hw/pci/msix.h"
#include "migration/blocker.h"
#include "exec/memattrs.h"
#include "trace.h"
@@ -1254,7 +1255,9 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
}
}
- if (kvm_check_extension(s, KVM_CAP_X86_SMM)) {
+ if (kvm_check_extension(s, KVM_CAP_X86_SMM) &&
+ object_dynamic_cast(OBJECT(ms), TYPE_PC_MACHINE) &&
+ pc_machine_is_smm_enabled(PC_MACHINE(ms))) {
smram_machine_done.notify = register_smram_listener;
qemu_add_machine_init_done_notifier(&smram_machine_done);
}
@@ -1300,18 +1303,14 @@ static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
lhs->selector = rhs->selector;
lhs->base = rhs->base;
lhs->limit = rhs->limit;
- if (rhs->unusable) {
- lhs->flags = 0;
- } else {
- lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
- (rhs->present * DESC_P_MASK) |
- (rhs->dpl << DESC_DPL_SHIFT) |
- (rhs->db << DESC_B_SHIFT) |
- (rhs->s * DESC_S_MASK) |
- (rhs->l << DESC_L_SHIFT) |
- (rhs->g * DESC_G_MASK) |
- (rhs->avl * DESC_AVL_MASK);
- }
+ lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
+ ((rhs->present && !rhs->unusable) * DESC_P_MASK) |
+ (rhs->dpl << DESC_DPL_SHIFT) |
+ (rhs->db << DESC_B_SHIFT) |
+ (rhs->s * DESC_S_MASK) |
+ (rhs->l << DESC_L_SHIFT) |
+ (rhs->g * DESC_G_MASK) |
+ (rhs->avl * DESC_AVL_MASK);
}
static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
@@ -3510,12 +3509,17 @@ static void kvm_update_msi_routes_all(void *private, bool global,
int cnt = 0;
MSIRouteEntry *entry;
MSIMessage msg;
+ PCIDevice *dev;
+
/* TODO: explicit route update */
QLIST_FOREACH(entry, &msi_route_list, list) {
cnt++;
- msg = pci_get_msi_message(entry->dev, entry->vector);
- kvm_irqchip_update_msi_route(kvm_state, entry->virq,
- msg, entry->dev);
+ dev = entry->dev;
+ if (!msix_enabled(dev) && !msi_enabled(dev)) {
+ continue;
+ }
+ msg = pci_get_msi_message(dev, entry->vector);
+ kvm_irqchip_update_msi_route(kvm_state, entry->virq, msg, dev);
}
kvm_irqchip_commit_routes(kvm_state);
trace_kvm_x86_update_msi_routes(cnt);
diff --git a/target/i386/machine.c b/target/i386/machine.c
index 3cb272948e..8c7a822e9f 100644
--- a/target/i386/machine.c
+++ b/target/i386/machine.c
@@ -274,10 +274,6 @@ static int cpu_post_load(void *opaque, int version_id)
cpu_x86_update_dr7(env, dr7);
}
tlb_flush(cs);
-
- if (tcg_enabled()) {
- cpu_smm_update(cpu);
- }
return 0;
}
diff --git a/target/i386/smm_helper.c b/target/i386/smm_helper.c
index f051a77c4a..90621e5977 100644
--- a/target/i386/smm_helper.c
+++ b/target/i386/smm_helper.c
@@ -43,19 +43,6 @@ void helper_rsm(CPUX86State *env)
#define SMM_REVISION_ID 0x00020000
#endif
-/* Called with iothread lock taken */
-void cpu_smm_update(X86CPU *cpu)
-{
- CPUX86State *env = &cpu->env;
- bool smm_enabled = (env->hflags & HF_SMM_MASK);
-
- g_assert(qemu_mutex_iothread_locked());
-
- if (cpu->smram) {
- memory_region_set_enabled(cpu->smram, smm_enabled);
- }
-}
-
void do_smm_enter(X86CPU *cpu)
{
CPUX86State *env = &cpu->env;
@@ -73,7 +60,6 @@ void do_smm_enter(X86CPU *cpu)
} else {
env->hflags2 |= HF2_NMI_MASK;
}
- cpu_smm_update(cpu);
sm_state = env->smbase + 0x8000;
@@ -338,10 +324,6 @@ void helper_rsm(CPUX86State *env)
env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
env->hflags &= ~HF_SMM_MASK;
- qemu_mutex_lock_iothread();
- cpu_smm_update(cpu);
- qemu_mutex_unlock_iothread();
-
qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
log_cpu_state_mask(CPU_LOG_INT, CPU(cpu), CPU_DUMP_CCOP);
}
diff --git a/target/i386/translate.c b/target/i386/translate.c
index 674ec96d5a..ed3b896db4 100644
--- a/target/i386/translate.c
+++ b/target/i386/translate.c
@@ -7939,14 +7939,26 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
if (b & 2) {
+ if (s->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
gen_op_mov_v_reg(ot, cpu_T0, rm);
gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
cpu_T0);
+ if (s->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_end();
+ }
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
} else {
+ if (s->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
gen_helper_read_crN(cpu_T0, cpu_env, tcg_const_i32(reg));
gen_op_mov_reg_v(ot, rm, cpu_T0);
+ if (s->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_end();
+ }
}
break;
default: