Do not hold the kvm->lock mutex across the entire pagefault path;
acquire it only where it is actually necessary: around the mmu hash
list, active list, rmap and parent pte handling.

Allow concurrent guest walkers by switching walk_addr() to take
mmap_sem in read mode.

And get rid of the lockless __gfn_to_page().
[avi: move kvm_mmu_pte_write() locking inside the function]
[avi: add locking for real mode]
[avi: fix cmpxchg locking]
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
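
To make the new locking discipline concrete, here is a minimal sketch of
the resulting pagefault path (illustrative only; walk_guest_ptes() and
update_shadow_ptes() are hypothetical stand-ins for the guest walker and
the shadow-MMU update code, not real KVM functions):

	static int page_fault_path(struct kvm_vcpu *vcpu, gva_t addr)
	{
		int r;

		/*
		 * The guest page-table walk may touch (and fault in) user
		 * memory, so it runs under mmap_sem held for read; any
		 * number of vcpus can walk concurrently.
		 */
		down_read(&current->mm->mmap_sem);
		r = walk_guest_ptes(vcpu, addr);	/* hypothetical */
		up_read(&current->mm->mmap_sem);
		if (r < 0)
			return r;

		/*
		 * Only the shadow MMU structures (mmu hash list, active
		 * list, rmap, parent ptes) still require kvm->lock, and
		 * only for the duration of the update itself.
		 */
		mutex_lock(&vcpu->kvm->lock);
		r = update_shadow_ptes(vcpu, addr);	/* hypothetical */
		mutex_unlock(&vcpu->kvm->lock);
		return r;
	}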
-static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
+static int __nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
{
int level = PT32E_ROOT_LEVEL;
hpa_t table_addr = vcpu->arch.mmu.root_hpa;
+static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
+{
+ int r;
+
+ mutex_lock(&vcpu->kvm->lock);
+ r = __nonpaging_map(vcpu, v, write, gfn);
+ mutex_unlock(&vcpu->kvm->lock);
+ return r;
+}
+
+
static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp)
{
if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
return;
+ mutex_lock(&vcpu->kvm->lock);
#ifdef CONFIG_X86_64
if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
hpa_t root = vcpu->arch.mmu.root_hpa;
sp = page_header(root);
--sp->root_count;
vcpu->arch.mmu.root_hpa = INVALID_PAGE;
+ mutex_unlock(&vcpu->kvm->lock);
}
vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
}
+ mutex_unlock(&vcpu->kvm->lock);
vcpu->arch.mmu.root_hpa = INVALID_PAGE;
}
- mutex_lock(&vcpu->kvm->lock);
r = mmu_topup_memory_caches(vcpu);
if (r)
goto out;
+ mutex_lock(&vcpu->kvm->lock);
kvm_mmu_free_some_pages(vcpu);
mmu_alloc_roots(vcpu);
+ mutex_unlock(&vcpu->kvm->lock);
kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
kvm_mmu_flush_tlb(vcpu);
out:
- mutex_unlock(&vcpu->kvm->lock);
return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_load);
int npte;
pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
+ mutex_lock(&vcpu->kvm->lock);
++vcpu->kvm->stat.mmu_pte_write;
kvm_mmu_audit(vcpu, "pre pte write");
if (gfn == vcpu->arch.last_pt_write_gfn
}
}
kvm_mmu_audit(vcpu, "post pte write");
+ mutex_unlock(&vcpu->kvm->lock);
}
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
- gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
- return kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+ gpa_t gpa;
+ int r;
+
+ down_read(&current->mm->mmap_sem);
+ gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
+ up_read(&current->mm->mmap_sem);
+
+ mutex_lock(&vcpu->kvm->lock);
+ r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+ mutex_unlock(&vcpu->kvm->lock);
+ return r;
}
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
+ mutex_lock(&vcpu->kvm->lock);
while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES) {
struct kvm_mmu_page *sp;
kvm_mmu_zap_page(vcpu->kvm, sp);
++vcpu->kvm->stat.mmu_recycled;
}
+ mutex_unlock(&vcpu->kvm->lock);
}
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
int r;
enum emulation_result er;
- mutex_lock(&vcpu->kvm->lock);
r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
if (r < 0)
goto out;
goto out;
er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);
- mutex_unlock(&vcpu->kvm->lock);
switch (er) {
case EMULATE_DONE:
- mutex_unlock(&vcpu->kvm->lock);
return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
{
struct kvm_mmu_page *sp, *node;
+ mutex_lock(&kvm->lock);
list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
kvm_mmu_zap_page(kvm, sp);
+ mutex_unlock(&kvm->lock);
kvm_flush_remote_tlbs(kvm);
}
+ down_read(&current->mm->mmap_sem);
/*
* Look up the shadow pte for the faulting address.
*/
r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
fetch_fault);
+ up_read(&current->mm->mmap_sem);
/*
* The page is not mapped by the guest. Let the guest handle it.
+ mutex_lock(&vcpu->kvm->lock);
shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
&write_pt);
pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
/*
* mmio: emulate if accessible, otherwise its a guest fault.
*/
- if (shadow_pte && is_io_pte(*shadow_pte))
+ if (shadow_pte && is_io_pte(*shadow_pte)) {
+ mutex_unlock(&vcpu->kvm->lock);
++vcpu->stat.pf_fixed;
kvm_mmu_audit(vcpu, "post page fault (fixed)");
+ mutex_unlock(&vcpu->kvm->lock);
{
gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT;
u16 data = 0;
+ down_read(&current->mm->mmap_sem);
r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
if (r < 0)
data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
r = kvm_write_guest_page(kvm, fn++, &data, 0x66, sizeof(u16));
if (r < 0)
r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE);
if (r < 0)
r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
if (r < 0)
- r = kvm_write_guest_page(kvm, fn, &data, RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1,
- sizeof(u8));
+ r = kvm_write_guest_page(kvm, fn, &data,
+ RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1,
+ sizeof(u8));
+ goto out;
+
+ ret = 1;
+out:
+ up_read(&current->mm->mmap_sem);
+ return ret;
}
static void seg_setup(int seg)
int r = 0;
mutex_lock(&kvm->lock);
+ down_write(&current->mm->mmap_sem);
if (kvm->arch.apic_access_page)
goto out;
kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
goto out;
kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00);
out:
+ up_write(&current->mm->mmap_sem);
mutex_unlock(&kvm->lock);
return r;
}
int ret;
u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
- mutex_lock(&vcpu->kvm->lock);
+ down_read(&current->mm->mmap_sem);
ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
offset * sizeof(u64), sizeof(pdpte));
if (ret < 0) {
memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
out:
- mutex_unlock(&vcpu->kvm->lock);
+ up_read(&current->mm->mmap_sem);
if (is_long_mode(vcpu) || !is_pae(vcpu))
return false;
- mutex_lock(&vcpu->kvm->lock);
+ down_read(&current->mm->mmap_sem);
r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
if (r < 0)
goto out;
changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
out:
- mutex_unlock(&vcpu->kvm->lock);
+ up_read(&current->mm->mmap_sem);
kvm_x86_ops->set_cr0(vcpu, cr0);
vcpu->arch.cr0 = cr0;
- mutex_lock(&vcpu->kvm->lock);
kvm_mmu_reset_context(vcpu);
- mutex_unlock(&vcpu->kvm->lock);
return;
}
EXPORT_SYMBOL_GPL(set_cr0);
}
kvm_x86_ops->set_cr4(vcpu, cr4);
vcpu->arch.cr4 = cr4;
- mutex_lock(&vcpu->kvm->lock);
kvm_mmu_reset_context(vcpu);
- mutex_unlock(&vcpu->kvm->lock);
}
EXPORT_SYMBOL_GPL(set_cr4);
- mutex_lock(&vcpu->kvm->lock);
+ down_read(&current->mm->mmap_sem);
/*
* Does the new cr3 value map to physical memory? (Note, we
* catch an invalid cr3 even in real-mode, because it would
vcpu->arch.cr3 = cr3;
vcpu->arch.mmu.new_cr3(vcpu);
}
- mutex_unlock(&vcpu->kvm->lock);
+ up_read(&current->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(set_cr3);
if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
return -EINVAL;
- mutex_lock(&kvm->lock);
+ down_write(&current->mm->mmap_sem);
kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
- mutex_unlock(&kvm->lock);
+ up_write(&current->mm->mmap_sem);
< alias->target_phys_addr)
goto out;
- mutex_lock(&kvm->lock);
+ down_write(&current->mm->mmap_sem);
p = &kvm->arch.aliases[alias->slot];
p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
- mutex_unlock(&kvm->lock);
+ up_write(&current->mm->mmap_sem);
struct kvm_memory_slot *memslot;
int is_dirty = 0;
- mutex_lock(&kvm->lock);
+ down_write(&current->mm->mmap_sem);
r = kvm_get_dirty_log(kvm, log, &is_dirty);
if (r)
- mutex_unlock(&kvm->lock);
+ up_write(&current->mm->mmap_sem);
struct kvm_vcpu *vcpu)
{
void *data = val;
+ int r = X86EMUL_CONTINUE;
+ down_read(&current->mm->mmap_sem);
while (bytes) {
gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
unsigned offset = addr & (PAGE_SIZE-1);
unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
int ret;
- if (gpa == UNMAPPED_GVA)
- return X86EMUL_PROPAGATE_FAULT;
+ if (gpa == UNMAPPED_GVA) {
+ r = X86EMUL_PROPAGATE_FAULT;
+ goto out;
+ }
ret = kvm_read_guest(vcpu->kvm, gpa, data, tocopy);
- if (ret < 0)
- return X86EMUL_UNHANDLEABLE;
+ if (ret < 0) {
+ r = X86EMUL_UNHANDLEABLE;
+ goto out;
+ }
bytes -= tocopy;
data += tocopy;
addr += tocopy;
}
-
- return X86EMUL_CONTINUE;
+out:
+ up_read(&current->mm->mmap_sem);
+ return r;
}
EXPORT_SYMBOL_GPL(emulator_read_std);
return X86EMUL_CONTINUE;
}
+ down_read(&current->mm->mmap_sem);
gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
+ up_read(&current->mm->mmap_sem);
/* For APIC access vmexit */
if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
/*
* Is this MMIO handled locally?
*/
+ mutex_lock(&vcpu->kvm->lock);
mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
if (mmio_dev) {
kvm_iodevice_read(mmio_dev, gpa, bytes, val);
+ mutex_unlock(&vcpu->kvm->lock);
return X86EMUL_CONTINUE;
}
+ mutex_unlock(&vcpu->kvm->lock);
vcpu->mmio_needed = 1;
vcpu->mmio_phys_addr = gpa;
+ down_read(&current->mm->mmap_sem);
ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
+ if (ret < 0) {
+ up_read(&current->mm->mmap_sem);
+ return 0;
+ }
kvm_mmu_pte_write(vcpu, gpa, val, bytes);
+ up_read(&current->mm->mmap_sem);
struct kvm_vcpu *vcpu)
{
struct kvm_io_device *mmio_dev;
- gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
+ gpa_t gpa;
+
+ down_read(&current->mm->mmap_sem);
+ gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
+ up_read(&current->mm->mmap_sem);
if (gpa == UNMAPPED_GVA) {
kvm_inject_page_fault(vcpu, addr, 2);
/*
* Is this MMIO handled locally?
*/
+ mutex_lock(&vcpu->kvm->lock);
mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
if (mmio_dev) {
kvm_iodevice_write(mmio_dev, gpa, bytes, val);
+ mutex_unlock(&vcpu->kvm->lock);
return X86EMUL_CONTINUE;
}
+ mutex_unlock(&vcpu->kvm->lock);
vcpu->mmio_needed = 1;
vcpu->mmio_phys_addr = gpa;
#ifndef CONFIG_X86_64
/* guests cmpxchg8b have to be emulated atomically */
if (bytes == 8) {
- gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
+ gpa_t gpa;
struct page *page;
char *addr;
u64 val;
+ down_read(&current->mm->mmap_sem);
+ gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
+
if (gpa == UNMAPPED_GVA ||
(gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
goto emul_write;
set_64bit((u64 *)(addr + offset_in_page(gpa)), val);
kunmap_atomic(addr, KM_USER0);
kvm_release_page_dirty(page);
+ emul_write:
+ up_read(&current->mm->mmap_sem);
#endif
return emulator_write_emulated(addr, new, bytes, vcpu);
kvm_x86_ops->skip_emulated_instruction(vcpu);
for (i = 0; i < nr_pages; ++i) {
- mutex_lock(&vcpu->kvm->lock);
+ down_read(&current->mm->mmap_sem);
page = gva_to_page(vcpu, address + i * PAGE_SIZE);
vcpu->arch.pio.guest_pages[i] = page;
- mutex_unlock(&vcpu->kvm->lock);
+ up_read(&current->mm->mmap_sem);
if (!page) {
kvm_inject_gp(vcpu, 0);
free_pio_guest_pages(vcpu);
char instruction[3];
int ret = 0;
- mutex_lock(&vcpu->kvm->lock);
/*
* Blow out the MMU to ensure that no other VCPU has an active mapping
!= X86EMUL_CONTINUE)
ret = -EFAULT;
- mutex_unlock(&vcpu->kvm->lock);
-
if (!apic || !apic->vapic_addr)
return;
+ down_read(&current->mm->mmap_sem);
page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
vcpu->arch.apic->vapic_page = page;
+ up_read(&current->mm->mmap_sem);
}
static void vapic_exit(struct kvm_vcpu *vcpu)
gpa_t gpa;
vcpu_load(vcpu);
- mutex_lock(&vcpu->kvm->lock);
+ down_read(&current->mm->mmap_sem);
gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
+ up_read(&current->mm->mmap_sem);
tr->physical_address = gpa;
tr->valid = gpa != UNMAPPED_GVA;
tr->writeable = 1;
tr->usermode = 0;
- mutex_unlock(&vcpu->kvm->lock);
vcpu_put(vcpu);
return 0;
*/
if (!user_alloc) {
if (npages && !old.rmap) {
- down_write(&current->mm->mmap_sem);
memslot->userspace_addr = do_mmap(NULL, 0,
npages * PAGE_SIZE,
PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_ANONYMOUS,
0);
- up_write(&current->mm->mmap_sem);
if (IS_ERR((void *)memslot->userspace_addr))
return PTR_ERR((void *)memslot->userspace_addr);
if (!old.user_alloc && old.rmap) {
int ret;
- down_write(&current->mm->mmap_sem);
ret = do_munmap(current->mm, old.userspace_addr,
old.npages * PAGE_SIZE);
- up_write(&current->mm->mmap_sem);
if (ret < 0)
printk(KERN_WARNING
"kvm_vm_ioctl_set_memory_region: "
*
* Discontiguous memory is allowed, mostly for framebuffers.
*
- * Must be called holding kvm->lock.
+ * Must be called holding mmap_sem for write.
*/
int __kvm_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
- mutex_lock(&kvm->lock);
+ down_write(&current->mm->mmap_sem);
r = __kvm_set_memory_region(kvm, mem, user_alloc);
- mutex_unlock(&kvm->lock);
+ up_write(&current->mm->mmap_sem);
return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);
/*
* Requires current->mm->mmap_sem to be held
*/
-static struct page *__gfn_to_page(struct kvm *kvm, gfn_t gfn)
+struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
struct page *page[1];
unsigned long addr;
-struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
-{
- struct page *page;
-
- down_read(&current->mm->mmap_sem);
- page = __gfn_to_page(kvm, gfn);
- up_read(&current->mm->mmap_sem);
-
- return page;
-}
-
EXPORT_SYMBOL_GPL(gfn_to_page);
void kvm_release_page_clean(struct page *page)
if (!kvm_is_visible_gfn(kvm, vmf->pgoff))
return VM_FAULT_SIGBUS;
- /* current->mm->mmap_sem is already held so call lockless version */
- page = __gfn_to_page(kvm, vmf->pgoff);
+ page = gfn_to_page(kvm, vmf->pgoff);
if (is_error_page(page)) {
kvm_release_page_clean(page);
return VM_FAULT_SIGBUS;