KVM: x86/mmu: Yield in TDP MMU iter even if no SPTEs changed
Under certain conditions, some TDP MMU functions may not yield
reliably or frequently enough. For example, if a paging structure is
very large but has few, if any, writable entries, wrprot_gfn_range
could traverse many entries before finding a writable entry and
yielding, because the check for yielding only happens after an SPTE
is modified.

Fix this issue by moving the yield check to the beginning of the loop
body (see the sketch ahead of the diff below).

Fixes: a6a0b05 ("kvm: x86/mmu: Support dirty logging for the TDP MMU")
Reviewed-by: Peter Feiner <[email protected]>
Signed-off-by: Ben Gardon <[email protected]>

Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
Ben Gardon authored and bonzini committed Feb 4, 2021
1 parent ed5e484 commit 1af4a96
Showing 1 changed file with 22 additions and 10 deletions.
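Before the diff, a minimal, self-contained C sketch of the pattern the patch applies: the yield check is hoisted to the top of the range-walking loop so it runs even when no SPTE is modified. This is an illustration only, not kernel code; struct entry, maybe_yield() and wrprot_range() are hypothetical stand-ins for the TDP MMU iterator, tdp_mmu_iter_cond_resched() and wrprot_gfn_range().

/*
 * Illustrative sketch only -- hypothetical names, not the kernel's TDP MMU.
 */
#include <stdbool.h>
#include <stddef.h>

struct entry { bool present; bool writable; };

/* Stand-in for the resched check: pretend a yield is needed every 64 steps. */
static bool maybe_yield(size_t steps_since_yield)
{
	return steps_since_yield >= 64;
}

/* Write-protect a range, checking whether to yield before examining each
 * entry -- the loop shape this patch moves the TDP MMU functions to. */
static bool wrprot_range(struct entry *entries, size_t n)
{
	bool spte_set = false;
	size_t steps = 0;

	for (size_t i = 0; i < n; i++, steps++) {
		if (maybe_yield(steps)) {
			/* Runs even when nothing below would change. The real
			 * iterator restarts at the same gfn after rescheduling,
			 * so revisit this entry on the next pass. */
			steps = 0;
			i--;
			continue;
		}

		if (!entries[i].present || !entries[i].writable)
			continue;	/* pre-patch, no yield check ran here */

		entries[i].writable = false;
		spte_set = true;
	}
	return spte_set;
}

With the check at the bottom of the loop (the pre-patch shape), a long run of non-present or read-only entries never reaches it, which is exactly the stall described above.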
arch/x86/kvm/mmu/tdp_mmu.c: 32 changes (22 additions, 10 deletions)

--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -501,6 +501,12 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 	bool flush_needed = false;
 
 	tdp_root_for_each_pte(iter, root, start, end) {
+		if (can_yield &&
+		    tdp_mmu_iter_cond_resched(kvm, &iter, flush_needed)) {
+			flush_needed = false;
+			continue;
+		}
+
 		if (!is_shadow_present_pte(iter.old_spte))
			continue;
 
@@ -515,9 +521,7 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 			continue;
 
 		tdp_mmu_set_spte(kvm, &iter, 0);
-
-		flush_needed = !(can_yield &&
-				 tdp_mmu_iter_cond_resched(kvm, &iter, true));
+		flush_needed = true;
 	}
 	return flush_needed;
 }
@@ -880,6 +884,9 @@ static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 
 	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
 				   min_level, start, end) {
+		if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
+			continue;
+
 		if (!is_shadow_present_pte(iter.old_spte) ||
 		    !is_last_spte(iter.old_spte, iter.level))
 			continue;
@@ -888,8 +895,6 @@ static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 
 		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
 		spte_set = true;
-
-		tdp_mmu_iter_cond_resched(kvm, &iter, false);
 	}
 	return spte_set;
 }
@@ -933,6 +938,9 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 	bool spte_set = false;
 
 	tdp_root_for_each_leaf_pte(iter, root, start, end) {
+		if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
+			continue;
+
 		if (spte_ad_need_write_protect(iter.old_spte)) {
 			if (is_writable_pte(iter.old_spte))
 				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
@@ -947,8 +955,6 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 
 		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
 		spte_set = true;
-
-		tdp_mmu_iter_cond_resched(kvm, &iter, false);
 	}
 	return spte_set;
 }
@@ -1056,15 +1062,16 @@ static bool set_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 	bool spte_set = false;
 
 	tdp_root_for_each_pte(iter, root, start, end) {
+		if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
+			continue;
+
 		if (!is_shadow_present_pte(iter.old_spte))
 			continue;
 
 		new_spte = iter.old_spte | shadow_dirty_mask;
 
 		tdp_mmu_set_spte(kvm, &iter, new_spte);
 		spte_set = true;
-
-		tdp_mmu_iter_cond_resched(kvm, &iter, false);
 	}
 
 	return spte_set;
@@ -1105,6 +1112,11 @@ static void zap_collapsible_spte_range(struct kvm *kvm,
 	bool spte_set = false;
 
 	tdp_root_for_each_pte(iter, root, start, end) {
+		if (tdp_mmu_iter_cond_resched(kvm, &iter, spte_set)) {
+			spte_set = false;
+			continue;
+		}
+
 		if (!is_shadow_present_pte(iter.old_spte) ||
 		    !is_last_spte(iter.old_spte, iter.level))
 			continue;
@@ -1116,7 +1128,7 @@ static void zap_collapsible_spte_range(struct kvm *kvm,
 
 		tdp_mmu_set_spte(kvm, &iter, 0);
 
-		spte_set = !tdp_mmu_iter_cond_resched(kvm, &iter, true);
+		spte_set = true;
 	}
 
 	if (spte_set)
