
Commit 25fedfc

paulusmack authored and agraf committed
KVM: PPC: Book3S HV: Move vcore preemption point up into kvmppc_run_vcpu
Rather than calling cond_resched() in kvmppc_run_core() before doing
the post-processing for the vcpus that we have just run (that is,
calling kvmppc_handle_exit_hv(), kvmppc_set_timer(), etc.), we now do
that post-processing before calling cond_resched(), and that
post-processing is moved out into its own function,
post_guest_process().

The reschedule point is now in kvmppc_run_vcpu() and we define a new
vcore state, VCORE_PREEMPT, to indicate that the vcore's runner task
is runnable but not running. (Doing the reschedule with the vcore in
VCORE_INACTIVE state would be bad because there are potentially other
vcpus waiting for the runner in kvmppc_wait_for_exec() which then
wouldn't get woken up.)

Also, we make use of the handy cond_resched_lock() function, which
unlocks and relocks vc->lock for us around the reschedule.

Signed-off-by: Paul Mackerras <[email protected]>
Signed-off-by: Alexander Graf <[email protected]>
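For readers unfamiliar with the helper: cond_resched_lock() is the stock
kernel facility (declared in include/linux/sched.h, implemented as
__cond_resched_lock() in kernel/sched/core.c) for dropping a held
spinlock around a voluntary reschedule. A minimal sketch of the pattern
it encapsulates, assuming the zero-argument should_resched() of this
era; this is an illustration, not the kernel's exact implementation,
and the _sketch name is hypothetical:

static int cond_resched_lock_sketch(spinlock_t *lock)
{
        /* Reschedule only if someone wants the CPU or the lock. */
        if (spin_needbreak(lock) || should_resched()) {
                spin_unlock(lock);  /* drop e.g. vc->lock ...         */
                cond_resched();     /* ... let something else run ... */
                spin_lock(lock);    /* ... and retake the lock        */
                return 1;           /* we gave the CPU away           */
        }
        return 0;                   /* lock was never released        */
}

Because the lock is guaranteed to be held again on return, the new code
in kvmppc_run_vcpu() below can simply set VCORE_PREEMPT, call
cond_resched_lock(&vc->lock), and restore VCORE_INACTIVE, holding
vc->lock at every point it actually runs.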
1 parent 1f09c3e commit 25fedfc

File tree

2 files changed: +55 -42 lines

arch/powerpc/include/asm/kvm_host.h

Lines changed: 3 additions & 2 deletions
@@ -304,8 +304,9 @@ struct kvmppc_vcore {
 /* Values for vcore_state */
 #define VCORE_INACTIVE  0
 #define VCORE_SLEEPING  1
-#define VCORE_RUNNING   2
-#define VCORE_EXITING   3
+#define VCORE_PREEMPT   2
+#define VCORE_RUNNING   3
+#define VCORE_EXITING   4
 
 /*
  * Struct used to manage memory for a virtual processor area

arch/powerpc/kvm/book3s_hv.c

Lines changed: 52 additions & 40 deletions
@@ -1882,15 +1882,50 @@ static void prepare_threads(struct kvmppc_vcore *vc)
         }
 }
 
+static void post_guest_process(struct kvmppc_vcore *vc)
+{
+        u64 now;
+        long ret;
+        struct kvm_vcpu *vcpu, *vnext;
+
+        now = get_tb();
+        list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
+                                 arch.run_list) {
+                /* cancel pending dec exception if dec is positive */
+                if (now < vcpu->arch.dec_expires &&
+                    kvmppc_core_pending_dec(vcpu))
+                        kvmppc_core_dequeue_dec(vcpu);
+
+                trace_kvm_guest_exit(vcpu);
+
+                ret = RESUME_GUEST;
+                if (vcpu->arch.trap)
+                        ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
+                                                    vcpu->arch.run_task);
+
+                vcpu->arch.ret = ret;
+                vcpu->arch.trap = 0;
+
+                if (vcpu->arch.ceded) {
+                        if (!is_kvmppc_resume_guest(ret))
+                                kvmppc_end_cede(vcpu);
+                        else
+                                kvmppc_set_timer(vcpu);
+                }
+                if (!is_kvmppc_resume_guest(vcpu->arch.ret)) {
+                        kvmppc_remove_runnable(vc, vcpu);
+                        wake_up(&vcpu->arch.cpu_run);
+                }
+        }
+}
+
 /*
  * Run a set of guest threads on a physical core.
  * Called with vc->lock held.
  */
 static void kvmppc_run_core(struct kvmppc_vcore *vc)
 {
-        struct kvm_vcpu *vcpu, *vnext;
-        long ret;
-        u64 now;
+        struct kvm_vcpu *vcpu;
         int i;
         int srcu_idx;
 
@@ -1922,8 +1957,11 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
          */
         if ((threads_per_core > 1) &&
             ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
-                list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
+                list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
                         vcpu->arch.ret = -EBUSY;
+                        kvmppc_remove_runnable(vc, vcpu);
+                        wake_up(&vcpu->arch.cpu_run);
+                }
                 goto out;
         }
 
@@ -1979,44 +2017,12 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
         kvm_guest_exit();
 
         preempt_enable();
-        cond_resched();
 
         spin_lock(&vc->lock);
-        now = get_tb();
-        list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
-                /* cancel pending dec exception if dec is positive */
-                if (now < vcpu->arch.dec_expires &&
-                    kvmppc_core_pending_dec(vcpu))
-                        kvmppc_core_dequeue_dec(vcpu);
-
-                trace_kvm_guest_exit(vcpu);
-
-                ret = RESUME_GUEST;
-                if (vcpu->arch.trap)
-                        ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
-                                                    vcpu->arch.run_task);
-
-                vcpu->arch.ret = ret;
-                vcpu->arch.trap = 0;
-
-                if (vcpu->arch.ceded) {
-                        if (!is_kvmppc_resume_guest(ret))
-                                kvmppc_end_cede(vcpu);
-                        else
-                                kvmppc_set_timer(vcpu);
-                }
-        }
+        post_guest_process(vc);
 
  out:
         vc->vcore_state = VCORE_INACTIVE;
-        list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
-                                 arch.run_list) {
-                if (!is_kvmppc_resume_guest(vcpu->arch.ret)) {
-                        kvmppc_remove_runnable(vc, vcpu);
-                        wake_up(&vcpu->arch.cpu_run);
-                }
-        }
-
         trace_kvmppc_run_core(vc, 1);
 }
 
@@ -2138,18 +2144,24 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
                 }
                 if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
                         break;
-                vc->runner = vcpu;
                 n_ceded = 0;
                 list_for_each_entry(v, &vc->runnable_threads, arch.run_list) {
                         if (!v->arch.pending_exceptions)
                                 n_ceded += v->arch.ceded;
                         else
                                 v->arch.ceded = 0;
                 }
-                if (n_ceded == vc->n_runnable)
+                vc->runner = vcpu;
+                if (n_ceded == vc->n_runnable) {
                         kvmppc_vcore_blocked(vc);
-                else
+                } else if (should_resched()) {
+                        vc->vcore_state = VCORE_PREEMPT;
+                        /* Let something else run */
+                        cond_resched_lock(&vc->lock);
+                        vc->vcore_state = VCORE_INACTIVE;
+                } else {
                         kvmppc_run_core(vc);
+                }
                 vc->runner = NULL;
         }
 
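As for the should_resched() check in the new preemption path: on
kernels of this vintage it reduces to roughly the following test (a
sketch based on the generic include/asm-generic/preempt.h version;
architectures may override it, and it later gained a preempt-offset
argument):

static __always_inline bool should_resched(void)
{
        /* A reschedule has been requested and preemption is not
         * disabled, so cond_resched_lock() would actually switch. */
        return unlikely(!preempt_count() && tif_need_resched());
}

This keeps the VCORE_PREEMPT transition off the fast path: the vcore
state is only toggled when a context switch is actually going to
happen.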