Skip to content

Commit 36867c0

Browse files
committed
Merge tag 'kvmarm-fixes-6.15-3' of https://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
KVM/arm64 fixes for 6.15, round #3

- Avoid use of uninitialized memcache pointer in user_mem_abort()

- Always set HCR_EL2.xMO bits when running in VHE, allowing interrupts
  to be taken while TGE=0 and fixing an ugly bug on AmpereOne that
  occurs when taking an interrupt while clearing the xMO bits
  (AC03_CPU_36)

- Prevent VMMs from hiding support for AArch64 at any EL virtualized
  by KVM

- Save/restore the host value for HCRX_EL2 instead of restoring an
  incorrect fixed value

- Make host_stage2_set_owner_locked() check that the entire requested
  range is memory rather than just the first page
2 parents 6a74470 + 3949e28 commit 36867c0

File tree

8 files changed

+48
-35
lines changed

8 files changed

+48
-35
lines changed

arch/arm64/include/asm/el2_setup.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -52,7 +52,7 @@
5252
mrs x0, id_aa64mmfr1_el1
5353
ubfx x0, x0, #ID_AA64MMFR1_EL1_HCX_SHIFT, #4
5454
cbz x0, .Lskip_hcrx_\@
55-
mov_q x0, HCRX_HOST_FLAGS
55+
mov_q x0, (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En | HCRX_EL2_EnFPM)
5656

5757
/* Enable GCS if supported */
5858
mrs_s x1, SYS_ID_AA64PFR1_EL1

arch/arm64/include/asm/kvm_arm.h

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -100,9 +100,8 @@
100100
HCR_FMO | HCR_IMO | HCR_PTW | HCR_TID3 | HCR_TID1)
101101
#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK | HCR_ATA)
102102
#define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC)
103-
#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
103+
#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H | HCR_AMO | HCR_IMO | HCR_FMO)
104104

105-
#define HCRX_HOST_FLAGS (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En | HCRX_EL2_EnFPM)
106105
#define MPAMHCR_HOST_FLAGS 0
107106

108107
/* TCR_EL2 Registers bits */

arch/arm64/kvm/hyp/include/hyp/switch.h

Lines changed: 6 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -235,6 +235,8 @@ static inline void __deactivate_traps_mpam(void)
235235

236236
static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
237237
{
238+
struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
239+
238240
/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
239241
write_sysreg(1 << 15, hstr_el2);
240242

@@ -245,11 +247,8 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
245247
* EL1 instead of being trapped to EL2.
246248
*/
247249
if (system_supports_pmuv3()) {
248-
struct kvm_cpu_context *hctxt;
249-
250250
write_sysreg(0, pmselr_el0);
251251

252-
hctxt = host_data_ptr(host_ctxt);
253252
ctxt_sys_reg(hctxt, PMUSERENR_EL0) = read_sysreg(pmuserenr_el0);
254253
write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
255254
vcpu_set_flag(vcpu, PMUSERENR_ON_CPU);
@@ -269,6 +268,7 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
269268
hcrx &= ~clr;
270269
}
271270

271+
ctxt_sys_reg(hctxt, HCRX_EL2) = read_sysreg_s(SYS_HCRX_EL2);
272272
write_sysreg_s(hcrx, SYS_HCRX_EL2);
273273
}
274274

@@ -278,19 +278,18 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
278278

279279
static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
280280
{
281+
struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
282+
281283
write_sysreg(*host_data_ptr(host_debug_state.mdcr_el2), mdcr_el2);
282284

283285
write_sysreg(0, hstr_el2);
284286
if (system_supports_pmuv3()) {
285-
struct kvm_cpu_context *hctxt;
286-
287-
hctxt = host_data_ptr(host_ctxt);
288287
write_sysreg(ctxt_sys_reg(hctxt, PMUSERENR_EL0), pmuserenr_el0);
289288
vcpu_clear_flag(vcpu, PMUSERENR_ON_CPU);
290289
}
291290

292291
if (cpus_have_final_cap(ARM64_HAS_HCX))
293-
write_sysreg_s(HCRX_HOST_FLAGS, SYS_HCRX_EL2);
292+
write_sysreg_s(ctxt_sys_reg(hctxt, HCRX_EL2), SYS_HCRX_EL2);
294293

295294
__deactivate_traps_hfgxtr(vcpu);
296295
__deactivate_traps_mpam();

arch/arm64/kvm/hyp/nvhe/mem_protect.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -503,7 +503,7 @@ int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id)
503503
{
504504
int ret;
505505

506-
if (!addr_is_memory(addr))
506+
if (!range_is_memory(addr, addr + size))
507507
return -EPERM;
508508

509509
ret = host_stage2_try(kvm_pgtable_stage2_set_owner, &host_mmu.pgt,

arch/arm64/kvm/hyp/vgic-v3-sr.c

Lines changed: 21 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -429,35 +429,41 @@ u64 __vgic_v3_get_gic_config(void)
429429
/*
430430
* To check whether we have a MMIO-based (GICv2 compatible)
431431
* CPU interface, we need to disable the system register
432-
* view. To do that safely, we have to prevent any interrupt
433-
* from firing (which would be deadly).
432+
* view.
434433
*
435-
* Note that this only makes sense on VHE, as interrupts are
436-
* already masked for nVHE as part of the exception entry to
437-
* EL2.
438-
*/
439-
if (has_vhe())
440-
flags = local_daif_save();
441-
442-
/*
443434
* Table 11-2 "Permitted ICC_SRE_ELx.SRE settings" indicates
444435
* that to be able to set ICC_SRE_EL1.SRE to 0, all the
445436
* interrupt overrides must be set. You've got to love this.
437+
*
438+
* As we always run VHE with HCR_xMO set, no extra xMO
439+
* manipulation is required in that case.
440+
*
441+
* To safely disable SRE, we have to prevent any interrupt
442+
* from firing (which would be deadly). This only makes sense
443+
* on VHE, as interrupts are already masked for nVHE as part
444+
* of the exception entry to EL2.
446445
*/
447-
sysreg_clear_set(hcr_el2, 0, HCR_AMO | HCR_FMO | HCR_IMO);
448-
isb();
446+
if (has_vhe()) {
447+
flags = local_daif_save();
448+
} else {
449+
sysreg_clear_set(hcr_el2, 0, HCR_AMO | HCR_FMO | HCR_IMO);
450+
isb();
451+
}
452+
449453
write_gicreg(0, ICC_SRE_EL1);
450454
isb();
451455

452456
val = read_gicreg(ICC_SRE_EL1);
453457

454458
write_gicreg(sre, ICC_SRE_EL1);
455459
isb();
456-
sysreg_clear_set(hcr_el2, HCR_AMO | HCR_FMO | HCR_IMO, 0);
457-
isb();
458460

459-
if (has_vhe())
461+
if (has_vhe()) {
460462
local_daif_restore(flags);
463+
} else {
464+
sysreg_clear_set(hcr_el2, HCR_AMO | HCR_FMO | HCR_IMO, 0);
465+
isb();
466+
}
461467

462468
val = (val & ICC_SRE_EL1_SRE) ? 0 : (1ULL << 63);
463469
val |= read_gicreg(ICH_VTR_EL2);

arch/arm64/kvm/mmu.c

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1501,6 +1501,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
15011501
return -EFAULT;
15021502
}
15031503

1504+
if (!is_protected_kvm_enabled())
1505+
memcache = &vcpu->arch.mmu_page_cache;
1506+
else
1507+
memcache = &vcpu->arch.pkvm_memcache;
1508+
15041509
/*
15051510
* Permission faults just need to update the existing leaf entry,
15061511
* and so normally don't require allocations from the memcache. The
@@ -1510,13 +1515,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
15101515
if (!fault_is_perm || (logging_active && write_fault)) {
15111516
int min_pages = kvm_mmu_cache_min_pages(vcpu->arch.hw_mmu);
15121517

1513-
if (!is_protected_kvm_enabled()) {
1514-
memcache = &vcpu->arch.mmu_page_cache;
1518+
if (!is_protected_kvm_enabled())
15151519
ret = kvm_mmu_topup_memory_cache(memcache, min_pages);
1516-
} else {
1517-
memcache = &vcpu->arch.pkvm_memcache;
1520+
else
15181521
ret = topup_hyp_memcache(memcache, min_pages);
1519-
}
1522+
15201523
if (ret)
15211524
return ret;
15221525
}

arch/arm64/kvm/sys_regs.c

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1945,6 +1945,12 @@ static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
19451945
if ((hw_val & mpam_mask) == (user_val & mpam_mask))
19461946
user_val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
19471947

1948+
/* Fail the guest's request to disable the AA64 ISA at EL{0,1,2} */
1949+
if (!FIELD_GET(ID_AA64PFR0_EL1_EL0, user_val) ||
1950+
!FIELD_GET(ID_AA64PFR0_EL1_EL1, user_val) ||
1951+
(vcpu_has_nv(vcpu) && !FIELD_GET(ID_AA64PFR0_EL1_EL2, user_val)))
1952+
return -EINVAL;
1953+
19481954
return set_id_reg(vcpu, rd, user_val);
19491955
}
19501956

tools/testing/selftests/kvm/arm64/set_id_regs.c

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -129,10 +129,10 @@ static const struct reg_ftr_bits ftr_id_aa64pfr0_el1[] = {
129129
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, DIT, 0),
130130
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, SEL2, 0),
131131
REG_FTR_BITS(FTR_EXACT, ID_AA64PFR0_EL1, GIC, 0),
132-
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL3, 0),
133-
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL2, 0),
134-
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL1, 0),
135-
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL0, 0),
132+
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL3, 1),
133+
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL2, 1),
134+
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL1, 1),
135+
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL0, 1),
136136
REG_FTR_END,
137137
};
138138

0 commit comments

Comments
 (0)