diff --git a/Documentation/arch/powerpc/kvm-nested.rst b/Documentation/arch/powerpc/kvm-nested.rst
index 630602a8aa008..5defd13cc6c17 100644
--- a/Documentation/arch/powerpc/kvm-nested.rst
+++ b/Documentation/arch/powerpc/kvm-nested.rst
@@ -546,7 +546,9 @@ table information.
 +--------+-------+----+--------+----------------------------------+
 | 0x1052 | 0x08  | RW |   T    | CTRL                             |
 +--------+-------+----+--------+----------------------------------+
-| 0x1053-|       |    |        | Reserved                         |
+| 0x1053 | 0x08  | RW |   T    | DPDES                            |
++--------+-------+----+--------+----------------------------------+
+| 0x1054-|       |    |        | Reserved                         |
 | 0x1FFF |       |    |        |                                  |
 +--------+-------+----+--------+----------------------------------+
 | 0x2000 | 0x04  | RW |   T    | CR                               |
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index a71d91978d9ef..899480d4acaf7 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -2439,8 +2439,11 @@ registers, find a list below:
   PPC     KVM_REG_PPC_PSSCR               64
   PPC     KVM_REG_PPC_DEC_EXPIRY          64
   PPC     KVM_REG_PPC_PTCR                64
+  PPC     KVM_REG_PPC_HASHKEYR            64
+  PPC     KVM_REG_PPC_HASHPKEYR           64
   PPC     KVM_REG_PPC_DAWR1               64
   PPC     KVM_REG_PPC_DAWRX1              64
+  PPC     KVM_REG_PPC_DEXCR               64
   PPC     KVM_REG_PPC_TM_GPR0             64
   ...
   PPC     KVM_REG_PPC_TM_GPR31            64
diff --git a/arch/powerpc/include/asm/guest-state-buffer.h b/arch/powerpc/include/asm/guest-state-buffer.h
index 808149f315763..d107abe1468fe 100644
--- a/arch/powerpc/include/asm/guest-state-buffer.h
+++ b/arch/powerpc/include/asm/guest-state-buffer.h
@@ -81,6 +81,7 @@
 #define KVMPPC_GSID_HASHKEYR            0x1050
 #define KVMPPC_GSID_HASHPKEYR           0x1051
 #define KVMPPC_GSID_CTRL                0x1052
+#define KVMPPC_GSID_DPDES               0x1053
 
 #define KVMPPC_GSID_CR                  0x2000
 #define KVMPPC_GSID_PIDR                0x2001
@@ -110,7 +111,7 @@
 #define KVMPPC_GSE_META_COUNT (KVMPPC_GSE_META_END - KVMPPC_GSE_META_START + 1)
 
 #define KVMPPC_GSE_DW_REGS_START KVMPPC_GSID_GPR(0)
-#define KVMPPC_GSE_DW_REGS_END KVMPPC_GSID_CTRL
+#define KVMPPC_GSE_DW_REGS_END KVMPPC_GSID_DPDES
 #define KVMPPC_GSE_DW_REGS_COUNT \
         (KVMPPC_GSE_DW_REGS_END - KVMPPC_GSE_DW_REGS_START + 1)
 
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 3e1e2a698c9e7..10618622d7efc 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -594,6 +594,7 @@ static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \
 
 
 KVMPPC_BOOK3S_VCORE_ACCESSOR(vtb, 64, KVMPPC_GSID_VTB)
+KVMPPC_BOOK3S_VCORE_ACCESSOR(dpdes, 64, KVMPPC_GSID_DPDES)
 KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(arch_compat, 32, KVMPPC_GSID_LOGICAL_PVR)
 KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(lpcr, 64, KVMPPC_GSID_LPCR)
 KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(tb_offset, 64, KVMPPC_GSID_TB_OFFSET)
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index d8729ec81ca08..2ef9a5f4e5d14 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -684,6 +684,11 @@ int kvmhv_nestedv2_set_ptbl_entry(unsigned long lpid, u64 dw0, u64 dw1);
 int kvmhv_nestedv2_parse_output(struct kvm_vcpu *vcpu);
 int kvmhv_nestedv2_set_vpa(struct kvm_vcpu *vcpu, unsigned long vpa);
 
+int kmvhv_counters_tracepoint_regfunc(void);
+void kmvhv_counters_tracepoint_unregfunc(void);
+int kvmhv_get_l2_counters_status(void);
+void kvmhv_set_l2_counters_status(int cpu, bool status);
+
 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 
 #endif /* __ASM_KVM_BOOK3S_64_H__ */
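For context, the registers documented above are reached through the existing KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls once the matching IDs are added to the uapi header further down. A minimal userspace sketch, not part of the patch; "vcpu_fd" is assumed to be an already-open KVM vCPU file descriptor and error handling is omitted:

/* Sketch: read the guest's DEXCR through the ONE_REG interface. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int read_guest_dexcr(int vcpu_fd, uint64_t *dexcr)
{
        struct kvm_one_reg reg = {
                .id   = KVM_REG_PPC_DEXCR,      /* ID added by this patch */
                .addr = (uintptr_t)dexcr,       /* kernel stores the value here */
        };

        return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}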
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 8abac532146e7..6a0c771d3ce89 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -599,6 +599,9 @@ struct kvm_vcpu_arch {
         ulong dawrx0;
         ulong dawr1;
         ulong dawrx1;
+        ulong dexcr;
+        ulong hashkeyr;
+        ulong hashpkeyr;
         ulong ciabr;
         ulong cfar;
         ulong ppr;
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index 61ec2447dabf5..f40a646bee3cb 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -62,7 +62,8 @@ struct lppaca {
         u8      donate_dedicated_cpu;   /* Donate dedicated CPU cycles */
         u8      fpregs_in_use;
         u8      pmcregs_in_use;
-        u8      reserved8[28];
+        u8      l2_counters_enable;     /* Enable usage of counters for KVM guest */
+        u8      reserved8[27];
         __be64  wait_state_cycles;      /* Wait cycles for this proc */
         u8      reserved9[28];
         __be16  slb_count;              /* # of SLBs to maintain */
@@ -92,9 +93,13 @@ struct lppaca {
         /* cacheline 4-5 */
 
         __be32  page_ins;               /* CMO Hint - # page ins by OS */
-        u8      reserved12[148];
+        u8      reserved12[28];
+        volatile __be64 l1_to_l2_cs_tb;
+        volatile __be64 l2_to_l1_cs_tb;
+        volatile __be64 l2_runtime_tb;
+        u8      reserved13[96];
         volatile __be64 dtl_idx;        /* Dispatch Trace Log head index */
-        u8      reserved13[96];
+        u8      reserved14[96];
 } ____cacheline_aligned;
 
 #define lppaca_of(cpu)  (*paca_ptrs[cpu]->lppaca_ptr)
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index 1691297a766a9..eaeda001784eb 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -645,6 +645,9 @@ struct kvm_ppc_cpu_char {
 #define KVM_REG_PPC_SIER3       (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc3)
 #define KVM_REG_PPC_DAWR1       (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc4)
 #define KVM_REG_PPC_DAWRX1      (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc5)
+#define KVM_REG_PPC_DEXCR       (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc6)
+#define KVM_REG_PPC_HASHKEYR    (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc7)
+#define KVM_REG_PPC_HASHPKEYR   (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc8)
 
 /* Transactional Memory checkpointed state:
  * This is all GPRs, all VSX regs and a subset of SPRs
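The lppaca changes above are size-neutral: reserved8[28] becomes l2_counters_enable plus reserved8[27] (1 + 27 = 28), and reserved12[148] becomes reserved12[28] plus three 8-byte timebase counters plus reserved13[96] (28 + 24 + 96 = 148), so dtl_idx and the rest of the hypervisor-shared layout do not move. A hypothetical build-time check illustrating that invariant, not part of the patch:

/* Sketch: page_ins (4 bytes) plus the 148 formerly reserved bytes must
 * still land exactly on dtl_idx.  Hypothetical assertion only.
 */
#include <linux/stddef.h>
#include <linux/build_bug.h>
#include <asm/lppaca.h>

static inline void lppaca_layout_check(void)
{
        BUILD_BUG_ON(offsetof(struct lppaca, dtl_idx) -
                     offsetof(struct lppaca, page_ins) != 4 + 148);
}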
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index daaf7faf21a5e..bcf286df067fd 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2305,7 +2305,7 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
                 *val = get_reg_val(id, kvmppc_get_siar_hv(vcpu));
                 break;
         case KVM_REG_PPC_SDAR:
-                *val = get_reg_val(id, kvmppc_get_siar_hv(vcpu));
+                *val = get_reg_val(id, kvmppc_get_sdar_hv(vcpu));
                 break;
         case KVM_REG_PPC_SIER:
                 *val = get_reg_val(id, kvmppc_get_sier_hv(vcpu, 0));
@@ -2349,6 +2349,15 @@
         case KVM_REG_PPC_DAWRX1:
                 *val = get_reg_val(id, kvmppc_get_dawrx1_hv(vcpu));
                 break;
+        case KVM_REG_PPC_DEXCR:
+                *val = get_reg_val(id, kvmppc_get_dexcr_hv(vcpu));
+                break;
+        case KVM_REG_PPC_HASHKEYR:
+                *val = get_reg_val(id, kvmppc_get_hashkeyr_hv(vcpu));
+                break;
+        case KVM_REG_PPC_HASHPKEYR:
+                *val = get_reg_val(id, kvmppc_get_hashpkeyr_hv(vcpu));
+                break;
         case KVM_REG_PPC_CIABR:
                 *val = get_reg_val(id, kvmppc_get_ciabr_hv(vcpu));
                 break;
@@ -2540,7 +2549,7 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
                 vcpu->arch.mmcrs = set_reg_val(id, *val);
                 break;
         case KVM_REG_PPC_MMCR3:
-                *val = get_reg_val(id, vcpu->arch.mmcr[3]);
+                kvmppc_set_mmcr_hv(vcpu, 3, set_reg_val(id, *val));
                 break;
         case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
                 i = id - KVM_REG_PPC_PMC1;
@@ -2592,6 +2601,15 @@
         case KVM_REG_PPC_DAWRX1:
                 kvmppc_set_dawrx1_hv(vcpu, set_reg_val(id, *val) & ~DAWRX_HYP);
                 break;
+        case KVM_REG_PPC_DEXCR:
+                kvmppc_set_dexcr_hv(vcpu, set_reg_val(id, *val));
+                break;
+        case KVM_REG_PPC_HASHKEYR:
+                kvmppc_set_hashkeyr_hv(vcpu, set_reg_val(id, *val));
+                break;
+        case KVM_REG_PPC_HASHPKEYR:
+                kvmppc_set_hashpkeyr_hv(vcpu, set_reg_val(id, *val));
+                break;
         case KVM_REG_PPC_CIABR:
                 kvmppc_set_ciabr_hv(vcpu, set_reg_val(id, *val));
                 /* Don't allow setting breakpoints in hypervisor code */
@@ -4108,6 +4126,77 @@ static void vcpu_vpa_increment_dispatch(struct kvm_vcpu *vcpu)
         }
 }
 
+/* Helper functions for reading L2's stats from L1's VPA */
+#ifdef CONFIG_PPC_PSERIES
+static DEFINE_PER_CPU(u64, l1_to_l2_cs);
+static DEFINE_PER_CPU(u64, l2_to_l1_cs);
+static DEFINE_PER_CPU(u64, l2_runtime_agg);
+
+int kvmhv_get_l2_counters_status(void)
+{
+        return firmware_has_feature(FW_FEATURE_LPAR) &&
+                get_lppaca()->l2_counters_enable;
+}
+
+void kvmhv_set_l2_counters_status(int cpu, bool status)
+{
+        if (!firmware_has_feature(FW_FEATURE_LPAR))
+                return;
+        if (status)
+                lppaca_of(cpu).l2_counters_enable = 1;
+        else
+                lppaca_of(cpu).l2_counters_enable = 0;
+}
+
+int kmvhv_counters_tracepoint_regfunc(void)
+{
+        int cpu;
+
+        for_each_present_cpu(cpu) {
+                kvmhv_set_l2_counters_status(cpu, true);
+        }
+        return 0;
+}
+
+void kmvhv_counters_tracepoint_unregfunc(void)
+{
+        int cpu;
+
+        for_each_present_cpu(cpu) {
+                kvmhv_set_l2_counters_status(cpu, false);
+        }
+}
+
+static void do_trace_nested_cs_time(struct kvm_vcpu *vcpu)
+{
+        struct lppaca *lp = get_lppaca();
+        u64 l1_to_l2_ns, l2_to_l1_ns, l2_runtime_ns;
+        u64 *l1_to_l2_cs_ptr = this_cpu_ptr(&l1_to_l2_cs);
+        u64 *l2_to_l1_cs_ptr = this_cpu_ptr(&l2_to_l1_cs);
+        u64 *l2_runtime_agg_ptr = this_cpu_ptr(&l2_runtime_agg);
+
+        l1_to_l2_ns = tb_to_ns(be64_to_cpu(lp->l1_to_l2_cs_tb));
+        l2_to_l1_ns = tb_to_ns(be64_to_cpu(lp->l2_to_l1_cs_tb));
+        l2_runtime_ns = tb_to_ns(be64_to_cpu(lp->l2_runtime_tb));
+        trace_kvmppc_vcpu_stats(vcpu, l1_to_l2_ns - *l1_to_l2_cs_ptr,
+                                        l2_to_l1_ns - *l2_to_l1_cs_ptr,
+                                        l2_runtime_ns - *l2_runtime_agg_ptr);
+        *l1_to_l2_cs_ptr = l1_to_l2_ns;
+        *l2_to_l1_cs_ptr = l2_to_l1_ns;
+        *l2_runtime_agg_ptr = l2_runtime_ns;
+}
+
+#else
+int kvmhv_get_l2_counters_status(void)
+{
+        return 0;
+}
+
+static void do_trace_nested_cs_time(struct kvm_vcpu *vcpu)
+{
+}
+#endif
+
 static int kvmhv_vcpu_entry_nestedv2(struct kvm_vcpu *vcpu, u64 time_limit,
                                      unsigned long lpcr, u64 *tb)
 {
@@ -4116,6 +4205,11 @@ static int kvmhv_vcpu_entry_nestedv2(struct kvm_vcpu *vcpu, u64 time_limit,
         int trap;
         long rc;
 
+        if (vcpu->arch.doorbell_request) {
+                vcpu->arch.doorbell_request = 0;
+                kvmppc_set_dpdes(vcpu, 1);
+        }
+
         io = &vcpu->arch.nestedv2_io;
 
         msr = mfmsr();
@@ -4156,6 +4250,10 @@ static int kvmhv_vcpu_entry_nestedv2(struct kvm_vcpu *vcpu, u64 time_limit,
 
         timer_rearm_host_dec(*tb);
 
+        /* Record context switch and guest_run_time data */
+        if (kvmhv_get_l2_counters_status())
+                do_trace_nested_cs_time(vcpu);
+
         return trap;
 }
 
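The VPA counters accumulate raw timebase ticks, so do_trace_nested_cs_time() converts each big-endian value with tb_to_ns() and emits only the delta since the previous exit, keeping the running totals in the per-CPU variables. As a rough sketch of the conversion (the real tb_to_ns() uses a precomputed mult/shift pair rather than a division; ppc_tb_freq is typically 512 MHz on recent Power machines, so one tick is roughly 1.95 ns):

/* Sketch: approximate what tb_to_ns() computes for a tick count.
 * Ignores the mult/shift rounding of the real helper and can overflow
 * for very large tick counts; illustration only.
 */
#include <linux/math64.h>
#include <linux/time64.h>
#include <asm/time.h>

static u64 approx_tb_to_ns(u64 tb_ticks)
{
        return div_u64(tb_ticks * NSEC_PER_SEC, (u32)ppc_tb_freq);
}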
diff --git a/arch/powerpc/kvm/book3s_hv.h b/arch/powerpc/kvm/book3s_hv.h
index 47b2c815641e4..a404c9b221c1a 100644
--- a/arch/powerpc/kvm/book3s_hv.h
+++ b/arch/powerpc/kvm/book3s_hv.h
@@ -116,6 +116,9 @@ KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawr0, 64, KVMPPC_GSID_DAWR0)
 KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawr1, 64, KVMPPC_GSID_DAWR1)
 KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawrx0, 64, KVMPPC_GSID_DAWRX0)
 KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawrx1, 64, KVMPPC_GSID_DAWRX1)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dexcr, 64, KVMPPC_GSID_DEXCR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(hashkeyr, 64, KVMPPC_GSID_HASHKEYR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(hashpkeyr, 64, KVMPPC_GSID_HASHPKEYR)
 KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ciabr, 64, KVMPPC_GSID_CIABR)
 KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(wort, 64, KVMPPC_GSID_WORT)
 KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ppr, 64, KVMPPC_GSID_PPR)
diff --git a/arch/powerpc/kvm/book3s_hv_nestedv2.c b/arch/powerpc/kvm/book3s_hv_nestedv2.c
index 1091f7a83b255..eeecea8f202b3 100644
--- a/arch/powerpc/kvm/book3s_hv_nestedv2.c
+++ b/arch/powerpc/kvm/book3s_hv_nestedv2.c
@@ -193,6 +193,15 @@ static int gs_msg_ops_vcpu_fill_info(struct kvmppc_gs_buff *gsb,
                 case KVMPPC_GSID_DAWRX1:
                         rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.dawrx1);
                         break;
+                case KVMPPC_GSID_DEXCR:
+                        rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dexcr);
+                        break;
+                case KVMPPC_GSID_HASHKEYR:
+                        rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.hashkeyr);
+                        break;
+                case KVMPPC_GSID_HASHPKEYR:
+                        rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.hashpkeyr);
+                        break;
                 case KVMPPC_GSID_CIABR:
                         rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ciabr);
                         break;
@@ -311,6 +320,10 @@ static int gs_msg_ops_vcpu_fill_info(struct kvmppc_gs_buff *gsb,
                         rc = kvmppc_gse_put_u64(gsb, iden,
                                                 vcpu->arch.vcore->vtb);
                         break;
+                case KVMPPC_GSID_DPDES:
+                        rc = kvmppc_gse_put_u64(gsb, iden,
+                                                vcpu->arch.vcore->dpdes);
+                        break;
                 case KVMPPC_GSID_LPCR:
                         rc = kvmppc_gse_put_u64(gsb, iden,
                                                 vcpu->arch.vcore->lpcr);
@@ -441,6 +454,15 @@ static int gs_msg_ops_vcpu_refresh_info(struct kvmppc_gs_msg *gsm,
                 case KVMPPC_GSID_DAWRX1:
                         vcpu->arch.dawrx1 = kvmppc_gse_get_u32(gse);
                         break;
+                case KVMPPC_GSID_DEXCR:
+                        vcpu->arch.dexcr = kvmppc_gse_get_u64(gse);
+                        break;
+                case KVMPPC_GSID_HASHKEYR:
+                        vcpu->arch.hashkeyr = kvmppc_gse_get_u64(gse);
+                        break;
+                case KVMPPC_GSID_HASHPKEYR:
+                        vcpu->arch.hashpkeyr = kvmppc_gse_get_u64(gse);
+                        break;
                 case KVMPPC_GSID_CIABR:
                         vcpu->arch.ciabr = kvmppc_gse_get_u64(gse);
                         break;
@@ -543,6 +565,9 @@
                 case KVMPPC_GSID_VTB:
                         vcpu->arch.vcore->vtb = kvmppc_gse_get_u64(gse);
                         break;
+                case KVMPPC_GSID_DPDES:
+                        vcpu->arch.vcore->dpdes = kvmppc_gse_get_u64(gse);
+                        break;
                 case KVMPPC_GSID_LPCR:
                         vcpu->arch.vcore->lpcr = kvmppc_gse_get_u64(gse);
                         break;
diff --git a/arch/powerpc/kvm/test-guest-state-buffer.c b/arch/powerpc/kvm/test-guest-state-buffer.c
index 4720b8dc88379..2571ccc618c96 100644
--- a/arch/powerpc/kvm/test-guest-state-buffer.c
+++ b/arch/powerpc/kvm/test-guest-state-buffer.c
@@ -151,7 +151,7 @@ static void test_gs_bitmap(struct kunit *test)
                 i++;
         }
 
-        for (u16 iden = KVMPPC_GSID_GPR(0); iden <= KVMPPC_GSID_CTRL; iden++) {
+        for (u16 iden = KVMPPC_GSID_GPR(0); iden <= KVMPPC_GSE_DW_REGS_END; iden++) {
                 kvmppc_gsbm_set(&gsbm, iden);
                 kvmppc_gsbm_set(&gsbm1, iden);
                 KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden));
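The KVMPPC_BOOK3S_HV_VCPU_ACCESSOR() lines above generate the kvmppc_get_dexcr_hv()/kvmppc_set_dexcr_hv() style helpers used by the ONE_REG handlers in book3s_hv.c. Conceptually, the setter caches the value in the vcpu struct and marks the guest-state ID dirty so the next guest state buffer exchange pushes it to the L0 hypervisor. A simplified sketch, not the actual macro expansion, assuming the existing kvmhv_nestedv2_mark_dirty() helper:

/* Simplified illustration of the generated setter for DEXCR; the real
 * expansion comes from KVMPPC_BOOK3S_HV_VCPU_ACCESSOR() and also covers
 * the getter's reload-from-L0 path.  The mark-dirty return value is
 * ignored here for brevity.
 */
#include <linux/kvm_host.h>
#include <asm/kvm_book3s_64.h>
#include <asm/guest-state-buffer.h>

static inline void kvmppc_set_dexcr_hv_sketch(struct kvm_vcpu *vcpu, u64 val)
{
        vcpu->arch.dexcr = val;
        kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_DEXCR);
}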
diff --git a/arch/powerpc/kvm/trace_hv.h b/arch/powerpc/kvm/trace_hv.h
index 8d57c84285319..77ebc724e6cdf 100644
--- a/arch/powerpc/kvm/trace_hv.h
+++ b/arch/powerpc/kvm/trace_hv.h
@@ -512,6 +512,35 @@ TRACE_EVENT(kvmppc_run_vcpu_exit,
                         __entry->vcpu_id,  __entry->exit, __entry->ret)
 );
 
+#ifdef CONFIG_PPC_PSERIES
+
+TRACE_EVENT_FN_COND(kvmppc_vcpu_stats,
+        TP_PROTO(struct kvm_vcpu *vcpu, u64 l1_to_l2_cs, u64 l2_to_l1_cs, u64 l2_runtime),
+
+        TP_ARGS(vcpu, l1_to_l2_cs, l2_to_l1_cs, l2_runtime),
+
+        TP_CONDITION(l1_to_l2_cs || l2_to_l1_cs || l2_runtime),
+
+        TP_STRUCT__entry(
+                __field(int, vcpu_id)
+                __field(u64, l1_to_l2_cs)
+                __field(u64, l2_to_l1_cs)
+                __field(u64, l2_runtime)
+        ),
+
+        TP_fast_assign(
+                __entry->vcpu_id = vcpu->vcpu_id;
+                __entry->l1_to_l2_cs = l1_to_l2_cs;
+                __entry->l2_to_l1_cs = l2_to_l1_cs;
+                __entry->l2_runtime = l2_runtime;
+        ),
+
+        TP_printk("VCPU %d: l1_to_l2_cs_time=%llu ns l2_to_l1_cs_time=%llu ns l2_runtime=%llu ns",
+                __entry->vcpu_id,  __entry->l1_to_l2_cs,
+                __entry->l2_to_l1_cs, __entry->l2_runtime),
+        kmvhv_counters_tracepoint_regfunc, kmvhv_counters_tracepoint_unregfunc
+);
+#endif
 #endif /* _TRACE_KVM_HV_H */
 
 /* This part must be outside protection */
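Because the event is declared with TRACE_EVENT_FN_COND, kmvhv_counters_tracepoint_regfunc() runs when the first probe attaches and sets l2_counters_enable in every present CPU's lppaca, and kmvhv_counters_tracepoint_unregfunc() clears it again when the last probe detaches, so the hypervisor only maintains the counters while someone is actually tracing. TP_CONDITION() additionally suppresses records in which all three deltas are zero. Assuming the header's existing TRACE_SYSTEM of kvm_hv, the event should appear under /sys/kernel/tracing/events/kvm_hv/kvmppc_vcpu_stats/ once kvm-hv is loaded.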