arm64: KVM: rename pm_fake handler to trap_raz_wi
author Marc Zyngier <marc.zyngier@arm.com>
Thu, 24 Apr 2014 09:21:16 +0000 (10:21 +0100)
committer Christoffer Dall <christoffer.dall@linaro.org>
Fri, 11 Jul 2014 11:57:43 +0000 (04:57 -0700)
pm_fake doesn't quite describe what the handler does (ignoring writes
and returning 0 for reads).
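
For context, the handler keeps its RAZ/WI behaviour across the rename: writes
are silently ignored and reads return zero. A minimal sketch of its shape,
assuming the read half (which the hunk below truncates) uses the existing
read_zero() helper:

    static bool trap_raz_wi(struct kvm_vcpu *vcpu,
                            const struct sys_reg_params *p,
                            const struct sys_reg_desc *r)
    {
            /* WI: silently discard the guest's write */
            if (p->is_write)
                    return ignore_write(vcpu, p);
            /* RAZ: the guest reads back 0 */
            return read_zero(vcpu, p);
    }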

As we're about to use it (a lot) in a different context, rename it
with an (admittedly cryptic) name that makes sense for all users.

Reviewed-by: Anup Patel <anup.patel@linaro.org>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
arch/arm64/kvm/sys_regs.c

index 56288f3..492ba30 100644
@@ -161,18 +161,9 @@ static bool access_sctlr(struct kvm_vcpu *vcpu,
        return true;
 }
 
-/*
- * We could trap ID_DFR0 and tell the guest we don't support performance
- * monitoring.  Unfortunately the patch to make the kernel check ID_DFR0 was
- * NAKed, so it will read the PMCR anyway.
- *
- * Therefore we tell the guest we have 0 counters.  Unfortunately, we
- * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
- * all PM registers, which doesn't crash the guest kernel at least.
- */
-static bool pm_fake(struct kvm_vcpu *vcpu,
-                   const struct sys_reg_params *p,
-                   const struct sys_reg_desc *r)
+static bool trap_raz_wi(struct kvm_vcpu *vcpu,
+                       const struct sys_reg_params *p,
+                       const struct sys_reg_desc *r)
 {
        if (p->is_write)
                return ignore_write(vcpu, p);
@@ -199,6 +190,17 @@ static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 /*
  * Architected system registers.
  * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
+ *
+ * We could trap ID_DFR0 and tell the guest we don't support performance
+ * monitoring.  Unfortunately the patch to make the kernel check ID_DFR0 was
+ * NAKed, so it will read the PMCR anyway.
+ *
+ * Therefore we tell the guest we have 0 counters.  Unfortunately, we
+ * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
+ * all PM registers, which doesn't crash the guest kernel at least.
+ *
+ * Same goes for the whole debug infrastructure, which probably breaks
+ * some guest functionality. This should be fixed.
  */
 static const struct sys_reg_desc sys_reg_descs[] = {
        /* DC ISW */
@@ -258,10 +260,10 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 
        /* PMINTENSET_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
-         pm_fake },
+         trap_raz_wi },
        /* PMINTENCLR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010),
-         pm_fake },
+         trap_raz_wi },
 
        /* MAIR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
@@ -290,43 +292,43 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 
        /* PMCR_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000),
-         pm_fake },
+         trap_raz_wi },
        /* PMCNTENSET_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
-         pm_fake },
+         trap_raz_wi },
        /* PMCNTENCLR_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
-         pm_fake },
+         trap_raz_wi },
        /* PMOVSCLR_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
-         pm_fake },
+         trap_raz_wi },
        /* PMSWINC_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
-         pm_fake },
+         trap_raz_wi },
        /* PMSELR_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101),
-         pm_fake },
+         trap_raz_wi },
        /* PMCEID0_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110),
-         pm_fake },
+         trap_raz_wi },
        /* PMCEID1_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111),
-         pm_fake },
+         trap_raz_wi },
        /* PMCCNTR_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
-         pm_fake },
+         trap_raz_wi },
        /* PMXEVTYPER_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
-         pm_fake },
+         trap_raz_wi },
        /* PMXEVCNTR_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
-         pm_fake },
+         trap_raz_wi },
        /* PMUSERENR_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
-         pm_fake },
+         trap_raz_wi },
        /* PMOVSSET_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
-         pm_fake },
+         trap_raz_wi },
 
        /* TPIDR_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010),
@@ -372,19 +374,20 @@ static const struct sys_reg_desc cp15_regs[] = {
        { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
        { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
 
-       { Op1( 0), CRn( 9), CRm(12), Op2( 0), pm_fake },
-       { Op1( 0), CRn( 9), CRm(12), Op2( 1), pm_fake },
-       { Op1( 0), CRn( 9), CRm(12), Op2( 2), pm_fake },
-       { Op1( 0), CRn( 9), CRm(12), Op2( 3), pm_fake },
-       { Op1( 0), CRn( 9), CRm(12), Op2( 5), pm_fake },
-       { Op1( 0), CRn( 9), CRm(12), Op2( 6), pm_fake },
-       { Op1( 0), CRn( 9), CRm(12), Op2( 7), pm_fake },
-       { Op1( 0), CRn( 9), CRm(13), Op2( 0), pm_fake },
-       { Op1( 0), CRn( 9), CRm(13), Op2( 1), pm_fake },
-       { Op1( 0), CRn( 9), CRm(13), Op2( 2), pm_fake },
-       { Op1( 0), CRn( 9), CRm(14), Op2( 0), pm_fake },
-       { Op1( 0), CRn( 9), CRm(14), Op2( 1), pm_fake },
-       { Op1( 0), CRn( 9), CRm(14), Op2( 2), pm_fake },
+       /* PMU */
+       { Op1( 0), CRn( 9), CRm(12), Op2( 0), trap_raz_wi },
+       { Op1( 0), CRn( 9), CRm(12), Op2( 1), trap_raz_wi },
+       { Op1( 0), CRn( 9), CRm(12), Op2( 2), trap_raz_wi },
+       { Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi },
+       { Op1( 0), CRn( 9), CRm(12), Op2( 5), trap_raz_wi },
+       { Op1( 0), CRn( 9), CRm(12), Op2( 6), trap_raz_wi },
+       { Op1( 0), CRn( 9), CRm(12), Op2( 7), trap_raz_wi },
+       { Op1( 0), CRn( 9), CRm(13), Op2( 0), trap_raz_wi },
+       { Op1( 0), CRn( 9), CRm(13), Op2( 1), trap_raz_wi },
+       { Op1( 0), CRn( 9), CRm(13), Op2( 2), trap_raz_wi },
+       { Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi },
+       { Op1( 0), CRn( 9), CRm(14), Op2( 1), trap_raz_wi },
+       { Op1( 0), CRn( 9), CRm(14), Op2( 2), trap_raz_wi },
 
        { Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
        { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },