KVM: x86: hyper-v: Rename vcpu_to_synic()/synic_to_vcpu()
author     Vitaly Kuznetsov <vkuznets@redhat.com>
           Tue, 26 Jan 2021 13:48:06 +0000 (14:48 +0100)
committer  Paolo Bonzini <pbonzini@redhat.com>
           Tue, 9 Feb 2021 13:17:11 +0000 (08:17 -0500)
vcpu_to_synic()'s argument is almost always 'vcpu' so there's no need to
have an additional prefix. Also, as this is used outside of hyper-v
emulation code, add '_hv_' part to make it clear what this is. This makes
the naming more consistent with to_hv_vcpu().

Rename synic_to_vcpu() to hv_synic_to_vcpu() for consistency.

No functional change intended.

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20210126134816.1880136-6-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/hyperv.c
arch/x86/kvm/hyperv.h
arch/x86/kvm/lapic.c
arch/x86/kvm/x86.c
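
For readers skimming the diff, the two helpers being renamed are inverse
accessors. Below is a minimal, self-contained userspace sketch (not kernel
code: the struct layout is stubbed down to just the nesting the helpers rely
on, and container_of() is redefined locally) illustrating that to_hv_synic()
and hv_synic_to_vcpu() round-trip back to the same vcpu, which is why the
rename is purely cosmetic.

    /* Standalone sketch; field and type names follow the kernel, bodies are stubs. */
    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct kvm_vcpu_hv_synic { int active; };

    struct kvm_vcpu_hv {
            struct kvm_vcpu_hv_synic synic;
    };

    struct kvm_vcpu_arch {
            struct kvm_vcpu_hv hyperv;
    };

    struct kvm_vcpu {
            struct kvm_vcpu_arch arch;
    };

    /* vcpu -> SynIC: a plain member access, hence the short to_hv_synic() name. */
    static inline struct kvm_vcpu_hv_synic *to_hv_synic(struct kvm_vcpu *vcpu)
    {
            return &vcpu->arch.hyperv.synic;
    }

    /*
     * SynIC -> vcpu: walk back out through the enclosing structs, mirroring
     * hv_vcpu_to_vcpu(container_of(...)) in the hyperv.h hunk below.
     */
    static inline struct kvm_vcpu *hv_synic_to_vcpu(struct kvm_vcpu_hv_synic *synic)
    {
            struct kvm_vcpu_hv *hv_vcpu = container_of(synic, struct kvm_vcpu_hv, synic);
            struct kvm_vcpu_arch *arch = container_of(hv_vcpu, struct kvm_vcpu_arch, hyperv);

            return container_of(arch, struct kvm_vcpu, arch);
    }

    int main(void)
    {
            struct kvm_vcpu vcpu = { 0 };

            /* The two helpers are inverses of each other. */
            assert(hv_synic_to_vcpu(to_hv_synic(&vcpu)) == &vcpu);
            printf("round trip ok\n");
            return 0;
    }

With the rename applied, call sites read as to_hv_synic(vcpu) and
hv_synic_to_vcpu(synic), matching the existing to_hv_vcpu() convention.
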

diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 46ccbb1e5e886628de1cf7affcdd984fca50133b..b9e1f0609ae41ad51420f6e0f05224729c6b3942 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -129,7 +129,7 @@ static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint,
        synic_update_vector(synic, vector);
 
        /* Load SynIC vectors into EOI exit bitmap */
-       kvm_make_request(KVM_REQ_SCAN_IOAPIC, synic_to_vcpu(synic));
+       kvm_make_request(KVM_REQ_SCAN_IOAPIC, hv_synic_to_vcpu(synic));
        return 0;
 }
 
@@ -158,14 +158,14 @@ static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vpidx)
        vcpu = get_vcpu_by_vpidx(kvm, vpidx);
        if (!vcpu)
                return NULL;
-       synic = vcpu_to_synic(vcpu);
+       synic = to_hv_synic(vcpu);
        return (synic->active) ? synic : NULL;
 }
 
 static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
 {
        struct kvm *kvm = vcpu->kvm;
-       struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
+       struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
        struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
        struct kvm_vcpu_hv_stimer *stimer;
        int gsi, idx;
@@ -190,7 +190,7 @@ static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
 
 static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
 {
-       struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
+       struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
        struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
 
        hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC;
@@ -205,7 +205,7 @@ static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
 static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
                         u32 msr, u64 data, bool host)
 {
-       struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
+       struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
        int ret;
 
        if (!synic->active && !host)
@@ -422,7 +422,7 @@ static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata,
 
 static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
 {
-       struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
+       struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
        struct kvm_lapic_irq irq;
        int ret, vector;
 
@@ -458,7 +458,7 @@ int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vpidx, u32 sint)
 
 void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
 {
-       struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
+       struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
        int i;
 
        trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector);
@@ -635,7 +635,7 @@ static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
        union hv_stimer_config new_config = {.as_uint64 = config},
                old_config = {.as_uint64 = stimer->config.as_uint64};
        struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
-       struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
+       struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
 
        if (!synic->active && !host)
                return 1;
@@ -659,7 +659,7 @@ static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
                            bool host)
 {
        struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
-       struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
+       struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
 
        if (!synic->active && !host)
                return 1;
@@ -695,7 +695,7 @@ static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount)
 static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
                             struct hv_message *src_msg, bool no_retry)
 {
-       struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
+       struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
        int msg_off = offsetof(struct hv_message_page, sint_message[sint]);
        gfn_t msg_page_gfn;
        struct hv_message_header hv_hdr;
@@ -764,7 +764,7 @@ static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
 
        payload->expiration_time = stimer->exp_time;
        payload->delivery_time = get_time_ref_counter(vcpu->kvm);
-       return synic_deliver_msg(vcpu_to_synic(vcpu),
+       return synic_deliver_msg(to_hv_synic(vcpu),
                                 stimer->config.sintx, msg,
                                 no_retry);
 }
@@ -902,7 +902,7 @@ void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu)
 
 int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages)
 {
-       struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
+       struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
 
        /*
         * Hyper-V SynIC auto EOI SINT's are
@@ -1309,7 +1309,7 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
        case HV_X64_MSR_SIMP:
        case HV_X64_MSR_EOM:
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
-               return synic_set_msr(vcpu_to_synic(vcpu), msr, data, host);
+               return synic_set_msr(to_hv_synic(vcpu), msr, data, host);
        case HV_X64_MSR_STIMER0_CONFIG:
        case HV_X64_MSR_STIMER1_CONFIG:
        case HV_X64_MSR_STIMER2_CONFIG:
@@ -1421,7 +1421,7 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
        case HV_X64_MSR_SIMP:
        case HV_X64_MSR_EOM:
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
-               return synic_get_msr(vcpu_to_synic(vcpu), msr, pdata, host);
+               return synic_get_msr(to_hv_synic(vcpu), msr, pdata, host);
        case HV_X64_MSR_STIMER0_CONFIG:
        case HV_X64_MSR_STIMER1_CONFIG:
        case HV_X64_MSR_STIMER2_CONFIG:
@@ -1811,7 +1811,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
                fallthrough;    /* maybe userspace knows this conn_id */
        case HVCALL_POST_MESSAGE:
                /* don't bother userspace if it has no way to handle it */
-               if (unlikely(rep || !vcpu_to_synic(vcpu)->active)) {
+               if (unlikely(rep || !to_hv_synic(vcpu)->active)) {
                        ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
                        break;
                }
diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
index f5e0d3d1d54ade21b092b2d2ed33904c1393acd5..d47b3f045a25f558ee4e0d8dcb5218b158e56db7 100644
--- a/arch/x86/kvm/hyperv.h
+++ b/arch/x86/kvm/hyperv.h
@@ -63,12 +63,12 @@ static inline struct kvm_vcpu *hv_vcpu_to_vcpu(struct kvm_vcpu_hv *hv_vcpu)
        return container_of(arch, struct kvm_vcpu, arch);
 }
 
-static inline struct kvm_vcpu_hv_synic *vcpu_to_synic(struct kvm_vcpu *vcpu)
+static inline struct kvm_vcpu_hv_synic *to_hv_synic(struct kvm_vcpu *vcpu)
 {
        return &vcpu->arch.hyperv.synic;
 }
 
-static inline struct kvm_vcpu *synic_to_vcpu(struct kvm_vcpu_hv_synic *synic)
+static inline struct kvm_vcpu *hv_synic_to_vcpu(struct kvm_vcpu_hv_synic *synic)
 {
        return hv_vcpu_to_vcpu(container_of(synic, struct kvm_vcpu_hv, synic));
 }
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 9e0f78c0a256bbd84bfb03a0e5e60066e2deb85c..847fe11a49f5a1ff311493e969983e788070e0ac 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1245,7 +1245,7 @@ static int apic_set_eoi(struct kvm_lapic *apic)
        apic_clear_isr(vector, apic);
        apic_update_ppr(apic);
 
-       if (test_bit(vector, vcpu_to_synic(apic->vcpu)->vec_bitmap))
+       if (test_bit(vector, to_hv_synic(apic->vcpu)->vec_bitmap))
                kvm_hv_synic_send_eoi(apic->vcpu, vector);
 
        kvm_ioapic_send_eoi(apic, vector);
@@ -2512,7 +2512,7 @@ int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
         */
 
        apic_clear_irr(vector, apic);
-       if (test_bit(vector, vcpu_to_synic(vcpu)->auto_eoi_bitmap)) {
+       if (test_bit(vector, to_hv_synic(vcpu)->auto_eoi_bitmap)) {
                /*
                 * For auto-EOI interrupts, there might be another pending
                 * interrupt above PPR, so check whether to raise another
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index baa90ae76ba579fafe552906816211f36868df88..4d410c722728803a1e5133cc3721964eb8a9f0e9 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8804,7 +8804,7 @@ static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
                return;
 
        bitmap_or((ulong *)eoi_exit_bitmap, vcpu->arch.ioapic_handled_vectors,
-                 vcpu_to_synic(vcpu)->vec_bitmap, 256);
+                 to_hv_synic(vcpu)->vec_bitmap, 256);
        static_call(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap);
 }