git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/commitdiff
KVM: PPC: Book3S HV: Move struct kvmppc_vcore from kvm_host.h to kvm_book3s.h
author: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
Tue, 2 Aug 2016 04:03:19 +0000 (14:03 +1000)
committer: Paul Mackerras <paulus@ozlabs.org>
Thu, 8 Sep 2016 02:21:44 +0000 (12:21 +1000)
The next commit will introduce a member to the kvmppc_vcore struct which
references MAX_SMT_THREADS which is defined in kvm_book3s_asm.h, however
this file isn't included in kvm_host.h directly. Thus compiling for
certain platforms such as pmac32_defconfig and ppc64e_defconfig with KVM
fails due to MAX_SMT_THREADS not being defined.

Move the struct kvmppc_vcore definition to kvm_book3s.h which explicitly
includes kvm_book3s_asm.h.

Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
arch/powerpc/include/asm/kvm_book3s.h
arch/powerpc/include/asm/kvm_host.h

index 8f39796c9da8dffaede2751f607375b1d5a4f24b..a50c5fec97905f618e77b212d1b369fe6e3e5728 100644 (file)
@@ -69,6 +69,41 @@ struct hpte_cache {
        int pagesize;
 };
 
+/*
+ * Struct for a virtual core.
+ * Note: entry_exit_map combines a bitmap of threads that have entered
+ * in the bottom 8 bits and a bitmap of threads that have exited in the
+ * next 8 bits.  This is so that we can atomically set the entry bit
+ * iff the exit map is 0 without taking a lock.
+ */
+struct kvmppc_vcore {
+       int n_runnable;
+       int num_threads;
+       int entry_exit_map;
+       int napping_threads;
+       int first_vcpuid;
+       u16 pcpu;
+       u16 last_cpu;
+       u8 vcore_state;
+       u8 in_guest;
+       struct kvmppc_vcore *master_vcore;
+       struct list_head runnable_threads;
+       struct list_head preempt_list;
+       spinlock_t lock;
+       struct swait_queue_head wq;
+       spinlock_t stoltb_lock; /* protects stolen_tb and preempt_tb */
+       u64 stolen_tb;
+       u64 preempt_tb;
+       struct kvm_vcpu *runner;
+       struct kvm *kvm;
+       u64 tb_offset;          /* guest timebase - host timebase */
+       ulong lpcr;
+       u32 arch_compat;
+       ulong pcr;
+       ulong dpdes;            /* doorbell state (POWER8) */
+       ulong conferring_threads;
+};
+
 struct kvmppc_vcpu_book3s {
        struct kvmppc_sid_map sid_map[SID_MAP_NUM];
        struct {
index e36ce0cff76658707d1adffdad36b3c9548c02ac..7ff9919916c386fe5ce293de97092ce61952851c 100644 (file)
@@ -277,41 +277,6 @@ struct kvm_arch {
 #endif
 };
 
-/*
- * Struct for a virtual core.
- * Note: entry_exit_map combines a bitmap of threads that have entered
- * in the bottom 8 bits and a bitmap of threads that have exited in the
- * next 8 bits.  This is so that we can atomically set the entry bit
- * iff the exit map is 0 without taking a lock.
- */
-struct kvmppc_vcore {
-       int n_runnable;
-       int num_threads;
-       int entry_exit_map;
-       int napping_threads;
-       int first_vcpuid;
-       u16 pcpu;
-       u16 last_cpu;
-       u8 vcore_state;
-       u8 in_guest;
-       struct kvmppc_vcore *master_vcore;
-       struct list_head runnable_threads;
-       struct list_head preempt_list;
-       spinlock_t lock;
-       struct swait_queue_head wq;
-       spinlock_t stoltb_lock; /* protects stolen_tb and preempt_tb */
-       u64 stolen_tb;
-       u64 preempt_tb;
-       struct kvm_vcpu *runner;
-       struct kvm *kvm;
-       u64 tb_offset;          /* guest timebase - host timebase */
-       ulong lpcr;
-       u32 arch_compat;
-       ulong pcr;
-       ulong dpdes;            /* doorbell state (POWER8) */
-       ulong conferring_threads;
-};
-
 #define VCORE_ENTRY_MAP(vc)    ((vc)->entry_exit_map & 0xff)
 #define VCORE_EXIT_MAP(vc)     ((vc)->entry_exit_map >> 8)
 #define VCORE_IS_EXITING(vc)   (VCORE_EXIT_MAP(vc) != 0)