Replace the va_b and va_e fields with the interval tree node.
The actual interval tree is not yet used.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
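
For reference, the node type this patch embeds is QEMU's IntervalTreeNode
from include/qemu/interval-tree.h. A sketch of its layout (comments added
here) shows why the old fields map one-for-one: the tree keys each node on
an inclusive [start, last] range, which is exactly what va_b/va_e expressed:

typedef struct IntervalTreeNode {
    RBNode rb;              /* red-black tree linkage */
    uint64_t start;         /* first address covered; replaces va_b */
    uint64_t last;          /* last address covered, inclusive; replaces va_e */
    uint64_t subtree_last;  /* max 'last' below this node, kept by the tree */
} IntervalTreeNode;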
#include "cpu-qom.h"
#include "exec/cpu-defs.h"
#include "qemu/cpu-float.h"
+#include "qemu/interval-tree.h"
/* PA-RISC 1.x processors have a strong memory model. */
/* ??? While we do not yet implement PA-RISC 2.0, those processors have
#endif
typedef struct HPPATLBEntry {
- uint64_t va_b;
- uint64_t va_e;
+ IntervalTreeNode itree;
+
target_ureg pa;
unsigned u : 1;
unsigned t : 1;
memset(ent, 0, sizeof(*ent));
- ent->va_b = qemu_get_be64(f);
+ ent->itree.start = qemu_get_be64(f);
ent->pa = qemu_get_betr(f);
val = qemu_get_be32(f);
ent->d = extract32(val, 28, 1);
ent->t = extract32(val, 29, 1);
- ent->va_e = ent->va_b + TARGET_PAGE_SIZE - 1;
+ ent->itree.last = ent->itree.start + TARGET_PAGE_SIZE - 1;
val = deposit32(val, 29, 1, ent->t);
}
- qemu_put_be64(f, ent->va_b);
+ qemu_put_be64(f, ent->itree.start);
qemu_put_betr(f, ent->pa);
qemu_put_be32(f, val);
return 0;
for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
HPPATLBEntry *ent = &env->tlb[i];
- if (ent->va_b <= addr && addr <= ent->va_e) {
+ if (ent->itree.start <= addr && addr <= ent->itree.last) {
trace_hppa_tlb_find_entry(env, ent + i, ent->entry_valid,
- ent->va_b, ent->va_e, ent->pa);
+ ent->itree.start, ent->itree.last,
+ ent->pa);
- trace_hppa_tlb_flush_ent(env, ent, ent->va_b, ent->va_e, ent->pa);
+ trace_hppa_tlb_flush_ent(env, ent, ent->itree.start,
+ ent->itree.last, ent->pa);
- tlb_flush_range_by_mmuidx(cs, ent->va_b,
- ent->va_e - ent->va_b + 1,
- HPPA_MMU_FLUSH_MASK, TARGET_LONG_BITS);
+ tlb_flush_range_by_mmuidx(cs, ent->itree.start,
+ ent->itree.last - ent->itree.start + 1,
+ HPPA_MMU_FLUSH_MASK, TARGET_LONG_BITS);
/* never clear BTLBs, unless forced to do so. */
if (ent < &env->tlb[HPPA_BTLB_ENTRIES] && !force_flush_btlb) {
}
memset(ent, 0, sizeof(*ent));
}
static HPPATLBEntry *hppa_alloc_tlb_ent(CPUHPPAState *env)
}
/* We now know the physical address. */
- phys = ent->pa + (addr - ent->va_b);
+ phys = ent->pa + (addr - ent->itree.start);
/* Map TLB access_rights field to QEMU protection. */
priv = MMU_IDX_TO_PRIV(mmu_idx);
/* Zap any old entries covering ADDR; notice empty entries on the way. */
for (i = HPPA_BTLB_ENTRIES; i < ARRAY_SIZE(env->tlb); ++i) {
HPPATLBEntry *ent = &env->tlb[i];
- if (ent->va_b <= addr && addr <= ent->va_e) {
+ if (ent->itree.start <= addr && addr <= ent->itree.last) {
if (ent->entry_valid) {
hppa_flush_tlb_ent(env, ent, false);
}
}
/* Note that empty->entry_valid == 0 already. */
- empty->va_b = addr & TARGET_PAGE_MASK;
- empty->va_e = empty->va_b + TARGET_PAGE_SIZE - 1;
+ empty->itree.start = addr & TARGET_PAGE_MASK;
+ empty->itree.last = empty->itree.start + TARGET_PAGE_SIZE - 1;
empty->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
- trace_hppa_tlb_itlba(env, empty, empty->va_b, empty->va_e, empty->pa);
+ trace_hppa_tlb_itlba(env, empty, empty->itree.start,
+ empty->itree.last, empty->pa);
}
static void set_access_bits(CPUHPPAState *env, HPPATLBEntry *ent, target_ureg reg)
/* force flush of possibly existing BTLB entry */
hppa_flush_tlb_ent(env, btlb, true);
/* create new BTLB entry */
- btlb->va_b = virt_page << TARGET_PAGE_BITS;
- btlb->va_e = btlb->va_b + len * TARGET_PAGE_SIZE - 1;
+ btlb->itree.start = virt_page << TARGET_PAGE_BITS;
+ btlb->itree.last = btlb->itree.start + len * TARGET_PAGE_SIZE - 1;
btlb->pa = phys_page << TARGET_PAGE_BITS;
set_access_bits(env, btlb, env->gr[20]);
btlb->t = 0;
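
Since the tree itself is not wired up yet, the lookups above still scan
env->tlb[] linearly. As a hedged sketch of where this is headed, the
interval-tree API in qemu/interval-tree.h (interval_tree_insert() and
interval_tree_iter_first() are the real entry points; the tlb_root
parameter and the *_sketch helpers below are hypothetical names, not part
of this patch) would let a later change answer "which entry covers addr?"
with a stabbing query instead of a scan:

/* Hypothetical follow-up, not part of this patch. */
static void hppa_tlb_insert_sketch(IntervalTreeRoot *tlb_root,
                                   HPPATLBEntry *ent)
{
    /* itree.start/itree.last are already populated, as done above. */
    interval_tree_insert(&ent->itree, tlb_root);
}

static HPPATLBEntry *hppa_tlb_lookup_sketch(IntervalTreeRoot *tlb_root,
                                            uint64_t addr)
{
    /* Find any node whose inclusive [start, last] range contains addr. */
    IntervalTreeNode *i = interval_tree_iter_first(tlb_root, addr, addr);

    return i ? container_of(i, HPPATLBEntry, itree) : NULL;
}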