/*
 * TLB support routines.
 *
 * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 08/02/00 A. Mallick <asit.k.mallick@intel.com>
 *		Modified RID allocation for SMP
 *	    Goutham Rao <goutham.rao@intel.com>
 *		IPI based ptc implementation and A-step IPI implementation.
 * Rohit Seth <rohit.seth@intel.com>
 * Ken Chen <kenneth.w.chen@intel.com>
 * Christophe de Dinechin <ddd@hp.com>: Avoid ptc.e on memory allocation
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/bootmem.h>

#include <asm/delay.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/pal.h>
#include <asm/tlbflush.h>
#include <asm/processor.h>
static struct {
	unsigned long mask;	/* mask of supported purge page-sizes */
	unsigned long max_bits;	/* log2 of largest supported purge page-size */
} purge;
struct ia64_ctx ia64_ctx = {
	.lock = __SPIN_LOCK_UNLOCKED(ia64_ctx.lock),
	.next = 1,
	.max_ctx = ~0U
};
DEFINE_PER_CPU(u8, ia64_need_tlb_flush);
DEFINE_PER_CPU(u8, ia64_tr_num);  /* number of TR slots in the current processor */
DEFINE_PER_CPU(u8, ia64_tr_used); /* max slot number used by the kernel */

struct ia64_tr_entry __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX];
/*
 * Initializes the ia64_ctx.bitmap array based on max_ctx+1.
 * Called after cpu_init() has set up ia64_ctx.max_ctx based on the
 * maximum RID that is supported by the boot CPU.
 */
void __init
mmu_context_init (void)
{
	ia64_ctx.bitmap = alloc_bootmem((ia64_ctx.max_ctx + 1) >> 3);
	ia64_ctx.flushmap = alloc_bootmem((ia64_ctx.max_ctx + 1) >> 3);
}
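/*
 * Each region ID (context) gets one bit in the bitmap, so covering the
 * max_ctx+1 possible contexts takes (max_ctx + 1) >> 3 bytes per map.
 */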
/*
 * Acquire the ia64_ctx.lock before calling this function!
 */
void
wrap_mmu_context (struct mm_struct *mm)
{
	int i, cpu;
	unsigned long flush_bit;

	for (i = 0; i <= ia64_ctx.max_ctx / BITS_PER_LONG; i++) {
		flush_bit = xchg(&ia64_ctx.flushmap[i], 0);
		ia64_ctx.bitmap[i] ^= flush_bit;
	}

	/* use offset at 300 to skip daemons */
	ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
				ia64_ctx.max_ctx, 300);
	ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
				ia64_ctx.max_ctx, ia64_ctx.next);

	/*
	 * can't call flush_tlb_all() here because of race condition
	 * with O(1) scheduler [EF]
	 */
	cpu = get_cpu(); /* prevent preemption/migration */
	for_each_online_cpu(i)
		if (i != cpu)
			per_cpu(ia64_need_tlb_flush, i) = 1;
	put_cpu();
	local_flush_tlb_all();
}
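/*
 * Note: the ia64_need_tlb_flush flags set above are consumed lazily; each
 * marked CPU does a local_flush_tlb_all() the next time it switches context
 * (see delayed_tlb_flush() in <asm/mmu_context.h>), so the wrap itself does
 * not need to interrupt the other CPUs.
 */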
void
ia64_global_tlb_purge (struct mm_struct *mm, unsigned long start,
		       unsigned long end, unsigned long nbits)
{
	static DEFINE_SPINLOCK(ptcg_lock);

	struct mm_struct *active_mm = current->active_mm;

	if (mm != active_mm) {
		/* Restore region IDs for mm */
		if (mm && active_mm) {
			activate_context(mm);
		} else {
			flush_tlb_all();
			return;
		}
	}

	/* HW requires global serialization of ptc.ga. */
	spin_lock(&ptcg_lock);
	{
		do {
			/*
			 * Flush ALAT entries also.
			 */
			ia64_ptcga(start, (nbits << 2));
			ia64_srlz_i();
			start += (1UL << nbits);
		} while (start < end);
	}
	spin_unlock(&ptcg_lock);

	if (mm != active_mm) {
		activate_context(active_mm);
	}
}
void
local_flush_tlb_all (void)
{
	unsigned long i, j, flags, count0, count1, stride0, stride1, addr;

	addr    = local_cpu_data->ptce_base;
	count0  = local_cpu_data->ptce_count[0];
	count1  = local_cpu_data->ptce_count[1];
	stride0 = local_cpu_data->ptce_stride[0];
	stride1 = local_cpu_data->ptce_stride[1];

	local_irq_save(flags);
	for (i = 0; i < count0; ++i) {
		for (j = 0; j < count1; ++j) {
			ia64_ptce(addr);
			addr += stride1;
		}
		addr += stride0;
	}
	local_irq_restore(flags);
	ia64_srlz_i();			/* srlz.i implies srlz.d */
}
void
flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
		 unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long size = end - start;
	unsigned long nbits;

#ifndef CONFIG_SMP
	if (mm != current->active_mm) {
		mm->context = 0;
		return;
	}
#endif

	nbits = ia64_fls(size + 0xfff);
	while (unlikely (((1UL << nbits) & purge.mask) == 0) &&
			(nbits < purge.max_bits))
		++nbits;
	if (nbits > purge.max_bits)
		nbits = purge.max_bits;
	start &= ~((1UL << nbits) - 1);

	preempt_disable();
#ifdef CONFIG_SMP
	if (mm != current->active_mm || cpus_weight(mm->cpu_vm_mask) != 1) {
		platform_global_tlb_purge(mm, start, end, nbits);
		preempt_enable();
		return;
	}
#endif

	do {
		ia64_ptcl(start, (nbits << 2));
		start += (1UL << nbits);
	} while (start < end);
	preempt_enable();
	ia64_srlz_i();			/* srlz.i implies srlz.d */
}
EXPORT_SYMBOL(flush_tlb_range);
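/*
 * Illustrative usage sketch (the range arguments below are hypothetical):
 * generic mm code calls this after changing the page tables for a range, e.g.
 *
 *	flush_tlb_range(vma, vma->vm_start, vma->vm_end);
 *
 * Note that start is rounded down to a purge page-size the hardware supports,
 * so the purge may cover somewhat more than the requested [start, end) range.
 */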
void __devinit
ia64_tlb_init (void)
{
	ia64_ptce_info_t uninitialized_var(ptce_info); /* GCC be quiet */
	unsigned long tr_pgbits;
	long status;
	pal_vm_info_1_u_t vm_info_1;
	pal_vm_info_2_u_t vm_info_2;
	int cpu = smp_processor_id();

	if ((status = ia64_pal_vm_page_size(&tr_pgbits, &purge.mask)) != 0) {
		printk(KERN_ERR "PAL_VM_PAGE_SIZE failed with status=%ld; "
		       "defaulting to architected purge page-sizes.\n", status);
		purge.mask = 0x115557000UL;
	}
	purge.max_bits = ia64_fls(purge.mask);

	ia64_get_ptce(&ptce_info);
	local_cpu_data->ptce_base = ptce_info.base;
	local_cpu_data->ptce_count[0] = ptce_info.count[0];
	local_cpu_data->ptce_count[1] = ptce_info.count[1];
	local_cpu_data->ptce_stride[0] = ptce_info.stride[0];
	local_cpu_data->ptce_stride[1] = ptce_info.stride[1];

	local_flush_tlb_all();	/* nuke leftovers from bootstrapping... */
	status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2);

	if (status) {
		printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
		per_cpu(ia64_tr_num, cpu) = 8;
		return;
	}
	per_cpu(ia64_tr_num, cpu) = vm_info_1.pal_vm_info_1_s.max_itr_entry + 1;
	if (per_cpu(ia64_tr_num, cpu) >
				(vm_info_1.pal_vm_info_1_s.max_dtr_entry + 1))
		per_cpu(ia64_tr_num, cpu) =
				vm_info_1.pal_vm_info_1_s.max_dtr_entry + 1;
	if (per_cpu(ia64_tr_num, cpu) > IA64_TR_ALLOC_MAX) {
		per_cpu(ia64_tr_num, cpu) = IA64_TR_ALLOC_MAX;
		printk(KERN_DEBUG "TR register number exceeds IA64_TR_ALLOC_MAX; "
			"IA64_TR_ALLOC_MAX should be extended\n");
	}
}
/*
 * is_tr_overlap
 *
 * Check overlap with inserted TRs.
 */
static int is_tr_overlap(struct ia64_tr_entry *p, u64 va, u64 log_size)
{
	u64 tr_log_size;
	u64 tr_end;
	u64 va_rr = ia64_get_rr(va);
	u64 va_rid = RR_TO_RID(va_rr);
	u64 va_end = va + (1UL << log_size) - 1;

	if (va_rid != RR_TO_RID(p->rr))
		return 0;
	tr_log_size = (p->itir & 0xff) >> 2;
	tr_end = p->ifa + (1UL << tr_log_size) - 1;

	if (va > tr_end || p->ifa > va_end)
		return 0;
	return 1;
}
/*
 * ia64_insert_tr in virtual mode. Allocate a TR slot.
 *
 * target_mask : 0x1 : itr, 0x2 : dtr, 0x3 : idtr
 *
 * va	: virtual address.
 * pte	: pte entries inserted.
 * log_size: range to be covered.
 *
 * Return value:  < 0 : error number.
 *		  >=0 : slot number allocated for TR.
 *
 * Must be called with preemption disabled.
 */
int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
{
	int i, r;
	unsigned long psr;
	struct ia64_tr_entry *p;
	int cpu = smp_processor_id();

	r = -EINVAL;
	/* Check overlap with existing TR entries. */
	if (target_mask & 0x1) {
		p = &__per_cpu_idtrs[cpu][0][0];
		for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
								i++, p++) {
			if (p->pte & 0x1)
				if (is_tr_overlap(p, va, log_size)) {
					printk(KERN_DEBUG "Overlapped entry "
						"inserted for TR register!\n");
					goto out;
				}
		}
	}
	if (target_mask & 0x2) {
		p = &__per_cpu_idtrs[cpu][1][0];
		for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
								i++, p++) {
			if (p->pte & 0x1)
				if (is_tr_overlap(p, va, log_size)) {
					printk(KERN_DEBUG "Overlapped entry "
						"inserted for TR register!\n");
					goto out;
				}
		}
	}

	/* Find a free slot that satisfies target_mask. */
	for (i = IA64_TR_ALLOC_BASE; i < per_cpu(ia64_tr_num, cpu); i++) {
		switch (target_mask & 0x3) {
		case 0x1:
			if (!(__per_cpu_idtrs[cpu][0][i].pte & 0x1))
				goto found;
			continue;
		case 0x2:
			if (!(__per_cpu_idtrs[cpu][1][i].pte & 0x1))
				goto found;
			continue;
		case 0x3:
			if (!(__per_cpu_idtrs[cpu][0][i].pte & 0x1) &&
			    !(__per_cpu_idtrs[cpu][1][i].pte & 0x1))
				goto found;
			continue;
		default:
			r = -EINVAL;
			goto out;
		}
	}
found:
	if (i >= per_cpu(ia64_tr_num, cpu))
		return -EBUSY;

	/* Record TR info for MCA handler use! */
	if (i > per_cpu(ia64_tr_used, cpu))
		per_cpu(ia64_tr_used, cpu) = i;

	psr = ia64_clear_ic();
	if (target_mask & 0x1) {
		ia64_itr(0x1, i, va, pte, log_size);
		ia64_srlz_i();
		p = &__per_cpu_idtrs[cpu][0][i];
		p->ifa = va;
		p->pte = pte;
		p->itir = log_size << 2;
		p->rr = ia64_get_rr(va);
	}
	if (target_mask & 0x2) {
		ia64_itr(0x2, i, va, pte, log_size);
		ia64_srlz_i();
		p = &__per_cpu_idtrs[cpu][1][i];
		p->ifa = va;
		p->pte = pte;
		p->itir = log_size << 2;
		p->rr = ia64_get_rr(va);
	}
	ia64_set_psr(psr);
	r = i;
out:
	return r;
}
EXPORT_SYMBOL_GPL(ia64_itr_entry);
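/*
 * Illustrative usage sketch (variable names are hypothetical): pinning a
 * translation in both the instruction and data TRs, with preemption disabled
 * as required above:
 *
 *	int slot;
 *
 *	preempt_disable();
 *	slot = ia64_itr_entry(0x3, va, pte, log_size);
 *	if (slot < 0)
 *		printk(KERN_ERR "TR insert failed: %d\n", slot);
 *	preempt_enable();
 *
 * A negative return is -EBUSY when no free slot remains, or -EINVAL on a bad
 * target_mask or an overlap with an already inserted TR.
 */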
/*
 * ia64_purge_tr
 *
 * target_mask: 0x1: purge itr, 0x2: purge dtr, 0x3: purge idtr.
 * slot: slot number to be freed.
 *
 * Must be called with preemption disabled.
 */
void ia64_ptr_entry(u64 target_mask, int slot)
{
	int cpu = smp_processor_id();
	int i;
	struct ia64_tr_entry *p;

	if (slot < IA64_TR_ALLOC_BASE || slot >= per_cpu(ia64_tr_num, cpu))
		return;

	if (target_mask & 0x1) {
		p = &__per_cpu_idtrs[cpu][0][slot];
		if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir >> 2)) {
			p->pte = 0;
			ia64_ptr(0x1, p->ifa, p->itir >> 2);
			ia64_srlz_i();
		}
	}

	if (target_mask & 0x2) {
		p = &__per_cpu_idtrs[cpu][1][slot];
		if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir >> 2)) {
			p->pte = 0;
			ia64_ptr(0x2, p->ifa, p->itir >> 2);
			ia64_srlz_i();
		}
	}

	/* Recompute the highest slot still in use. */
	for (i = per_cpu(ia64_tr_used, cpu); i >= IA64_TR_ALLOC_BASE; i--) {
		if ((__per_cpu_idtrs[cpu][0][i].pte & 0x1) ||
		    (__per_cpu_idtrs[cpu][1][i].pte & 0x1))
			break;
	}
	per_cpu(ia64_tr_used, cpu) = i;
}
EXPORT_SYMBOL_GPL(ia64_ptr_entry);
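/*
 * Illustrative usage sketch: releasing a slot obtained from ia64_itr_entry(),
 * again with preemption disabled:
 *
 *	preempt_disable();
 *	ia64_ptr_entry(0x3, slot);
 *	preempt_enable();
 */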