#ifndef _ASM_IA64_PROCESSOR_H
#define _ASM_IA64_PROCESSOR_H

/*
 * Copyright (C) 1998-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 *
 * 11/24/98	S.Eranian	added ia64_set_iva()
 * 12/03/99	D. Mosberger	implement thread_saved_pc() via kernel unwind API
 * 06/16/00	A. Mallick	added csd/ssd/tssd for ia32 support
 */

#include <asm/intrinsics.h>
#include <asm/kregs.h>
#include <asm/ptrace.h>
#include <asm/ustack.h>

#define IA64_NUM_PHYS_STACK_REG	96
#define IA64_NUM_DBG_REGS	8

#define DEFAULT_MAP_BASE	__IA64_UL_CONST(0x2000000000000000)
#define DEFAULT_TASK_SIZE	__IA64_UL_CONST(0xa000000000000000)

/*
 * TASK_SIZE really is misnamed.  It is actually the maximum user
 * space address (plus one).  On IA-64, there are five regions of 2TB
 * each (assuming 8KB page size), for a total of 8TB of user virtual
 * address space.
 */
#define TASK_SIZE		DEFAULT_TASK_SIZE

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(current->thread.map_base)

#define IA64_THREAD_FPH_VALID	(__IA64_UL(1) << 0)	/* floating-point high state valid? */
#define IA64_THREAD_DBG_VALID	(__IA64_UL(1) << 1)	/* debug registers valid? */
#define IA64_THREAD_PM_VALID	(__IA64_UL(1) << 2)	/* performance registers valid? */
#define IA64_THREAD_UAC_NOPRINT	(__IA64_UL(1) << 3)	/* don't log unaligned accesses */
#define IA64_THREAD_UAC_SIGBUS	(__IA64_UL(1) << 4)	/* generate SIGBUS on unaligned acc. */
#define IA64_THREAD_MIGRATION	(__IA64_UL(1) << 5)	/* require migration
							   sync at ctx sw */
#define IA64_THREAD_FPEMU_NOPRINT (__IA64_UL(1) << 6)	/* don't log any fpswa faults */
#define IA64_THREAD_FPEMU_SIGFPE  (__IA64_UL(1) << 7)	/* send a SIGFPE for fpswa faults */

#define IA64_THREAD_UAC_SHIFT	3
#define IA64_THREAD_UAC_MASK	(IA64_THREAD_UAC_NOPRINT | IA64_THREAD_UAC_SIGBUS)
#define IA64_THREAD_FPEMU_SHIFT	6
#define IA64_THREAD_FPEMU_MASK	(IA64_THREAD_FPEMU_NOPRINT | IA64_THREAD_FPEMU_SIGFPE)

/*
 * This shift should be large enough to be able to represent 1000000000/itc_freq with good
 * accuracy while being small enough to fit 10*1000000000<<IA64_NSEC_PER_CYC_SHIFT in 64 bits
 * (this will give enough slack to represent 10 seconds worth of time as a scaled number).
 */
#define IA64_NSEC_PER_CYC_SHIFT	30

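/*
 * Illustrative use of the scaled factor (a sketch, not code from this
 * header): the timer code precomputes
 *
 *	nsec_per_cyc = (1000000000UL << IA64_NSEC_PER_CYC_SHIFT) / itc_freq;
 *
 * and then converts an ITC cycle delta to nanoseconds with
 *
 *	nsec = (cycles * nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT;
 *
 * e.g. for itc_freq = 1 GHz the scaled factor is exactly 1 << 30.
 */
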
#ifndef __ASSEMBLY__

#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/threads.h>
#include <linux/types.h>
#include <linux/bitops.h>

#include <asm/fpu.h>
#include <asm/page.h>
#include <asm/percpu.h>
#include <asm/rse.h>
#include <asm/unwind.h>
#include <linux/atomic.h>
#ifdef CONFIG_NUMA
#include <asm/nodedata.h>
#endif

/* like above but expressed as bitfields for more efficient access: */
struct ia64_psr {
	__u64 reserved0 : 1;
	__u64 be : 1;
	__u64 up : 1;
	__u64 ac : 1;
	__u64 mfl : 1;
	__u64 mfh : 1;
	__u64 reserved1 : 7;
	__u64 ic : 1;
	__u64 i : 1;
	__u64 pk : 1;
	__u64 reserved2 : 1;
	__u64 dt : 1;
	__u64 dfl : 1;
	__u64 dfh : 1;
	__u64 sp : 1;
	__u64 pp : 1;
	__u64 di : 1;
	__u64 si : 1;
	__u64 db : 1;
	__u64 lp : 1;
	__u64 tb : 1;
	__u64 rt : 1;
	__u64 reserved3 : 4;
	__u64 cpl : 2;
	__u64 is : 1;
	__u64 mc : 1;
	__u64 it : 1;
	__u64 id : 1;
	__u64 da : 1;
	__u64 dd : 1;
	__u64 ss : 1;
	__u64 ri : 2;
	__u64 ed : 1;
	__u64 bn : 1;
	__u64 reserved4 : 19;
};

union ia64_isr {
	__u64 val;
	struct {
		__u64 code : 16;
		__u64 vector : 8;
		__u64 reserved1 : 8;
		__u64 x : 1;
		__u64 w : 1;
		__u64 r : 1;
		__u64 na : 1;
		__u64 sp : 1;
		__u64 rs : 1;
		__u64 ir : 1;
		__u64 ni : 1;
		__u64 so : 1;
		__u64 ei : 2;
		__u64 ed : 1;
		__u64 reserved2 : 20;
	};
};

union ia64_itir {
	__u64 val;
	struct {
		__u64 rv3  :  2; /* 0-1 */
		__u64 ps   :  6; /* 2-7 */
		__u64 key  : 24; /* 8-31 */
		__u64 rv4  : 32; /* 32-63 */
	};
};

union ia64_rr {
	__u64 val;
	struct {
		__u64 ve	:  1;  /* enable hw walker */
		__u64 reserved0 :  1;  /* reserved */
		__u64 ps	:  6;  /* log page size */
		__u64 rid	: 24;  /* region id */
		__u64 reserved1 : 32;  /* reserved */
	};
};

/*
 * CPU type, hardware bug flags, and per-CPU state.  Frequently used
 * state comes earlier:
 */
struct cpuinfo_ia64 {
	unsigned int softirq_pending;
	unsigned long itm_delta;	/* # of clock cycles between clock ticks */
	unsigned long itm_next;		/* interval timer mask value to use for next clock tick */
	unsigned long nsec_per_cyc;	/* (1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq */
	unsigned long unimpl_va_mask;	/* mask of unimplemented virtual address bits (from PAL) */
	unsigned long unimpl_pa_mask;	/* mask of unimplemented physical address bits (from PAL) */
	unsigned long itc_freq;		/* frequency of ITC counter */
	unsigned long proc_freq;	/* frequency of processor */
	unsigned long cyc_per_usec;	/* itc_freq/1000000 */
	unsigned long ptce_base;
	unsigned int ptce_count[2];
	unsigned int ptce_stride[2];
	struct task_struct *ksoftirqd;	/* kernel softirq daemon for this CPU */

#ifdef CONFIG_SMP
	unsigned long loops_per_jiffy;
	int cpu;
	unsigned int socket_id;		/* physical processor socket id */
	unsigned short core_id;		/* core id */
	unsigned short thread_id;	/* thread id */
	unsigned short num_log;		/* Total number of logical processors on
					 * this socket that were successfully booted */
	unsigned char cores_per_socket;	/* Cores per processor socket */
	unsigned char threads_per_core;	/* Threads per core */
#endif

	/* CPUID-derived information: */
	unsigned long ppn;
	unsigned long features;
	unsigned char number;
	unsigned char revision;
	unsigned char model;
	unsigned char family;
	unsigned char archrev;
	char vendor[16];
	char *model_name;

#ifdef CONFIG_NUMA
	struct ia64_node_data *node_data;
#endif
};

DECLARE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);

/*
 * The "local" data variable.  It refers to the per-CPU data of the currently executing
 * CPU, much like "current" points to the per-task data of the currently executing task.
 * Do not use the address of local_cpu_data, since it will be different from
 * cpu_data(smp_processor_id())!
 */
#define local_cpu_data		(&__ia64_per_cpu_var(ia64_cpu_info))
#define cpu_data(cpu)		(&per_cpu(ia64_cpu_info, cpu))

extern void print_cpu_info (struct cpuinfo_ia64 *);

#define SET_UNALIGN_CTL(task,value)								\
({												\
	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_UAC_MASK)			\
				| (((value) << IA64_THREAD_UAC_SHIFT) & IA64_THREAD_UAC_MASK));	\
	0;											\
})

#define GET_UNALIGN_CTL(task,addr)								\
({												\
	put_user(((task)->thread.flags & IA64_THREAD_UAC_MASK) >> IA64_THREAD_UAC_SHIFT,	\
		 (int __user *) (addr));							\
})

#define SET_FPEMU_CTL(task,value)								\
({												\
	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_FPEMU_MASK)		\
				| (((value) << IA64_THREAD_FPEMU_SHIFT) & IA64_THREAD_FPEMU_MASK)); \
	0;											\
})

#define GET_FPEMU_CTL(task,addr)								\
({												\
	put_user(((task)->thread.flags & IA64_THREAD_FPEMU_MASK) >> IA64_THREAD_FPEMU_SHIFT,	\
		 (int __user *) (addr));							\
})

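/*
 * These hooks back the generic prctl() unaligned-access and fp-emulation
 * controls.  For illustration only (userspace sketch, not part of this
 * header): prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS) ends up setting
 * IA64_THREAD_UAC_SIGBUS in current->thread.flags via SET_UNALIGN_CTL(),
 * since (2 << IA64_THREAD_UAC_SHIFT) lands on bit 4.
 */
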
struct thread_struct {
	__u32 flags;			/* various thread flags (see IA64_THREAD_*) */
	/* writing on_ustack is performance-critical, so it's worth spending 8 bits on it... */
	__u8 on_ustack;			/* executing on user-stacks? */
	__u8 pad[3];
	__u64 ksp;			/* kernel stack pointer */
	__u64 map_base;			/* base address for get_unmapped_area() */
	__u64 rbs_bot;			/* the base address for the RBS */
	int last_fph_cpu;		/* CPU that may hold the contents of f32-f127 */

#ifdef CONFIG_PERFMON
	void *pfm_context;		     /* pointer to detailed PMU context */
	unsigned long pfm_needs_checking;    /* when >0, pending perfmon work on kernel exit */
# define INIT_THREAD_PM		.pfm_context =		NULL,	\
				.pfm_needs_checking =	0UL,
#else
# define INIT_THREAD_PM
#endif
	unsigned long dbr[IA64_NUM_DBG_REGS];
	unsigned long ibr[IA64_NUM_DBG_REGS];
	struct ia64_fpreg fph[96];	/* saved/loaded on demand */
};

#define INIT_THREAD {						\
	.flags =	0,					\
	.on_ustack =	0,					\
	.ksp =		0,					\
	.map_base =	DEFAULT_MAP_BASE,			\
	.rbs_bot =	STACK_TOP - DEFAULT_USER_STACK_SIZE,	\
	.last_fph_cpu =  -1,					\
	INIT_THREAD_PM						\
	.dbr =		{0, },					\
	.ibr =		{0, },					\
	.fph =		{{{{0}}}, }				\
}

#define start_thread(regs,new_ip,new_sp)						\
do {											\
	regs->cr_ipsr = ((regs->cr_ipsr | (IA64_PSR_BITS_TO_SET | IA64_PSR_CPL))	\
			 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS));	\
	regs->cr_iip = new_ip;								\
	regs->ar_rsc = 0xf;		/* eager mode, privilege level 3 */		\
	regs->ar_rnat = 0;								\
	regs->ar_bspstore = current->thread.rbs_bot;					\
	regs->ar_fpsr = FPSR_DEFAULT;							\
	regs->loadrs = 0;								\
	regs->r8 = get_dumpable(current->mm);	/* set "don't zap registers" flag */	\
	regs->r12 = new_sp - 16;	/* allocate 16 byte scratch area */		\
	if (unlikely(get_dumpable(current->mm) != SUID_DUMP_USER)) {			\
		/*									\
		 * Zap scratch regs to avoid leaking bits between processes with	\
		 * different privilege levels.						\
		 */									\
		regs->ar_pfs = 0; regs->b0 = 0; regs->pr = 0;				\
		regs->r1 = 0; regs->r9  = 0; regs->r11 = 0; regs->r13 = 0; regs->r15 = 0; \
	}										\
} while (0)

/* Forward declarations, a strange C thing... */
struct mm_struct;
struct task_struct;

/*
 * Free all resources held by a thread. This is called after the
 * parent of DEAD_TASK has collected the exit status of the task via
 * wait().
 */
#define release_thread(dead_task)

/* Get wait channel for task P.  */
extern unsigned long get_wchan (struct task_struct *p);

/* Return instruction pointer of blocked task TSK.  */
#define KSTK_EIP(tsk)					\
  ({							\
	struct pt_regs *_regs = task_pt_regs(tsk);	\
	_regs->cr_iip + ia64_psr(_regs)->ri;		\
  })

/* Return stack pointer of blocked task TSK.  */
#define KSTK_ESP(tsk)	((tsk)->thread.ksp)

extern void ia64_getreg_unknown_kr (void);
extern void ia64_setreg_unknown_kr (void);

#define ia64_get_kr(regnum)					\
({								\
	unsigned long r = 0;					\
								\
	switch (regnum) {					\
	    case 0: r = ia64_getreg(_IA64_REG_AR_KR0); break;	\
	    case 1: r = ia64_getreg(_IA64_REG_AR_KR1); break;	\
	    case 2: r = ia64_getreg(_IA64_REG_AR_KR2); break;	\
	    case 3: r = ia64_getreg(_IA64_REG_AR_KR3); break;	\
	    case 4: r = ia64_getreg(_IA64_REG_AR_KR4); break;	\
	    case 5: r = ia64_getreg(_IA64_REG_AR_KR5); break;	\
	    case 6: r = ia64_getreg(_IA64_REG_AR_KR6); break;	\
	    case 7: r = ia64_getreg(_IA64_REG_AR_KR7); break;	\
	    default: ia64_getreg_unknown_kr(); break;		\
	}							\
	r;							\
})

#define ia64_set_kr(regnum, r)					\
do {								\
	switch (regnum) {					\
	    case 0: ia64_setreg(_IA64_REG_AR_KR0, r); break;	\
	    case 1: ia64_setreg(_IA64_REG_AR_KR1, r); break;	\
	    case 2: ia64_setreg(_IA64_REG_AR_KR2, r); break;	\
	    case 3: ia64_setreg(_IA64_REG_AR_KR3, r); break;	\
	    case 4: ia64_setreg(_IA64_REG_AR_KR4, r); break;	\
	    case 5: ia64_setreg(_IA64_REG_AR_KR5, r); break;	\
	    case 6: ia64_setreg(_IA64_REG_AR_KR6, r); break;	\
	    case 7: ia64_setreg(_IA64_REG_AR_KR7, r); break;	\
	    default: ia64_setreg_unknown_kr(); break;		\
	}							\
} while (0)

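/*
 * The kernel registers ar.k0-ar.k7 hold per-CPU values that must survive
 * across context switches.  For example (usage taken from the lazy-FPU
 * macros below, shown here only as an illustration):
 *
 *	ia64_set_kr(IA64_KR_FPU_OWNER, (unsigned long) task);
 *
 * The switch on a constant regnum lets the compiler reduce each use to a
 * single mov-from/to-AR instruction.
 */
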
/*
 * The following three macros can't be inline functions because we don't have struct
 * task_struct at this point.
 */

/*
 * Return TRUE if task T owns the fph partition of the CPU we're running on.
 * Must be called from code that has preemption disabled.
 */
#define ia64_is_local_fpu_owner(t)								\
({												\
	struct task_struct *__ia64_islfo_task = (t);						\
	(__ia64_islfo_task->thread.last_fph_cpu == smp_processor_id()				\
	 && __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER));	\
})

/*
 * Mark task T as owning the fph partition of the CPU we're running on.
 * Must be called from code that has preemption disabled.
 */
#define ia64_set_local_fpu_owner(t) do {						\
	struct task_struct *__ia64_slfo_task = (t);					\
	__ia64_slfo_task->thread.last_fph_cpu = smp_processor_id();			\
	ia64_set_kr(IA64_KR_FPU_OWNER, (unsigned long) __ia64_slfo_task);		\
} while (0)

/* Mark the fph partition of task T as being invalid on all CPUs.  */
#define ia64_drop_fpu(t)	((t)->thread.last_fph_cpu = -1)

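/*
 * Sketch of the lazy f32-f127 handling these macros support (illustrative
 * pseudo-sequence, assuming preemption is already disabled; the real logic
 * lives in the disabled-fp-register fault path):
 *
 *	if (!ia64_is_local_fpu_owner(task)) {
 *		ia64_load_fpu(task->thread.fph);	// bring state in
 *		ia64_set_local_fpu_owner(task);		// claim the partition
 *	}
 *
 * A task that migrates or exits has ia64_drop_fpu() applied so that a
 * stale last_fph_cpu can never match again.
 */
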
extern void __ia64_init_fpu (void);
extern void __ia64_save_fpu (struct ia64_fpreg *fph);
extern void __ia64_load_fpu (struct ia64_fpreg *fph);
extern void ia64_save_debug_regs (unsigned long *save_area);
extern void ia64_load_debug_regs (unsigned long *save_area);

#define ia64_fph_enable()	do { ia64_rsm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
#define ia64_fph_disable()	do { ia64_ssm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)

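/*
 * psr.dfh ("disabled floating-point high") gates access to f32-f127, so any
 * touch of the high partition must be bracketed the way the helpers below
 * do it: enable, access, disable.  Leaving dfh set between uses is what
 * makes the lazy-ownership scheme above possible: the next user-level touch
 * of f32-f127 faults, and the handler can decide whose state to load.
 */
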
/* load fp 0.0 into fph */
static inline void
ia64_init_fpu (void) {
	ia64_fph_enable();
	__ia64_init_fpu();
	ia64_fph_disable();
}

/* save f32-f127 at FPH */
static inline void
ia64_save_fpu (struct ia64_fpreg *fph) {
	ia64_fph_enable();
	__ia64_save_fpu(fph);
	ia64_fph_disable();
}

/* load f32-f127 from FPH */
static inline void
ia64_load_fpu (struct ia64_fpreg *fph) {
	ia64_fph_enable();
	__ia64_load_fpu(fph);
	ia64_fph_disable();
}

static inline __u64
ia64_clear_ic (void)
{
	__u64 psr;

	psr = ia64_getreg(_IA64_REG_PSR);
	ia64_stop();
	ia64_rsm(IA64_PSR_I | IA64_PSR_IC);
	return psr;
}

/*
 * Restore the psr.
 */
static inline void
ia64_set_psr (__u64 psr)
{
	ia64_stop();
	ia64_setreg(_IA64_REG_PSR_L, psr);
	ia64_srlz_i();
}

/*
 * Insert a translation into an instruction and/or data translation
 * register.
 */
static inline void
ia64_itr (__u64 target_mask, __u64 tr_num,
	  __u64 vmaddr, __u64 pte,
	  __u64 log_page_size)
{
	ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
	ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
	ia64_stop();
	if (target_mask & 0x1)
		ia64_itri(tr_num, pte);
	if (target_mask & 0x2)
		ia64_itrd(tr_num, pte);
}

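/*
 * For example (hypothetical values, for illustration only): pinning one
 * 16MB (log page size 24) kernel mapping into both the instruction and
 * data TRs, using TR slot 2, could look like
 *
 *	ia64_itr(0x3, 2, vaddr, pte, 24);
 *
 * target_mask bit 0 selects the instruction TR, bit 1 the data TR.
 */
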
/*
 * Insert a translation into the instruction and/or data translation
 * cache.
 */
static inline void
ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte,
	  __u64 log_page_size)
{
	ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
	ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
	ia64_stop();
	/* as per EAS2.6, itc must be the last instruction in an instruction group */
	if (target_mask & 0x1)
		ia64_itci(pte);
	if (target_mask & 0x2)
		ia64_itcd(pte);
}

/*
 * Purge a range of addresses from instruction and/or data translation
 * cache(s).
 */
static inline void
ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size)
{
	if (target_mask & 0x1)
		ia64_ptri(vmaddr, (log_size << 2));
	if (target_mask & 0x2)
		ia64_ptrd(vmaddr, (log_size << 2));
}

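/*
 * Illustration (hypothetical values): purging a single 8KB page (log size
 * 13) from both translation caches would be
 *
 *	ia64_ptr(0x3, vaddr, 13);
 *
 * matching the target_mask convention used by ia64_itr()/ia64_itc() above.
 */
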
/* Set the interrupt vector address.  The address must be suitably aligned (32KB).  */
static inline void
ia64_set_iva (void *ivt_addr)
{
	ia64_setreg(_IA64_REG_CR_IVA, (__u64) ivt_addr);
	ia64_srlz_i();
}

/* Set the page table address and control bits.  */
static inline void
ia64_set_pta (__u64 pta)
{
	/* Note: srlz.i implies srlz.d */
	ia64_setreg(_IA64_REG_CR_PTA, pta);
	ia64_srlz_i();
}

static inline void
ia64_eoi (void)
{
	ia64_setreg(_IA64_REG_CR_EOI, 0);
	ia64_srlz_d();
}

#define cpu_relax()	ia64_hint(ia64_hint_pause)

static inline int
ia64_get_irr(unsigned int vector)
{
	unsigned int reg = vector / 64;
	unsigned int bit = vector % 64;
	unsigned long irr;

	switch (reg) {
	case 0: irr = ia64_getreg(_IA64_REG_CR_IRR0); break;
	case 1: irr = ia64_getreg(_IA64_REG_CR_IRR1); break;
	case 2: irr = ia64_getreg(_IA64_REG_CR_IRR2); break;
	case 3: irr = ia64_getreg(_IA64_REG_CR_IRR3); break;
	}

	return test_bit(bit, &irr);
}

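/*
 * The 256 external-interrupt vectors are spread across four 64-bit IRR
 * registers.  E.g. for vector 67: reg = 67 / 64 = 1 and bit = 67 % 64 = 3,
 * so ia64_get_irr(67) tests bit 3 of cr.irr1.
 */
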
static inline void
ia64_set_lrr0 (unsigned long val)
{
	ia64_setreg(_IA64_REG_CR_LRR0, val);
	ia64_srlz_d();
}

static inline void
ia64_set_lrr1 (unsigned long val)
{
	ia64_setreg(_IA64_REG_CR_LRR1, val);
	ia64_srlz_d();
}

/*
 * Given the address to which a spill occurred, return the unat bit
 * number that corresponds to this address.
 */
static inline __u64
ia64_unat_pos (void *spill_addr)
{
	return ((__u64) spill_addr >> 3) & 0x3f;
}

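/*
 * The UNAT bit simply tracks which of the 64 possible 8-byte slots in a
 * 512-byte-aligned window the spill went to.  E.g. a spill to address
 * 0x1018 gives ((0x1018 >> 3) & 0x3f) = 3, i.e. UNAT bit 3.
 */
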
/*
 * Set the NaT bit of an integer register which was spilled at address
 * SPILL_ADDR.  UNAT is the mask to be updated.
 */
static inline void
ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat)
{
	__u64 bit = ia64_unat_pos(spill_addr);
	__u64 mask = 1UL << bit;

	*unat = (*unat & ~mask) | (nat << bit);
}

/*
 * Return saved PC of a blocked thread.
 * Note that the only way T can block is through a call to schedule() -> switch_to().
 */
static inline unsigned long
thread_saved_pc (struct task_struct *t)
{
	struct unw_frame_info info;
	unsigned long ip;

	unw_init_from_blocked_task(&info, t);
	if (unw_unwind(&info) < 0)
		return 0;
	unw_get_ip(&info, &ip);
	return ip;
}

/*
 * Get the current instruction/program counter value.
 */
#define current_text_addr() \
	({ void *_pc; _pc = (void *) ia64_getreg(_IA64_REG_IP); _pc; })

static inline __u64
ia64_get_ivr (void)
{
	__u64 r;
	ia64_srlz_d();
	r = ia64_getreg(_IA64_REG_CR_IVR);
	ia64_srlz_d();
	return r;
}

static inline void
ia64_set_dbr (__u64 regnum, __u64 value)
{
	__ia64_set_dbr(regnum, value);
#ifdef CONFIG_ITANIUM
	ia64_srlz_d();
#endif
}

static inline __u64
ia64_get_dbr (__u64 regnum)
{
	__u64 retval;

	retval = __ia64_get_dbr(regnum);
#ifdef CONFIG_ITANIUM
	ia64_srlz_d();
#endif
	return retval;
}

static inline __u64
ia64_rotr (__u64 w, __u64 n)
{
	return (w >> n) | (w << (64 - n));
}

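/*
 * E.g. ia64_rotr(0x0123456789abcdefUL, 8) == 0xef0123456789abcdUL: the low
 * byte wraps around to the top.  Note the expression is undefined for
 * n == 0 (w << 64), so callers must pass 1 <= n <= 63; the same caveat
 * applies to ia64_rotl() below, which is defined in terms of ia64_rotr().
 */
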
#define ia64_rotl(w,n)	ia64_rotr((w), (64) - (n))

/*
 * Take a mapped kernel address and return the equivalent address
 * in the region 7 identity mapped virtual area.
 */
static inline void *
ia64_imva (void *addr)
{
	void *result;
	result = (void *) ia64_tpa(addr);
	return __va(result);
}

#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
#define PREFETCH_STRIDE			L1_CACHE_BYTES

static inline void
prefetch (const void *x)
{
	ia64_lfetch(ia64_lfhint_none, x);
}

static inline void
prefetchw (const void *x)
{
	ia64_lfetch_excl(ia64_lfhint_none, x);
}

#define spin_lock_prefetch(x)	prefetchw(x)

extern unsigned long boot_option_idle_override;

enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_FORCE_MWAIT,
			 IDLE_NOMWAIT, IDLE_POLL};

void default_idle(void);

#define ia64_platform_is(x) (strcmp(x, ia64_platform_name) == 0)

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_IA64_PROCESSOR_H */