#ifndef _ASM_IA64_GCC_INTRIN_H
#define _ASM_IA64_GCC_INTRIN_H
/*
 *
 * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
 * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
 */

#include <linux/compiler.h>

/* Define this macro to get some asm statements included in 'C' files. */
#define ASM_SUPPORTED

/* Optimization barrier */
/* The "volatile" is due to gcc bugs */
#define ia64_barrier()	asm volatile ("":::"memory")

#define ia64_stop()	asm volatile (";;"::)

#define ia64_invala_gr(regnum)	asm volatile ("invala.e r%0" :: "i"(regnum))

#define ia64_invala_fr(regnum)	asm volatile ("invala.e f%0" :: "i"(regnum))

extern void ia64_bad_param_for_setreg (void);
extern void ia64_bad_param_for_getreg (void);
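/*
 * Deliberately never defined: a call to one of these survives to link
 * time only if ia64_setreg()/ia64_getreg() was used with a register
 * number the switch statements below cannot handle, turning the mistake
 * into a link error.
 */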

register unsigned long ia64_r13 asm ("r13") __attribute_used__;

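/*
 * Write 'val' to the register named by the _IA64_REG_* constant 'regnum'.
 * 'regnum' must be a compile-time constant: the AR and CR cases feed
 * (regnum - base) into an "i" (immediate) asm operand.
 */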
#define ia64_setreg(regnum, val)						\
({										\
	switch (regnum) {							\
	case _IA64_REG_PSR_L:							\
		asm volatile ("mov psr.l=%0" :: "r"(val) : "memory");		\
		break;								\
	case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC:				\
		asm volatile ("mov ar%0=%1" ::					\
			      "i" (regnum - _IA64_REG_AR_KR0),			\
			      "r"(val) : "memory");				\
		break;								\
	case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1:				\
		asm volatile ("mov cr%0=%1" ::					\
			      "i" (regnum - _IA64_REG_CR_DCR),			\
			      "r"(val) : "memory");				\
		break;								\
	case _IA64_REG_SP:							\
		asm volatile ("mov r12=%0" :: "r"(val) : "memory");		\
		break;								\
	case _IA64_REG_GP:							\
		asm volatile ("mov gp=%0" :: "r"(val) : "memory");		\
		break;								\
	default:								\
		ia64_bad_param_for_setreg();					\
		break;								\
	}									\
})

#define ia64_getreg(regnum)							\
({										\
	__u64 ia64_intri_res;							\
										\
	switch (regnum) {							\
	case _IA64_REG_GP:							\
		asm volatile ("mov %0=gp" : "=r"(ia64_intri_res));		\
		break;								\
	case _IA64_REG_IP:							\
		asm volatile ("mov %0=ip" : "=r"(ia64_intri_res));		\
		break;								\
	case _IA64_REG_PSR:							\
		asm volatile ("mov %0=psr" : "=r"(ia64_intri_res));		\
		break;								\
	case _IA64_REG_TP:	/* for current() */				\
		ia64_intri_res = ia64_r13;					\
		break;								\
	case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC:				\
		asm volatile ("mov %0=ar%1" : "=r" (ia64_intri_res)		\
			      : "i"(regnum - _IA64_REG_AR_KR0));		\
		break;								\
	case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1:				\
		asm volatile ("mov %0=cr%1" : "=r" (ia64_intri_res)		\
			      : "i" (regnum - _IA64_REG_CR_DCR));		\
		break;								\
	case _IA64_REG_SP:							\
		asm volatile ("mov %0=sp" : "=r" (ia64_intri_res));		\
		break;								\
	default:								\
		ia64_bad_param_for_getreg();					\
		break;								\
	}									\
	ia64_intri_res;								\
})
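/*
 * Usage sketch (illustrative, not from the original source):
 *
 *	unsigned long sp = ia64_getreg(_IA64_REG_SP);
 *	ia64_setreg(_IA64_REG_AR_KR0, sp);
 *
 * Note that _IA64_REG_TP is satisfied from the pinned ia64_r13 variable
 * rather than an asm, so reads of the thread pointer need no extra
 * instruction.
 */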

#define ia64_hint_pause 0

#define ia64_hint(mode)						\
({								\
	switch (mode) {						\
	case ia64_hint_pause:					\
		asm volatile ("hint @pause" ::: "memory");	\
		break;						\
	}							\
})
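/*
 * "hint @pause" tells the CPU this is a busy-wait loop (the ia64
 * counterpart of x86 PAUSE); cpu_relax() is implemented with it.
 */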


/* Integer values for mux1 instruction */
#define ia64_mux1_brcst  0
#define ia64_mux1_mix    8
#define ia64_mux1_shuf   9
#define ia64_mux1_alt   10
#define ia64_mux1_rev   11

#define ia64_mux1(x, mode)							\
({										\
	__u64 ia64_intri_res;							\
										\
	switch (mode) {								\
	case ia64_mux1_brcst:							\
		asm ("mux1 %0=%1,@brcst" : "=r" (ia64_intri_res) : "r" (x));	\
		break;								\
	case ia64_mux1_mix:							\
		asm ("mux1 %0=%1,@mix" : "=r" (ia64_intri_res) : "r" (x));	\
		break;								\
	case ia64_mux1_shuf:							\
		asm ("mux1 %0=%1,@shuf" : "=r" (ia64_intri_res) : "r" (x));	\
		break;								\
	case ia64_mux1_alt:							\
		asm ("mux1 %0=%1,@alt" : "=r" (ia64_intri_res) : "r" (x));	\
		break;								\
	case ia64_mux1_rev:							\
		asm ("mux1 %0=%1,@rev" : "=r" (ia64_intri_res) : "r" (x));	\
		break;								\
	}									\
	ia64_intri_res;								\
})
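/*
 * mux1 permutes the eight bytes of a general register; @rev reverses the
 * byte order, so ia64_mux1(x, ia64_mux1_rev) is a full 64-bit byte swap
 * (this is how swab64() is implemented on ia64).
 */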

#define ia64_popcnt(x)							\
({									\
	__u64 ia64_intri_res;						\
	asm ("popcnt %0=%1" : "=r" (ia64_intri_res) : "r" (x));		\
									\
	ia64_intri_res;							\
})
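/* popcnt counts the 1-bits in x; the kernel's hweight*() helpers map to it. */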

#define ia64_getf_exp(x)						\
({									\
	long ia64_intri_res;						\
									\
	asm ("getf.exp %0=%1" : "=r"(ia64_intri_res) : "f"(x));		\
									\
	ia64_intri_res;							\
})

#define ia64_shrp(a, b, count)								\
({											\
	__u64 ia64_intri_res;								\
	asm ("shrp %0=%1,%2,%3" : "=r"(ia64_intri_res) : "r"(a), "r"(b), "i"(count));	\
	ia64_intri_res;									\
})
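/*
 * shrp treats a:b as one 128-bit value, shifts it right by count, and
 * returns the low 64 bits.  count must be a compile-time constant in
 * the range 0..63 to satisfy the "i" constraint.
 */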

#define ia64_ldfs(regnum, x)					\
({								\
	register double __f__ asm ("f"#regnum);			\
	asm volatile ("ldfs %0=[%1]" :"=f"(__f__): "r"(x));	\
})

#define ia64_ldfd(regnum, x)					\
({								\
	register double __f__ asm ("f"#regnum);			\
	asm volatile ("ldfd %0=[%1]" :"=f"(__f__): "r"(x));	\
})

#define ia64_ldfe(regnum, x)					\
({								\
	register double __f__ asm ("f"#regnum);			\
	asm volatile ("ldfe %0=[%1]" :"=f"(__f__): "r"(x));	\
})

#define ia64_ldf8(regnum, x)					\
({								\
	register double __f__ asm ("f"#regnum);			\
	asm volatile ("ldf8 %0=[%1]" :"=f"(__f__): "r"(x));	\
})

#define ia64_ldf_fill(regnum, x)				\
({								\
	register double __f__ asm ("f"#regnum);			\
	asm volatile ("ldf.fill %0=[%1]" :"=f"(__f__): "r"(x));	\
})

#define ia64_stfs(x, regnum)						\
({									\
	register double __f__ asm ("f"#regnum);				\
	asm volatile ("stfs [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
})

#define ia64_stfd(x, regnum)						\
({									\
	register double __f__ asm ("f"#regnum);				\
	asm volatile ("stfd [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
})

#define ia64_stfe(x, regnum)						\
({									\
	register double __f__ asm ("f"#regnum);				\
	asm volatile ("stfe [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
})

#define ia64_stf8(x, regnum)						\
({									\
	register double __f__ asm ("f"#regnum);				\
	asm volatile ("stf8 [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
})

#define ia64_stf_spill(x, regnum)						\
({										\
	register double __f__ asm ("f"#regnum);					\
	asm volatile ("stf.spill [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
})
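/*
 * The ldf and stf macros above move data between memory and the
 * *physical* FP register f<regnum>: the dummy __f__ variable is pinned
 * to that register so gcc routes the access through it.  The .fill and
 * .spill forms transfer the full 82-bit register image (including
 * NaTVal) and are the ones to use for FP context save/restore.
 */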

#define ia64_fetchadd4_acq(p, inc)					\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("fetchadd4.acq %0=[%1],%2"			\
		      : "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
		      : "memory");					\
									\
	ia64_intri_res;							\
})

#define ia64_fetchadd4_rel(p, inc)					\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("fetchadd4.rel %0=[%1],%2"			\
		      : "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
		      : "memory");					\
									\
	ia64_intri_res;							\
})

#define ia64_fetchadd8_acq(p, inc)					\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("fetchadd8.acq %0=[%1],%2"			\
		      : "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
		      : "memory");					\
									\
	ia64_intri_res;							\
})

#define ia64_fetchadd8_rel(p, inc)					\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("fetchadd8.rel %0=[%1],%2"			\
		      : "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
		      : "memory");					\
									\
	ia64_intri_res;							\
})
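/*
 * fetchadd encodes the increment as an immediate, which the architecture
 * limits to -16, -8, -4, -1, 1, 4, 8 or 16; hence the "i" constraint on
 * inc.  The .acq/.rel suffixes select acquire vs. release ordering, and
 * the old memory value is returned.
 */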

#define ia64_xchg1(ptr,x)						\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("xchg1 %0=[%1],%2"				\
		      : "=r" (ia64_intri_res) : "r" (ptr), "r" (x)	\
		      : "memory");					\
	ia64_intri_res;							\
})

#define ia64_xchg2(ptr,x)						\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("xchg2 %0=[%1],%2" : "=r" (ia64_intri_res)	\
		      : "r" (ptr), "r" (x) : "memory");			\
	ia64_intri_res;							\
})

#define ia64_xchg4(ptr,x)						\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("xchg4 %0=[%1],%2" : "=r" (ia64_intri_res)	\
		      : "r" (ptr), "r" (x) : "memory");			\
	ia64_intri_res;							\
})

#define ia64_xchg8(ptr,x)						\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("xchg8 %0=[%1],%2" : "=r" (ia64_intri_res)	\
		      : "r" (ptr), "r" (x) : "memory");			\
	ia64_intri_res;							\
})
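/*
 * xchg returns the previous memory contents and always has acquire
 * semantics on ia64; the architecture defines no release form.
 */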

#define ia64_cmpxchg1_acq(ptr, new, old)					\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));				\
	asm volatile ("cmpxchg1.acq %0=[%1],%2,ar.ccv":				\
		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;								\
})

#define ia64_cmpxchg1_rel(ptr, new, old)					\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));				\
	asm volatile ("cmpxchg1.rel %0=[%1],%2,ar.ccv":				\
		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;								\
})

#define ia64_cmpxchg2_acq(ptr, new, old)					\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));				\
	asm volatile ("cmpxchg2.acq %0=[%1],%2,ar.ccv":				\
		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;								\
})

#define ia64_cmpxchg2_rel(ptr, new, old)					\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));				\
	asm volatile ("cmpxchg2.rel %0=[%1],%2,ar.ccv":				\
		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;								\
})

#define ia64_cmpxchg4_acq(ptr, new, old)					\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));				\
	asm volatile ("cmpxchg4.acq %0=[%1],%2,ar.ccv":				\
		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;								\
})

#define ia64_cmpxchg4_rel(ptr, new, old)					\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));				\
	asm volatile ("cmpxchg4.rel %0=[%1],%2,ar.ccv":				\
		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;								\
})

#define ia64_cmpxchg8_acq(ptr, new, old)					\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));				\
	asm volatile ("cmpxchg8.acq %0=[%1],%2,ar.ccv":				\
		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;								\
})

#define ia64_cmpxchg8_rel(ptr, new, old)					\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));				\
	asm volatile ("cmpxchg8.rel %0=[%1],%2,ar.ccv":				\
		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;								\
})
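/*
 * cmpxchg protocol: the expected value must sit in ar.ccv before the
 * cmpxchg issues; the store of 'new' happens only if memory still
 * equals ar.ccv, and the previous memory contents are always returned.
 * A typical compare-and-swap loop looks like (sketch, not from the
 * original source):
 *
 *	do {
 *		old = *p;
 *	} while (ia64_cmpxchg8_acq(p, old + 1, old) != old);
 */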
367
368 #define ia64_mf() asm volatile ("mf" ::: "memory")
369 #define ia64_mfa() asm volatile ("mf.a" ::: "memory")
370
371 #define ia64_invala() asm volatile ("invala" ::: "memory")
372
373 #define ia64_thash(addr) \
374 ({ \
375 __u64 ia64_intri_res; \
376 asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \
377 ia64_intri_res; \
378 })
379
380 #define ia64_srlz_i() asm volatile (";; srlz.i ;;" ::: "memory")
381 #define ia64_srlz_d() asm volatile (";; srlz.d" ::: "memory");
382
383 #ifdef HAVE_SERIALIZE_DIRECTIVE
384 # define ia64_dv_serialize_data() asm volatile (".serialize.data");
385 # define ia64_dv_serialize_instruction() asm volatile (".serialize.instruction");
386 #else
387 # define ia64_dv_serialize_data()
388 # define ia64_dv_serialize_instruction()
389 #endif
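/*
 * These expand to assembler directives, not instructions: they tell the
 * assembler's dependency-violation checker that a data or instruction
 * serialization point occurs here.  On toolchains without the directive
 * (no HAVE_SERIALIZE_DIRECTIVE) they expand to nothing.
 */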

#define ia64_nop(x)	asm volatile ("nop %0"::"i"(x))

#define ia64_itci(addr)	asm volatile ("itc.i %0;;" :: "r"(addr) : "memory")

#define ia64_itcd(addr)	asm volatile ("itc.d %0;;" :: "r"(addr) : "memory")

#define ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1"			\
					     :: "r"(trnum), "r"(addr) : "memory")

#define ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1"			\
					     :: "r"(trnum), "r"(addr) : "memory")

#define ia64_tpa(addr)								\
({										\
	__u64 ia64_pa;								\
	asm volatile ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : "memory");	\
	ia64_pa;								\
})
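/*
 * tpa translates a virtual address to the corresponding physical
 * address via the TLB; it faults if no translation is present.
 */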

#define __ia64_set_dbr(index, val)						\
	asm volatile ("mov dbr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_ibr(index, val)						\
	asm volatile ("mov ibr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_pkr(index, val)						\
	asm volatile ("mov pkr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_pmc(index, val)						\
	asm volatile ("mov pmc[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_pmd(index, val)						\
	asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_rr(index, val)							\
	asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_get_cpuid(index)							  \
({										  \
	__u64 ia64_intri_res;							  \
	asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index)); \
	ia64_intri_res;								  \
})

#define __ia64_get_dbr(index)							\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("mov %0=dbr[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
	ia64_intri_res;								\
})

#define ia64_get_ibr(index)							\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
	ia64_intri_res;								\
})

#define ia64_get_pkr(index)							\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
	ia64_intri_res;								\
})

#define ia64_get_pmc(index)							\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
	ia64_intri_res;								\
})

#define ia64_get_pmd(index)							\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
	ia64_intri_res;								\
})

#define ia64_get_rr(index)							\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index));	\
	ia64_intri_res;								\
})

#define ia64_fc(addr)	asm volatile ("fc %0" :: "r"(addr) : "memory")

#define ia64_sync_i()	asm volatile (";; sync.i" ::: "memory")

#define ia64_ssm(mask)	asm volatile ("ssm %0":: "i"((mask)) : "memory")
#define ia64_rsm(mask)	asm volatile ("rsm %0":: "i"((mask)) : "memory")
#define ia64_sum(mask)	asm volatile ("sum %0":: "i"((mask)) : "memory")
#define ia64_rum(mask)	asm volatile ("rum %0":: "i"((mask)) : "memory")

#define ia64_ptce(addr)	asm volatile ("ptc.e %0" :: "r"(addr))

#define ia64_ptcga(addr, size)							\
do {										\
	asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory");	\
	ia64_dv_serialize_data();						\
} while (0)

#define ia64_ptcl(addr, size)							\
do {										\
	asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(size) : "memory");	\
	ia64_dv_serialize_data();						\
} while (0)
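/*
 * ptc.ga broadcasts the purge to every processor's TLB (a global purge),
 * while ptc.l purges only the local TLB; both are followed by a
 * data-serialization marker for the assembler.
 */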

#define ia64_ptri(addr, size)						\
	asm volatile ("ptr.i %0,%1" :: "r"(addr), "r"(size) : "memory")

#define ia64_ptrd(addr, size)						\
	asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory")

/* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */

#define ia64_lfhint_none   0
#define ia64_lfhint_nt1    1
#define ia64_lfhint_nt2    2
#define ia64_lfhint_nta    3
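/*
 * .nt1, .nt2 and .nta mark the prefetched line non-temporal at cache
 * level 1, level 2, or all levels.  The .fault variants below behave
 * like real loads and may raise faults; plain lfetch never faults.
 */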

#define ia64_lfetch(lfhint, y)					\
({								\
	switch (lfhint) {					\
	case ia64_lfhint_none:					\
		asm volatile ("lfetch [%0]" :: "r"(y));		\
		break;						\
	case ia64_lfhint_nt1:					\
		asm volatile ("lfetch.nt1 [%0]" :: "r"(y));	\
		break;						\
	case ia64_lfhint_nt2:					\
		asm volatile ("lfetch.nt2 [%0]" :: "r"(y));	\
		break;						\
	case ia64_lfhint_nta:					\
		asm volatile ("lfetch.nta [%0]" :: "r"(y));	\
		break;						\
	}							\
})

#define ia64_lfetch_excl(lfhint, y)					\
({									\
	switch (lfhint) {						\
	case ia64_lfhint_none:						\
		asm volatile ("lfetch.excl [%0]" :: "r"(y));		\
		break;							\
	case ia64_lfhint_nt1:						\
		asm volatile ("lfetch.excl.nt1 [%0]" :: "r"(y));	\
		break;							\
	case ia64_lfhint_nt2:						\
		asm volatile ("lfetch.excl.nt2 [%0]" :: "r"(y));	\
		break;							\
	case ia64_lfhint_nta:						\
		asm volatile ("lfetch.excl.nta [%0]" :: "r"(y));	\
		break;							\
	}								\
})

#define ia64_lfetch_fault(lfhint, y)					\
({									\
	switch (lfhint) {						\
	case ia64_lfhint_none:						\
		asm volatile ("lfetch.fault [%0]" :: "r"(y));		\
		break;							\
	case ia64_lfhint_nt1:						\
		asm volatile ("lfetch.fault.nt1 [%0]" :: "r"(y));	\
		break;							\
	case ia64_lfhint_nt2:						\
		asm volatile ("lfetch.fault.nt2 [%0]" :: "r"(y));	\
		break;							\
	case ia64_lfhint_nta:						\
		asm volatile ("lfetch.fault.nta [%0]" :: "r"(y));	\
		break;							\
	}								\
})

#define ia64_lfetch_fault_excl(lfhint, y)				\
({									\
	switch (lfhint) {						\
	case ia64_lfhint_none:						\
		asm volatile ("lfetch.fault.excl [%0]" :: "r"(y));	\
		break;							\
	case ia64_lfhint_nt1:						\
		asm volatile ("lfetch.fault.excl.nt1 [%0]" :: "r"(y));	\
		break;							\
	case ia64_lfhint_nt2:						\
		asm volatile ("lfetch.fault.excl.nt2 [%0]" :: "r"(y));	\
		break;							\
	case ia64_lfhint_nta:						\
		asm volatile ("lfetch.fault.excl.nta [%0]" :: "r"(y));	\
		break;							\
	}								\
})

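/*
 * Restore the interrupt state previously captured from psr.i: nonzero x
 * re-enables interrupts (ssm psr.i, then srlz.d to serialize), zero
 * disables them (rsm psr.i).  Predicates p6/p7 implement the branchless
 * conditional and are listed as clobbers.
 */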
#define ia64_intrin_local_irq_restore(x)			\
do {								\
	asm volatile (";;   cmp.ne p6,p7=%0,r0;;"		\
		      "(p6) ssm psr.i;"				\
		      "(p7) rsm psr.i;;"			\
		      "(p6) srlz.d"				\
		      :: "r"((x)) : "p6", "p7", "memory");	\
} while (0)

#endif /* _ASM_IA64_GCC_INTRIN_H */