 * Copyright 2007-2008 Analog Devices Inc.
 * Philippe Gerum <rpm@xenomai.org>
 * Licensed under the GPL-2 or later.
#include <linux/linkage.h>
#include <asm/blackfin.h>
#include <asm/cache.h>
#include <asm/asm-offsets.h>
#include <asm/rwlock.h>
.macro coreslot_loadaddr reg:req
	\reg\().l = _corelock;
	\reg\().h = _corelock;
 * r0 = address of atomic data to flush and invalidate (32bit).
 * Clear interrupts and return the old mask.
 * We assume that no atomic data can span cachelines.
	if cc jump .Ldone_corelock;
ENDPROC(_get_core_lock)
 * r0 = address of atomic data in uncacheable memory region (32bit).
 * Clear interrupts and return the old mask.
ENTRY(_get_core_lock_noflush)
.Lretry_corelock_noflush:
	if cc jump .Ldone_corelock_noflush;
	jump .Lretry_corelock_noflush
.Ldone_corelock_noflush:
ENDPROC(_get_core_lock_noflush)
 * r0 = interrupt mask to restore.
 * r1 = address of atomic data to flush and invalidate (32bit).
 * Interrupts are masked on entry (see _get_core_lock).
	/* Write-through cache assumed, so no flush needed here. */
ENDPROC(_put_core_lock)
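/*
 * A minimal standalone C sketch of the acquire/release protocol implemented
 * by the three core-lock helpers above, assuming the _corelock slot behaves
 * as a global test-and-set flag taken with interrupts disabled; the names
 * below are hypothetical, and the IRQ and cacheline flush steps are only
 * noted in comments.
 *
 *	#include <stdatomic.h>
 *
 *	static atomic_flag corelock_sketch = ATOMIC_FLAG_INIT;
 *
 *	static void core_lock_sketch(void)
 *	{
 *		// interrupts are cleared first and the old mask is returned
 *		// (see the comment on _get_core_lock)
 *		while (atomic_flag_test_and_set(&corelock_sketch))
 *			;	// spin, as in .Lretry_corelock
 *		// _get_core_lock also flushes and invalidates the cacheline
 *		// holding the atomic data before returning
 *	}
 *
 *	static void core_unlock_sketch(void)
 *	{
 *		atomic_flag_clear(&corelock_sketch);	// clear the _corelock slot
 *		// the saved interrupt mask is then restored by _put_core_lock
 *	}
 */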
#ifdef __ARCH_SYNC_CORE_DCACHE
ENTRY(___raw_smp_mark_barrier_asm)
	call _get_core_lock_noflush;
	 * Calculate current core mask
	 * Set the bits of the other cores in the barrier mask; don't change the current core's bit.
	p1.l = _barrier_mask;
	p1.h = _barrier_mask;
ENDPROC(___raw_smp_mark_barrier_asm)
ENTRY(___raw_smp_check_barrier_asm)
	call _get_core_lock_noflush;
	 * Calculate current core mask
	 * Clear current core bit in barrier mask if it is set.
	p1.l = _barrier_mask;
	p1.h = _barrier_mask;
	 * Invalidate the entire D-cache of current core.
	call _resync_core_dcache
ENDPROC(___raw_smp_check_barrier_asm)
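/*
 * A minimal standalone C sketch of the barrier-mask bookkeeping performed by
 * the two routines above, assuming _barrier_mask holds one bit per core and
 * is only touched with the core lock held; invalidate_local_dcache_sketch()
 * is a hypothetical stand-in for _resync_core_dcache.
 *
 *	#include <stdint.h>
 *
 *	static volatile uint32_t barrier_mask_sketch;
 *
 *	static void invalidate_local_dcache_sketch(void) { }	// placeholder
 *
 *	// ___raw_smp_mark_barrier_asm: flag every core except the current one
 *	// as needing a D-cache resync; the current core's bit is left alone.
 *	static void mark_barrier_sketch(unsigned int cpu)
 *	{
 *		barrier_mask_sketch |= ~(1u << cpu);
 *	}
 *
 *	// ___raw_smp_check_barrier_asm: if this core was flagged, clear its
 *	// bit and invalidate the entire local D-cache.
 *	static void check_barrier_sketch(unsigned int cpu)
 *	{
 *		if (barrier_mask_sketch & (1u << cpu)) {
 *			barrier_mask_sketch &= ~(1u << cpu);
 *			invalidate_local_dcache_sketch();
 *		}
 *	}
 */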
 * r1 = address of atomic data
 * Clobbers: r2:0, p1:0
_start_lock_coherent:
	 * Determine whether the atomic data was previously
	 * owned by another CPU (=r6).
	r1 >>= 28; /* CPU fingerprints are stored in the high nibble. */
	 * Release the core lock now, but keep IRQs disabled while we are
	 * performing the remaining housekeeping chores for the current CPU.
	coreslot_loadaddr p0;
	 * If another CPU has owned the same atomic section before us,
	 * then our D-cached copy of the shared data protected by the
	 * current spin/write_lock may be obsolete.
	if cc jump .Lcache_synced
	 * Invalidate the entire D-cache of the current core.
	call _resync_core_dcache
 * r1 = address of atomic data
 * Clobbers: r2:0, p1:0
#endif /* __ARCH_SYNC_CORE_DCACHE */
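/*
 * A minimal standalone C sketch of the "CPU fingerprint" scheme used by
 * _start_lock_coherent/_end_lock_coherent above, assuming the owners are
 * recorded as per-core bits in the high nibble (bits 31:28) of the lock word;
 * invalidate_local_dcache_sketch() is a hypothetical stand-in for
 * _resync_core_dcache, and the core lock/IRQ handling is omitted.
 *
 *	#include <stdint.h>
 *
 *	static void invalidate_local_dcache_sketch(void) { }	// placeholder
 *
 *	// On acquisition: if another core's fingerprint is present, our cached
 *	// copy of the protected data may be stale, so resync; then clear the
 *	// fingerprint nibble.
 *	static void start_lock_coherent_sketch(volatile uint32_t *lock,
 *					       unsigned int cpu)
 *	{
 *		uint32_t others = (*lock >> 28) & ~(1u << cpu);
 *
 *		*lock &= 0x0fffffff;	// drop the fingerprint nibble
 *		if (others)
 *			invalidate_local_dcache_sketch();
 *	}
 *
 *	// On release: stamp the current core's fingerprint into the high nibble.
 *	static void end_lock_coherent_sketch(volatile uint32_t *lock,
 *					     unsigned int cpu)
 *	{
 *		*lock |= 1u << (28 + cpu);
 *	}
 */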
 * r0 = &spinlock->lock
 * Clobbers: r3:0, p1:0
ENTRY(___raw_spin_is_locked_asm)
	cc = bittst( r3, 0 );
ENDPROC(___raw_spin_is_locked_asm)
 * r0 = &spinlock->lock
 * Clobbers: r3:0, p1:0
ENTRY(___raw_spin_lock_asm)
	cc = bittst( r2, 0 );
	if cc jump .Lbusy_spinlock
#ifdef __ARCH_SYNC_CORE_DCACHE
	bitset ( r2, 0 ); /* Raise the lock bit. */
	call _start_lock_coherent
	/* We don't touch the atomic area if busy, so that the flush
	   in _put_core_lock behaves like a nop. */
	jump .Lretry_spinlock
ENDPROC(___raw_spin_lock_asm)
 * r0 = &spinlock->lock
 * Clobbers: r3:0, p1:0
ENTRY(___raw_spin_trylock_asm)
	cc = bittst( r3, 0 );
	if cc jump .Lfailed_trylock
#ifdef __ARCH_SYNC_CORE_DCACHE
	bitset ( r3, 0 ); /* Raise the lock bit. */
	call _start_lock_coherent
ENDPROC(___raw_spin_trylock_asm)
 * r0 = &spinlock->lock
 * Clobbers: r2:0, p1:0
ENTRY(___raw_spin_unlock_asm)
#ifdef __ARCH_SYNC_CORE_DCACHE
	call _end_lock_coherent
ENDPROC(___raw_spin_unlock_asm)
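/*
 * A minimal standalone C sketch of the spinlock logic above, assuming bit 0
 * of the lock word is the "locked" flag (as the bittst/bitset on bit 0
 * suggest); the core lock, IRQ masking and _start/_end_lock_coherent steps
 * wrapped around every access in the assembly are omitted here.
 *
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	// ___raw_spin_trylock_asm: grab bit 0 if it is currently clear.
 *	static bool spin_trylock_sketch(volatile uint32_t *lock)
 *	{
 *		if (*lock & 1u)
 *			return false;	// .Lfailed_trylock
 *		*lock |= 1u;		// raise the lock bit
 *		return true;
 *	}
 *
 *	// ___raw_spin_lock_asm: retry until the lock bit could be raised.
 *	static void spin_lock_sketch(volatile uint32_t *lock)
 *	{
 *		while (!spin_trylock_sketch(lock))
 *			;	// .Lbusy_spinlock / .Lretry_spinlock
 *	}
 *
 *	// ___raw_spin_unlock_asm: clear the lock bit.
 *	static void spin_unlock_sketch(volatile uint32_t *lock)
 *	{
 *		*lock &= ~1u;
 *	}
 */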
 * Clobbers: r2:0, p1:0
ENTRY(___raw_read_lock_asm)
	if cc jump .Lrdlock_failed
#ifdef __ARCH_SYNC_CORE_DCACHE
	call _start_lock_coherent
	if cc jump .Lrdlock_wait;
ENDPROC(___raw_read_lock_asm)
 * Clobbers: r3:0, p1:0
ENTRY(___raw_read_trylock_asm)
	if cc jump .Lfailed_tryrdlock;
#ifdef __ARCH_SYNC_CORE_DCACHE
	call _start_lock_coherent
ENDPROC(___raw_read_trylock_asm)
 * Note: processing under a reader lock is not expected to modify the
 * shared data, so it cannot create cache-coherency issues for the other
 * core; we just release the core lock and exit (no _end_lock_coherent).
 * Clobbers: r3:0, p1:0
ENTRY(___raw_read_unlock_asm)
ENDPROC(___raw_read_unlock_asm)
 * Clobbers: r3:0, p1:0
ENTRY(___raw_write_lock_asm)
	r3.l = lo(RW_LOCK_BIAS);
	r3.h = hi(RW_LOCK_BIAS);
#ifdef __ARCH_SYNC_CORE_DCACHE
	if !cc jump .Lwrlock_wait
#ifdef __ARCH_SYNC_CORE_DCACHE
	call _start_lock_coherent
#ifdef __ARCH_SYNC_CORE_DCACHE
	if !cc jump .Lwrlock_wait;
ENDPROC(___raw_write_lock_asm)
 * Clobbers: r3:0, p1:0
ENTRY(___raw_write_trylock_asm)
	r2.l = lo(RW_LOCK_BIAS);
	r2.h = hi(RW_LOCK_BIAS);
	if !cc jump .Lfailed_trywrlock;
#ifdef __ARCH_SYNC_CORE_DCACHE
#ifdef __ARCH_SYNC_CORE_DCACHE
	call _start_lock_coherent
ENDPROC(___raw_write_trylock_asm)
 * Clobbers: r3:0, p1:0
ENTRY(___raw_write_unlock_asm)
	r3.l = lo(RW_LOCK_BIAS);
	r3.h = hi(RW_LOCK_BIAS);
#ifdef __ARCH_SYNC_CORE_DCACHE
	call _end_lock_coherent
ENDPROC(___raw_write_unlock_asm)
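/*
 * A minimal standalone C sketch of the biased-counter rwlock scheme that
 * RW_LOCK_BIAS (from <asm/rwlock.h>) implies: the counter starts at the bias,
 * each reader takes one unit, and a writer takes the whole bias. The bias
 * value below is a stand-in, and the core lock/coherence handling wrapped
 * around every access in the assembly is omitted.
 *
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	#define RW_LOCK_BIAS_SKETCH 0x01000000	// placeholder value
 *
 *	// counter == bias: free; bias - n: n readers; <= 0: writer held
 *	static bool read_trylock_sketch(int32_t *counter)
 *	{
 *		if (*counter <= 0)
 *			return false;	// a writer holds the lock
 *		(*counter)--;
 *		return true;
 *	}
 *
 *	static void read_unlock_sketch(int32_t *counter)
 *	{
 *		(*counter)++;
 *	}
 *
 *	static bool write_trylock_sketch(int32_t *counter)
 *	{
 *		if (*counter != RW_LOCK_BIAS_SKETCH)
 *			return false;	// readers or another writer present
 *		*counter -= RW_LOCK_BIAS_SKETCH;	// drops to zero
 *		return true;
 *	}
 *
 *	static void write_unlock_sketch(int32_t *counter)
 *	{
 *		*counter += RW_LOCK_BIAS_SKETCH;
 *	}
 */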
 * Add a signed value to a 32bit word and return the new value atomically.
 * Clobbers: r3:0, p1:0
ENTRY(___raw_atomic_update_asm)
ENDPROC(___raw_atomic_update_asm)
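/*
 * A minimal standalone C sketch of what ___raw_atomic_update_asm computes,
 * with the core lock, IRQ masking and cacheline flush steps stripped out;
 * the name below is hypothetical.
 *
 *	#include <stdint.h>
 *
 *	static int32_t atomic_update_sketch(int32_t *ptr, int32_t value)
 *	{
 *		*ptr += value;	// performed with the core lock held in the asm
 *		return *ptr;	// the new value is what gets returned
 *	}
 */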
 * Clear the mask bits from a 32bit word and return the old 32bit value atomically.
 * Clobbers: r3:0, p1:0
ENTRY(___raw_atomic_clear_asm)
ENDPROC(___raw_atomic_clear_asm)
 * Set the mask bits into a 32bit word and return the old 32bit value atomically.
 * Clobbers: r3:0, p1:0
ENTRY(___raw_atomic_set_asm)
ENDPROC(___raw_atomic_set_asm)
 * XOR the mask bits with a 32bit word and return the old 32bit value atomically.
 * Clobbers: r3:0, p1:0
ENTRY(___raw_atomic_xor_asm)
ENDPROC(___raw_atomic_xor_asm)
 * Perform a logical AND between the mask bits and a 32bit word, and
 * return the masked value. We need this on this architecture in
 * order to invalidate the local cache before testing.
 * Clobbers: r3:0, p1:0
ENTRY(___raw_atomic_test_asm)
	r1 = -L1_CACHE_BYTES;
ENDPROC(___raw_atomic_test_asm)
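/*
 * A minimal standalone C sketch of the values computed by the four mask-based
 * routines above (clear/set/xor return the old word, test returns the masked
 * word); the core lock and cacheline flush/invalidate steps are omitted and
 * the names below are hypothetical.
 *
 *	#include <stdint.h>
 *
 *	static uint32_t atomic_clear_sketch(uint32_t *ptr, uint32_t mask)
 *	{
 *		uint32_t old = *ptr;
 *
 *		*ptr = old & ~mask;	// clear the mask bits
 *		return old;
 *	}
 *
 *	static uint32_t atomic_set_sketch(uint32_t *ptr, uint32_t mask)
 *	{
 *		uint32_t old = *ptr;
 *
 *		*ptr = old | mask;	// set the mask bits
 *		return old;
 *	}
 *
 *	static uint32_t atomic_xor_sketch(uint32_t *ptr, uint32_t mask)
 *	{
 *		uint32_t old = *ptr;
 *
 *		*ptr = old ^ mask;	// toggle the mask bits
 *		return old;
 *	}
 *
 *	// ___raw_atomic_test_asm only needs the cacheline invalidate so that
 *	// the word it reads is coherent; the computation is a plain AND.
 *	static uint32_t atomic_test_sketch(const uint32_t *ptr, uint32_t mask)
 *	{
 *		return *ptr & mask;
 *	}
 */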
 * Swap *ptr with value and return the old 32bit value atomically.
 * Clobbers: r3:0, p1:0
#define __do_xchg(src, dst) \
	call _get_core_lock; \
	call _put_core_lock; \
ENTRY(___raw_xchg_1_asm)
	__do_xchg(b[p1] (z), b[p1])
ENDPROC(___raw_xchg_1_asm)
ENTRY(___raw_xchg_2_asm)
	__do_xchg(w[p1] (z), w[p1])
ENDPROC(___raw_xchg_2_asm)
ENTRY(___raw_xchg_4_asm)
	__do_xchg([p1], [p1])
ENDPROC(___raw_xchg_4_asm)
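/*
 * A minimal standalone C sketch of what each __do_xchg() expansion computes
 * once the core lock is held; the src/dst arguments of the macro only select
 * the access width (byte, 16bit or 32bit). Shown for the 32bit case with a
 * hypothetical name.
 *
 *	#include <stdint.h>
 *
 *	static uint32_t xchg_4_sketch(uint32_t *ptr, uint32_t value)
 *	{
 *		uint32_t old = *ptr;	// read the old value (via src)
 *
 *		*ptr = value;		// store the new value (via dst)
 *		return old;		// the previous value is returned
 *	}
 */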
 * Swap *ptr with new if *ptr == old and return the previous *ptr value atomically.
 * Clobbers: r3:0, p1:0
#define __do_cmpxchg(src, dst) \
	call _get_core_lock; \
	call _put_core_lock; \
ENTRY(___raw_cmpxchg_1_asm)
	__do_cmpxchg(b[p1] (z), b[p1])
ENDPROC(___raw_cmpxchg_1_asm)
ENTRY(___raw_cmpxchg_2_asm)
	__do_cmpxchg(w[p1] (z), w[p1])
ENDPROC(___raw_cmpxchg_2_asm)
ENTRY(___raw_cmpxchg_4_asm)
	__do_cmpxchg([p1], [p1])
ENDPROC(___raw_cmpxchg_4_asm)
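/*
 * A minimal standalone C sketch of what each __do_cmpxchg() expansion
 * computes once the core lock is held, shown for the 32bit case with a
 * hypothetical name; the 1- and 2-byte variants differ only in access width.
 *
 *	#include <stdint.h>
 *
 *	static uint32_t cmpxchg_4_sketch(uint32_t *ptr, uint32_t new,
 *					 uint32_t old)
 *	{
 *		uint32_t prev = *ptr;	// read with the core lock held
 *
 *		if (prev == old)
 *			*ptr = new;	// store only on a match
 *		return prev;		// the previous value is always returned
 *	}
 */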
 * Set a bit in a 32bit word and return the old 32bit value atomically.
 * Clobbers: r3:0, p1:0
ENTRY(___raw_bit_set_asm)
	jump ___raw_atomic_set_asm
ENDPROC(___raw_bit_set_asm)
 * Clear a bit in a 32bit word and return the old 32bit value atomically.
 * Clobbers: r3:0, p1:0
ENTRY(___raw_bit_clear_asm)
	jump ___raw_atomic_clear_asm
ENDPROC(___raw_bit_clear_asm)
 * Toggle a bit in a 32bit word and return the old 32bit value atomically.
 * Clobbers: r3:0, p1:0
ENTRY(___raw_bit_toggle_asm)
	jump ___raw_atomic_xor_asm
ENDPROC(___raw_bit_toggle_asm)
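/*
 * A minimal standalone C sketch of the bit routines above: they turn a bit
 * number into a single-bit mask and tail-jump to the matching mask routine,
 * so the whole old word (not just the bit) is returned. Shown for the set
 * case with a hypothetical name.
 *
 *	#include <stdint.h>
 *
 *	static uint32_t bit_set_sketch(uint32_t *ptr, unsigned int nr)
 *	{
 *		uint32_t mask = 1u << nr;	// built before the tail-jump
 *		uint32_t old = *ptr;
 *
 *		*ptr = old | mask;
 *		return old;			// whole old word, as for atomic set
 *	}
 */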
 * Test-and-set a bit in a 32bit word and return the old bit value atomically.
 * Clobbers: r3:0, p1:0
ENTRY(___raw_bit_test_set_asm)
	call ___raw_bit_set_asm
ENDPROC(___raw_bit_test_set_asm)
 * Test-and-clear a bit in a 32bit word and return the old bit value atomically.
 * Clobbers: r3:0, p1:0
ENTRY(___raw_bit_test_clear_asm)
	call ___raw_bit_clear_asm
ENDPROC(___raw_bit_test_clear_asm)
 * Test-and-toggle a bit in a 32bit word,
 * and return the old bit value atomically.
 * Clobbers: r3:0, p1:0
ENTRY(___raw_bit_test_toggle_asm)
	call ___raw_bit_toggle_asm
ENDPROC(___raw_bit_test_toggle_asm)
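/*
 * A minimal standalone C sketch of the test-and-X routines above: they call
 * the plain bit routine and then reduce the returned old word to the value
 * of the single bit that was tested. Shown for the set case with a
 * hypothetical name.
 *
 *	#include <stdint.h>
 *
 *	static uint32_t bit_test_set_sketch(uint32_t *ptr, unsigned int nr)
 *	{
 *		uint32_t old = *ptr;		// as returned by ___raw_bit_set_asm
 *
 *		*ptr = old | (1u << nr);
 *		return (old >> nr) & 1u;	// the old bit value
 *	}
 */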
 * Test a bit in a 32bit word and return its value.
 * We need this on this architecture in order to invalidate
 * the local cache before testing.
 * Clobbers: r3:0, p1:0
ENTRY(___raw_bit_test_asm)
	jump ___raw_atomic_test_asm
ENDPROC(___raw_bit_test_asm)
 * Fetch and return an uncached 32bit value.
 * Clobbers: r2:0, p1:0
ENTRY(___raw_uncached_fetch_asm)
	r1 = -L1_CACHE_BYTES;
ENDPROC(___raw_uncached_fetch_asm)
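/*
 * A minimal standalone C sketch of the addressing trick behind
 * "r1 = -L1_CACHE_BYTES" above: ANDing the data address with the negated
 * cacheline size rounds it down to the start of its cacheline, which is then
 * flushed and invalidated before the load so the value comes from memory
 * rather than from a stale D-cache line. The line size and the invalidate
 * helper below are hypothetical stand-ins.
 *
 *	#include <stdint.h>
 *
 *	#define L1_CACHE_BYTES_SKETCH 32u	// placeholder line size
 *
 *	static void flushinv_line_sketch(uintptr_t line) { (void)line; }
 *
 *	static uint32_t uncached_fetch_sketch(const volatile uint32_t *ptr)
 *	{
 *		uintptr_t line = (uintptr_t)ptr &
 *				 ~(uintptr_t)(L1_CACHE_BYTES_SKETCH - 1);
 *
 *		flushinv_line_sketch(line);	// invalidate the aligned line
 *		return *ptr;			// then read the fresh value
 *	}
 */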