1 /*****************************************************************************\
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
8 * This file is part of the SPL, Solaris Porting Layer.
9 * For details, see <http://zfsonlinux.org/>.
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
21 * You should have received a copy of the GNU General Public License along
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
24 * Solaris Porting Layer (SPL) Reader/Writer Lock Implementation.
25 \*****************************************************************************/
27 #include <sys/rwlock.h>
29 #if defined(CONFIG_PREEMPT_RT_FULL)
31 #include <linux/rtmutex.h>
32 #define RT_MUTEX_OWNER_MASKALL 1UL
35 __rwsem_tryupgrade(struct rw_semaphore
*rwsem
)
38 ASSERT((struct task_struct
*)
39 ((unsigned long)rwsem
->lock
.owner
& ~RT_MUTEX_OWNER_MASKALL
) ==
43 * Under the realtime patch series, rwsem is implemented as a
44 * single mutex held by readers and writers alike. However,
45 * this implementation would prevent a thread from taking a
46 * read lock twice, as the mutex would already be locked on
47 * the second attempt. Therefore the implementation allows a
48 * single thread to take a rwsem as read lock multiple times
49 * tracking that nesting as read_depth counter.
51 if (rwsem
->read_depth
<= 1) {
53 * In case, the current thread has not taken the lock
54 * more than once as read lock, we can allow an
55 * upgrade to a write lock. rwsem_rt.h implements
56 * write locks as read_depth == 0.
58 rwsem
->read_depth
= 0;
63 #elif defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
65 __rwsem_tryupgrade(struct rw_semaphore
*rwsem
)
69 spl_rwsem_lock_irqsave(&rwsem
->wait_lock
, flags
);
70 if (RWSEM_COUNT(rwsem
) == SPL_RWSEM_SINGLE_READER_VALUE
&&
71 list_empty(&rwsem
->wait_list
)) {
73 RWSEM_COUNT(rwsem
) = SPL_RWSEM_SINGLE_WRITER_VALUE
;
75 spl_rwsem_unlock_irqrestore(&rwsem
->wait_lock
, flags
);
78 #elif defined(HAVE_RWSEM_ATOMIC_LONG_COUNT)
80 __rwsem_tryupgrade(struct rw_semaphore
*rwsem
)
83 val
= atomic_long_cmpxchg(&rwsem
->count
, SPL_RWSEM_SINGLE_READER_VALUE
,
84 SPL_RWSEM_SINGLE_WRITER_VALUE
);
85 return (val
== SPL_RWSEM_SINGLE_READER_VALUE
);
89 __rwsem_tryupgrade(struct rw_semaphore
*rwsem
)
91 typeof(rwsem
->count
) val
;
92 val
= cmpxchg(&rwsem
->count
, SPL_RWSEM_SINGLE_READER_VALUE
,
93 SPL_RWSEM_SINGLE_WRITER_VALUE
);
94 return (val
== SPL_RWSEM_SINGLE_READER_VALUE
);
99 rwsem_tryupgrade(struct rw_semaphore
*rwsem
)
101 if (__rwsem_tryupgrade(rwsem
)) {
102 rwsem_release(&rwsem
->dep_map
, 1, _RET_IP_
);
103 rwsem_acquire(&rwsem
->dep_map
, 0, 1, _RET_IP_
);
104 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
105 rwsem
->owner
= current
;
111 EXPORT_SYMBOL(rwsem_tryupgrade
);
/* Module init hook: the rwlock subsystem has no global state to set up. */
int
spl_rw_init(void)
{
	return (0);
}
/* Module teardown hook: the rwlock subsystem has no global state to free. */
void
spl_rw_fini(void)
{
}