1 /*****************************************************************************\
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
8 * This file is part of the SPL, Solaris Porting Layer.
9 * For details, see <http://zfsonlinux.org/>.
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 *****************************************************************************
24 * Solaris Porting Layer (SPL) Reader/Writer Lock Implementation.
25 \*****************************************************************************/
27 #include <sys/rwlock.h>
29 #ifdef DEBUG_SUBSYSTEM
30 #undef DEBUG_SUBSYSTEM
33 #define DEBUG_SUBSYSTEM S_RWLOCK
35 #if defined(CONFIG_PREEMPT_RT_FULL)
37 #include <linux/rtmutex.h>
38 #define RT_MUTEX_OWNER_MASKALL 1UL
41 __rwsem_tryupgrade(struct rw_semaphore
*rwsem
)
44 ASSERT((struct task_struct
*)
45 ((unsigned long)rwsem
->lock
.owner
& ~RT_MUTEX_OWNER_MASKALL
) ==
49 * Under the realtime patch series, rwsem is implemented as a
50 * single mutex held by readers and writers alike. However,
51 * this implementation would prevent a thread from taking a
52 * read lock twice, as the mutex would already be locked on
53 * the second attempt. Therefore the implementation allows a
54 * single thread to take a rwsem as read lock multiple times
55 * tracking that nesting as read_depth counter.
57 if (rwsem
->read_depth
<= 1) {
59 * In case, the current thread has not taken the lock
60 * more than once as read lock, we can allow an
61 * upgrade to a write lock. rwsem_rt.h implements
62 * write locks as read_depth == 0.
64 rwsem
->read_depth
= 0;
69 #elif defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
71 __rwsem_tryupgrade(struct rw_semaphore
*rwsem
)
75 spl_rwsem_lock_irqsave(&rwsem
->wait_lock
, flags
);
76 if (RWSEM_COUNT(rwsem
) == SPL_RWSEM_SINGLE_READER_VALUE
&&
77 list_empty(&rwsem
->wait_list
)) {
79 RWSEM_COUNT(rwsem
) = SPL_RWSEM_SINGLE_WRITER_VALUE
;
81 spl_rwsem_unlock_irqrestore(&rwsem
->wait_lock
, flags
);
84 #elif defined(HAVE_RWSEM_ATOMIC_LONG_COUNT)
86 __rwsem_tryupgrade(struct rw_semaphore
*rwsem
)
89 val
= atomic_long_cmpxchg(&rwsem
->count
, SPL_RWSEM_SINGLE_READER_VALUE
,
90 SPL_RWSEM_SINGLE_WRITER_VALUE
);
91 return (val
== SPL_RWSEM_SINGLE_READER_VALUE
);
95 __rwsem_tryupgrade(struct rw_semaphore
*rwsem
)
97 typeof (rwsem
->count
) val
;
98 val
= cmpxchg(&rwsem
->count
, SPL_RWSEM_SINGLE_READER_VALUE
,
99 SPL_RWSEM_SINGLE_WRITER_VALUE
);
100 return (val
== SPL_RWSEM_SINGLE_READER_VALUE
);
105 rwsem_tryupgrade(struct rw_semaphore
*rwsem
)
107 if (__rwsem_tryupgrade(rwsem
)) {
108 rwsem_release(&rwsem
->dep_map
, 1, _RET_IP_
);
109 rwsem_acquire(&rwsem
->dep_map
, 0, 1, _RET_IP_
);
110 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
111 rwsem
->owner
= current
;
117 EXPORT_SYMBOL(rwsem_tryupgrade
);
/* Module init hook: the rwlock layer keeps no global state, so there
 * is nothing to set up; unconditionally report success. */
int
spl_rw_init(void)
{
	return (0);
}
/* Module teardown hook: nothing was allocated in spl_rw_init(), so
 * there is nothing to release. */
void
spl_rw_fini(void)
{
}