1 /*****************************************************************************\
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
8 * This file is part of the SPL, Solaris Porting Layer.
9 * For details, see <http://zfsonlinux.org/>.
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
21 * You should have received a copy of the GNU General Public License along
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 *****************************************************************************
24 * Solaris Porting Layer (SPL) Reader/Writer Lock Implementation.
25 \*****************************************************************************/
27 #include <sys/rwlock.h>
29 #ifdef DEBUG_SUBSYSTEM
30 #undef DEBUG_SUBSYSTEM
33 #define DEBUG_SUBSYSTEM S_RWLOCK
35 #if defined(CONFIG_PREEMPT_RT_FULL)
37 #include <linux/rtmutex.h>
40 __rwsem_tryupgrade(struct rw_semaphore
*rwsem
)
42 ASSERT(rt_mutex_owner(&rwsem
->lock
) == current
);
45 * Under the realtime patch series, rwsem is implemented as a
46 * single mutex held by readers and writers alike. However,
47 * this implementation would prevent a thread from taking a
48 * read lock twice, as the mutex would already be locked on
49 * the second attempt. Therefore the implementation allows a
50 * single thread to take a rwsem as read lock multiple times
51 * tracking that nesting as read_depth counter.
53 if (rwsem
->read_depth
<= 1) {
55 * In case, the current thread has not taken the lock
56 * more than once as read lock, we can allow an
57 * upgrade to a write lock. rwsem_rt.h implements
58 * write locks as read_depth == 0.
60 rwsem
->read_depth
= 0;
65 #elif defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
67 __rwsem_tryupgrade(struct rw_semaphore
*rwsem
)
71 spl_rwsem_lock_irqsave(&rwsem
->wait_lock
, flags
);
72 if (RWSEM_COUNT(rwsem
) == SPL_RWSEM_SINGLE_READER_VALUE
&&
73 list_empty(&rwsem
->wait_list
)) {
75 RWSEM_COUNT(rwsem
) = SPL_RWSEM_SINGLE_WRITER_VALUE
;
77 spl_rwsem_unlock_irqrestore(&rwsem
->wait_lock
, flags
);
80 #elif defined(HAVE_RWSEM_ATOMIC_LONG_COUNT)
82 __rwsem_tryupgrade(struct rw_semaphore
*rwsem
)
85 val
= atomic_long_cmpxchg(&rwsem
->count
, SPL_RWSEM_SINGLE_READER_VALUE
,
86 SPL_RWSEM_SINGLE_WRITER_VALUE
);
87 return (val
== SPL_RWSEM_SINGLE_READER_VALUE
);
91 __rwsem_tryupgrade(struct rw_semaphore
*rwsem
)
93 typeof (rwsem
->count
) val
;
94 val
= cmpxchg(&rwsem
->count
, SPL_RWSEM_SINGLE_READER_VALUE
,
95 SPL_RWSEM_SINGLE_WRITER_VALUE
);
96 return (val
== SPL_RWSEM_SINGLE_READER_VALUE
);
101 rwsem_tryupgrade(struct rw_semaphore
*rwsem
)
103 if (__rwsem_tryupgrade(rwsem
)) {
104 rwsem_release(&rwsem
->dep_map
, 1, _RET_IP_
);
105 rwsem_acquire(&rwsem
->dep_map
, 0, 1, _RET_IP_
);
106 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
107 rwsem
->owner
= current
;
113 EXPORT_SYMBOL(rwsem_tryupgrade
);
/* Module init hook: the rwlock shim needs no global state; always succeeds. */
int
spl_rw_init(void)
{
	return (0);
}

/* Module teardown hook: nothing to release. */
void
spl_rw_fini(void)
{
}