/*
 * fs/xfs/linux-2.6/xfs_vnode.c -- XFS vnode helper routines for Linux.
 */
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"

uint64_t vn_generation;		/* vnode generation number */
DEFINE_SPINLOCK(vnumber_lock);	/* serializes vn_generation updates */

/*
 * Dedicated vnode inactive/reclaim sync semaphores.
 * Prime number of hash buckets since address is used as the key.
 */
#define NVSYNC                  37
/* Hash a vnode address to its wait-queue bucket. */
#define vptosync(v)             (&vsync[((unsigned long)v) % NVSYNC])
/* Per-bucket wait queues; initialized once in vn_init(). */
STATIC wait_queue_head_t vsync[NVSYNC];

31 void
32 vn_init(void)
33 {
34 int i;
35
36 for (i = 0; i < NVSYNC; i++)
37 init_waitqueue_head(&vsync[i]);
38 }
39
40 void
41 vn_iowait(
42 struct vnode *vp)
43 {
44 wait_queue_head_t *wq = vptosync(vp);
45
46 wait_event(*wq, (atomic_read(&vp->v_iocount) == 0));
47 }
48
49 void
50 vn_iowake(
51 struct vnode *vp)
52 {
53 if (atomic_dec_and_test(&vp->v_iocount))
54 wake_up(vptosync(vp));
55 }
56
57 struct vnode *
58 vn_initialize(
59 struct inode *inode)
60 {
61 struct vnode *vp = LINVFS_GET_VP(inode);
62
63 XFS_STATS_INC(vn_active);
64 XFS_STATS_INC(vn_alloc);
65
66 vp->v_flag = VMODIFIED;
67 spinlock_init(&vp->v_lock, "v_lock");
68
69 spin_lock(&vnumber_lock);
70 if (!++vn_generation) /* v_number shouldn't be zero */
71 vn_generation++;
72 vp->v_number = vn_generation;
73 spin_unlock(&vnumber_lock);
74
75 ASSERT(VN_CACHED(vp) == 0);
76
77 /* Initialize the first behavior and the behavior chain head. */
78 vn_bhv_head_init(VN_BHV_HEAD(vp), "vnode");
79
80 atomic_set(&vp->v_iocount, 0);
81
82 #ifdef XFS_VNODE_TRACE
83 vp->v_trace = ktrace_alloc(VNODE_TRACE_SIZE, KM_SLEEP);
84 #endif /* XFS_VNODE_TRACE */
85
86 vn_trace_exit(vp, __FUNCTION__, (inst_t *)__return_address);
87 return vp;
88 }
89
90 /*
91 * Revalidate the Linux inode from the vattr.
92 * Note: i_size _not_ updated; we must hold the inode
93 * semaphore when doing that - callers responsibility.
94 */
95 void
96 vn_revalidate_core(
97 struct vnode *vp,
98 vattr_t *vap)
99 {
100 struct inode *inode = LINVFS_GET_IP(vp);
101
102 inode->i_mode = vap->va_mode;
103 inode->i_nlink = vap->va_nlink;
104 inode->i_uid = vap->va_uid;
105 inode->i_gid = vap->va_gid;
106 inode->i_blocks = vap->va_nblocks;
107 inode->i_mtime = vap->va_mtime;
108 inode->i_ctime = vap->va_ctime;
109 inode->i_blksize = vap->va_blocksize;
110 if (vap->va_xflags & XFS_XFLAG_IMMUTABLE)
111 inode->i_flags |= S_IMMUTABLE;
112 else
113 inode->i_flags &= ~S_IMMUTABLE;
114 if (vap->va_xflags & XFS_XFLAG_APPEND)
115 inode->i_flags |= S_APPEND;
116 else
117 inode->i_flags &= ~S_APPEND;
118 if (vap->va_xflags & XFS_XFLAG_SYNC)
119 inode->i_flags |= S_SYNC;
120 else
121 inode->i_flags &= ~S_SYNC;
122 if (vap->va_xflags & XFS_XFLAG_NOATIME)
123 inode->i_flags |= S_NOATIME;
124 else
125 inode->i_flags &= ~S_NOATIME;
126 }
127
128 /*
129 * Revalidate the Linux inode from the vnode.
130 */
131 int
132 __vn_revalidate(
133 struct vnode *vp,
134 struct vattr *vattr)
135 {
136 int error;
137
138 vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
139 vattr->va_mask = XFS_AT_STAT | XFS_AT_XFLAGS;
140 VOP_GETATTR(vp, vattr, 0, NULL, error);
141 if (likely(!error)) {
142 vn_revalidate_core(vp, vattr);
143 VUNMODIFY(vp);
144 }
145 return -error;
146 }
147
148 int
149 vn_revalidate(
150 struct vnode *vp)
151 {
152 vattr_t vattr;
153
154 return __vn_revalidate(vp, &vattr);
155 }
156
/*
 * Add a reference to a referenced vnode by grabbing an extra
 * reference on the backing Linux inode.  The vnode must already
 * be referenced (igrab() on a dying inode would return NULL,
 * which we assert against).
 */
struct vnode *
vn_hold(
	struct vnode	*vp)
{
	struct inode	*ip;

	XFS_STATS_INC(vn_hold);

	VN_LOCK(vp);
	ip = igrab(LINVFS_GET_IP(vp));
	ASSERT(ip);
	VN_UNLOCK(vp, 0);

	return vp;
}

#ifdef	XFS_VNODE_TRACE

/*
 * Record one vnode trace event: event kind, caller-supplied string
 * (function or file name), line number, current reference count,
 * caller address, vnode flags, and cpu/pid context.
 */
#define KTRACE_ENTER(vp, vk, s, line, ra)			\
	ktrace_enter(	(vp)->v_trace,				\
/*  0 */		(void *)(__psint_t)(vk),		\
/*  1 */		(void *)(s),				\
/*  2 */		(void *)(__psint_t) line,		\
/*  3 */		(void *)(__psint_t)(vn_count(vp)),	\
/*  4 */		(void *)(ra),				\
/*  5 */		(void *)(__psunsigned_t)(vp)->v_flag,	\
/*  6 */		(void *)(__psint_t)current_cpu(),	\
/*  7 */		(void *)(__psint_t)current_pid(),	\
/*  8 */		(void *)__return_address,		\
/*  9 */		NULL, NULL, NULL, NULL, NULL, NULL, NULL)

/*
 * Vnode tracing code.
 */
/* Trace entry into a function operating on vp. */
void
vn_trace_entry(vnode_t *vp, const char *func, inst_t *ra)
{
	KTRACE_ENTER(vp, VNODE_KTRACE_ENTRY, func, 0, ra);
}

/* Trace exit from a function operating on vp. */
void
vn_trace_exit(vnode_t *vp, const char *func, inst_t *ra)
{
	KTRACE_ENTER(vp, VNODE_KTRACE_EXIT, func, 0, ra);
}

/* Trace a VN_HOLD at file/line. */
void
vn_trace_hold(vnode_t *vp, char *file, int line, inst_t *ra)
{
	KTRACE_ENTER(vp, VNODE_KTRACE_HOLD, file, line, ra);
}

/* Trace taking a reference at file/line. */
void
vn_trace_ref(vnode_t *vp, char *file, int line, inst_t *ra)
{
	KTRACE_ENTER(vp, VNODE_KTRACE_REF, file, line, ra);
}

/* Trace a VN_RELE at file/line. */
void
vn_trace_rele(vnode_t *vp, char *file, int line, inst_t *ra)
{
	KTRACE_ENTER(vp, VNODE_KTRACE_RELE, file, line, ra);
}
#endif	/* XFS_VNODE_TRACE */