fs/xfs/xfs_dquot_item.c
/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_qm.h"

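/*
 * Convert from the generic log item back to the dquot log item that
 * contains it.
 */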
static inline struct xfs_dq_logitem *DQUOT_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_dq_logitem, qli_item);
}

/*
 * returns the number of iovecs needed to log the given dquot item.
 */
STATIC uint
xfs_qm_dquot_logitem_size(
	struct xfs_log_item	*lip)
{
	/*
	 * we need only two iovecs, one for the format, one for the real thing
	 */
	return 2;
}

/*
 * fills in the vector of log iovecs for the given dquot log item.
 */
STATIC void
xfs_qm_dquot_logitem_format(
	struct xfs_log_item	*lip,
	struct xfs_log_iovec	*logvec)
{
	struct xfs_dq_logitem	*qlip = DQUOT_ITEM(lip);

	logvec->i_addr = &qlip->qli_format;
	logvec->i_len  = sizeof(xfs_dq_logformat_t);
	logvec->i_type = XLOG_REG_TYPE_QFORMAT;
	logvec++;
	logvec->i_addr = &qlip->qli_dquot->q_core;
	logvec->i_len  = sizeof(xfs_disk_dquot_t);
	logvec->i_type = XLOG_REG_TYPE_DQUOT;

	ASSERT(2 == lip->li_desc->lid_size);
	qlip->qli_format.qlf_size = 2;
}

/*
 * Increment the pin count of the given dquot.
 */
STATIC void
xfs_qm_dquot_logitem_pin(
	struct xfs_log_item	*lip)
{
	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	atomic_inc(&dqp->q_pincount);
}

/*
 * Decrement the pin count of the given dquot, and wake up
 * anyone in xfs_qm_dqunpin_wait() if the count goes to 0.  The
 * dquot must have been previously pinned with a call to
 * xfs_qm_dquot_logitem_pin().
 */
STATIC void
xfs_qm_dquot_logitem_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;

	ASSERT(atomic_read(&dqp->q_pincount) > 0);
	if (atomic_dec_and_test(&dqp->q_pincount))
		wake_up(&dqp->q_pinwait);
}

/*
 * Given the logitem, this writes the corresponding dquot entry to disk
 * asynchronously. This is called with the dquot entry securely locked;
 * we simply get xfs_qm_dqflush() to do the work, and unlock the dquot
 * at the end.
 */
STATIC void
xfs_qm_dquot_logitem_push(
	struct xfs_log_item	*lip)
{
	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;
	int			error;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(!completion_done(&dqp->q_flush));

	/*
	 * Since we were able to lock the dquot's flush lock and
	 * we found it on the AIL, the dquot must be dirty.  This
	 * is because the dquot is removed from the AIL while still
	 * holding the flush lock in xfs_dqflush_done().  Thus, if
	 * we found it in the AIL and were able to obtain the flush
	 * lock without sleeping, then there must not have been
	 * anyone in the process of flushing the dquot.
	 */
	error = xfs_qm_dqflush(dqp, 0);
	if (error)
		xfs_warn(dqp->q_mount, "%s: push error %d on dqp %p",
			__func__, error, dqp);
	xfs_dqunlock(dqp);
}

STATIC xfs_lsn_t
xfs_qm_dquot_logitem_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	/*
	 * We always re-log the entire dquot when it becomes dirty,
	 * so, the latest copy _is_ the only one that matters.
	 */
	return lsn;
}

/*
 * This is called to wait for the given dquot to be unpinned.
 * Most of these pin/unpin routines are plagiarized from inode code.
 */
void
xfs_qm_dqunpin_wait(
	struct xfs_dquot	*dqp)
{
	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	if (atomic_read(&dqp->q_pincount) == 0)
		return;

	/*
	 * Give the log a push so we don't wait here too long.
	 */
	xfs_log_force(dqp->q_mount, 0);
	wait_event(dqp->q_pinwait, (atomic_read(&dqp->q_pincount) == 0));
}

/*
 * This is called when IOP_TRYLOCK returns XFS_ITEM_PUSHBUF to indicate that
 * the dquot is locked by us, but the flush lock isn't. So, here we are
 * going to see if the relevant dquot buffer is incore, waiting on DELWRI.
 * If so, we want to push it out to help us take this item off the AIL as soon
 * as possible.
 *
 * We must not be holding the AIL lock at this point. Calling xfs_incore() to
 * search the buffer cache can be a time-consuming thing, and the AIL lock is
 * a spinlock.
 */
STATIC void
xfs_qm_dquot_logitem_pushbuf(
	struct xfs_log_item	*lip)
{
	struct xfs_dq_logitem	*qlip = DQUOT_ITEM(lip);
	struct xfs_dquot	*dqp = qlip->qli_dquot;
	struct xfs_buf		*bp;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	/*
	 * If the flush lock isn't held anymore, chances are that the
	 * dquot flush completed and the dquot was taken off the AIL.
	 * So, just get out.
	 */
	if (completion_done(&dqp->q_flush) ||
	    !(lip->li_flags & XFS_LI_IN_AIL)) {
		xfs_dqunlock(dqp);
		return;
	}

	bp = xfs_incore(dqp->q_mount->m_ddev_targp, qlip->qli_format.qlf_blkno,
			dqp->q_mount->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK);
	xfs_dqunlock(dqp);
	if (!bp)
		return;
	if (XFS_BUF_ISDELAYWRITE(bp))
		xfs_buf_delwri_promote(bp);
	xfs_buf_relse(bp);
}

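/*
 * A rough map of how the AIL pusher reacts to the return codes used below
 * (based on how xfsaild treats IOP_TRYLOCK results in general):
 *
 *	XFS_ITEM_PINNED  - the dquot is still pinned by the log; skip it
 *			   and request a log force so the pin can go away.
 *	XFS_ITEM_LOCKED  - somebody else holds the dquot lock; skip it.
 *	XFS_ITEM_PUSHBUF - the flush lock is held, i.e. a flush is already
 *			   in progress; the pushbuf routine above promotes
 *			   the backing delwri buffer instead.
 *	XFS_ITEM_SUCCESS - both locks were acquired; the push routine will
 *			   flush the dquot to its backing buffer.
 */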
/*
 * This is called to attempt to lock the dquot associated with this
 * dquot log item.  Don't sleep on the dquot lock or the flush lock.
 * If the flush lock is already held, indicating that the dquot has
 * been or is in the process of being flushed, then see if we can
 * find the dquot's buffer in the buffer cache without sleeping.  If
 * we can and it is marked delayed write, then we want to send it out.
 * We delay doing so until the push routine, though, to avoid sleeping
 * in any device strategy routines.
 */
STATIC uint
xfs_qm_dquot_logitem_trylock(
	struct xfs_log_item	*lip)
{
	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;

	if (atomic_read(&dqp->q_pincount) > 0)
		return XFS_ITEM_PINNED;

	if (!xfs_qm_dqlock_nowait(dqp))
		return XFS_ITEM_LOCKED;

	if (!xfs_dqflock_nowait(dqp)) {
		/*
		 * dquot has already been flushed to the backing buffer,
		 * leave it locked, pushbuf routine will unlock it.
		 */
		return XFS_ITEM_PUSHBUF;
	}

	ASSERT(lip->li_flags & XFS_LI_IN_AIL);
	return XFS_ITEM_SUCCESS;
}

/*
 * Unlock the dquot associated with the log item.
 * Clear the fields of the dquot and dquot log item that
 * are specific to the current transaction.  If the
 * hold flag is set, do not unlock the dquot.
 */
STATIC void
xfs_qm_dquot_logitem_unlock(
	struct xfs_log_item	*lip)
{
	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	/*
	 * Clear the transaction pointer in the dquot
	 */
	dqp->q_transp = NULL;

	/*
	 * dquots are never 'held' from getting unlocked at the end of
	 * a transaction.  Their locking and unlocking is hidden inside the
	 * transaction layer, within trans_commit. Hence, no LI_HOLD flag
	 * for the logitem.
	 */
	xfs_dqunlock(dqp);
}

/*
 * This needs to stamp an LSN into the dquot, I think.
 * RPCs that look at user dquots would then have to
 * push on the dependency recorded in the dquot.
 */
STATIC void
xfs_qm_dquot_logitem_committing(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
}

/*
 * This is the ops vector for dquots
 */
static struct xfs_item_ops xfs_dquot_item_ops = {
	.iop_size	= xfs_qm_dquot_logitem_size,
	.iop_format	= xfs_qm_dquot_logitem_format,
	.iop_pin	= xfs_qm_dquot_logitem_pin,
	.iop_unpin	= xfs_qm_dquot_logitem_unpin,
	.iop_trylock	= xfs_qm_dquot_logitem_trylock,
	.iop_unlock	= xfs_qm_dquot_logitem_unlock,
	.iop_committed	= xfs_qm_dquot_logitem_committed,
	.iop_push	= xfs_qm_dquot_logitem_push,
	.iop_pushbuf	= xfs_qm_dquot_logitem_pushbuf,
	.iop_committing = xfs_qm_dquot_logitem_committing
};

/*
 * Initialize the dquot log item for a newly allocated dquot.
 * The dquot isn't locked at this point, but it isn't on any of the lists
 * either, so we don't care.
 */
void
xfs_qm_dquot_logitem_init(
	struct xfs_dquot	*dqp)
{
	struct xfs_dq_logitem	*lp = &dqp->q_logitem;

	xfs_log_item_init(dqp->q_mount, &lp->qli_item, XFS_LI_DQUOT,
					&xfs_dquot_item_ops);
	lp->qli_dquot = dqp;
	lp->qli_format.qlf_type = XFS_LI_DQUOT;
	lp->qli_format.qlf_id = be32_to_cpu(dqp->q_core.d_id);
	lp->qli_format.qlf_blkno = dqp->q_blkno;
	lp->qli_format.qlf_len = 1;
	/*
	 * This is just the offset of this dquot within its buffer
	 * (which is currently 1 FSB and probably won't change).
	 * Hence 32 bits for this offset should be just fine.
	 * Alternatively, we can store (bufoffset / sizeof(xfs_dqblk_t))
	 * here, and recompute it at recovery time.
	 */
	lp->qli_format.qlf_boffset = (__uint32_t)dqp->q_bufoffset;
}
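
/*
 * A minimal sketch of where this sits in the dquot lifecycle.  The helper
 * names are assumptions (they live in xfs_dquot.c and xfs_trans_dquot.c in
 * this era), so treat this as an illustration only:
 *
 *	dqp = <allocate and read in the incore dquot>;
 *	xfs_qm_dquot_logitem_init(dqp);
 *	...
 *	xfs_trans_dqjoin(tp, dqp);		(assumed helper)
 *	xfs_trans_log_dquot(tp, dqp);		(assumed helper)
 *
 * Logging only marks the item dirty; the format routine above copies
 * qli_format and q_core into the log iovecs at commit time.
 */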

/*------------------ QUOTAOFF LOG ITEMS -------------------*/

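/*
 * Convert from the generic log item back to the containing quotaoff log item.
 */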
static inline struct xfs_qoff_logitem *QOFF_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_qoff_logitem, qql_item);
}


/*
 * This returns the number of iovecs needed to log the given quotaoff item.
 * We only need 1 iovec for a quotaoff item.  It just logs the
 * quotaoff_log_format structure.
 */
STATIC uint
xfs_qm_qoff_logitem_size(
	struct xfs_log_item	*lip)
{
	return 1;
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given quotaoff log item. We use only 1 iovec, and we point that
 * at the quotaoff_log_format structure embedded in the quotaoff item.
 * We also assert here that the quotaoff item carries the expected
 * XFS_LI_QUOTAOFF type.
 */
STATIC void
xfs_qm_qoff_logitem_format(
	struct xfs_log_item	*lip,
	struct xfs_log_iovec	*log_vector)
{
	struct xfs_qoff_logitem	*qflip = QOFF_ITEM(lip);

	ASSERT(qflip->qql_format.qf_type == XFS_LI_QUOTAOFF);

	log_vector->i_addr = &qflip->qql_format;
	log_vector->i_len = sizeof(xfs_qoff_logitem_t);
	log_vector->i_type = XLOG_REG_TYPE_QUOTAOFF;
	qflip->qql_format.qf_size = 1;
}

/*
 * Pinning has no meaning for a quotaoff item, so just return.
 */
STATIC void
xfs_qm_qoff_logitem_pin(
	struct xfs_log_item	*lip)
{
}

/*
 * Since pinning has no meaning for a quotaoff item, unpinning does
 * not either.
 */
STATIC void
xfs_qm_qoff_logitem_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
}

/*
 * Quotaoff items have nothing to lock or push, so report them as
 * locked so that the AIL simply skips over them.
 */
STATIC uint
xfs_qm_qoff_logitem_trylock(
	struct xfs_log_item	*lip)
{
	return XFS_ITEM_LOCKED;
}

/*
 * Quotaoff items have no locking, so there is nothing for the
 * unlock routine to do.
 */
STATIC void
xfs_qm_qoff_logitem_unlock(
	struct xfs_log_item	*lip)
{
}

/*
 * The quotaoff-start-item is logged only once and cannot be moved in the log,
 * so simply return the lsn at which it's been logged.
 */
STATIC xfs_lsn_t
xfs_qm_qoff_logitem_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	return lsn;
}

/*
 * There isn't much you can do to push on a quotaoff item.  It is simply
 * stuck waiting for the log to be flushed to disk.
 */
STATIC void
xfs_qm_qoff_logitem_push(
	struct xfs_log_item	*lip)
{
}

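/*
 * Once the quotaoff-end item has been committed to the on-disk log, the
 * matching quotaoff-start item no longer needs to stay around, so take it
 * off the AIL and free both items.
 */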
STATIC xfs_lsn_t
xfs_qm_qoffend_logitem_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	struct xfs_qoff_logitem	*qfe = QOFF_ITEM(lip);
	struct xfs_qoff_logitem	*qfs = qfe->qql_start_lip;
	struct xfs_ail		*ailp = qfs->qql_item.li_ailp;

	/*
	 * Delete the qoff-start logitem from the AIL.
	 * xfs_trans_ail_delete() drops the AIL lock.
	 */
	spin_lock(&ailp->xa_lock);
	xfs_trans_ail_delete(ailp, (xfs_log_item_t *)qfs);

	kmem_free(qfs);
	kmem_free(qfe);
	return (xfs_lsn_t)-1;
}

/*
 * XXX rcc - don't know quite what to do with this.  I think we can
 * just ignore it.  The only time that isn't the case is if we allow
 * the client to somehow see that quotas have been turned off in which
 * case we can't allow that to get back until the quotaoff hits the disk.
 * So how would that happen?  Also, do we need different routines for
 * quotaoff start and quotaoff end?  I suspect the answer is yes but
 * to be sure, I need to look at the recovery code and see how quotaoff
 * recovery is handled (do we roll forward or back or do something else).
 * If we roll forwards or backwards, then we need two separate routines,
 * one that does nothing and one that stamps in the lsn that matters
 * (truly makes the quotaoff irrevocable).  If we do something else,
 * then maybe we don't need two.
 */
STATIC void
xfs_qm_qoff_logitem_committing(
	struct xfs_log_item	*lip,
	xfs_lsn_t		commit_lsn)
{
}

static struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {
	.iop_size	= xfs_qm_qoff_logitem_size,
	.iop_format	= xfs_qm_qoff_logitem_format,
	.iop_pin	= xfs_qm_qoff_logitem_pin,
	.iop_unpin	= xfs_qm_qoff_logitem_unpin,
	.iop_trylock	= xfs_qm_qoff_logitem_trylock,
	.iop_unlock	= xfs_qm_qoff_logitem_unlock,
	.iop_committed	= xfs_qm_qoffend_logitem_committed,
	.iop_push	= xfs_qm_qoff_logitem_push,
	.iop_committing = xfs_qm_qoff_logitem_committing
};

/*
 * This is the ops vector shared by all quotaoff-start log items.
 */
static struct xfs_item_ops xfs_qm_qoff_logitem_ops = {
	.iop_size	= xfs_qm_qoff_logitem_size,
	.iop_format	= xfs_qm_qoff_logitem_format,
	.iop_pin	= xfs_qm_qoff_logitem_pin,
	.iop_unpin	= xfs_qm_qoff_logitem_unpin,
	.iop_trylock	= xfs_qm_qoff_logitem_trylock,
	.iop_unlock	= xfs_qm_qoff_logitem_unlock,
	.iop_committed	= xfs_qm_qoff_logitem_committed,
	.iop_push	= xfs_qm_qoff_logitem_push,
	.iop_committing = xfs_qm_qoff_logitem_committing
};

/*
 * Allocate and initialize a quotaoff item of the correct quota type(s).
 */
struct xfs_qoff_logitem *
xfs_qm_qoff_logitem_init(
	struct xfs_mount	*mp,
	struct xfs_qoff_logitem	*start,
	uint			flags)
{
	struct xfs_qoff_logitem	*qf;

	qf = kmem_zalloc(sizeof(struct xfs_qoff_logitem), KM_SLEEP);

	xfs_log_item_init(mp, &qf->qql_item, XFS_LI_QUOTAOFF, start ?
			&xfs_qm_qoffend_logitem_ops : &xfs_qm_qoff_logitem_ops);
	qf->qql_item.li_mountp = mp;
	qf->qql_format.qf_type = XFS_LI_QUOTAOFF;
	qf->qql_format.qf_flags = flags;
	qf->qql_start_lip = start;
	return qf;
}
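
/*
 * A minimal sketch of how the two quotaoff items pair up during a quota-off
 * operation.  The transaction helper below is an assumption drawn from the
 * callers in xfs_qm_syscalls.c / xfs_trans_dquot.c; treat this as an
 * illustration of the pairing, not the exact call sequence:
 *
 *	startqoff = xfs_qm_qoff_logitem_init(mp, NULL, flags);
 *	xfs_trans_log_quotaoff_item(tp, startqoff);	(assumed helper)
 *	... commit; quiesce and write out the affected quota state ...
 *	qoffend = xfs_qm_qoff_logitem_init(mp, startqoff, flags);
 *	xfs_trans_log_quotaoff_item(tp2, qoffend);
 *
 * When the end item commits, xfs_qm_qoffend_logitem_committed() above pulls
 * the start item out of the AIL and frees both.
 */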