fs/xfs/xfs_dquot_item.c
/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_quota.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_qm.h"
#include "xfs_log.h"

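/*
 * Convert a generic log item back to the dquot log item that embeds it.
 */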
static inline struct xfs_dq_logitem *DQUOT_ITEM(struct xfs_log_item *lip)
{
        return container_of(lip, struct xfs_dq_logitem, qli_item);
}

/*
 * Report the number of iovecs and bytes needed to log the given dquot item.
 */
STATIC void
xfs_qm_dquot_logitem_size(
        struct xfs_log_item     *lip,
        int                     *nvecs,
        int                     *nbytes)
{
        *nvecs += 2;
        *nbytes += sizeof(struct xfs_dq_logformat) +
                   sizeof(struct xfs_disk_dquot);
}

/*
 * Fill in the vector of log iovecs for the given dquot log item.
 */
STATIC void
xfs_qm_dquot_logitem_format(
        struct xfs_log_item     *lip,
        struct xfs_log_vec      *lv)
{
        struct xfs_dq_logitem   *qlip = DQUOT_ITEM(lip);
        struct xfs_log_iovec    *vecp = NULL;
        struct xfs_dq_logformat *qlf;

        qlf = xlog_prepare_iovec(lv, &vecp, XLOG_REG_TYPE_QFORMAT);
        qlf->qlf_type = XFS_LI_DQUOT;
        qlf->qlf_size = 2;
        qlf->qlf_id = be32_to_cpu(qlip->qli_dquot->q_core.d_id);
        qlf->qlf_blkno = qlip->qli_dquot->q_blkno;
        qlf->qlf_len = 1;
        qlf->qlf_boffset = qlip->qli_dquot->q_bufoffset;
        xlog_finish_iovec(lv, vecp, sizeof(struct xfs_dq_logformat));

        xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_DQUOT,
                        &qlip->qli_dquot->q_core,
                        sizeof(struct xfs_disk_dquot));
}

/*
 * Increment the pin count of the given dquot.
 */
STATIC void
xfs_qm_dquot_logitem_pin(
        struct xfs_log_item     *lip)
{
        struct xfs_dquot        *dqp = DQUOT_ITEM(lip)->qli_dquot;

        ASSERT(XFS_DQ_IS_LOCKED(dqp));
        atomic_inc(&dqp->q_pincount);
}

/*
 * Decrement the pin count of the given dquot, and wake up
 * anyone in xfs_qm_dqunpin_wait() if the count goes to 0.  The
 * dquot must have been previously pinned with a call to
 * xfs_qm_dquot_logitem_pin().
 */
STATIC void
xfs_qm_dquot_logitem_unpin(
        struct xfs_log_item     *lip,
        int                     remove)
{
        struct xfs_dquot        *dqp = DQUOT_ITEM(lip)->qli_dquot;

        ASSERT(atomic_read(&dqp->q_pincount) > 0);
        if (atomic_dec_and_test(&dqp->q_pincount))
                wake_up(&dqp->q_pinwait);
}

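/*
 * Return the lsn at which the item should be placed in the AIL after a
 * commit.  Dquots are always relogged in full, so the lsn of the latest
 * commit is the only one that matters.
 */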
STATIC xfs_lsn_t
xfs_qm_dquot_logitem_committed(
        struct xfs_log_item     *lip,
        xfs_lsn_t               lsn)
{
        /*
         * We always re-log the entire dquot when it becomes dirty,
         * so, the latest copy _is_ the only one that matters.
         */
        return lsn;
}

/*
 * This is called to wait for the given dquot to be unpinned.
 * Most of these pin/unpin routines are plagiarized from inode code.
 */
void
xfs_qm_dqunpin_wait(
        struct xfs_dquot        *dqp)
{
        ASSERT(XFS_DQ_IS_LOCKED(dqp));
        if (atomic_read(&dqp->q_pincount) == 0)
                return;

        /*
         * Give the log a push so we don't wait here too long.
         */
        xfs_log_force(dqp->q_mount, 0);
        wait_event(dqp->q_pinwait, (atomic_read(&dqp->q_pincount) == 0));
}

/*
 * Callback used to mark a buffer with XFS_LI_FAILED when items in the buffer
 * have been failed during writeback.
 *
 * This informs the AIL that the dquot is already flush locked on the next
 * push, and acquires a hold on the buffer to ensure that it isn't reclaimed
 * before dirty data makes it to disk.
 */
STATIC void
xfs_dquot_item_error(
        struct xfs_log_item     *lip,
        struct xfs_buf          *bp)
{
        struct xfs_dquot        *dqp;

        dqp = DQUOT_ITEM(lip)->qli_dquot;
        ASSERT(!completion_done(&dqp->q_flush));
        xfs_set_li_failed(lip, bp);
}

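/*
 * Called by the AIL when pushing this item towards the tail of the log.
 * Write the dquot back to its buffer and queue the buffer for delayed
 * write, or report that the item is pinned, locked or already flushing.
 */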
STATIC uint
xfs_qm_dquot_logitem_push(
        struct xfs_log_item     *lip,
        struct list_head        *buffer_list) __releases(&lip->li_ailp->xa_lock)
                                              __acquires(&lip->li_ailp->xa_lock)
{
        struct xfs_dquot        *dqp = DQUOT_ITEM(lip)->qli_dquot;
        struct xfs_buf          *bp = lip->li_buf;
        uint                    rval = XFS_ITEM_SUCCESS;
        int                     error;

        if (atomic_read(&dqp->q_pincount) > 0)
                return XFS_ITEM_PINNED;

        /*
         * The buffer containing this item failed to be written back
         * previously.  Resubmit the buffer for IO.
         */
        if (lip->li_flags & XFS_LI_FAILED) {
                if (!xfs_buf_trylock(bp))
                        return XFS_ITEM_LOCKED;

                if (!xfs_buf_resubmit_failed_buffers(bp, lip, buffer_list))
                        rval = XFS_ITEM_FLUSHING;

                xfs_buf_unlock(bp);
                return rval;
        }

        if (!xfs_dqlock_nowait(dqp))
                return XFS_ITEM_LOCKED;

        /*
         * Re-check the pincount now that we stabilized the value by
         * taking the quota lock.
         */
        if (atomic_read(&dqp->q_pincount) > 0) {
                rval = XFS_ITEM_PINNED;
                goto out_unlock;
        }

        /*
         * Someone else is already flushing the dquot.  Nothing we can do
         * here but wait for the flush to finish and remove the item from
         * the AIL.
         */
        if (!xfs_dqflock_nowait(dqp)) {
                rval = XFS_ITEM_FLUSHING;
                goto out_unlock;
        }

        spin_unlock(&lip->li_ailp->xa_lock);

        error = xfs_qm_dqflush(dqp, &bp);
        if (error) {
                xfs_warn(dqp->q_mount, "%s: push error %d on dqp %p",
                         __func__, error, dqp);
        } else {
                if (!xfs_buf_delwri_queue(bp, buffer_list))
                        rval = XFS_ITEM_FLUSHING;
                xfs_buf_relse(bp);
        }

        spin_lock(&lip->li_ailp->xa_lock);
out_unlock:
        xfs_dqunlock(dqp);
        return rval;
}

/*
 * Unlock the dquot associated with the log item.
 * Clear the fields of the dquot and dquot log item that
 * are specific to the current transaction.  If the
 * hold flag is set, do not unlock the dquot.
 */
STATIC void
xfs_qm_dquot_logitem_unlock(
        struct xfs_log_item     *lip)
{
        struct xfs_dquot        *dqp = DQUOT_ITEM(lip)->qli_dquot;

        ASSERT(XFS_DQ_IS_LOCKED(dqp));

        /*
         * Clear the transaction pointer in the dquot.
         */
        dqp->q_transp = NULL;

        /*
         * dquots are never 'held' from getting unlocked at the end of
         * a transaction.  Their locking and unlocking is hidden inside the
         * transaction layer, within trans_commit.  Hence, no LI_HOLD flag
         * for the logitem.
         */
        xfs_dqunlock(dqp);
}

/*
 * This needs to stamp an lsn into the dquot, I think.
 * RPCs that look at user dquots would then have to
 * push on the dependency recorded in the dquot.
 */
STATIC void
xfs_qm_dquot_logitem_committing(
        struct xfs_log_item     *lip,
        xfs_lsn_t               lsn)
{
}

/*
 * This is the ops vector for dquots.
 */
static const struct xfs_item_ops xfs_dquot_item_ops = {
        .iop_size       = xfs_qm_dquot_logitem_size,
        .iop_format     = xfs_qm_dquot_logitem_format,
        .iop_pin        = xfs_qm_dquot_logitem_pin,
        .iop_unpin      = xfs_qm_dquot_logitem_unpin,
        .iop_unlock     = xfs_qm_dquot_logitem_unlock,
        .iop_committed  = xfs_qm_dquot_logitem_committed,
        .iop_push       = xfs_qm_dquot_logitem_push,
        .iop_committing = xfs_qm_dquot_logitem_committing,
        .iop_error      = xfs_dquot_item_error
};

/*
 * Initialize the dquot log item for a newly allocated dquot.
 * The dquot isn't locked at this point, but it isn't on any of the lists
 * either, so we don't care.
 */
void
xfs_qm_dquot_logitem_init(
        struct xfs_dquot        *dqp)
{
        struct xfs_dq_logitem   *lp = &dqp->q_logitem;

        xfs_log_item_init(dqp->q_mount, &lp->qli_item, XFS_LI_DQUOT,
                          &xfs_dquot_item_ops);
        lp->qli_dquot = dqp;
}

/*------------------  QUOTAOFF LOG ITEMS  -------------------*/

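/*
 * Convert a generic log item back to the quotaoff log item that embeds it.
 */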
static inline struct xfs_qoff_logitem *QOFF_ITEM(struct xfs_log_item *lip)
{
        return container_of(lip, struct xfs_qoff_logitem, qql_item);
}

/*
 * This returns the number of iovecs needed to log the given quotaoff item.
 * We only need 1 iovec for a quotaoff item.  It just logs the
 * quotaoff_log_format structure.
 */
STATIC void
xfs_qm_qoff_logitem_size(
        struct xfs_log_item     *lip,
        int                     *nvecs,
        int                     *nbytes)
{
        *nvecs += 1;
        *nbytes += sizeof(struct xfs_qoff_logitem);
}

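/*
 * Fill in the single log iovec that describes the quotaoff item.
 */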
STATIC void
xfs_qm_qoff_logitem_format(
        struct xfs_log_item     *lip,
        struct xfs_log_vec      *lv)
{
        struct xfs_qoff_logitem *qflip = QOFF_ITEM(lip);
        struct xfs_log_iovec    *vecp = NULL;
        struct xfs_qoff_logformat *qlf;

        qlf = xlog_prepare_iovec(lv, &vecp, XLOG_REG_TYPE_QUOTAOFF);
        qlf->qf_type = XFS_LI_QUOTAOFF;
        qlf->qf_size = 1;
        qlf->qf_flags = qflip->qql_flags;
        xlog_finish_iovec(lv, vecp, sizeof(struct xfs_qoff_logitem));
}

/*
 * Pinning has no meaning for a quotaoff item, so just return.
 */
STATIC void
xfs_qm_qoff_logitem_pin(
        struct xfs_log_item     *lip)
{
}

/*
 * Since pinning has no meaning for a quotaoff item, unpinning does
 * not either.
 */
STATIC void
xfs_qm_qoff_logitem_unpin(
        struct xfs_log_item     *lip,
        int                     remove)
{
}

/*
 * There isn't much you can do to push a quotaoff item.  It is simply
 * stuck waiting for the log to be flushed to disk.
 */
STATIC uint
xfs_qm_qoff_logitem_push(
        struct xfs_log_item     *lip,
        struct list_head        *buffer_list)
{
        return XFS_ITEM_LOCKED;
}

/*
 * Quotaoff items have no locking, so there is nothing to do when the
 * transaction releases them.
 */
STATIC void
xfs_qm_qoff_logitem_unlock(
        struct xfs_log_item     *lip)
{
}

/*
 * The quotaoff-start-item is logged only once and cannot be moved in the log,
 * so simply return the lsn at which it's been logged.
 */
STATIC xfs_lsn_t
xfs_qm_qoff_logitem_committed(
        struct xfs_log_item     *lip,
        xfs_lsn_t               lsn)
{
        return lsn;
}

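/*
 * When the quotaoff-end item commits, the corresponding quotaoff-start item
 * can be removed from the AIL and both log items freed.
 */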
STATIC xfs_lsn_t
xfs_qm_qoffend_logitem_committed(
        struct xfs_log_item     *lip,
        xfs_lsn_t               lsn)
{
        struct xfs_qoff_logitem *qfe = QOFF_ITEM(lip);
        struct xfs_qoff_logitem *qfs = qfe->qql_start_lip;
        struct xfs_ail          *ailp = qfs->qql_item.li_ailp;

        /*
         * Delete the qoff-start logitem from the AIL.
         * xfs_trans_ail_delete() drops the AIL lock.
         */
        spin_lock(&ailp->xa_lock);
        xfs_trans_ail_delete(ailp, &qfs->qql_item, SHUTDOWN_LOG_IO_ERROR);

        kmem_free(qfs->qql_item.li_lv_shadow);
        kmem_free(lip->li_lv_shadow);
        kmem_free(qfs);
        kmem_free(qfe);
        return (xfs_lsn_t)-1;
}

/*
 * XXX rcc - don't know quite what to do with this.  I think we can
 * just ignore it.  The only time that isn't the case is if we allow
 * the client to somehow see that quotas have been turned off, in which
 * case we can't allow that to get back until the quotaoff hits the disk.
 * So how would that happen?  Also, do we need different routines for
 * quotaoff start and quotaoff end?  I suspect the answer is yes but
 * to be sure, I need to look at the recovery code and see how quotaoff
 * recovery is handled (do we roll forward or back or do something else).
 * If we roll forwards or backwards, then we need two separate routines,
 * one that does nothing and one that stamps in the lsn that matters
 * (truly makes the quotaoff irrevocable).  If we do something else,
 * then maybe we don't need two.
 */
STATIC void
xfs_qm_qoff_logitem_committing(
        struct xfs_log_item     *lip,
        xfs_lsn_t               commit_lsn)
{
}

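/*
 * This is the ops vector for quotaoff-end log items.
 */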
static const struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {
        .iop_size       = xfs_qm_qoff_logitem_size,
        .iop_format     = xfs_qm_qoff_logitem_format,
        .iop_pin        = xfs_qm_qoff_logitem_pin,
        .iop_unpin      = xfs_qm_qoff_logitem_unpin,
        .iop_unlock     = xfs_qm_qoff_logitem_unlock,
        .iop_committed  = xfs_qm_qoffend_logitem_committed,
        .iop_push       = xfs_qm_qoff_logitem_push,
        .iop_committing = xfs_qm_qoff_logitem_committing
};

/*
 * This is the ops vector shared by all quotaoff-start log items.
 */
static const struct xfs_item_ops xfs_qm_qoff_logitem_ops = {
        .iop_size       = xfs_qm_qoff_logitem_size,
        .iop_format     = xfs_qm_qoff_logitem_format,
        .iop_pin        = xfs_qm_qoff_logitem_pin,
        .iop_unpin      = xfs_qm_qoff_logitem_unpin,
        .iop_unlock     = xfs_qm_qoff_logitem_unlock,
        .iop_committed  = xfs_qm_qoff_logitem_committed,
        .iop_push       = xfs_qm_qoff_logitem_push,
        .iop_committing = xfs_qm_qoff_logitem_committing
};

/*
 * Allocate and initialize a quotaoff item of the correct quota type(s).
 */
struct xfs_qoff_logitem *
xfs_qm_qoff_logitem_init(
        struct xfs_mount        *mp,
        struct xfs_qoff_logitem *start,
        uint                    flags)
{
        struct xfs_qoff_logitem *qf;

        qf = kmem_zalloc(sizeof(struct xfs_qoff_logitem), KM_SLEEP);

        xfs_log_item_init(mp, &qf->qql_item, XFS_LI_QUOTAOFF, start ?
                          &xfs_qm_qoffend_logitem_ops : &xfs_qm_qoff_logitem_ops);
        qf->qql_item.li_mountp = mp;
        qf->qql_start_lip = start;
        qf->qql_flags = flags;
        return qf;
}