module/zfs/zfs_log.c (mirror_zfs.git, at commit "Add Module Parameter Regarding Log Size Limit")
1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2015, 2018 by Delphix. All rights reserved.
24 */
25
26
27 #include <sys/types.h>
28 #include <sys/param.h>
29 #include <sys/sysmacros.h>
30 #include <sys/cmn_err.h>
31 #include <sys/kmem.h>
32 #include <sys/thread.h>
33 #include <sys/file.h>
34 #include <sys/vfs.h>
35 #include <sys/zfs_znode.h>
36 #include <sys/zfs_dir.h>
37 #include <sys/zil.h>
38 #include <sys/zil_impl.h>
39 #include <sys/byteorder.h>
40 #include <sys/policy.h>
41 #include <sys/stat.h>
42 #include <sys/acl.h>
43 #include <sys/dmu.h>
44 #include <sys/dbuf.h>
45 #include <sys/spa.h>
46 #include <sys/zfs_fuid.h>
47 #include <sys/dsl_dataset.h>
48
49 /*
50 * These zfs_log_* functions must be called within a dmu tx, in one
51 * of 2 contexts depending on zilog->z_replay:
52 *
53 * Non replay mode
54 * ---------------
55 * We need to record the transaction so that if it is committed to
56 * the Intent Log then it can be replayed. An intent log transaction
57 * structure (itx_t) is allocated and all the information necessary to
58 * possibly replay the transaction is saved in it. The itx is then assigned
59 * a sequence number and inserted in the in-memory list anchored in the zilog.
60 *
61 * Replay mode
62 * -----------
63 * We need to mark the intent log record as replayed in the log header.
64 * This is done in the same transaction as the replay so that they
65 * commit atomically.
66 */
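/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * the logging call sits between the DMU change and dmu_tx_commit(),
 * inside the same assigned tx.
 *
 * tx = dmu_tx_create(os);
 * ... dmu_tx_hold_*() ... dmu_tx_assign(tx, ...);
 * ... make the change in the DMU (e.g. create the znode) ...
 * zfs_log_create(zilog, tx, txtype, dzp, zp, name, vsecp, fuidp, vap);
 * dmu_tx_commit(tx);
 *
 * The itx built here is only written to disk if the ZIL is committed
 * (e.g. by fsync or an O_SYNC write); otherwise it is discarded once
 * the transaction group syncs.
 */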
67
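/*
 * Pick the ZIL record type for a create: the base TX_CREATE, TX_MKDIR or
 * TX_MKXATTR types, or their _ACL/_ATTR/_ACL_ATTR variants, depending on
 * whether an ACL (vsecp) and/or extended attributes (ATTR_XVATTR) are
 * supplied.
 */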
68 int
69 zfs_log_create_txtype(zil_create_t type, vsecattr_t *vsecp, vattr_t *vap)
70 {
71 int isxvattr = (vap->va_mask & ATTR_XVATTR);
72 switch (type) {
73 case Z_FILE:
74 if (vsecp == NULL && !isxvattr)
75 return (TX_CREATE);
76 if (vsecp && isxvattr)
77 return (TX_CREATE_ACL_ATTR);
78 if (vsecp)
79 return (TX_CREATE_ACL);
80 else
81 return (TX_CREATE_ATTR);
82 /*NOTREACHED*/
83 case Z_DIR:
84 if (vsecp == NULL && !isxvattr)
85 return (TX_MKDIR);
86 if (vsecp && isxvattr)
87 return (TX_MKDIR_ACL_ATTR);
88 if (vsecp)
89 return (TX_MKDIR_ACL);
90 else
91 return (TX_MKDIR_ATTR);
92 case Z_XATTRDIR:
93 return (TX_MKXATTR);
94 }
95 ASSERT(0);
96 return (TX_MAX_TYPE);
97 }
98
99 /*
100 * Build up the log data necessary for logging an xvattr_t.
101 * The lr_attr_t is initialized first; following the lr_attr_t
102 * are the mapsize and attribute bitmap copied from the xvattr_t.
103 * Following the bitmap is a single 64-bit integer holding the
104 * attribute bits to set on replay, then two 64-bit words reserved
105 * for the create time (which may be set), and finally space shared
106 * by the AV scanstamp and the project ID.
107 */
108 static void
109 zfs_log_xvattr(lr_attr_t *lrattr, xvattr_t *xvap)
110 {
111 uint32_t *bitmap;
112 uint64_t *attrs;
113 uint64_t *crtime;
114 xoptattr_t *xoap;
115 void *scanstamp;
116 int i;
117
118 xoap = xva_getxoptattr(xvap);
119 ASSERT(xoap);
120
121 lrattr->lr_attr_masksize = xvap->xva_mapsize;
122 bitmap = &lrattr->lr_attr_bitmap;
123 for (i = 0; i != xvap->xva_mapsize; i++, bitmap++) {
124 *bitmap = xvap->xva_reqattrmap[i];
125 }
126
127 /* Now pack the attributes up in a single uint64_t */
128 attrs = (uint64_t *)bitmap;
129 crtime = attrs + 1;
130 scanstamp = (caddr_t)(crtime + 2);
131 *attrs = 0;
132 if (XVA_ISSET_REQ(xvap, XAT_READONLY))
133 *attrs |= (xoap->xoa_readonly == 0) ? 0 :
134 XAT0_READONLY;
135 if (XVA_ISSET_REQ(xvap, XAT_HIDDEN))
136 *attrs |= (xoap->xoa_hidden == 0) ? 0 :
137 XAT0_HIDDEN;
138 if (XVA_ISSET_REQ(xvap, XAT_SYSTEM))
139 *attrs |= (xoap->xoa_system == 0) ? 0 :
140 XAT0_SYSTEM;
141 if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE))
142 *attrs |= (xoap->xoa_archive == 0) ? 0 :
143 XAT0_ARCHIVE;
144 if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE))
145 *attrs |= (xoap->xoa_immutable == 0) ? 0 :
146 XAT0_IMMUTABLE;
147 if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK))
148 *attrs |= (xoap->xoa_nounlink == 0) ? 0 :
149 XAT0_NOUNLINK;
150 if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY))
151 *attrs |= (xoap->xoa_appendonly == 0) ? 0 :
152 XAT0_APPENDONLY;
153 if (XVA_ISSET_REQ(xvap, XAT_OPAQUE))
154 *attrs |= (xoap->xoa_opaque == 0) ? 0 :
155 XAT0_OPAQUE;
156 if (XVA_ISSET_REQ(xvap, XAT_NODUMP))
157 *attrs |= (xoap->xoa_nodump == 0) ? 0 :
158 XAT0_NODUMP;
159 if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED))
160 *attrs |= (xoap->xoa_av_quarantined == 0) ? 0 :
161 XAT0_AV_QUARANTINED;
162 if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED))
163 *attrs |= (xoap->xoa_av_modified == 0) ? 0 :
164 XAT0_AV_MODIFIED;
165 if (XVA_ISSET_REQ(xvap, XAT_CREATETIME))
166 ZFS_TIME_ENCODE(&xoap->xoa_createtime, crtime);
167 if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) {
168 ASSERT(!XVA_ISSET_REQ(xvap, XAT_PROJID));
169
170 bcopy(xoap->xoa_av_scanstamp, scanstamp, AV_SCANSTAMP_SZ);
171 } else if (XVA_ISSET_REQ(xvap, XAT_PROJID)) {
172 /*
173 * XAT_PROJID and XAT_AV_SCANSTAMP will never be valid
174 * at the same time, so we can share the same space.
175 */
176 bcopy(&xoap->xoa_projid, scanstamp, sizeof (uint64_t));
177 }
178 if (XVA_ISSET_REQ(xvap, XAT_REPARSE))
179 *attrs |= (xoap->xoa_reparse == 0) ? 0 :
180 XAT0_REPARSE;
181 if (XVA_ISSET_REQ(xvap, XAT_OFFLINE))
182 *attrs |= (xoap->xoa_offline == 0) ? 0 :
183 XAT0_OFFLINE;
184 if (XVA_ISSET_REQ(xvap, XAT_SPARSE))
185 *attrs |= (xoap->xoa_sparse == 0) ? 0 :
186 XAT0_SPARSE;
187 if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT))
188 *attrs |= (xoap->xoa_projinherit == 0) ? 0 :
189 XAT0_PROJINHERIT;
190 }
191
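/*
 * Copy the log-centric ACE FUIDs from the fuid info list into the log
 * record at 'start'; returns a pointer just past the last FUID written.
 */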
192 static void *
193 zfs_log_fuid_ids(zfs_fuid_info_t *fuidp, void *start)
194 {
195 zfs_fuid_t *zfuid;
196 uint64_t *fuidloc = start;
197
198 /* First copy in the ACE FUIDs */
199 for (zfuid = list_head(&fuidp->z_fuids); zfuid;
200 zfuid = list_next(&fuidp->z_fuids, zfuid)) {
201 *fuidloc++ = zfuid->z_logfuid;
202 }
203 return (fuidloc);
204 }
205
206
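/*
 * Append the NUL-terminated FUID domain strings, if any, at 'start';
 * returns a pointer just past the last byte written.
 */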
207 static void *
208 zfs_log_fuid_domains(zfs_fuid_info_t *fuidp, void *start)
209 {
210 zfs_fuid_domain_t *zdomain;
211
212 /* now copy in the domain info, if any */
213 if (fuidp->z_domain_str_sz != 0) {
214 for (zdomain = list_head(&fuidp->z_domains); zdomain;
215 zdomain = list_next(&fuidp->z_domains, zdomain)) {
216 bcopy((void *)zdomain->z_domain, start,
217 strlen(zdomain->z_domain) + 1);
218 start = (caddr_t)start +
219 strlen(zdomain->z_domain) + 1;
220 }
221 }
222 return (start);
223 }
224
225 /*
226 * If zp is an xattr node, check whether the xattr owner is unlinked.
227 * We don't want to log anything if the owner is unlinked.
228 */
229 static int
230 zfs_xattr_owner_unlinked(znode_t *zp)
231 {
232 int unlinked = 0;
233 znode_t *dzp;
234 #ifdef __FreeBSD__
235 znode_t *tzp = zp;
236
237 /*
238 * zrele drops the vnode lock which violates the VOP locking contract
239 * on FreeBSD. See comment at the top of zfs_replay.c for more detail.
240 */
241 /*
242 * if zp is an XATTR node, keep walking up via z_xattr_parent until
243 * we get the owner
244 */
245 while (tzp->z_pflags & ZFS_XATTR) {
246 ASSERT3U(zp->z_xattr_parent, !=, 0);
247 if (zfs_zget(ZTOZSB(tzp), tzp->z_xattr_parent, &dzp) != 0) {
248 unlinked = 1;
249 break;
250 }
251
252 if (tzp != zp)
253 zrele(tzp);
254 tzp = dzp;
255 unlinked = tzp->z_unlinked;
256 }
257 if (tzp != zp)
258 zrele(tzp);
259 #else
260 zhold(zp);
261 /*
262 * if zp is an XATTR node, keep walking up via z_xattr_parent until
263 * we get the owner
264 */
265 while (zp->z_pflags & ZFS_XATTR) {
266 ASSERT3U(zp->z_xattr_parent, !=, 0);
267 if (zfs_zget(ZTOZSB(zp), zp->z_xattr_parent, &dzp) != 0) {
268 unlinked = 1;
269 break;
270 }
271
272 zrele(zp);
273 zp = dzp;
274 unlinked = zp->z_unlinked;
275 }
276 zrele(zp);
277 #endif
278 return (unlinked);
279 }
280
281 /*
282 * Handles TX_CREATE, TX_CREATE_ATTR, TX_MKDIR, TX_MKDIR_ATTR and
283 * TX_MKXATTR transactions.
284 *
285 * TX_CREATE and TX_MKDIR are standard creates, but they may have FUID
286 * domain information appended prior to the name. In this case the
287 * uid/gid in the log record will be a log centric FUID.
288 *
289 * TX_CREATE_ACL_ATTR and TX_MKDIR_ACL_ATTR handle special creates that
290 * may contain attributes, ACL and optional fuid information.
291 *
292 * TX_CREATE_ACL and TX_MKDIR_ACL handle special creates that specify
293 * an ACL and normal users/groups in the ACEs.
294 *
295 * There may also be optional xvattr attribute information, similar
296 * to zfs_log_setattr.
297 *
298 * Any FUID "domain" strings are appended before the file name, which is placed last in the record.
299 */
300 void
301 zfs_log_create(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
302 znode_t *dzp, znode_t *zp, const char *name, vsecattr_t *vsecp,
303 zfs_fuid_info_t *fuidp, vattr_t *vap)
304 {
305 itx_t *itx;
306 lr_create_t *lr;
307 lr_acl_create_t *lracl;
308 size_t aclsize = 0;
309 size_t xvatsize = 0;
310 size_t txsize;
311 xvattr_t *xvap = (xvattr_t *)vap;
312 void *end;
313 size_t lrsize;
314 size_t namesize = strlen(name) + 1;
315 size_t fuidsz = 0;
316
317 if (zil_replaying(zilog, tx) || zfs_xattr_owner_unlinked(dzp))
318 return;
319
320 /*
321 * If FUIDs are present then add in space for the
322 * domain strings and any ACE FUIDs.
323 */
324 if (fuidp) {
325 fuidsz += fuidp->z_domain_str_sz;
326 fuidsz += fuidp->z_fuid_cnt * sizeof (uint64_t);
327 }
328
329 if (vap->va_mask & ATTR_XVATTR)
330 xvatsize = ZIL_XVAT_SIZE(xvap->xva_mapsize);
331
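/*
 * Plain creates use an lr_create_t; the ACL variants use the larger
 * lr_acl_create_t and reserve room for the ACL entries.
 */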
332 if ((int)txtype == TX_CREATE_ATTR || (int)txtype == TX_MKDIR_ATTR ||
333 (int)txtype == TX_CREATE || (int)txtype == TX_MKDIR ||
334 (int)txtype == TX_MKXATTR) {
335 txsize = sizeof (*lr) + namesize + fuidsz + xvatsize;
336 lrsize = sizeof (*lr);
337 } else {
338 txsize =
339 sizeof (lr_acl_create_t) + namesize + fuidsz +
340 ZIL_ACE_LENGTH(aclsize) + xvatsize;
341 lrsize = sizeof (lr_acl_create_t);
342 }
343
344 itx = zil_itx_create(txtype, txsize);
345
346 lr = (lr_create_t *)&itx->itx_lr;
347 lr->lr_doid = dzp->z_id;
348 lr->lr_foid = zp->z_id;
349 /* Store dnode slot count in 8 bits above object id. */
350 LR_FOID_SET_SLOTS(lr->lr_foid, zp->z_dnodesize >> DNODE_SHIFT);
351 lr->lr_mode = zp->z_mode;
352 if (!IS_EPHEMERAL(KUID_TO_SUID(ZTOUID(zp)))) {
353 lr->lr_uid = (uint64_t)KUID_TO_SUID(ZTOUID(zp));
354 } else {
355 lr->lr_uid = fuidp->z_fuid_owner;
356 }
357 if (!IS_EPHEMERAL(KGID_TO_SGID(ZTOGID(zp)))) {
358 lr->lr_gid = (uint64_t)KGID_TO_SGID(ZTOGID(zp));
359 } else {
360 lr->lr_gid = fuidp->z_fuid_group;
361 }
362 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(ZTOZSB(zp)), &lr->lr_gen,
363 sizeof (uint64_t));
364 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(ZTOZSB(zp)),
365 lr->lr_crtime, sizeof (uint64_t) * 2);
366
367 if (sa_lookup(zp->z_sa_hdl, SA_ZPL_RDEV(ZTOZSB(zp)), &lr->lr_rdev,
368 sizeof (lr->lr_rdev)) != 0)
369 lr->lr_rdev = 0;
370
371 /*
372 * Fill in xvattr info if any
373 */
374 if (vap->va_mask & ATTR_XVATTR) {
375 zfs_log_xvattr((lr_attr_t *)((caddr_t)lr + lrsize), xvap);
376 end = (caddr_t)lr + lrsize + xvatsize;
377 } else {
378 end = (caddr_t)lr + lrsize;
379 }
380
381 /* Now fill in any ACL info */
382
383 if (vsecp) {
384 lracl = (lr_acl_create_t *)&itx->itx_lr;
385 lracl->lr_aclcnt = vsecp->vsa_aclcnt;
386 lracl->lr_acl_bytes = aclsize;
387 lracl->lr_domcnt = fuidp ? fuidp->z_domain_cnt : 0;
388 lracl->lr_fuidcnt = fuidp ? fuidp->z_fuid_cnt : 0;
389 if (vsecp->vsa_aclflags & VSA_ACE_ACLFLAGS)
390 lracl->lr_acl_flags = (uint64_t)vsecp->vsa_aclflags;
391 else
392 lracl->lr_acl_flags = 0;
393
394 bcopy(vsecp->vsa_aclentp, end, aclsize);
395 end = (caddr_t)end + ZIL_ACE_LENGTH(aclsize);
396 }
397
398 /* drop in FUID info */
399 if (fuidp) {
400 end = zfs_log_fuid_ids(fuidp, end);
401 end = zfs_log_fuid_domains(fuidp, end);
402 }
403 /*
404 * Now place file name in log record
405 */
406 bcopy(name, end, namesize);
407
408 zil_itx_assign(zilog, itx, tx);
409 }
410
411 /*
412 * Handles both TX_REMOVE and TX_RMDIR transactions.
413 */
414 void
415 zfs_log_remove(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
416 znode_t *dzp, const char *name, uint64_t foid, boolean_t unlinked)
417 {
418 itx_t *itx;
419 lr_remove_t *lr;
420 size_t namesize = strlen(name) + 1;
421
422 if (zil_replaying(zilog, tx) || zfs_xattr_owner_unlinked(dzp))
423 return;
424
425 itx = zil_itx_create(txtype, sizeof (*lr) + namesize);
426 lr = (lr_remove_t *)&itx->itx_lr;
427 lr->lr_doid = dzp->z_id;
428 bcopy(name, (char *)(lr + 1), namesize);
429
430 itx->itx_oid = foid;
431
432 /*
433 * Object ids can be re-instantiated in the next txg so
434 * remove any async transactions to avoid future leaks.
435 * This can happen if a fsync occurs on the re-instantiated
436 * object for a WR_INDIRECT or WR_NEED_COPY write, which gets
437 * the new file data and flushes a write record for the old object.
438 */
439 if (unlinked) {
440 ASSERT((txtype & ~TX_CI) == TX_REMOVE);
441 zil_remove_async(zilog, foid);
442 }
443 zil_itx_assign(zilog, itx, tx);
444 }
445
446 /*
447 * Handles TX_LINK transactions.
448 */
449 void
450 zfs_log_link(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
451 znode_t *dzp, znode_t *zp, const char *name)
452 {
453 itx_t *itx;
454 lr_link_t *lr;
455 size_t namesize = strlen(name) + 1;
456
457 if (zil_replaying(zilog, tx))
458 return;
459
460 itx = zil_itx_create(txtype, sizeof (*lr) + namesize);
461 lr = (lr_link_t *)&itx->itx_lr;
462 lr->lr_doid = dzp->z_id;
463 lr->lr_link_obj = zp->z_id;
464 bcopy(name, (char *)(lr + 1), namesize);
465
466 zil_itx_assign(zilog, itx, tx);
467 }
468
469 /*
470 * Handles TX_SYMLINK transactions.
471 */
472 void
473 zfs_log_symlink(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
474 znode_t *dzp, znode_t *zp, const char *name, const char *link)
475 {
476 itx_t *itx;
477 lr_create_t *lr;
478 size_t namesize = strlen(name) + 1;
479 size_t linksize = strlen(link) + 1;
480
481 if (zil_replaying(zilog, tx))
482 return;
483
484 itx = zil_itx_create(txtype, sizeof (*lr) + namesize + linksize);
485 lr = (lr_create_t *)&itx->itx_lr;
486 lr->lr_doid = dzp->z_id;
487 lr->lr_foid = zp->z_id;
488 lr->lr_uid = KUID_TO_SUID(ZTOUID(zp));
489 lr->lr_gid = KGID_TO_SGID(ZTOGID(zp));
490 lr->lr_mode = zp->z_mode;
491 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(ZTOZSB(zp)), &lr->lr_gen,
492 sizeof (uint64_t));
493 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(ZTOZSB(zp)),
494 lr->lr_crtime, sizeof (uint64_t) * 2);
495 bcopy(name, (char *)(lr + 1), namesize);
496 bcopy(link, (char *)(lr + 1) + namesize, linksize);
497
498 zil_itx_assign(zilog, itx, tx);
499 }
500
501 /*
502 * Handles TX_RENAME transactions.
503 */
504 void
505 zfs_log_rename(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype, znode_t *sdzp,
506 const char *sname, znode_t *tdzp, const char *dname, znode_t *szp)
507 {
508 itx_t *itx;
509 lr_rename_t *lr;
510 size_t snamesize = strlen(sname) + 1;
511 size_t dnamesize = strlen(dname) + 1;
512
513 if (zil_replaying(zilog, tx))
514 return;
515
516 itx = zil_itx_create(txtype, sizeof (*lr) + snamesize + dnamesize);
517 lr = (lr_rename_t *)&itx->itx_lr;
518 lr->lr_sdoid = sdzp->z_id;
519 lr->lr_tdoid = tdzp->z_id;
520 bcopy(sname, (char *)(lr + 1), snamesize);
521 bcopy(dname, (char *)(lr + 1) + snamesize, dnamesize);
522 itx->itx_oid = szp->z_id;
523
524 zil_itx_assign(zilog, itx, tx);
525 }
526
527 /*
528 * zfs_log_write() handles TX_WRITE transactions. The specified callback is
529 * called as soon as the write is on stable storage (be it via a DMU sync or a
530 * ZIL commit).
531 */
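/*
 * Writes at or above this size (in bytes) are logged indirectly
 * (WR_INDIRECT) when the pool has no separate log device; smaller
 * synchronous writes are copied into the log record instead. Exposed
 * below as the zfs_immediate_write_sz module parameter.
 */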
532 long zfs_immediate_write_sz = 32768;
533
534 void
535 zfs_log_write(zilog_t *zilog, dmu_tx_t *tx, int txtype,
536 znode_t *zp, offset_t off, ssize_t resid, int ioflag,
537 zil_callback_t callback, void *callback_data)
538 {
539 dmu_buf_impl_t *db = (dmu_buf_impl_t *)sa_get_db(zp->z_sa_hdl);
540 uint32_t blocksize = zp->z_blksz;
541 itx_wr_state_t write_state;
542 uintptr_t fsync_cnt;
543 uint64_t gen = 0;
544 ssize_t size = resid;
545
546 if (zil_replaying(zilog, tx) || zp->z_unlinked ||
547 zfs_xattr_owner_unlinked(zp)) {
548 if (callback != NULL)
549 callback(callback_data);
550 return;
551 }
552
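/*
 * Choose how the write data will be captured:
 * WR_INDIRECT - the data block is written in place by dmu_sync() and
 * the log record only references it;
 * WR_COPIED - the data is copied into the log record right away;
 * WR_NEED_COPY - the data is read back from the DMU only if and when
 * the itx is committed to the on-disk log.
 */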
553 if (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
554 write_state = WR_INDIRECT;
555 else if (!spa_has_slogs(zilog->zl_spa) &&
556 resid >= zfs_immediate_write_sz)
557 write_state = WR_INDIRECT;
558 else if (ioflag & (O_SYNC | O_DSYNC))
559 write_state = WR_COPIED;
560 else
561 write_state = WR_NEED_COPY;
562
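/*
 * Consume one pending fsync marker from thread-specific data (set by
 * zfs_fsync()); a non-zero count forces this itx to remain synchronous
 * below.
 */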
563 if ((fsync_cnt = (uintptr_t)tsd_get(zfs_fsyncer_key)) != 0) {
564 (void) tsd_set(zfs_fsyncer_key, (void *)(fsync_cnt - 1));
565 }
566
567 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(ZTOZSB(zp)), &gen,
568 sizeof (gen));
569
570 while (resid) {
571 itx_t *itx;
572 lr_write_t *lr;
573 itx_wr_state_t wr_state = write_state;
574 ssize_t len = resid;
575
576 /*
577 * A WR_COPIED record must fit entirely in one log block.
578 * Large writes can use WR_NEED_COPY, which the ZIL will
579 * split into multiple records across several log blocks
580 * if necessary.
581 */
582 if (wr_state == WR_COPIED &&
583 resid > zil_max_copied_data(zilog))
584 wr_state = WR_NEED_COPY;
585 else if (wr_state == WR_INDIRECT)
586 len = MIN(blocksize - P2PHASE(off, blocksize), resid);
587
588 itx = zil_itx_create(txtype, sizeof (*lr) +
589 (wr_state == WR_COPIED ? len : 0));
590 lr = (lr_write_t *)&itx->itx_lr;
591
592 /*
593 * For WR_COPIED records, copy the data into the lr_write_t.
594 */
595 if (wr_state == WR_COPIED) {
596 int err;
597 DB_DNODE_ENTER(db);
598 err = dmu_read_by_dnode(DB_DNODE(db), off, len, lr + 1,
599 DMU_READ_NO_PREFETCH);
600 if (err != 0) {
601 zil_itx_destroy(itx);
602 itx = zil_itx_create(txtype, sizeof (*lr));
603 lr = (lr_write_t *)&itx->itx_lr;
604 wr_state = WR_NEED_COPY;
605 }
606 DB_DNODE_EXIT(db);
607 }
608
609 itx->itx_wr_state = wr_state;
610 lr->lr_foid = zp->z_id;
611 lr->lr_offset = off;
612 lr->lr_length = len;
613 lr->lr_blkoff = 0;
614 BP_ZERO(&lr->lr_blkptr);
615
616 itx->itx_private = ZTOZSB(zp);
617 itx->itx_gen = gen;
618
619 if (!(ioflag & (O_SYNC | O_DSYNC)) && (zp->z_sync_cnt == 0) &&
620 (fsync_cnt == 0))
621 itx->itx_sync = B_FALSE;
622
623 itx->itx_callback = callback;
624 itx->itx_callback_data = callback_data;
625 zil_itx_assign(zilog, itx, tx);
626
627 off += len;
628 resid -= len;
629 }
630
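/*
 * Account the bytes queued for the ZIL in the pool's write log
 * accounting, which is used to limit the amount of outstanding log
 * data.
 */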
631 if (write_state == WR_COPIED || write_state == WR_NEED_COPY) {
632 dsl_pool_wrlog_count(zilog->zl_dmu_pool, size, tx->tx_txg);
633 }
634 }
635
636 /*
637 * Handles TX_TRUNCATE transactions.
638 */
639 void
640 zfs_log_truncate(zilog_t *zilog, dmu_tx_t *tx, int txtype,
641 znode_t *zp, uint64_t off, uint64_t len)
642 {
643 itx_t *itx;
644 lr_truncate_t *lr;
645
646 if (zil_replaying(zilog, tx) || zp->z_unlinked ||
647 zfs_xattr_owner_unlinked(zp))
648 return;
649
650 itx = zil_itx_create(txtype, sizeof (*lr));
651 lr = (lr_truncate_t *)&itx->itx_lr;
652 lr->lr_foid = zp->z_id;
653 lr->lr_offset = off;
654 lr->lr_length = len;
655
656 itx->itx_sync = (zp->z_sync_cnt != 0);
657 zil_itx_assign(zilog, itx, tx);
658 }
659
660 /*
661 * Handles TX_SETATTR transactions.
662 */
663 void
664 zfs_log_setattr(zilog_t *zilog, dmu_tx_t *tx, int txtype,
665 znode_t *zp, vattr_t *vap, uint_t mask_applied, zfs_fuid_info_t *fuidp)
666 {
667 itx_t *itx;
668 lr_setattr_t *lr;
669 xvattr_t *xvap = (xvattr_t *)vap;
670 size_t recsize = sizeof (lr_setattr_t);
671 void *start;
672
673 if (zil_replaying(zilog, tx) || zp->z_unlinked)
674 return;
675
676 /*
677 * If XVATTR is set, the log record size needs to allow for an
678 * lr_attr_t plus the xvattr mask, mapsize and create time, as well
679 * as the actual attribute values.
680 */
681 if (vap->va_mask & ATTR_XVATTR)
682 recsize = sizeof (*lr) + ZIL_XVAT_SIZE(xvap->xva_mapsize);
683
684 if (fuidp)
685 recsize += fuidp->z_domain_str_sz;
686
687 itx = zil_itx_create(txtype, recsize);
688 lr = (lr_setattr_t *)&itx->itx_lr;
689 lr->lr_foid = zp->z_id;
690 lr->lr_mask = (uint64_t)mask_applied;
691 lr->lr_mode = (uint64_t)vap->va_mode;
692 if ((mask_applied & ATTR_UID) && IS_EPHEMERAL(vap->va_uid))
693 lr->lr_uid = fuidp->z_fuid_owner;
694 else
695 lr->lr_uid = (uint64_t)vap->va_uid;
696
697 if ((mask_applied & ATTR_GID) && IS_EPHEMERAL(vap->va_gid))
698 lr->lr_gid = fuidp->z_fuid_group;
699 else
700 lr->lr_gid = (uint64_t)vap->va_gid;
701
702 lr->lr_size = (uint64_t)vap->va_size;
703 ZFS_TIME_ENCODE(&vap->va_atime, lr->lr_atime);
704 ZFS_TIME_ENCODE(&vap->va_mtime, lr->lr_mtime);
705 start = (lr_setattr_t *)(lr + 1);
706 if (vap->va_mask & ATTR_XVATTR) {
707 zfs_log_xvattr((lr_attr_t *)start, xvap);
708 start = (caddr_t)start + ZIL_XVAT_SIZE(xvap->xva_mapsize);
709 }
710
711 /*
712 * Now append any domain information to the end
713 */
714
715 if (fuidp)
716 (void) zfs_log_fuid_domains(fuidp, start);
717
718 itx->itx_sync = (zp->z_sync_cnt != 0);
719 zil_itx_assign(zilog, itx, tx);
720 }
721
722 /*
723 * Handles TX_ACL transactions.
724 */
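/*
 * Filesystems older than ZPL_VERSION_FUID log the smaller TX_ACL_V0
 * record; newer ones log TX_ACL, which can also carry FUID and domain
 * information.
 */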
725 void
726 zfs_log_acl(zilog_t *zilog, dmu_tx_t *tx, znode_t *zp,
727 vsecattr_t *vsecp, zfs_fuid_info_t *fuidp)
728 {
729 itx_t *itx;
730 lr_acl_v0_t *lrv0;
731 lr_acl_t *lr;
732 int txtype;
733 int lrsize;
734 size_t txsize;
735 size_t aclbytes = vsecp->vsa_aclentsz;
736
737 if (zil_replaying(zilog, tx) || zp->z_unlinked)
738 return;
739
740 txtype = (ZTOZSB(zp)->z_version < ZPL_VERSION_FUID) ?
741 TX_ACL_V0 : TX_ACL;
742
743 if (txtype == TX_ACL)
744 lrsize = sizeof (*lr);
745 else
746 lrsize = sizeof (*lrv0);
747
748 txsize = lrsize +
749 ((txtype == TX_ACL) ? ZIL_ACE_LENGTH(aclbytes) : aclbytes) +
750 (fuidp ? fuidp->z_domain_str_sz : 0) +
751 sizeof (uint64_t) * (fuidp ? fuidp->z_fuid_cnt : 0);
752
753 itx = zil_itx_create(txtype, txsize);
754
755 lr = (lr_acl_t *)&itx->itx_lr;
756 lr->lr_foid = zp->z_id;
757 if (txtype == TX_ACL) {
758 lr->lr_acl_bytes = aclbytes;
759 lr->lr_domcnt = fuidp ? fuidp->z_domain_cnt : 0;
760 lr->lr_fuidcnt = fuidp ? fuidp->z_fuid_cnt : 0;
761 if (vsecp->vsa_mask & VSA_ACE_ACLFLAGS)
762 lr->lr_acl_flags = (uint64_t)vsecp->vsa_aclflags;
763 else
764 lr->lr_acl_flags = 0;
765 }
766 lr->lr_aclcnt = (uint64_t)vsecp->vsa_aclcnt;
767
768 if (txtype == TX_ACL_V0) {
769 lrv0 = (lr_acl_v0_t *)lr;
770 bcopy(vsecp->vsa_aclentp, (ace_t *)(lrv0 + 1), aclbytes);
771 } else {
772 void *start = (ace_t *)(lr + 1);
773
774 bcopy(vsecp->vsa_aclentp, start, aclbytes);
775
776 start = (caddr_t)start + ZIL_ACE_LENGTH(aclbytes);
777
778 if (fuidp) {
779 start = zfs_log_fuid_ids(fuidp, start);
780 (void) zfs_log_fuid_domains(fuidp, start);
781 }
782 }
783
784 itx->itx_sync = (zp->z_sync_cnt != 0);
785 zil_itx_assign(zilog, itx, tx);
786 }
787
788 /* BEGIN CSTYLED */
789 ZFS_MODULE_PARAM(zfs, zfs_, immediate_write_sz, LONG, ZMOD_RW,
790 "Largest data block to write to zil");
791 /* END CSTYLED */