/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2018 by Delphix. All rights reserved.
 */


#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/cmn_err.h>
#include <sys/kmem.h>
#include <sys/thread.h>
#include <sys/file.h>
#include <sys/vfs.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_dir.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/byteorder.h>
#include <sys/policy.h>
#include <sys/stat.h>
#include <sys/acl.h>
#include <sys/dmu.h>
#include <sys/dbuf.h>
#include <sys/spa.h>
#include <sys/zfs_fuid.h>
#include <sys/dsl_dataset.h>

/*
 * These zfs_log_* functions must be called within a dmu tx, in one
 * of 2 contexts depending on zilog->z_replay:
 *
 * Non replay mode
 * ---------------
 * We need to record the transaction so that if it is committed to
 * the Intent Log then it can be replayed. An intent log transaction
 * structure (itx_t) is allocated and all the information necessary to
 * possibly replay the transaction is saved in it. The itx is then assigned
 * a sequence number and inserted in the in-memory list anchored in the zilog.
 *
 * Replay mode
 * -----------
 * We need to mark the intent log record as replayed in the log header.
 * This is done in the same transaction as the replay so that they
 * commit atomically.
 */

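/*
 * Pick the ZIL transaction type for a create: the base type depends on
 * whether a file, directory or extended attribute directory is being
 * created, with ACL and/or extended attribute variants selected when
 * vsecp and/or an xvattr are supplied.
 */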
int
zfs_log_create_txtype(zil_create_t type, vsecattr_t *vsecp, vattr_t *vap)
{
	int isxvattr = (vap->va_mask & ATTR_XVATTR);
	switch (type) {
	case Z_FILE:
		if (vsecp == NULL && !isxvattr)
			return (TX_CREATE);
		if (vsecp && isxvattr)
			return (TX_CREATE_ACL_ATTR);
		if (vsecp)
			return (TX_CREATE_ACL);
		else
			return (TX_CREATE_ATTR);
	case Z_DIR:
		if (vsecp == NULL && !isxvattr)
			return (TX_MKDIR);
		if (vsecp && isxvattr)
			return (TX_MKDIR_ACL_ATTR);
		if (vsecp)
			return (TX_MKDIR_ACL);
		else
			return (TX_MKDIR_ATTR);
	case Z_XATTRDIR:
		return (TX_MKXATTR);
	}
	ASSERT(0);
	return (TX_MAX_TYPE);
}

/*
 * Build up the log data necessary for logging an xvattr_t.
 * First the lr_attr_t is initialized. Following the lr_attr_t
 * are the mapsize and attribute bitmap copied from the xvattr_t.
 * Following the bitmap and bitmapsize, two 64 bit words are reserved
 * for the create time, which may be set. Following the create time
 * is a single 64 bit integer which has the bits to set on
 * replay for the xvattr.
 */
static void
zfs_log_xvattr(lr_attr_t *lrattr, xvattr_t *xvap)
{
	xoptattr_t *xoap;

	xoap = xva_getxoptattr(xvap);
	ASSERT(xoap);

	lrattr->lr_attr_masksize = xvap->xva_mapsize;
	uint32_t *bitmap = &lrattr->lr_attr_bitmap;
	for (int i = 0; i != xvap->xva_mapsize; i++, bitmap++)
		*bitmap = xvap->xva_reqattrmap[i];

	lr_attr_end_t *end = (lr_attr_end_t *)bitmap;
	end->lr_attr_attrs = 0;
	end->lr_attr_crtime[0] = 0;
	end->lr_attr_crtime[1] = 0;
	memset(end->lr_attr_scanstamp, 0, AV_SCANSTAMP_SZ);

	if (XVA_ISSET_REQ(xvap, XAT_READONLY))
		end->lr_attr_attrs |= (xoap->xoa_readonly == 0) ? 0 :
		    XAT0_READONLY;
	if (XVA_ISSET_REQ(xvap, XAT_HIDDEN))
		end->lr_attr_attrs |= (xoap->xoa_hidden == 0) ? 0 :
		    XAT0_HIDDEN;
	if (XVA_ISSET_REQ(xvap, XAT_SYSTEM))
		end->lr_attr_attrs |= (xoap->xoa_system == 0) ? 0 :
		    XAT0_SYSTEM;
	if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE))
		end->lr_attr_attrs |= (xoap->xoa_archive == 0) ? 0 :
		    XAT0_ARCHIVE;
	if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE))
		end->lr_attr_attrs |= (xoap->xoa_immutable == 0) ? 0 :
		    XAT0_IMMUTABLE;
	if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK))
		end->lr_attr_attrs |= (xoap->xoa_nounlink == 0) ? 0 :
		    XAT0_NOUNLINK;
	if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY))
		end->lr_attr_attrs |= (xoap->xoa_appendonly == 0) ? 0 :
		    XAT0_APPENDONLY;
	if (XVA_ISSET_REQ(xvap, XAT_OPAQUE))
		end->lr_attr_attrs |= (xoap->xoa_opaque == 0) ? 0 :
		    XAT0_OPAQUE;
	if (XVA_ISSET_REQ(xvap, XAT_NODUMP))
		end->lr_attr_attrs |= (xoap->xoa_nodump == 0) ? 0 :
		    XAT0_NODUMP;
	if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED))
		end->lr_attr_attrs |= (xoap->xoa_av_quarantined == 0) ? 0 :
		    XAT0_AV_QUARANTINED;
	if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED))
		end->lr_attr_attrs |= (xoap->xoa_av_modified == 0) ? 0 :
		    XAT0_AV_MODIFIED;
	if (XVA_ISSET_REQ(xvap, XAT_CREATETIME))
		ZFS_TIME_ENCODE(&xoap->xoa_createtime, end->lr_attr_crtime);
	if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) {
		ASSERT(!XVA_ISSET_REQ(xvap, XAT_PROJID));

		memcpy(end->lr_attr_scanstamp, xoap->xoa_av_scanstamp,
		    AV_SCANSTAMP_SZ);
	} else if (XVA_ISSET_REQ(xvap, XAT_PROJID)) {
		/*
		 * XAT_PROJID and XAT_AV_SCANSTAMP will never be valid
		 * at the same time, so we can share the same space.
		 */
		memcpy(end->lr_attr_scanstamp, &xoap->xoa_projid,
		    sizeof (uint64_t));
	}
	if (XVA_ISSET_REQ(xvap, XAT_REPARSE))
		end->lr_attr_attrs |= (xoap->xoa_reparse == 0) ? 0 :
		    XAT0_REPARSE;
	if (XVA_ISSET_REQ(xvap, XAT_OFFLINE))
		end->lr_attr_attrs |= (xoap->xoa_offline == 0) ? 0 :
		    XAT0_OFFLINE;
	if (XVA_ISSET_REQ(xvap, XAT_SPARSE))
		end->lr_attr_attrs |= (xoap->xoa_sparse == 0) ? 0 :
		    XAT0_SPARSE;
	if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT))
		end->lr_attr_attrs |= (xoap->xoa_projinherit == 0) ? 0 :
		    XAT0_PROJINHERIT;
}

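/*
 * Copy the log centric FUID of each ACE into the log record and return
 * a pointer to just past the last id written.
 */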
static void *
zfs_log_fuid_ids(zfs_fuid_info_t *fuidp, void *start)
{
	zfs_fuid_t *zfuid;
	uint64_t *fuidloc = start;

	/* First copy in the ACE FUIDs */
	for (zfuid = list_head(&fuidp->z_fuids); zfuid;
	    zfuid = list_next(&fuidp->z_fuids, zfuid)) {
		*fuidloc++ = zfuid->z_logfuid;
	}
	return (fuidloc);
}


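/*
 * Append the NUL-terminated FUID domain strings, if any, and return a
 * pointer to just past the last one.
 */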
static void *
zfs_log_fuid_domains(zfs_fuid_info_t *fuidp, void *start)
{
	zfs_fuid_domain_t *zdomain;

	/* now copy in the domain info, if any */
	if (fuidp->z_domain_str_sz != 0) {
		for (zdomain = list_head(&fuidp->z_domains); zdomain;
		    zdomain = list_next(&fuidp->z_domains, zdomain)) {
			memcpy(start, zdomain->z_domain,
			    strlen(zdomain->z_domain) + 1);
			start = (caddr_t)start +
			    strlen(zdomain->z_domain) + 1;
		}
	}
	return (start);
}

/*
 * If zp is an xattr node, check whether the xattr owner is unlinked.
 * We don't want to log anything if the owner is unlinked.
 */
static int
zfs_xattr_owner_unlinked(znode_t *zp)
{
	int unlinked = 0;
	znode_t *dzp;
#ifdef __FreeBSD__
	znode_t *tzp = zp;

	/*
	 * zrele drops the vnode lock which violates the VOP locking contract
	 * on FreeBSD. See comment at the top of zfs_replay.c for more detail.
	 */
	/*
	 * If zp is an XATTR node, keep walking up via z_xattr_parent until
	 * we get the owner.
	 */
	while (tzp->z_pflags & ZFS_XATTR) {
		ASSERT3U(zp->z_xattr_parent, !=, 0);
		if (zfs_zget(ZTOZSB(tzp), tzp->z_xattr_parent, &dzp) != 0) {
			unlinked = 1;
			break;
		}

		if (tzp != zp)
			zrele(tzp);
		tzp = dzp;
		unlinked = tzp->z_unlinked;
	}
	if (tzp != zp)
		zrele(tzp);
#else
	zhold(zp);
	/*
	 * If zp is an XATTR node, keep walking up via z_xattr_parent until
	 * we get the owner.
	 */
	while (zp->z_pflags & ZFS_XATTR) {
		ASSERT3U(zp->z_xattr_parent, !=, 0);
		if (zfs_zget(ZTOZSB(zp), zp->z_xattr_parent, &dzp) != 0) {
			unlinked = 1;
			break;
		}

		zrele(zp);
		zp = dzp;
		unlinked = zp->z_unlinked;
	}
	zrele(zp);
#endif
	return (unlinked);
}

/*
 * Handles TX_CREATE, TX_CREATE_ATTR, TX_MKDIR, TX_MKDIR_ATTR and
 * TX_MKXATTR transactions.
 *
 * TX_CREATE and TX_MKDIR are standard creates, but they may have FUID
 * domain information appended prior to the name. In this case the
 * uid/gid in the log record will be a log centric FUID.
 *
 * TX_CREATE_ACL_ATTR and TX_MKDIR_ACL_ATTR handle special creates that
 * may contain attributes, ACL and optional fuid information.
 *
 * TX_CREATE_ACL and TX_MKDIR_ACL handle special creates that specify
 * an ACL and normal users/groups in the ACEs.
 *
 * There may be optional xvattr attribute information similar
 * to zfs_log_setattr.
 *
 * Also, after the file name, "domain" strings may be appended.
 */
void
zfs_log_create(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
    znode_t *dzp, znode_t *zp, const char *name, vsecattr_t *vsecp,
    zfs_fuid_info_t *fuidp, vattr_t *vap)
{
	itx_t *itx;
	lr_create_t *lr;
	lr_acl_create_t *lracl;
	size_t aclsize = (vsecp != NULL) ? vsecp->vsa_aclentsz : 0;
	size_t xvatsize = 0;
	size_t txsize;
	xvattr_t *xvap = (xvattr_t *)vap;
	void *end;
	size_t lrsize;
	size_t namesize = strlen(name) + 1;
	size_t fuidsz = 0;

	if (zil_replaying(zilog, tx) || zfs_xattr_owner_unlinked(dzp))
		return;

	/*
	 * If we have FUIDs present then add in space for
	 * domains and ACE fuid's if any.
	 */
	if (fuidp) {
		fuidsz += fuidp->z_domain_str_sz;
		fuidsz += fuidp->z_fuid_cnt * sizeof (uint64_t);
	}

	if (vap->va_mask & ATTR_XVATTR)
		xvatsize = ZIL_XVAT_SIZE(xvap->xva_mapsize);

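	/*
	 * Size the record: the fixed lr_create_t (or lr_acl_create_t for
	 * the ACL variants) is followed by the optional xvattr data, the
	 * ACEs, any FUID ids and domain strings, and finally the name.
	 */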
	if ((int)txtype == TX_CREATE_ATTR || (int)txtype == TX_MKDIR_ATTR ||
	    (int)txtype == TX_CREATE || (int)txtype == TX_MKDIR ||
	    (int)txtype == TX_MKXATTR) {
		txsize = sizeof (*lr) + namesize + fuidsz + xvatsize;
		lrsize = sizeof (*lr);
	} else {
		txsize =
		    sizeof (lr_acl_create_t) + namesize + fuidsz +
		    ZIL_ACE_LENGTH(aclsize) + xvatsize;
		lrsize = sizeof (lr_acl_create_t);
	}

	itx = zil_itx_create(txtype, txsize);

	lr = (lr_create_t *)&itx->itx_lr;
	lr->lr_doid = dzp->z_id;
	lr->lr_foid = zp->z_id;
	/* Store dnode slot count in 8 bits above object id. */
	LR_FOID_SET_SLOTS(lr->lr_foid, zp->z_dnodesize >> DNODE_SHIFT);
	lr->lr_mode = zp->z_mode;
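	/*
	 * For ephemeral ids the log record stores the log centric FUID
	 * from fuidp rather than the raw uid/gid (see the comment above).
	 */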
	if (!IS_EPHEMERAL(KUID_TO_SUID(ZTOUID(zp)))) {
		lr->lr_uid = (uint64_t)KUID_TO_SUID(ZTOUID(zp));
	} else {
		lr->lr_uid = fuidp->z_fuid_owner;
	}
	if (!IS_EPHEMERAL(KGID_TO_SGID(ZTOGID(zp)))) {
		lr->lr_gid = (uint64_t)KGID_TO_SGID(ZTOGID(zp));
	} else {
		lr->lr_gid = fuidp->z_fuid_group;
	}
	(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(ZTOZSB(zp)), &lr->lr_gen,
	    sizeof (uint64_t));
	(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(ZTOZSB(zp)),
	    lr->lr_crtime, sizeof (uint64_t) * 2);

	if (sa_lookup(zp->z_sa_hdl, SA_ZPL_RDEV(ZTOZSB(zp)), &lr->lr_rdev,
	    sizeof (lr->lr_rdev)) != 0)
		lr->lr_rdev = 0;

	/*
	 * Fill in xvattr info if any
	 */
	if (vap->va_mask & ATTR_XVATTR) {
		zfs_log_xvattr((lr_attr_t *)((caddr_t)lr + lrsize), xvap);
		end = (caddr_t)lr + lrsize + xvatsize;
	} else {
		end = (caddr_t)lr + lrsize;
	}

	/* Now fill in any ACL info */

	if (vsecp) {
		lracl = (lr_acl_create_t *)&itx->itx_lr;
		lracl->lr_aclcnt = vsecp->vsa_aclcnt;
		lracl->lr_acl_bytes = aclsize;
		lracl->lr_domcnt = fuidp ? fuidp->z_domain_cnt : 0;
		lracl->lr_fuidcnt = fuidp ? fuidp->z_fuid_cnt : 0;
		if (vsecp->vsa_aclflags & VSA_ACE_ACLFLAGS)
			lracl->lr_acl_flags = (uint64_t)vsecp->vsa_aclflags;
		else
			lracl->lr_acl_flags = 0;

		memcpy(end, vsecp->vsa_aclentp, aclsize);
		end = (caddr_t)end + ZIL_ACE_LENGTH(aclsize);
	}

	/* drop in FUID info */
	if (fuidp) {
		end = zfs_log_fuid_ids(fuidp, end);
		end = zfs_log_fuid_domains(fuidp, end);
	}
	/*
	 * Now place file name in log record
	 */
	memcpy(end, name, namesize);

	zil_itx_assign(zilog, itx, tx);
}

/*
 * Handles both TX_REMOVE and TX_RMDIR transactions.
 */
void
zfs_log_remove(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
    znode_t *dzp, const char *name, uint64_t foid, boolean_t unlinked)
{
	itx_t *itx;
	lr_remove_t *lr;
	size_t namesize = strlen(name) + 1;

	if (zil_replaying(zilog, tx) || zfs_xattr_owner_unlinked(dzp))
		return;

	itx = zil_itx_create(txtype, sizeof (*lr) + namesize);
	lr = (lr_remove_t *)&itx->itx_lr;
	lr->lr_doid = dzp->z_id;
	memcpy(lr + 1, name, namesize);

	itx->itx_oid = foid;

	/*
	 * Object ids can be re-instantiated in the next txg so
	 * remove any async transactions to avoid future leaks.
	 * This can happen if a fsync occurs on the re-instantiated
	 * object for a WR_INDIRECT or WR_NEED_COPY write, which gets
	 * the new file data and flushes a write record for the old object.
	 */
	if (unlinked) {
		ASSERT((txtype & ~TX_CI) == TX_REMOVE);
		zil_remove_async(zilog, foid);
	}
	zil_itx_assign(zilog, itx, tx);
}

/*
 * Handles TX_LINK transactions.
 */
void
zfs_log_link(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
    znode_t *dzp, znode_t *zp, const char *name)
{
	itx_t *itx;
	lr_link_t *lr;
	size_t namesize = strlen(name) + 1;

	if (zil_replaying(zilog, tx))
		return;

	itx = zil_itx_create(txtype, sizeof (*lr) + namesize);
	lr = (lr_link_t *)&itx->itx_lr;
	lr->lr_doid = dzp->z_id;
	lr->lr_link_obj = zp->z_id;
	memcpy(lr + 1, name, namesize);

	zil_itx_assign(zilog, itx, tx);
}

/*
 * Handles TX_SYMLINK transactions.
 */
void
zfs_log_symlink(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
    znode_t *dzp, znode_t *zp, const char *name, const char *link)
{
	itx_t *itx;
	lr_create_t *lr;
	size_t namesize = strlen(name) + 1;
	size_t linksize = strlen(link) + 1;

	if (zil_replaying(zilog, tx))
		return;

	itx = zil_itx_create(txtype, sizeof (*lr) + namesize + linksize);
	lr = (lr_create_t *)&itx->itx_lr;
	lr->lr_doid = dzp->z_id;
	lr->lr_foid = zp->z_id;
	lr->lr_uid = KUID_TO_SUID(ZTOUID(zp));
	lr->lr_gid = KGID_TO_SGID(ZTOGID(zp));
	lr->lr_mode = zp->z_mode;
	(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(ZTOZSB(zp)), &lr->lr_gen,
	    sizeof (uint64_t));
	(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(ZTOZSB(zp)),
	    lr->lr_crtime, sizeof (uint64_t) * 2);
	memcpy((char *)(lr + 1), name, namesize);
	memcpy((char *)(lr + 1) + namesize, link, linksize);

	zil_itx_assign(zilog, itx, tx);
}

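/*
 * Common body for TX_RENAME and TX_RENAME_EXCHANGE: log the source and
 * target directory object ids followed by both names.
 */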
static void
do_zfs_log_rename(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype, znode_t *sdzp,
    const char *sname, znode_t *tdzp, const char *dname, znode_t *szp)
{
	itx_t *itx;
	lr_rename_t *lr;
	size_t snamesize = strlen(sname) + 1;
	size_t dnamesize = strlen(dname) + 1;

	if (zil_replaying(zilog, tx))
		return;

	itx = zil_itx_create(txtype, sizeof (*lr) + snamesize + dnamesize);
	lr = (lr_rename_t *)&itx->itx_lr;
	lr->lr_sdoid = sdzp->z_id;
	lr->lr_tdoid = tdzp->z_id;
	memcpy((char *)(lr + 1), sname, snamesize);
	memcpy((char *)(lr + 1) + snamesize, dname, dnamesize);
	itx->itx_oid = szp->z_id;

	zil_itx_assign(zilog, itx, tx);
}

/*
 * Handles TX_RENAME transactions.
 */
void
zfs_log_rename(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype, znode_t *sdzp,
    const char *sname, znode_t *tdzp, const char *dname, znode_t *szp)
{
	txtype |= TX_RENAME;
	do_zfs_log_rename(zilog, tx, txtype, sdzp, sname, tdzp, dname, szp);
}

/*
 * Handles TX_RENAME_EXCHANGE transactions.
 */
void
zfs_log_rename_exchange(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
    znode_t *sdzp, const char *sname, znode_t *tdzp, const char *dname,
    znode_t *szp)
{
	txtype |= TX_RENAME_EXCHANGE;
	do_zfs_log_rename(zilog, tx, txtype, sdzp, sname, tdzp, dname, szp);
}

/*
 * Handles TX_RENAME_WHITEOUT transactions.
 *
 * Unfortunately we cannot reuse do_zfs_log_rename because we need to call
 * zfs_mknode() on replay which requires stashing bits as with TX_CREATE.
 */
void
zfs_log_rename_whiteout(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
    znode_t *sdzp, const char *sname, znode_t *tdzp, const char *dname,
    znode_t *szp, znode_t *wzp)
{
	itx_t *itx;
	lr_rename_whiteout_t *lr;
	size_t snamesize = strlen(sname) + 1;
	size_t dnamesize = strlen(dname) + 1;

	if (zil_replaying(zilog, tx))
		return;

	txtype |= TX_RENAME_WHITEOUT;
	itx = zil_itx_create(txtype, sizeof (*lr) + snamesize + dnamesize);
	lr = (lr_rename_whiteout_t *)&itx->itx_lr;
	lr->lr_rename.lr_sdoid = sdzp->z_id;
	lr->lr_rename.lr_tdoid = tdzp->z_id;

	/*
	 * RENAME_WHITEOUT will create an entry at the source znode, so we need
	 * to store the same data that the equivalent call to zfs_log_create()
	 * would.
	 */
	lr->lr_wfoid = wzp->z_id;
	LR_FOID_SET_SLOTS(lr->lr_wfoid, wzp->z_dnodesize >> DNODE_SHIFT);
	(void) sa_lookup(wzp->z_sa_hdl, SA_ZPL_GEN(ZTOZSB(wzp)), &lr->lr_wgen,
	    sizeof (uint64_t));
	(void) sa_lookup(wzp->z_sa_hdl, SA_ZPL_CRTIME(ZTOZSB(wzp)),
	    lr->lr_wcrtime, sizeof (uint64_t) * 2);
	lr->lr_wmode = wzp->z_mode;
	lr->lr_wuid = (uint64_t)KUID_TO_SUID(ZTOUID(wzp));
	lr->lr_wgid = (uint64_t)KGID_TO_SGID(ZTOGID(wzp));

	/*
	 * This rdev will always be makedevice(0, 0) but because the ZIL log
	 * and replay code needs to be platform independent (and there is no
	 * platform independent makedev()) we need to copy the one created
	 * during the rename operation.
	 */
	(void) sa_lookup(wzp->z_sa_hdl, SA_ZPL_RDEV(ZTOZSB(wzp)), &lr->lr_wrdev,
	    sizeof (lr->lr_wrdev));

	memcpy((char *)(lr + 1), sname, snamesize);
	memcpy((char *)(lr + 1) + snamesize, dname, dnamesize);
	itx->itx_oid = szp->z_id;

	zil_itx_assign(zilog, itx, tx);
}

/*
 * zfs_log_write() handles TX_WRITE transactions. The specified callback is
 * called as soon as the write is on stable storage (be it via a DMU sync or a
 * ZIL commit).
 */
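/*
 * Writes this large or larger are logged as WR_INDIRECT records when the
 * pool has no separate log device; see the write_state selection in
 * zfs_log_write() below.
 */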
static int64_t zfs_immediate_write_sz = 32768;

void
zfs_log_write(zilog_t *zilog, dmu_tx_t *tx, int txtype,
    znode_t *zp, offset_t off, ssize_t resid, int ioflag,
    zil_callback_t callback, void *callback_data)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)sa_get_db(zp->z_sa_hdl);
	uint32_t blocksize = zp->z_blksz;
	itx_wr_state_t write_state;
	uintptr_t fsync_cnt;
	uint64_t gen = 0;
	ssize_t size = resid;

	if (zil_replaying(zilog, tx) || zp->z_unlinked ||
	    zfs_xattr_owner_unlinked(zp)) {
		if (callback != NULL)
			callback(callback_data);
		return;
	}

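	/*
	 * Choose how the data will reach the log:
	 *   WR_INDIRECT  - only a block pointer is logged (logbias=throughput,
	 *                  or large writes with no separate log device),
	 *   WR_COPIED    - the data is copied into the itx right here
	 *                  (small synchronous writes),
	 *   WR_NEED_COPY - the data is read from the DMU only if and when
	 *                  the itx is actually committed.
	 */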
	if (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
		write_state = WR_INDIRECT;
	else if (!spa_has_slogs(zilog->zl_spa) &&
	    resid >= zfs_immediate_write_sz)
		write_state = WR_INDIRECT;
	else if (ioflag & (O_SYNC | O_DSYNC))
		write_state = WR_COPIED;
	else
		write_state = WR_NEED_COPY;

	if ((fsync_cnt = (uintptr_t)tsd_get(zfs_fsyncer_key)) != 0) {
		(void) tsd_set(zfs_fsyncer_key, (void *)(fsync_cnt - 1));
	}

	(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(ZTOZSB(zp)), &gen,
	    sizeof (gen));

	while (resid) {
		itx_t *itx;
		lr_write_t *lr;
		itx_wr_state_t wr_state = write_state;
		ssize_t len = resid;

		/*
		 * A WR_COPIED record must fit entirely in one log block.
		 * Large writes can use WR_NEED_COPY, which the ZIL will
		 * split into multiple records across several log blocks
		 * if necessary.
		 */
		if (wr_state == WR_COPIED &&
		    resid > zil_max_copied_data(zilog))
			wr_state = WR_NEED_COPY;
		else if (wr_state == WR_INDIRECT)
			len = MIN(blocksize - P2PHASE(off, blocksize), resid);

		itx = zil_itx_create(txtype, sizeof (*lr) +
		    (wr_state == WR_COPIED ? len : 0));
		lr = (lr_write_t *)&itx->itx_lr;

		/*
		 * For WR_COPIED records, copy the data into the lr_write_t.
		 */
		if (wr_state == WR_COPIED) {
			int err;
			DB_DNODE_ENTER(db);
			err = dmu_read_by_dnode(DB_DNODE(db), off, len, lr + 1,
			    DMU_READ_NO_PREFETCH);
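			/*
			 * If the read failed, fall back to a WR_NEED_COPY
			 * record so the data is fetched again at commit time.
			 */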
			if (err != 0) {
				zil_itx_destroy(itx);
				itx = zil_itx_create(txtype, sizeof (*lr));
				lr = (lr_write_t *)&itx->itx_lr;
				wr_state = WR_NEED_COPY;
			}
			DB_DNODE_EXIT(db);
		}

		itx->itx_wr_state = wr_state;
		lr->lr_foid = zp->z_id;
		lr->lr_offset = off;
		lr->lr_length = len;
		lr->lr_blkoff = 0;
		BP_ZERO(&lr->lr_blkptr);

		itx->itx_private = ZTOZSB(zp);
		itx->itx_gen = gen;

		if (!(ioflag & (O_SYNC | O_DSYNC)) && (zp->z_sync_cnt == 0) &&
		    (fsync_cnt == 0))
			itx->itx_sync = B_FALSE;

		itx->itx_callback = callback;
		itx->itx_callback_data = callback_data;
		zil_itx_assign(zilog, itx, tx);

		off += len;
		resid -= len;
	}

	if (write_state == WR_COPIED || write_state == WR_NEED_COPY) {
		dsl_pool_wrlog_count(zilog->zl_dmu_pool, size, tx->tx_txg);
	}
}

/*
 * Handles TX_TRUNCATE transactions.
 */
void
zfs_log_truncate(zilog_t *zilog, dmu_tx_t *tx, int txtype,
    znode_t *zp, uint64_t off, uint64_t len)
{
	itx_t *itx;
	lr_truncate_t *lr;

	if (zil_replaying(zilog, tx) || zp->z_unlinked ||
	    zfs_xattr_owner_unlinked(zp))
		return;

	itx = zil_itx_create(txtype, sizeof (*lr));
	lr = (lr_truncate_t *)&itx->itx_lr;
	lr->lr_foid = zp->z_id;
	lr->lr_offset = off;
	lr->lr_length = len;

	itx->itx_sync = (zp->z_sync_cnt != 0);
	zil_itx_assign(zilog, itx, tx);
}

/*
 * Handles TX_SETATTR transactions.
 */
void
zfs_log_setattr(zilog_t *zilog, dmu_tx_t *tx, int txtype,
    znode_t *zp, vattr_t *vap, uint_t mask_applied, zfs_fuid_info_t *fuidp)
{
	itx_t *itx;
	lr_setattr_t *lr;
	xvattr_t *xvap = (xvattr_t *)vap;
	size_t recsize = sizeof (lr_setattr_t);
	void *start;

	if (zil_replaying(zilog, tx) || zp->z_unlinked)
		return;

	/*
	 * If XVATTR set, then log record size needs to allow
	 * for lr_attr_t + xvattr mask, mapsize and create time
	 * plus actual attribute values
	 */
	if (vap->va_mask & ATTR_XVATTR)
		recsize = sizeof (*lr) + ZIL_XVAT_SIZE(xvap->xva_mapsize);

	if (fuidp)
		recsize += fuidp->z_domain_str_sz;

	itx = zil_itx_create(txtype, recsize);
	lr = (lr_setattr_t *)&itx->itx_lr;
	lr->lr_foid = zp->z_id;
	lr->lr_mask = (uint64_t)mask_applied;
	lr->lr_mode = (uint64_t)vap->va_mode;
	if ((mask_applied & ATTR_UID) && IS_EPHEMERAL(vap->va_uid))
		lr->lr_uid = fuidp->z_fuid_owner;
	else
		lr->lr_uid = (uint64_t)vap->va_uid;

	if ((mask_applied & ATTR_GID) && IS_EPHEMERAL(vap->va_gid))
		lr->lr_gid = fuidp->z_fuid_group;
	else
		lr->lr_gid = (uint64_t)vap->va_gid;

	lr->lr_size = (uint64_t)vap->va_size;
	ZFS_TIME_ENCODE(&vap->va_atime, lr->lr_atime);
	ZFS_TIME_ENCODE(&vap->va_mtime, lr->lr_mtime);
	start = (lr_setattr_t *)(lr + 1);
	if (vap->va_mask & ATTR_XVATTR) {
		zfs_log_xvattr((lr_attr_t *)start, xvap);
		start = (caddr_t)start + ZIL_XVAT_SIZE(xvap->xva_mapsize);
	}

	/*
	 * Now stick on domain information if any on end
	 */

	if (fuidp)
		(void) zfs_log_fuid_domains(fuidp, start);

	itx->itx_sync = (zp->z_sync_cnt != 0);
	zil_itx_assign(zilog, itx, tx);
}

/*
 * Handles TX_SETSAXATTR transactions.
 */
void
zfs_log_setsaxattr(zilog_t *zilog, dmu_tx_t *tx, int txtype,
    znode_t *zp, const char *name, const void *value, size_t size)
{
	itx_t *itx;
	lr_setsaxattr_t *lr;
	size_t recsize = sizeof (lr_setsaxattr_t);
	void *xattrstart;
	int namelen;

	if (zil_replaying(zilog, tx) || zp->z_unlinked)
		return;

	namelen = strlen(name) + 1;
	recsize += (namelen + size);
	itx = zil_itx_create(txtype, recsize);
	lr = (lr_setsaxattr_t *)&itx->itx_lr;
	lr->lr_foid = zp->z_id;
	xattrstart = (char *)(lr + 1);
	memcpy(xattrstart, name, namelen);
	if (value != NULL) {
		memcpy((char *)xattrstart + namelen, value, size);
		lr->lr_size = size;
	} else {
		lr->lr_size = 0;
	}

	itx->itx_sync = (zp->z_sync_cnt != 0);
	zil_itx_assign(zilog, itx, tx);
}

/*
 * Handles TX_ACL transactions.
 */
void
zfs_log_acl(zilog_t *zilog, dmu_tx_t *tx, znode_t *zp,
    vsecattr_t *vsecp, zfs_fuid_info_t *fuidp)
{
	itx_t *itx;
	lr_acl_v0_t *lrv0;
	lr_acl_t *lr;
	int txtype;
	int lrsize;
	size_t txsize;
	size_t aclbytes = vsecp->vsa_aclentsz;

	if (zil_replaying(zilog, tx) || zp->z_unlinked)
		return;

	txtype = (ZTOZSB(zp)->z_version < ZPL_VERSION_FUID) ?
	    TX_ACL_V0 : TX_ACL;

	if (txtype == TX_ACL)
		lrsize = sizeof (*lr);
	else
		lrsize = sizeof (*lrv0);

	txsize = lrsize +
	    ((txtype == TX_ACL) ? ZIL_ACE_LENGTH(aclbytes) : aclbytes) +
	    (fuidp ? fuidp->z_domain_str_sz : 0) +
	    sizeof (uint64_t) * (fuidp ? fuidp->z_fuid_cnt : 0);

	itx = zil_itx_create(txtype, txsize);

	lr = (lr_acl_t *)&itx->itx_lr;
	lr->lr_foid = zp->z_id;
	if (txtype == TX_ACL) {
		lr->lr_acl_bytes = aclbytes;
		lr->lr_domcnt = fuidp ? fuidp->z_domain_cnt : 0;
		lr->lr_fuidcnt = fuidp ? fuidp->z_fuid_cnt : 0;
		if (vsecp->vsa_mask & VSA_ACE_ACLFLAGS)
			lr->lr_acl_flags = (uint64_t)vsecp->vsa_aclflags;
		else
			lr->lr_acl_flags = 0;
	}
	lr->lr_aclcnt = (uint64_t)vsecp->vsa_aclcnt;

	if (txtype == TX_ACL_V0) {
		lrv0 = (lr_acl_v0_t *)lr;
		memcpy(lrv0 + 1, vsecp->vsa_aclentp, aclbytes);
	} else {
		void *start = (ace_t *)(lr + 1);

		memcpy(start, vsecp->vsa_aclentp, aclbytes);

		start = (caddr_t)start + ZIL_ACE_LENGTH(aclbytes);

		if (fuidp) {
			start = zfs_log_fuid_ids(fuidp, start);
			(void) zfs_log_fuid_domains(fuidp, start);
		}
	}

	itx->itx_sync = (zp->z_sync_cnt != 0);
	zil_itx_assign(zilog, itx, tx);
}

ZFS_MODULE_PARAM(zfs, zfs_, immediate_write_sz, S64, ZMOD_RW,
	"Largest data block to write to zil");