/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 */

#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zap.h>
#include <sys/dsl_synctask.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/utsname.h>
#include <sys/cmn_err.h>
#include <sys/sunddi.h>
#include <sys/cred.h>
#include "zfs_comutil.h"
#ifdef _KERNEL
#include <sys/zone.h>
#endif

/*
 * Routines to manage the on-disk history log.
 *
 * The history log is stored as a dmu object containing
 * <packed record length, record nvlist> tuples.
 *
 * Where "record nvlist" is an nvlist containing uint64_ts and strings, and
 * "packed record length" is the packed length of the "record nvlist" stored
 * as a little endian uint64_t.
 *
 * The log is implemented as a ring buffer, though the original creation
 * of the pool ('zpool create') is never overwritten.
 *
 * The history log is tracked as object 'spa_t::spa_history'. The bonus buffer
 * of 'spa_history' stores the offsets for logging/retrieving history as
 * 'spa_history_phys_t'. 'sh_pool_create_len' is the ending offset in bytes of
 * where the 'zpool create' record is stored. This allows us to never
 * overwrite the original creation of the pool. 'sh_phys_max_off' is the
 * physical ending offset in bytes of the log. This tells you the length of
 * the buffer. 'sh_eof' is the logical EOF (in bytes). Whenever a record
 * is added, 'sh_eof' is incremented by the size of the record.
 * 'sh_eof' is never decremented. 'sh_bof' is the logical BOF (in bytes).
 * This is where the consumer should start reading from after reading in
 * the 'zpool create' portion of the log.
 *
 * 'sh_records_lost' keeps track of how many records have been overwritten
 * and permanently lost.
 */

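/*
 * Illustrative example (hypothetical numbers): with sh_pool_create_len
 * of 512 bytes and sh_phys_max_off of 128K, the 'zpool create' record
 * occupies physical bytes [0, 512) and is never overwritten, while all
 * later records wrap around within [512, 128K). A logical offset of
 * 512 + N therefore maps to physical offset 512 + (N % (128K - 512)),
 * which is what spa_history_log_to_phys() below computes.
 */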
/* convert a logical offset to physical */
static uint64_t
spa_history_log_to_phys(uint64_t log_off, spa_history_phys_t *shpp)
{
	uint64_t phys_len;

	phys_len = shpp->sh_phys_max_off - shpp->sh_pool_create_len;
	return ((log_off - shpp->sh_pool_create_len) % phys_len
	    + shpp->sh_pool_create_len);
}

void
spa_history_create_obj(spa_t *spa, dmu_tx_t *tx)
{
	dmu_buf_t *dbp;
	spa_history_phys_t *shpp;
	objset_t *mos = spa->spa_meta_objset;

	ASSERT(spa->spa_history == 0);
	spa->spa_history = dmu_object_alloc(mos, DMU_OT_SPA_HISTORY,
	    SPA_MAXBLOCKSIZE, DMU_OT_SPA_HISTORY_OFFSETS,
	    sizeof (spa_history_phys_t), tx);

	VERIFY(zap_add(mos, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_HISTORY, sizeof (uint64_t), 1,
	    &spa->spa_history, tx) == 0);

	VERIFY(0 == dmu_bonus_hold(mos, spa->spa_history, FTAG, &dbp));
	ASSERT(dbp->db_size >= sizeof (spa_history_phys_t));

	shpp = dbp->db_data;
	dmu_buf_will_dirty(dbp, tx);

	/*
	 * Figure out maximum size of history log. We set it at
	 * 0.1% of pool size, with a max of 1G and min of 128KB.
	 */
	shpp->sh_phys_max_off =
	    metaslab_class_get_dspace(spa_normal_class(spa)) / 1000;
	shpp->sh_phys_max_off = MIN(shpp->sh_phys_max_off, 1<<30);
	shpp->sh_phys_max_off = MAX(shpp->sh_phys_max_off, 128<<10);
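	/*
	 * For example, a 128GB pool gets on the order of 128MB of history;
	 * the MIN/MAX clamps above keep the log between 128KB and 1GB
	 * regardless of pool size.
	 */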

	dmu_buf_rele(dbp, FTAG);
}

/*
 * Change 'sh_bof' to the beginning of the next record.
 */
static int
spa_history_advance_bof(spa_t *spa, spa_history_phys_t *shpp)
{
	objset_t *mos = spa->spa_meta_objset;
	uint64_t firstread, reclen, phys_bof;
	char buf[sizeof (reclen)];
	int err;

	phys_bof = spa_history_log_to_phys(shpp->sh_bof, shpp);
	firstread = MIN(sizeof (reclen), shpp->sh_phys_max_off - phys_bof);

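	/*
	 * The 8-byte record length may straddle the physical end of the
	 * buffer, so it can take two reads to assemble it: whatever fits
	 * before sh_phys_max_off first, then the remainder starting just
	 * past the 'zpool create' record.
	 */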
	if ((err = dmu_read(mos, spa->spa_history, phys_bof, firstread,
	    buf, DMU_READ_PREFETCH)) != 0)
		return (err);
	if (firstread != sizeof (reclen)) {
		if ((err = dmu_read(mos, spa->spa_history,
		    shpp->sh_pool_create_len, sizeof (reclen) - firstread,
		    buf + firstread, DMU_READ_PREFETCH)) != 0)
			return (err);
	}

	reclen = LE_64(*((uint64_t *)buf));
	shpp->sh_bof += reclen + sizeof (reclen);
	shpp->sh_records_lost++;
	return (0);
}

static int
spa_history_write(spa_t *spa, void *buf, uint64_t len, spa_history_phys_t *shpp,
    dmu_tx_t *tx)
{
	uint64_t firstwrite, phys_eof;
	objset_t *mos = spa->spa_meta_objset;
	int err;

	ASSERT(MUTEX_HELD(&spa->spa_history_lock));

	/* see if we need to reset logical BOF */
	while (shpp->sh_phys_max_off - shpp->sh_pool_create_len -
	    (shpp->sh_eof - shpp->sh_bof) <= len) {
		if ((err = spa_history_advance_bof(spa, shpp)) != 0) {
			return (err);
		}
	}
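	/*
	 * Each spa_history_advance_bof() call above discards the oldest
	 * record (bumping sh_records_lost) until there is room in the
	 * ring buffer for the new record.
	 */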

	phys_eof = spa_history_log_to_phys(shpp->sh_eof, shpp);
	firstwrite = MIN(len, shpp->sh_phys_max_off - phys_eof);
	shpp->sh_eof += len;
	dmu_write(mos, spa->spa_history, phys_eof, firstwrite, buf, tx);

	len -= firstwrite;
	if (len > 0) {
		/* write out the rest at the beginning of physical file */
		dmu_write(mos, spa->spa_history, shpp->sh_pool_create_len,
		    len, (char *)buf + firstwrite, tx);
	}

	return (0);
}

static char *
spa_history_zone(void)
{
#ifdef _KERNEL
#ifdef HAVE_SPL
	return ("linux");
#else
	return (curproc->p_zone->zone_name);
#endif
#else
	return (NULL);
#endif
}

/*
 * Write out a history event.
 */
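/*
 * Note that this sync task consumes the nvlist passed in as 'arg';
 * it is freed here once the record has been written, so callers must
 * not use it afterwards.
 */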
/*ARGSUSED*/
static void
spa_history_log_sync(void *arg, dmu_tx_t *tx)
{
	nvlist_t *nvl = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	objset_t *mos = spa->spa_meta_objset;
	dmu_buf_t *dbp;
	spa_history_phys_t *shpp;
	size_t reclen;
	uint64_t le_len;
	char *record_packed = NULL;
	int ret;

	/*
	 * If we have an older pool that doesn't have a command
	 * history object, create it now.
	 */
	mutex_enter(&spa->spa_history_lock);
	if (!spa->spa_history)
		spa_history_create_obj(spa, tx);
	mutex_exit(&spa->spa_history_lock);

	/*
	 * Get the offset of where we need to write via the bonus buffer.
	 * Update the offset when the write completes.
	 */
	VERIFY0(dmu_bonus_hold(mos, spa->spa_history, FTAG, &dbp));
	shpp = dbp->db_data;

	dmu_buf_will_dirty(dbp, tx);

#ifdef ZFS_DEBUG
	{
		dmu_object_info_t doi;
		dmu_object_info_from_db(dbp, &doi);
		ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_SPA_HISTORY_OFFSETS);
	}
#endif

	fnvlist_add_uint64(nvl, ZPOOL_HIST_TIME, gethrestime_sec());
#ifdef _KERNEL
	fnvlist_add_string(nvl, ZPOOL_HIST_HOST, utsname.nodename);
#endif
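	/* Echo a human-readable summary of the record to the debug log. */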
	if (nvlist_exists(nvl, ZPOOL_HIST_CMD)) {
		zfs_dbgmsg("command: %s",
		    fnvlist_lookup_string(nvl, ZPOOL_HIST_CMD));
	} else if (nvlist_exists(nvl, ZPOOL_HIST_INT_NAME)) {
		if (nvlist_exists(nvl, ZPOOL_HIST_DSNAME)) {
			zfs_dbgmsg("txg %lld %s %s (id %llu) %s",
			    fnvlist_lookup_uint64(nvl, ZPOOL_HIST_TXG),
			    fnvlist_lookup_string(nvl, ZPOOL_HIST_INT_NAME),
			    fnvlist_lookup_string(nvl, ZPOOL_HIST_DSNAME),
			    fnvlist_lookup_uint64(nvl, ZPOOL_HIST_DSID),
			    fnvlist_lookup_string(nvl, ZPOOL_HIST_INT_STR));
		} else {
			zfs_dbgmsg("txg %lld %s %s",
			    fnvlist_lookup_uint64(nvl, ZPOOL_HIST_TXG),
			    fnvlist_lookup_string(nvl, ZPOOL_HIST_INT_NAME),
			    fnvlist_lookup_string(nvl, ZPOOL_HIST_INT_STR));
		}
	} else if (nvlist_exists(nvl, ZPOOL_HIST_IOCTL)) {
		zfs_dbgmsg("ioctl %s",
		    fnvlist_lookup_string(nvl, ZPOOL_HIST_IOCTL));
	}

	VERIFY3U(nvlist_pack(nvl, &record_packed, &reclen, NV_ENCODE_NATIVE,
	    KM_PUSHPAGE), ==, 0);

	mutex_enter(&spa->spa_history_lock);

	/* write out the packed length as little endian */
	le_len = LE_64((uint64_t)reclen);
	ret = spa_history_write(spa, &le_len, sizeof (le_len), shpp, tx);
	if (!ret)
		ret = spa_history_write(spa, record_packed, reclen, shpp, tx);

	/* The first command is the create, which we keep forever */
	if (ret == 0 && shpp->sh_pool_create_len == 0 &&
	    nvlist_exists(nvl, ZPOOL_HIST_CMD)) {
		shpp->sh_pool_create_len = shpp->sh_bof = shpp->sh_eof;
	}

	mutex_exit(&spa->spa_history_lock);
	fnvlist_pack_free(record_packed, reclen);
	dmu_buf_rele(dbp, FTAG);
	fnvlist_free(nvl);
}

/*
 * Write out a history event.
 */
int
spa_history_log(spa_t *spa, const char *msg)
{
	int err;
	nvlist_t *nvl;

	VERIFY0(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_PUSHPAGE));

	fnvlist_add_string(nvl, ZPOOL_HIST_CMD, msg);
	err = spa_history_log_nvl(spa, nvl);
	fnvlist_free(nvl);
	return (err);
}

int
spa_history_log_nvl(spa_t *spa, nvlist_t *nvl)
{
	int err = 0;
	dmu_tx_t *tx;
	nvlist_t *nvarg;

	if (spa_version(spa) < SPA_VERSION_ZPOOL_HISTORY || !spa_writeable(spa))
		return (SET_ERROR(EINVAL));

	tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err) {
		dmu_tx_abort(tx);
		return (err);
	}

	VERIFY0(nvlist_dup(nvl, &nvarg, KM_PUSHPAGE));
	if (spa_history_zone() != NULL) {
		fnvlist_add_string(nvarg, ZPOOL_HIST_ZONE,
		    spa_history_zone());
	}
	fnvlist_add_uint64(nvarg, ZPOOL_HIST_WHO, crgetruid(CRED()));

	/* Kick this off asynchronously; errors are ignored. */
	dsl_sync_task_nowait(spa_get_dsl(spa), spa_history_log_sync,
	    nvarg, 0, tx);
	dmu_tx_commit(tx);

	/* spa_history_log_sync() will free nvarg */
	return (err);
}

/*
 * Read out the command history.
 */
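/*
 * On entry, *offp is the logical offset to resume reading from (0 for
 * the first call) and *len is the size of 'buf'; on return they are
 * updated to the offset for the next call and the number of bytes
 * actually copied. Consumers typically keep calling until no more
 * bytes are returned.
 */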
int
spa_history_get(spa_t *spa, uint64_t *offp, uint64_t *len, char *buf)
{
	objset_t *mos = spa->spa_meta_objset;
	dmu_buf_t *dbp;
	uint64_t read_len, phys_read_off, phys_eof;
	uint64_t leftover = 0;
	spa_history_phys_t *shpp;
	int err;

	/*
	 * If the command history doesn't exist (older pool),
	 * that's ok, just return ENOENT.
	 */
	if (!spa->spa_history)
		return (SET_ERROR(ENOENT));

	/*
	 * The history is logged asynchronously, so when they request
	 * the first chunk of history, make sure everything has been
	 * synced to disk so that we get it.
	 */
	if (*offp == 0 && spa_writeable(spa))
		txg_wait_synced(spa_get_dsl(spa), 0);

	if ((err = dmu_bonus_hold(mos, spa->spa_history, FTAG, &dbp)) != 0)
		return (err);
	shpp = dbp->db_data;

#ifdef ZFS_DEBUG
	{
		dmu_object_info_t doi;
		dmu_object_info_from_db(dbp, &doi);
		ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_SPA_HISTORY_OFFSETS);
	}
#endif

	mutex_enter(&spa->spa_history_lock);
	phys_eof = spa_history_log_to_phys(shpp->sh_eof, shpp);

	if (*offp < shpp->sh_pool_create_len) {
		/* read in just the zpool create history */
		phys_read_off = *offp;
		read_len = MIN(*len, shpp->sh_pool_create_len -
		    phys_read_off);
	} else {
		/*
		 * Need to reset passed in offset to BOF if the passed in
		 * offset has since been overwritten.
		 */
		*offp = MAX(*offp, shpp->sh_bof);
		phys_read_off = spa_history_log_to_phys(*offp, shpp);

		/*
		 * Read up to the minimum of what the user passed down or
		 * the EOF (physical or logical). If we hit physical EOF,
		 * use 'leftover' to read from the physical BOF.
		 */
		if (phys_read_off <= phys_eof) {
			read_len = MIN(*len, phys_eof - phys_read_off);
		} else {
			read_len = MIN(*len,
			    shpp->sh_phys_max_off - phys_read_off);
			if (phys_read_off + *len > shpp->sh_phys_max_off) {
				leftover = MIN(*len - read_len,
				    phys_eof - shpp->sh_pool_create_len);
			}
		}
	}

	/* offset for consumer to use next */
	*offp += read_len + leftover;

	/* tell the consumer how much you actually read */
	*len = read_len + leftover;

	if (read_len == 0) {
		mutex_exit(&spa->spa_history_lock);
		dmu_buf_rele(dbp, FTAG);
		return (0);
	}

	err = dmu_read(mos, spa->spa_history, phys_read_off, read_len, buf,
	    DMU_READ_PREFETCH);
	if (leftover && err == 0) {
		err = dmu_read(mos, spa->spa_history, shpp->sh_pool_create_len,
		    leftover, buf + read_len, DMU_READ_PREFETCH);
	}
	mutex_exit(&spa->spa_history_lock);

	dmu_buf_rele(dbp, FTAG);
	return (err);
}

/*
 * The nvlist will be consumed by this call.
 */
static void
log_internal(nvlist_t *nvl, const char *operation, spa_t *spa,
    dmu_tx_t *tx, const char *fmt, va_list adx)
{
	char *msg;
	va_list adx1;
	int size;

	/*
	 * If this is part of creating a pool, not everything is
	 * initialized yet, so don't bother logging the internal events.
	 * Likewise if the pool is not writeable.
	 */
	if (tx->tx_txg == TXG_INITIAL || !spa_writeable(spa)) {
		fnvlist_free(nvl);
		return;
	}

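	/*
	 * vsnprintf() with a NULL buffer only reports the formatted
	 * length, so the message buffer can be sized exactly before the
	 * string is formatted for real on the second pass.
	 */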
6f1ffb06 456 va_copy(adx1, adx);
222b9480
BB
457 size = vsnprintf(NULL, 0, fmt, adx1) + 1;
458 msg = kmem_alloc(size, KM_PUSHPAGE);
6f1ffb06
MA
459 va_end(adx1);
460 va_copy(adx1, adx);
461 (void) vsprintf(msg, fmt, adx1);
462 va_end(adx1);
463 fnvlist_add_string(nvl, ZPOOL_HIST_INT_STR, msg);
222b9480 464 kmem_free(msg, size);
6f1ffb06
MA
465
466 fnvlist_add_string(nvl, ZPOOL_HIST_INT_NAME, operation);
467 fnvlist_add_uint64(nvl, ZPOOL_HIST_TXG, tx->tx_txg);
34dc7c2f
BB
468
469 if (dmu_tx_is_syncing(tx)) {
13fe0198 470 spa_history_log_sync(nvl, tx);
34dc7c2f 471 } else {
13fe0198
MA
472 dsl_sync_task_nowait(spa_get_dsl(spa),
473 spa_history_log_sync, nvl, 0, tx);
34dc7c2f 474 }
6f1ffb06 475 /* spa_history_log_sync() will free nvl */
34dc7c2f 476}

void
spa_history_log_internal(spa_t *spa, const char *operation,
    dmu_tx_t *tx, const char *fmt, ...)
{
	dmu_tx_t *htx = tx;
	va_list adx;
	nvlist_t *nvl;

	/* create a tx if we didn't get one */
	if (tx == NULL) {
		htx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
		if (dmu_tx_assign(htx, TXG_WAIT) != 0) {
			dmu_tx_abort(htx);
			return;
		}
	}

	va_start(adx, fmt);
	VERIFY0(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_PUSHPAGE));
	log_internal(nvl, operation, spa, htx, fmt, adx);
	va_end(adx);

	/* if we didn't get a tx from the caller, commit the one we made */
	if (tx == NULL)
		dmu_tx_commit(htx);
}

void
spa_history_log_internal_ds(dsl_dataset_t *ds, const char *operation,
    dmu_tx_t *tx, const char *fmt, ...)
{
	va_list adx;
	char namebuf[MAXNAMELEN];
	nvlist_t *nvl;

	ASSERT(tx != NULL);

	dsl_dataset_name(ds, namebuf);
	VERIFY0(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_PUSHPAGE));
	fnvlist_add_string(nvl, ZPOOL_HIST_DSNAME, namebuf);
	fnvlist_add_uint64(nvl, ZPOOL_HIST_DSID, ds->ds_object);

	va_start(adx, fmt);
	log_internal(nvl, operation, dsl_dataset_get_spa(ds), tx, fmt, adx);
	va_end(adx);
}

void
spa_history_log_internal_dd(dsl_dir_t *dd, const char *operation,
    dmu_tx_t *tx, const char *fmt, ...)
{
	va_list adx;
	char namebuf[MAXNAMELEN];
	nvlist_t *nvl;

	ASSERT(tx != NULL);

	dsl_dir_name(dd, namebuf);
	VERIFY0(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_PUSHPAGE));
	fnvlist_add_string(nvl, ZPOOL_HIST_DSNAME, namebuf);
	fnvlist_add_uint64(nvl, ZPOOL_HIST_DSID,
	    dd->dd_phys->dd_head_dataset_obj);

	va_start(adx, fmt);
	log_internal(nvl, operation, dd->dd_pool->dp_spa, tx, fmt, adx);
	va_end(adx);
}

void
spa_history_log_version(spa_t *spa, const char *operation)
{
	spa_history_log_internal(spa, operation, NULL,
	    "pool version %llu; software version %llu/%d; uts %s %s %s %s",
	    (u_longlong_t)spa_version(spa), SPA_VERSION, ZPL_VERSION,
	    utsname.nodename, utsname.release, utsname.version,
	    utsname.machine);
}

#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(spa_history_create_obj);
EXPORT_SYMBOL(spa_history_get);
EXPORT_SYMBOL(spa_history_log);
EXPORT_SYMBOL(spa_history_log_internal);
EXPORT_SYMBOL(spa_history_log_version);
#endif