/***
  This file is part of systemd.

  Copyright 2011 Lennart Poettering

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/
#include <sys/statvfs.h>

#include "alloc-util.h"
#include "btrfs-util.h"
#include "chattr-util.h"
#include "journal-authenticate.h"
#include "journal-def.h"
#include "journal-file.h"
#include "parse-util.h"
#include "random-util.h"
#include "string-util.h"
#include "xattr-util.h"
#define DEFAULT_DATA_HASH_TABLE_SIZE (2047ULL*sizeof(HashItem))
#define DEFAULT_FIELD_HASH_TABLE_SIZE (333ULL*sizeof(HashItem))

#define COMPRESSION_SIZE_THRESHOLD (512ULL)

/* This is the minimum journal file size */
#define JOURNAL_FILE_SIZE_MIN (512ULL*1024ULL)                 /* 512 KiB */

/* These are the lower and upper bounds if we deduce the max_use value
 * from the file system size */
#define DEFAULT_MAX_USE_LOWER (1ULL*1024ULL*1024ULL)           /* 1 MiB */
#define DEFAULT_MAX_USE_UPPER (4ULL*1024ULL*1024ULL*1024ULL)   /* 4 GiB */

/* This is the default minimal use limit, how much we'll use even if keep_free suggests otherwise. */
#define DEFAULT_MIN_USE (1ULL*1024ULL*1024ULL)                 /* 1 MiB */

/* This is the upper bound if we deduce max_size from max_use */
#define DEFAULT_MAX_SIZE_UPPER (128ULL*1024ULL*1024ULL)        /* 128 MiB */

/* This is the upper bound if we deduce the keep_free value from the
 * file system size */
#define DEFAULT_KEEP_FREE_UPPER (4ULL*1024ULL*1024ULL*1024ULL) /* 4 GiB */

/* This is the keep_free value when we can't determine the system
 * size */
#define DEFAULT_KEEP_FREE (1024ULL*1024ULL)                    /* 1 MiB */

/* This is the default maximum number of journal files to keep around. */
#define DEFAULT_N_MAX_FILES (100)

/* n_data was the first entry we added after the initial file format design */
#define HEADER_SIZE_MIN ALIGN64(offsetof(Header, n_data))

/* How many entries to keep in the entry array chain cache at max */
#define CHAIN_CACHE_MAX 20

/* How much to increase the journal file size at once each time we allocate something new. */
#define FILE_SIZE_INCREASE (8ULL*1024ULL*1024ULL)              /* 8 MiB */

/* Reread fstat() of the file for detecting deletions at least this often */
#define LAST_STAT_REFRESH_USEC (5*USEC_PER_SEC)

/* The mmap context to use for the header we pick as one above the last defined object type */
#define CONTEXT_HEADER _OBJECT_TYPE_MAX
static int journal_file_set_online(JournalFile *f) {

        if (!(f->fd >= 0 && f->header))

        if (mmap_cache_got_sigbus(f->mmap, f->fd))

        switch (f->header->state) {

                        f->header->state = STATE_ONLINE;

int journal_file_set_offline(JournalFile *f) {

        if (!(f->fd >= 0 && f->header))

        if (f->header->state != STATE_ONLINE)

        if (mmap_cache_got_sigbus(f->mmap, f->fd))

        f->header->state = STATE_OFFLINE;

        if (mmap_cache_got_sigbus(f->mmap, f->fd))
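/* A note on the state field used above: a writer flips the header to
 * STATE_ONLINE before appending and back to STATE_OFFLINE on clean shutdown,
 * so a file that is still ONLINE when it is opened was not closed cleanly;
 * journal_file_verify_header() further down logs exactly that case. */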
JournalFile* journal_file_close(JournalFile *f) {

        /* Write the final tag */
        if (f->seal && f->writable)
                journal_file_append_tag(f);

        if (f->post_change_timer) {
                if (sd_event_source_get_enabled(f->post_change_timer, &enabled) >= 0)
                        if (enabled == SD_EVENT_ONESHOT)
                                journal_file_post_change(f);
                (void) sd_event_source_set_enabled(f->post_change_timer, SD_EVENT_OFF);
                sd_event_source_unref(f->post_change_timer);

        journal_file_set_offline(f);

        if (f->mmap && f->fd >= 0)
                mmap_cache_close_fd(f->mmap, f->fd);

        if (f->fd >= 0 && f->defrag_on_close) {

                /* Be friendly to btrfs: turn COW back on again now,
                 * and defragment the file. We won't write to the file
                 * ever again, hence remove all fragmentation, and
                 * reenable all the good bits COW usually provides
                 * (such as data checksumming). */

                (void) chattr_fd(f->fd, 0, FS_NOCOW_FL);
                (void) btrfs_defrag_fd(f->fd);

        mmap_cache_unref(f->mmap);

        ordered_hashmap_free_free(f->chain_cache);

#if defined(HAVE_XZ) || defined(HAVE_LZ4)
        free(f->compress_buffer);

        munmap(f->fss_file, PAGE_ALIGN(f->fss_file_size));

        free(f->fsprg_state);

        gcry_md_close(f->hmac);
static int journal_file_init_header(JournalFile *f, JournalFile *template) {

        memcpy(h.signature, HEADER_SIGNATURE, 8);
        h.header_size = htole64(ALIGN64(sizeof(h)));

        h.incompatible_flags |= htole32(
                        f->compress_xz * HEADER_INCOMPATIBLE_COMPRESSED_XZ |
                        f->compress_lz4 * HEADER_INCOMPATIBLE_COMPRESSED_LZ4);

        h.compatible_flags = htole32(
                        f->seal * HEADER_COMPATIBLE_SEALED);

        r = sd_id128_randomize(&h.file_id);

                h.seqnum_id = template->header->seqnum_id;
                h.tail_entry_seqnum = template->header->tail_entry_seqnum;

                h.seqnum_id = h.file_id;

        k = pwrite(f->fd, &h, sizeof(h), 0);

static int journal_file_refresh_header(JournalFile *f) {

        r = sd_id128_get_machine(&f->header->machine_id);

        r = sd_id128_get_boot(&boot_id);

        if (sd_id128_equal(boot_id, f->header->boot_id))
                f->tail_entry_monotonic_valid = true;

        f->header->boot_id = boot_id;

        r = journal_file_set_online(f);

        /* Sync the online state to disk */
static int journal_file_verify_header(JournalFile *f) {

        if (memcmp(f->header->signature, HEADER_SIGNATURE, 8))

        /* In both read and write mode we refuse to open files with
         * incompatible flags we don't know */
        flags = le32toh(f->header->incompatible_flags);
        if (flags & ~HEADER_INCOMPATIBLE_SUPPORTED) {
                if (flags & ~HEADER_INCOMPATIBLE_ANY)
                        log_debug("Journal file %s has unknown incompatible flags %"PRIx32,
                                  f->path, flags & ~HEADER_INCOMPATIBLE_ANY);
                flags = (flags & HEADER_INCOMPATIBLE_ANY) & ~HEADER_INCOMPATIBLE_SUPPORTED;
                        log_debug("Journal file %s uses incompatible flags %"PRIx32
                                  " disabled at compilation time.", f->path, flags);
                return -EPROTONOSUPPORT;

        /* When open for writing we refuse to open files with
         * compatible flags, too */
        flags = le32toh(f->header->compatible_flags);
        if (f->writable && (flags & ~HEADER_COMPATIBLE_SUPPORTED)) {
                if (flags & ~HEADER_COMPATIBLE_ANY)
                        log_debug("Journal file %s has unknown compatible flags %"PRIx32,
                                  f->path, flags & ~HEADER_COMPATIBLE_ANY);
                flags = (flags & HEADER_COMPATIBLE_ANY) & ~HEADER_COMPATIBLE_SUPPORTED;
                        log_debug("Journal file %s uses compatible flags %"PRIx32
                                  " disabled at compilation time.", f->path, flags);
                return -EPROTONOSUPPORT;

        if (f->header->state >= _STATE_MAX)

        /* The first addition was n_data, so check that we are at least this large */
        if (le64toh(f->header->header_size) < HEADER_SIZE_MIN)

        if (JOURNAL_HEADER_SEALED(f->header) && !JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays))

        if ((le64toh(f->header->header_size) + le64toh(f->header->arena_size)) > (uint64_t) f->last_stat.st_size)

        if (le64toh(f->header->tail_object_offset) > (le64toh(f->header->header_size) + le64toh(f->header->arena_size)))

        if (!VALID64(le64toh(f->header->data_hash_table_offset)) ||
            !VALID64(le64toh(f->header->field_hash_table_offset)) ||
            !VALID64(le64toh(f->header->tail_object_offset)) ||
            !VALID64(le64toh(f->header->entry_array_offset)))

                sd_id128_t machine_id;

                r = sd_id128_get_machine(&machine_id);

                if (!sd_id128_equal(machine_id, f->header->machine_id))

                state = f->header->state;

                if (state == STATE_ONLINE) {
                        log_debug("Journal file %s is already online. Assuming unclean closing.", f->path);
                } else if (state == STATE_ARCHIVED)
                else if (state != STATE_OFFLINE) {
                        log_debug("Journal file %s has unknown state %i.", f->path, state);

        f->compress_xz = JOURNAL_HEADER_COMPRESSED_XZ(f->header);
        f->compress_lz4 = JOURNAL_HEADER_COMPRESSED_LZ4(f->header);

        f->seal = JOURNAL_HEADER_SEALED(f->header);
static int journal_file_fstat(JournalFile *f) {

        if (fstat(f->fd, &f->last_stat) < 0)

        f->last_stat_usec = now(CLOCK_MONOTONIC);

        /* Refuse appending to files that are already deleted */
        if (f->last_stat.st_nlink <= 0)
static int journal_file_allocate(JournalFile *f, uint64_t offset, uint64_t size) {
        uint64_t old_size, new_size;

        /* We assume that this file is not sparse, and we know that
         * for sure, since we always call posix_fallocate()
         * ourselves */

        if (mmap_cache_got_sigbus(f->mmap, f->fd))

        old_size =
                le64toh(f->header->header_size) +
                le64toh(f->header->arena_size);

        new_size = PAGE_ALIGN(offset + size);
        if (new_size < le64toh(f->header->header_size))
                new_size = le64toh(f->header->header_size);

        if (new_size <= old_size) {

                /* We already pre-allocated enough space, but before
                 * we write to it, let's check with fstat() if the
                 * file got deleted, in order to make sure we don't throw
                 * away the data immediately. Don't check fstat() for
                 * all writes though, but only once every 10s. */

                if (f->last_stat_usec + LAST_STAT_REFRESH_USEC > now(CLOCK_MONOTONIC))

                return journal_file_fstat(f);

        /* Allocate more space. */

        if (f->metrics.max_size > 0 && new_size > f->metrics.max_size)

        if (new_size > f->metrics.min_size && f->metrics.keep_free > 0) {

                if (fstatvfs(f->fd, &svfs) >= 0) {

                        available = LESS_BY((uint64_t) svfs.f_bfree * (uint64_t) svfs.f_bsize, f->metrics.keep_free);

                        if (new_size - old_size > available)

        /* Increase by larger blocks at once */
        new_size = ((new_size+FILE_SIZE_INCREASE-1) / FILE_SIZE_INCREASE) * FILE_SIZE_INCREASE;
        if (f->metrics.max_size > 0 && new_size > f->metrics.max_size)
                new_size = f->metrics.max_size;

        /* Note that the glibc fallocate() fallback is very
           inefficient, hence we try to minimize the allocation area
           as we can. */
        r = posix_fallocate(f->fd, old_size, new_size - old_size);

        f->header->arena_size = htole64(new_size - le64toh(f->header->header_size));

        return journal_file_fstat(f);
static unsigned type_to_context(ObjectType type) {
        /* One context for each type, plus one catch-all for the rest */
        assert_cc(_OBJECT_TYPE_MAX <= MMAP_CACHE_MAX_CONTEXTS);
        assert_cc(CONTEXT_HEADER < MMAP_CACHE_MAX_CONTEXTS);

        return type > OBJECT_UNUSED && type < _OBJECT_TYPE_MAX ? type : 0;
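/* The apparent intent of the mapping above, as far as this file shows: every
 * object type gets its own mmap-cache context and the header gets
 * CONTEXT_HEADER, one past the last type, so accesses of different kinds keep
 * separate mapping windows; unknown or unused types all fall back to
 * context 0. */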
static int journal_file_move_to(JournalFile *f, ObjectType type, bool keep_always, uint64_t offset, uint64_t size, void **ret) {

        /* Avoid SIGBUS on invalid accesses */
        if (offset + size > (uint64_t) f->last_stat.st_size) {
                /* Hmm, out of range? Let's refresh the fstat() data
                 * first, before we trust that check. */

                r = journal_file_fstat(f);

                if (offset + size > (uint64_t) f->last_stat.st_size)
                        return -EADDRNOTAVAIL;

        return mmap_cache_get(f->mmap, f->fd, f->prot, type_to_context(type), keep_always, offset, size, &f->last_stat, ret);
static uint64_t minimum_header_size(Object *o) {

        static const uint64_t table[] = {
                [OBJECT_DATA] = sizeof(DataObject),
                [OBJECT_FIELD] = sizeof(FieldObject),
                [OBJECT_ENTRY] = sizeof(EntryObject),
                [OBJECT_DATA_HASH_TABLE] = sizeof(HashTableObject),
                [OBJECT_FIELD_HASH_TABLE] = sizeof(HashTableObject),
                [OBJECT_ENTRY_ARRAY] = sizeof(EntryArrayObject),
                [OBJECT_TAG] = sizeof(TagObject),
        };

        if (o->object.type >= ELEMENTSOF(table) || table[o->object.type] <= 0)
                return sizeof(ObjectHeader);

        return table[o->object.type];
int journal_file_move_to_object(JournalFile *f, ObjectType type, uint64_t offset, Object **ret) {

        /* Objects may only be located at multiples of 64 bit */
        if (!VALID64(offset))

        r = journal_file_move_to(f, type, false, offset, sizeof(ObjectHeader), &t);

        s = le64toh(o->object.size);

        if (s < sizeof(ObjectHeader))

        if (o->object.type <= OBJECT_UNUSED)

        if (s < minimum_header_size(o))

        if (type > OBJECT_UNUSED && o->object.type != type)

        if (s > sizeof(ObjectHeader)) {
                r = journal_file_move_to(f, type, false, offset, s, &t);
static uint64_t journal_file_entry_seqnum(JournalFile *f, uint64_t *seqnum) {

        r = le64toh(f->header->tail_entry_seqnum) + 1;

                /* If an external seqnum counter was passed, we update
                 * both the local and the external one, and set it to
                 * the maximum of both */

        f->header->tail_entry_seqnum = htole64(r);

        if (f->header->head_entry_seqnum == 0)
                f->header->head_entry_seqnum = htole64(r);
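/* Illustration of the seqnum rule described above (numbers are made up): with
 * tail_entry_seqnum == 41 in the header and an external counter of 45 passed
 * in, the "maximum of both" rule assigns the new entry seqnum 46 and both
 * counters end up at 46, keeping seqnums strictly increasing across files
 * that share a seqnum_id. */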
int journal_file_append_object(JournalFile *f, ObjectType type, uint64_t size, Object **ret, uint64_t *offset) {

        assert(type > OBJECT_UNUSED && type < _OBJECT_TYPE_MAX);
        assert(size >= sizeof(ObjectHeader));

        r = journal_file_set_online(f);

        p = le64toh(f->header->tail_object_offset);
                p = le64toh(f->header->header_size);

                r = journal_file_move_to_object(f, OBJECT_UNUSED, p, &tail);

                p += ALIGN64(le64toh(tail->object.size));

        r = journal_file_allocate(f, p, size);

        r = journal_file_move_to(f, type, false, p, size, &t);

        o->object.type = type;
        o->object.size = htole64(size);

        f->header->tail_object_offset = htole64(p);
        f->header->n_objects = htole64(le64toh(f->header->n_objects) + 1);
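/* This append path is what keeps journal files strictly append-only: the new
 * object starts at the 64-bit-aligned end of the current tail object (or
 * right after the header in an empty file), space is reserved through
 * journal_file_allocate(), and only then are tail_object_offset and
 * n_objects advanced in the header. */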
static int journal_file_setup_data_hash_table(JournalFile *f) {

        /* We estimate that we need 1 hash table entry per 768 bytes
           of journal file and we want to make sure we never get
           beyond 75% fill level. Calculate the hash table size for
           the maximum file size based on these metrics. */

        s = (f->metrics.max_size * 4 / 768 / 3) * sizeof(HashItem);
        if (s < DEFAULT_DATA_HASH_TABLE_SIZE)
                s = DEFAULT_DATA_HASH_TABLE_SIZE;

        log_debug("Reserving %"PRIu64" entries in hash table.", s / sizeof(HashItem));

        r = journal_file_append_object(f,
                                       OBJECT_DATA_HASH_TABLE,
                                       offsetof(Object, hash_table.items) + s,

        memzero(o->hash_table.items, s);

        f->header->data_hash_table_offset = htole64(p + offsetof(Object, hash_table.items));
        f->header->data_hash_table_size = htole64(s);
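/* Worked example for the sizing formula above, assuming a 16-byte HashItem:
 * with metrics.max_size = 128 MiB, s = (134217728 * 4 / 768 / 3) * 16, i.e.
 * roughly 233k buckets (~3.6 MiB of table). That is one bucket per ~576
 * bytes of file, so a file that really does contain one data object per 768
 * bytes lands at about 75% fill, matching the comment. */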
static int journal_file_setup_field_hash_table(JournalFile *f) {

        /* We use a fixed size hash table for the fields as this
         * number should grow very slowly only */

        s = DEFAULT_FIELD_HASH_TABLE_SIZE;
        r = journal_file_append_object(f,
                                       OBJECT_FIELD_HASH_TABLE,
                                       offsetof(Object, hash_table.items) + s,

        memzero(o->hash_table.items, s);

        f->header->field_hash_table_offset = htole64(p + offsetof(Object, hash_table.items));
        f->header->field_hash_table_size = htole64(s);
int journal_file_map_data_hash_table(JournalFile *f) {

        if (f->data_hash_table)

        p = le64toh(f->header->data_hash_table_offset);
        s = le64toh(f->header->data_hash_table_size);

        r = journal_file_move_to(f,
                                 OBJECT_DATA_HASH_TABLE,

        f->data_hash_table = t;

int journal_file_map_field_hash_table(JournalFile *f) {

        if (f->field_hash_table)

        p = le64toh(f->header->field_hash_table_offset);
        s = le64toh(f->header->field_hash_table_size);

        r = journal_file_move_to(f,
                                 OBJECT_FIELD_HASH_TABLE,

        f->field_hash_table = t;
static int journal_file_link_field(

        assert(f->field_hash_table);

        if (o->object.type != OBJECT_FIELD)

        m = le64toh(f->header->field_hash_table_size) / sizeof(HashItem);

        /* This might alter the window we are looking at */
        o->field.next_hash_offset = o->field.head_data_offset = 0;

        p = le64toh(f->field_hash_table[h].tail_hash_offset);

                f->field_hash_table[h].head_hash_offset = htole64(offset);

                r = journal_file_move_to_object(f, OBJECT_FIELD, p, &o);

                o->field.next_hash_offset = htole64(offset);

        f->field_hash_table[h].tail_hash_offset = htole64(offset);

        if (JOURNAL_HEADER_CONTAINS(f->header, n_fields))
                f->header->n_fields = htole64(le64toh(f->header->n_fields) + 1);
static int journal_file_link_data(

        assert(f->data_hash_table);

        if (o->object.type != OBJECT_DATA)

        m = le64toh(f->header->data_hash_table_size) / sizeof(HashItem);

        /* This might alter the window we are looking at */
        o->data.next_hash_offset = o->data.next_field_offset = 0;
        o->data.entry_offset = o->data.entry_array_offset = 0;
        o->data.n_entries = 0;

        p = le64toh(f->data_hash_table[h].tail_hash_offset);

                /* Only entry in the hash table is easy */
                f->data_hash_table[h].head_hash_offset = htole64(offset);

                /* Move back to the previous data object, to patch in
                 * pointer */

                r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);

                o->data.next_hash_offset = htole64(offset);

        f->data_hash_table[h].tail_hash_offset = htole64(offset);

        if (JOURNAL_HEADER_CONTAINS(f->header, n_data))
                f->header->n_data = htole64(le64toh(f->header->n_data) + 1);
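/* Both link helpers above use the same open-hashing layout: bucket h of the
 * on-disk table stores head and tail offsets, objects that collide in a
 * bucket are chained through their next_hash_offset field, and a newly added
 * object is always appended at the tail of its bucket's chain. */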
int journal_file_find_field_object_with_hash(
                const void *field, uint64_t size, uint64_t hash,
                Object **ret, uint64_t *offset) {

        uint64_t p, osize, h, m;

        assert(field && size > 0);

        /* If the field hash table is empty, we can't find anything */
        if (le64toh(f->header->field_hash_table_size) <= 0)

        /* Map the field hash table, if it isn't mapped yet. */
        r = journal_file_map_field_hash_table(f);

        osize = offsetof(Object, field.payload) + size;

        m = le64toh(f->header->field_hash_table_size) / sizeof(HashItem);

        p = le64toh(f->field_hash_table[h].head_hash_offset);

                r = journal_file_move_to_object(f, OBJECT_FIELD, p, &o);

                if (le64toh(o->field.hash) == hash &&
                    le64toh(o->object.size) == osize &&
                    memcmp(o->field.payload, field, size) == 0) {

                p = le64toh(o->field.next_hash_offset);

int journal_file_find_field_object(
                const void *field, uint64_t size,
                Object **ret, uint64_t *offset) {

        assert(field && size > 0);

        hash = hash64(field, size);

        return journal_file_find_field_object_with_hash(f,
int journal_file_find_data_object_with_hash(
                const void *data, uint64_t size, uint64_t hash,
                Object **ret, uint64_t *offset) {

        uint64_t p, osize, h, m;

        assert(data || size == 0);

        /* If there's no data hash table, then there's no entry. */
        if (le64toh(f->header->data_hash_table_size) <= 0)

        /* Map the data hash table, if it isn't mapped yet. */
        r = journal_file_map_data_hash_table(f);

        osize = offsetof(Object, data.payload) + size;

        m = le64toh(f->header->data_hash_table_size) / sizeof(HashItem);

        p = le64toh(f->data_hash_table[h].head_hash_offset);

                r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);

                if (le64toh(o->data.hash) != hash)

                if (o->object.flags & OBJECT_COMPRESSION_MASK) {
#if defined(HAVE_XZ) || defined(HAVE_LZ4)

                        l = le64toh(o->object.size);
                        if (l <= offsetof(Object, data.payload))

                        l -= offsetof(Object, data.payload);

                        r = decompress_blob(o->object.flags & OBJECT_COMPRESSION_MASK,
                                            o->data.payload, l, &f->compress_buffer, &f->compress_buffer_size, &rsize, 0);

                            memcmp(f->compress_buffer, data, size) == 0) {

                        return -EPROTONOSUPPORT;

                } else if (le64toh(o->object.size) == osize &&
                           memcmp(o->data.payload, data, size) == 0) {

                p = le64toh(o->data.next_hash_offset);

int journal_file_find_data_object(
                const void *data, uint64_t size,
                Object **ret, uint64_t *offset) {

        assert(data || size == 0);

        hash = hash64(data, size);

        return journal_file_find_data_object_with_hash(f,
static int journal_file_append_field(
                const void *field, uint64_t size,
                Object **ret, uint64_t *offset) {

        assert(field && size > 0);

        hash = hash64(field, size);

        r = journal_file_find_field_object_with_hash(f, field, size, hash, &o, &p);

        osize = offsetof(Object, field.payload) + size;
        r = journal_file_append_object(f, OBJECT_FIELD, osize, &o, &p);

        o->field.hash = htole64(hash);
        memcpy(o->field.payload, field, size);

        r = journal_file_link_field(f, o, p, hash);

        /* The linking might have altered the window, so let's
         * refresh our pointer */
        r = journal_file_move_to_object(f, OBJECT_FIELD, p, &o);

        r = journal_file_hmac_put_object(f, OBJECT_FIELD, o, p);
static int journal_file_append_data(
                const void *data, uint64_t size,
                Object **ret, uint64_t *offset) {

        int r, compression = 0;

        assert(data || size == 0);

        hash = hash64(data, size);

        r = journal_file_find_data_object_with_hash(f, data, size, hash, &o, &p);

        osize = offsetof(Object, data.payload) + size;
        r = journal_file_append_object(f, OBJECT_DATA, osize, &o, &p);

        o->data.hash = htole64(hash);

#if defined(HAVE_XZ) || defined(HAVE_LZ4)
        if (JOURNAL_FILE_COMPRESS(f) && size >= COMPRESSION_SIZE_THRESHOLD) {

                compression = compress_blob(data, size, o->data.payload, size - 1, &rsize);

                if (compression >= 0) {
                        o->object.size = htole64(offsetof(Object, data.payload) + rsize);
                        o->object.flags |= compression;

                        log_debug("Compressed data object %"PRIu64" -> %zu using %s",
                                  size, rsize, object_compressed_to_string(compression));

                /* Compression didn't work, we don't really care why, let's continue without compression */

        if (compression == 0 && size > 0)
                memcpy(o->data.payload, data, size);

        r = journal_file_link_data(f, o, p, hash);

        /* The linking might have altered the window, so let's
         * refresh our pointer */
        r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);

        eq = memchr(data, '=', size);
        if (eq && eq > data) {

                /* Create field object ... */
                r = journal_file_append_field(f, data, (uint8_t*) eq - (uint8_t*) data, &fo, &fp);

                /* ... and link it in. */
                o->data.next_field_offset = fo->field.head_data_offset;
                fo->field.head_data_offset = le64toh(p);

        r = journal_file_hmac_put_object(f, OBJECT_DATA, o, p);
uint64_t journal_file_entry_n_items(Object *o) {

        if (o->object.type != OBJECT_ENTRY)

        return (le64toh(o->object.size) - offsetof(Object, entry.items)) / sizeof(EntryItem);

uint64_t journal_file_entry_array_n_items(Object *o) {

        if (o->object.type != OBJECT_ENTRY_ARRAY)

        return (le64toh(o->object.size) - offsetof(Object, entry_array.items)) / sizeof(uint64_t);

uint64_t journal_file_hash_table_n_items(Object *o) {

        if (o->object.type != OBJECT_DATA_HASH_TABLE &&
            o->object.type != OBJECT_FIELD_HASH_TABLE)

        return (le64toh(o->object.size) - offsetof(Object, hash_table.items)) / sizeof(HashItem);
static int link_entry_into_array(JournalFile *f,

        uint64_t n = 0, ap = 0, q, i, a, hidx;

        a = le64toh(*first);
        i = hidx = le64toh(*idx);

                r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o);

                n = journal_file_entry_array_n_items(o);

                        o->entry_array.items[i] = htole64(p);
                        *idx = htole64(hidx + 1);

                a = le64toh(o->entry_array.next_entry_array_offset);

        r = journal_file_append_object(f, OBJECT_ENTRY_ARRAY,
                                       offsetof(Object, entry_array.items) + n * sizeof(uint64_t),

        r = journal_file_hmac_put_object(f, OBJECT_ENTRY_ARRAY, o, q);

        o->entry_array.items[i] = htole64(p);

                *first = htole64(q);

                r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, ap, &o);

                o->entry_array.next_entry_array_offset = htole64(q);

        if (JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays))
                f->header->n_entry_arrays = htole64(le64toh(f->header->n_entry_arrays) + 1);

        *idx = htole64(hidx + 1);
static int link_entry_into_array_plus_one(JournalFile *f,

                *extra = htole64(p);

                i = htole64(le64toh(*idx) - 1);
                r = link_entry_into_array(f, first, &i, p);

        *idx = htole64(le64toh(*idx) + 1);
static int journal_file_link_entry_item(JournalFile *f, Object *o, uint64_t offset, uint64_t i) {

        p = le64toh(o->entry.items[i].object_offset);

        r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);

        return link_entry_into_array_plus_one(f,
                                              &o->data.entry_offset,
                                              &o->data.entry_array_offset,
static int journal_file_link_entry(JournalFile *f, Object *o, uint64_t offset) {

        if (o->object.type != OBJECT_ENTRY)

        __sync_synchronize();

        /* Link up the entry itself */
        r = link_entry_into_array(f,
                                  &f->header->entry_array_offset,
                                  &f->header->n_entries,

        /* log_debug("=> %s seqnr=%"PRIu64" n_entries=%"PRIu64, f->path, o->entry.seqnum, f->header->n_entries); */

        if (f->header->head_entry_realtime == 0)
                f->header->head_entry_realtime = o->entry.realtime;

        f->header->tail_entry_realtime = o->entry.realtime;
        f->header->tail_entry_monotonic = o->entry.monotonic;

        f->tail_entry_monotonic_valid = true;

        /* Link up the items */
        n = journal_file_entry_n_items(o);
        for (i = 0; i < n; i++) {
                r = journal_file_link_entry_item(f, o, offset, i);
static int journal_file_append_entry_internal(
                const dual_timestamp *ts,
                const EntryItem items[], unsigned n_items,
                Object **ret, uint64_t *offset) {

        assert(items || n_items == 0);

        osize = offsetof(Object, entry.items) + (n_items * sizeof(EntryItem));

        r = journal_file_append_object(f, OBJECT_ENTRY, osize, &o, &np);

        o->entry.seqnum = htole64(journal_file_entry_seqnum(f, seqnum));
        memcpy(o->entry.items, items, n_items * sizeof(EntryItem));
        o->entry.realtime = htole64(ts->realtime);
        o->entry.monotonic = htole64(ts->monotonic);
        o->entry.xor_hash = htole64(xor_hash);
        o->entry.boot_id = f->header->boot_id;

        r = journal_file_hmac_put_object(f, OBJECT_ENTRY, o, np);

        r = journal_file_link_entry(f, o, np);
void journal_file_post_change(JournalFile *f) {

        /* inotify() does not receive IN_MODIFY events from file
         * accesses done via mmap(). After each access we hence
         * trigger IN_MODIFY by truncating the journal file to its
         * current size which triggers IN_MODIFY. */

        __sync_synchronize();

        if (ftruncate(f->fd, f->last_stat.st_size) < 0)
                log_debug_errno(errno, "Failed to truncate file to its own size: %m");

static int post_change_thunk(sd_event_source *timer, uint64_t usec, void *userdata) {

        journal_file_post_change(userdata);

static void schedule_post_change(JournalFile *f) {
        sd_event_source *timer;

        assert(f->post_change_timer);

        timer = f->post_change_timer;

        r = sd_event_source_get_enabled(timer, &enabled);
                log_debug_errno(r, "Failed to get ftruncate timer state: %m");

        if (enabled == SD_EVENT_ONESHOT)

        r = sd_event_now(sd_event_source_get_event(timer), CLOCK_MONOTONIC, &now);
                log_debug_errno(r, "Failed to get clock's now for scheduling ftruncate: %m");

        r = sd_event_source_set_time(timer, now+f->post_change_timer_period);
                log_debug_errno(r, "Failed to set time for scheduling ftruncate: %m");

        r = sd_event_source_set_enabled(timer, SD_EVENT_ONESHOT);
                log_debug_errno(r, "Failed to enable scheduled ftruncate: %m");

        /* On failure, let's simply post the change immediately. */
        journal_file_post_change(f);
/* Enable coalesced change posting in a timer on the provided sd_event instance */
int journal_file_enable_post_change_timer(JournalFile *f, sd_event *e, usec_t t) {
        _cleanup_(sd_event_source_unrefp) sd_event_source *timer = NULL;

        assert_return(!f->post_change_timer, -EINVAL);

        r = sd_event_add_time(e, &timer, CLOCK_MONOTONIC, 0, 0, post_change_thunk, f);

        r = sd_event_source_set_enabled(timer, SD_EVENT_OFF);

        f->post_change_timer = timer;
        f->post_change_timer_period = t;
static int entry_item_cmp(const void *_a, const void *_b) {
        const EntryItem *a = _a, *b = _b;

        if (le64toh(a->object_offset) < le64toh(b->object_offset))
        if (le64toh(a->object_offset) > le64toh(b->object_offset))

int journal_file_append_entry(JournalFile *f, const dual_timestamp *ts, const struct iovec iovec[], unsigned n_iovec, uint64_t *seqnum, Object **ret, uint64_t *offset) {

        uint64_t xor_hash = 0;
        struct dual_timestamp _ts;

        assert(iovec || n_iovec == 0);

        dual_timestamp_get(&_ts);

        r = journal_file_maybe_append_tag(f, ts->realtime);

        /* alloca() can't take 0, hence let's allocate at least one */
        items = alloca(sizeof(EntryItem) * MAX(1u, n_iovec));

        for (i = 0; i < n_iovec; i++) {

                r = journal_file_append_data(f, iovec[i].iov_base, iovec[i].iov_len, &o, &p);

                xor_hash ^= le64toh(o->data.hash);
                items[i].object_offset = htole64(p);
                items[i].hash = o->data.hash;

        /* Order by the position on disk, in order to improve seek
         * times for rotating media. */
        qsort_safe(items, n_iovec, sizeof(EntryItem), entry_item_cmp);

        r = journal_file_append_entry_internal(f, ts, xor_hash, items, n_iovec, seqnum, ret, offset);

        /* If the memory mapping triggered a SIGBUS then we return an
         * IO error and ignore the error code passed down to us, since
         * it is very likely just an effect of a nullified replacement
         * mapping page. */

        if (mmap_cache_got_sigbus(f->mmap, f->fd))

        if (f->post_change_timer)
                schedule_post_change(f);
        else
                journal_file_post_change(f);
typedef struct ChainCacheItem {
        uint64_t first;      /* the array at the beginning of the chain */
        uint64_t array;      /* the cached array */
        uint64_t begin;      /* the first item in the cached array */
        uint64_t total;      /* the total number of items in all arrays before this one in the chain */
        uint64_t last_index; /* the last index we looked at, to optimize locality when bisecting */

static void chain_cache_put(
                uint64_t last_index) {

        /* If the chain item to cache for this chain is the
         * first one it's not worth caching anything */

        if (ordered_hashmap_size(h) >= CHAIN_CACHE_MAX) {
                ci = ordered_hashmap_steal_first(h);

        ci = new(ChainCacheItem, 1);

        if (ordered_hashmap_put(h, &ci->first, ci) < 0) {

        assert(ci->first == first);

        ci->last_index = last_index;
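/* The chain cache, keyed by 'first', lets a later lookup in the same entry
 * array chain resume from the cached array, running total and index instead
 * of walking the chain from its head again; CHAIN_CACHE_MAX (20) bounds the
 * cache and the oldest cached chain is evicted first. */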
static int generic_array_get(
                Object **ret, uint64_t *offset) {

        uint64_t p = 0, a, t = 0;

        /* Try the chain cache first */
        ci = ordered_hashmap_get(f->chain_cache, &first);
        if (ci && i > ci->total) {

                r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o);

                k = journal_file_entry_array_n_items(o);
                        p = le64toh(o->entry_array.items[i]);

                a = le64toh(o->entry_array.next_entry_array_offset);

        /* Let's cache this item for the next invocation */
        chain_cache_put(f->chain_cache, ci, first, a, le64toh(o->entry_array.items[0]), t, i);

        r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);

static int generic_array_get_plus_one(
                Object **ret, uint64_t *offset) {

        r = journal_file_move_to_object(f, OBJECT_ENTRY, extra, &o);

        return generic_array_get(f, first, i-1, ret, offset);
static int generic_array_bisect(
                int (*test_object)(JournalFile *f, uint64_t p, uint64_t needle),
                direction_t direction,

        uint64_t a, p, t = 0, i = 0, last_p = 0, last_index = (uint64_t) -1;
        bool subtract_one = false;
        Object *o, *array = NULL;

        assert(test_object);

        /* Start with the first array in the chain */

        ci = ordered_hashmap_get(f->chain_cache, &first);
        if (ci && n > ci->total) {
                /* Ah, we have iterated this bisection array chain
                 * previously! Let's see if we can skip ahead in the
                 * chain, as far as the last time. But we can't jump
                 * backwards in the chain, so let's check that
                 * first. */

                r = test_object(f, ci->begin, needle);

                if (r == TEST_LEFT) {
                        /* OK, what we are looking for is right of the
                         * begin of this EntryArray, so let's jump
                         * straight to previously cached array in the
                         * chain */

                        last_index = ci->last_index;

                uint64_t left, right, k, lp;

                r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &array);

                k = journal_file_entry_array_n_items(array);

                        lp = p = le64toh(array->entry_array.items[i]);

                        r = test_object(f, p, needle);

                        if (r == TEST_FOUND)
                                r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;

                        if (r == TEST_RIGHT) {

                                if (last_index != (uint64_t) -1) {
                                        assert(last_index <= right);

                                        /* If we cached the last index we
                                         * looked at, let's try not to jump
                                         * too wildly around and see if we can
                                         * limit the range to look at early to
                                         * the immediate neighbors of the last
                                         * index we looked at. */

                                        if (last_index > 0) {
                                                uint64_t x = last_index - 1;

                                                p = le64toh(array->entry_array.items[x]);

                                                r = test_object(f, p, needle);

                                                if (r == TEST_FOUND)
                                                        r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;

                                                if (r == TEST_RIGHT)

                                        if (last_index < right) {
                                                uint64_t y = last_index + 1;

                                                p = le64toh(array->entry_array.items[y]);

                                                r = test_object(f, p, needle);

                                                if (r == TEST_FOUND)
                                                        r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;

                                                if (r == TEST_RIGHT)

                                        if (left == right) {
                                                if (direction == DIRECTION_UP)
                                                        subtract_one = true;

                                        assert(left < right);
                                        i = (left + right) / 2;

                                        p = le64toh(array->entry_array.items[i]);

                                        r = test_object(f, p, needle);

                                        if (r == TEST_FOUND)
                                                r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;

                                        if (r == TEST_RIGHT)

                if (direction == DIRECTION_UP) {
                        subtract_one = true;

                last_index = (uint64_t) -1;
                a = le64toh(array->entry_array.next_entry_array_offset);

        if (subtract_one && t == 0 && i == 0)

        /* Let's cache this item for the next invocation */
        chain_cache_put(f->chain_cache, ci, first, a, le64toh(array->entry_array.items[0]), t, subtract_one ? (i > 0 ? i-1 : (uint64_t) -1) : i);

        if (subtract_one && i == 0)
        else if (subtract_one)
                p = le64toh(array->entry_array.items[i-1]);
                p = le64toh(array->entry_array.items[i]);

        r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);

        *idx = t + i + (subtract_one ? -1 : 0);
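/* Summary of the bisection above: entries are indexed through a chain of
 * entry arrays, so the search first consults the chain cache to skip ahead
 * in the chain, then probes the immediate neighbors of the previously used
 * index, and only then falls back to a plain binary search of the current
 * array. TEST_FOUND is folded into TEST_RIGHT or TEST_LEFT depending on the
 * seek direction, which is how the same loop finds either the first or the
 * last matching entry. */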
static int generic_array_bisect_plus_one(
                int (*test_object)(JournalFile *f, uint64_t p, uint64_t needle),
                direction_t direction,

        bool step_back = false;

        assert(test_object);

        /* This bisects the array in object 'first', but first checks */
        r = test_object(f, extra, needle);

        if (r == TEST_FOUND)
                r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;

        /* if we are looking with DIRECTION_UP then we need to first
           see if in the actual array there is a matching entry, and
           return the last one of that. But if there isn't any we need
           to return this one. Hence remember this, and return it
           below. */
        step_back = direction == DIRECTION_UP;

        if (r == TEST_RIGHT) {
                if (direction == DIRECTION_DOWN)

        r = generic_array_bisect(f, first, n-1, needle, test_object, direction, ret, offset, idx);

        if (r == 0 && step_back)

        r = journal_file_move_to_object(f, OBJECT_ENTRY, extra, &o);
_pure_ static int test_object_offset(JournalFile *f, uint64_t p, uint64_t needle) {

        else if (p < needle)

static int test_object_seqnum(JournalFile *f, uint64_t p, uint64_t needle) {

        r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);

        if (le64toh(o->entry.seqnum) == needle)
        else if (le64toh(o->entry.seqnum) < needle)
int journal_file_move_to_entry_by_seqnum(
                direction_t direction,

        return generic_array_bisect(f,
                                    le64toh(f->header->entry_array_offset),
                                    le64toh(f->header->n_entries),
static int test_object_realtime(JournalFile *f, uint64_t p, uint64_t needle) {

        r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);

        if (le64toh(o->entry.realtime) == needle)
        else if (le64toh(o->entry.realtime) < needle)

int journal_file_move_to_entry_by_realtime(
                direction_t direction,

        return generic_array_bisect(f,
                                    le64toh(f->header->entry_array_offset),
                                    le64toh(f->header->n_entries),
                                    test_object_realtime,
static int test_object_monotonic(JournalFile *f, uint64_t p, uint64_t needle) {

        r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);

        if (le64toh(o->entry.monotonic) == needle)
        else if (le64toh(o->entry.monotonic) < needle)

static int find_data_object_by_boot_id(

        char t[sizeof("_BOOT_ID=")-1 + 32 + 1] = "_BOOT_ID=";

        sd_id128_to_string(boot_id, t + 9);
        return journal_file_find_data_object(f, t, sizeof(t) - 1, o, b);

int journal_file_move_to_entry_by_monotonic(
                direction_t direction,

        r = find_data_object_by_boot_id(f, boot_id, &o, NULL);

        return generic_array_bisect_plus_one(f,
                                             le64toh(o->data.entry_offset),
                                             le64toh(o->data.entry_array_offset),
                                             le64toh(o->data.n_entries),
                                             test_object_monotonic,
void journal_file_reset_location(JournalFile *f) {
        f->location_type = LOCATION_HEAD;
        f->current_offset = 0;
        f->current_seqnum = 0;
        f->current_realtime = 0;
        f->current_monotonic = 0;
        zero(f->current_boot_id);
        f->current_xor_hash = 0;

void journal_file_save_location(JournalFile *f, Object *o, uint64_t offset) {
        f->location_type = LOCATION_SEEK;
        f->current_offset = offset;
        f->current_seqnum = le64toh(o->entry.seqnum);
        f->current_realtime = le64toh(o->entry.realtime);
        f->current_monotonic = le64toh(o->entry.monotonic);
        f->current_boot_id = o->entry.boot_id;
        f->current_xor_hash = le64toh(o->entry.xor_hash);
int journal_file_compare_locations(JournalFile *af, JournalFile *bf) {

        assert(af->location_type == LOCATION_SEEK);
        assert(bf->location_type == LOCATION_SEEK);

        /* If contents and timestamps match, these entries are
         * identical, even if the seqnum does not match */
        if (sd_id128_equal(af->current_boot_id, bf->current_boot_id) &&
            af->current_monotonic == bf->current_monotonic &&
            af->current_realtime == bf->current_realtime &&
            af->current_xor_hash == bf->current_xor_hash)

        if (sd_id128_equal(af->header->seqnum_id, bf->header->seqnum_id)) {

                /* If this is from the same seqnum source, compare
                 * seqnums */
                if (af->current_seqnum < bf->current_seqnum)
                if (af->current_seqnum > bf->current_seqnum)

                /* Wow! This is weird, different data but the same
                 * seqnums? Something is borked, but let's make the
                 * best of it and compare by time. */

        if (sd_id128_equal(af->current_boot_id, bf->current_boot_id)) {

                /* If the boot id matches, compare monotonic time */
                if (af->current_monotonic < bf->current_monotonic)
                if (af->current_monotonic > bf->current_monotonic)

        /* Otherwise, compare UTC time */
        if (af->current_realtime < bf->current_realtime)
        if (af->current_realtime > bf->current_realtime)

        /* Finally, compare by contents */
        if (af->current_xor_hash < bf->current_xor_hash)
        if (af->current_xor_hash > bf->current_xor_hash)
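/* Ordering used above when interleaving several journal files: entries with
 * identical boot id, timestamps and xor_hash count as the same entry;
 * otherwise seqnums decide if both files share a seqnum_id, then monotonic
 * time if they share a boot id, then realtime time, with the xor_hash of the
 * entry contents as the final tie breaker. */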
int journal_file_next_entry(
                direction_t direction,
                Object **ret, uint64_t *offset) {

        n = le64toh(f->header->n_entries);

                i = direction == DIRECTION_DOWN ? 0 : n - 1;

                r = generic_array_bisect(f,
                                         le64toh(f->header->entry_array_offset),
                                         le64toh(f->header->n_entries),

                if (direction == DIRECTION_DOWN) {

        /* And jump to it */
        r = generic_array_get(f,
                              le64toh(f->header->entry_array_offset),

            (direction == DIRECTION_DOWN ? ofs <= p : ofs >= p)) {
                log_debug("%s: entry array corrupted at entry %"PRIu64,
int journal_file_next_entry_for_data(
                Object *o, uint64_t p,
                uint64_t data_offset,
                direction_t direction,
                Object **ret, uint64_t *offset) {

        assert(p > 0 || !o);

        r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d);

        n = le64toh(d->data.n_entries);

                i = direction == DIRECTION_DOWN ? 0 : n - 1;

                if (o->object.type != OBJECT_ENTRY)

                r = generic_array_bisect_plus_one(f,
                                                  le64toh(d->data.entry_offset),
                                                  le64toh(d->data.entry_array_offset),
                                                  le64toh(d->data.n_entries),

                if (direction == DIRECTION_DOWN) {

        return generic_array_get_plus_one(f,
                                          le64toh(d->data.entry_offset),
                                          le64toh(d->data.entry_array_offset),
int journal_file_move_to_entry_by_offset_for_data(
                uint64_t data_offset,
                direction_t direction,
                Object **ret, uint64_t *offset) {

        r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d);

        return generic_array_bisect_plus_one(f,
                                             le64toh(d->data.entry_offset),
                                             le64toh(d->data.entry_array_offset),
                                             le64toh(d->data.n_entries),
int journal_file_move_to_entry_by_monotonic_for_data(
                uint64_t data_offset,
                direction_t direction,
                Object **ret, uint64_t *offset) {

        /* First, seek by time */
        r = find_data_object_by_boot_id(f, boot_id, &o, &b);

        r = generic_array_bisect_plus_one(f,
                                          le64toh(o->data.entry_offset),
                                          le64toh(o->data.entry_array_offset),
                                          le64toh(o->data.n_entries),
                                          test_object_monotonic,

        /* And now, continue seeking until we find an entry that
         * exists in both bisection arrays */

                r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d);

                r = generic_array_bisect_plus_one(f,
                                                  le64toh(d->data.entry_offset),
                                                  le64toh(d->data.entry_array_offset),
                                                  le64toh(d->data.n_entries),

                r = journal_file_move_to_object(f, OBJECT_DATA, b, &o);

                r = generic_array_bisect_plus_one(f,
                                                  le64toh(o->data.entry_offset),
                                                  le64toh(o->data.entry_array_offset),
                                                  le64toh(o->data.n_entries),
int journal_file_move_to_entry_by_seqnum_for_data(
                uint64_t data_offset,
                direction_t direction,
                Object **ret, uint64_t *offset) {

        r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d);

        return generic_array_bisect_plus_one(f,
                                             le64toh(d->data.entry_offset),
                                             le64toh(d->data.entry_array_offset),
                                             le64toh(d->data.n_entries),

int journal_file_move_to_entry_by_realtime_for_data(
                uint64_t data_offset,
                direction_t direction,
                Object **ret, uint64_t *offset) {

        r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d);

        return generic_array_bisect_plus_one(f,
                                             le64toh(d->data.entry_offset),
                                             le64toh(d->data.entry_array_offset),
                                             le64toh(d->data.n_entries),
                                             test_object_realtime,
void journal_file_dump(JournalFile *f) {

        journal_file_print_header(f);

        p = le64toh(f->header->header_size);

                r = journal_file_move_to_object(f, OBJECT_UNUSED, p, &o);

                switch (o->object.type) {

                        printf("Type: OBJECT_UNUSED\n");

                        printf("Type: OBJECT_DATA\n");

                        printf("Type: OBJECT_FIELD\n");

                        printf("Type: OBJECT_ENTRY seqnum=%"PRIu64" monotonic=%"PRIu64" realtime=%"PRIu64"\n",
                               le64toh(o->entry.seqnum),
                               le64toh(o->entry.monotonic),
                               le64toh(o->entry.realtime));

                case OBJECT_FIELD_HASH_TABLE:
                        printf("Type: OBJECT_FIELD_HASH_TABLE\n");

                case OBJECT_DATA_HASH_TABLE:
                        printf("Type: OBJECT_DATA_HASH_TABLE\n");

                case OBJECT_ENTRY_ARRAY:
                        printf("Type: OBJECT_ENTRY_ARRAY\n");

                        printf("Type: OBJECT_TAG seqnum=%"PRIu64" epoch=%"PRIu64"\n",
                               le64toh(o->tag.seqnum),
                               le64toh(o->tag.epoch));

                        printf("Type: unknown (%i)\n", o->object.type);

                if (o->object.flags & OBJECT_COMPRESSION_MASK)
                        printf("Flags: %s\n",
                               object_compressed_to_string(o->object.flags & OBJECT_COMPRESSION_MASK));

                if (p == le64toh(f->header->tail_object_offset))

                p = p + ALIGN64(le64toh(o->object.size));

        log_error("File corrupt");

static const char* format_timestamp_safe(char *buf, size_t l, usec_t t) {

        x = format_timestamp(buf, l, t);
void journal_file_print_header(JournalFile *f) {
        char a[33], b[33], c[33], d[33];
        char x[FORMAT_TIMESTAMP_MAX], y[FORMAT_TIMESTAMP_MAX], z[FORMAT_TIMESTAMP_MAX];
        char bytes[FORMAT_BYTES_MAX];

        printf("File Path: %s\n"
               "Sequential Number ID: %s\n"
               "Compatible Flags:%s%s\n"
               "Incompatible Flags:%s%s%s\n"
               "Header size: %"PRIu64"\n"
               "Arena size: %"PRIu64"\n"
               "Data Hash Table Size: %"PRIu64"\n"
               "Field Hash Table Size: %"PRIu64"\n"
               "Rotate Suggested: %s\n"
               "Head Sequential Number: %"PRIu64"\n"
               "Tail Sequential Number: %"PRIu64"\n"
               "Head Realtime Timestamp: %s\n"
               "Tail Realtime Timestamp: %s\n"
               "Tail Monotonic Timestamp: %s\n"
               "Objects: %"PRIu64"\n"
               "Entry Objects: %"PRIu64"\n",
               sd_id128_to_string(f->header->file_id, a),
               sd_id128_to_string(f->header->machine_id, b),
               sd_id128_to_string(f->header->boot_id, c),
               sd_id128_to_string(f->header->seqnum_id, d),
               f->header->state == STATE_OFFLINE ? "OFFLINE" :
               f->header->state == STATE_ONLINE ? "ONLINE" :
               f->header->state == STATE_ARCHIVED ? "ARCHIVED" : "UNKNOWN",
               JOURNAL_HEADER_SEALED(f->header) ? " SEALED" : "",
               (le32toh(f->header->compatible_flags) & ~HEADER_COMPATIBLE_ANY) ? " ???" : "",
               JOURNAL_HEADER_COMPRESSED_XZ(f->header) ? " COMPRESSED-XZ" : "",
               JOURNAL_HEADER_COMPRESSED_LZ4(f->header) ? " COMPRESSED-LZ4" : "",
               (le32toh(f->header->incompatible_flags) & ~HEADER_INCOMPATIBLE_ANY) ? " ???" : "",
               le64toh(f->header->header_size),
               le64toh(f->header->arena_size),
               le64toh(f->header->data_hash_table_size) / sizeof(HashItem),
               le64toh(f->header->field_hash_table_size) / sizeof(HashItem),
               yes_no(journal_file_rotate_suggested(f, 0)),
               le64toh(f->header->head_entry_seqnum),
               le64toh(f->header->tail_entry_seqnum),
               format_timestamp_safe(x, sizeof(x), le64toh(f->header->head_entry_realtime)),
               format_timestamp_safe(y, sizeof(y), le64toh(f->header->tail_entry_realtime)),
               format_timespan(z, sizeof(z), le64toh(f->header->tail_entry_monotonic), USEC_PER_MSEC),
               le64toh(f->header->n_objects),
               le64toh(f->header->n_entries));

        if (JOURNAL_HEADER_CONTAINS(f->header, n_data))
                printf("Data Objects: %"PRIu64"\n"
                       "Data Hash Table Fill: %.1f%%\n",
                       le64toh(f->header->n_data),
                       100.0 * (double) le64toh(f->header->n_data) / ((double) (le64toh(f->header->data_hash_table_size) / sizeof(HashItem))));

        if (JOURNAL_HEADER_CONTAINS(f->header, n_fields))
                printf("Field Objects: %"PRIu64"\n"
                       "Field Hash Table Fill: %.1f%%\n",
                       le64toh(f->header->n_fields),
                       100.0 * (double) le64toh(f->header->n_fields) / ((double) (le64toh(f->header->field_hash_table_size) / sizeof(HashItem))));

        if (JOURNAL_HEADER_CONTAINS(f->header, n_tags))
                printf("Tag Objects: %"PRIu64"\n",
                       le64toh(f->header->n_tags));
        if (JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays))
                printf("Entry Array Objects: %"PRIu64"\n",
                       le64toh(f->header->n_entry_arrays));

        if (fstat(f->fd, &st) >= 0)
                printf("Disk usage: %s\n", format_bytes(bytes, sizeof(bytes), (uint64_t) st.st_blocks * 512ULL));
static int journal_file_warn_btrfs(JournalFile *f) {

        /* Before we write anything, check if the COW logic is turned
         * off on btrfs. Given our write pattern that is quite
         * unfriendly to COW file systems this should greatly improve
         * performance on COW file systems, such as btrfs, at the
         * expense of data integrity features (which shouldn't be too
         * bad, given that we do our own checksumming). */

        r = btrfs_is_filesystem(f->fd);
                return log_warning_errno(r, "Failed to determine if journal is on btrfs: %m");

        r = read_attr_fd(f->fd, &attrs);
                return log_warning_errno(r, "Failed to read file attributes: %m");

        if (attrs & FS_NOCOW_FL) {
                log_debug("Detected btrfs file system with copy-on-write disabled, all is good.");

        log_notice("Creating journal file %s on a btrfs file system, and copy-on-write is enabled. "
                   "This is likely to slow down journal access substantially, please consider turning "
                   "off the copy-on-write file attribute on the journal directory, using chattr +C.", f->path);
int journal_file_open(
                JournalMetrics *metrics,
                MMapCache *mmap_cache,
                JournalFile *template,
                JournalFile **ret) {

        bool newly_created = false;

        if ((flags & O_ACCMODE) != O_RDONLY &&
            (flags & O_ACCMODE) != O_RDWR)

        if (!endswith(fname, ".journal") &&
            !endswith(fname, ".journal~"))

        f = new0(JournalFile, 1);

        f->prot = prot_from_flags(flags);
        f->writable = (flags & O_ACCMODE) != O_RDONLY;
#if defined(HAVE_LZ4)
        f->compress_lz4 = compress;
#elif defined(HAVE_XZ)
        f->compress_xz = compress;

                f->mmap = mmap_cache_ref(mmap_cache);

                f->mmap = mmap_cache_new();

        f->path = strdup(fname);

        f->chain_cache = ordered_hashmap_new(&uint64_hash_ops);
        if (!f->chain_cache) {

        f->fd = open(f->path, f->flags|O_CLOEXEC, f->mode);

        r = journal_file_fstat(f);

        if (f->last_stat.st_size == 0 && f->writable) {

                (void) journal_file_warn_btrfs(f);

                /* Let's attach the creation time to the journal file,
                 * so that the vacuuming code knows the age of this
                 * file even if the file might end up corrupted one
                 * day... Ideally we'd just use the creation time many
                 * file systems maintain for each file, but there is
                 * currently no usable API to query this, hence let's
                 * emulate this via extended attributes. If extended
                 * attributes are not supported we'll just skip this,
                 * and rely solely on mtime/atime/ctime of the file. */

                fd_setcrtime(f->fd, 0);

                /* Try to load the FSPRG state, and if we can't, then
                 * just don't do sealing */

                r = journal_file_fss_load(f);

                r = journal_file_init_header(f, template);

                r = journal_file_fstat(f);

                newly_created = true;

        if (f->last_stat.st_size < (off_t) HEADER_SIZE_MIN) {

        r = mmap_cache_get(f->mmap, f->fd, f->prot, CONTEXT_HEADER, true, 0, PAGE_ALIGN(sizeof(Header)), &f->last_stat, &h);

        if (!newly_created) {
                r = journal_file_verify_header(f);

        if (!newly_created && f->writable) {
                r = journal_file_fss_load(f);

                journal_default_metrics(metrics, f->fd);
                f->metrics = *metrics;
        } else if (template)
                f->metrics = template->metrics;

        r = journal_file_refresh_header(f);

        r = journal_file_hmac_setup(f);

        if (newly_created) {
                r = journal_file_setup_field_hash_table(f);

                r = journal_file_setup_data_hash_table(f);

                r = journal_file_append_first_tag(f);

        if (mmap_cache_got_sigbus(f->mmap, f->fd)) {

        if (template && template->post_change_timer) {
                r = journal_file_enable_post_change_timer(
                                sd_event_source_get_event(template->post_change_timer),
                                template->post_change_timer_period);

        if (f->fd >= 0 && mmap_cache_got_sigbus(f->mmap, f->fd))

        journal_file_close(f);
int journal_file_rotate(JournalFile **f, bool compress, bool seal) {
        _cleanup_free_ char *p = NULL;
        JournalFile *old_file, *new_file = NULL;

        if (!old_file->writable)

        if (!endswith(old_file->path, ".journal"))

        l = strlen(old_file->path);
        r = asprintf(&p, "%.*s@" SD_ID128_FORMAT_STR "-%016"PRIx64"-%016"PRIx64".journal",
                     (int) l - 8, old_file->path,
                     SD_ID128_FORMAT_VAL(old_file->header->seqnum_id),
                     le64toh((*f)->header->head_entry_seqnum),
                     le64toh((*f)->header->head_entry_realtime));

        /* Try to rename the file to the archived version. If the file
         * already was deleted, we'll get ENOENT, let's ignore that
         * case. */
        r = rename(old_file->path, p);
        if (r < 0 && errno != ENOENT)

        old_file->header->state = STATE_ARCHIVED;

        /* Currently, btrfs is not very good with our write patterns
         * and fragments heavily. Let's defrag our journal files when
         * we archive them */
        old_file->defrag_on_close = true;

        r = journal_file_open(old_file->path, old_file->flags, old_file->mode, compress, seal, NULL, old_file->mmap, old_file, &new_file);
        journal_file_close(old_file);
int journal_file_open_reliably(
                JournalMetrics *metrics,
                MMapCache *mmap_cache,
                JournalFile *template,
                JournalFile **ret) {

        _cleanup_free_ char *p = NULL;

        r = journal_file_open(fname, flags, mode, compress, seal, metrics, mmap_cache, template, ret);
                          -EBADMSG,           /* corrupted */
                          -ENODATA,           /* truncated */
                          -EHOSTDOWN,         /* other machine */
                          -EPROTONOSUPPORT,   /* incompatible feature */
                          -EBUSY,             /* unclean shutdown */
                          -ESHUTDOWN,         /* already archived */
                          -EIO,               /* IO error, including SIGBUS on mmap */
                          -EIDRM              /* File has been deleted */))

        if ((flags & O_ACCMODE) == O_RDONLY)

        if (!(flags & O_CREAT))

        if (!endswith(fname, ".journal"))

        /* The file is corrupted. Rotate it away and try it again (but only once) */

        if (asprintf(&p, "%.*s@%016"PRIx64"-%016"PRIx64".journal~",
                     now(CLOCK_REALTIME),

        if (rename(fname, p) < 0)

        /* btrfs doesn't cope well with our write pattern and
         * fragments heavily. Let's defrag all files we rotate */

        (void) chattr_path(p, false, FS_NOCOW_FL);
        (void) btrfs_defrag(p);

        log_warning_errno(r, "File %s corrupted or uncleanly shut down, renaming and replacing.", fname);

        return journal_file_open(fname, flags, mode, compress, seal, metrics, mmap_cache, template, ret);
int journal_file_copy_entry(JournalFile *from, JournalFile *to, Object *o, uint64_t p, uint64_t *seqnum, Object **ret, uint64_t *offset) {

        uint64_t q, xor_hash = 0;

        ts.monotonic = le64toh(o->entry.monotonic);
        ts.realtime = le64toh(o->entry.realtime);

        n = journal_file_entry_n_items(o);
        /* alloca() can't take 0, hence let's allocate at least one */
        items = alloca(sizeof(EntryItem) * MAX(1u, n));

        for (i = 0; i < n; i++) {

                q = le64toh(o->entry.items[i].object_offset);
                le_hash = o->entry.items[i].hash;

                r = journal_file_move_to_object(from, OBJECT_DATA, q, &o);

                if (le_hash != o->data.hash)

                l = le64toh(o->object.size) - offsetof(Object, data.payload);

                /* We hit the limit on 32bit machines */
                if ((uint64_t) t != l)

                if (o->object.flags & OBJECT_COMPRESSION_MASK) {
#if defined(HAVE_XZ) || defined(HAVE_LZ4)

                        r = decompress_blob(o->object.flags & OBJECT_COMPRESSION_MASK,
                                            o->data.payload, l, &from->compress_buffer, &from->compress_buffer_size, &rsize, 0);

                        data = from->compress_buffer;

                        return -EPROTONOSUPPORT;

                        data = o->data.payload;

                r = journal_file_append_data(to, data, l, &u, &h);

                xor_hash ^= le64toh(u->data.hash);
                items[i].object_offset = htole64(h);
                items[i].hash = u->data.hash;

                r = journal_file_move_to_object(from, OBJECT_ENTRY, p, &o);

        r = journal_file_append_entry_internal(to, &ts, xor_hash, items, n, seqnum, ret, offset);

        if (mmap_cache_got_sigbus(to->mmap, to->fd))
void journal_reset_metrics(JournalMetrics *m) {

        /* Set everything to "pick automatic values". */

        *m = (JournalMetrics) {
                .min_use = (uint64_t) -1,
                .max_use = (uint64_t) -1,
                .min_size = (uint64_t) -1,
                .max_size = (uint64_t) -1,
                .keep_free = (uint64_t) -1,
                .n_max_files = (uint64_t) -1,
void journal_default_metrics(JournalMetrics *m, int fd) {
        char a[FORMAT_BYTES_MAX], b[FORMAT_BYTES_MAX], c[FORMAT_BYTES_MAX], d[FORMAT_BYTES_MAX], e[FORMAT_BYTES_MAX];

        if (fstatvfs(fd, &ss) >= 0)
                fs_size = ss.f_frsize * ss.f_blocks;
                log_debug_errno(errno, "Failed to determine disk size: %m");

        if (m->max_use == (uint64_t) -1) {

                        m->max_use = PAGE_ALIGN(fs_size / 10); /* 10% of file system size */

                        if (m->max_use > DEFAULT_MAX_USE_UPPER)
                                m->max_use = DEFAULT_MAX_USE_UPPER;

                        if (m->max_use < DEFAULT_MAX_USE_LOWER)
                                m->max_use = DEFAULT_MAX_USE_LOWER;
                        m->max_use = DEFAULT_MAX_USE_LOWER;

                m->max_use = PAGE_ALIGN(m->max_use);

                if (m->max_use != 0 && m->max_use < JOURNAL_FILE_SIZE_MIN*2)
                        m->max_use = JOURNAL_FILE_SIZE_MIN*2;

        if (m->min_use == (uint64_t) -1)
                m->min_use = DEFAULT_MIN_USE;

        if (m->min_use > m->max_use)
                m->min_use = m->max_use;

        if (m->max_size == (uint64_t) -1) {
                m->max_size = PAGE_ALIGN(m->max_use / 8); /* 8 chunks */

                if (m->max_size > DEFAULT_MAX_SIZE_UPPER)
                        m->max_size = DEFAULT_MAX_SIZE_UPPER;

        m->max_size = PAGE_ALIGN(m->max_size);

        if (m->max_size != 0) {
                if (m->max_size < JOURNAL_FILE_SIZE_MIN)
                        m->max_size = JOURNAL_FILE_SIZE_MIN;

                if (m->max_use != 0 && m->max_size*2 > m->max_use)
                        m->max_use = m->max_size*2;

        if (m->min_size == (uint64_t) -1)
                m->min_size = JOURNAL_FILE_SIZE_MIN;

        m->min_size = PAGE_ALIGN(m->min_size);

        if (m->min_size < JOURNAL_FILE_SIZE_MIN)
                m->min_size = JOURNAL_FILE_SIZE_MIN;

        if (m->max_size != 0 && m->min_size > m->max_size)
                m->max_size = m->min_size;

        if (m->keep_free == (uint64_t) -1) {

                        m->keep_free = PAGE_ALIGN(fs_size * 3 / 20); /* 15% of file system size */

                        if (m->keep_free > DEFAULT_KEEP_FREE_UPPER)
                                m->keep_free = DEFAULT_KEEP_FREE_UPPER;

                        m->keep_free = DEFAULT_KEEP_FREE;

        if (m->n_max_files == (uint64_t) -1)
                m->n_max_files = DEFAULT_N_MAX_FILES;

        log_debug("Fixed min_use=%s max_use=%s max_size=%s min_size=%s keep_free=%s n_max_files=%" PRIu64,
                  format_bytes(a, sizeof(a), m->min_use),
                  format_bytes(b, sizeof(b), m->max_use),
                  format_bytes(c, sizeof(c), m->max_size),
                  format_bytes(d, sizeof(d), m->min_size),
                  format_bytes(e, sizeof(e), m->keep_free),
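/* Worked example of the automatic metrics above (illustrative numbers): on a
 * 100 GiB file system with everything left at "pick automatic values",
 * max_use = 10% = 10 GiB but is capped at DEFAULT_MAX_USE_UPPER (4 GiB),
 * max_size = max_use/8 = 512 MiB and is capped at DEFAULT_MAX_SIZE_UPPER
 * (128 MiB), and keep_free = 15% = 15 GiB is capped at
 * DEFAULT_KEEP_FREE_UPPER (4 GiB). */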
int journal_file_get_cutoff_realtime_usec(JournalFile *f, usec_t *from, usec_t *to) {

        if (f->header->head_entry_realtime == 0)

        *from = le64toh(f->header->head_entry_realtime);

        if (f->header->tail_entry_realtime == 0)

        *to = le64toh(f->header->tail_entry_realtime);

int journal_file_get_cutoff_monotonic_usec(JournalFile *f, sd_id128_t boot_id, usec_t *from, usec_t *to) {

        r = find_data_object_by_boot_id(f, boot_id, &o, &p);

        if (le64toh(o->data.n_entries) <= 0)

        r = journal_file_move_to_object(f, OBJECT_ENTRY, le64toh(o->data.entry_offset), &o);

        *from = le64toh(o->entry.monotonic);

        r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);

        r = generic_array_get_plus_one(f,
                                       le64toh(o->data.entry_offset),
                                       le64toh(o->data.entry_array_offset),
                                       le64toh(o->data.n_entries)-1,

        *to = le64toh(o->entry.monotonic);
bool journal_file_rotate_suggested(JournalFile *f, usec_t max_file_usec) {

        /* If we gained new header fields we gained new features,
         * hence suggest a rotation */
        if (le64toh(f->header->header_size) < sizeof(Header)) {
                log_debug("%s uses an outdated header, suggesting rotation.", f->path);

        /* Let's check if the hash tables grew over a certain fill
         * level (75%, borrowing this value from Java's hash table
         * implementation), and if so suggest a rotation. To calculate
         * the fill level we need the n_data field, which only exists
         * in newer versions. */

        if (JOURNAL_HEADER_CONTAINS(f->header, n_data))
                if (le64toh(f->header->n_data) * 4ULL > (le64toh(f->header->data_hash_table_size) / sizeof(HashItem)) * 3ULL) {
                        log_debug("Data hash table of %s has a fill level at %.1f (%"PRIu64" of %"PRIu64" items, %llu file size, %"PRIu64" bytes per hash table item), suggesting rotation.",
                                  100.0 * (double) le64toh(f->header->n_data) / ((double) (le64toh(f->header->data_hash_table_size) / sizeof(HashItem))),
                                  le64toh(f->header->n_data),
                                  le64toh(f->header->data_hash_table_size) / sizeof(HashItem),
                                  (unsigned long long) f->last_stat.st_size,
                                  f->last_stat.st_size / le64toh(f->header->n_data));

        if (JOURNAL_HEADER_CONTAINS(f->header, n_fields))
                if (le64toh(f->header->n_fields) * 4ULL > (le64toh(f->header->field_hash_table_size) / sizeof(HashItem)) * 3ULL) {
                        log_debug("Field hash table of %s has a fill level at %.1f (%"PRIu64" of %"PRIu64" items), suggesting rotation.",
                                  100.0 * (double) le64toh(f->header->n_fields) / ((double) (le64toh(f->header->field_hash_table_size) / sizeof(HashItem))),
                                  le64toh(f->header->n_fields),
                                  le64toh(f->header->field_hash_table_size) / sizeof(HashItem));

        /* Are the data objects properly indexed by field objects? */
        if (JOURNAL_HEADER_CONTAINS(f->header, n_data) &&
            JOURNAL_HEADER_CONTAINS(f->header, n_fields) &&
            le64toh(f->header->n_data) > 0 &&
            le64toh(f->header->n_fields) == 0)

        if (max_file_usec > 0) {

                h = le64toh(f->header->head_entry_realtime);
                t = now(CLOCK_REALTIME);

                if (h > 0 && t > h + max_file_usec)