/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#ifndef __EROFS_FS_ZDATA_H
#define __EROFS_FS_ZDATA_H
/* maximum number of decompressed pages a physical cluster can span */
#define Z_EROFS_PCLUSTER_MAX_PAGES (Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE)
/* number of pagevec pointers inlined in struct z_erofs_collection */
#define Z_EROFS_NR_INLINE_PAGEVECS 3
/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only
 *    for everyone else;
 *
 * L: Field should be protected by pageset lock;
 *
 * A: Field should be accessed / updated in atomic for parallelized code.
 */
25 struct z_erofs_collection
{
28 /* I: page offset of start position of decompression */
29 unsigned short pageofs
;
31 /* L: maximum relative page index in pagevec[] */
32 unsigned short nr_pages
;
34 /* L: total number of pages in pagevec[] */
38 /* L: inline a certain number of pagevecs for bootstrap */
39 erofs_vtptr_t pagevec
[Z_EROFS_NR_INLINE_PAGEVECS
];
41 /* I: can be used to free the pcluster by RCU. */
/*
 * NOTE(review): bit 0 appears to flag "full decompressed length recorded",
 * with the length value shifted up by LENGTH_BIT — confirm at the use sites
 * of the pcluster length field.
 */
#define Z_EROFS_PCLUSTER_FULL_LENGTH 0x00000001
#define Z_EROFS_PCLUSTER_LENGTH_BIT 1
/*
 * let's leave a type here in case of introducing
 * another tagged pointer later.
 */
typedef void *z_erofs_next_pcluster_t;
55 struct z_erofs_pcluster
{
56 struct erofs_workgroup obj
;
57 struct z_erofs_collection primary_collection
;
59 /* A: point to next chained pcluster or TAILs */
60 z_erofs_next_pcluster_t next
;
62 /* A: lower limit of decompressed length and if full length or not */
65 /* I: physical cluster size in pages */
66 unsigned short pclusterpages
;
68 /* I: compression algorithm format */
69 unsigned char algorithmformat
;
71 /* A: compressed pages (can be cached or inplaced pages) */
72 struct page
*compressed_pages
[];
/* the embedded primary_collection member is the pcluster's primary one */
#define z_erofs_primarycollection(pcluster) (&(pcluster)->primary_collection)
/* let's avoid the valid 32-bit kernel addresses */

/* the chained workgroup hasn't submitted io (still open) */
#define Z_EROFS_PCLUSTER_TAIL ((void *)0x5F0ECAFE)
/* the chained workgroup has already submitted io */
#define Z_EROFS_PCLUSTER_TAIL_CLOSED ((void *)0x5F0EDEAD)

/* end-of-chain marker for ->next */
#define Z_EROFS_PCLUSTER_NIL (NULL)
86 struct z_erofs_decompressqueue
{
87 struct super_block
*sb
;
88 atomic_t pending_bios
;
89 z_erofs_next_pcluster_t head
;
92 wait_queue_head_t wait
;
93 struct work_struct work
;
97 #define MNGD_MAPPING(sbi) ((sbi)->managed_cache->i_mapping)
98 static inline bool erofs_page_is_managed(const struct erofs_sb_info
*sbi
,
101 return page
->mapping
== MNGD_MAPPING(sbi
);
104 #define Z_EROFS_ONLINEPAGE_COUNT_BITS 2
105 #define Z_EROFS_ONLINEPAGE_COUNT_MASK ((1 << Z_EROFS_ONLINEPAGE_COUNT_BITS) - 1)
106 #define Z_EROFS_ONLINEPAGE_INDEX_SHIFT (Z_EROFS_ONLINEPAGE_COUNT_BITS)
109 * waiters (aka. ongoing_packs): # to unlock the page
110 * sub-index: 0 - for partial page, >= 1 full page sub-index
112 typedef atomic_t z_erofs_onlinepage_t
;
115 union z_erofs_onlinepage_converter
{
116 z_erofs_onlinepage_t
*o
;
120 static inline unsigned int z_erofs_onlinepage_index(struct page
*page
)
122 union z_erofs_onlinepage_converter u
;
124 DBG_BUGON(!PagePrivate(page
));
125 u
.v
= &page_private(page
);
127 return atomic_read(u
.o
) >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT
;
130 static inline void z_erofs_onlinepage_init(struct page
*page
)
133 z_erofs_onlinepage_t o
;
135 /* keep from being unlocked in advance */
136 } u
= { .o
= ATOMIC_INIT(1) };
138 set_page_private(page
, u
.v
);
140 SetPagePrivate(page
);
143 static inline void z_erofs_onlinepage_fixup(struct page
*page
,
144 uintptr_t index
, bool down
)
146 union z_erofs_onlinepage_converter u
= { .v
= &page_private(page
) };
147 int orig
, orig_index
, val
;
150 orig
= atomic_read(u
.o
);
151 orig_index
= orig
>> Z_EROFS_ONLINEPAGE_INDEX_SHIFT
;
156 DBG_BUGON(orig_index
!= index
);
159 val
= (index
<< Z_EROFS_ONLINEPAGE_INDEX_SHIFT
) |
160 ((orig
& Z_EROFS_ONLINEPAGE_COUNT_MASK
) + (unsigned int)down
);
161 if (atomic_cmpxchg(u
.o
, orig
, val
) != orig
)
165 static inline void z_erofs_onlinepage_endio(struct page
*page
)
167 union z_erofs_onlinepage_converter u
;
170 DBG_BUGON(!PagePrivate(page
));
171 u
.v
= &page_private(page
);
173 v
= atomic_dec_return(u
.o
);
174 if (!(v
& Z_EROFS_ONLINEPAGE_COUNT_MASK
)) {
175 set_page_private(page
, 0);
176 ClearPagePrivate(page
);
177 if (!PageError(page
))
178 SetPageUptodate(page
);
181 erofs_dbg("%s, page %p value %x", __func__
, page
, atomic_read(u
.o
));
/* pages safe to map via an on-stack array: bounded by THREAD_SIZE, capped at 96 */
#define Z_EROFS_VMAP_ONSTACK_PAGES \
	min_t(unsigned int, THREAD_SIZE / 8 / sizeof(struct page *), 96U)
/* NOTE(review): presumably the capacity of a shared fallback page array — confirm at use site */
#define Z_EROFS_VMAP_GLOBAL_PAGES 2048