/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#ifndef __EROFS_FS_ZPVEC_H
#define __EROFS_FS_ZPVEC_H
/*
 * Page type tags stored in the pagevec for the decompress subsystem.
 * Z_EROFS_PAGE_TYPE_EXCLUSIVE must stay 0 (see __bad_page_type_exclusive
 * compile-time check below).
 */
enum z_erofs_page_type {
	/* including Z_EROFS_VLE_PAGE_TAIL_EXCLUSIVE */
	Z_EROFS_PAGE_TYPE_EXCLUSIVE,

	Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED,

	Z_EROFS_VLE_PAGE_TYPE_HEAD,
	Z_EROFS_VLE_PAGE_TYPE_MAX
};
22 extern void __compiletime_error("Z_EROFS_PAGE_TYPE_EXCLUSIVE != 0")
23 __bad_page_type_exclusive(void);
25 /* pagevec tagged pointer */
26 typedef tagptr2_t erofs_vtptr_t
;
28 /* pagevec collector */
29 struct z_erofs_pagevec_ctor
{
30 struct page
*curr
, *next
;
33 unsigned int nr
, index
;
36 static inline void z_erofs_pagevec_ctor_exit(struct z_erofs_pagevec_ctor
*ctor
,
43 kunmap_atomic(ctor
->pages
);
48 static inline struct page
*
49 z_erofs_pagevec_ctor_next_page(struct z_erofs_pagevec_ctor
*ctor
,
54 /* keep away from occupied pages */
58 for (index
= 0; index
< nr
; ++index
) {
59 const erofs_vtptr_t t
= ctor
->pages
[index
];
60 const unsigned int tags
= tagptr_unfold_tags(t
);
62 if (tags
== Z_EROFS_PAGE_TYPE_EXCLUSIVE
)
63 return tagptr_unfold_ptr(t
);
65 DBG_BUGON(nr
>= ctor
->nr
);
70 z_erofs_pagevec_ctor_pagedown(struct z_erofs_pagevec_ctor
*ctor
,
73 struct page
*next
= z_erofs_pagevec_ctor_next_page(ctor
, ctor
->nr
);
75 z_erofs_pagevec_ctor_exit(ctor
, atomic
);
79 ctor
->pages
= atomic
?
80 kmap_atomic(ctor
->curr
) : kmap(ctor
->curr
);
82 ctor
->nr
= PAGE_SIZE
/ sizeof(struct page
*);
86 static inline void z_erofs_pagevec_ctor_init(struct z_erofs_pagevec_ctor
*ctor
,
92 ctor
->curr
= ctor
->next
= NULL
;
97 z_erofs_pagevec_ctor_pagedown(ctor
, false);
98 while (i
> ctor
->nr
) {
100 z_erofs_pagevec_ctor_pagedown(ctor
, false);
103 ctor
->next
= z_erofs_pagevec_ctor_next_page(ctor
, i
);
107 static inline bool z_erofs_pagevec_enqueue(struct z_erofs_pagevec_ctor
*ctor
,
109 enum z_erofs_page_type type
,
113 /* some pages cannot be reused as pvec safely without I/O */
114 if (type
== Z_EROFS_PAGE_TYPE_EXCLUSIVE
&& !pvec_safereuse
)
115 type
= Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED
;
117 if (type
!= Z_EROFS_PAGE_TYPE_EXCLUSIVE
&&
118 ctor
->index
+ 1 == ctor
->nr
)
122 if (ctor
->index
>= ctor
->nr
)
123 z_erofs_pagevec_ctor_pagedown(ctor
, false);
125 /* exclusive page type must be 0 */
126 if (Z_EROFS_PAGE_TYPE_EXCLUSIVE
!= (uintptr_t)NULL
)
127 __bad_page_type_exclusive();
129 /* should remind that collector->next never equal to 1, 2 */
130 if (type
== (uintptr_t)ctor
->next
) {
133 ctor
->pages
[ctor
->index
++] = tagptr_fold(erofs_vtptr_t
, page
, type
);
137 static inline struct page
*
138 z_erofs_pagevec_dequeue(struct z_erofs_pagevec_ctor
*ctor
,
139 enum z_erofs_page_type
*type
)
143 if (ctor
->index
>= ctor
->nr
) {
144 DBG_BUGON(!ctor
->next
);
145 z_erofs_pagevec_ctor_pagedown(ctor
, true);
148 t
= ctor
->pages
[ctor
->index
];
150 *type
= tagptr_unfold_tags(t
);
152 /* should remind that collector->next never equal to 1, 2 */
153 if (*type
== (uintptr_t)ctor
->next
)
154 ctor
->next
= tagptr_unfold_ptr(t
);
156 ctor
->pages
[ctor
->index
++] = tagptr_fold(erofs_vtptr_t
, NULL
, 0);
157 return tagptr_unfold_ptr(t
);