/*
 * Compatibility functions which bloat the callers too much to make inline.
 * All of the callers of these functions should be converted to use folios
 * eventually.
 */
#include <linux/migrate.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
/* Compat shim: resolve @page to its folio and return that folio's mapping. */
struct address_space *page_mapping(struct page *page)
{
	struct folio *folio = page_folio(page);

	return folio_mapping(folio);
}
EXPORT_SYMBOL(page_mapping);
/* Compat shim: unlock the folio containing @page. */
void unlock_page(struct page *page)
{
	folio_unlock(page_folio(page));
}
EXPORT_SYMBOL(unlock_page);
/* Compat shim: end writeback on the folio containing @page. */
void end_page_writeback(struct page *page)
{
	struct folio *folio = page_folio(page);

	folio_end_writeback(folio);
}
EXPORT_SYMBOL(end_page_writeback);
/* Compat shim: wait for writeback on the folio containing @page. */
void wait_on_page_writeback(struct page *page)
{
	folio_wait_writeback(page_folio(page));
}
EXPORT_SYMBOL_GPL(wait_on_page_writeback);
/* Compat shim: wait for the folio containing @page to become stable. */
void wait_for_stable_page(struct page *page)
{
	struct folio *folio = page_folio(page);

	folio_wait_stable(folio);
}
EXPORT_SYMBOL_GPL(wait_for_stable_page);
42 bool page_mapped(struct page
*page
)
44 return folio_mapped(page_folio(page
));
46 EXPORT_SYMBOL(page_mapped
);
/* Compat shim: mark the folio containing @page as accessed. */
void mark_page_accessed(struct page *page)
{
	folio_mark_accessed(page_folio(page));
}
EXPORT_SYMBOL(mark_page_accessed);
54 bool set_page_writeback(struct page
*page
)
56 return folio_start_writeback(page_folio(page
));
58 EXPORT_SYMBOL(set_page_writeback
);
60 bool set_page_dirty(struct page
*page
)
62 return folio_mark_dirty(page_folio(page
));
64 EXPORT_SYMBOL(set_page_dirty
);
/* Compat shim: dirty the folio backing @page within its mapping. */
int __set_page_dirty_nobuffers(struct page *page)
{
	struct folio *folio = page_folio(page);

	return filemap_dirty_folio(page_mapping(page), folio);
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);
72 bool clear_page_dirty_for_io(struct page
*page
)
74 return folio_clear_dirty_for_io(page_folio(page
));
76 EXPORT_SYMBOL(clear_page_dirty_for_io
);
78 bool redirty_page_for_writepage(struct writeback_control
*wbc
,
81 return folio_redirty_for_writepage(wbc
, page_folio(page
));
83 EXPORT_SYMBOL(redirty_page_for_writepage
);
/* Compat shim: add the folio containing @page to the LRU. */
void lru_cache_add(struct page *page)
{
	struct folio *folio = page_folio(page);

	folio_add_lru(folio);
}
EXPORT_SYMBOL(lru_cache_add);
91 int add_to_page_cache_lru(struct page
*page
, struct address_space
*mapping
,
92 pgoff_t index
, gfp_t gfp
)
94 return filemap_add_folio(mapping
, page_folio(page
), index
, gfp
);
96 EXPORT_SYMBOL(add_to_page_cache_lru
);
99 struct page
*pagecache_get_page(struct address_space
*mapping
, pgoff_t index
,
100 int fgp_flags
, gfp_t gfp
)
104 folio
= __filemap_get_folio(mapping
, index
, fgp_flags
, gfp
);
105 if ((fgp_flags
& FGP_HEAD
) || !folio
|| xa_is_value(folio
))
107 return folio_file_page(folio
, index
);
109 EXPORT_SYMBOL(pagecache_get_page
);
111 struct page
*grab_cache_page_write_begin(struct address_space
*mapping
,
114 unsigned fgp_flags
= FGP_LOCK
| FGP_WRITE
| FGP_CREAT
| FGP_STABLE
;
116 return pagecache_get_page(mapping
, index
, fgp_flags
,
117 mapping_gfp_mask(mapping
));
119 EXPORT_SYMBOL(grab_cache_page_write_begin
);
/* Compat shim: remove the folio containing @page from the page cache. */
void delete_from_page_cache(struct page *page)
{
	filemap_remove_folio(page_folio(page));
}
126 int try_to_release_page(struct page
*page
, gfp_t gfp
)
128 return filemap_release_folio(page_folio(page
), gfp
);
130 EXPORT_SYMBOL(try_to_release_page
);
132 int isolate_lru_page(struct page
*page
)
134 if (WARN_RATELIMIT(PageTail(page
), "trying to isolate tail page"))
136 return folio_isolate_lru((struct folio
*)page
);
/* Compat shim: put the folio containing @page back on the LRU. */
void putback_lru_page(struct page *page)
{
	struct folio *folio = page_folio(page);

	folio_putback_lru(folio);
}