fs/exfat/cache.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * linux/fs/fat/cache.c
 *
 * Written 1992,1993 by Werner Almesberger
 *
 * Mar 1999. AV. Changed cache, so that it uses the starting cluster instead
 *	of inode number.
 * May 1999. AV. Fixed the bogosity with FAT32 (read "FAT28"). Fscking lusers.
 * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
 */

#include <linux/slab.h>
#include <asm/unaligned.h>
#include <linux/buffer_head.h>

#include "exfat_raw.h"
#include "exfat_fs.h"

#define EXFAT_MAX_CACHE		16

struct exfat_cache {
	struct list_head cache_list;
	unsigned int nr_contig;	/* number of contiguous clusters */
	unsigned int fcluster;	/* cluster number in the file. */
	unsigned int dcluster;	/* cluster number on disk. */
};

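/*
 * Snapshot of a cache entry, carried by the caller while the cluster chain
 * is walked without cache_lru_lock held; @id records ei->cache_valid_id so
 * that exfat_cache_add() can detect an invalidation that happened in between.
 */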
struct exfat_cache_id {
	unsigned int id;
	unsigned int nr_contig;
	unsigned int fcluster;
	unsigned int dcluster;
};

static struct kmem_cache *exfat_cachep;

static void exfat_cache_init_once(void *c)
{
	struct exfat_cache *cache = (struct exfat_cache *)c;

	INIT_LIST_HEAD(&cache->cache_list);
}

int exfat_cache_init(void)
{
	exfat_cachep = kmem_cache_create("exfat_cache",
				sizeof(struct exfat_cache),
				0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
				exfat_cache_init_once);
	if (!exfat_cachep)
		return -ENOMEM;
	return 0;
}

void exfat_cache_shutdown(void)
{
	if (!exfat_cachep)
		return;
	kmem_cache_destroy(exfat_cachep);
}

static inline struct exfat_cache *exfat_cache_alloc(void)
{
	return kmem_cache_alloc(exfat_cachep, GFP_NOFS);
}

static inline void exfat_cache_free(struct exfat_cache *cache)
{
	WARN_ON(!list_empty(&cache->cache_list));
	kmem_cache_free(exfat_cachep, cache);
}

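/* Move @cache to the head of the inode's LRU list unless it is already there. */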
static inline void exfat_cache_update_lru(struct inode *inode,
		struct exfat_cache *cache)
{
	struct exfat_inode_info *ei = EXFAT_I(inode);

	if (ei->cache_lru.next != &cache->cache_list)
		list_move(&cache->cache_list, &ei->cache_lru);
}

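/*
 * Look up file cluster @fclus in the inode's cluster cache.  On a hit, @cid
 * is filled from the nearest cached entry starting at or before @fclus,
 * *cached_fclus and *cached_dclus are set to the closest cached position,
 * and the offset of that position within the entry is returned.  Returns
 * EXFAT_EOF_CLUSTER when nothing useful is cached.
 */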
static unsigned int exfat_cache_lookup(struct inode *inode,
		unsigned int fclus, struct exfat_cache_id *cid,
		unsigned int *cached_fclus, unsigned int *cached_dclus)
{
	struct exfat_inode_info *ei = EXFAT_I(inode);
	static struct exfat_cache nohit = { .fcluster = 0, };
	struct exfat_cache *hit = &nohit, *p;
	unsigned int offset = EXFAT_EOF_CLUSTER;

	spin_lock(&ei->cache_lru_lock);
	list_for_each_entry(p, &ei->cache_lru, cache_list) {
		/* Find the cache of "fclus" or nearest cache. */
		if (p->fcluster <= fclus && hit->fcluster < p->fcluster) {
			hit = p;
			if (hit->fcluster + hit->nr_contig < fclus) {
				offset = hit->nr_contig;
			} else {
				offset = fclus - hit->fcluster;
				break;
			}
		}
	}
	if (hit != &nohit) {
		exfat_cache_update_lru(inode, hit);

		cid->id = ei->cache_valid_id;
		cid->nr_contig = hit->nr_contig;
		cid->fcluster = hit->fcluster;
		cid->dcluster = hit->dcluster;
		*cached_fclus = cid->fcluster + offset;
		*cached_dclus = cid->dcluster + offset;
	}
	spin_unlock(&ei->cache_lru_lock);

	return offset;
}

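/*
 * If an entry starting at the same file cluster as @new already exists,
 * widen its contiguous run when @new covers more clusters and return it;
 * otherwise return NULL.  Called with cache_lru_lock held.
 */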
static struct exfat_cache *exfat_cache_merge(struct inode *inode,
		struct exfat_cache_id *new)
{
	struct exfat_inode_info *ei = EXFAT_I(inode);
	struct exfat_cache *p;

	list_for_each_entry(p, &ei->cache_lru, cache_list) {
		/* Find the same part as "new" in cluster-chain. */
		if (p->fcluster == new->fcluster) {
			if (new->nr_contig > p->nr_contig)
				p->nr_contig = new->nr_contig;
			return p;
		}
	}
	return NULL;
}

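/*
 * Insert @new into the inode's cluster cache: merge it with an existing
 * entry when possible, allocate a new entry while fewer than
 * EXFAT_MAX_CACHE are in use, or recycle the least recently used one.
 * Entries tagged with a stale id (neither EXFAT_CACHE_VALID nor the
 * current ei->cache_valid_id) were invalidated after the lookup and are
 * silently dropped.
 */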
static void exfat_cache_add(struct inode *inode,
		struct exfat_cache_id *new)
{
	struct exfat_inode_info *ei = EXFAT_I(inode);
	struct exfat_cache *cache, *tmp;

	if (new->fcluster == EXFAT_EOF_CLUSTER) /* dummy cache */
		return;

	spin_lock(&ei->cache_lru_lock);
	if (new->id != EXFAT_CACHE_VALID &&
	    new->id != ei->cache_valid_id)
		goto unlock;	/* this cache was invalidated */

	cache = exfat_cache_merge(inode, new);
	if (cache == NULL) {
		if (ei->nr_caches < EXFAT_MAX_CACHE) {
			ei->nr_caches++;
			spin_unlock(&ei->cache_lru_lock);

			tmp = exfat_cache_alloc();
			if (!tmp) {
				spin_lock(&ei->cache_lru_lock);
				ei->nr_caches--;
				spin_unlock(&ei->cache_lru_lock);
				return;
			}

			spin_lock(&ei->cache_lru_lock);
			cache = exfat_cache_merge(inode, new);
			if (cache != NULL) {
				ei->nr_caches--;
				exfat_cache_free(tmp);
				goto out_update_lru;
			}
			cache = tmp;
		} else {
			struct list_head *p = ei->cache_lru.prev;

			cache = list_entry(p,
					struct exfat_cache, cache_list);
		}
		cache->fcluster = new->fcluster;
		cache->dcluster = new->dcluster;
		cache->nr_contig = new->nr_contig;
	}
out_update_lru:
	exfat_cache_update_lru(inode, cache);
unlock:
	spin_unlock(&ei->cache_lru_lock);
}

/*
 * Cache invalidation occurs rarely, thus the LRU chain is not updated. It
 * fixes itself after a while.
 */
static void __exfat_cache_inval_inode(struct inode *inode)
{
	struct exfat_inode_info *ei = EXFAT_I(inode);
	struct exfat_cache *cache;

	while (!list_empty(&ei->cache_lru)) {
		cache = list_entry(ei->cache_lru.next,
				   struct exfat_cache, cache_list);
		list_del_init(&cache->cache_list);
		ei->nr_caches--;
		exfat_cache_free(cache);
	}
	/* Update. The copy of caches before this id is discarded. */
	ei->cache_valid_id++;
	if (ei->cache_valid_id == EXFAT_CACHE_VALID)
		ei->cache_valid_id++;
}

void exfat_cache_inval_inode(struct inode *inode)
{
	struct exfat_inode_info *ei = EXFAT_I(inode);

	spin_lock(&ei->cache_lru_lock);
	__exfat_cache_inval_inode(inode);
	spin_unlock(&ei->cache_lru_lock);
}

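/*
 * Tentatively extend @cid by one cluster and report whether @dclus is the
 * next contiguous disk cluster; when it is not, the caller restarts the
 * run with cache_init() below.
 */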
static inline int cache_contiguous(struct exfat_cache_id *cid,
		unsigned int dclus)
{
	cid->nr_contig++;
	return cid->dcluster + cid->nr_contig == dclus;
}

static inline void cache_init(struct exfat_cache_id *cid,
		unsigned int fclus, unsigned int dclus)
{
	cid->id = EXFAT_CACHE_VALID;
	cid->fcluster = fclus;
	cid->dcluster = dclus;
	cid->nr_contig = 0;
}

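/*
 * Walk the cluster chain of @inode up to file cluster @cluster, starting
 * from the closest cached position.  On success, *fclus/*dclus hold the
 * reached file/disk cluster pair, *last_dclus the previous disk cluster,
 * and the most recent contiguous run is cached for later lookups.  Hitting
 * EXFAT_EOF_CLUSTER before @cluster is an error unless @allow_eof is set.
 */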
int exfat_get_cluster(struct inode *inode, unsigned int cluster,
		unsigned int *fclus, unsigned int *dclus,
		unsigned int *last_dclus, int allow_eof)
{
	struct super_block *sb = inode->i_sb;
	struct exfat_sb_info *sbi = EXFAT_SB(sb);
	unsigned int limit = sbi->num_clusters;
	struct exfat_inode_info *ei = EXFAT_I(inode);
	struct exfat_cache_id cid;
	unsigned int content;

	if (ei->start_clu == EXFAT_FREE_CLUSTER) {
		exfat_fs_error(sb,
			"invalid access to exfat cache (entry 0x%08x)",
			ei->start_clu);
		return -EIO;
	}

	*fclus = 0;
	*dclus = ei->start_clu;
	*last_dclus = *dclus;

	/*
	 * Don't use exfat_cache if zero offset or non-cluster allocation
	 */
	if (cluster == 0 || *dclus == EXFAT_EOF_CLUSTER)
		return 0;

	cache_init(&cid, EXFAT_EOF_CLUSTER, EXFAT_EOF_CLUSTER);

	if (exfat_cache_lookup(inode, cluster, &cid, fclus, dclus) ==
			EXFAT_EOF_CLUSTER) {
		/*
		 * dummy, always not contiguous
		 * This is reinitialized by cache_init(), later.
		 */
		WARN_ON(cid.id != EXFAT_CACHE_VALID ||
			cid.fcluster != EXFAT_EOF_CLUSTER ||
			cid.dcluster != EXFAT_EOF_CLUSTER ||
			cid.nr_contig != 0);
	}

	if (*fclus == cluster)
		return 0;

	while (*fclus < cluster) {
		/* prevent the infinite loop of cluster chain */
		if (*fclus > limit) {
			exfat_fs_error(sb,
				"detected the cluster chain loop (i_pos %u)",
				(*fclus));
			return -EIO;
		}

		if (exfat_ent_get(sb, *dclus, &content))
			return -EIO;

		*last_dclus = *dclus;
		*dclus = content;
		(*fclus)++;

		if (content == EXFAT_EOF_CLUSTER) {
			if (!allow_eof) {
				exfat_fs_error(sb,
					"invalid cluster chain (i_pos %u, last_clus 0x%08x is EOF)",
					*fclus, (*last_dclus));
				return -EIO;
			}

			break;
		}

		if (!cache_contiguous(&cid, *dclus))
			cache_init(&cid, *fclus, *dclus);
	}

	exfat_cache_add(inode, &cid);
	return 0;
}