/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */
7 #include "../ocf_cache_priv.h"
10 #include "engine_inv.h"
11 #include "engine_bf.h"
12 #include "engine_common.h"
13 #include "cache_engine.h"
14 #include "../concurrency/ocf_concurrency.h"
15 #include "../utils/utils_io.h"
16 #include "../utils/utils_req.h"
17 #include "../utils/utils_cache_line.h"
18 #include "../utils/utils_part.h"
19 #include "../metadata/metadata.h"
20 #include "../ocf_def_priv.h"
22 #define OCF_ENGINE_DEBUG_IO_NAME "rd"
23 #include "engine_debug.h"
25 static void _ocf_read_generic_hit_complete(struct ocf_request
*req
, int error
)
31 inc_fallback_pt_error_counter(req
->cache
);
33 /* Handle callback-caller race to let only one of the two complete the
34 * request. Also, complete original request only if this is the last
35 * sub-request to complete
37 if (env_atomic_dec_return(&req
->req_remaining
) == 0) {
38 OCF_DEBUG_RQ(req
, "HIT completion");
41 env_atomic_inc(&req
->cache
->core
[req
->core_id
].
42 counters
->cache_errors
.read
);
43 ocf_engine_push_req_front_pt(req
);
48 /* Complete request */
49 req
->complete(req
, req
->error
);
51 /* Free the request at the last point
52 * of the completion path
59 static void _ocf_read_generic_miss_complete(struct ocf_request
*req
, int error
)
61 struct ocf_cache
*cache
= req
->cache
;
66 /* Handle callback-caller race to let only one of the two complete the
67 * request. Also, complete original request only if this is the last
68 * sub-request to complete
70 if (env_atomic_dec_return(&req
->req_remaining
) == 0) {
71 OCF_DEBUG_RQ(req
, "MISS completion");
75 * --- Do not submit this request to write-back-thread.
78 req
->complete(req
, req
->error
);
80 req
->info
.core_error
= 1;
81 env_atomic_inc(&cache
->core
[req
->core_id
].
82 counters
->core_errors
.read
);
84 ctx_data_free(cache
->owner
, req
->cp_data
);
87 /* Invalidate metadata */
88 ocf_engine_invalidate(req
);
93 /* Copy pages to copy vec, since this is the one needed
96 ctx_data_cpy(cache
->owner
, req
->cp_data
, req
->data
, 0, 0,
99 /* Complete request */
100 req
->complete(req
, req
->error
);
102 ocf_engine_backfill(req
);
106 static inline void _ocf_read_generic_submit_hit(struct ocf_request
*req
)
108 env_atomic_set(&req
->req_remaining
, ocf_engine_io_count(req
));
110 ocf_submit_cache_reqs(req
->cache
, req
->map
, req
, OCF_READ
,
111 ocf_engine_io_count(req
), _ocf_read_generic_hit_complete
);
114 static inline void _ocf_read_generic_submit_miss(struct ocf_request
*req
)
116 struct ocf_cache
*cache
= req
->cache
;
119 env_atomic_set(&req
->req_remaining
, 1);
121 req
->cp_data
= ctx_data_alloc(cache
->owner
,
122 BYTES_TO_PAGES(req
->byte_length
));
126 ret
= ctx_data_mlock(cache
->owner
, req
->cp_data
);
130 /* Submit read request to core device. */
131 ocf_submit_volume_req(&cache
->core
[req
->core_id
].volume
, req
,
132 _ocf_read_generic_miss_complete
);
137 _ocf_read_generic_miss_complete(req
, -ENOMEM
);
140 static int _ocf_read_generic_do(struct ocf_request
*req
)
142 struct ocf_cache
*cache
= req
->cache
;
144 if (ocf_engine_is_miss(req
) && req
->map
->rd_locked
) {
145 /* Miss can be handled only on write locks.
146 * Need to switch to PT
148 OCF_DEBUG_RQ(req
, "Switching to PT");
153 /* Get OCF request - increase reference counter */
156 if (ocf_engine_is_miss(req
)) {
157 if (req
->info
.dirty_any
) {
158 OCF_METADATA_LOCK_RD();
160 /* Request is dirty need to clean request */
161 ocf_engine_clean(req
);
163 OCF_METADATA_UNLOCK_RD();
165 /* We need to clean request before processing, return */
171 OCF_METADATA_LOCK_RD();
173 /* Set valid status bits map */
174 ocf_set_valid_map_info(req
);
176 OCF_METADATA_UNLOCK_RD();
179 if (req
->info
.re_part
) {
180 OCF_DEBUG_RQ(req
, "Re-Part");
182 OCF_METADATA_LOCK_WR();
184 /* Probably some cache lines are assigned into wrong
185 * partition. Need to move it to new one
189 OCF_METADATA_UNLOCK_WR();
192 OCF_DEBUG_RQ(req
, "Submit");
195 if (ocf_engine_is_hit(req
))
196 _ocf_read_generic_submit_hit(req
);
198 _ocf_read_generic_submit_miss(req
);
200 /* Updata statistics */
201 ocf_engine_update_request_stats(req
);
202 ocf_engine_update_block_stats(req
);
204 /* Put OCF request - decrease reference counter */
210 static const struct ocf_io_if _io_if_read_generic_resume
= {
211 .read
= _ocf_read_generic_do
,
212 .write
= _ocf_read_generic_do
,
215 int ocf_read_generic(struct ocf_request
*req
)
218 int lock
= OCF_LOCK_NOT_ACQUIRED
;
219 struct ocf_cache
*cache
= req
->cache
;
221 ocf_io_start(req
->io
);
223 if (env_atomic_read(&cache
->pending_read_misses_list_blocked
)) {
224 /* There are conditions to bypass IO */
225 ocf_get_io_if(ocf_cache_mode_pt
)->read(req
);
229 /* Get OCF request - increase reference counter */
232 /* Set resume call backs */
233 req
->resume
= ocf_engine_on_resume
;
234 req
->io_if
= &_io_if_read_generic_resume
;
236 /*- Metadata RD access -----------------------------------------------*/
238 OCF_METADATA_LOCK_RD();
240 /* Traverse request to cache if there is hit */
241 ocf_engine_traverse(req
);
243 mapped
= ocf_engine_is_mapped(req
);
245 /* Request is fully mapped, no need to call eviction */
246 if (ocf_engine_is_hit(req
)) {
247 /* There is a hit, lock request for READ access */
248 lock
= ocf_req_trylock_rd(req
);
250 /* All cache line mapped, but some sectors are not valid
251 * and cache insert will be performed - lock for
254 lock
= ocf_req_trylock_wr(req
);
258 OCF_METADATA_UNLOCK_RD();
260 /*- END Metadata RD access -------------------------------------------*/
264 /*- Metadata WR access ---------------------------------------*/
266 OCF_METADATA_LOCK_WR();
268 /* Now there is exclusive access for metadata. May traverse once
269 * again. If there are misses need to call eviction. This
270 * process is called 'mapping'.
274 if (!req
->info
.eviction_error
) {
275 if (ocf_engine_is_hit(req
)) {
276 /* After mapping turns out there is hit,
277 * so lock OCF request for read access
279 lock
= ocf_req_trylock_rd(req
);
281 /* Miss, new cache lines were mapped,
282 * need to lock OCF request for write access
284 lock
= ocf_req_trylock_wr(req
);
287 OCF_METADATA_UNLOCK_WR();
289 /*- END Metadata WR access -----------------------------------*/
292 if (!req
->info
.eviction_error
) {
294 if (lock
!= OCF_LOCK_ACQUIRED
) {
295 /* Lock was not acquired, need to wait for resume */
296 OCF_DEBUG_RQ(req
, "NO LOCK");
298 /* Lock was acquired can perform IO */
299 _ocf_read_generic_do(req
);
302 OCF_DEBUG_RQ(req
, "LOCK ERROR %d", lock
);
303 req
->complete(req
, lock
);
308 ocf_get_io_if(ocf_cache_mode_pt
)->read(req
);
312 /* Put OCF request - decrease reference counter */