/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#include "metadata_io.h"
#include "../ocf_priv.h"
#include "../engine/cache_engine.h"
#include "../engine/engine_common.h"
#include "../engine/engine_bf.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_allocator.h"
#include "../utils/utils_io.h"
#include "../ocf_def_priv.h"

#define OCF_METADATA_IO_DEBUG 0

#if 1 == OCF_METADATA_IO_DEBUG
#define OCF_DEBUG_TRACE(cache) \
	ocf_cache_log(cache, log_info, "[Metadata][IO] %s\n", __func__)

#define OCF_DEBUG_MSG(cache, msg) \
	ocf_cache_log(cache, log_info, "[Metadata][IO] %s - %s\n", \
			__func__, msg)

#define OCF_DEBUG_PARAM(cache, format, ...) \
	ocf_cache_log(cache, log_info, "[Metadata][IO] %s - "format"\n", \
			__func__, ##__VA_ARGS__)
#else
#define OCF_DEBUG_TRACE(cache)
#define OCF_DEBUG_MSG(cache, msg)
#define OCF_DEBUG_PARAM(cache, format, ...)
#endif

static void metadata_io_i_asynch_end(struct metadata_io_request *request,
		int error);
static int ocf_restart_meta_io(struct ocf_request *req);

static struct ocf_io_if meta_restart_if = {
		.read = ocf_restart_meta_io,
		.write = ocf_restart_meta_io
};

/*
 * Get max pages for IO
 */
static uint32_t metadata_io_max_page(ocf_cache_t cache)
{
	return ocf_volume_get_max_io_size(&cache->device->volume) / PAGE_SIZE;
}
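
/*
 * Example (illustrative; assumes a volume reporting a 128 KiB max IO size
 * and 4 KiB pages): this limit is what drives the chunking in
 * metadata_io_i_asynch() below.
 *
 *	max_count = 131072 / PAGE_SIZE;        // 32 pages per IO
 *	io_count = OCF_DIV_ROUND_UP(100, 32);  // 4 IOs to cover 100 pages
 */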

/*
 * Iterative read end callback
 */
static void metadata_io_read_i_atomic_end(struct ocf_io *io, int error)
{
	struct metadata_io_request_atomic *meta_atom_req = io->priv1;

	OCF_DEBUG_TRACE(ocf_volume_get_cache(io->volume));

	meta_atom_req->error |= error;
	env_completion_complete(&meta_atom_req->complete);
}

/*
 * Iterative read request
 * TODO: Make this function asynchronous to enable async recovery
 */
int metadata_io_read_i_atomic(ocf_cache_t cache, ocf_queue_t queue,
		void *context, ocf_metadata_atomic_io_event_t drain_hndl,
		ocf_metadata_io_end_t compl_hndl)
{
	uint64_t i;
	uint64_t max_sectors_count = PAGE_SIZE / OCF_ATOMIC_METADATA_SIZE;
	uint64_t io_sectors_count = cache->device->collision_table_entries *
			ocf_line_sectors(cache);
	uint64_t count, curr_count;
	int result = 0;
	struct ocf_io *io;
	ctx_data_t *data;
	struct metadata_io_request_atomic meta_atom_req;
	unsigned char step = 0;

	OCF_DEBUG_TRACE(cache);

	/* Allocate one 4k page for metadata */
	data = ctx_data_alloc(cache->owner, 1);
	if (!data)
		return -OCF_ERR_NO_MEM;

	count = io_sectors_count;
	for (i = 0; i < io_sectors_count; i += curr_count) {
		/* Get sectors count of this IO iteration */
		curr_count = OCF_MIN(max_sectors_count, count);

		env_completion_init(&meta_atom_req.complete);
		meta_atom_req.error = 0;

		/* Reset position in data buffer */
		ctx_data_seek(cache->owner, data, ctx_data_seek_begin, 0);

		/* Allocate new IO */
		io = ocf_new_cache_io(cache);
		if (!io) {
			result = -OCF_ERR_NO_MEM;
			break;
		}

		/* Setup IO */
		ocf_io_configure(io,
				cache->device->metadata_offset +
					SECTORS_TO_BYTES(i),
				SECTORS_TO_BYTES(curr_count),
				OCF_READ, 0, 0);

		ocf_io_set_cmpl(io, &meta_atom_req, NULL,
				metadata_io_read_i_atomic_end);
		result = ocf_io_set_data(io, data, 0);
		if (result) {
			ocf_io_put(io);
			break;
		}

		/* Submit IO */
		ocf_volume_submit_metadata(io);
		ocf_io_put(io);

		/* Wait for completion of IO */
		env_completion_wait(&meta_atom_req.complete);

		/* Check for error */
		if (meta_atom_req.error) {
			result = meta_atom_req.error;
			break;
		}

		result |= drain_hndl(cache, i, curr_count, data);
		if (result)
			break;

		count -= curr_count;

		OCF_COND_RESCHED(step, 128);
	}

	/* Memory free */
	ctx_data_free(cache->owner, data);

	compl_hndl(cache, context, result);

	return 0;
}
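
/*
 * Usage sketch (illustrative only; "recovery_drain" and "recovery_complete"
 * are hypothetical names, not OCF API). A caller recovering metadata from an
 * atomic-capable device provides a drain handler that parses the sectors
 * staged in the 4k bounce buffer:
 *
 *	static int recovery_drain(ocf_cache_t cache, uint64_t sector_addr,
 *			uint32_t sector_no, ctx_data_t *data)
 *	{
 *		// Parse sector_no atomic metadata records from "data",
 *		// starting at cache device sector sector_addr
 *		return 0;
 *	}
 *
 *	static void recovery_complete(ocf_cache_t cache, void *context,
 *			int error)
 *	{
 *		// All sectors drained, or error != 0 on failure
 *	}
 *
 *	metadata_io_read_i_atomic(cache, queue, NULL,
 *			recovery_drain, recovery_complete);
 */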

static void metadata_io_i_asynch_cmpl(struct ocf_io *io, int error)
{
	struct metadata_io_request *request = io->priv1;

	metadata_io_i_asynch_end(request, error);

	ocf_io_put(io);
}

static void metadata_io_req_fill(struct metadata_io_request *meta_io_req)
{
	ocf_cache_t cache = meta_io_req->cache;
	int i;

	for (i = 0; i < meta_io_req->count; i++) {
		meta_io_req->on_meta_fill(cache, meta_io_req->data,
			meta_io_req->page + i, meta_io_req->context);
	}
}

static void metadata_io_req_drain(struct metadata_io_request *meta_io_req)
{
	ocf_cache_t cache = meta_io_req->cache;
	int i;

	for (i = 0; i < meta_io_req->count; i++) {
		meta_io_req->on_meta_drain(cache, meta_io_req->data,
			meta_io_req->page + i, meta_io_req->context);
	}
}
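
/*
 * Note: fill and drain are the two directions of the same per-page callback
 * type. On OCF_WRITE, on_meta_fill() serializes one page of in-memory
 * metadata into the request's data buffer before submission; on OCF_READ,
 * on_meta_drain() copies one page back out after completion. A handler
 * sketch (hypothetical name, matching ocf_metadata_io_event_t as used here):
 *
 *	static int raw_page_hndl(ocf_cache_t cache, ctx_data_t *data,
 *			uint32_t page, void *context)
 *	{
 *		// Move PAGE_SIZE bytes between "data" and the metadata
 *		// segment buffer selected by "page" and "context"
 *		return 0;
 *	}
 */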

static int ocf_restart_meta_io(struct ocf_request *req)
{
	struct metadata_io_request *meta_io_req = req->priv;
	ocf_cache_t cache = req->cache;
	struct ocf_io *io;
	int ret;

	/* Fill with the latest metadata. */
	OCF_METADATA_LOCK_RD();
	metadata_io_req_fill(meta_io_req);
	OCF_METADATA_UNLOCK_RD();

	io = ocf_new_cache_io(cache);
	if (!io) {
		metadata_io_i_asynch_end(meta_io_req, -OCF_ERR_NO_MEM);
		return 0;
	}

	/* Setup IO */
	ocf_io_configure(io,
			PAGES_TO_BYTES(meta_io_req->page),
			PAGES_TO_BYTES(meta_io_req->count),
			OCF_WRITE, 0, 0);

	ocf_io_set_cmpl(io, meta_io_req, NULL, metadata_io_i_asynch_cmpl);
	ret = ocf_io_set_data(io, meta_io_req->data, 0);
	if (ret) {
		ocf_io_put(io);
		metadata_io_i_asynch_end(meta_io_req, ret);
		return ret;
	}

	ocf_volume_submit_io(io);
	return 0;
}
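
/*
 * Note: this restart path is how the metadata updater re-drives requests
 * that metadata_io_i_asynch() could not submit immediately because they
 * overlapped an in-flight request (see meta_restart_if above). The fill is
 * repeated under the metadata read lock so the deferred write carries the
 * latest page contents, not the ones captured at enqueue time.
 */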

/*
 * Iterative asynchronous write callback
 */
static void metadata_io_i_asynch_end(struct metadata_io_request *request,
		int error)
{
	struct metadata_io_request_asynch *a_req;
	ocf_cache_t cache;

	OCF_CHECK_NULL(request);

	cache = request->cache;

	a_req = request->asynch;
	OCF_CHECK_NULL(a_req);
	OCF_CHECK_NULL(a_req->on_complete);

	if (error) {
		request->error |= error;
		request->asynch->error |= error;
	} else {
		if (request->fl_req.rw == OCF_READ)
			metadata_io_req_drain(request);
	}

	if (env_atomic_dec_return(&request->req_remaining))
		return;

	OCF_DEBUG_PARAM(cache, "Page = %u", request->page);

	ctx_data_free(cache->owner, request->data);
	request->data = NULL;

	if (env_atomic_dec_return(&a_req->req_remaining)) {
		env_atomic_set(&request->finished, 1);
		ocf_metadata_updater_kick(cache);
		return;
	}

	OCF_DEBUG_MSG(cache, "Asynchronous IO completed");

	/* All IOs have been finished, call IO end callback */
	a_req->on_complete(request->cache, a_req->context, request->error);

	/*
	 * If it's the last request, we mark it as finished
	 * after calling the IO end callback
	 */
	env_atomic_set(&request->finished, 1);
	ocf_metadata_updater_kick(cache);
}
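
/*
 * Note on the two counters above: request->req_remaining gates completion of
 * a single chunk, while a_req->req_remaining counts outstanding chunks of the
 * whole iterative operation; on_complete() fires only when the latter reaches
 * zero. a_req->req_active is decremented separately, so the request array is
 * freed only once no deferred restart can still touch it.
 */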

static void metadata_io_req_error(ocf_cache_t cache,
		struct metadata_io_request_asynch *a_req,
		uint32_t i, int error)
{
	a_req->error |= error;
	a_req->reqs[i].error |= error;
	a_req->reqs[i].count = 0;
	if (a_req->reqs[i].data)
		ctx_data_free(cache->owner, a_req->reqs[i].data);
	a_req->reqs[i].data = NULL;
}

/*
 * Iterative asynchronous write request
 */
static int metadata_io_i_asynch(ocf_cache_t cache, ocf_queue_t queue, int dir,
		void *context, uint32_t page, uint32_t count,
		ocf_metadata_io_event_t io_hndl,
		ocf_metadata_io_end_t compl_hndl)
{
	uint32_t curr_count, written;
	uint32_t max_count = metadata_io_max_page(cache);
	uint32_t io_count = OCF_DIV_ROUND_UP(count, max_count);
	uint32_t i;
	int error = 0, ret;
	struct ocf_io *io;

	/* Allocation and initialization of asynchronous metadata IO request */
	struct metadata_io_request_asynch *a_req;

	if (count == 0)
		return 0;

	a_req = env_zalloc(sizeof(*a_req), ENV_MEM_NOIO);
	if (!a_req)
		return -OCF_ERR_NO_MEM;

	env_atomic_set(&a_req->req_remaining, io_count);
	env_atomic_set(&a_req->req_active, io_count);
	a_req->on_complete = compl_hndl;
	a_req->context = context;
	a_req->page = page;

	/* Allocate particular requests and initialize them */
	OCF_REALLOC_CP(&a_req->reqs, sizeof(a_req->reqs[0]),
			io_count, &a_req->reqs_limit);
	if (!a_req->reqs) {
		env_free(a_req);
		ocf_cache_log(cache, log_warn,
				"No memory during metadata IO\n");
		return -OCF_ERR_NO_MEM;
	}

	/* IO Requests initialization */
	for (i = 0; i < io_count; i++) {
		env_atomic_set(&(a_req->reqs[i].req_remaining), 1);
		env_atomic_set(&(a_req->reqs[i].finished), 0);
		a_req->reqs[i].asynch = a_req;
	}

	OCF_DEBUG_PARAM(cache, "IO count = %u", io_count);

	i = 0;
	written = 0;
	while (count) {
		/* Get pages count of this IO iteration */
		if (count > max_count)
			curr_count = max_count;
		else
			curr_count = count;

		/* Fill request */
		a_req->reqs[i].cache = cache;
		a_req->reqs[i].context = context;
		a_req->reqs[i].page = page + written;
		a_req->reqs[i].count = curr_count;
		a_req->reqs[i].on_meta_fill = io_hndl;
		a_req->reqs[i].on_meta_drain = io_hndl;
		a_req->reqs[i].fl_req.io_if = &meta_restart_if;
		a_req->reqs[i].fl_req.io_queue = queue;
		a_req->reqs[i].fl_req.cache = cache;
		a_req->reqs[i].fl_req.priv = &a_req->reqs[i];
		a_req->reqs[i].fl_req.info.internal = true;
		a_req->reqs[i].fl_req.rw = dir;

		/*
		 * We don't want to allocate a map for this request in
		 * the request service threads.
		 */
		a_req->reqs[i].fl_req.map = LIST_POISON1;

		INIT_LIST_HEAD(&a_req->reqs[i].list);

		a_req->reqs[i].data = ctx_data_alloc(cache->owner, curr_count);
		if (!a_req->reqs[i].data) {
			error = -OCF_ERR_NO_MEM;
			metadata_io_req_error(cache, a_req, i, error);
			break;
		}

		/* Issue IO if it is not overlapping with anything else */
		ret = metadata_updater_check_overlaps(cache, &a_req->reqs[i]);
		if (ret == 0) {
			/* Allocate new IO */
			io = ocf_new_cache_io(cache);
			if (!io) {
				error = -OCF_ERR_NO_MEM;
				metadata_io_req_error(cache, a_req, i, error);
				break;
			}

			if (dir == OCF_WRITE)
				metadata_io_req_fill(&a_req->reqs[i]);

			/* Setup IO */
			ocf_io_configure(io,
					PAGES_TO_BYTES(a_req->reqs[i].page),
					PAGES_TO_BYTES(a_req->reqs[i].count),
					dir, 0, 0);

			ocf_io_set_cmpl(io, &a_req->reqs[i], NULL,
					metadata_io_i_asynch_cmpl);
			error = ocf_io_set_data(io, a_req->reqs[i].data, 0);
			if (error) {
				ocf_io_put(io);
				metadata_io_req_error(cache, a_req, i, error);
				break;
			}

			ocf_volume_submit_io(io);
		}

		count -= curr_count;
		written += curr_count;
		i++;
	}

	if (error == 0) {
		/* No error, return 0 that indicates operation successful */
		return 0;
	}

	OCF_DEBUG_MSG(cache, "ERROR");

	if (i == 0) {
		/*
		 * If no requests were submitted, we just call completion
		 * callback, free memory and return error.
		 */
		compl_hndl(cache, context, error);

		OCF_REALLOC_DEINIT(&a_req->reqs, &a_req->reqs_limit);
		env_free(a_req);

		return error;
	}

	/*
	 * Decrement total remaining requests by the IOs that were not
	 * triggered. If we reached zero, we need to call the completion
	 * callback.
	 */
	if (env_atomic_sub_return(io_count - i, &a_req->req_remaining) == 0)
		compl_hndl(cache, context, error);

	/*
	 * Decrement total active requests by the IOs that were not triggered.
	 * If we reached zero, we need to free memory.
	 */
	if (env_atomic_sub_return(io_count - i, &a_req->req_active) == 0) {
		OCF_REALLOC_DEINIT(&a_req->reqs, &a_req->reqs_limit);
		env_free(a_req);
	}

	return error;
}

int metadata_io_write_i_asynch(ocf_cache_t cache, ocf_queue_t queue,
		void *context, uint32_t page, uint32_t count,
		ocf_metadata_io_event_t fill_hndl,
		ocf_metadata_io_end_t compl_hndl)
{
	return metadata_io_i_asynch(cache, queue, OCF_WRITE, context,
			page, count, fill_hndl, compl_hndl);
}

int metadata_io_read_i_asynch(ocf_cache_t cache, ocf_queue_t queue,
		void *context, uint32_t page, uint32_t count,
		ocf_metadata_io_event_t drain_hndl,
		ocf_metadata_io_end_t compl_hndl)
{
	return metadata_io_i_asynch(cache, queue, OCF_READ, context,
			page, count, drain_hndl, compl_hndl);
}
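
/*
 * Usage sketch (illustrative; "sb_fill" and "sb_complete" are hypothetical
 * names): flushing "count" pages of metadata starting at page "start" via
 * the write wrapper above:
 *
 *	metadata_io_write_i_asynch(cache, cache->mngt_queue, NULL,
 *			start, count, sb_fill, sb_complete);
 *
 * sb_fill() is invoked once per page to serialize that page into the IO
 * buffer; sb_complete() runs once, after the last chunk completes or on
 * the first error.
 */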

int ocf_metadata_io_init(ocf_cache_t cache)
{
	return ocf_metadata_updater_init(cache);
}

void ocf_metadata_io_deinit(ocf_cache_t cache)
{
	ocf_metadata_updater_stop(cache);
}