/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#include "../metadata/metadata.h"
#include "../engine/cache_engine.h"
#include "../engine/engine_common.h"
#include "../concurrency/ocf_concurrency.h"
#include "../ocf_request.h"
#include "utils_cleaner.h"
#include "utils_part.h"
#include "utils_io.h"
#include "utils_cache_line.h"

#define OCF_UTILS_CLEANER_DEBUG 0

#if 1 == OCF_UTILS_CLEANER_DEBUG
#define OCF_DEBUG_TRACE(cache) \
	ocf_cache_log(cache, log_info, "[Utils][cleaner] %s\n", __func__)

#define OCF_DEBUG_MSG(cache, msg) \
	ocf_cache_log(cache, log_info, "[Utils][cleaner] %s - %s\n", \
			__func__, msg)

#define OCF_DEBUG_PARAM(cache, format, ...) \
	ocf_cache_log(cache, log_info, "[Utils][cleaner] %s - "format"\n", \
			__func__, ##__VA_ARGS__)
#else
#define OCF_DEBUG_TRACE(cache)
#define OCF_DEBUG_MSG(cache, msg)
#define OCF_DEBUG_PARAM(cache, format, ...)
#endif

/*
 * Allocate cleaning request
 */
static struct ocf_request *_ocf_cleaner_alloc_req(struct ocf_cache *cache,
		uint32_t count, const struct ocf_cleaner_attribs *attribs)
{
	struct ocf_request *req = ocf_req_new_extended(attribs->io_queue, NULL,
			0, count * ocf_line_size(cache), OCF_READ);
	int ret;

	if (!req)
		return NULL;

	req->info.internal = true;
	req->info.cleaner_cache_line_lock = attribs->cache_line_lock;

	/* Allocate pages for cleaning IO */
	req->data = ctx_data_alloc(cache->owner,
			ocf_line_size(cache) / PAGE_SIZE * count);
	if (!req->data) {
		ocf_req_put(req);
		return NULL;
	}

	ret = ctx_data_mlock(cache->owner, req->data);
	if (ret) {
		ctx_data_free(cache->owner, req->data);
		ocf_req_put(req);
		return NULL;
	}

	return req;
}
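
/*
 * Sizing example (illustrative, assumes the line size is a multiple of
 * PAGE_SIZE): with a 64 KiB cache line and 4 KiB pages, a request
 * covering count lines buffers (64 KiB / 4 KiB) * count = 16 * count
 * pages of data.
 */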

enum {
	ocf_cleaner_req_type_master = 1,
	ocf_cleaner_req_type_slave = 2
};

static struct ocf_request *_ocf_cleaner_alloc_master_req(
		struct ocf_cache *cache, uint32_t count,
		const struct ocf_cleaner_attribs *attribs)
{
	struct ocf_request *req = _ocf_cleaner_alloc_req(cache, count, attribs);

	if (req) {
		/* Set type of cleaning request */
		req->master_io_req_type = ocf_cleaner_req_type_master;

		/* In master, save completion context and function */
		req->priv = attribs->cmpl_context;
		req->master_io_req = attribs->cmpl_fn;

		/* The count of all requests */
		env_atomic_set(&req->master_remaining, 1);

		OCF_DEBUG_PARAM(cache, "New master request, count = %u",
				count);
	}
	return req;
}

static struct ocf_request *_ocf_cleaner_alloc_slave_req(
		struct ocf_request *master,
		uint32_t count, const struct ocf_cleaner_attribs *attribs)
{
	struct ocf_request *req = _ocf_cleaner_alloc_req(
			master->cache, count, attribs);

	if (req) {
		/* Set type of cleaning request */
		req->master_io_req_type = ocf_cleaner_req_type_slave;

		/* Slave refers to the master request; take a reference on it */
		ocf_req_get(master);

		/* Slave request contains reference to master */
		req->master_io_req = master;

		/* One more slave request, increase the global count
		 * of requests
		 */
		env_atomic_inc(&master->master_remaining);

		OCF_DEBUG_PARAM(req->cache,
			"New slave request, count = %u, all requests count = %d",
			count, env_atomic_read(&master->master_remaining));
	}
	return req;
}

static void _ocf_cleaner_dealloc_req(struct ocf_request *req)
{
	if (ocf_cleaner_req_type_slave == req->master_io_req_type) {
		/* Slave contains reference to the master request,
		 * release reference counter
		 */
		struct ocf_request *master = req->master_io_req;

		OCF_DEBUG_MSG(req->cache, "Put master request by slave");
		ocf_req_put(master);

		OCF_DEBUG_MSG(req->cache, "Free slave request");
	} else if (ocf_cleaner_req_type_master == req->master_io_req_type) {
		OCF_DEBUG_MSG(req->cache, "Free master request");
	} else {
		ENV_BUG();
	}

	ctx_data_secure_erase(req->cache->owner, req->data);
	ctx_data_munlock(req->cache->owner, req->data);
	ctx_data_free(req->cache->owner, req->data);
	ocf_req_put(req);
}

/*
 * cleaner - Set error result on the cleaning request
 */
static void _ocf_cleaner_set_error(struct ocf_request *req)
{
	struct ocf_request *master = NULL;

	if (ocf_cleaner_req_type_master == req->master_io_req_type) {
		master = req;
	} else if (ocf_cleaner_req_type_slave == req->master_io_req_type) {
		master = req->master_io_req;
	} else {
		ENV_BUG();
		return;
	}

	master->error = -OCF_ERR_IO;
}

static void _ocf_cleaner_complete_req(struct ocf_request *req)
{
	struct ocf_request *master = NULL;
	ocf_req_end_t cmpl;

	if (ocf_cleaner_req_type_master == req->master_io_req_type) {
		OCF_DEBUG_MSG(req->cache, "Master completion");
		master = req;
	} else if (ocf_cleaner_req_type_slave == req->master_io_req_type) {
		OCF_DEBUG_MSG(req->cache, "Slave completion");
		master = req->master_io_req;
	} else {
		ENV_BUG();
		return;
	}

	OCF_DEBUG_PARAM(req->cache, "Master requests remaining = %d",
			env_atomic_read(&master->master_remaining));

	if (env_atomic_dec_return(&master->master_remaining)) {
		/* Not all requests completed */
		return;
	}

	OCF_DEBUG_MSG(req->cache, "All cleaning requests completed");

	/* Only master contains completion function and completion context */
	cmpl = master->master_io_req;
	cmpl(master->priv, master->error);
}
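
/*
 * Counter walkthrough (illustrative): a master fired together with two
 * slaves holds master_remaining == 1 (its own count) + 2 (one per slave)
 * + 1 (the guard reference taken in ocf_cleaner_fire()). Each request
 * completion decrements the counter; only the final decrement, to zero,
 * invokes the stored completion function.
 */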

static void _ocf_cleaner_on_resume(struct ocf_request *req)
{
	OCF_DEBUG_TRACE(req->cache);
	ocf_engine_push_req_front(req, true);
}

/*
 * cleaner - Cache line lock, locks cache lines depending on attributes
 */
static int _ocf_cleaner_cache_line_lock(struct ocf_request *req)
{
	if (!req->info.cleaner_cache_line_lock)
		return OCF_LOCK_ACQUIRED;

	OCF_DEBUG_TRACE(req->cache);

	return ocf_req_async_lock_rd(req, _ocf_cleaner_on_resume);
}

/*
 * cleaner - Cache line unlock, unlocks cache lines
 * depending on attributes
 */
static void _ocf_cleaner_cache_line_unlock(struct ocf_request *req)
{
	if (req->info.cleaner_cache_line_lock) {
		OCF_DEBUG_TRACE(req->cache);
		ocf_req_unlock(req);
	}
}

static bool _ocf_cleaner_sector_is_dirty(struct ocf_cache *cache,
		ocf_cache_line_t line, uint8_t sector)
{
	bool dirty = metadata_test_dirty_one(cache, line, sector);
	bool valid = metadata_test_valid_one(cache, line, sector);

	if (!valid && dirty) {
		/* not valid but dirty - IMPROPER STATE!!! */
		ENV_BUG();
	}

	return valid ? dirty : false;
}

static void _ocf_cleaner_finish_req(struct ocf_request *req)
{
	/* Handle cache lines unlocks */
	_ocf_cleaner_cache_line_unlock(req);

	/* Signal completion to the caller of cleaning */
	_ocf_cleaner_complete_req(req);

	/* Free allocated resources */
	_ocf_cleaner_dealloc_req(req);
}

static void _ocf_cleaner_flush_cache_io_end(struct ocf_io *io, int error)
{
	struct ocf_request *req = io->priv1;

	if (error) {
		ocf_metadata_error(req->cache);
		req->error = error;
	}

	OCF_DEBUG_MSG(req->cache, "Cache flush finished");

	_ocf_cleaner_finish_req(req);

	ocf_io_put(io);
}

static int _ocf_cleaner_fire_flush_cache(struct ocf_request *req)
{
	struct ocf_io *io;

	OCF_DEBUG_TRACE(req->cache);

	io = ocf_new_cache_io(req->cache, req->io_queue, 0, 0, OCF_WRITE, 0, 0);
	if (!io) {
		ocf_metadata_error(req->cache);
		req->error = -OCF_ERR_NO_MEM;
		return -OCF_ERR_NO_MEM;
	}

	ocf_io_set_cmpl(io, req, NULL, _ocf_cleaner_flush_cache_io_end);

	ocf_volume_submit_flush(io);

	return 0;
}

static const struct ocf_io_if _io_if_flush_cache = {
	.read = _ocf_cleaner_fire_flush_cache,
	.write = _ocf_cleaner_fire_flush_cache,
};

static void _ocf_cleaner_metadata_io_end(struct ocf_request *req, int error)
{
	if (error) {
		ocf_metadata_error(req->cache);
		req->error = error;
		_ocf_cleaner_finish_req(req);
		return;
	}

	OCF_DEBUG_MSG(req->cache, "Metadata flush finished");

	req->io_if = &_io_if_flush_cache;
	ocf_engine_push_req_front(req, true);
}

static int _ocf_cleaner_update_metadata(struct ocf_request *req)
{
	struct ocf_cache *cache = req->cache;
	const struct ocf_map_info *iter = req->map;
	uint32_t i;
	ocf_cache_line_t cache_line;
	ocf_core_id_t core_id;

	OCF_DEBUG_TRACE(req->cache);

	ocf_metadata_start_exclusive_access(&cache->metadata.lock);
	/* Update metadata */
	for (i = 0; i < req->core_line_count; i++, iter++) {
		if (iter->status == LOOKUP_MISS)
			continue;

		if (iter->invalid) {
			/* An error, do not clean */
			continue;
		}

		cache_line = iter->coll_idx;

		if (!metadata_test_dirty(cache, cache_line))
			continue;

		ocf_metadata_get_core_and_part_id(cache, cache_line,
				&core_id, &req->part_id);
		req->core = &cache->core[core_id];

		set_cache_line_clean(cache, 0, ocf_line_end_sector(cache), req,
				i);
	}

	ocf_metadata_flush_do_asynch(cache, req, _ocf_cleaner_metadata_io_end);
	ocf_metadata_end_exclusive_access(&cache->metadata.lock);

	return 0;
}

static const struct ocf_io_if _io_if_update_metadata = {
	.read = _ocf_cleaner_update_metadata,
	.write = _ocf_cleaner_update_metadata,
};

static void _ocf_cleaner_flush_cores_io_end(struct ocf_map_info *map,
		struct ocf_request *req, int error)
{
	uint32_t i;
	struct ocf_map_info *iter = req->map;

	if (error) {
		/* Flush error, set error for all cache lines of this core */
		for (i = 0; i < req->core_line_count; i++, iter++) {
			if (iter->status == LOOKUP_MISS)
				continue;

			if (iter->core_id == map->core_id)
				iter->invalid = true;
		}

		_ocf_cleaner_set_error(req);
	}

	if (env_atomic_dec_return(&req->req_remaining))
		return;

	OCF_DEBUG_MSG(req->cache, "Core flush finished");

	/*
	 * All core writes done, switch to post cleaning activities
	 */
	req->io_if = &_io_if_update_metadata;
	ocf_engine_push_req_front(req, true);
}

static void _ocf_cleaner_flush_cores_io_cmpl(struct ocf_io *io, int error)
{
	_ocf_cleaner_flush_cores_io_end(io->priv1, io->priv2, error);

	ocf_io_put(io);
}

static int _ocf_cleaner_fire_flush_cores(struct ocf_request *req)
{
	uint32_t i;
	ocf_core_id_t core_id = OCF_CORE_MAX;
	struct ocf_cache *cache = req->cache;
	struct ocf_map_info *iter = req->map;
	ocf_core_t core;
	struct ocf_io *io;

	OCF_DEBUG_TRACE(req->cache);

	/* Protect IO completion race */
	env_atomic_set(&req->req_remaining, 1);

	/* Submit flush requests */
	for (i = 0; i < req->core_line_count; i++, iter++) {
		if (iter->invalid) {
			/* IO error, skip this item */
			continue;
		}

		if (iter->status == LOOKUP_MISS)
			continue;

		if (core_id == iter->core_id)
			continue;

		core_id = iter->core_id;

		env_atomic_inc(&req->req_remaining);

		core = ocf_cache_get_core(cache, core_id);
		io = ocf_new_core_io(core, req->io_queue, 0, 0,
				OCF_WRITE, 0, 0);
		if (!io) {
			_ocf_cleaner_flush_cores_io_end(iter, req, -OCF_ERR_NO_MEM);
			continue;
		}

		ocf_io_set_cmpl(io, iter, req, _ocf_cleaner_flush_cores_io_cmpl);

		ocf_volume_submit_flush(io);
	}

	/* Protect IO completion race */
	_ocf_cleaner_flush_cores_io_end(NULL, req, 0);

	return 0;
}
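
/*
 * Notes (for reference): req_remaining starts at 1 and the trailing
 * _ocf_cleaner_flush_cores_io_end(NULL, req, 0) call drops that guard,
 * so the completion path cannot run until every flush has been
 * submitted, even if all IOs complete synchronously. Also, the core_id
 * check above only skips adjacent duplicates, so it assumes lines of
 * the same core are adjacent in the map (see _ocf_cleaner_do_fire()
 * and the do_sort attribute).
 */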

static const struct ocf_io_if _io_if_flush_cores = {
	.read = _ocf_cleaner_fire_flush_cores,
	.write = _ocf_cleaner_fire_flush_cores,
};

static void _ocf_cleaner_core_io_end(struct ocf_request *req)
{
	if (env_atomic_dec_return(&req->req_remaining))
		return;

	OCF_DEBUG_MSG(req->cache, "Core writes finished");

	/*
	 * All core write requests done, now we can submit core flushes.
	 * Move processing to a thread, where IO will be (and can be)
	 * submitted
	 */
	req->io_if = &_io_if_flush_cores;
	ocf_engine_push_req_front(req, true);
}

static void _ocf_cleaner_core_io_cmpl(struct ocf_io *io, int error)
{
	struct ocf_map_info *map = io->priv1;
	struct ocf_request *req = io->priv2;
	ocf_core_t core = ocf_cache_get_core(req->cache, map->core_id);

	if (error) {
		map->invalid |= 1;
		_ocf_cleaner_set_error(req);
		ocf_core_stats_core_error_update(core, OCF_WRITE);
	}

	_ocf_cleaner_core_io_end(req);

	ocf_io_put(io);
}

static void _ocf_cleaner_core_io_for_dirty_range(struct ocf_request *req,
		struct ocf_map_info *iter, uint64_t begin, uint64_t end)
{
	uint64_t addr, offset;
	int err;
	ocf_cache_t cache = req->cache;
	struct ocf_io *io;
	ocf_core_t core = ocf_cache_get_core(cache, iter->core_id);
	ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache,
			iter->coll_idx);

	addr = (ocf_line_size(cache) * iter->core_line)
			+ SECTORS_TO_BYTES(begin);
	offset = (ocf_line_size(cache) * iter->hash)
			+ SECTORS_TO_BYTES(begin);

	io = ocf_new_core_io(core, req->io_queue, addr,
			SECTORS_TO_BYTES(end - begin), OCF_WRITE, part_id, 0);
	if (!io)
		goto error;

	err = ocf_io_set_data(io, req->data, offset);
	if (err) {
		ocf_io_put(io);
		goto error;
	}

	ocf_io_set_cmpl(io, iter, req, _ocf_cleaner_core_io_cmpl);

	ocf_core_stats_core_block_update(core, part_id, OCF_WRITE,
			SECTORS_TO_BYTES(end - begin));

	OCF_DEBUG_PARAM(req->cache, "Core write, line = %llu, "
			"sector = %llu, count = %llu", iter->core_line, begin,
			end - begin);

	/* Increase IO counter to be processed */
	env_atomic_inc(&req->req_remaining);

	/* Send IO */
	ocf_volume_submit_io(io);

	return;
error:
	iter->invalid = true;
	_ocf_cleaner_set_error(req);
}

static void _ocf_cleaner_core_submit_io(struct ocf_request *req,
		struct ocf_map_info *iter)
{
	uint64_t i, dirty_start = 0;
	struct ocf_cache *cache = req->cache;
	bool counting_dirty = false;

	/* Check integrity of entry to be cleaned */
	if (metadata_test_valid(cache, iter->coll_idx)
			&& metadata_test_dirty(cache, iter->coll_idx)) {

		_ocf_cleaner_core_io_for_dirty_range(req, iter, 0,
				ocf_line_sectors(cache));

		return;
	}

	/* Sector cleaning, a little more effort is required here */
	for (i = 0; i < ocf_line_sectors(cache); i++) {
		if (!_ocf_cleaner_sector_is_dirty(cache, iter->coll_idx, i)) {
			if (counting_dirty) {
				counting_dirty = false;
				_ocf_cleaner_core_io_for_dirty_range(req, iter,
						dirty_start, i);
			}

			continue;
		}

		if (!counting_dirty) {
			counting_dirty = true;
			dirty_start = i;
		}

	}

	if (counting_dirty)
		_ocf_cleaner_core_io_for_dirty_range(req, iter, dirty_start, i);
}
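
/*
 * Example (illustrative): for an 8-sector cache line with per-sector
 * dirty bits 0 1 1 0 0 1 1 1, the loop above issues two core writes:
 * one for sectors [1, 3) and one for sectors [5, 8), the latter emitted
 * by the trailing counting_dirty check after the loop.
 */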

static int _ocf_cleaner_fire_core(struct ocf_request *req)
{
	uint32_t i;
	struct ocf_map_info *iter;

	OCF_DEBUG_TRACE(req->cache);

	/* Protect IO completion race */
	env_atomic_set(&req->req_remaining, 1);

	/* Submit writes to the core */
	for (i = 0; i < req->core_line_count; i++) {
		iter = &(req->map[i]);

		if (iter->invalid) {
			/* IO read error on cache, skip this item */
			continue;
		}

		if (iter->status == LOOKUP_MISS)
			continue;

		_ocf_cleaner_core_submit_io(req, iter);
	}

	/* Protect IO completion race */
	_ocf_cleaner_core_io_end(req);

	return 0;
}

static const struct ocf_io_if _io_if_fire_core = {
	.read = _ocf_cleaner_fire_core,
	.write = _ocf_cleaner_fire_core,
};

static void _ocf_cleaner_cache_io_end(struct ocf_request *req)
{
	if (env_atomic_dec_return(&req->req_remaining))
		return;

	/*
	 * All cache read requests done, now we can submit writes to cores.
	 * Move processing to a thread, where IO will be (and can be)
	 * submitted
	 */
	req->io_if = &_io_if_fire_core;
	ocf_engine_push_req_front(req, true);

	OCF_DEBUG_MSG(req->cache, "Cache reads finished");
}

static void _ocf_cleaner_cache_io_cmpl(struct ocf_io *io, int error)
{
	struct ocf_map_info *map = io->priv1;
	struct ocf_request *req = io->priv2;
	ocf_core_t core = ocf_cache_get_core(req->cache, map->core_id);

	if (error) {
		map->invalid |= 1;
		_ocf_cleaner_set_error(req);
		ocf_core_stats_cache_error_update(core, OCF_READ);
	}

	_ocf_cleaner_cache_io_end(req);

	ocf_io_put(io);
}

/*
 * cleaner - Traverse cache lines to be cleaned and submit the cache
 * reads that feed the subsequent core writes
 */
static int _ocf_cleaner_fire_cache(struct ocf_request *req)
{
	ocf_cache_t cache = req->cache;
	ocf_core_t core;
	uint32_t i;
	struct ocf_map_info *iter = req->map;
	uint64_t addr, offset;
	ocf_part_id_t part_id;
	struct ocf_io *io;
	int err;

	/* Protect IO completion race */
	env_atomic_inc(&req->req_remaining);

	for (i = 0; i < req->core_line_count; i++, iter++) {
		core = ocf_cache_get_core(cache, iter->core_id);
		if (!core)
			continue;
		if (iter->status == LOOKUP_MISS)
			continue;

		OCF_DEBUG_PARAM(req->cache, "Cache read, line = %u",
				iter->coll_idx);

		addr = ocf_metadata_map_lg2phy(cache,
				iter->coll_idx);
		addr *= ocf_line_size(cache);
		addr += cache->device->metadata_offset;

		offset = ocf_line_size(cache) * iter->hash;

		part_id = ocf_metadata_get_partition_id(cache, iter->coll_idx);

		io = ocf_new_cache_io(cache, req->io_queue,
				addr, ocf_line_size(cache),
				OCF_READ, part_id, 0);
		if (!io) {
			/* Allocation error */
			iter->invalid = true;
			_ocf_cleaner_set_error(req);
			continue;
		}

		ocf_io_set_cmpl(io, iter, req, _ocf_cleaner_cache_io_cmpl);
		err = ocf_io_set_data(io, req->data, offset);
		if (err) {
			ocf_io_put(io);
			iter->invalid = true;
			_ocf_cleaner_set_error(req);
			continue;
		}

		ocf_core_stats_cache_block_update(core, part_id, OCF_READ,
				ocf_line_size(cache));

		ocf_volume_submit_io(io);
	}

	/* Protect IO completion race */
	_ocf_cleaner_cache_io_end(req);

	return 0;
}

static const struct ocf_io_if _io_if_fire_cache = {
	.read = _ocf_cleaner_fire_cache,
	.write = _ocf_cleaner_fire_cache,
};

static int _ocf_cleaner_fire(struct ocf_request *req)
{
	int result;

	req->io_if = &_io_if_fire_cache;

	/* Handle cache lines locks */
	result = _ocf_cleaner_cache_line_lock(req);

	if (result >= 0) {
		if (result == OCF_LOCK_ACQUIRED) {
			OCF_DEBUG_MSG(req->cache, "Lock acquired");
			_ocf_cleaner_fire_cache(req);
		} else {
			OCF_DEBUG_MSG(req->cache, "NO Lock");
		}
		return 0;
	} else {
		OCF_DEBUG_MSG(req->cache, "Lock error");
	}

	return result;
}
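
/*
 * Stage overview (for reference): a fired cleaning request walks the
 * io_if chain _io_if_fire_cache -> _io_if_fire_core ->
 * _io_if_flush_cores -> _io_if_update_metadata -> _io_if_flush_cache,
 * i.e. cache reads, core writes, core flushes, metadata update and
 * flush, then a final cache flush, with each stage re-queued via
 * ocf_engine_push_req_front().
 */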

/* Helper function for 'sort' */
static int _ocf_cleaner_cmp_private(const void *a, const void *b)
{
	struct ocf_map_info *_a = (struct ocf_map_info *)a;
	struct ocf_map_info *_b = (struct ocf_map_info *)b;

	static uint32_t step = 0;

	OCF_COND_RESCHED_DEFAULT(step);

	if (_a->core_id == _b->core_id)
		return (_a->core_line > _b->core_line) ? 1 : -1;

	return (_a->core_id > _b->core_id) ? 1 : -1;
}

/**
 * Prepare cleaning request to be fired
 *
 * @param req cleaning request
 * @param i_out number of already filled map entries (the remainder is
 *	filled with misses)
 * @param do_sort sort the map by core id and core line before firing
 */
static int _ocf_cleaner_do_fire(struct ocf_request *req, uint32_t i_out,
		bool do_sort)
{
	uint32_t i;
	/* Set counts of cache IOs */
	env_atomic_set(&req->req_remaining, i_out);

	/* fill tail of a request with fake MISSes so that it won't
	 * be cleaned
	 */
	for (; i_out < req->core_line_count; ++i_out) {
		req->map[i_out].core_id = OCF_CORE_MAX;
		req->map[i_out].core_line = ULLONG_MAX;
		req->map[i_out].status = LOOKUP_MISS;
		req->map[i_out].hash = i_out;
	}

	if (do_sort) {
		/* Sort by core id and core line */
		env_sort(req->map, req->core_line_count, sizeof(req->map[0]),
				_ocf_cleaner_cmp_private, NULL);
		for (i = 0; i < req->core_line_count; i++)
			req->map[i].hash = i;
	}

	/* issue actual request */
	return _ocf_cleaner_fire(req);
}

static inline uint32_t _ocf_cleaner_get_req_max_count(uint32_t count,
		bool low_mem)
{
	if (low_mem || count <= 4096)
		return count < 128 ? count : 128;

	return 1024;
}
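
/*
 * Examples (illustrative): count = 100 -> 100, count = 1000 -> 128,
 * count = 4096 -> 128, count = 10000 -> 1024; with low_mem set the
 * result is always capped at 128.
 */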

static void _ocf_cleaner_fire_error(struct ocf_request *master,
		struct ocf_request *req, int err)
{
	master->error = err;
	_ocf_cleaner_complete_req(req);
	_ocf_cleaner_dealloc_req(req);
}

/*
 * cleaner - Main function
 */
void ocf_cleaner_fire(struct ocf_cache *cache,
		const struct ocf_cleaner_attribs *attribs)
{
	uint32_t i, i_out = 0, count = attribs->count;
	/* max cache lines to be cleaned with one request: 1024 when more
	 * than 4096 lines are to be flushed, otherwise 128. For large
	 * cleaning operations 1024 is the optimal number, but for smaller
	 * ones it is too large to benefit from overlapping cleaning
	 * requests
	 */
	uint32_t max = _ocf_cleaner_get_req_max_count(count, false);
	ocf_cache_line_t cache_line;
	/* it is possible that more than one cleaning request will be
	 * generated for each cleaning order, thus multiple allocations.
	 * At the end of the loop, req is set to NULL and NOT deallocated,
	 * as deallocation is handled in the completion path.
	 * In addition, the first request is called the master and carries
	 * the completion context and function; each succeeding request is
	 * called a slave and holds a reference to the master request
	 */
	struct ocf_request *req = NULL, *master;
	int err;
	ocf_core_id_t core_id;
	uint64_t core_sector;

	/* Allocate master request */
	master = _ocf_cleaner_alloc_master_req(cache, max, attribs);

	if (!master) {
		/* Some memory allocation error, try to re-allocate request */
		max = _ocf_cleaner_get_req_max_count(count, true);
		master = _ocf_cleaner_alloc_master_req(cache, max, attribs);
	}

	if (!master) {
		attribs->cmpl_fn(attribs->cmpl_context, -OCF_ERR_NO_MEM);
		return;
	}

	req = master;

	/* prevent cleaning completion race */
	ocf_req_get(master);
	env_atomic_inc(&master->master_remaining);

	for (i = 0; i < count; i++) {

		/* when the request hasn't been allocated yet or the previous
		 * one was just issued
		 */
		if (!req) {
			if (max > count - i) {
				/* less than max left */
				max = count - i;
			}

			req = _ocf_cleaner_alloc_slave_req(master, max, attribs);
		}

		if (!req) {
			/* Some memory allocation error,
			 * try to re-allocate request
			 */
			max = _ocf_cleaner_get_req_max_count(max, true);
			req = _ocf_cleaner_alloc_slave_req(master, max, attribs);
		}

		/* when request allocation failed stop processing */
		if (!req) {
			master->error = -OCF_ERR_NO_MEM;
			break;
		}

		if (attribs->getter(cache, attribs->getter_context,
				i, &cache_line)) {
			OCF_DEBUG_MSG(cache, "Skip");
			continue;
		}

		/* when the line is already clean - a rare condition under
		 * heavy I/O workload
		 */
		if (!metadata_test_dirty(cache, cache_line)) {
			OCF_DEBUG_MSG(cache, "Not dirty");
			continue;
		}

		if (!metadata_test_valid_any(cache, cache_line)) {
			OCF_DEBUG_MSG(cache, "No any valid");

			/*
			 * Extremely disturbing cache line state
			 * Cache line (sector) cannot be dirty and not valid
			 */
			ENV_BUG();
			continue;
		}

		/* Get mapping info */
		ocf_metadata_get_core_info(cache, cache_line, &core_id,
				&core_sector);

		if (unlikely(!cache->core[core_id].opened)) {
			OCF_DEBUG_MSG(cache, "Core object inactive");
			continue;
		}

		req->map[i_out].core_id = core_id;
		req->map[i_out].core_line = core_sector;
		req->map[i_out].coll_idx = cache_line;
		req->map[i_out].status = LOOKUP_HIT;
		req->map[i_out].hash = i_out;
		i_out++;

		if (max == i_out) {
			err = _ocf_cleaner_do_fire(req, i_out, attribs->do_sort);
			if (err) {
				_ocf_cleaner_fire_error(master, req, err);
				req = NULL;
				break;
			}
			i_out = 0;
			req = NULL;
		}
	}

	if (req) {
		err = _ocf_cleaner_do_fire(req, i_out, attribs->do_sort);
		if (err)
			_ocf_cleaner_fire_error(master, req, err);
		req = NULL;
	}

	/* prevent cleaning completion race */
	_ocf_cleaner_complete_req(master);
	ocf_req_put(master);
}
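
#if 0
/*
 * Minimal usage sketch (illustrative only, excluded from the build):
 * the attribs fields below are the ones this file reads;
 * my_line_getter and my_cleaning_cmpl are hypothetical callbacks
 * supplied by the caller.
 */
static int my_line_getter(struct ocf_cache *cache, void *context,
		uint32_t item, ocf_cache_line_t *line)
{
	/* return 0 and set *line, or non-zero to skip this item */
	return -1;
}

static void my_cleaning_cmpl(void *priv, int error)
{
	/* called exactly once, after all master/slave requests complete */
}

static void example_fire(ocf_cache_t cache, ocf_queue_t queue)
{
	struct ocf_cleaner_attribs attribs = {
		.cache_line_lock = true,
		.do_sort = true,
		.cmpl_context = NULL,
		.cmpl_fn = my_cleaning_cmpl,
		.io_queue = queue,
		.getter = my_line_getter,
		.getter_context = NULL,
		.count = 128,
	};

	ocf_cleaner_fire(cache, &attribs);
}
#endif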

static int _ocf_cleaner_do_flush_data_getter(struct ocf_cache *cache,
		void *context, uint32_t item, ocf_cache_line_t *line)
{
	struct flush_data *flush = context;

	if (flush[item].cache_line < cache->device->collision_table_entries) {
		(*line) = flush[item].cache_line;
		return 0;
	} else {
		return -1;
	}
}
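
/* Entries whose cache_line is out of range (>= collision_table_entries)
 * act as "skip" markers: the getter returns non-zero and
 * ocf_cleaner_fire() leaves them out of the request map.
 */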

int ocf_cleaner_do_flush_data_async(struct ocf_cache *cache,
		struct flush_data *flush, uint32_t count,
		struct ocf_cleaner_attribs *attribs)
{
	attribs->getter = _ocf_cleaner_do_flush_data_getter;
	attribs->getter_context = flush;
	attribs->count = count;

	ocf_cleaner_fire(cache, attribs);

	return 0;
}

/* Helper function for 'sort' */
static int _ocf_cleaner_cmp(const void *a, const void *b)
{
	struct flush_data *_a = (struct flush_data *)a;
	struct flush_data *_b = (struct flush_data *)b;

	/* TODO: FIXME get rid of static */
	static uint32_t step = 0;

	OCF_COND_RESCHED(step, 1000000)

	if (_a->core_id == _b->core_id)
		return (_a->core_line > _b->core_line) ? 1 : -1;

	return (_a->core_id > _b->core_id) ? 1 : -1;
}

static void _ocf_cleaner_swap(void *a, void *b, int size)
{
	struct flush_data *_a = (struct flush_data *)a;
	struct flush_data *_b = (struct flush_data *)b;
	struct flush_data t;

	t = *_a;
	*_a = *_b;
	*_b = t;
}

void ocf_cleaner_sort_sectors(struct flush_data *tbl, uint32_t num)
{
	env_sort(tbl, num, sizeof(*tbl), _ocf_cleaner_cmp, _ocf_cleaner_swap);
}

void ocf_cleaner_sort_flush_containers(struct flush_container *fctbl,
		uint32_t num)
{
	int i;

	for (i = 0; i < num; i++) {
		env_sort(fctbl[i].flush_data, fctbl[i].count,
				sizeof(*fctbl[i].flush_data), _ocf_cleaner_cmp,
				_ocf_cleaner_swap);
	}
}

void ocf_cleaner_refcnt_freeze(ocf_cache_t cache)
{
	struct ocf_user_part *curr_part;
	ocf_part_id_t part_id;

	for_each_part(cache, curr_part, part_id)
		ocf_refcnt_freeze(&cache->refcnt.cleaning[part_id]);
}

void ocf_cleaner_refcnt_unfreeze(ocf_cache_t cache)
{
	struct ocf_user_part *curr_part;
	ocf_part_id_t part_id;

	for_each_part(cache, curr_part, part_id)
		ocf_refcnt_unfreeze(&cache->refcnt.cleaning[part_id]);
}

static void ocf_cleaner_refcnt_register_zero_cb_finish(void *priv)
{
	struct ocf_cleaner_wait_context *ctx = priv;

	if (!env_atomic_dec_return(&ctx->waiting))
		ctx->cb(ctx->priv);
}

void ocf_cleaner_refcnt_register_zero_cb(ocf_cache_t cache,
		struct ocf_cleaner_wait_context *ctx,
		ocf_cleaner_refcnt_zero_cb_t cb, void *priv)
{
	struct ocf_user_part *curr_part;
	ocf_part_id_t part_id;

	env_atomic_set(&ctx->waiting, 1);
	ctx->cb = cb;
	ctx->priv = priv;

	for_each_part(cache, curr_part, part_id) {
		env_atomic_inc(&ctx->waiting);
		ocf_refcnt_register_zero_cb(&cache->refcnt.cleaning[part_id],
				ocf_cleaner_refcnt_register_zero_cb_finish, ctx);
	}

	ocf_cleaner_refcnt_register_zero_cb_finish(ctx);
}
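
/*
 * Note (for reference): ctx->waiting starts at 1 and the final
 * ocf_cleaner_refcnt_register_zero_cb_finish(ctx) call above drops that
 * guard, so ctx->cb cannot fire before a zero-callback has been
 * registered for every partition; this mirrors the completion-race
 * guard used with req_remaining and master_remaining earlier in this
 * file.
 */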