/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */
#include "../metadata/metadata.h"
#include "../engine/cache_engine.h"
#include "../engine/engine_common.h"
#include "../concurrency/ocf_concurrency.h"
#include "utils_cleaner.h"
#include "utils_req.h"
#include "utils_cache_line.h"
#define OCF_UTILS_CLEANER_DEBUG 0

#if 1 == OCF_UTILS_CLEANER_DEBUG
#define OCF_DEBUG_TRACE(cache) \
	ocf_cache_log(cache, log_info, "[Utils][cleaner] %s\n", __func__)

#define OCF_DEBUG_MSG(cache, msg) \
	ocf_cache_log(cache, log_info, "[Utils][cleaner] %s - %s\n", \
			__func__, msg)

#define OCF_DEBUG_PARAM(cache, format, ...) \
	ocf_cache_log(cache, log_info, "[Utils][cleaner] %s - "format"\n", \
			__func__, ##__VA_ARGS__)
#else
#define OCF_DEBUG_TRACE(cache)
#define OCF_DEBUG_MSG(cache, msg)
#define OCF_DEBUG_PARAM(cache, format, ...)
#endif
/*
 * Allocate cleaning request
 */
static struct ocf_request *_ocf_cleaner_alloc_req(struct ocf_cache *cache,
		uint32_t count, const struct ocf_cleaner_attribs *attribs)
{
	struct ocf_request *req = ocf_req_new_extended(attribs->io_queue, NULL,
			0, count * ocf_line_size(cache), OCF_READ);
	int ret;

	if (!req)
		return NULL;

	req->info.internal = true;
	req->info.cleaner_cache_line_lock = attribs->cache_line_lock;

	/* Allocate pages for cleaning IO */
	req->data = ctx_data_alloc(cache->owner,
			ocf_line_size(cache) / PAGE_SIZE * count);
	if (!req->data) {
		ocf_req_put(req);
		return NULL;
	}

	ret = ctx_data_mlock(cache->owner, req->data);
	if (ret) {
		ctx_data_free(cache->owner, req->data);
		ocf_req_put(req);
		return NULL;
	}

	return req;
}
enum {
	ocf_cleaner_req_type_master = 1,
	ocf_cleaner_req_type_slave = 2
};
static struct ocf_request *_ocf_cleaner_alloc_master_req(
		struct ocf_cache *cache, uint32_t count,
		const struct ocf_cleaner_attribs *attribs)
{
	struct ocf_request *req = _ocf_cleaner_alloc_req(cache, count, attribs);

	if (!req)
		return NULL;

	/* Set type of cleaning request */
	req->master_io_req_type = ocf_cleaner_req_type_master;

	/* In master, save completion context and function */
	req->priv = attribs->cmpl_context;
	req->master_io_req = attribs->cmpl_fn;

	/* The count of all requests */
	env_atomic_set(&req->master_remaining, 1);

	OCF_DEBUG_PARAM(cache, "New master request, count = %u", count);

	return req;
}
static struct ocf_request *_ocf_cleaner_alloc_slave_req(
		struct ocf_request *master,
		uint32_t count, const struct ocf_cleaner_attribs *attribs)
{
	struct ocf_request *req = _ocf_cleaner_alloc_req(
			master->cache, count, attribs);

	if (!req)
		return NULL;

	/* Set type of cleaning request */
	req->master_io_req_type = ocf_cleaner_req_type_slave;

	/* Slave refers to master request, get its reference counter */
	ocf_req_get(master);

	/* Slave request contains reference to master */
	req->master_io_req = master;

	/* One more additional slave request, increase global counter */
	env_atomic_inc(&master->master_remaining);

	OCF_DEBUG_PARAM(req->cache,
		"New slave request, count = %u, all requests count = %d",
		count, env_atomic_read(&master->master_remaining));

	return req;
}
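/*
 * The master request carries the caller's completion callback and context and
 * starts with master_remaining = 1. Each slave request takes a reference on
 * the master and bumps master_remaining; completion of every request
 * decrements it, and the caller's callback fires only when the counter
 * reaches zero.
 */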
static void _ocf_cleaner_dealloc_req(struct ocf_request *req)
{
	if (ocf_cleaner_req_type_slave == req->master_io_req_type) {
		/* Slave contains reference to the master request,
		 * release reference counter
		 */
		struct ocf_request *master = req->master_io_req;

		OCF_DEBUG_MSG(req->cache, "Put master request by slave");
		ocf_req_put(master);

		OCF_DEBUG_MSG(req->cache, "Free slave request");
	} else if (ocf_cleaner_req_type_master == req->master_io_req_type) {
		OCF_DEBUG_MSG(req->cache, "Free master request");
	}

	ctx_data_secure_erase(req->cache->owner, req->data);
	ctx_data_munlock(req->cache->owner, req->data);
	ctx_data_free(req->cache->owner, req->data);
	ocf_req_put(req);
}
/*
 * cleaner - Record cleaning error in the master request
 */
static void _ocf_cleaner_set_error(struct ocf_request *req)
{
	struct ocf_request *master = NULL;

	if (ocf_cleaner_req_type_master == req->master_io_req_type) {
		master = req;
	} else if (ocf_cleaner_req_type_slave == req->master_io_req_type) {
		master = req->master_io_req;
	} else {
		ENV_BUG();
		return;
	}

	master->error = -EIO;
}
static void _ocf_cleaner_complete_req(struct ocf_request *req)
{
	struct ocf_request *master = NULL;
	ocf_req_end_t cmpl;

	if (ocf_cleaner_req_type_master == req->master_io_req_type) {
		OCF_DEBUG_MSG(req->cache, "Master completion");
		master = req;
	} else if (ocf_cleaner_req_type_slave == req->master_io_req_type) {
		OCF_DEBUG_MSG(req->cache, "Slave completion");
		master = req->master_io_req;
	} else {
		ENV_BUG();
		return;
	}

	OCF_DEBUG_PARAM(req->cache, "Master requests remaining = %d",
			env_atomic_read(&master->master_remaining));

	if (env_atomic_dec_return(&master->master_remaining)) {
		/* Not all requests completed */
		return;
	}

	OCF_DEBUG_MSG(req->cache, "All cleaning request completed");

	/* Only master contains completion function and completion context */
	cmpl = master->master_io_req;
	cmpl(master->priv, master->error);
}
/*
 * cleaner - Cache line lock, function locks cache lines depending on attributes
 */
static int _ocf_cleaner_cache_line_lock(struct ocf_request *req)
{
	if (!req->info.cleaner_cache_line_lock)
		return OCF_LOCK_ACQUIRED;

	OCF_DEBUG_TRACE(req->cache);

	return ocf_req_trylock_rd(req);
}
/*
 * cleaner - Cache line unlock, function unlocks cache lines
 * depending on attributes
 */
static void _ocf_cleaner_cache_line_unlock(struct ocf_request *req)
{
	if (req->info.cleaner_cache_line_lock) {
		OCF_DEBUG_TRACE(req->cache);
		ocf_req_unlock(req);
	}
}
static bool _ocf_cleaner_sector_is_dirty(struct ocf_cache *cache,
		ocf_cache_line_t line, uint8_t sector)
{
	bool dirty = metadata_test_dirty_one(cache, line, sector);
	bool valid = metadata_test_valid_one(cache, line, sector);

	if (!valid && dirty) {
		/* not valid but dirty - IMPROPER STATE!!! */
		ENV_BUG();
	}

	return valid ? dirty : false;
}
static void _ocf_cleaner_finish_req(struct ocf_request *req)
{
	/* Handle cache lines unlocks */
	_ocf_cleaner_cache_line_unlock(req);

	/* Signal completion to the caller of cleaning */
	_ocf_cleaner_complete_req(req);

	/* Free allocated resources */
	_ocf_cleaner_dealloc_req(req);
}
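/*
 * The cleaning pipeline below is driven by swapping req->io_if and re-pushing
 * the request to its queue: cache reads (_io_if_fire_cache) are followed by
 * core writes (_io_if_fire_core), core flushes (_io_if_flush_cores), a
 * metadata update (_io_if_update_metadata) and finally a cache volume flush
 * (_io_if_flush_cache), whose completion finishes the request.
 */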
static void _ocf_cleaner_flush_cache_io_end(struct ocf_io *io, int error)
{
	struct ocf_request *req = io->priv1;

	if (error) {
		ocf_metadata_error(req->cache);
		req->error = error;
	}

	OCF_DEBUG_MSG(req->cache, "Cache flush finished");

	_ocf_cleaner_finish_req(req);

	ocf_io_put(io);
}
static int _ocf_cleaner_fire_flush_cache(struct ocf_request *req)
{
	struct ocf_io *io;

	OCF_DEBUG_TRACE(req->cache);

	io = ocf_volume_new_io(&req->cache->device->volume);
	if (!io) {
		ocf_metadata_error(req->cache);
		req->error = -ENOMEM;
		_ocf_cleaner_finish_req(req);
		return 0;
	}

	ocf_io_configure(io, 0, 0, OCF_WRITE, 0, 0);
	ocf_io_set_cmpl(io, req, NULL, _ocf_cleaner_flush_cache_io_end);
	ocf_io_set_queue(io, req->io_queue);

	ocf_volume_submit_flush(io);

	return 0;
}
static const struct ocf_io_if _io_if_flush_cache = {
	.read = _ocf_cleaner_fire_flush_cache,
	.write = _ocf_cleaner_fire_flush_cache,
};
static void _ocf_cleaner_metadata_io_end(struct ocf_request *req, int error)
{
	if (error) {
		ocf_metadata_error(req->cache);
		req->error = error;
		_ocf_cleaner_finish_req(req);
		return;
	}

	OCF_DEBUG_MSG(req->cache, "Metadata flush finished");

	req->io_if = &_io_if_flush_cache;
	ocf_engine_push_req_front(req, true);
}
static int _ocf_cleaner_update_metadata(struct ocf_request *req)
{
	struct ocf_cache *cache = req->cache;
	const struct ocf_map_info *iter = req->map;
	uint32_t i;
	ocf_cache_line_t cache_line;

	OCF_DEBUG_TRACE(req->cache);

	OCF_METADATA_LOCK_WR();
	/* Update metadata */
	for (i = 0; i < req->core_line_count; i++, iter++) {
		if (iter->status == LOOKUP_MISS)
			continue;

		if (iter->invalid) {
			/* An error, do not clean */
			continue;
		}

		cache_line = iter->coll_idx;

		if (!metadata_test_dirty(cache, cache_line))
			continue;

		ocf_metadata_get_core_and_part_id(cache, cache_line,
				&req->core_id, &req->part_id);

		set_cache_line_clean(cache, 0, ocf_line_end_sector(cache), req,
				i);
	}

	ocf_metadata_flush_do_asynch(cache, req, _ocf_cleaner_metadata_io_end);
	OCF_METADATA_UNLOCK_WR();

	return 0;
}
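/*
 * The metadata update runs under the metadata write lock: every successfully
 * cleaned, still-dirty line is marked clean, and the modified metadata is then
 * flushed asynchronously; _ocf_cleaner_metadata_io_end picks the pipeline up
 * again once that flush completes.
 */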
static const struct ocf_io_if _io_if_update_metadata = {
	.read = _ocf_cleaner_update_metadata,
	.write = _ocf_cleaner_update_metadata,
};
static void _ocf_cleaner_flush_cores_io_end(struct ocf_map_info *map,
		struct ocf_request *req, int error)
{
	uint32_t i;
	struct ocf_map_info *iter = req->map;

	if (error) {
		/* Flush error, set error for all cache lines of this core */
		for (i = 0; i < req->core_line_count; i++, iter++) {
			if (iter->status == LOOKUP_MISS)
				continue;

			if (iter->core_id == map->core_id)
				iter->invalid = true;
		}

		_ocf_cleaner_set_error(req);
	}

	if (env_atomic_dec_return(&req->req_remaining))
		return;

	OCF_DEBUG_MSG(req->cache, "Core flush finished");

	/*
	 * All core flush requests done, switch to post cleaning activities
	 */
	req->io_if = &_io_if_update_metadata;
	ocf_engine_push_req_front(req, true);
}
static void _ocf_cleaner_flush_cores_io_cmpl(struct ocf_io *io, int error)
{
	_ocf_cleaner_flush_cores_io_end(io->priv1, io->priv2, error);

	ocf_io_put(io);
}
static int _ocf_cleaner_fire_flush_cores(struct ocf_request *req)
{
	uint32_t i;
	ocf_core_id_t core_id = OCF_CORE_MAX;
	struct ocf_cache *cache = req->cache;
	struct ocf_map_info *iter = req->map;
	struct ocf_io *io;

	OCF_DEBUG_TRACE(req->cache);

	/* Protect IO completion race */
	env_atomic_set(&req->req_remaining, 1);

	/* Submit flush requests */
	for (i = 0; i < req->core_line_count; i++, iter++) {
		if (iter->invalid) {
			/* IO error, skip this item */
			continue;
		}

		if (iter->status == LOOKUP_MISS)
			continue;

		if (core_id == iter->core_id)
			continue;

		core_id = iter->core_id;

		env_atomic_inc(&req->req_remaining);

		io = ocf_new_core_io(cache, core_id);
		if (!io) {
			_ocf_cleaner_flush_cores_io_end(iter, req, -ENOMEM);
			continue;
		}

		ocf_io_configure(io, 0, 0, OCF_WRITE, 0, 0);
		ocf_io_set_cmpl(io, iter, req, _ocf_cleaner_flush_cores_io_cmpl);
		ocf_io_set_queue(io, req->io_queue);

		ocf_volume_submit_flush(io);
	}

	/* Protect IO completion race */
	_ocf_cleaner_flush_cores_io_end(NULL, req, 0);

	return 0;
}
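/*
 * Note the req_remaining pattern used above: the counter is primed before any
 * flush is issued, incremented once per core that gets a flush (the -ENOMEM
 * path completes that slot immediately), and the final call with error == 0
 * drops the initial reference, so the next stage can only start after the
 * submit loop has finished even if individual IOs complete immediately.
 */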
static const struct ocf_io_if _io_if_flush_cores = {
	.read = _ocf_cleaner_fire_flush_cores,
	.write = _ocf_cleaner_fire_flush_cores,
};
static void _ocf_cleaner_core_io_end(struct ocf_request *req)
{
	if (env_atomic_dec_return(&req->req_remaining))
		return;

	OCF_DEBUG_MSG(req->cache, "Core writes finished");

	/*
	 * All core writes done, now we can submit core flushes.
	 * Move processing to thread, where IO will be (and can be) submitted
	 */
	req->io_if = &_io_if_flush_cores;
	ocf_engine_push_req_front(req, true);
}
static void _ocf_cleaner_core_io_cmpl(struct ocf_io *io, int error)
{
	struct ocf_map_info *map = io->priv1;
	struct ocf_request *req = io->priv2;

	if (error) {
		map->invalid = true;
		_ocf_cleaner_set_error(req);
		env_atomic_inc(&req->cache->core[map->core_id].counters->
				core_errors.write);
	}

	_ocf_cleaner_core_io_end(req);

	ocf_io_put(io);
}
static void _ocf_cleaner_core_io_for_dirty_range(struct ocf_request *req,
		struct ocf_map_info *iter, uint64_t begin, uint64_t end)
{
	uint64_t addr, offset;
	int err;
	struct ocf_cache *cache = req->cache;
	struct ocf_io *io;
	struct ocf_counters_block *core_stats =
			&cache->core[iter->core_id].counters->core_blocks;
	ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache,
			iter->coll_idx);

	io = ocf_new_core_io(cache, iter->core_id);
	if (!io)
		goto error;

	addr = (ocf_line_size(cache) * iter->core_line)
			+ SECTORS_TO_BYTES(begin);
	offset = (ocf_line_size(cache) * iter->hash_key)
			+ SECTORS_TO_BYTES(begin);

	ocf_io_configure(io, addr, SECTORS_TO_BYTES(end - begin), OCF_WRITE,
			part_id, 0);
	ocf_io_set_queue(io, req->io_queue);
	err = ocf_io_set_data(io, req->data, offset);
	if (err) {
		ocf_io_put(io);
		goto error;
	}

	ocf_io_set_cmpl(io, iter, req, _ocf_cleaner_core_io_cmpl);

	env_atomic64_add(SECTORS_TO_BYTES(end - begin), &core_stats->write_bytes);

	OCF_DEBUG_PARAM(req->cache, "Core write, line = %llu, "
			"sector = %llu, count = %llu", iter->core_line, begin,
			end - begin);

	/* Increase IO counter to be processed */
	env_atomic_inc(&req->req_remaining);

	ocf_volume_submit_io(io);

	return;

error:
	iter->invalid = true;
	_ocf_cleaner_set_error(req);
}
static void _ocf_cleaner_core_submit_io(struct ocf_request *req,
		struct ocf_map_info *iter)
{
	uint64_t i, dirty_start = 0;
	struct ocf_cache *cache = req->cache;
	bool counting_dirty = false;

	/* Check integrity of entry to be cleaned */
	if (metadata_test_valid(cache, iter->coll_idx)
		&& metadata_test_dirty(cache, iter->coll_idx)) {

		_ocf_cleaner_core_io_for_dirty_range(req, iter, 0,
				ocf_line_sectors(cache));

		return;
	}

	/* Sector cleaning, a little effort is required for this */
	for (i = 0; i < ocf_line_sectors(cache); i++) {
		if (!_ocf_cleaner_sector_is_dirty(cache, iter->coll_idx, i)) {
			if (counting_dirty) {
				counting_dirty = false;
				_ocf_cleaner_core_io_for_dirty_range(req, iter,
						dirty_start, i);
			}

			continue;
		}

		if (!counting_dirty) {
			counting_dirty = true;
			dirty_start = i;
		}
	}

	if (counting_dirty)
		_ocf_cleaner_core_io_for_dirty_range(req, iter, dirty_start, i);
}
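/*
 * Dirty-range coalescing: if the whole cache line tests as valid and dirty,
 * it is written back with a single core IO covering all of its sectors;
 * otherwise the sector state is scanned and each contiguous run of dirty
 * sectors becomes one write, so a fragmented line produces several smaller
 * core IOs rather than one full-line write.
 */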
static int _ocf_cleaner_fire_core(struct ocf_request *req)
{
	uint32_t i;
	struct ocf_map_info *iter;

	OCF_DEBUG_TRACE(req->cache);

	/* Protect IO completion race */
	env_atomic_set(&req->req_remaining, 1);

	/* Submit writes to the core */
	for (i = 0; i < req->core_line_count; i++) {
		iter = &(req->map[i]);

		if (iter->invalid) {
			/* IO read error on cache, skip this item */
			continue;
		}

		if (iter->status == LOOKUP_MISS)
			continue;

		_ocf_cleaner_core_submit_io(req, iter);
	}

	/* Protect IO completion race */
	_ocf_cleaner_core_io_end(req);

	return 0;
}
static const struct ocf_io_if _io_if_fire_core = {
	.read = _ocf_cleaner_fire_core,
	.write = _ocf_cleaner_fire_core,
};
static void _ocf_cleaner_cache_io_end(struct ocf_request *req)
{
	if (env_atomic_dec_return(&req->req_remaining))
		return;

	/*
	 * All cache read requests done, now we can submit writes to cores.
	 * Move processing to thread, where IO will be (and can be) submitted
	 */
	req->io_if = &_io_if_fire_core;
	ocf_engine_push_req_front(req, true);

	OCF_DEBUG_MSG(req->cache, "Cache reads finished");
}
static void _ocf_cleaner_cache_io_cmpl(struct ocf_io *io, int error)
{
	struct ocf_map_info *map = io->priv1;
	struct ocf_request *req = io->priv2;

	if (error) {
		map->invalid = true;
		_ocf_cleaner_set_error(req);
		env_atomic_inc(&req->cache->core[map->core_id].counters->
				cache_errors.read);
	}

	_ocf_cleaner_cache_io_end(req);

	ocf_io_put(io);
}
/*
 * cleaner - Traverse cache lines to be cleaned, detect sequential IO, and
 * perform cache reads and core writes
 */
static int _ocf_cleaner_fire_cache(struct ocf_request *req)
{
	struct ocf_cache *cache = req->cache;
	uint32_t i;
	struct ocf_map_info *iter = req->map;
	uint64_t addr, offset;
	ocf_part_id_t part_id;
	struct ocf_io *io;
	int err;
	struct ocf_counters_block *cache_stats;

	/* Protect IO completion race */
	env_atomic_inc(&req->req_remaining);

	for (i = 0; i < req->core_line_count; i++, iter++) {
		if (iter->core_id == OCF_CORE_MAX)
			continue;

		if (iter->status == LOOKUP_MISS)
			continue;

		cache_stats = &cache->core[iter->core_id].
				counters->cache_blocks;

		io = ocf_new_cache_io(cache);
		if (!io) {
			/* Allocation error */
			iter->invalid = true;
			_ocf_cleaner_set_error(req);
			continue;
		}

		OCF_DEBUG_PARAM(req->cache, "Cache read, line = %u",
				iter->coll_idx);

		addr = ocf_metadata_map_lg2phy(cache,
				iter->coll_idx);
		addr *= ocf_line_size(cache);
		addr += cache->device->metadata_offset;

		offset = ocf_line_size(cache) * iter->hash_key;

		part_id = ocf_metadata_get_partition_id(cache, iter->coll_idx);

		ocf_io_set_cmpl(io, iter, req, _ocf_cleaner_cache_io_cmpl);
		ocf_io_configure(io, addr, ocf_line_size(cache), OCF_READ,
				part_id, 0);
		ocf_io_set_queue(io, req->io_queue);
		err = ocf_io_set_data(io, req->data, offset);
		if (err) {
			ocf_io_put(io);
			iter->invalid = true;
			_ocf_cleaner_set_error(req);
			continue;
		}

		env_atomic64_add(ocf_line_size(cache), &cache_stats->read_bytes);

		ocf_volume_submit_io(io);
	}

	/* Protect IO completion race */
	_ocf_cleaner_cache_io_end(req);

	return 0;
}
static const struct ocf_io_if _io_if_fire_cache = {
	.read = _ocf_cleaner_fire_cache,
	.write = _ocf_cleaner_fire_cache,
};
static void _ocf_cleaner_on_resume(struct ocf_request *req)
{
	OCF_DEBUG_TRACE(req->cache);
	ocf_engine_push_req_front(req, true);
}
static int _ocf_cleaner_fire(struct ocf_request *req)
{
	int result;

	/* Set resume callback */
	req->resume = _ocf_cleaner_on_resume;
	req->io_if = &_io_if_fire_cache;

	/* Handle cache lines locks */
	result = _ocf_cleaner_cache_line_lock(req);

	if (result >= 0) {
		if (result == OCF_LOCK_ACQUIRED) {
			OCF_DEBUG_MSG(req->cache, "Lock acquired");
			_ocf_cleaner_fire_cache(req);
		} else {
			/* Lock not acquired, request resumes via callback */
			OCF_DEBUG_MSG(req->cache, "NO Lock");
		}
		result = 0;
	} else {
		OCF_DEBUG_MSG(req->cache, "Lock error");
	}

	return result;
}
/* Helper function for 'sort' */
static int _ocf_cleaner_cmp_private(const void *a, const void *b)
{
	struct ocf_map_info *_a = (struct ocf_map_info *)a;
	struct ocf_map_info *_b = (struct ocf_map_info *)b;

	static uint32_t step = 0;

	OCF_COND_RESCHED_DEFAULT(step);

	if (_a->core_id == _b->core_id)
		return (_a->core_line > _b->core_line) ? 1 : -1;

	return (_a->core_id > _b->core_id) ? 1 : -1;
}
/**
 * Prepare cleaning request to be fired
 *
 * @param req cleaning request
 * @param i_out number of already filled map requests (remaining to be filled
 *        with fake MISS entries)
 * @param do_sort sort the request map by core id and core line before firing
 */
static int _ocf_cleaner_do_fire(struct ocf_request *req, uint32_t i_out,
		bool do_sort)
{
	uint32_t i;

	/* Set counts of cache IOs */
	env_atomic_set(&req->req_remaining, i_out);

	/* fill tail of a request with fake MISSes so that it won't
	 * be cleaned
	 */
	for (; i_out < req->core_line_count; ++i_out) {
		req->map[i_out].core_id = OCF_CORE_MAX;
		req->map[i_out].core_line = ULLONG_MAX;
		req->map[i_out].status = LOOKUP_MISS;
		req->map[i_out].hash_key = i_out;
	}

	if (do_sort) {
		/* Sort by core id and core line */
		env_sort(req->map, req->core_line_count, sizeof(req->map[0]),
				_ocf_cleaner_cmp_private, NULL);
		for (i = 0; i < req->core_line_count; i++)
			req->map[i].hash_key = i;
	}

	/* issue actual request */
	return _ocf_cleaner_fire(req);
}
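/*
 * hash_key doubles as the index of a map entry's slot in req->data: both the
 * cache read and the core write compute their buffer offset as
 * ocf_line_size(cache) * hash_key. After sorting, the keys are reassigned to
 * match the new positions so that buffer slots stay in map order.
 */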
static inline uint32_t _ocf_cleaner_get_req_max_count(uint32_t count,
		bool low_mem)
{
	if (low_mem || count <= 4096)
		return count < 128 ? count : 128;

	return 1024;
}
static void _ocf_cleaner_fire_error(struct ocf_request *master,
		struct ocf_request *req, int err)
{
	master->error = err;
	_ocf_cleaner_complete_req(req);
	_ocf_cleaner_dealloc_req(req);
}
/*
 * cleaner - Main function
 */
void ocf_cleaner_fire(struct ocf_cache *cache,
		const struct ocf_cleaner_attribs *attribs)
{
	uint32_t i, i_out = 0, count = attribs->count;
	/* max cache lines to be cleaned with one request: 1024 if over 4k lines
	 * are to be flushed, otherwise 128. For large cleaning operations, 1024
	 * is the optimal number, but for smaller ones it is too large to
	 * benefit from cleaning request overlapping
	 */
	uint32_t max = _ocf_cleaner_get_req_max_count(count, false);
	ocf_cache_line_t cache_line;
	/* it is possible that more than one cleaning request will be generated
	 * for each cleaning order, thus multiple allocations. At the end of
	 * the loop, req is set to NULL and NOT deallocated, as deallocation is
	 * handled in completion.
	 * In addition, the first request is called the master and contains the
	 * completion context. Subsequent requests are called slave requests
	 * and contain a reference to the master request.
	 */
	struct ocf_request *req = NULL, *master;
	int err;
	ocf_core_id_t core_id;
	uint64_t core_sector;

	/* Allocate master request */
	master = _ocf_cleaner_alloc_master_req(cache, max, attribs);
	if (!master) {
		/* Some memory allocation error, try re-allocate request */
		max = _ocf_cleaner_get_req_max_count(count, true);
		master = _ocf_cleaner_alloc_master_req(cache, max, attribs);
	}

	if (!master) {
		attribs->cmpl_fn(attribs->cmpl_context, -ENOMEM);
		return;
	}

	req = master;

	/* prevent cleaning completion race */
	env_atomic_inc(&master->master_remaining);

	for (i = 0; i < count; i++) {
		/* when request hasn't yet been allocated or is just issued */
		if (!req) {
			if (max > count - i) {
				/* less than max left */
				max = count - i;
			}

			req = _ocf_cleaner_alloc_slave_req(master, max, attribs);
		}

		if (!req) {
			/* Some memory allocation error,
			 * try re-allocate request
			 */
			max = _ocf_cleaner_get_req_max_count(max, true);
			req = _ocf_cleaner_alloc_slave_req(master, max, attribs);
		}

		/* when request allocation failed stop processing */
		if (!req) {
			master->error = -ENOMEM;
			break;
		}

		if (attribs->getter(cache, attribs->getter_context,
				i, &cache_line)) {
			OCF_DEBUG_MSG(cache, "Skip");
			continue;
		}

		/* when line already cleaned - rare condition under heavy
		 * I/O workload
		 */
		if (!metadata_test_dirty(cache, cache_line)) {
			OCF_DEBUG_MSG(cache, "Not dirty");
			continue;
		}

		if (!metadata_test_valid_any(cache, cache_line)) {
			OCF_DEBUG_MSG(cache, "No any valid");

			/*
			 * Extremely disturbing cache line state
			 * Cache line (sector) cannot be dirty and not valid
			 */
			continue;
		}

		/* Get mapping info */
		ocf_metadata_get_core_info(cache, cache_line, &core_id,
				&core_sector);

		if (unlikely(!cache->core[core_id].opened)) {
			OCF_DEBUG_MSG(cache, "Core object inactive");
			continue;
		}

		req->map[i_out].core_id = core_id;
		req->map[i_out].core_line = core_sector;
		req->map[i_out].coll_idx = cache_line;
		req->map[i_out].status = LOOKUP_HIT;
		req->map[i_out].hash_key = i_out;
		i_out++;

		if (i_out == max) {
			err = _ocf_cleaner_do_fire(req, i_out, attribs->do_sort);
			if (err) {
				_ocf_cleaner_fire_error(master, req, err);
				req = NULL;
				break;
			}

			i_out = 0;
			req = NULL;
		}
	}

	if (req) {
		err = _ocf_cleaner_do_fire(req, i_out, attribs->do_sort);
		if (err)
			_ocf_cleaner_fire_error(master, req, err);
	}

	/* prevent cleaning completion race */
	_ocf_cleaner_complete_req(master);
}
static int _ocf_cleaner_do_flush_data_getter(struct ocf_cache *cache,
		void *context, uint32_t item, ocf_cache_line_t *line)
{
	struct flush_data *flush = context;

	if (flush[item].cache_line < cache->device->collision_table_entries) {
		(*line) = flush[item].cache_line;
		return 0;
	}

	return -1;
}
int ocf_cleaner_do_flush_data_async(struct ocf_cache *cache,
		struct flush_data *flush, uint32_t count,
		struct ocf_cleaner_attribs *attribs)
{
	attribs->getter = _ocf_cleaner_do_flush_data_getter;
	attribs->getter_context = flush;
	attribs->count = count;

	ocf_cleaner_fire(cache, attribs);

	return 0;
}
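/*
 * Illustrative call sequence (a sketch, not code from this module): the caller
 * prepares an array of struct flush_data describing the cache lines to clean,
 * fills the remaining ocf_cleaner_attribs fields used by this file
 * (cmpl_fn, cmpl_context, io_queue, cache_line_lock, do_sort) and then calls
 * ocf_cleaner_do_flush_data_async(cache, flush, count, &attribs); completion
 * is reported asynchronously through cmpl_fn(cmpl_context, error).
 */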
/* Helper function for 'sort' */
static int _ocf_cleaner_cmp(const void *a, const void *b)
{
	struct flush_data *_a = (struct flush_data *)a;
	struct flush_data *_b = (struct flush_data *)b;

	/* TODO: FIXME get rid of static */
	static uint32_t step = 0;

	OCF_COND_RESCHED(step, 1000000);

	if (_a->core_id == _b->core_id)
		return (_a->core_line > _b->core_line) ? 1 : -1;

	return (_a->core_id > _b->core_id) ? 1 : -1;
}
static void _ocf_cleaner_swap(void *a, void *b, int size)
{
	struct flush_data *_a = (struct flush_data *)a;
	struct flush_data *_b = (struct flush_data *)b;
	struct flush_data t;

	t = *_a;
	*_a = *_b;
	*_b = t;
}
void ocf_cleaner_sort_sectors(struct flush_data *tbl, uint32_t num)
{
	env_sort(tbl, num, sizeof(*tbl), _ocf_cleaner_cmp, _ocf_cleaner_swap);
}
void ocf_cleaner_sort_flush_containers(struct flush_container *fctbl,
		uint32_t num)
{
	uint32_t i;

	for (i = 0; i < num; i++) {
		env_sort(fctbl[i].flush_data, fctbl[i].count,
				sizeof(*fctbl[i].flush_data), _ocf_cleaner_cmp,
				_ocf_cleaner_swap);
	}
}