/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#include "../metadata/metadata.h"
#include "../engine/cache_engine.h"
#include "../engine/engine_common.h"
#include "../concurrency/ocf_concurrency.h"
#include "utils_cleaner.h"
#include "utils_req.h"
#include "utils_io.h"
#include "utils_cache_line.h"

#define OCF_UTILS_CLEANER_DEBUG 0

#if 1 == OCF_UTILS_CLEANER_DEBUG
#define OCF_DEBUG_TRACE(cache) \
        ocf_cache_log(cache, log_info, "[Utils][cleaner] %s\n", __func__)

#define OCF_DEBUG_MSG(cache, msg) \
        ocf_cache_log(cache, log_info, "[Utils][cleaner] %s - %s\n", \
                        __func__, msg)

#define OCF_DEBUG_PARAM(cache, format, ...) \
        ocf_cache_log(cache, log_info, "[Utils][cleaner] %s - "format"\n", \
                        __func__, ##__VA_ARGS__)
#else
#define OCF_DEBUG_TRACE(cache)
#define OCF_DEBUG_MSG(cache, msg)
#define OCF_DEBUG_PARAM(cache, format, ...)
#endif
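
/*
 * To trace cleaner activity, flip the define above to 1:
 *
 *   #define OCF_UTILS_CLEANER_DEBUG 1
 *
 * Messages are then emitted through ocf_cache_log() at log_info level,
 * prefixed with "[Utils][cleaner]" and the current function name.
 */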

/*
 * Allocate cleaning request
 */
static struct ocf_request *_ocf_cleaner_alloc_req(struct ocf_cache *cache,
                uint32_t count, const struct ocf_cleaner_attribs *attribs)
{
        struct ocf_request *req = ocf_req_new_extended(attribs->io_queue, NULL,
                        0, count * ocf_line_size(cache), OCF_READ);
        int ret;

        if (!req)
                return NULL;

        req->info.internal = true;
        req->info.cleaner_cache_line_lock = attribs->cache_line_lock;

        /* Allocate pages for cleaning IO */
        req->data = ctx_data_alloc(cache->owner,
                        ocf_line_size(cache) / PAGE_SIZE * count);
        if (!req->data) {
                ocf_req_put(req);
                return NULL;
        }

        ret = ctx_data_mlock(cache->owner, req->data);
        if (ret) {
                ctx_data_free(cache->owner, req->data);
                ocf_req_put(req);
                return NULL;
        }

        return req;
}
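
/*
 * Sizing note (illustrative numbers, not fixed by this code): the data
 * buffer holds one cache line per map entry, allocated in PAGE_SIZE
 * units. With a 64 KiB cache line and 4 KiB pages, a request covering
 * 128 lines allocates 64 KiB / 4 KiB * 128 = 2048 pages (8 MiB). The
 * buffer is mlock-ed, presumably so cleaning IO never stalls on page
 * faults; the actual line size comes from ocf_line_size().
 */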

enum {
        ocf_cleaner_req_type_master = 1,
        ocf_cleaner_req_type_slave = 2
};

static struct ocf_request *_ocf_cleaner_alloc_master_req(
                struct ocf_cache *cache, uint32_t count,
                const struct ocf_cleaner_attribs *attribs)
{
        struct ocf_request *req = _ocf_cleaner_alloc_req(cache, count, attribs);

        if (req) {
                /* Set type of cleaning request */
                req->master_io_req_type = ocf_cleaner_req_type_master;

                /* The master carries the completion function and context */
                req->priv = attribs->cmpl_context;
                req->master_io_req = attribs->cmpl_fn;

                /* The master counts itself as one outstanding request */
                env_atomic_set(&req->master_remaining, 1);

                OCF_DEBUG_PARAM(cache, "New master request, count = %u",
                                count);
        }
        return req;
}
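
/*
 * Note: master_remaining starts at 1, counting the master itself. Each
 * slave increments it, and every request (master included) decrements
 * it on completion, so the completion callback fires exactly once,
 * after the last in-flight request finishes (see
 * _ocf_cleaner_complete_req()).
 */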

static struct ocf_request *_ocf_cleaner_alloc_slave_req(
                struct ocf_request *master,
                uint32_t count, const struct ocf_cleaner_attribs *attribs)
{
        struct ocf_request *req = _ocf_cleaner_alloc_req(
                        master->cache, count, attribs);

        if (req) {
                /* Set type of cleaning request */
                req->master_io_req_type = ocf_cleaner_req_type_slave;

                /* The slave refers to the master request, so take a
                 * reference on it
                 */
                ocf_req_get(master);

                /* Slave request contains reference to master */
                req->master_io_req = master;

                /* One more slave request; increase the master's count
                 * of outstanding requests
                 */
                env_atomic_inc(&master->master_remaining);

                OCF_DEBUG_PARAM(req->cache,
                                "New slave request, count = %u, all requests count = %d",
                                count, env_atomic_read(&master->master_remaining));
        }
        return req;
}
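
/*
 * Lifetime sketch: a slave pins the master with ocf_req_get(), so the
 * master (and the completion context it carries) cannot be freed while
 * any slave is still in flight. _ocf_cleaner_dealloc_req() drops that
 * reference when the slave is destroyed.
 */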

static void _ocf_cleaner_dealloc_req(struct ocf_request *req)
{
        if (ocf_cleaner_req_type_slave == req->master_io_req_type) {
                /* The slave holds a reference to the master request;
                 * release it
                 */
                struct ocf_request *master = req->master_io_req;

                OCF_DEBUG_MSG(req->cache, "Put master request by slave");
                ocf_req_put(master);

                OCF_DEBUG_MSG(req->cache, "Free slave request");
        } else if (ocf_cleaner_req_type_master == req->master_io_req_type) {
                OCF_DEBUG_MSG(req->cache, "Free master request");
        } else {
                ENV_BUG();
        }

        ctx_data_secure_erase(req->cache->owner, req->data);
        ctx_data_munlock(req->cache->owner, req->data);
        ctx_data_free(req->cache->owner, req->data);
        ocf_req_put(req);
}

/*
 * cleaner - Record an error on the master request
 */
static void _ocf_cleaner_set_error(struct ocf_request *req)
{
        struct ocf_request *master = NULL;

        if (ocf_cleaner_req_type_master == req->master_io_req_type) {
                master = req;
        } else if (ocf_cleaner_req_type_slave == req->master_io_req_type) {
                master = req->master_io_req;
        } else {
                ENV_BUG();
                return;
        }

        master->error = -EIO;
}

static void _ocf_cleaner_complete_req(struct ocf_request *req)
{
        struct ocf_request *master = NULL;
        ocf_req_end_t cmpl;

        if (ocf_cleaner_req_type_master == req->master_io_req_type) {
                OCF_DEBUG_MSG(req->cache, "Master completion");
                master = req;
        } else if (ocf_cleaner_req_type_slave == req->master_io_req_type) {
                OCF_DEBUG_MSG(req->cache, "Slave completion");
                master = req->master_io_req;
        } else {
                ENV_BUG();
                return;
        }

        OCF_DEBUG_PARAM(req->cache, "Master requests remaining = %d",
                        env_atomic_read(&master->master_remaining));

        if (env_atomic_dec_return(&master->master_remaining)) {
                /* Not all requests completed */
                return;
        }

        OCF_DEBUG_MSG(req->cache, "All cleaning requests completed");

        /* Only the master carries the completion function and context */
        cmpl = master->master_io_req;
        cmpl(master->priv, master->error);
}
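
/*
 * This is the classic "+1 guard" fan-in pattern: a counter is primed
 * before any sub-IO is submitted and dropped once after the submit
 * loop, so the completion path cannot run while submissions are still
 * being issued. The same idiom protects req_remaining in the fire
 * functions below, e.g. (schematic, _ocf_cleaner_fire_flush_cores()
 * follows exactly this shape):
 *
 *   env_atomic_set(&req->req_remaining, 1);   // guard
 *   for (...)
 *       env_atomic_inc(&req->req_remaining);  // one per IO submitted
 *   _ocf_cleaner_..._io_end(req);             // drop the guard
 */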

/*
 * cleaner - Lock cache lines, depending on the request attributes
 */
static int _ocf_cleaner_cache_line_lock(struct ocf_request *req)
{
        if (!req->info.cleaner_cache_line_lock)
                return OCF_LOCK_ACQUIRED;

        OCF_DEBUG_TRACE(req->cache);

        return ocf_req_trylock_rd(req);
}

/*
 * cleaner - Unlock cache lines, depending on the request attributes
 */
static void _ocf_cleaner_cache_line_unlock(struct ocf_request *req)
{
        if (req->info.cleaner_cache_line_lock) {
                OCF_DEBUG_TRACE(req->cache);
                ocf_req_unlock(req);
        }
}

static bool _ocf_cleaner_sector_is_dirty(struct ocf_cache *cache,
                ocf_cache_line_t line, uint8_t sector)
{
        bool dirty = metadata_test_dirty_one(cache, line, sector);
        bool valid = metadata_test_valid_one(cache, line, sector);

        if (!valid && dirty) {
                /* Dirty but not valid - improper state */
                ENV_BUG();
        }

        return valid ? dirty : false;
}
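
/*
 * Per-sector state, as interpreted above:
 *
 *   valid dirty -> result
 *     0     0      clean (nothing to do)
 *     0     1      ENV_BUG() - dirty data must be valid
 *     1     0      clean
 *     1     1      dirty (needs cleaning)
 */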

static void _ocf_cleaner_finish_req(struct ocf_request *req)
{
        /* Release cache line locks */
        _ocf_cleaner_cache_line_unlock(req);

        /* Signal completion to the caller of cleaning */
        _ocf_cleaner_complete_req(req);

        /* Free allocated resources */
        _ocf_cleaner_dealloc_req(req);
}

static void _ocf_cleaner_flush_cache_io_end(struct ocf_io *io, int error)
{
        struct ocf_request *req = io->priv1;

        if (error) {
                ocf_metadata_error(req->cache);
                req->error = error;
        }

        OCF_DEBUG_MSG(req->cache, "Cache flush finished");

        _ocf_cleaner_finish_req(req);

        ocf_io_put(io);
}

static int _ocf_cleaner_fire_flush_cache(struct ocf_request *req)
{
        struct ocf_io *io;

        OCF_DEBUG_TRACE(req->cache);

        io = ocf_volume_new_io(&req->cache->device->volume);
        if (!io) {
                ocf_metadata_error(req->cache);
                req->error = -ENOMEM;
                return -ENOMEM;
        }

        ocf_io_configure(io, 0, 0, OCF_WRITE, 0, 0);
        ocf_io_set_cmpl(io, req, NULL, _ocf_cleaner_flush_cache_io_end);
        ocf_io_set_queue(io, req->io_queue);

        ocf_volume_submit_flush(io);

        return 0;
}
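
/*
 * A zero-length IO (addr = 0, bytes = 0) appears to be the flush
 * convention here: ocf_volume_submit_flush() only needs the target
 * volume and the completion, so the IO carries no data range. This is
 * the final stage of cleaning - it makes the just-updated metadata
 * durable on the cache volume.
 */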

static const struct ocf_io_if _io_if_flush_cache = {
        .read = _ocf_cleaner_fire_flush_cache,
        .write = _ocf_cleaner_fire_flush_cache,
};

static void _ocf_cleaner_metadata_io_end(struct ocf_request *req, int error)
{
        if (error) {
                ocf_metadata_error(req->cache);
                req->error = error;
                _ocf_cleaner_finish_req(req);
                return;
        }

        OCF_DEBUG_MSG(req->cache, "Metadata flush finished");

        req->io_if = &_io_if_flush_cache;
        ocf_engine_push_req_front(req, true);
}

static int _ocf_cleaner_update_metadata(struct ocf_request *req)
{
        struct ocf_cache *cache = req->cache;
        const struct ocf_map_info *iter = req->map;
        uint32_t i;
        ocf_cache_line_t cache_line;

        OCF_DEBUG_TRACE(req->cache);

        OCF_METADATA_LOCK_WR();
        /* Update metadata */
        for (i = 0; i < req->core_line_count; i++, iter++) {
                if (iter->status == LOOKUP_MISS)
                        continue;

                if (iter->invalid) {
                        /* An error occurred; do not mark this line clean */
                        continue;
                }

                cache_line = iter->coll_idx;

                if (!metadata_test_dirty(cache, cache_line))
                        continue;

                ocf_metadata_get_core_and_part_id(cache, cache_line,
                                &req->core_id, &req->part_id);

                set_cache_line_clean(cache, 0, ocf_line_end_sector(cache), req,
                                i);
        }

        ocf_metadata_flush_do_asynch(cache, req, _ocf_cleaner_metadata_io_end);
        OCF_METADATA_UNLOCK_WR();

        return 0;
}
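
/*
 * Ordering matters across the cleaning pipeline: cache lines are marked
 * clean only after the data has reached the core volumes (writes plus a
 * flush). The stages chain through req->io_if as each one completes:
 *
 *   fire_cache (read cache) -> fire_core (write cores)
 *     -> fire_flush_cores -> update_metadata -> fire_flush_cache
 */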

static const struct ocf_io_if _io_if_update_metadata = {
        .read = _ocf_cleaner_update_metadata,
        .write = _ocf_cleaner_update_metadata,
};

static void _ocf_cleaner_flush_cores_io_end(struct ocf_map_info *map,
                struct ocf_request *req, int error)
{
        uint32_t i;
        struct ocf_map_info *iter = req->map;

        if (error) {
                /* Flush error: mark all cache lines of this core invalid */
                for (i = 0; i < req->core_line_count; i++, iter++) {
                        if (iter->status == LOOKUP_MISS)
                                continue;

                        if (iter->core_id == map->core_id)
                                iter->invalid = true;
                }

                _ocf_cleaner_set_error(req);
        }

        if (env_atomic_dec_return(&req->req_remaining))
                return;

        OCF_DEBUG_MSG(req->cache, "Core flush finished");

        /*
         * All core flushes done, switch to post-cleaning activities
         */
        req->io_if = &_io_if_update_metadata;
        ocf_engine_push_req_front(req, true);
}

static void _ocf_cleaner_flush_cores_io_cmpl(struct ocf_io *io, int error)
{
        _ocf_cleaner_flush_cores_io_end(io->priv1, io->priv2, error);

        ocf_io_put(io);
}

static int _ocf_cleaner_fire_flush_cores(struct ocf_request *req)
{
        uint32_t i;
        ocf_core_id_t core_id = OCF_CORE_MAX;
        struct ocf_cache *cache = req->cache;
        struct ocf_map_info *iter = req->map;
        struct ocf_io *io;

        OCF_DEBUG_TRACE(req->cache);

        /* Protect against the IO completion race */
        env_atomic_set(&req->req_remaining, 1);

        /* Submit flush requests */
        for (i = 0; i < req->core_line_count; i++, iter++) {
                if (iter->invalid) {
                        /* IO error, skip this item */
                        continue;
                }

                if (iter->status == LOOKUP_MISS)
                        continue;

                if (core_id == iter->core_id)
                        continue;

                core_id = iter->core_id;

                env_atomic_inc(&req->req_remaining);

                io = ocf_new_core_io(cache, core_id);
                if (!io) {
                        _ocf_cleaner_flush_cores_io_end(iter, req, -ENOMEM);
                        continue;
                }

                ocf_io_configure(io, 0, 0, OCF_WRITE, 0, 0);
                ocf_io_set_cmpl(io, iter, req, _ocf_cleaner_flush_cores_io_cmpl);
                ocf_io_set_queue(io, req->io_queue);

                ocf_volume_submit_flush(io);
        }

        /* Drop the completion-race guard */
        _ocf_cleaner_flush_cores_io_end(NULL, req, 0);

        return 0;
}
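
/*
 * Only one flush is issued per core: the `core_id == iter->core_id`
 * skip relies on the map being sorted by core id (see
 * _ocf_cleaner_do_fire() with do_sort), which groups all lines of a
 * core together. With an unsorted map the dedup only catches
 * consecutive duplicates, so a core could receive redundant flushes -
 * harmless, but wasteful.
 */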

static const struct ocf_io_if _io_if_flush_cores = {
        .read = _ocf_cleaner_fire_flush_cores,
        .write = _ocf_cleaner_fire_flush_cores,
};

static void _ocf_cleaner_core_io_end(struct ocf_request *req)
{
        if (env_atomic_dec_return(&req->req_remaining))
                return;

        OCF_DEBUG_MSG(req->cache, "Core writes finished");

        /*
         * All core writes done, now flush the cores.
         * Move processing to a thread where IO can be submitted.
         */
        req->io_if = &_io_if_flush_cores;
        ocf_engine_push_req_front(req, true);
}

static void _ocf_cleaner_core_io_cmpl(struct ocf_io *io, int error)
{
        struct ocf_map_info *map = io->priv1;
        struct ocf_request *req = io->priv2;

        if (error) {
                map->invalid |= 1;
                _ocf_cleaner_set_error(req);
                env_atomic_inc(&req->cache->core[map->core_id].counters->
                                core_errors.write);
        }

        _ocf_cleaner_core_io_end(req);

        ocf_io_put(io);
}

static void _ocf_cleaner_core_io_for_dirty_range(struct ocf_request *req,
                struct ocf_map_info *iter, uint64_t begin, uint64_t end)
{
        uint64_t addr, offset;
        int err;
        struct ocf_cache *cache = req->cache;
        struct ocf_io *io;
        struct ocf_counters_block *core_stats =
                        &cache->core[iter->core_id].counters->core_blocks;
        ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache,
                        iter->coll_idx);

        io = ocf_new_core_io(cache, iter->core_id);
        if (!io)
                goto error;

        addr = (ocf_line_size(cache) * iter->core_line)
                        + SECTORS_TO_BYTES(begin);
        offset = (ocf_line_size(cache) * iter->hash_key)
                        + SECTORS_TO_BYTES(begin);

        ocf_io_configure(io, addr, SECTORS_TO_BYTES(end - begin), OCF_WRITE,
                        part_id, 0);
        ocf_io_set_queue(io, req->io_queue);
        err = ocf_io_set_data(io, req->data, offset);
        if (err) {
                ocf_io_put(io);
                goto error;
        }

        ocf_io_set_cmpl(io, iter, req, _ocf_cleaner_core_io_cmpl);

        env_atomic64_add(SECTORS_TO_BYTES(end - begin), &core_stats->write_bytes);

        OCF_DEBUG_PARAM(req->cache, "Core write, line = %llu, "
                        "sector = %llu, count = %llu", iter->core_line, begin,
                        end - begin);

        /* Increase the count of IOs to be processed */
        env_atomic_inc(&req->req_remaining);

        /* Send IO */
        ocf_volume_submit_io(io);

        return;
error:
        iter->invalid = true;
        _ocf_cleaner_set_error(req);
}
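
/*
 * Address math: the core write lands at
 *   addr   = line_size * core_line + SECTORS_TO_BYTES(begin)
 * while its data is taken from the request buffer at
 *   offset = line_size * hash_key + SECTORS_TO_BYTES(begin)
 * hash_key is the entry's index in req->map (set in
 * _ocf_cleaner_do_fire()), so each cache line owns one line-sized slot
 * of req->data. Illustrative numbers, assuming 64 KiB lines and 512 B
 * sectors: core_line = 10, begin = 8 gives addr = 655360 + 4096.
 */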

static void _ocf_cleaner_core_submit_io(struct ocf_request *req,
                struct ocf_map_info *iter)
{
        uint64_t i, dirty_start = 0;
        struct ocf_cache *cache = req->cache;
        bool counting_dirty = false;

        /* If the whole line is valid and dirty, clean it with a single IO */
        if (metadata_test_valid(cache, iter->coll_idx)
                        && metadata_test_dirty(cache, iter->coll_idx)) {

                _ocf_cleaner_core_io_for_dirty_range(req, iter, 0,
                                ocf_line_sectors(cache));

                return;
        }

        /* Per-sector cleaning requires a little more effort: coalesce
         * runs of dirty sectors into contiguous writes
         */
        for (i = 0; i < ocf_line_sectors(cache); i++) {
                if (!_ocf_cleaner_sector_is_dirty(cache, iter->coll_idx, i)) {
                        if (counting_dirty) {
                                counting_dirty = false;
                                _ocf_cleaner_core_io_for_dirty_range(req, iter,
                                                dirty_start, i);
                        }

                        continue;
                }

                if (!counting_dirty) {
                        counting_dirty = true;
                        dirty_start = i;
                }
        }

        if (counting_dirty)
                _ocf_cleaner_core_io_for_dirty_range(req, iter, dirty_start, i);
}
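
/*
 * Coalescing example: for an 8-sector line with dirty map 01110010
 * (sector 0 on the left), the loop above emits two writes - one for
 * sectors 1-3 and one for sector 6. The trailing check after the loop
 * catches a dirty run that extends to the end of the line.
 */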

static int _ocf_cleaner_fire_core(struct ocf_request *req)
{
        uint32_t i;
        struct ocf_map_info *iter;

        OCF_DEBUG_TRACE(req->cache);

        /* Protect against the IO completion race */
        env_atomic_set(&req->req_remaining, 1);

        /* Submit writes to the cores */
        for (i = 0; i < req->core_line_count; i++) {
                iter = &(req->map[i]);

                if (iter->invalid) {
                        /* IO read error on cache, skip this item */
                        continue;
                }

                if (iter->status == LOOKUP_MISS)
                        continue;

                _ocf_cleaner_core_submit_io(req, iter);
        }

        /* Drop the completion-race guard */
        _ocf_cleaner_core_io_end(req);

        return 0;
}

static const struct ocf_io_if _io_if_fire_core = {
        .read = _ocf_cleaner_fire_core,
        .write = _ocf_cleaner_fire_core,
};

static void _ocf_cleaner_cache_io_end(struct ocf_request *req)
{
        if (env_atomic_dec_return(&req->req_remaining))
                return;

        /*
         * All cache reads done, now we can submit writes to the cores.
         * Move processing to a thread where IO can be submitted.
         */
        req->io_if = &_io_if_fire_core;
        ocf_engine_push_req_front(req, true);

        OCF_DEBUG_MSG(req->cache, "Cache reads finished");
}

static void _ocf_cleaner_cache_io_cmpl(struct ocf_io *io, int error)
{
        struct ocf_map_info *map = io->priv1;
        struct ocf_request *req = io->priv2;

        if (error) {
                map->invalid |= 1;
                _ocf_cleaner_set_error(req);
                env_atomic_inc(&req->cache->core[map->core_id].counters->
                                cache_errors.read);
        }

        _ocf_cleaner_cache_io_end(req);

        ocf_io_put(io);
}

/*
 * cleaner - Traverse the cache lines to be cleaned and submit cache
 * reads; core writes follow once all reads have completed
 */
static int _ocf_cleaner_fire_cache(struct ocf_request *req)
{
        struct ocf_cache *cache = req->cache;
        uint32_t i;
        struct ocf_map_info *iter = req->map;
        uint64_t addr, offset;
        ocf_part_id_t part_id;
        struct ocf_io *io;
        int err;
        struct ocf_counters_block *cache_stats;

        /* Protect against the IO completion race */
        env_atomic_inc(&req->req_remaining);

        for (i = 0; i < req->core_line_count; i++, iter++) {
                if (iter->core_id == OCF_CORE_MAX)
                        continue;
                if (iter->status == LOOKUP_MISS)
                        continue;

                cache_stats = &cache->core[iter->core_id].
                                counters->cache_blocks;

                io = ocf_new_cache_io(cache);
                if (!io) {
                        /* Allocation error */
                        iter->invalid = true;
                        _ocf_cleaner_set_error(req);
                        continue;
                }

                OCF_DEBUG_PARAM(req->cache, "Cache read, line = %u",
                                iter->coll_idx);

                addr = ocf_metadata_map_lg2phy(cache,
                                iter->coll_idx);
                addr *= ocf_line_size(cache);
                addr += cache->device->metadata_offset;

                offset = ocf_line_size(cache) * iter->hash_key;

                part_id = ocf_metadata_get_partition_id(cache, iter->coll_idx);

                ocf_io_set_cmpl(io, iter, req, _ocf_cleaner_cache_io_cmpl);
                ocf_io_configure(io, addr, ocf_line_size(cache), OCF_READ,
                                part_id, 0);
                ocf_io_set_queue(io, req->io_queue);
                err = ocf_io_set_data(io, req->data, offset);
                if (err) {
                        ocf_io_put(io);
                        iter->invalid = true;
                        _ocf_cleaner_set_error(req);
                        continue;
                }

                env_atomic64_add(ocf_line_size(cache), &cache_stats->read_bytes);

                ocf_volume_submit_io(io);
        }

        /* Drop the completion-race guard */
        _ocf_cleaner_cache_io_end(req);

        return 0;
}
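
/*
 * The cache read address is computed from the physical line:
 *   addr = map_lg2phy(coll_idx) * line_size + metadata_offset
 * i.e. the logical collision index is first translated to the line's
 * physical position, then shifted past the on-disk metadata area at the
 * start of the cache volume.
 */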

static const struct ocf_io_if _io_if_fire_cache = {
        .read = _ocf_cleaner_fire_cache,
        .write = _ocf_cleaner_fire_cache,
};

static void _ocf_cleaner_on_resume(struct ocf_request *req)
{
        OCF_DEBUG_TRACE(req->cache);
        ocf_engine_push_req_front(req, true);
}

static int _ocf_cleaner_fire(struct ocf_request *req)
{
        int result;

        /* Set resume callbacks */
        req->resume = _ocf_cleaner_on_resume;
        req->io_if = &_io_if_fire_cache;

        /* Handle cache line locks */
        result = _ocf_cleaner_cache_line_lock(req);

        if (result >= 0) {
                if (result == OCF_LOCK_ACQUIRED) {
                        OCF_DEBUG_MSG(req->cache, "Lock acquired");
                        _ocf_cleaner_fire_cache(req);
                } else {
                        OCF_DEBUG_MSG(req->cache, "NO Lock");
                }
                return 0;
        } else {
                OCF_DEBUG_MSG(req->cache, "Lock error");
        }

        return result;
}

/* Helper function for 'sort' */
static int _ocf_cleaner_cmp_private(const void *a, const void *b)
{
        struct ocf_map_info *_a = (struct ocf_map_info *)a;
        struct ocf_map_info *_b = (struct ocf_map_info *)b;

        static uint32_t step = 0;

        OCF_COND_RESCHED_DEFAULT(step);

        if (_a->core_id == _b->core_id)
                return (_a->core_line > _b->core_line) ? 1 : -1;

        return (_a->core_id > _b->core_id) ? 1 : -1;
}

/**
 * Prepare a cleaning request to be fired
 *
 * @param req cleaning request
 * @param i_out number of already filled map entries (the remainder is
 *              filled with fake misses)
 * @param do_sort sort the map by core id and core line before firing
 */
static int _ocf_cleaner_do_fire(struct ocf_request *req, uint32_t i_out,
                bool do_sort)
{
        uint32_t i;
        /* Set the count of cache IOs */
        env_atomic_set(&req->req_remaining, i_out);

        /* Fill the tail of the request with fake MISSes so that it
         * won't be cleaned
         */
        for (; i_out < req->core_line_count; ++i_out) {
                req->map[i_out].core_id = OCF_CORE_MAX;
                req->map[i_out].core_line = ULLONG_MAX;
                req->map[i_out].status = LOOKUP_MISS;
                req->map[i_out].hash_key = i_out;
        }

        if (do_sort) {
                /* Sort by core id and core line */
                env_sort(req->map, req->core_line_count, sizeof(req->map[0]),
                                _ocf_cleaner_cmp_private, NULL);
                for (i = 0; i < req->core_line_count; i++)
                        req->map[i].hash_key = i;
        }

        /* Issue the actual request */
        return _ocf_cleaner_fire(req);
}

static inline uint32_t _ocf_cleaner_get_req_max_count(uint32_t count,
                bool low_mem)
{
        if (low_mem || count <= 4096)
                return count < 128 ? count : 128;

        return 1024;
}
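
/*
 * Examples of the sizing heuristic: count = 100 -> 100 (the whole job
 * fits in one request), count = 3000 -> 128 (small batches overlap
 * better), count = 100000 -> 1024 (large batches amortize per-request
 * overhead). Under memory pressure (low_mem) the cap always drops
 * to 128.
 */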

static void _ocf_cleaner_fire_error(struct ocf_request *master,
                struct ocf_request *req, int err)
{
        master->error = err;
        _ocf_cleaner_complete_req(req);
        _ocf_cleaner_dealloc_req(req);
}

/*
 * cleaner - Main function
 */
void ocf_cleaner_fire(struct ocf_cache *cache,
                const struct ocf_cleaner_attribs *attribs)
{
        uint32_t i, i_out = 0, count = attribs->count;
        /* Maximum number of cache lines cleaned by a single request:
         * 1024 when more than 4096 lines are to be flushed, otherwise
         * 128. For large cleaning operations 1024 is the optimal number,
         * but for smaller ones it is too large to benefit from
         * overlapping cleaning requests.
         */
        uint32_t max = _ocf_cleaner_get_req_max_count(count, false);
        ocf_cache_line_t cache_line;
        /* More than one cleaning request may be generated for a single
         * cleaning order, thus multiple allocations. At the end of the
         * loop, req is set to NULL and NOT deallocated, as deallocation
         * is handled in the completion path.
         * The first request is the master, which carries the completion
         * function and context; each succeeding request is a slave
         * holding a reference to the master.
         */
        struct ocf_request *req = NULL, *master;
        int err;
        ocf_core_id_t core_id;
        uint64_t core_sector;

        /* Allocate master request */
        master = _ocf_cleaner_alloc_master_req(cache, max, attribs);

        if (!master) {
                /* Memory allocation failed; retry with a smaller request */
                max = _ocf_cleaner_get_req_max_count(count, true);
                master = _ocf_cleaner_alloc_master_req(cache, max, attribs);
        }

        if (!master) {
                attribs->cmpl_fn(attribs->cmpl_context, -ENOMEM);
                return;
        }

        req = master;

        /* Prevent the cleaning completion race */
        ocf_req_get(master);
        env_atomic_inc(&master->master_remaining);

        for (i = 0; i < count; i++) {
                /* The request has not been allocated yet or was just fired */
                if (!req) {
                        if (max > count - i) {
                                /* Fewer than max lines left */
                                max = count - i;
                        }

                        req = _ocf_cleaner_alloc_slave_req(master, max, attribs);
                }

                if (!req) {
                        /* Memory allocation failed;
                         * retry with a smaller request
                         */
                        max = _ocf_cleaner_get_req_max_count(max, true);
                        req = _ocf_cleaner_alloc_slave_req(master, max, attribs);
                }

                /* If request allocation failed, stop processing */
                if (!req) {
                        master->error = -ENOMEM;
                        break;
                }

                if (attribs->getter(cache, attribs->getter_context,
                                i, &cache_line)) {
                        OCF_DEBUG_MSG(cache, "Skip");
                        continue;
                }

                /* The line may already be clean - a rare condition under
                 * heavy I/O workload.
                 */
                if (!metadata_test_dirty(cache, cache_line)) {
                        OCF_DEBUG_MSG(cache, "Not dirty");
                        continue;
                }

                if (!metadata_test_valid_any(cache, cache_line)) {
                        OCF_DEBUG_MSG(cache, "No valid sectors");

                        /*
                         * Improper cache line state: a cache line
                         * (sector) cannot be dirty and not valid
                         */
                        ENV_BUG();
                        continue;
                }

                /* Get mapping info */
                ocf_metadata_get_core_info(cache, cache_line, &core_id,
                                &core_sector);

                if (unlikely(!cache->core[core_id].opened)) {
                        OCF_DEBUG_MSG(cache, "Core object inactive");
                        continue;
                }

                req->map[i_out].core_id = core_id;
                req->map[i_out].core_line = core_sector;
                req->map[i_out].coll_idx = cache_line;
                req->map[i_out].status = LOOKUP_HIT;
                req->map[i_out].hash_key = i_out;
                i_out++;

                if (max == i_out) {
                        err = _ocf_cleaner_do_fire(req, i_out, attribs->do_sort);
                        if (err) {
                                _ocf_cleaner_fire_error(master, req, err);
                                req = NULL;
                                break;
                        }
                        i_out = 0;
                        req = NULL;
                }
        }

        if (req) {
                err = _ocf_cleaner_do_fire(req, i_out, attribs->do_sort);
                if (err)
                        _ocf_cleaner_fire_error(master, req, err);
                req = NULL;
        }

        /* Drop the completion-race guard taken above */
        _ocf_cleaner_complete_req(master);
        ocf_req_put(master);
}

static int _ocf_cleaner_do_flush_data_getter(struct ocf_cache *cache,
                void *context, uint32_t item, ocf_cache_line_t *line)
{
        struct flush_data *flush = context;

        if (flush[item].cache_line < cache->device->collision_table_entries) {
                (*line) = flush[item].cache_line;
                return 0;
        } else {
                return -1;
        }
}

int ocf_cleaner_do_flush_data_async(struct ocf_cache *cache,
                struct flush_data *flush, uint32_t count,
                struct ocf_cleaner_attribs *attribs)
{
        attribs->getter = _ocf_cleaner_do_flush_data_getter;
        attribs->getter_context = flush;
        attribs->count = count;

        ocf_cleaner_fire(cache, attribs);

        return 0;
}
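
/*
 * Usage sketch - hypothetical caller, the my_* names are illustrative
 * only; the field names match how attribs is consumed in this file:
 *
 *   struct ocf_cleaner_attribs attribs = {
 *       .cache_line_lock = true,
 *       .do_sort = true,
 *       .io_queue = my_queue,
 *       .cmpl_context = my_ctx,
 *       .cmpl_fn = my_flush_done,
 *   };
 *
 *   ocf_cleaner_do_flush_data_async(cache, flush_tbl, n, &attribs);
 *
 * The getter, getter_context and count fields are filled in by this
 * helper; completion is reported exactly once through cmpl_fn via the
 * master request.
 */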

/* Helper function for 'sort' */
static int _ocf_cleaner_cmp(const void *a, const void *b)
{
        struct flush_data *_a = (struct flush_data *)a;
        struct flush_data *_b = (struct flush_data *)b;

        /* TODO: FIXME get rid of static */
        static uint32_t step = 0;

        OCF_COND_RESCHED(step, 1000000);

        if (_a->core_id == _b->core_id)
                return (_a->core_line > _b->core_line) ? 1 : -1;

        return (_a->core_id > _b->core_id) ? 1 : -1;
}

static void _ocf_cleaner_swap(void *a, void *b, int size)
{
        struct flush_data *_a = (struct flush_data *)a;
        struct flush_data *_b = (struct flush_data *)b;
        struct flush_data t;

        t = *_a;
        *_a = *_b;
        *_b = t;
}

void ocf_cleaner_sort_sectors(struct flush_data *tbl, uint32_t num)
{
        env_sort(tbl, num, sizeof(*tbl), _ocf_cleaner_cmp, _ocf_cleaner_swap);
}

void ocf_cleaner_sort_flush_containers(struct flush_container *fctbl,
                uint32_t num)
{
        uint32_t i;

        for (i = 0; i < num; i++) {
                env_sort(fctbl[i].flush_data, fctbl[i].count,
                                sizeof(*fctbl[i].flush_data), _ocf_cleaner_cmp,
                                _ocf_cleaner_swap);
        }
}