/*
 * Copyright (C) 2012 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-cache-policy.h"
#include "dm.h"

#include <linux/hash.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#define DM_MSG_PREFIX "cache-policy-mq"

static struct kmem_cache *mq_entry_cache;

/*----------------------------------------------------------------*/

static unsigned next_power(unsigned n, unsigned min)
{
	return roundup_pow_of_two(max(n, min));
}
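
/*
 * For example, next_power(200, 16) returns 256 (the smallest power of
 * two >= 200), while next_power(10, 16) returns 16: the minimum wins
 * first and is already a power of two.
 */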

/*----------------------------------------------------------------*/

/*
 * Large, sequential ios are probably better left on the origin device since
 * spindles tend to have good bandwidth.
 *
 * The io_tracker tries to spot when the io is in one of these sequential
 * modes.
 *
 * The two thresholds used to switch between random and sequential io mode
 * default as follows, and can be adjusted via the constructor and message
 * interfaces.
 */
#define RANDOM_THRESHOLD_DEFAULT 4
#define SEQUENTIAL_THRESHOLD_DEFAULT 512
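
/*
 * With the defaults, for instance, accumulating 512 contiguous io
 * samples flips the tracker into sequential mode, and accumulating 4
 * non-contiguous samples flips it back (see
 * iot_check_for_pattern_switch() below).
 */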

enum io_pattern {
	PATTERN_SEQUENTIAL,
	PATTERN_RANDOM
};

struct io_tracker {
	enum io_pattern pattern;

	unsigned nr_seq_samples;
	unsigned nr_rand_samples;
	unsigned thresholds[2];

	dm_oblock_t last_end_oblock;
};

static void iot_init(struct io_tracker *t,
		     int sequential_threshold, int random_threshold)
{
	t->pattern = PATTERN_RANDOM;
	t->nr_seq_samples = 0;
	t->nr_rand_samples = 0;
	t->last_end_oblock = 0;
	t->thresholds[PATTERN_RANDOM] = random_threshold;
	t->thresholds[PATTERN_SEQUENTIAL] = sequential_threshold;
}

static enum io_pattern iot_pattern(struct io_tracker *t)
{
	return t->pattern;
}

static void iot_update_stats(struct io_tracker *t, struct bio *bio)
{
	if (bio->bi_sector == from_oblock(t->last_end_oblock) + 1)
		t->nr_seq_samples++;
	else {
		/*
		 * Just one non-sequential IO is enough to reset the
		 * counters.
		 */
		if (t->nr_seq_samples) {
			t->nr_seq_samples = 0;
			t->nr_rand_samples = 0;
		}

		t->nr_rand_samples++;
	}

	t->last_end_oblock = to_oblock(bio->bi_sector + bio_sectors(bio) - 1);
}

static void iot_check_for_pattern_switch(struct io_tracker *t)
{
	switch (t->pattern) {
	case PATTERN_SEQUENTIAL:
		if (t->nr_rand_samples >= t->thresholds[PATTERN_RANDOM]) {
			t->pattern = PATTERN_RANDOM;
			t->nr_seq_samples = t->nr_rand_samples = 0;
		}
		break;

	case PATTERN_RANDOM:
		if (t->nr_seq_samples >= t->thresholds[PATTERN_SEQUENTIAL]) {
			t->pattern = PATTERN_SEQUENTIAL;
			t->nr_seq_samples = t->nr_rand_samples = 0;
		}
		break;
	}
}

static void iot_examine_bio(struct io_tracker *t, struct bio *bio)
{
	iot_update_stats(t, bio);
	iot_check_for_pattern_switch(t);
}
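
/*
 * Illustrative sketch (editor's example, kept out of the build): with a
 * sequential threshold of 8, a cold tracker counts the first bio as a
 * random sample, then flips to sequential mode once eight contiguous
 * bios have followed it.  make_test_bio() is hypothetical; real callers
 * hand the tracker live bios from the map function.
 */
#if 0
static void iot_demo(void)
{
	struct io_tracker t;
	unsigned i;

	iot_init(&t, 8, 4);	/* sequential_threshold 8, random_threshold 4 */

	for (i = 0; i < 9; i++) {
		/* 8-sector bios laid end to end: sectors 0-7, 8-15, ... */
		struct bio *bio = make_test_bio(i * 8, 8);

		iot_examine_bio(&t, bio);
	}

	BUG_ON(iot_pattern(&t) != PATTERN_SEQUENTIAL);
}
#endif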

/*----------------------------------------------------------------*/

/*
 * This queue is divided up into different levels, allowing us to push
 * entries to the back of any of the levels.  Think of it as a partially
 * sorted queue.
 */
#define NR_QUEUE_LEVELS 16u

struct queue {
	struct list_head qs[NR_QUEUE_LEVELS];
};

static void queue_init(struct queue *q)
{
	unsigned i;

	for (i = 0; i < NR_QUEUE_LEVELS; i++)
		INIT_LIST_HEAD(q->qs + i);
}

/*
 * Checks to see if the queue is empty.
 * FIXME: reduce cpu usage.
 */
static bool queue_empty(struct queue *q)
{
	unsigned i;

	for (i = 0; i < NR_QUEUE_LEVELS; i++)
		if (!list_empty(q->qs + i))
			return false;

	return true;
}

/*
 * Insert an entry to the back of the given level.
 */
static void queue_push(struct queue *q, unsigned level, struct list_head *elt)
{
	list_add_tail(elt, q->qs + level);
}

static void queue_remove(struct list_head *elt)
{
	list_del(elt);
}

/*
 * Shifts all regions down one level.  This has no effect on the order of
 * the queue.
 */
static void queue_shift_down(struct queue *q)
{
	unsigned level;

	for (level = 1; level < NR_QUEUE_LEVELS; level++)
		list_splice_init(q->qs + level, q->qs + level - 1);
}

/*
 * Gives us the oldest entry of the lowest populated level.  If the first
 * level is emptied then we shift down one level.
 */
static struct list_head *queue_pop(struct queue *q)
{
	unsigned level;
	struct list_head *r;

	for (level = 0; level < NR_QUEUE_LEVELS; level++)
		if (!list_empty(q->qs + level)) {
			r = q->qs[level].next;
			list_del(r);

			/* have we just emptied the bottom level? */
			if (level == 0 && list_empty(q->qs))
				queue_shift_down(q);

			return r;
		}

	return NULL;
}
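
/*
 * Illustrative sketch (editor's example, kept out of the build): entries
 * pushed to a lower level pop before entries pushed to a higher one,
 * regardless of insertion order; within a level the queue is FIFO.
 */
#if 0
static void queue_demo(void)
{
	struct queue q;
	struct list_head a, b, c;

	queue_init(&q);
	queue_push(&q, 3, &a);	/* higher level: comes out last */
	queue_push(&q, 0, &b);	/* level 0, first in */
	queue_push(&q, 0, &c);	/* level 0, second in */

	BUG_ON(queue_pop(&q) != &b);
	BUG_ON(queue_pop(&q) != &c);	/* drains level 0, shifting down */
	BUG_ON(queue_pop(&q) != &a);
}
#endif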

static struct list_head *list_pop(struct list_head *lh)
{
	struct list_head *r = lh->next;

	BUG_ON(!r);
	list_del_init(r);

	return r;
}

/*----------------------------------------------------------------*/

/*
 * Describes a cache entry.  Used in both the cache and the pre_cache.
 */
struct entry {
	struct hlist_node hlist;
	struct list_head list;
	dm_oblock_t oblock;

	/*
	 * FIXME: pack these better
	 */
	bool dirty:1;
	unsigned hit_count;
	unsigned generation;
	unsigned tick;
};

/*
 * Rather than storing the cblock in an entry, we allocate all entries in
 * an array, and infer the cblock from the entry position.
 *
 * Free entries are linked together into a list.
 */
struct entry_pool {
	struct entry *entries, *entries_end;
	struct list_head free;
	unsigned nr_allocated;
};

static int epool_init(struct entry_pool *ep, unsigned nr_entries)
{
	unsigned i;

	ep->entries = vzalloc(sizeof(struct entry) * nr_entries);
	if (!ep->entries)
		return -ENOMEM;

	ep->entries_end = ep->entries + nr_entries;

	INIT_LIST_HEAD(&ep->free);
	for (i = 0; i < nr_entries; i++)
		list_add(&ep->entries[i].list, &ep->free);

	ep->nr_allocated = 0;

	return 0;
}

static void epool_exit(struct entry_pool *ep)
{
	vfree(ep->entries);
}

static struct entry *alloc_entry(struct entry_pool *ep)
{
	struct entry *e;

	if (list_empty(&ep->free))
		return NULL;

	e = list_entry(list_pop(&ep->free), struct entry, list);
	INIT_LIST_HEAD(&e->list);
	INIT_HLIST_NODE(&e->hlist);
	ep->nr_allocated++;

	return e;
}

/*
 * This assumes the cblock hasn't already been allocated.
 */
static struct entry *alloc_particular_entry(struct entry_pool *ep, dm_cblock_t cblock)
{
	struct entry *e = ep->entries + from_cblock(cblock);
	list_del(&e->list);

	INIT_LIST_HEAD(&e->list);
	INIT_HLIST_NODE(&e->hlist);
	ep->nr_allocated++;

	return e;
}

static void free_entry(struct entry_pool *ep, struct entry *e)
{
	BUG_ON(!ep->nr_allocated);
	ep->nr_allocated--;
	INIT_HLIST_NODE(&e->hlist);
	list_add(&e->list, &ep->free);
}

/*
 * Returns NULL if the entry is free.
 */
static struct entry *epool_find(struct entry_pool *ep, dm_cblock_t cblock)
{
	struct entry *e = ep->entries + from_cblock(cblock);
	return !hlist_unhashed(&e->hlist) ? e : NULL;
}

static bool epool_empty(struct entry_pool *ep)
{
	return list_empty(&ep->free);
}

static bool in_pool(struct entry_pool *ep, struct entry *e)
{
	return e >= ep->entries && e < ep->entries_end;
}

static dm_cblock_t infer_cblock(struct entry_pool *ep, struct entry *e)
{
	return to_cblock(e - ep->entries);
}
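
/*
 * Illustrative sketch (editor's example, kept out of the build): a
 * cblock is just the entry's index in the pool array, so an entry's
 * cblock never needs storing and the alloc/infer pair round-trips.
 */
#if 0
static void epool_demo(void)
{
	struct entry_pool ep;
	struct entry *e;

	if (epool_init(&ep, 16))
		return;

	e = alloc_particular_entry(&ep, to_cblock(7));	/* claim index 7 */
	BUG_ON(from_cblock(infer_cblock(&ep, e)) != 7);

	free_entry(&ep, e);
	epool_exit(&ep);
}
#endif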

/*----------------------------------------------------------------*/

struct mq_policy {
	struct dm_cache_policy policy;

	/* protects everything */
	struct mutex lock;
	dm_cblock_t cache_size;
	struct io_tracker tracker;

	/*
	 * Entries come from two pools, one of pre-cache entries, and one
	 * for the cache proper.
	 */
	struct entry_pool pre_cache_pool;
	struct entry_pool cache_pool;

	/*
	 * We maintain three queues of entries.  The cache proper,
	 * consisting of a clean and a dirty queue, contains the currently
	 * active mappings, whereas the pre_cache tracks blocks that are
	 * being hit frequently and are potential candidates for promotion
	 * to the cache.
	 */
	struct queue pre_cache;
	struct queue cache_clean;
	struct queue cache_dirty;

	/*
	 * Keeps track of time, incremented by the core.  We use this to
	 * avoid attributing multiple hits within the same tick.
	 *
	 * Access to tick_protected should be done with the spin lock held.
	 * It's copied to tick at the start of the map function (within the
	 * mutex).
	 */
	spinlock_t tick_lock;
	unsigned tick_protected;
	unsigned tick;

	/*
	 * A count of the number of times the map function has been called
	 * and found an entry in the pre_cache or cache.  Currently used to
	 * calculate the generation.
	 */
	unsigned hit_count;

	/*
	 * A generation is a longish period that is used to trigger some
	 * bookkeeping effects, e.g. decrementing hit counts on entries.
	 * This is needed to allow the cache to evolve as io patterns
	 * change.
	 */
	unsigned generation;
	unsigned generation_period; /* in lookups (will probably change) */

	/*
	 * Entries in the pre_cache whose hit count passes the promotion
	 * threshold move to the cache proper.  Working out the correct
	 * value for the promotion_threshold is crucial to this policy.
	 */
	unsigned promote_threshold;

	/*
	 * The hash table allows us to quickly find an entry by origin
	 * block.  Both pre_cache and cache entries are in here.
	 */
	unsigned nr_buckets;
	dm_block_t hash_bits;
	struct hlist_head *table;
};

/*----------------------------------------------------------------*/

/*
 * Simple hash table implementation.  Should be replaced with the
 * standard hash table that's making its way upstream.  Lookups also
 * move the found entry to the front of its bucket, keeping hot entries
 * cheap to find.
 */
static void hash_insert(struct mq_policy *mq, struct entry *e)
{
	unsigned h = hash_64(from_oblock(e->oblock), mq->hash_bits);

	hlist_add_head(&e->hlist, mq->table + h);
}

static struct entry *hash_lookup(struct mq_policy *mq, dm_oblock_t oblock)
{
	unsigned h = hash_64(from_oblock(oblock), mq->hash_bits);
	struct hlist_head *bucket = mq->table + h;
	struct entry *e;

	hlist_for_each_entry(e, bucket, hlist)
		if (e->oblock == oblock) {
			hlist_del(&e->hlist);
			hlist_add_head(&e->hlist, bucket);
			return e;
		}

	return NULL;
}

static void hash_remove(struct entry *e)
{
	hlist_del(&e->hlist);
}

/*----------------------------------------------------------------*/

static bool any_free_cblocks(struct mq_policy *mq)
{
	return !epool_empty(&mq->cache_pool);
}

static bool any_clean_cblocks(struct mq_policy *mq)
{
	return !queue_empty(&mq->cache_clean);
}

/*----------------------------------------------------------------*/

/*
 * Now we get to the meat of the policy.  This section deals with
 * deciding when to add entries to the pre_cache and the cache, and when
 * to move between them.
 */

/*
 * The queue level is based on the log2 of the hit count.
 */
static unsigned queue_level(struct entry *e)
{
	return min((unsigned) ilog2(e->hit_count), NR_QUEUE_LEVELS - 1u);
}
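
/*
 * For example, hit counts of 1, 500 and 40000 land on levels 0, 8 and
 * 15 respectively; anything from 32768 hits upwards is clamped to the
 * top level, NR_QUEUE_LEVELS - 1.
 */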

static bool in_cache(struct mq_policy *mq, struct entry *e)
{
	return in_pool(&mq->cache_pool, e);
}

/*
 * Inserts the entry into the pre_cache or the cache.  Ensures the cache
 * block is marked as allocated if necessary.  Inserts into the hash
 * table.  Sets the tick which records when the entry was last moved
 * about.
 */
static void push(struct mq_policy *mq, struct entry *e)
{
	e->tick = mq->tick;
	hash_insert(mq, e);

	if (in_cache(mq, e))
		queue_push(e->dirty ? &mq->cache_dirty : &mq->cache_clean,
			   queue_level(e), &e->list);
	else
		queue_push(&mq->pre_cache, queue_level(e), &e->list);
}

/*
 * Removes an entry from pre_cache or cache.  Removes from the hash table.
 */
static void del(struct mq_policy *mq, struct entry *e)
{
	queue_remove(&e->list);
	hash_remove(e);
}

/*
 * Like del, except it removes the first entry in the queue (ie. the least
 * recently used).
 */
static struct entry *pop(struct mq_policy *mq, struct queue *q)
{
	struct entry *e;
	struct list_head *h = queue_pop(q);

	if (!h)
		return NULL;

	e = container_of(h, struct entry, list);
	hash_remove(e);

	return e;
}

/*
 * Has this entry already been updated?
 */
static bool updated_this_tick(struct mq_policy *mq, struct entry *e)
{
	return mq->tick == e->tick;
}

/*
 * The promotion threshold is adjusted every generation, as are the hit
 * counts of the entries.
 *
 * At the moment the threshold is taken by averaging the hit counts of
 * some of the entries in the cache (the first 20 entries across all
 * levels in ascending order, giving preference to the clean entries at
 * each level).
 *
 * We can be much cleverer than this though.  For example, each promotion
 * could bump up the threshold, helping to prevent churn.  Much more to
 * do here.
 */

#define MAX_TO_AVERAGE 20

static void check_generation(struct mq_policy *mq)
{
	unsigned total = 0, nr = 0, count = 0, level;
	struct list_head *head;
	struct entry *e;

	if ((mq->hit_count >= mq->generation_period) && (epool_empty(&mq->cache_pool))) {
		mq->hit_count = 0;
		mq->generation++;

		for (level = 0; level < NR_QUEUE_LEVELS && count < MAX_TO_AVERAGE; level++) {
			head = mq->cache_clean.qs + level;
			list_for_each_entry(e, head, list) {
				nr++;
				total += e->hit_count;

				if (++count >= MAX_TO_AVERAGE)
					break;
			}

			head = mq->cache_dirty.qs + level;
			list_for_each_entry(e, head, list) {
				nr++;
				total += e->hit_count;

				if (++count >= MAX_TO_AVERAGE)
					break;
			}
		}

		mq->promote_threshold = nr ? total / nr : 1;
		if (mq->promote_threshold * nr < total)
			mq->promote_threshold++;
	}
}
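
/*
 * The final adjustment above rounds the average up: e.g. 20 sampled
 * entries with a combined hit count of 90 give 90 / 20 = 4, and since
 * 4 * 20 < 90 the threshold is bumped to 5.
 */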

/*
 * Whenever we use an entry we bump up its hit counter, and push it to the
 * back of its current level.
 */
static void requeue_and_update_tick(struct mq_policy *mq, struct entry *e)
{
	if (updated_this_tick(mq, e))
		return;

	e->hit_count++;
	mq->hit_count++;
	check_generation(mq);

	/* generation adjustment, to stop the counts increasing forever. */
	/* FIXME: divide? */
	/* e->hit_count -= min(e->hit_count - 1, mq->generation - e->generation); */
	e->generation = mq->generation;

	del(mq, e);
	push(mq, e);
}

/*
 * Demote the least recently used entry from the cache to the pre_cache,
 * handing back (via *oblock) the origin block the demoted entry was
 * mapped to.
 *
 * We want to stop the demoted block bouncing straight back into the
 * cache if it's subsequently hit.  There are various options here, and
 * more experimentation would be good:
 *
 * - just forget about the demoted entry completely (ie. don't insert it
 *   into the pre_cache).  This is what the code below currently does.
 * - divide the hit count rather than setting it to some hard coded value.
 * - set the hit count to a hard coded value other than 1, eg, is it better
 *   if it goes in at level 2?
 */
static int demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock)
{
	struct entry *demoted = pop(mq, &mq->cache_clean);

	if (!demoted)
		/*
		 * We could get a block from mq->cache_dirty, but that
		 * would add extra latency to the triggering bio as it
		 * waits for the writeback.  Better to not promote this
		 * time and hope there's a clean block next time this block
		 * is hit.
		 */
		return -ENOSPC;

	*oblock = demoted->oblock;
	free_entry(&mq->cache_pool, demoted);

	/*
	 * We used to put the demoted block into the pre-cache, but I think
	 * it's simpler to just let it work its way up from zero again.
	 * This stops blocks flickering in and out of the cache.
	 */

	return 0;
}

/*
 * We modify the basic promotion_threshold depending on the specific io.
 *
 * If the origin block has been discarded then there's no cost to copy it
 * to the cache.
 *
 * We bias towards reads, since they can be demoted at no cost if they
 * haven't been dirtied.
 */
#define DISCARDED_PROMOTE_THRESHOLD 1
#define READ_PROMOTE_THRESHOLD 4
#define WRITE_PROMOTE_THRESHOLD 8

static unsigned adjusted_promote_threshold(struct mq_policy *mq,
					   bool discarded_oblock, int data_dir)
{
	if (data_dir == READ)
		return mq->promote_threshold + READ_PROMOTE_THRESHOLD;

	if (discarded_oblock && (any_free_cblocks(mq) || any_clean_cblocks(mq))) {
		/*
		 * We don't need to do any copying at all, so give this a
		 * very low threshold.
		 */
		return DISCARDED_PROMOTE_THRESHOLD;
	}

	return mq->promote_threshold + WRITE_PROMOTE_THRESHOLD;
}
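
/*
 * For example, with promote_threshold at 4, a read hit in the pre_cache
 * needs a hit count of at least 8 to be promoted, a write needs 12, and
 * a write to a discarded block needs only 1 while the cache still has a
 * free or clean block to give it.
 */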

static bool should_promote(struct mq_policy *mq, struct entry *e,
			   bool discarded_oblock, int data_dir)
{
	return e->hit_count >=
		adjusted_promote_threshold(mq, discarded_oblock, data_dir);
}

static int cache_entry_found(struct mq_policy *mq,
			     struct entry *e,
			     struct policy_result *result)
{
	requeue_and_update_tick(mq, e);

	if (in_cache(mq, e)) {
		result->op = POLICY_HIT;
		result->cblock = infer_cblock(&mq->cache_pool, e);
	}

	return 0;
}

/*
 * Moves an entry from the pre_cache to the cache.  The main work is
 * finding which cache block to use.
 */
static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e,
			      struct policy_result *result)
{
	int r;
	struct entry *new_e;

	/* Ensure there's a free cblock in the cache */
	if (epool_empty(&mq->cache_pool)) {
		result->op = POLICY_REPLACE;
		r = demote_cblock(mq, &result->old_oblock);
		if (r) {
			result->op = POLICY_MISS;
			return 0;
		}
	} else
		result->op = POLICY_NEW;

	new_e = alloc_entry(&mq->cache_pool);
	BUG_ON(!new_e);

	new_e->oblock = e->oblock;
	new_e->dirty = false;
	new_e->hit_count = e->hit_count;
	new_e->generation = e->generation;
	new_e->tick = e->tick;

	del(mq, e);
	free_entry(&mq->pre_cache_pool, e);
	push(mq, new_e);

	result->cblock = infer_cblock(&mq->cache_pool, new_e);

	return 0;
}

static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,
				 bool can_migrate, bool discarded_oblock,
				 int data_dir, struct policy_result *result)
{
	int r = 0;
	bool updated = updated_this_tick(mq, e);

	if ((!discarded_oblock && updated) ||
	    !should_promote(mq, e, discarded_oblock, data_dir)) {
		requeue_and_update_tick(mq, e);
		result->op = POLICY_MISS;

	} else if (!can_migrate)
		r = -EWOULDBLOCK;

	else {
		requeue_and_update_tick(mq, e);
		r = pre_cache_to_cache(mq, e, result);
	}

	return r;
}

static void insert_in_pre_cache(struct mq_policy *mq,
				dm_oblock_t oblock)
{
	struct entry *e = alloc_entry(&mq->pre_cache_pool);

	if (!e)
		/*
		 * There's no spare entry structure, so we grab the least
		 * used one from the pre_cache.
		 */
		e = pop(mq, &mq->pre_cache);

	if (unlikely(!e)) {
		DMWARN("couldn't pop from pre cache");
		return;
	}

	e->dirty = false;
	e->oblock = oblock;
	e->hit_count = 1;
	e->generation = mq->generation;
	push(mq, e);
}

static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
			    struct policy_result *result)
{
	int r;
	struct entry *e;

	if (epool_empty(&mq->cache_pool)) {
		result->op = POLICY_REPLACE;
		r = demote_cblock(mq, &result->old_oblock);
		if (unlikely(r)) {
			result->op = POLICY_MISS;
			insert_in_pre_cache(mq, oblock);
			return;
		}

		/*
		 * This will always succeed, since we've just demoted.
		 */
		e = alloc_entry(&mq->cache_pool);
		BUG_ON(!e);

	} else {
		e = alloc_entry(&mq->cache_pool);
		result->op = POLICY_NEW;
	}

	e->oblock = oblock;
	e->dirty = false;
	e->hit_count = 1;
	e->generation = mq->generation;
	push(mq, e);

	result->cblock = infer_cblock(&mq->cache_pool, e);
}

static int no_entry_found(struct mq_policy *mq, dm_oblock_t oblock,
			  bool can_migrate, bool discarded_oblock,
			  int data_dir, struct policy_result *result)
{
	if (adjusted_promote_threshold(mq, discarded_oblock, data_dir) == 1) {
		if (can_migrate)
			insert_in_cache(mq, oblock, result);
		else
			return -EWOULDBLOCK;
	} else {
		insert_in_pre_cache(mq, oblock);
		result->op = POLICY_MISS;
	}

	return 0;
}

/*
 * Looks the oblock up in the hash table, then decides whether to put it
 * in the pre_cache, the cache, etc.
 */
static int map(struct mq_policy *mq, dm_oblock_t oblock,
	       bool can_migrate, bool discarded_oblock,
	       int data_dir, struct policy_result *result)
{
	int r = 0;
	struct entry *e = hash_lookup(mq, oblock);

	if (e && in_cache(mq, e))
		r = cache_entry_found(mq, e, result);

	else if (iot_pattern(&mq->tracker) == PATTERN_SEQUENTIAL)
		result->op = POLICY_MISS;

	else if (e)
		r = pre_cache_entry_found(mq, e, can_migrate, discarded_oblock,
					  data_dir, result);

	else
		r = no_entry_found(mq, oblock, can_migrate, discarded_oblock,
				   data_dir, result);

	if (r == -EWOULDBLOCK)
		result->op = POLICY_MISS;

	return r;
}

/*----------------------------------------------------------------*/

/*
 * Public interface, via the policy struct.  See dm-cache-policy.h for a
 * description of these.
 */

static struct mq_policy *to_mq_policy(struct dm_cache_policy *p)
{
	return container_of(p, struct mq_policy, policy);
}

static void mq_destroy(struct dm_cache_policy *p)
{
	struct mq_policy *mq = to_mq_policy(p);

	kfree(mq->table);
	epool_exit(&mq->cache_pool);
	epool_exit(&mq->pre_cache_pool);
	kfree(mq);
}

static void copy_tick(struct mq_policy *mq)
{
	unsigned long flags;

	spin_lock_irqsave(&mq->tick_lock, flags);
	mq->tick = mq->tick_protected;
	spin_unlock_irqrestore(&mq->tick_lock, flags);
}

static int mq_map(struct dm_cache_policy *p, dm_oblock_t oblock,
		  bool can_block, bool can_migrate, bool discarded_oblock,
		  struct bio *bio, struct policy_result *result)
{
	int r;
	struct mq_policy *mq = to_mq_policy(p);

	result->op = POLICY_MISS;

	if (can_block)
		mutex_lock(&mq->lock);
	else if (!mutex_trylock(&mq->lock))
		return -EWOULDBLOCK;

	copy_tick(mq);

	iot_examine_bio(&mq->tracker, bio);
	r = map(mq, oblock, can_migrate, discarded_oblock,
		bio_data_dir(bio), result);

	mutex_unlock(&mq->lock);

	return r;
}

static int mq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock)
{
	int r;
	struct mq_policy *mq = to_mq_policy(p);
	struct entry *e;

	if (!mutex_trylock(&mq->lock))
		return -EWOULDBLOCK;

	e = hash_lookup(mq, oblock);
	if (e && in_cache(mq, e)) {
		*cblock = infer_cblock(&mq->cache_pool, e);
		r = 0;
	} else
		r = -ENOENT;

	mutex_unlock(&mq->lock);

	return r;
}

static void __mq_set_clear_dirty(struct mq_policy *mq, dm_oblock_t oblock, bool set)
{
	struct entry *e;

	e = hash_lookup(mq, oblock);
	BUG_ON(!e || !in_cache(mq, e));

	del(mq, e);
	e->dirty = set;
	push(mq, e);
}

static void mq_set_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
{
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	__mq_set_clear_dirty(mq, oblock, true);
	mutex_unlock(&mq->lock);
}

static void mq_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
{
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	__mq_set_clear_dirty(mq, oblock, false);
	mutex_unlock(&mq->lock);
}

static int mq_load_mapping(struct dm_cache_policy *p,
			   dm_oblock_t oblock, dm_cblock_t cblock,
			   uint32_t hint, bool hint_valid)
{
	struct mq_policy *mq = to_mq_policy(p);
	struct entry *e;

	e = alloc_particular_entry(&mq->cache_pool, cblock);
	e->oblock = oblock;
	e->dirty = false;	/* this gets corrected in a minute */
	e->hit_count = hint_valid ? hint : 1;
	e->generation = mq->generation;
	push(mq, e);

	return 0;
}

static int mq_save_hints(struct mq_policy *mq, struct queue *q,
			 policy_walk_fn fn, void *context)
{
	int r;
	unsigned level;
	struct entry *e;

	for (level = 0; level < NR_QUEUE_LEVELS; level++)
		list_for_each_entry(e, q->qs + level, list) {
			r = fn(context, infer_cblock(&mq->cache_pool, e),
			       e->oblock, e->hit_count);
			if (r)
				return r;
		}

	return 0;
}

static int mq_walk_mappings(struct dm_cache_policy *p, policy_walk_fn fn,
			    void *context)
{
	struct mq_policy *mq = to_mq_policy(p);
	int r = 0;

	mutex_lock(&mq->lock);

	r = mq_save_hints(mq, &mq->cache_clean, fn, context);
	if (!r)
		r = mq_save_hints(mq, &mq->cache_dirty, fn, context);

	mutex_unlock(&mq->lock);

	return r;
}

static void __remove_mapping(struct mq_policy *mq, dm_oblock_t oblock)
{
	struct entry *e;

	e = hash_lookup(mq, oblock);
	BUG_ON(!e || !in_cache(mq, e));

	del(mq, e);
	free_entry(&mq->cache_pool, e);
}

static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
{
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	__remove_mapping(mq, oblock);
	mutex_unlock(&mq->lock);
}

static int __remove_cblock(struct mq_policy *mq, dm_cblock_t cblock)
{
	struct entry *e = epool_find(&mq->cache_pool, cblock);

	if (!e)
		return -ENODATA;

	del(mq, e);
	free_entry(&mq->cache_pool, e);

	return 0;
}

static int mq_remove_cblock(struct dm_cache_policy *p, dm_cblock_t cblock)
{
	int r;
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	r = __remove_cblock(mq, cblock);
	mutex_unlock(&mq->lock);

	return r;
}

static int __mq_writeback_work(struct mq_policy *mq, dm_oblock_t *oblock,
			       dm_cblock_t *cblock)
{
	struct entry *e = pop(mq, &mq->cache_dirty);

	if (!e)
		return -ENODATA;

	*oblock = e->oblock;
	*cblock = infer_cblock(&mq->cache_pool, e);
	e->dirty = false;
	push(mq, e);

	return 0;
}

static int mq_writeback_work(struct dm_cache_policy *p, dm_oblock_t *oblock,
			     dm_cblock_t *cblock)
{
	int r;
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	r = __mq_writeback_work(mq, oblock, cblock);
	mutex_unlock(&mq->lock);

	return r;
}

static void __force_mapping(struct mq_policy *mq,
			    dm_oblock_t current_oblock, dm_oblock_t new_oblock)
{
	struct entry *e = hash_lookup(mq, current_oblock);

	if (e && in_cache(mq, e)) {
		del(mq, e);
		e->oblock = new_oblock;
		e->dirty = true;
		push(mq, e);
	}
}

static void mq_force_mapping(struct dm_cache_policy *p,
			     dm_oblock_t current_oblock, dm_oblock_t new_oblock)
{
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	__force_mapping(mq, current_oblock, new_oblock);
	mutex_unlock(&mq->lock);
}

static dm_cblock_t mq_residency(struct dm_cache_policy *p)
{
	dm_cblock_t r;
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	r = to_cblock(mq->cache_pool.nr_allocated);
	mutex_unlock(&mq->lock);

	return r;
}

static void mq_tick(struct dm_cache_policy *p)
{
	struct mq_policy *mq = to_mq_policy(p);
	unsigned long flags;

	spin_lock_irqsave(&mq->tick_lock, flags);
	mq->tick_protected++;
	spin_unlock_irqrestore(&mq->tick_lock, flags);
}

static int mq_set_config_value(struct dm_cache_policy *p,
			       const char *key, const char *value)
{
	struct mq_policy *mq = to_mq_policy(p);
	enum io_pattern pattern;
	unsigned long tmp;

	if (!strcasecmp(key, "random_threshold"))
		pattern = PATTERN_RANDOM;
	else if (!strcasecmp(key, "sequential_threshold"))
		pattern = PATTERN_SEQUENTIAL;
	else
		return -EINVAL;

	if (kstrtoul(value, 10, &tmp))
		return -EINVAL;

	mq->tracker.thresholds[pattern] = tmp;

	return 0;
}
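
/*
 * These keys are normally driven from userspace through the cache
 * target's message interface, along these lines (illustrative; the
 * device name is made up):
 *
 *	dmsetup message my_cache 0 sequential_threshold 1024
 */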

static int mq_emit_config_values(struct dm_cache_policy *p, char *result, unsigned maxlen)
{
	ssize_t sz = 0;
	struct mq_policy *mq = to_mq_policy(p);

	DMEMIT("4 random_threshold %u sequential_threshold %u",
	       mq->tracker.thresholds[PATTERN_RANDOM],
	       mq->tracker.thresholds[PATTERN_SEQUENTIAL]);

	return 0;
}

/* Init the policy plugin interface function pointers. */
static void init_policy_functions(struct mq_policy *mq)
{
	mq->policy.destroy = mq_destroy;
	mq->policy.map = mq_map;
	mq->policy.lookup = mq_lookup;
	mq->policy.set_dirty = mq_set_dirty;
	mq->policy.clear_dirty = mq_clear_dirty;
	mq->policy.load_mapping = mq_load_mapping;
	mq->policy.walk_mappings = mq_walk_mappings;
	mq->policy.remove_mapping = mq_remove_mapping;
	mq->policy.remove_cblock = mq_remove_cblock;
	mq->policy.writeback_work = mq_writeback_work;
	mq->policy.force_mapping = mq_force_mapping;
	mq->policy.residency = mq_residency;
	mq->policy.tick = mq_tick;
	mq->policy.emit_config_values = mq_emit_config_values;
	mq->policy.set_config_value = mq_set_config_value;
}

static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
					 sector_t origin_size,
					 sector_t cache_block_size)
{
	struct mq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL);

	if (!mq)
		return NULL;

	init_policy_functions(mq);
	iot_init(&mq->tracker, SEQUENTIAL_THRESHOLD_DEFAULT, RANDOM_THRESHOLD_DEFAULT);
	mq->cache_size = cache_size;

	if (epool_init(&mq->pre_cache_pool, from_cblock(cache_size))) {
		DMERR("couldn't initialize pool of pre-cache entries");
		goto bad_pre_cache_init;
	}

	if (epool_init(&mq->cache_pool, from_cblock(cache_size))) {
		DMERR("couldn't initialize pool of cache entries");
		goto bad_cache_init;
	}

	mq->tick_protected = 0;
	mq->tick = 0;
	mq->hit_count = 0;
	mq->generation = 0;
	mq->promote_threshold = 0;
	mutex_init(&mq->lock);
	spin_lock_init(&mq->tick_lock);

	queue_init(&mq->pre_cache);
	queue_init(&mq->cache_clean);
	queue_init(&mq->cache_dirty);

	mq->generation_period = max((unsigned) from_cblock(cache_size), 1024U);

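	/*
	 * Sizing example: a 16384-block cache gives
	 * next_power(8192, 16) = 8192 buckets, so hash_bits is
	 * ffs(8192) - 1 = 13.
	 */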
	mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16);
	mq->hash_bits = ffs(mq->nr_buckets) - 1;
	mq->table = kzalloc(sizeof(*mq->table) * mq->nr_buckets, GFP_KERNEL);
	if (!mq->table)
		goto bad_alloc_table;

	return &mq->policy;

bad_alloc_table:
	epool_exit(&mq->cache_pool);
bad_cache_init:
	epool_exit(&mq->pre_cache_pool);
bad_pre_cache_init:
	kfree(mq);

	return NULL;
}

/*----------------------------------------------------------------*/

static struct dm_cache_policy_type mq_policy_type = {
	.name = "mq",
	.version = {1, 1, 0},
	.hint_size = 4,
	.owner = THIS_MODULE,
	.create = mq_create
};

static struct dm_cache_policy_type default_policy_type = {
	.name = "default",
	.version = {1, 1, 0},
	.hint_size = 4,
	.owner = THIS_MODULE,
	.create = mq_create
};

static int __init mq_init(void)
{
	int r;

	mq_entry_cache = kmem_cache_create("dm_mq_policy_cache_entry",
					   sizeof(struct entry),
					   __alignof__(struct entry),
					   0, NULL);
	if (!mq_entry_cache)
		goto bad;

	r = dm_cache_policy_register(&mq_policy_type);
	if (r) {
		DMERR("register failed %d", r);
		goto bad_register_mq;
	}

	r = dm_cache_policy_register(&default_policy_type);
	if (!r) {
		DMINFO("version %u.%u.%u loaded",
		       mq_policy_type.version[0],
		       mq_policy_type.version[1],
		       mq_policy_type.version[2]);
		return 0;
	}

	DMERR("register failed (as default) %d", r);

	dm_cache_policy_unregister(&mq_policy_type);
bad_register_mq:
	kmem_cache_destroy(mq_entry_cache);
bad:
	return -ENOMEM;
}

static void __exit mq_exit(void)
{
	dm_cache_policy_unregister(&mq_policy_type);
	dm_cache_policy_unregister(&default_policy_type);

	kmem_cache_destroy(mq_entry_cache);
}

module_init(mq_init);
module_exit(mq_exit);

MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("mq cache policy");

MODULE_ALIAS("dm-cache-default");