drivers/lightnvm/pblk-rl.c
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-rl.c - pblk's rate limiter for user I/O
 *
 */

#include "pblk.h"

static void pblk_rl_kick_u_timer(struct pblk_rl *rl)
{
        mod_timer(&rl->u_timer, jiffies + msecs_to_jiffies(5000));
}

int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries)
{
        int rb_user_cnt = atomic_read(&rl->rb_user_cnt);

        return (!(rb_user_cnt + nr_entries > rl->rb_user_max));
}

int pblk_rl_gc_may_insert(struct pblk_rl *rl, int nr_entries)
{
        int rb_gc_cnt = atomic_read(&rl->rb_gc_cnt);
        int rb_user_active;

        /* If there is no user I/O let GC take over space on the write buffer */
        rb_user_active = READ_ONCE(rl->rb_user_active);
        return (!(rb_gc_cnt + nr_entries > rl->rb_gc_max && rb_user_active));
}
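
/*
 * Illustrative producer-side usage (a sketch only; the real call sites live
 * in the write-buffer code outside this file, and the flow below is an
 * assumption based on these helpers, not a copy of pblk's write path):
 *
 *      if (!pblk_rl_user_may_insert(rl, nr_entries))
 *              back off and let the caller retry later;
 *      ...reserve nr_entries write buffer entries...
 *      pblk_rl_user_in(rl, nr_entries);
 *
 * GC writers follow the same shape through pblk_rl_gc_may_insert() and
 * pblk_rl_gc_in(), but are throttled by rb_gc_max instead of rb_user_max.
 */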

void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries)
{
        atomic_add(nr_entries, &rl->rb_user_cnt);

        /* Mark user I/O as active; store-release so the GC path sees it */
        smp_store_release(&rl->rb_user_active, 1);
        pblk_rl_kick_u_timer(rl);
}

void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries)
{
        atomic_add(nr_entries, &rl->rb_gc_cnt);
}

void pblk_rl_out(struct pblk_rl *rl, int nr_user, int nr_gc)
{
        atomic_sub(nr_user, &rl->rb_user_cnt);
        atomic_sub(nr_gc, &rl->rb_gc_cnt);
}
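
/*
 * Accounting note: entries charged by pblk_rl_user_in()/pblk_rl_gc_in() when
 * they enter the write buffer are credited back through pblk_rl_out() once
 * the consumer side releases them (the exact release point lives in the
 * write-buffer code, not in this file). The counters therefore track
 * in-flight buffer occupancy per source, which is what rb_user_max and
 * rb_gc_max are compared against in the may_insert helpers above.
 */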

unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl)
{
        return atomic_read(&rl->free_blocks);
}

/*
 * We check for (i) the number of free blocks in the current LUN and (ii) the
 * total number of free blocks in the pblk instance. This is to even out the
 * number of free blocks on each LUN when GC kicks in.
 *
 * Only the total number of free blocks is used to configure the rate limiter.
 */
static int pblk_rl_update_rates(struct pblk_rl *rl, unsigned long max)
{
        unsigned long free_blocks = pblk_rl_nr_free_blks(rl);

        if (free_blocks >= rl->high) {
                rl->rb_user_max = max - rl->rb_gc_rsv;
                rl->rb_gc_max = rl->rb_gc_rsv;
                rl->rb_state = PBLK_RL_HIGH;
        } else if (free_blocks < rl->high) {
                int shift = rl->high_pw - rl->rb_windows_pw;
                int user_windows = free_blocks >> shift;
                int user_max = user_windows << PBLK_MAX_REQ_ADDRS_PW;
                int gc_max;

                rl->rb_user_max = user_max;
                gc_max = max - rl->rb_user_max;
                rl->rb_gc_max = max(gc_max, rl->rb_gc_rsv);

                if (free_blocks > rl->low)
                        rl->rb_state = PBLK_RL_MID;
                else
                        rl->rb_state = PBLK_RL_LOW;
        }

        return rl->rb_state;
}
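
/*
 * Worked example of the mid/low range computation above (numbers are
 * illustrative, not taken from a real device): assume rl->high is 1024
 * blocks (so high_pw = 10), rb_windows_pw = 3 and free_blocks = 512.
 * Then shift = 10 - 3 = 7, user_windows = 512 >> 7 = 4 and, with
 * PBLK_MAX_REQ_ADDRS_PW assumed to be 6 (64-address requests),
 * user_max = 4 << 6 = 256 buffer entries for user I/O; whatever is left of
 * the budget (but at least rb_gc_rsv) goes to GC. As free_blocks shrinks,
 * user_max shrinks with it.
 */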

void pblk_rl_set_gc_rsc(struct pblk_rl *rl, int rsv)
{
        rl->rb_gc_rsv = rl->rb_gc_max = rsv;
}

void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line)
{
        struct pblk *pblk = container_of(rl, struct pblk, rl);
        int blk_in_line = atomic_read(&line->blk_in_line);
        int ret;

        atomic_add(blk_in_line, &rl->free_blocks);
        /* Rates will not change that often - no need to lock update */
        ret = pblk_rl_update_rates(rl, rl->rb_budget);

        if (ret == PBLK_RL_MID || ret == PBLK_RL_LOW)
                pblk_gc_should_start(pblk);
        else
                pblk_gc_should_stop(pblk);
}

void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line)
{
        struct pblk *pblk = container_of(rl, struct pblk, rl);
        int blk_in_line = atomic_read(&line->blk_in_line);
        int ret;

        atomic_sub(blk_in_line, &rl->free_blocks);

        /* Rates will not change that often - no need to lock update */
        ret = pblk_rl_update_rates(rl, rl->rb_budget);
        if (ret == PBLK_RL_MID || ret == PBLK_RL_LOW)
                pblk_gc_should_start(pblk);
        else
                pblk_gc_should_stop(pblk);
}

int pblk_rl_gc_thrs(struct pblk_rl *rl)
{
        return rl->high;
}

int pblk_rl_sysfs_rate_show(struct pblk_rl *rl)
{
        return rl->rb_user_max;
}

static void pblk_rl_u_timer(unsigned long data)
{
        struct pblk_rl *rl = (struct pblk_rl *)data;

        /* No user I/O for 5 seconds; clear the active flag so GC can claim buffer space */
        smp_store_release(&rl->rb_user_active, 0);
}

void pblk_rl_free(struct pblk_rl *rl)
{
        del_timer(&rl->u_timer);
}

void pblk_rl_init(struct pblk_rl *rl, int budget)
{
        unsigned int rb_windows;

        rl->high = rl->total_blocks / PBLK_USER_HIGH_THRS;
        rl->low = rl->total_blocks / PBLK_USER_LOW_THRS;
        rl->high_pw = get_count_order(rl->high);

        /* This will always be a power-of-2 */
        rb_windows = budget / PBLK_MAX_REQ_ADDRS;
        rl->rb_windows_pw = get_count_order(rb_windows) + 1;

        /* To start with, the entire buffer is available to user I/O writers */
        rl->rb_budget = budget;
        rl->rb_user_max = budget;
        atomic_set(&rl->rb_user_cnt, 0);
        rl->rb_gc_max = 0;
        rl->rb_state = PBLK_RL_HIGH;
        atomic_set(&rl->rb_gc_cnt, 0);

        setup_timer(&rl->u_timer, pblk_rl_u_timer, (unsigned long)rl);
        rl->rb_user_active = 0;
}
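
/*
 * Sizing sketch for pblk_rl_init() (illustrative numbers only; the threshold
 * and request-size constants are assumptions based on their names): with
 * rl->total_blocks = 2048, PBLK_USER_HIGH_THRS = 2 and PBLK_USER_LOW_THRS = 20,
 * the limiter gets high = 1024 and low = 102 free blocks (high_pw = 10).
 * With a budget of 512 buffer entries and PBLK_MAX_REQ_ADDRS assumed to be 64,
 * rb_windows = 8 and rb_windows_pw = get_count_order(8) + 1 = 4. Until the
 * free-block count drops below rl->high, the whole budget is handed to user
 * I/O and GC is limited to its reserve (rb_gc_rsv).
 */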