/*
 * drivers/lightnvm/pblk-rl.c
 * (mirror_ubuntu-bionic-kernel tree; snapshot includes "lightnvm: Convert
 * timers to use timer_setup()")
 */
1 /*
2 * Copyright (C) 2016 CNEX Labs
3 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
4 * Matias Bjorling <matias@cnexlabs.com>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * pblk-rl.c - pblk's rate limiter for user I/O
16 *
17 */
18
19 #include "pblk.h"
20
21 static void pblk_rl_kick_u_timer(struct pblk_rl *rl)
22 {
23 mod_timer(&rl->u_timer, jiffies + msecs_to_jiffies(5000));
24 }
25
26 int pblk_rl_is_limit(struct pblk_rl *rl)
27 {
28 int rb_space;
29
30 rb_space = atomic_read(&rl->rb_space);
31
32 return (rb_space == 0);
33 }
34
35 int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries)
36 {
37 int rb_user_cnt = atomic_read(&rl->rb_user_cnt);
38 int rb_space = atomic_read(&rl->rb_space);
39
40 if (unlikely(rb_space >= 0) && (rb_space - nr_entries < 0))
41 return NVM_IO_ERR;
42
43 if (rb_user_cnt >= rl->rb_user_max)
44 return NVM_IO_REQUEUE;
45
46 return NVM_IO_OK;
47 }
48
49 void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries)
50 {
51 int rb_space = atomic_read(&rl->rb_space);
52
53 if (unlikely(rb_space >= 0))
54 atomic_sub(nr_entries, &rl->rb_space);
55 }
56
57 int pblk_rl_gc_may_insert(struct pblk_rl *rl, int nr_entries)
58 {
59 int rb_gc_cnt = atomic_read(&rl->rb_gc_cnt);
60 int rb_user_active;
61
62 /* If there is no user I/O let GC take over space on the write buffer */
63 rb_user_active = READ_ONCE(rl->rb_user_active);
64 return (!(rb_gc_cnt >= rl->rb_gc_max && rb_user_active));
65 }
66
/*
 * Account @nr_entries user entries into the write buffer and mark user
 * I/O as active so GC backs off (see pblk_rl_gc_may_insert()). The
 * activity flag is cleared by the timer after 5s without user writes.
 */
void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries)
{
	atomic_add(nr_entries, &rl->rb_user_cnt);

	/* Release user I/O state. Protect from GC */
	smp_store_release(&rl->rb_user_active, 1);
	pblk_rl_kick_u_timer(rl);
}
75
/* Account @nr_entries GC entries into the write buffer. */
void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries)
{
	atomic_add(nr_entries, &rl->rb_gc_cnt);
}
80
/*
 * Release @nr_user user entries and @nr_gc GC entries from the write
 * buffer accounting (counterpart of pblk_rl_user_in()/pblk_rl_gc_in()).
 */
void pblk_rl_out(struct pblk_rl *rl, int nr_user, int nr_gc)
{
	atomic_sub(nr_user, &rl->rb_user_cnt);
	atomic_sub(nr_gc, &rl->rb_gc_cnt);
}
86
/* Return the current total number of free blocks in the pblk instance. */
unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl)
{
	return atomic_read(&rl->free_blocks);
}
91
92 /*
93 * We check for (i) the number of free blocks in the current LUN and (ii) the
94 * total number of free blocks in the pblk instance. This is to even out the
95 * number of free blocks on each LUN when GC kicks in.
96 *
97 * Only the total number of free blocks is used to configure the rate limiter.
98 */
99 void pblk_rl_update_rates(struct pblk_rl *rl)
100 {
101 struct pblk *pblk = container_of(rl, struct pblk, rl);
102 unsigned long free_blocks = pblk_rl_nr_free_blks(rl);
103 int max = rl->rb_budget;
104
105 if (free_blocks >= rl->high) {
106 rl->rb_user_max = max;
107 rl->rb_gc_max = 0;
108 rl->rb_state = PBLK_RL_HIGH;
109 } else if (free_blocks < rl->high) {
110 int shift = rl->high_pw - rl->rb_windows_pw;
111 int user_windows = free_blocks >> shift;
112 int user_max = user_windows << PBLK_MAX_REQ_ADDRS_PW;
113
114 rl->rb_user_max = user_max;
115 rl->rb_gc_max = max - user_max;
116
117 if (free_blocks <= rl->rsv_blocks) {
118 rl->rb_user_max = 0;
119 rl->rb_gc_max = max;
120 }
121
122 /* In the worst case, we will need to GC lines in the low list
123 * (high valid sector count). If there are lines to GC on high
124 * or mid lists, these will be prioritized
125 */
126 rl->rb_state = PBLK_RL_LOW;
127 }
128
129 if (rl->rb_state == (PBLK_RL_MID | PBLK_RL_LOW))
130 pblk_gc_should_start(pblk);
131 else
132 pblk_gc_should_stop(pblk);
133 }
134
135 void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line)
136 {
137 int blk_in_line = atomic_read(&line->blk_in_line);
138
139 atomic_add(blk_in_line, &rl->free_blocks);
140 pblk_rl_update_rates(rl);
141 }
142
143 void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line)
144 {
145 int blk_in_line = atomic_read(&line->blk_in_line);
146
147 atomic_sub(blk_in_line, &rl->free_blocks);
148 pblk_rl_update_rates(rl);
149 }
150
/* Return the high free-block watermark configured in pblk_rl_init(). */
int pblk_rl_high_thrs(struct pblk_rl *rl)
{
	return rl->high;
}
155
/* Return the maximum I/O size allowed by the rate limiter (half the budget). */
int pblk_rl_max_io(struct pblk_rl *rl)
{
	return rl->rb_max_io;
}
160
/*
 * Timer callback: fires 5s after the last user write (see
 * pblk_rl_kick_u_timer()) and clears the user-activity flag so that GC
 * may take over the write-buffer space.
 */
static void pblk_rl_u_timer(struct timer_list *t)
{
	struct pblk_rl *rl = from_timer(rl, t, u_timer);

	/* Release user I/O state. Protect from GC */
	smp_store_release(&rl->rb_user_active, 0);
}
168
169 void pblk_rl_free(struct pblk_rl *rl)
170 {
171 del_timer(&rl->u_timer);
172 }
173
/*
 * Initialize the rate limiter embedded in the pblk instance.
 * @budget: total number of write-buffer entries to split between user
 *          and GC I/O.
 *
 * NOTE(review): reads rl->total_blocks — presumably set by the caller
 * before this runs; confirm against the init path in pblk-init.c.
 */
void pblk_rl_init(struct pblk_rl *rl, int budget)
{
	struct pblk *pblk = container_of(rl, struct pblk, rl);
	struct pblk_line_meta *lm = &pblk->lm;
	int min_blocks = lm->blk_per_line * PBLK_GC_RSV_LINE;
	unsigned int rb_windows;

	/* Free-block watermarks at which the user/GC split changes */
	rl->high = rl->total_blocks / PBLK_USER_HIGH_THRS;
	rl->high_pw = get_count_order(rl->high);

	rl->low = rl->total_blocks / PBLK_USER_LOW_THRS;
	if (rl->low < min_blocks)
		rl->low = min_blocks;

	/* Blocks reserved for GC (see pblk_rl_update_rates()) */
	rl->rsv_blocks = min_blocks;

	/* This will always be a power-of-2 */
	rb_windows = budget / PBLK_MAX_REQ_ADDRS;
	rl->rb_windows_pw = get_count_order(rb_windows);

	/* To start with, all buffer is available to user I/O writers */
	rl->rb_budget = budget;
	rl->rb_user_max = budget;
	rl->rb_max_io = budget >> 1;
	rl->rb_gc_max = 0;
	rl->rb_state = PBLK_RL_HIGH;

	atomic_set(&rl->rb_user_cnt, 0);
	atomic_set(&rl->rb_gc_cnt, 0);
	/* -1 disables the hard space limit (see pblk_rl_inserted()) */
	atomic_set(&rl->rb_space, -1);

	timer_setup(&rl->u_timer, pblk_rl_u_timer, 0);

	rl->rb_user_active = 0;
	rl->rb_gc_active = 0;
}