/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-rl.c - pblk's rate limiter for user I/O
 */

#include "pblk.h"
21 static void pblk_rl_kick_u_timer(struct pblk_rl
*rl
)
23 mod_timer(&rl
->u_timer
, jiffies
+ msecs_to_jiffies(5000));
26 int pblk_rl_is_limit(struct pblk_rl
*rl
)
30 rb_space
= atomic_read(&rl
->rb_space
);
32 return (rb_space
== 0);
35 int pblk_rl_user_may_insert(struct pblk_rl
*rl
, int nr_entries
)
37 int rb_user_cnt
= atomic_read(&rl
->rb_user_cnt
);
38 int rb_space
= atomic_read(&rl
->rb_space
);
40 if (unlikely(rb_space
>= 0) && (rb_space
- nr_entries
< 0))
43 if (rb_user_cnt
>= rl
->rb_user_max
)
44 return NVM_IO_REQUEUE
;
49 void pblk_rl_inserted(struct pblk_rl
*rl
, int nr_entries
)
51 int rb_space
= atomic_read(&rl
->rb_space
);
53 if (unlikely(rb_space
>= 0))
54 atomic_sub(nr_entries
, &rl
->rb_space
);
57 int pblk_rl_gc_may_insert(struct pblk_rl
*rl
, int nr_entries
)
59 int rb_gc_cnt
= atomic_read(&rl
->rb_gc_cnt
);
62 /* If there is no user I/O let GC take over space on the write buffer */
63 rb_user_active
= READ_ONCE(rl
->rb_user_active
);
64 return (!(rb_gc_cnt
>= rl
->rb_gc_max
&& rb_user_active
));
67 void pblk_rl_user_in(struct pblk_rl
*rl
, int nr_entries
)
69 atomic_add(nr_entries
, &rl
->rb_user_cnt
);
71 /* Release user I/O state. Protect from GC */
72 smp_store_release(&rl
->rb_user_active
, 1);
73 pblk_rl_kick_u_timer(rl
);
76 void pblk_rl_gc_in(struct pblk_rl
*rl
, int nr_entries
)
78 atomic_add(nr_entries
, &rl
->rb_gc_cnt
);
81 void pblk_rl_out(struct pblk_rl
*rl
, int nr_user
, int nr_gc
)
83 atomic_sub(nr_user
, &rl
->rb_user_cnt
);
84 atomic_sub(nr_gc
, &rl
->rb_gc_cnt
);
87 unsigned long pblk_rl_nr_free_blks(struct pblk_rl
*rl
)
89 return atomic_read(&rl
->free_blocks
);
93 * We check for (i) the number of free blocks in the current LUN and (ii) the
94 * total number of free blocks in the pblk instance. This is to even out the
95 * number of free blocks on each LUN when GC kicks in.
97 * Only the total number of free blocks is used to configure the rate limiter.
99 void pblk_rl_update_rates(struct pblk_rl
*rl
)
101 struct pblk
*pblk
= container_of(rl
, struct pblk
, rl
);
102 unsigned long free_blocks
= pblk_rl_nr_free_blks(rl
);
103 int max
= rl
->rb_budget
;
105 if (free_blocks
>= rl
->high
) {
106 rl
->rb_user_max
= max
;
108 rl
->rb_state
= PBLK_RL_HIGH
;
109 } else if (free_blocks
< rl
->high
) {
110 int shift
= rl
->high_pw
- rl
->rb_windows_pw
;
111 int user_windows
= free_blocks
>> shift
;
112 int user_max
= user_windows
<< PBLK_MAX_REQ_ADDRS_PW
;
114 rl
->rb_user_max
= user_max
;
115 rl
->rb_gc_max
= max
- user_max
;
117 if (free_blocks
<= rl
->rsv_blocks
) {
122 /* In the worst case, we will need to GC lines in the low list
123 * (high valid sector count). If there are lines to GC on high
124 * or mid lists, these will be prioritized
126 rl
->rb_state
= PBLK_RL_LOW
;
129 if (rl
->rb_state
== (PBLK_RL_MID
| PBLK_RL_LOW
))
130 pblk_gc_should_start(pblk
);
132 pblk_gc_should_stop(pblk
);
135 void pblk_rl_free_lines_inc(struct pblk_rl
*rl
, struct pblk_line
*line
)
137 int blk_in_line
= atomic_read(&line
->blk_in_line
);
139 atomic_add(blk_in_line
, &rl
->free_blocks
);
140 pblk_rl_update_rates(rl
);
143 void pblk_rl_free_lines_dec(struct pblk_rl
*rl
, struct pblk_line
*line
)
145 int blk_in_line
= atomic_read(&line
->blk_in_line
);
147 atomic_sub(blk_in_line
, &rl
->free_blocks
);
148 pblk_rl_update_rates(rl
);
151 int pblk_rl_high_thrs(struct pblk_rl
*rl
)
156 int pblk_rl_max_io(struct pblk_rl
*rl
)
158 return rl
->rb_max_io
;
161 static void pblk_rl_u_timer(struct timer_list
*t
)
163 struct pblk_rl
*rl
= from_timer(rl
, t
, u_timer
);
165 /* Release user I/O state. Protect from GC */
166 smp_store_release(&rl
->rb_user_active
, 0);
169 void pblk_rl_free(struct pblk_rl
*rl
)
171 del_timer(&rl
->u_timer
);
174 void pblk_rl_init(struct pblk_rl
*rl
, int budget
)
176 struct pblk
*pblk
= container_of(rl
, struct pblk
, rl
);
177 struct pblk_line_meta
*lm
= &pblk
->lm
;
178 int min_blocks
= lm
->blk_per_line
* PBLK_GC_RSV_LINE
;
179 unsigned int rb_windows
;
181 rl
->high
= rl
->total_blocks
/ PBLK_USER_HIGH_THRS
;
182 rl
->high_pw
= get_count_order(rl
->high
);
184 rl
->low
= rl
->total_blocks
/ PBLK_USER_LOW_THRS
;
185 if (rl
->low
< min_blocks
)
186 rl
->low
= min_blocks
;
188 rl
->rsv_blocks
= min_blocks
;
190 /* This will always be a power-of-2 */
191 rb_windows
= budget
/ PBLK_MAX_REQ_ADDRS
;
192 rl
->rb_windows_pw
= get_count_order(rb_windows
);
194 /* To start with, all buffer is available to user I/O writers */
195 rl
->rb_budget
= budget
;
196 rl
->rb_user_max
= budget
;
197 rl
->rb_max_io
= budget
>> 1;
199 rl
->rb_state
= PBLK_RL_HIGH
;
201 atomic_set(&rl
->rb_user_cnt
, 0);
202 atomic_set(&rl
->rb_gc_cnt
, 0);
203 atomic_set(&rl
->rb_space
, -1);
205 timer_setup(&rl
->u_timer
, pblk_rl_u_timer
, 0);
207 rl
->rb_user_active
= 0;
208 rl
->rb_gc_active
= 0;