/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-rl.c - pblk's rate limiter for user I/O
 *
 */
18
19 #include "pblk.h"
20
pblk_rl_kick_u_timer(struct pblk_rl * rl)21 static void pblk_rl_kick_u_timer(struct pblk_rl *rl)
22 {
23 mod_timer(&rl->u_timer, jiffies + msecs_to_jiffies(5000));
24 }
25
pblk_rl_is_limit(struct pblk_rl * rl)26 int pblk_rl_is_limit(struct pblk_rl *rl)
27 {
28 int rb_space;
29
30 rb_space = atomic_read(&rl->rb_space);
31
32 return (rb_space == 0);
33 }
34
pblk_rl_user_may_insert(struct pblk_rl * rl,int nr_entries)35 int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries)
36 {
37 int rb_user_cnt = atomic_read(&rl->rb_user_cnt);
38 int rb_space = atomic_read(&rl->rb_space);
39
40 if (unlikely(rb_space >= 0) && (rb_space - nr_entries < 0))
41 return NVM_IO_ERR;
42
43 if (rb_user_cnt >= rl->rb_user_max)
44 return NVM_IO_REQUEUE;
45
46 return NVM_IO_OK;
47 }
48
pblk_rl_inserted(struct pblk_rl * rl,int nr_entries)49 void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries)
50 {
51 int rb_space = atomic_read(&rl->rb_space);
52
53 if (unlikely(rb_space >= 0))
54 atomic_sub(nr_entries, &rl->rb_space);
55 }
56
pblk_rl_gc_may_insert(struct pblk_rl * rl,int nr_entries)57 int pblk_rl_gc_may_insert(struct pblk_rl *rl, int nr_entries)
58 {
59 int rb_gc_cnt = atomic_read(&rl->rb_gc_cnt);
60 int rb_user_active;
61
62 /* If there is no user I/O let GC take over space on the write buffer */
63 rb_user_active = READ_ONCE(rl->rb_user_active);
64 return (!(rb_gc_cnt >= rl->rb_gc_max && rb_user_active));
65 }
66
/*
 * Charge @nr_entries of write-buffer occupancy to the user quota and mark
 * user I/O as active so GC admission backs off (see
 * pblk_rl_gc_may_insert()). The activity flag is cleared by the u_timer
 * callback once no user I/O arrives for 5 seconds.
 */
void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries)
{
	atomic_add(nr_entries, &rl->rb_user_cnt);

	/* Release user I/O state. Protect from GC */
	smp_store_release(&rl->rb_user_active, 1);
	pblk_rl_kick_u_timer(rl);
}
75
/* Account one more line with write errors awaiting recovery by GC */
void pblk_rl_werr_line_in(struct pblk_rl *rl)
{
	atomic_inc(&rl->werr_lines);
}
80
/* A write-error line has been recovered; drop it from the werr count */
void pblk_rl_werr_line_out(struct pblk_rl *rl)
{
	atomic_dec(&rl->werr_lines);
}
85
/* Charge @nr_entries of write-buffer occupancy to the GC quota */
void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries)
{
	atomic_add(nr_entries, &rl->rb_gc_cnt);
}
90
/*
 * Release write-buffer occupancy once entries have left the buffer:
 * @nr_user entries from the user quota and @nr_gc from the GC quota.
 */
void pblk_rl_out(struct pblk_rl *rl, int nr_user, int nr_gc)
{
	atomic_sub(nr_user, &rl->rb_user_cnt);
	atomic_sub(nr_gc, &rl->rb_gc_cnt);
}
96
/* Total free blocks currently tracked by the rate limiter */
unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl)
{
	return atomic_read(&rl->free_blocks);
}
101
/* Free blocks available for user data (drives the rate calculation) */
unsigned long pblk_rl_nr_user_free_blks(struct pblk_rl *rl)
{
	return atomic_read(&rl->free_user_blocks);
}
106
__pblk_rl_update_rates(struct pblk_rl * rl,unsigned long free_blocks)107 static void __pblk_rl_update_rates(struct pblk_rl *rl,
108 unsigned long free_blocks)
109 {
110 struct pblk *pblk = container_of(rl, struct pblk, rl);
111 int max = rl->rb_budget;
112 int werr_gc_needed = atomic_read(&rl->werr_lines);
113
114 if (free_blocks >= rl->high) {
115 if (werr_gc_needed) {
116 /* Allocate a small budget for recovering
117 * lines with write errors
118 */
119 rl->rb_gc_max = 1 << rl->rb_windows_pw;
120 rl->rb_user_max = max - rl->rb_gc_max;
121 rl->rb_state = PBLK_RL_WERR;
122 } else {
123 rl->rb_user_max = max;
124 rl->rb_gc_max = 0;
125 rl->rb_state = PBLK_RL_OFF;
126 }
127 } else if (free_blocks < rl->high) {
128 int shift = rl->high_pw - rl->rb_windows_pw;
129 int user_windows = free_blocks >> shift;
130 int user_max = user_windows << PBLK_MAX_REQ_ADDRS_PW;
131
132 rl->rb_user_max = user_max;
133 rl->rb_gc_max = max - user_max;
134
135 if (free_blocks <= rl->rsv_blocks) {
136 rl->rb_user_max = 0;
137 rl->rb_gc_max = max;
138 }
139
140 /* In the worst case, we will need to GC lines in the low list
141 * (high valid sector count). If there are lines to GC on high
142 * or mid lists, these will be prioritized
143 */
144 rl->rb_state = PBLK_RL_LOW;
145 }
146
147 if (rl->rb_state != PBLK_RL_OFF)
148 pblk_gc_should_start(pblk);
149 else
150 pblk_gc_should_stop(pblk);
151 }
152
/* Recompute rate-limiter budgets from the current user free-block count */
void pblk_rl_update_rates(struct pblk_rl *rl)
{
	__pblk_rl_update_rates(rl, pblk_rl_nr_user_free_blks(rl));
}
157
/*
 * @line has been freed: return its good blocks to both free pools and
 * recompute the rate-limiter budgets with the updated user count.
 */
void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line)
{
	int blk_in_line = atomic_read(&line->blk_in_line);
	int free_blocks;

	atomic_add(blk_in_line, &rl->free_blocks);
	free_blocks = atomic_add_return(blk_in_line, &rl->free_user_blocks);

	__pblk_rl_update_rates(rl, free_blocks);
}
168
/*
 * @line has been taken for writing: remove its good blocks from the free
 * pool. When @used, the blocks are also subtracted from the user
 * free-block count; otherwise the current user count is merely re-read
 * so the budgets can be recomputed.
 */
void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line,
			    bool used)
{
	int blk_in_line = atomic_read(&line->blk_in_line);
	int free_blocks;

	atomic_sub(blk_in_line, &rl->free_blocks);

	if (used)
		free_blocks = atomic_sub_return(blk_in_line,
						&rl->free_user_blocks);
	else
		free_blocks = atomic_read(&rl->free_user_blocks);

	__pblk_rl_update_rates(rl, free_blocks);
}
185
/*
 * Free-block high watermark; at or above it the limiter runs with the
 * full budget assigned to user I/O (see __pblk_rl_update_rates()).
 */
int pblk_rl_high_thrs(struct pblk_rl *rl)
{
	return rl->high;
}
190
/* Per-I/O entry cap: half the write-buffer budget (set in pblk_rl_init()) */
int pblk_rl_max_io(struct pblk_rl *rl)
{
	return rl->rb_max_io;
}
195
/*
 * Timer callback: no user I/O has arrived for 5 seconds, so clear the
 * user-active flag and let GC take over the full write buffer.
 */
static void pblk_rl_u_timer(struct timer_list *t)
{
	struct pblk_rl *rl = from_timer(rl, t, u_timer);

	/* Release user I/O state. Protect from GC */
	smp_store_release(&rl->rb_user_active, 0);
}
203
/* Tear down the rate limiter: stop the user-activity timer */
void pblk_rl_free(struct pblk_rl *rl)
{
	del_timer(&rl->u_timer);
}
208
/*
 * Initialize the rate limiter for a write buffer of @budget entries.
 * Derives the free-block high watermark (accounting for line metadata
 * overhead), the reserved GC block pool and the rate-limiter window
 * order, and starts with the whole budget assigned to user I/O.
 */
void pblk_rl_init(struct pblk_rl *rl, int budget)
{
	struct pblk *pblk = container_of(rl, struct pblk, rl);
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	int min_blocks = lm->blk_per_line * PBLK_GC_RSV_LINE;
	int sec_meta, blk_meta;

	unsigned int rb_windows;

	/* Consider sectors used for metadata */
	sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
	blk_meta = DIV_ROUND_UP(sec_meta, geo->clba);

	/* High watermark: over-provisioned blocks minus metadata overhead */
	rl->high = pblk->op_blks - blk_meta - lm->blk_per_line;
	rl->high_pw = get_count_order(rl->high);

	rl->rsv_blocks = min_blocks;

	/* This will always be a power-of-2 */
	rb_windows = budget / PBLK_MAX_REQ_ADDRS;
	rl->rb_windows_pw = get_count_order(rb_windows);

	/* To start with, all buffer is available to user I/O writers */
	rl->rb_budget = budget;
	rl->rb_user_max = budget;
	rl->rb_max_io = budget >> 1;
	rl->rb_gc_max = 0;
	rl->rb_state = PBLK_RL_HIGH;

	atomic_set(&rl->rb_user_cnt, 0);
	atomic_set(&rl->rb_gc_cnt, 0);
	/* -1 means no device-imposed space limit is in effect */
	atomic_set(&rl->rb_space, -1);
	atomic_set(&rl->werr_lines, 0);

	timer_setup(&rl->u_timer, pblk_rl_u_timer, 0);

	rl->rb_user_active = 0;
	rl->rb_gc_active = 0;
}
251