/*
 * UWB reservation management.
 *
 * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uwb.h>

#include "uwb-internal.h"

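/*
 * MAS allocation overview (informal notes, derived from the code below
 * and, loosely, from the WiMedia superframe layout): a superframe is
 * UWB_NUM_MAS (256) medium access slots, treated here as a grid of
 * UWB_NUM_ZONES (16) zones ("columns") by UWB_MAS_PER_ZONE (16) slots
 * ("rows").  ai->bm holds one byte per MAS: 0 if the slot is free,
 * UWB_RSV_MAS_NOT_AVAIL if it may not be used, and UWB_RSV_MAS_SAFE or
 * UWB_RSV_MAS_UNSAFE once the allocator has claimed it.  A "column"
 * allocation claims the same slots in every zone matching a given
 * interval (2, 4, 8 or 16 zones); a "row" allocation (interval 1)
 * claims a whole slot index across all usable zones.
 */

/*
 * Mark the free MAS in each selected column as allocated, using the
 * column set chosen by uwb_rsv_find_best_column_set(): in each column
 * the first csi.safe_mas_per_col free slots become UWB_RSV_MAS_SAFE and
 * the next csi.unsafe_mas_per_col become UWB_RSV_MAS_UNSAFE.
 */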
static void uwb_rsv_fill_column_alloc(struct uwb_rsv_alloc_info *ai)
{
	int col, mas, safe_mas, unsafe_mas;
	unsigned char *bm = ai->bm;
	struct uwb_rsv_col_info *ci = ai->ci;
	unsigned char c;

	for (col = ci->csi.start_col; col < UWB_NUM_ZONES; col += ci->csi.interval) {

		safe_mas   = ci->csi.safe_mas_per_col;
		unsafe_mas = ci->csi.unsafe_mas_per_col;

		for (mas = 0; mas < UWB_MAS_PER_ZONE; mas++) {
			if (bm[col * UWB_MAS_PER_ZONE + mas] == 0) {

				if (safe_mas > 0) {
					safe_mas--;
					c = UWB_RSV_MAS_SAFE;
				} else if (unsafe_mas > 0) {
					unsafe_mas--;
					c = UWB_RSV_MAS_UNSAFE;
				} else {
					break;
				}
				bm[col * UWB_MAS_PER_ZONE + mas] = c;
			}
		}
	}
}

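/*
 * Mark whole rows (one slot index across every zone) as allocated,
 * working down from the highest slot index until ri.used_rows rows have
 * been taken.  The first seven rows claimed are marked safe, any
 * further rows unsafe, and the safe/unsafe/total counters in *ai are
 * updated accordingly.
 */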
static void uwb_rsv_fill_row_alloc(struct uwb_rsv_alloc_info *ai)
{
	int mas, col, rows;
	unsigned char *bm = ai->bm;
	struct uwb_rsv_row_info *ri = &ai->ri;
	unsigned char c;

	rows = 1;
	c = UWB_RSV_MAS_SAFE;
	for (mas = UWB_MAS_PER_ZONE - 1; mas >= 0; mas--) {
		if (ri->avail[mas] == 1) {

			if (rows > ri->used_rows) {
				break;
			} else if (rows > 7) {
				c = UWB_RSV_MAS_UNSAFE;
			}

			for (col = 0; col < UWB_NUM_ZONES; col++) {
				if (bm[col * UWB_NUM_ZONES + mas] != UWB_RSV_MAS_NOT_AVAIL) {
					bm[col * UWB_NUM_ZONES + mas] = c;
					if (c == UWB_RSV_MAS_SAFE)
						ai->safe_allocated_mases++;
					else
						ai->unsafe_allocated_mases++;
				}
			}
			rows++;
		}
	}
	ai->total_allocated_mases = ai->safe_allocated_mases + ai->unsafe_allocated_mases;
}

/*
 * Find the best column set for a given availability, interval, and
 * number of safe and unsafe MAS.
 *
 * The different sets are tried in order as shown below, depending on the interval.
 *
 * interval = 16
 *	deep = 0
 *		set 1 ->  {  8 }
 *	deep = 1
 *		set 1 ->  {  4 }
 *		set 2 ->  { 12 }
 *	deep = 2
 *		set 1 ->  {  2 }
 *		set 2 ->  {  6 }
 *		set 3 ->  { 10 }
 *		set 4 ->  { 14 }
 *	deep = 3
 *		set 1 ->  {  1 }
 *		set 2 ->  {  3 }
 *		set 3 ->  {  5 }
 *		set 4 ->  {  7 }
 *		set 5 ->  {  9 }
 *		set 6 ->  { 11 }
 *		set 7 ->  { 13 }
 *		set 8 ->  { 15 }
 *
 * interval = 8
 *	deep = 0
 *		set 1 ->  {  4  12 }
 *	deep = 1
 *		set 1 ->  {  2  10 }
 *		set 2 ->  {  6  14 }
 *	deep = 2
 *		set 1 ->  {  1   9 }
 *		set 2 ->  {  3  11 }
 *		set 3 ->  {  5  13 }
 *		set 4 ->  {  7  15 }
 *
 * interval = 4
 *	deep = 0
 *		set 1 ->  {  2   6  10  14 }
 *	deep = 1
 *		set 1 ->  {  1   5   9  13 }
 *		set 2 ->  {  3   7  11  15 }
 *
 * interval = 2
 *	deep = 0
 *		set 1 ->  {  1   3   5   7   9  11  13  15 }
 */
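/*
 * In terms of the loop below: at a given interval, "deep" halves the
 * first candidate column on each pass (start_col_deep ends up as
 * interval >> (deep + 1)) and the sets within a pass are spaced
 * interval >> deep columns apart.  Worked example: with interval = 8
 * and deep = 1, the candidate start columns are 2 and 6, giving the
 * column sets { 2 10 } and { 6 14 } listed above.
 */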
static int uwb_rsv_find_best_column_set(struct uwb_rsv_alloc_info *ai, int interval,
					int num_safe_mas, int num_unsafe_mas)
{
	struct uwb_rsv_col_info *ci = ai->ci;
	struct uwb_rsv_col_set_info *csi = &ci->csi;
	struct uwb_rsv_col_set_info tmp_csi;
	int deep, set, col, start_col_deep, col_start_set;
	int start_col, max_mas_in_set, lowest_max_mas_in_deep;
	int n_mas;
	int found = UWB_RSV_ALLOC_NOT_FOUND;

	tmp_csi.start_col = 0;
	start_col_deep = interval;
	n_mas = num_unsafe_mas + num_safe_mas;

	for (deep = 0; ((interval >> deep) & 0x1) == 0; deep++) {
		start_col_deep /= 2;
		col_start_set = 0;
		lowest_max_mas_in_deep = UWB_MAS_PER_ZONE;

		for (set = 1; set <= (1 << deep); set++) {
			max_mas_in_set = 0;
			start_col = start_col_deep + col_start_set;
			for (col = start_col; col < UWB_NUM_ZONES; col += interval) {

				if (ci[col].max_avail_safe >= num_safe_mas &&
				    ci[col].max_avail_unsafe >= n_mas) {
					if (ci[col].highest_mas[n_mas] > max_mas_in_set)
						max_mas_in_set = ci[col].highest_mas[n_mas];
				} else {
					max_mas_in_set = 0;
					break;
				}
			}
			if ((lowest_max_mas_in_deep > max_mas_in_set) && max_mas_in_set) {
				lowest_max_mas_in_deep = max_mas_in_set;

				tmp_csi.start_col = start_col;
			}
			col_start_set += (interval >> deep);
		}

		if (lowest_max_mas_in_deep < 8) {
			csi->start_col = tmp_csi.start_col;
			found = UWB_RSV_ALLOC_FOUND;
			break;
		} else if ((lowest_max_mas_in_deep > 8) &&
			   (lowest_max_mas_in_deep != UWB_MAS_PER_ZONE) &&
			   (found == UWB_RSV_ALLOC_NOT_FOUND)) {
			csi->start_col = tmp_csi.start_col;
			found = UWB_RSV_ALLOC_FOUND;
		}
	}

	if (found == UWB_RSV_ALLOC_FOUND) {
		csi->interval = interval;
		csi->safe_mas_per_col = num_safe_mas;
		csi->unsafe_mas_per_col = num_unsafe_mas;

		ai->safe_allocated_mases = (UWB_NUM_ZONES / interval) * num_safe_mas;
		ai->unsafe_allocated_mases = (UWB_NUM_ZONES / interval) * num_unsafe_mas;
		ai->total_allocated_mases = ai->safe_allocated_mases + ai->unsafe_allocated_mases;
		ai->interval = interval;
	}
	return found;
}

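/*
 * Record which slot indexes ("rows") are free in every usable zone.  A
 * row counts as free only if no zone other than column 0 (the beacon
 * period zone, which is skipped) has it marked UWB_RSV_MAS_NOT_AVAIL;
 * ri->free_rows is the number of such rows.
 */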
static void get_row_descriptors(struct uwb_rsv_alloc_info *ai)
{
	unsigned char *bm = ai->bm;
	struct uwb_rsv_row_info *ri = &ai->ri;
	int col, mas;

	ri->free_rows = 16;
	for (mas = 0; mas < UWB_MAS_PER_ZONE; mas++) {
		ri->avail[mas] = 1;
		for (col = 1; col < UWB_NUM_ZONES; col++) {
			if (bm[col * UWB_NUM_ZONES + mas] == UWB_RSV_MAS_NOT_AVAIL) {
				ri->free_rows--;
				ri->avail[mas] = 0;
				break;
			}
		}
	}
}

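/*
 * Summarise availability within one zone ("column"): max_avail_unsafe
 * is the total number of free MAS in the column, max_avail_safe is how
 * many of those can be taken before a contiguous run exceeds the
 * per-start-slot limit in safe_mas_in_row[], and highest_mas[n] is the
 * slot index at which the n-th free MAS is found.
 */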
static void uwb_rsv_fill_column_info(unsigned char *bm, int column, struct uwb_rsv_col_info *rci)
{
	int mas;
	int block_count = 0, start_block = 0;
	int previous_avail = 0;
	int available = 0;
	int safe_mas_in_row[UWB_MAS_PER_ZONE] = {
		8, 7, 6, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 2, 1,
	};

	rci->max_avail_safe = 0;

	for (mas = 0; mas < UWB_MAS_PER_ZONE; mas++) {
		if (!bm[column * UWB_NUM_ZONES + mas]) {
			available++;
			rci->max_avail_unsafe = available;

			rci->highest_mas[available] = mas;

			if (previous_avail) {
				block_count++;
				if ((block_count > safe_mas_in_row[start_block]) &&
				    (!rci->max_avail_safe))
					rci->max_avail_safe = available - 1;
			} else {
				previous_avail = 1;
				start_block = mas;
				block_count = 1;
			}
		} else {
			previous_avail = 0;
		}
	}
	if (!rci->max_avail_safe)
		rci->max_avail_safe = rci->max_avail_unsafe;
}

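/*
 * Build the per-column availability summaries; column 0 (the beacon
 * period zone) is skipped.
 */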
static void get_column_descriptors(struct uwb_rsv_alloc_info *ai)
{
	unsigned char *bm = ai->bm;
	struct uwb_rsv_col_info *ci = ai->ci;
	int col;

	for (col = 1; col < UWB_NUM_ZONES; col++) {
		uwb_rsv_fill_column_info(bm, col, &ci[col]);
	}
}

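/*
 * Try a row (interval 1) reservation: choose the largest row count that
 * still fits in ri.free_rows while satisfying the min_mas/max_mas
 * constraints (each row supplies UWB_USABLE_MAS_PER_ROW MAS), then fill
 * the allocation bitmap via uwb_rsv_fill_row_alloc().
 */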
static int uwb_rsv_find_best_row_alloc(struct uwb_rsv_alloc_info *ai)
{
	int n_rows;
	int max_rows = ai->max_mas / UWB_USABLE_MAS_PER_ROW;
	int min_rows = ai->min_mas / UWB_USABLE_MAS_PER_ROW;

	if (ai->min_mas % UWB_USABLE_MAS_PER_ROW)
		min_rows++;
	for (n_rows = max_rows; n_rows >= min_rows; n_rows--) {
		if (n_rows <= ai->ri.free_rows) {
			ai->ri.used_rows = n_rows;
			ai->interval = 1; /* row reservation */
			uwb_rsv_fill_row_alloc(ai);
			return UWB_RSV_ALLOC_FOUND;
		}
	}
	return UWB_RSV_ALLOC_NOT_FOUND;
}

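/*
 * Try a column reservation with the given zone interval: translate the
 * min_mas/max_mas constraints into a per-zone MAS count, and for the
 * largest feasible count search for a column set, preferring the split
 * with the most safe MAS (n_safe counts down, so the first set found
 * wins).
 */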
static int uwb_rsv_find_best_col_alloc(struct uwb_rsv_alloc_info *ai, int interval)
{
	int n_safe, n_unsafe, n_mas;
	int n_column = UWB_NUM_ZONES / interval;
	int max_per_zone = ai->max_mas / n_column;
	int min_per_zone = ai->min_mas / n_column;

	if (ai->min_mas % n_column)
		min_per_zone++;

	if (min_per_zone > UWB_MAS_PER_ZONE) {
		return UWB_RSV_ALLOC_NOT_FOUND;
	}

	if (max_per_zone > UWB_MAS_PER_ZONE) {
		max_per_zone = UWB_MAS_PER_ZONE;
	}

	for (n_mas = max_per_zone; n_mas >= min_per_zone; n_mas--) {
		if (uwb_rsv_find_best_column_set(ai, interval, 0, n_mas) == UWB_RSV_ALLOC_NOT_FOUND)
			continue;
		for (n_safe = n_mas; n_safe >= 0; n_safe--) {
			n_unsafe = n_mas - n_safe;
			if (uwb_rsv_find_best_column_set(ai, interval, n_safe, n_unsafe) == UWB_RSV_ALLOC_FOUND) {
				uwb_rsv_fill_column_alloc(ai);
				return UWB_RSV_ALLOC_FOUND;
			}
		}
	}
	return UWB_RSV_ALLOC_NOT_FOUND;
}

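/**
 * uwb_rsv_find_best_allocation - pick MAS for a reservation request
 * @rsv:       reservation being established (min_mas, max_mas and
 *             max_interval are the constraints used here)
 * @available: bitmap of MAS that may be used for this reservation
 * @result:    on success, the chosen MAS (overall and unsafe bitmaps,
 *             plus safe/unsafe counts)
 *
 * Column allocations are preferred, trying the largest permitted zone
 * interval first; a row allocation is the fallback.  Returns
 * UWB_RSV_ALLOC_FOUND or UWB_RSV_ALLOC_NOT_FOUND.
 *
 * Rough caller sketch, loosely modelled on the caller in rsv.c (the
 * constraint values are illustrative only; rc is the struct uwb_rc the
 * reservation belongs to):
 *
 *	struct uwb_mas_bm available, mas;
 *
 *	uwb_drp_available(rc, &available);
 *	rsv->min_mas = 16;
 *	rsv->max_mas = 64;
 *	rsv->max_interval = 4;
 *	if (uwb_rsv_find_best_allocation(rsv, &available, &mas)
 *	    == UWB_RSV_ALLOC_FOUND)
 *		... propose "mas" to the peer in a DRP IE ...
 */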
int uwb_rsv_find_best_allocation(struct uwb_rsv *rsv, struct uwb_mas_bm *available,
				 struct uwb_mas_bm *result)
{
	struct uwb_rsv_alloc_info *ai;
	int interval;
	int bit_index;

	ai = kzalloc(sizeof(struct uwb_rsv_alloc_info), GFP_KERNEL);
	if (!ai)
		return UWB_RSV_ALLOC_NOT_FOUND;
	ai->min_mas = rsv->min_mas;
	ai->max_mas = rsv->max_mas;
	ai->max_interval = rsv->max_interval;

	/* mark every MAS that is not in the available bitmap as not available */
	for_each_clear_bit(bit_index, available->bm, UWB_NUM_MAS)
		ai->bm[bit_index] = UWB_RSV_MAS_NOT_AVAIL;

	if (ai->max_interval == 1) {
		get_row_descriptors(ai);
		if (uwb_rsv_find_best_row_alloc(ai) == UWB_RSV_ALLOC_FOUND)
			goto alloc_found;
		else
			goto alloc_not_found;
	}

	get_column_descriptors(ai);

	for (interval = 16; interval >= 2; interval >>= 1) {
		if (interval > ai->max_interval)
			continue;
		if (uwb_rsv_find_best_col_alloc(ai, interval) == UWB_RSV_ALLOC_FOUND)
			goto alloc_found;
	}

	/* fall back to a row reservation if no column allocation was found */
	get_row_descriptors(ai);
	if (uwb_rsv_find_best_row_alloc(ai) == UWB_RSV_ALLOC_FOUND)
		goto alloc_found;
	else
		goto alloc_not_found;

  alloc_found:
	bitmap_zero(result->bm, UWB_NUM_MAS);
	bitmap_zero(result->unsafe_bm, UWB_NUM_MAS);
	/* fill the safe and unsafe bitmaps */
	for (bit_index = 0; bit_index < UWB_NUM_MAS; bit_index++) {
		if (ai->bm[bit_index] == UWB_RSV_MAS_SAFE)
			set_bit(bit_index, result->bm);
		else if (ai->bm[bit_index] == UWB_RSV_MAS_UNSAFE)
			set_bit(bit_index, result->unsafe_bm);
	}
	bitmap_or(result->bm, result->bm, result->unsafe_bm, UWB_NUM_MAS);

	result->safe   = ai->safe_allocated_mases;
	result->unsafe = ai->unsafe_allocated_mases;

	kfree(ai);
	return UWB_RSV_ALLOC_FOUND;

  alloc_not_found:
	kfree(ai);
	return UWB_RSV_ALLOC_NOT_FOUND;
}