xref: /wlan-driver/qcacld-3.0/core/dp/txrx/ol_txrx_flow_control.c (revision 5113495b16420b49004c444715d2daae2066e7dc)
1 /*
2  * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /* OS abstraction libraries */
21 #include <qdf_nbuf.h>           /* qdf_nbuf_t, etc. */
22 #include <qdf_atomic.h>         /* qdf_atomic_read, etc. */
23 #include <qdf_util.h>           /* qdf_unlikely */
24 
25 /* APIs for other modules */
26 #include <htt.h>                /* HTT_TX_EXT_TID_MGMT */
27 #include <ol_htt_tx_api.h>      /* htt_tx_desc_tid */
28 
29 /* internal header files relevant for all systems */
30 #include <ol_txrx_internal.h>   /* TXRX_ASSERT1 */
31 #include <ol_tx_desc.h>         /* ol_tx_desc */
32 #include <ol_tx_send.h>         /* ol_tx_send */
33 #include <ol_txrx.h>            /* ol_txrx_get_vdev_from_vdev_id */
34 
35 /* internal header files relevant only for HL systems */
36 #include <ol_tx_queue.h>        /* ol_tx_enqueue */
37 
38 /* internal header files relevant only for specific systems (Pronto) */
39 #include <ol_txrx_encap.h>      /* OL_TX_ENCAP, etc */
40 #include <ol_tx.h>
41 #include <ol_cfg.h>
42 #include <cdp_txrx_handle.h>
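
/*
 * INVALID_FLOW_ID marks a flow pool that is no longer mapped to any vdev;
 * MAX_INVALID_BIN bounds how many such detached ("invalid") pools may
 * accumulate before the driver asserts.
 */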
43 #define INVALID_FLOW_ID 0xFF
44 #define MAX_INVALID_BIN 3
45 
46 #ifdef QCA_LL_TX_FLOW_GLOBAL_MGMT_POOL
47 #define TX_FLOW_MGMT_POOL_ID	0xEF
48 #define TX_FLOW_MGMT_POOL_SIZE  32
49 
50 /**
51  * ol_tx_register_global_mgmt_pool() - register global pool for mgmt packets
52  * @pdev: pdev handle
53  *
54  * Return: none
55  */
56 static void
57 ol_tx_register_global_mgmt_pool(struct ol_txrx_pdev_t *pdev)
58 {
59 	pdev->mgmt_pool = ol_tx_create_flow_pool(TX_FLOW_MGMT_POOL_ID,
60 						 TX_FLOW_MGMT_POOL_SIZE);
61 	if (!pdev->mgmt_pool)
62 		ol_txrx_err("Management pool creation failed");
63 }
64 
65 /**
66  * ol_tx_deregister_global_mgmt_pool() - Deregister global pool for mgmt packets
67  * @pdev: pdev handle
68  *
69  * Return: none
70  */
71 static void
72 ol_tx_deregister_global_mgmt_pool(struct ol_txrx_pdev_t *pdev)
73 {
74 	ol_tx_dec_pool_ref(pdev->mgmt_pool, false);
75 }
76 #else
77 static inline void
78 ol_tx_register_global_mgmt_pool(struct ol_txrx_pdev_t *pdev)
79 {
80 }
81 static inline void
82 ol_tx_deregister_global_mgmt_pool(struct ol_txrx_pdev_t *pdev)
83 {
84 }
85 #endif
86 
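/**
 * ol_txrx_fwd_desc_thresh_check() - check descriptor availability for
 *	forwarded traffic on a vdev's flow pool
 * @txrx_vdev: vdev handle
 *
 * Return: true if the pool has at least stop_th + OL_TX_NON_FWD_RESERVE
 *	descriptors available, false otherwise (including NULL vdev/pool)
 */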
87 bool ol_txrx_fwd_desc_thresh_check(struct ol_txrx_vdev_t *txrx_vdev)
88 {
89 	struct ol_tx_flow_pool_t *pool;
90 	bool enough_desc_flag;
91 
92 	if (!txrx_vdev)
93 		return false;
94 
95 	pool = txrx_vdev->pool;
96 
97 	if (!pool)
98 		return false;
99 
100 	qdf_spin_lock_bh(&pool->flow_pool_lock);
101 	enough_desc_flag = (pool->avail_desc < (pool->stop_th +
102 				OL_TX_NON_FWD_RESERVE))
103 		? false : true;
104 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
105 	return enough_desc_flag;
106 }
107 
108 /**
109  * ol_tx_set_desc_global_pool_size() - set global pool size
110  * @num_msdu_desc: total number of descriptors
111  *
112  * Return: none
113  */
114 void ol_tx_set_desc_global_pool_size(uint32_t num_msdu_desc)
115 {
116 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
117 	ol_txrx_pdev_handle pdev;
118 
119 	if (qdf_unlikely(!soc))
120 		return;
121 
122 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
123 	if (!pdev) {
124 		qdf_print("pdev is NULL");
125 		return;
126 	}
127 	pdev->num_msdu_desc = num_msdu_desc;
128 	if (!ol_tx_get_is_mgmt_over_wmi_enabled())
129 		pdev->num_msdu_desc += TX_FLOW_MGMT_POOL_SIZE;
130 	ol_txrx_info_high("Global pool size: %d", pdev->num_msdu_desc);
131 }
132 
133 /**
134  * ol_tx_get_total_free_desc() - get total free descriptors
135  * @pdev: pdev handle
136  *
137  * Return: total free descriptors
138  */
139 uint32_t ol_tx_get_total_free_desc(struct ol_txrx_pdev_t *pdev)
140 {
141 	struct ol_tx_flow_pool_t *pool = NULL;
142 	uint32_t free_desc;
143 
144 	free_desc = pdev->tx_desc.num_free;
145 	qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
146 	TAILQ_FOREACH(pool, &pdev->tx_desc.flow_pool_list,
147 		      flow_pool_list_elem) {
148 		qdf_spin_lock_bh(&pool->flow_pool_lock);
149 		free_desc += pool->avail_desc;
150 		qdf_spin_unlock_bh(&pool->flow_pool_lock);
151 	}
152 	qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
153 
154 	return free_desc;
155 }
156 
157 /**
158  * ol_tx_register_flow_control() - Register fw based tx flow control
159  * @pdev: pdev handle
160  *
161  * Return: none
162  */
163 void ol_tx_register_flow_control(struct ol_txrx_pdev_t *pdev)
164 {
165 	qdf_spinlock_create(&pdev->tx_desc.flow_pool_list_lock);
166 	TAILQ_INIT(&pdev->tx_desc.flow_pool_list);
167 
168 	if (!ol_tx_get_is_mgmt_over_wmi_enabled())
169 		ol_tx_register_global_mgmt_pool(pdev);
170 }
171 
172 /**
173  * ol_tx_deregister_flow_control() - Deregister fw based tx flow control
174  * @pdev: pdev handle
175  *
176  * Return: none
177  */
178 void ol_tx_deregister_flow_control(struct ol_txrx_pdev_t *pdev)
179 {
180 	int i = 0;
181 	struct ol_tx_flow_pool_t *pool = NULL;
182 	struct cdp_soc_t *soc;
183 
184 	if (!ol_tx_get_is_mgmt_over_wmi_enabled())
185 		ol_tx_deregister_global_mgmt_pool(pdev);
186 
187 	soc = cds_get_context(QDF_MODULE_ID_SOC);
188 
189 	qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
190 	while (!TAILQ_EMPTY(&pdev->tx_desc.flow_pool_list)) {
191 		pool = TAILQ_FIRST(&pdev->tx_desc.flow_pool_list);
192 		if (!pool)
193 			break;
194 		qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
195 		ol_txrx_info("flow pool list is not empty %d!!!", i++);
196 
197 		if (i == 1)
198 			ol_tx_dump_flow_pool_info(soc);
199 
200 		ol_tx_dec_pool_ref(pool, true);
201 		qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
202 	}
203 	qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
204 	qdf_spinlock_destroy(&pdev->tx_desc.flow_pool_list_lock);
205 }
206 
207 /**
208  * ol_tx_delete_flow_pool() - delete flow pool
209  * @pool: flow pool pointer
210  * @force: free pool forcefully
211  *
212  * Delete flow_pool if all tx descriptors are available.
213  * Otherwise put it in FLOW_POOL_INVALID state.
214  * If force is set then pull all available descriptors to
215  * global pool.
216  *
217  * Return: 0 on success, error code on failure
218  */
219 static int ol_tx_delete_flow_pool(struct ol_tx_flow_pool_t *pool, bool force)
220 {
221 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
222 	ol_txrx_pdev_handle pdev;
223 	uint16_t i, size;
224 	union ol_tx_desc_list_elem_t *temp_list = NULL;
225 	struct ol_tx_desc_t *tx_desc = NULL;
226 
227 	if (!pool) {
228 		ol_txrx_err("pool is NULL");
229 		QDF_ASSERT(0);
230 		return -ENOMEM;
231 	}
232 
233 	if (qdf_unlikely(!soc)) {
234 		QDF_ASSERT(0);
235 		return -ENOMEM;
236 	}
237 
238 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
239 	if (!pdev) {
240 		ol_txrx_err("pdev is NULL");
241 		QDF_ASSERT(0);
242 		return -ENOMEM;
243 	}
244 
245 	qdf_spin_lock_bh(&pool->flow_pool_lock);
246 	if (pool->avail_desc == pool->flow_pool_size || force == true)
247 		pool->status = FLOW_POOL_INACTIVE;
248 	else
249 		pool->status = FLOW_POOL_INVALID;
250 
251 	/* Take all free descriptors and put it in temp_list */
252 	temp_list = pool->freelist;
253 	size = pool->avail_desc;
254 	pool->freelist = NULL;
255 	pool->avail_desc = 0;
256 
257 	if (pool->status == FLOW_POOL_INACTIVE) {
258 		qdf_spin_unlock_bh(&pool->flow_pool_lock);
259 		/* Free flow_pool */
260 		qdf_spinlock_destroy(&pool->flow_pool_lock);
261 		qdf_mem_free(pool);
262 	} else { /* FLOW_POOL_INVALID case*/
263 		pool->flow_pool_size -= size;
264 		pool->flow_pool_id = INVALID_FLOW_ID;
265 		qdf_spin_unlock_bh(&pool->flow_pool_lock);
266 		ol_tx_inc_pool_ref(pool);
267 
268 		pdev->tx_desc.num_invalid_bin++;
269 		ol_txrx_info("invalid pool created %d",
270 			     pdev->tx_desc.num_invalid_bin);
271 		if (pdev->tx_desc.num_invalid_bin > MAX_INVALID_BIN)
272 			ASSERT(0);
273 
274 		qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
275 		TAILQ_INSERT_TAIL(&pdev->tx_desc.flow_pool_list, pool,
276 				 flow_pool_list_elem);
277 		qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
278 	}
279 
280 	/* put free descriptors to global pool */
281 	qdf_spin_lock_bh(&pdev->tx_mutex);
282 	for (i = 0; i < size; i++) {
283 		tx_desc = &temp_list->tx_desc;
284 		temp_list = temp_list->next;
285 
286 		ol_tx_put_desc_global_pool(pdev, tx_desc);
287 	}
288 	qdf_spin_unlock_bh(&pdev->tx_mutex);
289 
290 	ol_tx_distribute_descs_to_deficient_pools_from_global_pool();
291 
292 	return 0;
293 }
294 
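/**
 * ol_tx_inc_pool_ref() - take a reference on a flow pool
 * @pool: flow pool pointer
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_INVAL if @pool is NULL
 */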
295 QDF_STATUS ol_tx_inc_pool_ref(struct ol_tx_flow_pool_t *pool)
296 {
297 	if (!pool) {
298 		ol_txrx_err("flow pool is NULL");
299 		return QDF_STATUS_E_INVAL;
300 	}
301 
302 	qdf_spin_lock_bh(&pool->flow_pool_lock);
303 	qdf_atomic_inc(&pool->ref_cnt);
304 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
305 	ol_txrx_dbg("pool %pK, ref_cnt %x",
306 		    pool, qdf_atomic_read(&pool->ref_cnt));
307 
308 	return  QDF_STATUS_SUCCESS;
309 }
310 
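/**
 * ol_tx_dec_pool_ref() - drop a reference on a flow pool
 * @pool: flow pool pointer
 * @force: free the pool even if not all of its descriptors have been returned
 *
 * The pool is removed from the flow pool list and deleted via
 * ol_tx_delete_flow_pool() when the last reference is dropped.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_INVAL on NULL pool/pdev
 */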
311 QDF_STATUS ol_tx_dec_pool_ref(struct ol_tx_flow_pool_t *pool, bool force)
312 {
313 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
314 	ol_txrx_pdev_handle pdev;
315 
316 	if (!pool) {
317 		ol_txrx_err("flow pool is NULL");
318 		QDF_ASSERT(0);
319 		return QDF_STATUS_E_INVAL;
320 	}
321 
322 	if (qdf_unlikely(!soc)) {
323 		QDF_ASSERT(0);
324 		return QDF_STATUS_E_INVAL;
325 	}
326 
327 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
328 	if (!pdev) {
329 		ol_txrx_err("pdev is NULL");
330 		QDF_ASSERT(0);
331 		return QDF_STATUS_E_INVAL;
332 	}
333 
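	/* Nested locks: take flow_pool_list_lock before flow_pool_lock */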
334 	qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
335 	qdf_spin_lock_bh(&pool->flow_pool_lock);
336 	if (qdf_atomic_dec_and_test(&pool->ref_cnt)) {
337 		qdf_spin_unlock_bh(&pool->flow_pool_lock);
338 		TAILQ_REMOVE(&pdev->tx_desc.flow_pool_list, pool,
339 			     flow_pool_list_elem);
340 		qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
341 		ol_txrx_dbg("Deleting pool %pK", pool);
342 		ol_tx_delete_flow_pool(pool, force);
343 	} else {
344 		qdf_spin_unlock_bh(&pool->flow_pool_lock);
345 		qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
346 		ol_txrx_dbg("pool %pK, ref_cnt %x",
347 			    pool, qdf_atomic_read(&pool->ref_cnt));
348 	}
349 
350 	return  QDF_STATUS_SUCCESS;
351 }
352 
353 /**
354  * ol_tx_flow_pool_status_to_str() - convert flow pool status to string
355  * @status: flow pool status
356  *
357  * Return: string corresponding to the flow pool status
358  */
359 static const char *ol_tx_flow_pool_status_to_str
360 					(enum flow_pool_status status)
361 {
362 	switch (status) {
363 	CASE_RETURN_STRING(FLOW_POOL_ACTIVE_UNPAUSED);
364 	CASE_RETURN_STRING(FLOW_POOL_ACTIVE_PAUSED);
365 	CASE_RETURN_STRING(FLOW_POOL_NON_PRIO_PAUSED);
366 	CASE_RETURN_STRING(FLOW_POOL_INVALID);
367 	CASE_RETURN_STRING(FLOW_POOL_INACTIVE);
368 	default:
369 		return "unknown";
370 	}
371 }
372 
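/**
 * ol_tx_dump_flow_pool_info_compact() - log a one-line summary of the global
 *	pool and every flow pool (pool size and available descriptors)
 * @pdev: pdev handle
 *
 * Return: none
 */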
373 void ol_tx_dump_flow_pool_info_compact(struct ol_txrx_pdev_t *pdev)
374 {
375 	char *comb_log_str;
376 	int bytes_written = 0;
377 	uint32_t free_size;
378 	struct ol_tx_flow_pool_t *pool = NULL;
379 
380 	free_size = WLAN_MAX_VDEVS * 100 + 100;
381 	comb_log_str = qdf_mem_malloc(free_size);
382 	if (!comb_log_str)
383 		return;
384 
385 	bytes_written = snprintf(&comb_log_str[bytes_written], free_size,
386 				 "G:(%d,%d) ",
387 				 pdev->tx_desc.pool_size,
388 				 pdev->tx_desc.num_free);
389 
390 	free_size -= bytes_written;
391 
392 	qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
393 	TAILQ_FOREACH(pool, &pdev->tx_desc.flow_pool_list,
394 		      flow_pool_list_elem) {
395 		qdf_spin_lock_bh(&pool->flow_pool_lock);
396 		bytes_written += snprintf(&comb_log_str[bytes_written],
397 					  free_size, "| %d (%d,%d)",
398 					  pool->flow_pool_id,
399 					  pool->flow_pool_size,
400 					  pool->avail_desc);
401 		free_size -= bytes_written;
402 		qdf_spin_unlock_bh(&pool->flow_pool_lock);
403 	}
404 	qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
405 	qdf_nofl_debug("STATS | FC: %s", comb_log_str);
406 	qdf_mem_free(comb_log_str);
407 }
408 
409 /**
410  * ol_tx_dump_flow_pool_info() - dump global_pool and flow_pool info
411  * @soc_hdl: cdp_soc context, required only in lithium_dp flow control.
412  *
413  * Return: none
414  */
415 void ol_tx_dump_flow_pool_info(struct cdp_soc_t *soc_hdl)
416 {
417 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
418 	ol_txrx_pdev_handle pdev;
419 	struct ol_tx_flow_pool_t *pool = NULL, *pool_prev = NULL;
420 	struct ol_tx_flow_pool_t tmp_pool;
421 
422 	if (qdf_unlikely(!soc)) {
423 		ol_txrx_err("soc is NULL");
424 		QDF_ASSERT(0);
425 		return;
426 	}
427 
428 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
429 	if (!pdev) {
430 		ol_txrx_err("ERROR: pdev NULL");
431 		QDF_ASSERT(0); /* traceback */
432 		return;
433 	}
434 
435 	txrx_nofl_info("Global total %d :: avail %d invalid flow_pool %d ",
436 		       pdev->tx_desc.pool_size,
437 		       pdev->tx_desc.num_free,
438 		       pdev->tx_desc.num_invalid_bin);
439 
440 	txrx_nofl_info("maps %d pool unmaps %d pool resize %d pkt drops %d",
441 		       pdev->pool_stats.pool_map_count,
442 		       pdev->pool_stats.pool_unmap_count,
443 		       pdev->pool_stats.pool_resize_count,
444 		       pdev->pool_stats.pkt_drop_no_pool);
445 	/*
446 	 * Nested spin lock.
447 	 * Always take in below order.
448 	 * flow_pool_list_lock -> flow_pool_lock
449 	 */
450 	qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
451 	TAILQ_FOREACH(pool, &pdev->tx_desc.flow_pool_list,
452 					 flow_pool_list_elem) {
453 		ol_tx_inc_pool_ref(pool);
454 		qdf_spin_lock_bh(&pool->flow_pool_lock);
455 		qdf_mem_copy(&tmp_pool, pool, sizeof(tmp_pool));
456 		qdf_spin_unlock_bh(&pool->flow_pool_lock);
457 		qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
458 
459 		if (pool_prev)
460 			ol_tx_dec_pool_ref(pool_prev, false);
461 
462 		txrx_nofl_info("flow_pool_id %d ::", tmp_pool.flow_pool_id);
463 		txrx_nofl_info("status %s flow_id %d flow_type %d",
464 			       ol_tx_flow_pool_status_to_str
465 					(tmp_pool.status),
466 			       tmp_pool.member_flow_id, tmp_pool.flow_type);
467 		txrx_nofl_info("total %d :: available %d :: deficient %d :: overflow %d :: pkt dropped (no desc) %d",
468 			       tmp_pool.flow_pool_size, tmp_pool.avail_desc,
469 			       tmp_pool.deficient_desc,
470 			       tmp_pool.overflow_desc,
471 			       tmp_pool.pkt_drop_no_desc);
472 		txrx_nofl_info("thresh: start %d stop %d prio start %d prio stop %d",
473 			       tmp_pool.start_th, tmp_pool.stop_th,
474 			       tmp_pool.start_priority_th,
475 			       tmp_pool.stop_priority_th);
476 		pool_prev = pool;
477 		qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
478 	}
479 	qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
480 
481 	/* decrement ref count for last pool in list */
482 	if (pool_prev)
483 		ol_tx_dec_pool_ref(pool_prev, false);
484 
485 }
486 
487 /**
488  * ol_tx_clear_flow_pool_stats() - clear flow pool statistics
489  *
490  * Return: none
491  */
492 void ol_tx_clear_flow_pool_stats(void)
493 {
494 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
495 	ol_txrx_pdev_handle pdev;
496 
497 	if (qdf_unlikely(!soc))
498 		return;
499 
500 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
501 	if (!pdev) {
502 		ol_txrx_err("pdev is null");
503 		return;
504 	}
505 	qdf_mem_zero(&pdev->pool_stats, sizeof(pdev->pool_stats));
506 }
507 
508 /**
509  * ol_tx_move_desc_n() - Move n descriptors from src_pool to dst_pool.
510  * @src_pool: source pool
511  * @dst_pool: destination pool
512  * @desc_move_count: descriptor move count
513  *
514  * Return: actual descriptors moved
515  */
516 static int ol_tx_move_desc_n(struct ol_tx_flow_pool_t *src_pool,
517 		      struct ol_tx_flow_pool_t *dst_pool,
518 		      int desc_move_count)
519 {
520 	uint16_t count = 0, i;
521 	struct ol_tx_desc_t *tx_desc;
522 	union ol_tx_desc_list_elem_t *temp_list = NULL;
523 
524 	/* Take descriptors from source pool and put it in temp_list */
525 	qdf_spin_lock_bh(&src_pool->flow_pool_lock);
526 	for (i = 0; i < desc_move_count; i++) {
527 		tx_desc = ol_tx_get_desc_flow_pool(src_pool);
528 		((union ol_tx_desc_list_elem_t *)tx_desc)->next = temp_list;
529 		temp_list = (union ol_tx_desc_list_elem_t *)tx_desc;
530 
531 	}
532 	qdf_spin_unlock_bh(&src_pool->flow_pool_lock);
533 
534 	/* Take descriptors from temp_list and put it in destination pool */
535 	qdf_spin_lock_bh(&dst_pool->flow_pool_lock);
536 	for (i = 0; i < desc_move_count; i++) {
537 		if (dst_pool->deficient_desc)
538 			dst_pool->deficient_desc--;
539 		else
540 			break;
541 		tx_desc = &temp_list->tx_desc;
542 		temp_list = temp_list->next;
543 		ol_tx_put_desc_flow_pool(dst_pool, tx_desc);
544 		count++;
545 	}
546 	qdf_spin_unlock_bh(&dst_pool->flow_pool_lock);
547 
548 	/* If anything is there in temp_list put it back to source pool */
549 	qdf_spin_lock_bh(&src_pool->flow_pool_lock);
550 	while (temp_list) {
551 		tx_desc = &temp_list->tx_desc;
552 		temp_list = temp_list->next;
553 		ol_tx_put_desc_flow_pool(src_pool, tx_desc);
554 	}
555 	qdf_spin_unlock_bh(&src_pool->flow_pool_lock);
556 
557 	return count;
558 }
559 
560 
561 /**
562  * ol_tx_distribute_descs_to_deficient_pools() - Distribute descriptors
563  * @src_pool: source pool
564  *
565  * Distribute all descriptors of source pool to all
566  * deficient pools as per flow_pool_list.
567  *
568  * Return: 0 for success
569  */
570 static int
571 ol_tx_distribute_descs_to_deficient_pools(struct ol_tx_flow_pool_t *src_pool)
572 {
573 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
574 	ol_txrx_pdev_handle pdev;
575 	struct ol_tx_flow_pool_t *dst_pool = NULL;
576 	uint16_t desc_count = src_pool->avail_desc;
577 	uint16_t desc_move_count = 0;
578 
579 	if (qdf_unlikely(!soc))
580 		return -EINVAL;
581 
582 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
583 	if (!pdev) {
584 		ol_txrx_err("pdev is NULL");
585 		return -EINVAL;
586 	}
587 	qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
588 	TAILQ_FOREACH(dst_pool, &pdev->tx_desc.flow_pool_list,
589 					 flow_pool_list_elem) {
590 		qdf_spin_lock_bh(&dst_pool->flow_pool_lock);
591 		if (dst_pool->deficient_desc) {
592 			desc_move_count =
593 				(dst_pool->deficient_desc > desc_count) ?
594 					desc_count : dst_pool->deficient_desc;
595 			qdf_spin_unlock_bh(&dst_pool->flow_pool_lock);
596 			desc_move_count = ol_tx_move_desc_n(src_pool,
597 						dst_pool, desc_move_count);
598 			desc_count -= desc_move_count;
599 
600 			qdf_spin_lock_bh(&dst_pool->flow_pool_lock);
601 			if (dst_pool->status == FLOW_POOL_ACTIVE_PAUSED) {
602 				if (dst_pool->avail_desc > dst_pool->start_th) {
603 					pdev->pause_cb(dst_pool->member_flow_id,
604 					      WLAN_NETIF_PRIORITY_QUEUE_ON,
605 					      WLAN_DATA_FLOW_CONTROL_PRIORITY);
606 
607 					pdev->pause_cb(dst_pool->member_flow_id,
608 						      WLAN_WAKE_ALL_NETIF_QUEUE,
609 						      WLAN_DATA_FLOW_CONTROL);
610 
611 					dst_pool->status =
612 						FLOW_POOL_ACTIVE_UNPAUSED;
613 				}
614 			}
615 		}
616 		qdf_spin_unlock_bh(&dst_pool->flow_pool_lock);
617 		if (desc_count == 0)
618 			break;
619 	}
620 	qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
621 
622 	return 0;
623 }
624 
625 /**
626  * ol_tx_create_flow_pool() - create flow pool
627  * @flow_pool_id: flow pool id
628  * @flow_pool_size: flow pool size
629  *
630  * Return: flow_pool pointer / NULL for error
631  */
632 struct ol_tx_flow_pool_t *ol_tx_create_flow_pool(uint8_t flow_pool_id,
633 						 uint16_t flow_pool_size)
634 {
635 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
636 	ol_txrx_pdev_handle pdev;
637 	struct ol_tx_flow_pool_t *pool;
638 	uint16_t size = 0, i;
639 	struct ol_tx_desc_t *tx_desc;
640 	union ol_tx_desc_list_elem_t *temp_list = NULL;
641 	uint32_t stop_threshold;
642 	uint32_t start_threshold;
643 
644 	if (qdf_unlikely(!soc))
645 		return NULL;
646 
647 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
648 	if (!pdev) {
649 		ol_txrx_err("pdev is NULL");
650 		return NULL;
651 	}
652 	stop_threshold = ol_cfg_get_tx_flow_stop_queue_th(pdev->ctrl_pdev);
653 	start_threshold = stop_threshold +
654 		ol_cfg_get_tx_flow_start_queue_offset(pdev->ctrl_pdev);
655 	pool = qdf_mem_malloc(sizeof(*pool));
656 	if (!pool)
657 		return NULL;
658 
659 	pool->flow_pool_id = flow_pool_id;
660 	pool->flow_pool_size = flow_pool_size;
661 	pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
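	/*
	 * Thresholds are percentages of the pool size. With hypothetical
	 * values of stop_threshold = 15, start queue offset = 10 and a
	 * 1000-descriptor pool, stop_th = 150 and start_th = 250: netif
	 * queues pause when fewer than 150 descriptors remain and resume
	 * once more than 250 are available again.
	 */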
662 	pool->start_th = (start_threshold * flow_pool_size)/100;
663 	pool->stop_th = (stop_threshold * flow_pool_size)/100;
664 	pool->stop_priority_th = (TX_PRIORITY_TH * pool->stop_th)/100;
665 	if (pool->stop_priority_th >= MAX_TSO_SEGMENT_DESC)
666 		pool->stop_priority_th -= MAX_TSO_SEGMENT_DESC;
667 
668 	pool->start_priority_th = (TX_PRIORITY_TH * pool->start_th)/100;
669 	if (pool->start_priority_th >= MAX_TSO_SEGMENT_DESC)
670 			pool->start_priority_th -= MAX_TSO_SEGMENT_DESC;
671 
672 	qdf_spinlock_create(&pool->flow_pool_lock);
673 	qdf_atomic_init(&pool->ref_cnt);
674 	ol_tx_inc_pool_ref(pool);
675 
676 	/* Take TX descriptor from global_pool and put it in temp_list*/
677 	qdf_spin_lock_bh(&pdev->tx_mutex);
678 	if (pdev->tx_desc.num_free >= pool->flow_pool_size)
679 		size = pool->flow_pool_size;
680 	else
681 		size = pdev->tx_desc.num_free;
682 
683 	for (i = 0; i < size; i++) {
684 		tx_desc = ol_tx_get_desc_global_pool(pdev);
685 		tx_desc->pool = pool;
686 		((union ol_tx_desc_list_elem_t *)tx_desc)->next = temp_list;
687 		temp_list = (union ol_tx_desc_list_elem_t *)tx_desc;
688 
689 	}
690 	qdf_spin_unlock_bh(&pdev->tx_mutex);
691 
692 	/* put temp_list to flow_pool */
693 	pool->freelist = temp_list;
694 	pool->avail_desc = size;
695 	pool->deficient_desc = pool->flow_pool_size - pool->avail_desc;
696 	/* used for resize pool*/
697 	pool->overflow_desc = 0;
698 
699 	/* Add flow_pool to flow_pool_list */
700 	qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
701 	TAILQ_INSERT_TAIL(&pdev->tx_desc.flow_pool_list, pool,
702 			 flow_pool_list_elem);
703 	qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
704 
705 	return pool;
706 }
707 
708 /**
709  * ol_tx_free_invalid_flow_pool() - free invalid pool
710  * @pool: pool
711  *
712  * Return: 0 on success, error code on failure
713  */
714 int ol_tx_free_invalid_flow_pool(struct ol_tx_flow_pool_t *pool)
715 {
716 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
717 	ol_txrx_pdev_handle pdev;
718 
719 	if (qdf_unlikely(!soc))
720 		return -EINVAL;
721 
722 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
723 	if ((!pdev) || (!pool) || (pool->status != FLOW_POOL_INVALID)) {
724 		ol_txrx_err("Invalid pool/pdev");
725 		return -EINVAL;
726 	}
727 
728 	/* directly distribute to other deficient pools */
729 	ol_tx_distribute_descs_to_deficient_pools(pool);
730 
731 	qdf_spin_lock_bh(&pool->flow_pool_lock);
732 	pool->flow_pool_size = pool->avail_desc;
733 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
734 
735 	pdev->tx_desc.num_invalid_bin--;
736 	ol_txrx_info("invalid pool deleted %d",
737 		     pdev->tx_desc.num_invalid_bin);
738 
739 	return ol_tx_dec_pool_ref(pool, false);
740 }
741 
742 /**
743  * ol_tx_get_flow_pool() - get flow_pool from flow_pool_id
744  * @flow_pool_id: flow pool id
745  *
746  * Return: flow_pool ptr / NULL if not found
747  */
748 static struct ol_tx_flow_pool_t *ol_tx_get_flow_pool(uint8_t flow_pool_id)
749 {
750 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
751 	ol_txrx_pdev_handle pdev;
752 	struct ol_tx_flow_pool_t *pool = NULL;
753 	bool is_found = false;
754 
755 	if (qdf_unlikely(!soc)) {
756 		QDF_ASSERT(0);
757 		return NULL;
758 	}
759 
760 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
761 	if (!pdev) {
762 		ol_txrx_err("ERROR: pdev NULL");
763 		QDF_ASSERT(0); /* traceback */
764 		return NULL;
765 	}
766 
767 	qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
768 	TAILQ_FOREACH(pool, &pdev->tx_desc.flow_pool_list,
769 					 flow_pool_list_elem) {
770 		qdf_spin_lock_bh(&pool->flow_pool_lock);
771 		if (pool->flow_pool_id == flow_pool_id) {
772 			qdf_spin_unlock_bh(&pool->flow_pool_lock);
773 			is_found = true;
774 			break;
775 		}
776 		qdf_spin_unlock_bh(&pool->flow_pool_lock);
777 	}
778 	qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
779 
780 	if (is_found == false)
781 		pool = NULL;
782 
783 	return pool;
784 }
785 
786 /**
787  * ol_tx_flow_pool_vdev_map() - Map flow_pool with vdev
788  * @pool: flow_pool
789  * @vdev_id: flow_id/vdev_id
790  *
791  * Return: none
792  */
793 static void ol_tx_flow_pool_vdev_map(struct ol_tx_flow_pool_t *pool,
794 				     uint8_t vdev_id)
795 {
796 	struct ol_txrx_vdev_t *vdev;
797 
798 	vdev = (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
799 	if (!vdev) {
800 		ol_txrx_err("invalid vdev_id %d", vdev_id);
801 		return;
802 	}
803 
804 	vdev->pool = pool;
805 	qdf_spin_lock_bh(&pool->flow_pool_lock);
806 	pool->member_flow_id = vdev_id;
807 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
808 }
809 
810 /**
811  * ol_tx_flow_pool_vdev_unmap() - Unmap flow_pool from vdev
812  * @pool: flow_pool
813  * @vdev_id: flow_id/vdev_id
814  *
815  * Return: none
816  */
817 static void ol_tx_flow_pool_vdev_unmap(struct ol_tx_flow_pool_t *pool,
818 				       uint8_t vdev_id)
819 {
820 	struct ol_txrx_vdev_t *vdev;
821 
822 	vdev = (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
823 	if (!vdev) {
824 		ol_txrx_dbg("invalid vdev_id %d", vdev_id);
825 		return;
826 	}
827 
828 	vdev->pool = NULL;
829 	qdf_spin_lock_bh(&pool->flow_pool_lock);
830 	pool->member_flow_id = INVALID_FLOW_ID;
831 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
832 }
833 
834 /**
835  * ol_tx_flow_pool_map_handler() - Map flow_id with pool of descriptors
836  * @flow_id: flow id
837  * @flow_type: flow type
838  * @flow_pool_id: pool id
839  * @flow_pool_size: pool size
840  *
841  * Process the following target to host message:
842  * HTT_T2H_MSG_TYPE_FLOW_POOL_MAP
843  *
844  * Return: none
845  */
846 void ol_tx_flow_pool_map_handler(uint8_t flow_id, uint8_t flow_type,
847 				 uint8_t flow_pool_id, uint16_t flow_pool_size)
848 {
849 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
850 	ol_txrx_pdev_handle pdev;
851 	struct ol_tx_flow_pool_t *pool;
852 	uint8_t pool_create = 0;
853 	enum htt_flow_type type = flow_type;
854 
855 	ol_txrx_dbg("flow_id %d flow_type %d flow_pool_id %d flow_pool_size %d",
856 		    flow_id, flow_type, flow_pool_id, flow_pool_size);
857 
858 	if (qdf_unlikely(!soc))
859 		return;
860 
861 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
862 	if (qdf_unlikely(!pdev)) {
863 		ol_txrx_err("pdev is NULL");
864 		return;
865 	}
866 	pdev->pool_stats.pool_map_count++;
867 
868 	pool = ol_tx_get_flow_pool(flow_pool_id);
869 	if (!pool) {
870 		pool = ol_tx_create_flow_pool(flow_pool_id, flow_pool_size);
871 		if (!pool) {
872 			ol_txrx_err("creation of flow_pool %d size %d failed",
873 				    flow_pool_id, flow_pool_size);
874 			return;
875 		}
876 		pool_create = 1;
877 	}
878 
879 	switch (type) {
880 
881 	case FLOW_TYPE_VDEV:
882 		ol_tx_flow_pool_vdev_map(pool, flow_id);
883 		pdev->pause_cb(flow_id,
884 			       WLAN_NETIF_PRIORITY_QUEUE_ON,
885 			       WLAN_DATA_FLOW_CONTROL_PRIORITY);
886 		pdev->pause_cb(flow_id,
887 			       WLAN_WAKE_ALL_NETIF_QUEUE,
888 			       WLAN_DATA_FLOW_CONTROL);
889 		break;
890 	default:
891 		if (pool_create)
892 			ol_tx_dec_pool_ref(pool, false);
893 		ol_txrx_err("flow type %d not supported", type);
894 		break;
895 	}
896 }
897 
898 /**
899  * ol_tx_flow_pool_unmap_handler() - Unmap flow_id from pool of descriptors
900  * @flow_id: flow id
901  * @flow_type: flow type
902  * @flow_pool_id: pool id
903  *
904  * Process the following target to host message:
905  * HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP
906  *
907  * Return: none
908  */
909 void ol_tx_flow_pool_unmap_handler(uint8_t flow_id, uint8_t flow_type,
910 							  uint8_t flow_pool_id)
911 {
912 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
913 	ol_txrx_pdev_handle pdev;
914 	struct ol_tx_flow_pool_t *pool;
915 	enum htt_flow_type type = flow_type;
916 
917 	ol_txrx_dbg("flow_id %d flow_type %d flow_pool_id %d",
918 		    flow_id, flow_type, flow_pool_id);
919 
920 	if (qdf_unlikely(!soc))
921 		return;
922 
923 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
924 	if (qdf_unlikely(!pdev)) {
925 		ol_txrx_err("pdev is NULL");
926 		return;
927 	}
928 	pdev->pool_stats.pool_unmap_count++;
929 
930 	pool = ol_tx_get_flow_pool(flow_pool_id);
931 	if (!pool) {
932 		ol_txrx_info("flow_pool not available flow_pool_id %d", type);
933 		return;
934 	}
935 
936 	switch (type) {
937 
938 	case FLOW_TYPE_VDEV:
939 		ol_tx_flow_pool_vdev_unmap(pool, flow_id);
940 		break;
941 	default:
942 		ol_txrx_info("flow type %d not supported", type);
943 		return;
944 	}
945 
946 	/*
947 	 * only delete if all descriptors are available
948 	 * and pool ref count becomes 0
949 	 */
950 	ol_tx_dec_pool_ref(pool, false);
951 }
952 
953 #ifdef QCA_LL_TX_FLOW_CONTROL_RESIZE
954 /**
955  * ol_tx_distribute_descs_to_deficient_pools_from_global_pool() - distribute
956  *	descriptors from the global pool to deficient flow pools
957  *
958  * Distribute descriptors of the global pool to all deficient pools as needed.
959  *
960  * Return: 0 for success
961  */
962 int ol_tx_distribute_descs_to_deficient_pools_from_global_pool(void)
963 {
964 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
965 	ol_txrx_pdev_handle pdev;
966 	struct ol_tx_flow_pool_t *dst_pool = NULL;
967 	struct ol_tx_flow_pool_t *tmp_pool = NULL;
968 	uint16_t total_desc_req = 0;
969 	uint16_t desc_move_count = 0;
970 	uint16_t temp_count = 0, i;
971 	union ol_tx_desc_list_elem_t *temp_list = NULL;
972 	struct ol_tx_desc_t *tx_desc;
973 	uint8_t free_invalid_pool = 0;
974 
975 	if (qdf_unlikely(!soc))
976 		return -EINVAL;
977 
978 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
979 	if (!pdev) {
980 		ol_txrx_err("pdev is NULL");
981 		return -EINVAL;
982 	}
983 
984 	/* Nested locks: maintain flow_pool_list_lock->flow_pool_lock */
985 	/* find out total deficient desc required */
986 	qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
987 	TAILQ_FOREACH(dst_pool, &pdev->tx_desc.flow_pool_list,
988 		      flow_pool_list_elem) {
989 		qdf_spin_lock_bh(&dst_pool->flow_pool_lock);
990 		total_desc_req += dst_pool->deficient_desc;
991 		qdf_spin_unlock_bh(&dst_pool->flow_pool_lock);
992 	}
993 	qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
994 
995 	qdf_spin_lock_bh(&pdev->tx_mutex);
996 	desc_move_count = (pdev->tx_desc.num_free >= total_desc_req) ?
997 				 total_desc_req : pdev->tx_desc.num_free;
998 
999 	for (i = 0; i < desc_move_count; i++) {
1000 		tx_desc = ol_tx_get_desc_global_pool(pdev);
1001 		((union ol_tx_desc_list_elem_t *)tx_desc)->next = temp_list;
1002 		temp_list = (union ol_tx_desc_list_elem_t *)tx_desc;
1003 	}
1004 	qdf_spin_unlock_bh(&pdev->tx_mutex);
1005 
1006 	if (!desc_move_count)
1007 		return 0;
1008 
1009 	/* distribute desc to deficient pool */
1010 	qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
1011 	TAILQ_FOREACH(dst_pool, &pdev->tx_desc.flow_pool_list,
1012 		      flow_pool_list_elem) {
1013 		qdf_spin_lock_bh(&dst_pool->flow_pool_lock);
1014 		if (dst_pool->deficient_desc) {
1015 			temp_count =
1016 				(dst_pool->deficient_desc > desc_move_count) ?
1017 				desc_move_count : dst_pool->deficient_desc;
1018 
1019 			desc_move_count -= temp_count;
1020 			dst_pool->deficient_desc -= temp_count;
1021 			for (i = 0; i < temp_count; i++) {
1022 				tx_desc = &temp_list->tx_desc;
1023 				temp_list = temp_list->next;
1024 				ol_tx_put_desc_flow_pool(dst_pool, tx_desc);
1025 			}
1026 
1027 			if (dst_pool->status == FLOW_POOL_ACTIVE_PAUSED) {
1028 				if (dst_pool->avail_desc > dst_pool->start_th) {
1029 					pdev->pause_cb(dst_pool->member_flow_id,
1030 						      WLAN_WAKE_ALL_NETIF_QUEUE,
1031 						      WLAN_DATA_FLOW_CONTROL);
1032 					dst_pool->status =
1033 						FLOW_POOL_ACTIVE_UNPAUSED;
1034 				}
1035 			} else if ((dst_pool->status == FLOW_POOL_INVALID) &&
1036 				   (dst_pool->avail_desc ==
1037 					 dst_pool->flow_pool_size)) {
1038 				free_invalid_pool = 1;
1039 				tmp_pool = dst_pool;
1040 			}
1041 		}
1042 		qdf_spin_unlock_bh(&dst_pool->flow_pool_lock);
1043 		if (desc_move_count == 0)
1044 			break;
1045 	}
1046 	qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
1047 
1048 	if (free_invalid_pool && tmp_pool)
1049 		ol_tx_free_invalid_flow_pool(tmp_pool);
1050 
1051 	return 0;
1052 }
1053 
1054 /**
1055  * ol_tx_flow_pool_update_queue_state() - update network queue for pool based on
1056  *                                        new available count.
1057  * @pool: pool handle
1058  *
1059  * Return: none
1060  */
1061 static void ol_tx_flow_pool_update_queue_state(struct ol_txrx_pdev_t *pdev,
1062 					       struct ol_tx_flow_pool_t *pool)
1063 {
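	/*
	 * Pick the netif queue state from the pool's new fill level:
	 *   avail_desc > start_th                     -> wake all queues
	 *   stop_priority_th <= avail_desc < stop_th  -> pause non-priority only
	 *   avail_desc < stop_priority_th             -> pause all queues
	 * (levels in between leave the current state unchanged)
	 */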
1064 	qdf_spin_lock_bh(&pool->flow_pool_lock);
1065 	if (pool->avail_desc > pool->start_th) {
1066 		pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
1067 		qdf_spin_unlock_bh(&pool->flow_pool_lock);
1068 		pdev->pause_cb(pool->member_flow_id,
1069 			       WLAN_WAKE_ALL_NETIF_QUEUE,
1070 			       WLAN_DATA_FLOW_CONTROL);
1071 	} else if (pool->avail_desc < pool->stop_th &&
1072 		   pool->avail_desc >= pool->stop_priority_th) {
1073 		pool->status = FLOW_POOL_NON_PRIO_PAUSED;
1074 		qdf_spin_unlock_bh(&pool->flow_pool_lock);
1075 		pdev->pause_cb(pool->member_flow_id,
1076 			       WLAN_STOP_NON_PRIORITY_QUEUE,
1077 			       WLAN_DATA_FLOW_CONTROL);
1078 		pdev->pause_cb(pool->member_flow_id,
1079 			       WLAN_NETIF_PRIORITY_QUEUE_ON,
1080 			       WLAN_DATA_FLOW_CONTROL);
1081 	} else if (pool->avail_desc < pool->stop_priority_th) {
1082 		pool->status = FLOW_POOL_ACTIVE_PAUSED;
1083 		qdf_spin_unlock_bh(&pool->flow_pool_lock);
1084 		pdev->pause_cb(pool->member_flow_id,
1085 			       WLAN_STOP_ALL_NETIF_QUEUE,
1086 			       WLAN_DATA_FLOW_CONTROL);
1087 	} else {
1088 		qdf_spin_unlock_bh(&pool->flow_pool_lock);
1089 	}
1090 }
1091 
1092 /**
1093  * ol_tx_flow_pool_update() - update pool parameters with new size
1094  * @pool: pool handle
1095  * @new_pool_size: new pool size
1096  * @deficient_count: deficient count
1097  * @overflow_count: overflow count
1098  *
1099  * Return: none
1100  */
1101 static void ol_tx_flow_pool_update(struct ol_tx_flow_pool_t *pool,
1102 				   uint16_t new_pool_size,
1103 				   uint16_t deficient_count,
1104 				   uint16_t overflow_count)
1105 {
1106 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
1107 	ol_txrx_pdev_handle pdev;
1108 	uint32_t stop_threshold;
1109 	uint32_t start_threshold;
1110 
1111 	if (qdf_unlikely(!soc))
1112 		return;
1113 
1114 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
1115 	if (!pdev) {
1116 		ol_txrx_err("pdev is NULL");
1117 		return;
1118 	}
1119 
1120 	stop_threshold = ol_cfg_get_tx_flow_stop_queue_th(pdev->ctrl_pdev);
1121 	start_threshold = stop_threshold +
1122 			ol_cfg_get_tx_flow_start_queue_offset(pdev->ctrl_pdev);
1123 	pool->flow_pool_size = new_pool_size;
1124 	pool->start_th = (start_threshold * new_pool_size) / 100;
1125 	pool->stop_th = (stop_threshold * new_pool_size) / 100;
1126 	pool->stop_priority_th = (TX_PRIORITY_TH * pool->stop_th) / 100;
1127 	if (pool->stop_priority_th >= MAX_TSO_SEGMENT_DESC)
1128 		pool->stop_priority_th -= MAX_TSO_SEGMENT_DESC;
1129 
1130 	pool->start_priority_th = (TX_PRIORITY_TH * pool->start_th) / 100;
1131 	if (pool->start_priority_th >= MAX_TSO_SEGMENT_DESC)
1132 		pool->start_priority_th -= MAX_TSO_SEGMENT_DESC;
1133 
1134 	if (deficient_count)
1135 		pool->deficient_desc = deficient_count;
1136 
1137 	if (overflow_count)
1138 		pool->overflow_desc = overflow_count;
1139 }
1140 
1141 /**
1142  * ol_tx_flow_pool_resize() - resize pool with new size
1143  * @pool: pool pointer
1144  * @new_pool_size: new pool size
1145  *
1146  * Return: none
1147  */
1148 static void ol_tx_flow_pool_resize(struct ol_tx_flow_pool_t *pool,
1149 				   uint16_t new_pool_size)
1150 {
1151 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
1152 	ol_txrx_pdev_handle pdev;
1153 	uint16_t diff = 0, overflow_count = 0, deficient_count = 0;
1154 	uint16_t move_desc_to_global = 0, move_desc_from_global = 0;
1155 	union ol_tx_desc_list_elem_t *temp_list = NULL;
1156 	int i = 0, update_done = 0;
1157 	struct ol_tx_desc_t *tx_desc = NULL;
1158 	uint16_t temp = 0;
1159 
1160 	if (qdf_unlikely(!soc))
1161 		return;
1162 
1163 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
1164 	if (!pdev) {
1165 		ol_txrx_err("pdev is NULL");
1166 		return;
1167 	}
1168 
1169 	qdf_spin_lock_bh(&pool->flow_pool_lock);
1170 	if (pool->flow_pool_size == new_pool_size) {
1171 		qdf_spin_unlock_bh(&pool->flow_pool_lock);
1172 		ol_txrx_info("pool resize received with same size");
1173 		return;
1174 	}
1175 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
1176 
1177 	/* Reduce pool size */
1178 	/* start_priority_th descriptors should be available after reduction */
1179 	qdf_spin_lock_bh(&pool->flow_pool_lock);
1180 	if (pool->flow_pool_size > new_pool_size) {
1181 		diff = pool->flow_pool_size - new_pool_size;
1182 		diff += pool->overflow_desc;
1183 		pool->overflow_desc = 0;
1184 		temp = QDF_MIN(pool->deficient_desc, diff);
1185 		pool->deficient_desc -= temp;
1186 		diff -= temp;
1187 
1188 		if (diff) {
1189 			/* Have enough descriptors */
1190 			if (pool->avail_desc >=
1191 				 (diff + pool->start_priority_th)) {
1192 				move_desc_to_global = diff;
1193 			}
1194 			/* Do not have enough descriptors */
1195 			else if (pool->avail_desc > pool->start_priority_th) {
1196 				move_desc_to_global = pool->avail_desc -
1197 						 pool->start_priority_th;
1198 				overflow_count = diff - move_desc_to_global;
1199 			}
1200 
1201 			/* Move desc to temp_list */
1202 			for (i = 0; i < move_desc_to_global; i++) {
1203 				tx_desc = ol_tx_get_desc_flow_pool(pool);
1204 				((union ol_tx_desc_list_elem_t *)tx_desc)->next
1205 								 = temp_list;
1206 				temp_list =
1207 				  (union ol_tx_desc_list_elem_t *)tx_desc;
1208 			}
1209 		}
1210 
1211 		/* update pool size and threshold */
1212 		ol_tx_flow_pool_update(pool, new_pool_size, 0, overflow_count);
1213 		update_done = 1;
1214 	}
1215 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
1216 
1217 	if (move_desc_to_global && temp_list) {
1218 		/* put free descriptors to global pool */
1219 		qdf_spin_lock_bh(&pdev->tx_mutex);
1220 		for (i = 0; i < move_desc_to_global; i++) {
1221 			tx_desc = &temp_list->tx_desc;
1222 			temp_list = temp_list->next;
1223 			ol_tx_put_desc_global_pool(pdev, tx_desc);
1224 		}
1225 		qdf_spin_unlock_bh(&pdev->tx_mutex);
1226 	}
1227 
1228 	if (update_done)
1229 		goto update_done;
1230 
1231 	/* Increase pool size */
1232 	qdf_spin_lock_bh(&pool->flow_pool_lock);
1233 	if (pool->flow_pool_size < new_pool_size) {
1234 		diff = new_pool_size - pool->flow_pool_size;
1235 		diff += pool->deficient_desc;
1236 		pool->deficient_desc = 0;
1237 		temp = QDF_MIN(pool->overflow_desc, diff);
1238 		pool->overflow_desc -= temp;
1239 		diff -= temp;
1240 	}
1241 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
1242 
1243 	if (diff) {
1244 		/* take descriptors from global pool */
1245 		qdf_spin_lock_bh(&pdev->tx_mutex);
1246 
1247 		if (pdev->tx_desc.num_free >= diff) {
1248 			move_desc_from_global = diff;
1249 		} else {
1250 			move_desc_from_global = pdev->tx_desc.num_free;
1251 			deficient_count = diff - move_desc_from_global;
1252 		}
1253 
1254 		for (i = 0; i < move_desc_from_global; i++) {
1255 			tx_desc = ol_tx_get_desc_global_pool(pdev);
1256 			((union ol_tx_desc_list_elem_t *)tx_desc)->next =
1257 								 temp_list;
1258 			temp_list = (union ol_tx_desc_list_elem_t *)tx_desc;
1259 		}
1260 		qdf_spin_unlock_bh(&pdev->tx_mutex);
1261 	}
1262 	/* update desc to pool */
1263 	qdf_spin_lock_bh(&pool->flow_pool_lock);
1264 	if (move_desc_from_global && temp_list) {
1265 		for (i = 0; i < move_desc_from_global; i++) {
1266 			tx_desc = &temp_list->tx_desc;
1267 			temp_list = temp_list->next;
1268 			ol_tx_put_desc_flow_pool(pool, tx_desc);
1269 		}
1270 	}
1271 	/* update pool size and threshold */
1272 	ol_tx_flow_pool_update(pool, new_pool_size, deficient_count, 0);
1273 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
1274 
1275 update_done:
1276 
1277 	ol_tx_flow_pool_update_queue_state(pdev, pool);
1278 }
1279 
1280 /**
1281  * ol_tx_flow_pool_resize_handler() - Resize pool with new size
1282  * @flow_pool_id: pool id
1283  * @flow_pool_size: pool size
1284  *
1285  * Process the following target to host message:
1286  * HTT_T2H_MSG_TYPE_FLOW_POOL_RESIZE
1287  *
1288  * Return: none
1289  */
1290 void ol_tx_flow_pool_resize_handler(uint8_t flow_pool_id,
1291 				    uint16_t flow_pool_size)
1292 {
1293 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
1294 	ol_txrx_pdev_handle pdev;
1295 	struct ol_tx_flow_pool_t *pool;
1296 
1297 	ol_txrx_dbg("flow_pool_id %d flow_pool_size %d",
1298 		    flow_pool_id, flow_pool_size);
1299 
1300 	if (qdf_unlikely(!soc))
1301 		return;
1302 
1303 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
1304 	if (qdf_unlikely(!pdev)) {
1305 		ol_txrx_err("pdev is NULL");
1306 		return;
1307 	}
1308 	pdev->pool_stats.pool_resize_count++;
1309 
1310 	pool = ol_tx_get_flow_pool(flow_pool_id);
1311 	if (!pool) {
1312 		ol_txrx_err("resize for flow_pool %d size %d failed",
1313 			    flow_pool_id, flow_pool_size);
1314 		return;
1315 	}
1316 
1317 	ol_tx_inc_pool_ref(pool);
1318 	ol_tx_flow_pool_resize(pool, flow_pool_size);
1319 	ol_tx_dec_pool_ref(pool, false);
1320 }
1321 #endif
1322 
1323 /**
1324  * ol_txrx_map_to_netif_reason_type() - map to netif_reason_type
1325  * @reason: network queue pause reason
1326  *
1327  * Return: netif_reason_type
1328  */
1329 static enum netif_reason_type
1330 ol_txrx_map_to_netif_reason_type(uint32_t reason)
1331 {
1332 	switch (reason) {
1333 	case OL_TXQ_PAUSE_REASON_FW:
1334 		return WLAN_FW_PAUSE;
1335 	case OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED:
1336 		return WLAN_PEER_UNAUTHORISED;
1337 	case OL_TXQ_PAUSE_REASON_TX_ABORT:
1338 		return WLAN_TX_ABORT;
1339 	case OL_TXQ_PAUSE_REASON_VDEV_STOP:
1340 		return WLAN_VDEV_STOP;
1341 	case OL_TXQ_PAUSE_REASON_THERMAL_MITIGATION:
1342 		return WLAN_THERMAL_MITIGATION;
1343 	default:
1344 		ol_txrx_err("reason not supported %d", reason);
1345 		return WLAN_REASON_TYPE_MAX;
1346 	}
1347 }
1348 
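/**
 * ol_txrx_vdev_pause() - pause vdev network queues
 * @soc_hdl: datapath soc handle
 * @vdev_id: vdev id
 * @reason: network queue pause reason
 * @pause_type: type of pause
 *
 * Return: none
 */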
1349 void ol_txrx_vdev_pause(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
1350 			uint32_t reason, uint32_t pause_type)
1351 {
1352 	struct ol_txrx_vdev_t *vdev =
1353 		(struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
1354 	struct ol_txrx_pdev_t *pdev;
1355 	enum netif_reason_type netif_reason;
1356 
1357 	if (qdf_unlikely(!vdev)) {
1358 		ol_txrx_err("vdev is NULL");
1359 		return;
1360 	}
1361 
1362 	pdev = vdev->pdev;
1363 	if (qdf_unlikely((!pdev) || (!pdev->pause_cb))) {
1364 		ol_txrx_err("invalid pdev");
1365 		return;
1366 	}
1367 
1368 	netif_reason = ol_txrx_map_to_netif_reason_type(reason);
1369 	if (netif_reason == WLAN_REASON_TYPE_MAX)
1370 		return;
1371 
1372 	pdev->pause_cb(vdev->vdev_id, WLAN_STOP_ALL_NETIF_QUEUE, netif_reason);
1373 }
1374 
1375 /**
1376  * ol_txrx_vdev_unpause() - unpause vdev network queues
1377  * @soc_hdl: datapath soc handle
1378  * @vdev_id: vdev id
1379  * @reason: network queue pause reason
1380  * @pause_type: type of pause
1381  *
1382  * Return: none
1383  */
1384 void ol_txrx_vdev_unpause(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
1385 			  uint32_t reason, uint32_t pause_type)
1386 {
1387 	struct ol_txrx_vdev_t *vdev =
1388 		(struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
1389 	struct ol_txrx_pdev_t *pdev;
1390 	enum netif_reason_type netif_reason;
1391 
1392 	if (qdf_unlikely(!vdev)) {
1393 		ol_txrx_err("vdev is NULL");
1394 		return;
1395 	}
1396 
1397 	pdev = vdev->pdev;
1398 	if (qdf_unlikely((!pdev) || (!pdev->pause_cb))) {
1399 		ol_txrx_err("invalid pdev");
1400 		return;
1401 	}
1402 
1403 	netif_reason = ol_txrx_map_to_netif_reason_type(reason);
1404 	if (netif_reason == WLAN_REASON_TYPE_MAX)
1405 		return;
1406 
1407 	pdev->pause_cb(vdev->vdev_id, WLAN_WAKE_ALL_NETIF_QUEUE,
1408 			netif_reason);
1409 }
1410 
1411 /**
1412  * ol_txrx_pdev_pause() - pause network queues for each vdev
1413  * @pdev: pdev handle
1414  * @reason: network queue pause reason
1415  *
1416  * Return: none
1417  */
1418 void ol_txrx_pdev_pause(struct ol_txrx_pdev_t *pdev, uint32_t reason)
1419 {
1420 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
1421 	struct ol_txrx_vdev_t *vdev = NULL, *tmp;
1422 
1423 	TAILQ_FOREACH_SAFE(vdev, &pdev->vdev_list, vdev_list_elem, tmp) {
1424 		ol_txrx_vdev_pause(ol_txrx_soc_t_to_cdp_soc_t(soc),
1425 				   vdev->vdev_id, reason, 0);
1426 	}
1427 }
1428 
1429 /**
1430  * ol_txrx_pdev_unpause() - unpause network queues for each vdev
1431  * @pdev: pdev handle
1432  * @reason: network queue pause reason
1433  *
1434  * Return: none
1435  */
1436 void ol_txrx_pdev_unpause(struct ol_txrx_pdev_t *pdev, uint32_t reason)
1437 {
1438 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
1439 	struct ol_txrx_vdev_t *vdev = NULL, *tmp;
1440 
1441 	TAILQ_FOREACH_SAFE(vdev, &pdev->vdev_list, vdev_list_elem, tmp) {
1442 		ol_txrx_vdev_unpause(ol_txrx_soc_t_to_cdp_soc_t(soc),
1443 				     vdev->vdev_id, reason, 0);
1444 	}
1445 }
1446