xref: /wlan-driver/qcacld-3.0/core/dp/txrx/ol_tx_desc.c (revision 5113495b16420b49004c444715d2daae2066e7dc)
1 /*
2  * Copyright (c) 2011, 2014-2019 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <qdf_net_types.h>      /* QDF_NBUF_EXEMPT_NO_EXEMPTION, etc. */
21 #include <qdf_nbuf.h>           /* qdf_nbuf_t, etc. */
22 #include <qdf_util.h>           /* qdf_assert */
23 #include <qdf_lock.h>           /* qdf_spinlock */
24 #include <qdf_trace.h>          /* qdf_tso_seg_dbg stuff */
25 #ifdef QCA_COMPUTE_TX_DELAY
26 #include <qdf_time.h>           /* qdf_system_ticks */
27 #endif
28 
29 #include <ol_htt_tx_api.h>      /* htt_tx_desc_id */
30 
31 #include <ol_tx_desc.h>
32 #include <ol_txrx_internal.h>
33 #ifdef QCA_SUPPORT_SW_TXRX_ENCAP
34 #include <ol_txrx_encap.h>      /* OL_TX_RESTORE_HDR, etc */
35 #endif
36 #include <ol_txrx.h>
37 
38 #ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
39 static inline void ol_tx_desc_sanity_checks(struct ol_txrx_pdev_t *pdev,
40 					struct ol_tx_desc_t *tx_desc)
41 {
42 	if (tx_desc->pkt_type != ol_tx_frm_freed) {
43 		ol_txrx_err("Potential tx_desc corruption pkt_type:0x%x pdev:0x%pK",
44 			    tx_desc->pkt_type, pdev);
45 		qdf_assert(0);
46 	}
47 }
48 static inline void ol_tx_desc_reset_pkt_type(struct ol_tx_desc_t *tx_desc)
49 {
50 	tx_desc->pkt_type = ol_tx_frm_freed;
51 }
52 #ifdef QCA_COMPUTE_TX_DELAY
53 static inline void ol_tx_desc_compute_delay(struct ol_tx_desc_t *tx_desc)
54 {
55 	if (tx_desc->entry_timestamp_ticks != 0xffffffff) {
56 		ol_txrx_err("Timestamp:0x%x", tx_desc->entry_timestamp_ticks);
57 		qdf_assert(0);
58 	}
59 	tx_desc->entry_timestamp_ticks = qdf_system_ticks();
60 }
61 static inline void ol_tx_desc_reset_timestamp(struct ol_tx_desc_t *tx_desc)
62 {
63 	tx_desc->entry_timestamp_ticks = 0xffffffff;
64 }
65 #endif
66 #else
67 static inline void ol_tx_desc_sanity_checks(struct ol_txrx_pdev_t *pdev,
68 						struct ol_tx_desc_t *tx_desc)
69 {
70 }
71 static inline void ol_tx_desc_reset_pkt_type(struct ol_tx_desc_t *tx_desc)
72 {
73 }
74 static inline void ol_tx_desc_compute_delay(struct ol_tx_desc_t *tx_desc)
75 {
76 }
77 static inline void ol_tx_desc_reset_timestamp(struct ol_tx_desc_t *tx_desc)
78 {
79 }
80 #endif
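/*
 * Lifecycle sketch for the sanity helpers above (illustration only, not
 * driver code): ol_tx_desc_free() stamps pkt_type back to ol_tx_frm_freed
 * via ol_tx_desc_reset_pkt_type(), so the next allocation expects to find
 * exactly that value.  Roughly:
 *
 *   alloc path:  ol_tx_desc_sanity_checks(pdev, tx_desc);
 *                // asserts pkt_type == ol_tx_frm_freed
 *                tx_desc->pkt_type = OL_TX_FRM_STD;   // caller re-types it
 *   free path:   ol_tx_desc_reset_pkt_type(tx_desc);  // back to ol_tx_frm_freed
 *
 * Any other value seen at allocation time points at descriptor re-use or a
 * corrupted freelist entry.
 */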
81 
82 #ifdef DESC_TIMESTAMP_DEBUG_INFO
83 static inline void ol_tx_desc_update_tx_ts(struct ol_tx_desc_t *tx_desc)
84 {
85 	tx_desc->desc_debug_info.prev_tx_ts = tx_desc
86 						->desc_debug_info.curr_tx_ts;
87 	tx_desc->desc_debug_info.curr_tx_ts = qdf_get_log_timestamp();
88 }
89 #else
90 static inline void ol_tx_desc_update_tx_ts(struct ol_tx_desc_t *tx_desc)
91 {
92 }
93 #endif
94 
95 /**
96  * ol_tx_desc_vdev_update() - assign vdev to the tx descriptor.
97  * @tx_desc: tx descriptor pointer
98  * @vdev: vdev handle
99  *
100  * Return: None
101  */
102 static inline void
103 ol_tx_desc_vdev_update(struct ol_tx_desc_t *tx_desc,
104 		       struct ol_txrx_vdev_t *vdev)
105 {
106 	tx_desc->vdev = vdev;
107 	tx_desc->vdev_id = vdev->vdev_id;
108 }
109 
110 #ifdef QCA_HL_NETDEV_FLOW_CONTROL
111 
112 /**
113  * ol_tx_desc_count_inc() - tx desc count increment for desc allocation.
114  * @vdev: vdev handle
115  *
116  * Return: None
117  */
118 static inline void
119 ol_tx_desc_count_inc(struct ol_txrx_vdev_t *vdev)
120 {
121 	qdf_atomic_inc(&vdev->tx_desc_count);
122 }
123 #else
124 
125 static inline void
126 ol_tx_desc_count_inc(struct ol_txrx_vdev_t *vdev)
127 {
128 }
129 
130 #endif
131 
132 #ifndef QCA_LL_TX_FLOW_CONTROL_V2
133 #ifdef QCA_LL_PDEV_TX_FLOW_CONTROL
134 /**
135  * ol_tx_do_pdev_flow_control_pause - pause queues when stop_th reached.
136  * @pdev: pdev handle
137  *
138  * Return: void
139  */
140 static void ol_tx_do_pdev_flow_control_pause(struct ol_txrx_pdev_t *pdev)
141 {
142 	struct ol_txrx_vdev_t *vdev;
143 
144 	if (qdf_unlikely(pdev->tx_desc.num_free <
145 				pdev->tx_desc.stop_th &&
146 			pdev->tx_desc.num_free >=
147 			 pdev->tx_desc.stop_priority_th &&
148 			pdev->tx_desc.status ==
149 			 FLOW_POOL_ACTIVE_UNPAUSED)) {
150 		pdev->tx_desc.status = FLOW_POOL_NON_PRIO_PAUSED;
151 		/* pause network NON PRIORITY queues */
152 		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
153 			pdev->pause_cb(vdev->vdev_id,
154 				       WLAN_STOP_NON_PRIORITY_QUEUE,
155 				       WLAN_DATA_FLOW_CONTROL);
156 		}
157 	} else if (qdf_unlikely((pdev->tx_desc.num_free <
158 				 pdev->tx_desc.stop_priority_th) &&
159 			pdev->tx_desc.status ==
160 			FLOW_POOL_NON_PRIO_PAUSED)) {
161 		pdev->tx_desc.status = FLOW_POOL_ACTIVE_PAUSED;
162 		/* pause priority queue */
163 		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
164 			pdev->pause_cb(vdev->vdev_id,
165 				       WLAN_NETIF_PRIORITY_QUEUE_OFF,
166 				       WLAN_DATA_FLOW_CONTROL_PRIORITY);
167 		}
168 	}
169 }
170 
171 /**
172  * ol_tx_do_pdev_flow_control_unpause - unpause queues when start_th restored.
173  * @pdev: pdev handle
174  *
175  * Return: void
176  */
177 static void ol_tx_do_pdev_flow_control_unpause(struct ol_txrx_pdev_t *pdev)
178 {
179 	struct ol_txrx_vdev_t *vdev;
180 
181 	switch (pdev->tx_desc.status) {
182 	case FLOW_POOL_ACTIVE_PAUSED:
183 		if (pdev->tx_desc.num_free >
184 		    pdev->tx_desc.start_priority_th) {
185 			/* unpause priority queue */
186 			TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
187 				pdev->pause_cb(vdev->vdev_id,
188 				       WLAN_NETIF_PRIORITY_QUEUE_ON,
189 				       WLAN_DATA_FLOW_CONTROL_PRIORITY);
190 			}
191 			pdev->tx_desc.status = FLOW_POOL_NON_PRIO_PAUSED;
192 		}
193 		break;
194 	case FLOW_POOL_NON_PRIO_PAUSED:
195 		if (pdev->tx_desc.num_free > pdev->tx_desc.start_th) {
196 			TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
197 				pdev->pause_cb(vdev->vdev_id,
198 					       WLAN_WAKE_NON_PRIORITY_QUEUE,
199 					       WLAN_DATA_FLOW_CONTROL);
200 			}
201 			pdev->tx_desc.status = FLOW_POOL_ACTIVE_UNPAUSED;
202 		}
203 		break;
204 	case FLOW_POOL_INVALID:
205 		if (pdev->tx_desc.num_free == pdev->tx_desc.pool_size)
206 			ol_txrx_err("pool is INVALID State!!");
207 		break;
208 	case FLOW_POOL_ACTIVE_UNPAUSED:
209 		break;
210 	default:
211 		ol_txrx_err("pool is INACTIVE State!!");
212 		break;
213 	};
214 }
215 #else
216 static inline void
217 ol_tx_do_pdev_flow_control_pause(struct ol_txrx_pdev_t *pdev)
218 {
219 }
220 
221 static inline void
222 ol_tx_do_pdev_flow_control_unpause(struct ol_txrx_pdev_t *pdev)
223 {
224 }
225 #endif
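/*
 * The pdev-level flow control above is a small state machine with
 * hysteresis.  Assuming the usual threshold ordering
 * (stop_priority_th < stop_th, start_priority_th < start_th), the
 * transitions driven by tx_desc.num_free are, in sketch form:
 *
 *   ACTIVE_UNPAUSED --(num_free < stop_th)-----------> NON_PRIO_PAUSED
 *                                            (non-priority queues stopped)
 *   NON_PRIO_PAUSED --(num_free < stop_priority_th)--> ACTIVE_PAUSED
 *                                            (priority queue also stopped)
 *   ACTIVE_PAUSED   --(num_free > start_priority_th)-> NON_PRIO_PAUSED
 *   NON_PRIO_PAUSED --(num_free > start_th)----------> ACTIVE_UNPAUSED
 *
 * Pauses are evaluated on allocation, unpauses on free.
 */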
226 /**
227  * ol_tx_desc_alloc() - allocate descriptor from freelist
228  * @pdev: pdev handle
229  * @vdev: vdev handle
230  *
231  * Return: tx descriptor pointer, or NULL in case of error
232  */
233 static
234 struct ol_tx_desc_t *ol_tx_desc_alloc(struct ol_txrx_pdev_t *pdev,
235 					     struct ol_txrx_vdev_t *vdev)
236 {
237 	struct ol_tx_desc_t *tx_desc = NULL;
238 
239 	qdf_spin_lock_bh(&pdev->tx_mutex);
240 	if (pdev->tx_desc.freelist) {
241 		tx_desc = ol_tx_get_desc_global_pool(pdev);
242 		if (!tx_desc) {
243 			qdf_spin_unlock_bh(&pdev->tx_mutex);
244 			return NULL;
245 		}
246 		ol_tx_desc_dup_detect_set(pdev, tx_desc);
247 		ol_tx_do_pdev_flow_control_pause(pdev);
248 		ol_tx_desc_sanity_checks(pdev, tx_desc);
249 		ol_tx_desc_compute_delay(tx_desc);
250 		ol_tx_desc_vdev_update(tx_desc, vdev);
251 		ol_tx_desc_count_inc(vdev);
252 		ol_tx_desc_update_tx_ts(tx_desc);
253 		qdf_atomic_inc(&tx_desc->ref_cnt);
254 	}
255 	qdf_spin_unlock_bh(&pdev->tx_mutex);
256 	return tx_desc;
257 }
258 
259 /**
260  * ol_tx_desc_alloc_wrapper() - allocate tx descriptor
261  * @pdev: pdev handle
262  * @vdev: vdev handle
263  * @msdu_info: msdu info
264  *
265  * Return: tx descriptor or NULL
266  */
267 struct ol_tx_desc_t *
268 ol_tx_desc_alloc_wrapper(struct ol_txrx_pdev_t *pdev,
269 			 struct ol_txrx_vdev_t *vdev,
270 			 struct ol_txrx_msdu_info_t *msdu_info)
271 {
272 	return ol_tx_desc_alloc(pdev, vdev);
273 }
274 
275 #else
276 /**
277  * ol_tx_desc_alloc() - allocate tx descriptor
278  * @pdev: pdev handle
279  * @vdev: vdev handle
280  * @pool: flow pool
281  *
282  * Return: tx descriptor or NULL
283  */
284 static
285 struct ol_tx_desc_t *ol_tx_desc_alloc(struct ol_txrx_pdev_t *pdev,
286 				      struct ol_txrx_vdev_t *vdev,
287 				      struct ol_tx_flow_pool_t *pool)
288 {
289 	struct ol_tx_desc_t *tx_desc = NULL;
290 
291 	if (!pool) {
292 		pdev->pool_stats.pkt_drop_no_pool++;
293 		goto end;
294 	}
295 
296 	qdf_spin_lock_bh(&pool->flow_pool_lock);
297 	if (pool->avail_desc) {
298 		tx_desc = ol_tx_get_desc_flow_pool(pool);
299 		ol_tx_desc_dup_detect_set(pdev, tx_desc);
300 		if (qdf_unlikely(pool->avail_desc < pool->stop_th &&
301 				(pool->avail_desc >= pool->stop_priority_th) &&
302 				(pool->status == FLOW_POOL_ACTIVE_UNPAUSED))) {
303 			pool->status = FLOW_POOL_NON_PRIO_PAUSED;
304 			/* pause network NON PRIORITY queues */
305 			pdev->pause_cb(vdev->vdev_id,
306 				       WLAN_STOP_NON_PRIORITY_QUEUE,
307 				       WLAN_DATA_FLOW_CONTROL);
308 		} else if (qdf_unlikely((pool->avail_desc <
309 						pool->stop_priority_th) &&
310 				pool->status == FLOW_POOL_NON_PRIO_PAUSED)) {
311 			pool->status = FLOW_POOL_ACTIVE_PAUSED;
312 			/* pause priority queue */
313 			pdev->pause_cb(vdev->vdev_id,
314 				       WLAN_NETIF_PRIORITY_QUEUE_OFF,
315 				       WLAN_DATA_FLOW_CONTROL_PRIORITY);
316 		}
317 
318 		qdf_spin_unlock_bh(&pool->flow_pool_lock);
319 
320 		ol_tx_desc_sanity_checks(pdev, tx_desc);
321 		ol_tx_desc_compute_delay(tx_desc);
322 		ol_tx_desc_update_tx_ts(tx_desc);
323 		ol_tx_desc_vdev_update(tx_desc, vdev);
324 		qdf_atomic_inc(&tx_desc->ref_cnt);
325 	} else {
326 		pool->pkt_drop_no_desc++;
327 		qdf_spin_unlock_bh(&pool->flow_pool_lock);
328 	}
329 
330 end:
331 	return tx_desc;
332 }
333 
334 /**
335  * ol_tx_desc_alloc_wrapper() - allocate tx descriptor
336  * @pdev: pdev handle
337  * @vdev: vdev handle
338  * @msdu_info: msdu info
339  *
340  * Return: tx descriptor or NULL
341  */
342 #ifdef QCA_LL_TX_FLOW_GLOBAL_MGMT_POOL
343 struct ol_tx_desc_t *
344 ol_tx_desc_alloc_wrapper(struct ol_txrx_pdev_t *pdev,
345 			 struct ol_txrx_vdev_t *vdev,
346 			 struct ol_txrx_msdu_info_t *msdu_info)
347 {
348 	if (qdf_unlikely(msdu_info->htt.info.frame_type == htt_pkt_type_mgmt))
349 		return ol_tx_desc_alloc(pdev, vdev, pdev->mgmt_pool);
350 	else
351 		return ol_tx_desc_alloc(pdev, vdev, vdev->pool);
352 }
353 #else
354 struct ol_tx_desc_t *
355 ol_tx_desc_alloc_wrapper(struct ol_txrx_pdev_t *pdev,
356 			 struct ol_txrx_vdev_t *vdev,
357 			 struct ol_txrx_msdu_info_t *msdu_info)
358 {
359 	return ol_tx_desc_alloc(pdev, vdev, vdev->pool);
360 }
361 #endif
362 #endif
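/*
 * A minimal usage sketch for the allocation wrappers above (hypothetical
 * caller, not part of this file):
 *
 *   struct ol_txrx_msdu_info_t msdu_info;
 *
 *   qdf_mem_zero(&msdu_info, sizeof(msdu_info));
 *   msdu_info.htt.info.frame_type = htt_pkt_type_ethernet;
 *   tx_desc = ol_tx_desc_alloc_wrapper(pdev, vdev, &msdu_info);
 *   if (!tx_desc)
 *           return;   // pool empty or invalid: drop or requeue the frame
 *   ...
 *   ol_tx_desc_free(pdev, tx_desc);   // hands the desc back to its pool
 *
 * With QCA_LL_TX_FLOW_GLOBAL_MGMT_POOL the wrapper steers management frames
 * to pdev->mgmt_pool; otherwise vdev->pool (or the global pool when flow
 * control V2 is compiled out) is used.
 */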
363 
364 /**
365  * ol_tx_desc_alloc_hl() - allocate tx descriptor
366  * @pdev: pdev handle
367  * @vdev: vdev handle
368  * @msdu_info: tx msdu info
369  *
370  * Return: tx descriptor pointer, or NULL in case of error
371  */
372 static struct ol_tx_desc_t *
373 ol_tx_desc_alloc_hl(struct ol_txrx_pdev_t *pdev,
374 		    struct ol_txrx_vdev_t *vdev,
375 		    struct ol_txrx_msdu_info_t *msdu_info)
376 {
377 	struct ol_tx_desc_t *tx_desc;
378 
379 	tx_desc = ol_tx_desc_alloc_wrapper(pdev, vdev, msdu_info);
380 	if (!tx_desc)
381 		return NULL;
382 
383 	qdf_atomic_dec(&pdev->tx_queue.rsrc_cnt);
384 
385 	return tx_desc;
386 }
387 
388 #ifdef QCA_HL_NETDEV_FLOW_CONTROL
389 
390 /**
391  * ol_tx_desc_vdev_rm() - decrement the tx desc count for vdev.
392  * @tx_desc: tx desc
393  *
394  * Return: None
395  */
396 static inline void
397 ol_tx_desc_vdev_rm(struct ol_tx_desc_t *tx_desc)
398 {
399 	/*
400 	 * In the module exit context, the vdev handle may already have been
401 	 * destroyed, but we still need to free tx_descs awaiting completion.
402 	 */
403 	if (!tx_desc || !tx_desc->vdev)
404 		return;
405 
406 	qdf_atomic_dec(&tx_desc->vdev->tx_desc_count);
407 	tx_desc->vdev = NULL;
408 }
409 #else
410 
411 static inline void
412 ol_tx_desc_vdev_rm(struct ol_tx_desc_t *tx_desc)
413 {
414 }
415 #endif
416 
417 #ifdef FEATURE_TSO
418 /**
419  * ol_tso_unmap_tso_segment() - Unmap TSO segment
420  * @pdev: pointer to ol_txrx_pdev_t structure
421  * @tx_desc: pointer to ol_tx_desc_t containing the TSO segment
422  *
423  * Unmap the TSO segment (frag[1]). If it is the last TSO segment corresponding
424  * to the nbuf, also unmap the EIT header (frag[0]).
425  *
426  * Return: None
427  */
428 static void ol_tso_unmap_tso_segment(struct ol_txrx_pdev_t *pdev,
429 						struct ol_tx_desc_t *tx_desc)
430 {
431 	bool is_last_seg = false;
432 	struct qdf_tso_num_seg_elem_t *tso_num_desc = NULL;
433 
434 	if (qdf_unlikely(!tx_desc->tso_desc)) {
435 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
436 			  "%s %d TSO desc is NULL!",
437 			  __func__, __LINE__);
438 		qdf_assert(0);
439 		return;
440 	} else if (qdf_unlikely(!tx_desc->tso_num_desc)) {
441 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
442 			  "%s %d TSO common info is NULL!",
443 			  __func__, __LINE__);
444 		qdf_assert(0);
445 		return;
446 	}
447 
448 	tso_num_desc = tx_desc->tso_num_desc;
449 
450 	qdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
451 
452 	tso_num_desc->num_seg.tso_cmn_num_seg--;
453 	is_last_seg = (tso_num_desc->num_seg.tso_cmn_num_seg == 0) ?
454 								true : false;
455 	qdf_nbuf_unmap_tso_segment(pdev->osdev, tx_desc->tso_desc, is_last_seg);
456 
457 	qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
458 
459 }
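/*
 * Worked example for the accounting above (a sketch): a jumbo skb split into
 * 5 TSO segments starts with tso_cmn_num_seg == 5.  Each completed segment
 * decrements the count and unmaps its own payload fragment (frag[1]); the
 * fifth completion sees the count reach 0, so is_last_seg is true and the
 * shared EIT header fragment (frag[0]) is unmapped as well.
 */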
460 
461 /**
462  * ol_tx_tso_desc_free() - Add TSO TX descs back to the freelist
463  * @pdev: pointer to ol_txrx_pdev_t structure
464  * @tx_desc: pointer to ol_tx_desc_t containing the TSO segment
465  *
466  * Add qdf_tso_seg_elem_t corresponding to the TSO seg back to freelist.
467  * If it is the last segment of the jumbo skb, also add the
468  * qdf_tso_num_seg_elem_t to the free list.
469  *
470  * Return: None
471  */
472 static void ol_tx_tso_desc_free(struct ol_txrx_pdev_t *pdev,
473 				struct ol_tx_desc_t *tx_desc)
474 {
475 	bool is_last_seg;
476 	struct qdf_tso_num_seg_elem_t *tso_num_desc = tx_desc->tso_num_desc;
477 
478 	is_last_seg = (tso_num_desc->num_seg.tso_cmn_num_seg == 0) ?
479 								true : false;
480 	if (is_last_seg) {
481 		ol_tso_num_seg_free(pdev, tx_desc->tso_num_desc);
482 		tx_desc->tso_num_desc = NULL;
483 	}
484 
485 	ol_tso_free_segment(pdev, tx_desc->tso_desc);
486 	tx_desc->tso_desc = NULL;
487 }
488 
489 #else
490 static inline void ol_tx_tso_desc_free(struct ol_txrx_pdev_t *pdev,
491 				       struct ol_tx_desc_t *tx_desc)
492 {
493 }
494 
495 static inline void ol_tso_unmap_tso_segment(
496 					struct ol_txrx_pdev_t *pdev,
497 					struct ol_tx_desc_t *tx_desc)
498 {
499 }
500 #endif
501 
502 /**
503  * ol_tx_desc_free_common() - common tx_desc free steps for all flow control versions
504  * @pdev: pdev handle
505  * @tx_desc: tx descriptor
506  *
507  * Common set of teardown steps needed by QCA_LL_TX_FLOW_CONTROL_V2 and older
508  * versions of flow control. Must be called with the relevant lock held.
509  *
510  * Return: None
511  */
512 static void ol_tx_desc_free_common(struct ol_txrx_pdev_t *pdev,
513 						struct ol_tx_desc_t *tx_desc)
514 {
515 	ol_tx_desc_dup_detect_reset(pdev, tx_desc);
516 
517 	if (tx_desc->pkt_type == OL_TX_FRM_TSO)
518 		ol_tx_tso_desc_free(pdev, tx_desc);
519 
520 	ol_tx_desc_reset_pkt_type(tx_desc);
521 	ol_tx_desc_reset_timestamp(tx_desc);
522 	/* clear the ref cnt */
523 	qdf_atomic_init(&tx_desc->ref_cnt);
524 	tx_desc->vdev_id = OL_TXRX_INVALID_VDEV_ID;
525 }
526 
527 #ifndef QCA_LL_TX_FLOW_CONTROL_V2
528 /**
529  * ol_tx_desc_free() - put descriptor to freelist
530  * @pdev: pdev handle
531  * @tx_desc: tx descriptor
532  *
533  * Return: None
534  */
535 void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
536 {
537 	qdf_spin_lock_bh(&pdev->tx_mutex);
538 
539 	ol_tx_desc_free_common(pdev, tx_desc);
540 
541 	ol_tx_put_desc_global_pool(pdev, tx_desc);
542 	ol_tx_desc_vdev_rm(tx_desc);
543 	ol_tx_do_pdev_flow_control_unpause(pdev);
544 
545 	qdf_spin_unlock_bh(&pdev->tx_mutex);
546 }
547 
548 #else
549 
550 /**
551  * ol_tx_update_free_desc_to_pool() - update free desc to pool
552  * @pdev: pdev handle
553  * @tx_desc: descriptor
554  *
555  * Return: true if descriptor distribution is required, false otherwise
556  */
557 #ifdef QCA_LL_TX_FLOW_CONTROL_RESIZE
558 static inline bool ol_tx_update_free_desc_to_pool(struct ol_txrx_pdev_t *pdev,
559 						  struct ol_tx_desc_t *tx_desc)
560 {
561 	struct ol_tx_flow_pool_t *pool = tx_desc->pool;
562 	bool distribute_desc = false;
563 
564 	if (unlikely(pool->overflow_desc)) {
565 		ol_tx_put_desc_global_pool(pdev, tx_desc);
566 		--pool->overflow_desc;
567 		distribute_desc = true;
568 	} else {
569 		ol_tx_put_desc_flow_pool(pool, tx_desc);
570 	}
571 
572 	return distribute_desc;
573 }
574 #else
575 static inline bool ol_tx_update_free_desc_to_pool(struct ol_txrx_pdev_t *pdev,
576 						  struct ol_tx_desc_t *tx_desc)
577 {
578 	ol_tx_put_desc_flow_pool(tx_desc->pool, tx_desc);
579 	return false;
580 }
581 #endif
582 
583 /**
584  * ol_tx_desc_free() - put descriptor to pool freelist
585  * @pdev: pdev handle
586  * @tx_desc: tx descriptor
587  *
588  * Return: None
589  */
590 void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
591 {
592 	bool distribute_desc = false;
593 	struct ol_tx_flow_pool_t *pool = tx_desc->pool;
594 
595 	qdf_spin_lock_bh(&pool->flow_pool_lock);
596 
597 	ol_tx_desc_free_common(pdev, tx_desc);
598 	distribute_desc = ol_tx_update_free_desc_to_pool(pdev, tx_desc);
599 
600 	switch (pool->status) {
601 	case FLOW_POOL_ACTIVE_PAUSED:
602 		if (pool->avail_desc > pool->start_priority_th) {
603 			/* unpause priority queue */
604 			pdev->pause_cb(pool->member_flow_id,
605 			       WLAN_NETIF_PRIORITY_QUEUE_ON,
606 			       WLAN_DATA_FLOW_CONTROL_PRIORITY);
607 			pool->status = FLOW_POOL_NON_PRIO_PAUSED;
608 		}
609 		break;
610 	case FLOW_POOL_NON_PRIO_PAUSED:
611 		if (pool->avail_desc > pool->start_th) {
612 			pdev->pause_cb(pool->member_flow_id,
613 				       WLAN_WAKE_NON_PRIORITY_QUEUE,
614 				       WLAN_DATA_FLOW_CONTROL);
615 			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
616 		}
617 		break;
618 	case FLOW_POOL_INVALID:
619 		if (pool->avail_desc == pool->flow_pool_size) {
620 			qdf_spin_unlock_bh(&pool->flow_pool_lock);
621 			ol_tx_free_invalid_flow_pool(pool);
622 			qdf_print("pool is INVALID State!!");
623 			return;
624 		}
625 		break;
626 	case FLOW_POOL_ACTIVE_UNPAUSED:
627 		break;
628 	default:
629 		qdf_print("pool is INACTIVE State!!");
630 		break;
631 	};
632 
633 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
634 
635 	if (unlikely(distribute_desc))
636 		ol_tx_distribute_descs_to_deficient_pools_from_global_pool();
637 
638 }
639 #endif
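/*
 * Free-path summary for both flow-control builds (informational): every free
 * funnels through ol_tx_desc_free_common() (dup-detect reset, TSO teardown,
 * ref_cnt cleared, vdev_id invalidated) and then returns the descriptor
 * either to the global pool (pre-V2 build, possibly unpausing the pdev
 * queues) or to its flow pool (V2 build, possibly unpausing that pool and,
 * with QCA_LL_TX_FLOW_CONTROL_RESIZE, redistributing an overflow descriptor
 * back to deficient pools).
 */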
640 
641 const uint32_t htt_to_ce_pkt_type[] = {
642 	[htt_pkt_type_raw] = tx_pkt_type_raw,
643 	[htt_pkt_type_native_wifi] = tx_pkt_type_native_wifi,
644 	[htt_pkt_type_ethernet] = tx_pkt_type_802_3,
645 	[htt_pkt_type_mgmt] = tx_pkt_type_mgmt,
646 	[htt_pkt_type_eth2] = tx_pkt_type_eth2,
647 	[htt_pkt_num_types] = 0xffffffff
648 };
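/*
 * Example use of the table above (a sketch): translate an HTT frame type
 * into the corresponding copy-engine (CE) tx packet type expected by the
 * target, e.g.
 *
 *   uint32_t ce_type = htt_to_ce_pkt_type[htt_pkt_type_ethernet];
 *   // ce_type == tx_pkt_type_802_3
 */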
649 
650 #define WISA_DEST_PORT_6MBPS	50000
651 #define WISA_DEST_PORT_24MBPS	50001
652 
653 /**
654  * ol_tx_get_wisa_ext_hdr_type() - get header type for WiSA mode
655  * @netbuf: network buffer
656  *
657  * Return: extension header type
658  */
659 static enum extension_header_type
660 ol_tx_get_wisa_ext_hdr_type(qdf_nbuf_t netbuf)
661 {
662 	uint8_t *buf = qdf_nbuf_data(netbuf);
663 	uint16_t dport;
664 
665 	if (qdf_is_macaddr_group(
666 		(struct qdf_mac_addr *)(buf + QDF_NBUF_DEST_MAC_OFFSET))) {
667 
668 		dport = (uint16_t)(*(uint16_t *)(buf +
669 			QDF_NBUF_TRAC_IPV4_OFFSET +
670 			QDF_NBUF_TRAC_IPV4_HEADER_SIZE + sizeof(uint16_t)));
671 
672 		if (dport == QDF_SWAP_U16(WISA_DEST_PORT_6MBPS))
673 			return WISA_MODE_EXT_HEADER_6MBPS;
674 		else if (dport == QDF_SWAP_U16(WISA_DEST_PORT_24MBPS))
675 			return WISA_MODE_EXT_HEADER_24MBPS;
676 		else
677 			return EXT_HEADER_NOT_PRESENT;
678 	} else {
679 		return EXT_HEADER_NOT_PRESENT;
680 	}
681 }
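/*
 * Offset note for the WiSA classification above (assumes an untagged
 * Ethernet/IPv4/UDP frame with no IP options): QDF_NBUF_TRAC_IPV4_OFFSET
 * skips the Ethernet header, QDF_NBUF_TRAC_IPV4_HEADER_SIZE skips the fixed
 * 20-byte IPv4 header, and the extra sizeof(uint16_t) skips the UDP source
 * port, leaving the read positioned on the UDP destination port.  The port
 * constants are host-order values, so QDF_SWAP_U16() byte-swaps them to
 * match the network-order field read from the packet (on little-endian
 * hosts).
 */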
682 
683 /**
684  * ol_tx_get_ext_header_type() - check whether an extension header is required
685  * @vdev: vdev pointer
686  * @netbuf: network buffer
687  *
688  * This function returns the extension header type; if no extension header
689  * is required it returns EXT_HEADER_NOT_PRESENT.
690  *
691  * Return: extension header type
692  */
693 enum extension_header_type
694 ol_tx_get_ext_header_type(struct ol_txrx_vdev_t *vdev,
695 	qdf_nbuf_t netbuf)
696 {
697 	if (vdev->is_wisa_mode_enable == true)
698 		return ol_tx_get_wisa_ext_hdr_type(netbuf);
699 	else
700 		return EXT_HEADER_NOT_PRESENT;
701 }
702 
703 struct ol_tx_desc_t *ol_tx_desc_ll(struct ol_txrx_pdev_t *pdev,
704 				   struct ol_txrx_vdev_t *vdev,
705 				   qdf_nbuf_t netbuf,
706 				   struct ol_txrx_msdu_info_t *msdu_info)
707 {
708 	struct ol_tx_desc_t *tx_desc;
709 	unsigned int i;
710 	uint32_t num_frags;
711 	enum extension_header_type type;
712 
713 	msdu_info->htt.info.vdev_id = vdev->vdev_id;
714 	msdu_info->htt.action.cksum_offload = qdf_nbuf_get_tx_cksum(netbuf);
715 	switch (qdf_nbuf_get_exemption_type(netbuf)) {
716 	case QDF_NBUF_EXEMPT_NO_EXEMPTION:
717 	case QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
718 		/* We want to encrypt this frame */
719 		msdu_info->htt.action.do_encrypt = 1;
720 		break;
721 	case QDF_NBUF_EXEMPT_ALWAYS:
722 		/* We don't want to encrypt this frame */
723 		msdu_info->htt.action.do_encrypt = 0;
724 		break;
725 	default:
726 		qdf_assert(0);
727 		break;
728 	}
729 
730 	/* allocate the descriptor */
731 	tx_desc = ol_tx_desc_alloc_wrapper(pdev, vdev, msdu_info);
732 	if (!tx_desc)
733 		return NULL;
734 
735 	/* initialize the SW tx descriptor */
736 	tx_desc->netbuf = netbuf;
737 
738 	if (msdu_info->tso_info.is_tso) {
739 		tx_desc->tso_desc = msdu_info->tso_info.curr_seg;
740 		tx_desc->tso_num_desc = msdu_info->tso_info.tso_num_seg_list;
741 		tx_desc->pkt_type = OL_TX_FRM_TSO;
742 		TXRX_STATS_MSDU_INCR(pdev, tx.tso.tso_pkts, netbuf);
743 	} else {
744 		tx_desc->pkt_type = OL_TX_FRM_STD;
745 	}
746 
747 	type = ol_tx_get_ext_header_type(vdev, netbuf);
748 
749 	/* initialize the HW tx descriptor */
750 	if (qdf_unlikely(htt_tx_desc_init(pdev->htt_pdev, tx_desc->htt_tx_desc,
751 			 tx_desc->htt_tx_desc_paddr,
752 			 ol_tx_desc_id(pdev, tx_desc), netbuf, &msdu_info->htt,
753 			 &msdu_info->tso_info, NULL, type))) {
754 		/*
755 		 * HTT Tx descriptor initialization failed.
756 		 * therefore, free the tx desc
757 		 */
758 		ol_tx_desc_free(pdev, tx_desc);
759 		return NULL;
760 	}
761 
762 	/*
763 	 * Initialize the fragmentation descriptor.
764 	 * Skip the prefix fragment (HTT tx descriptor) that was added
765 	 * during the call to htt_tx_desc_init above.
766 	 */
767 	num_frags = qdf_nbuf_get_num_frags(netbuf);
768 	/* num_frags are expected to be 2 max */
769 	num_frags = (num_frags > QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS)
770 		? QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS
771 		: num_frags;
772 #if defined(HELIUMPLUS)
773 	/*
774 	 * Use num_frags - 1, since 1 frag is used to store
775 	 * the HTT/HTC descriptor
776 	 * Refer to htt_tx_desc_init()
777 	 */
778 	htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_frag_desc,
779 			      num_frags - 1);
780 #else /* ! defined(HELIUMPLUS) */
781 	htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_tx_desc,
782 			      num_frags - 1);
783 #endif /* defined(HELIUMPLUS) */
784 
785 	if (msdu_info->tso_info.is_tso) {
786 		htt_tx_desc_fill_tso_info(pdev->htt_pdev,
787 			 tx_desc->htt_frag_desc, &msdu_info->tso_info);
788 		TXRX_STATS_TSO_SEG_UPDATE(pdev,
789 			 msdu_info->tso_info.msdu_stats_idx,
790 			 msdu_info->tso_info.curr_seg->seg);
791 	} else {
792 		for (i = 1; i < num_frags; i++) {
793 			qdf_size_t frag_len;
794 			qdf_dma_addr_t frag_paddr;
795 #ifdef HELIUMPLUS_DEBUG
796 			void *frag_vaddr;
797 
798 			frag_vaddr = qdf_nbuf_get_frag_vaddr(netbuf, i);
799 #endif
800 			frag_len = qdf_nbuf_get_frag_len(netbuf, i);
801 			frag_paddr = qdf_nbuf_get_frag_paddr(netbuf, i);
802 #if defined(HELIUMPLUS)
803 			htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_frag_desc,
804 					 i - 1, frag_paddr, frag_len);
805 #if defined(HELIUMPLUS_DEBUG)
806 			qdf_debug("htt_fdesc=%pK frag=%d frag_vaddr=0x%pK frag_paddr=0x%llx len=%zu",
807 				  tx_desc->htt_frag_desc,
808 				  i-1, frag_vaddr, frag_paddr, frag_len);
809 			ol_txrx_dump_pkt(netbuf, frag_paddr, 64);
810 #endif /* HELIUMPLUS_DEBUG */
811 #else /* ! defined(HELIUMPLUS) */
812 			htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_tx_desc,
813 					 i - 1, frag_paddr, frag_len);
814 #endif /* defined(HELIUMPLUS) */
815 		}
816 	}
817 
818 #if defined(HELIUMPLUS_DEBUG)
819 	ol_txrx_dump_frag_desc("ol_tx_desc_ll()", tx_desc);
820 #endif
821 	return tx_desc;
822 }
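/*
 * Fragment indexing note for ol_tx_desc_ll() above (a restatement of what
 * the code assumes): htt_tx_desc_init() prepends the HTT/HTC descriptor as
 * frag 0 of the nbuf, so only frags 1 .. num_frags-1 carry payload that the
 * host must describe to the target.  That is why htt_tx_desc_num_frags() is
 * given num_frags - 1 and why the loop starts at i = 1 but programs each
 * fragment at index i - 1.
 */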
823 
824 struct ol_tx_desc_t *
825 ol_tx_desc_hl(
826 	struct ol_txrx_pdev_t *pdev,
827 	struct ol_txrx_vdev_t *vdev,
828 	qdf_nbuf_t netbuf,
829 	struct ol_txrx_msdu_info_t *msdu_info)
830 {
831 	struct ol_tx_desc_t *tx_desc;
832 
833 	/* FIX THIS: these inits should probably be done by tx classify */
834 	msdu_info->htt.info.vdev_id = vdev->vdev_id;
835 	msdu_info->htt.info.frame_type = pdev->htt_pkt_type;
836 	msdu_info->htt.action.cksum_offload = qdf_nbuf_get_tx_cksum(netbuf);
837 	switch (qdf_nbuf_get_exemption_type(netbuf)) {
838 	case QDF_NBUF_EXEMPT_NO_EXEMPTION:
839 	case QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
840 		/* We want to encrypt this frame */
841 		msdu_info->htt.action.do_encrypt = 1;
842 		break;
843 	case QDF_NBUF_EXEMPT_ALWAYS:
844 		/* We don't want to encrypt this frame */
845 		msdu_info->htt.action.do_encrypt = 0;
846 		break;
847 	default:
848 		qdf_assert(0);
849 		break;
850 	}
851 
852 	/* allocate the descriptor */
853 	tx_desc = ol_tx_desc_alloc_hl(pdev, vdev, msdu_info);
854 	if (!tx_desc)
855 		return NULL;
856 
857 	/* initialize the SW tx descriptor */
858 	tx_desc->netbuf = netbuf;
859 	/* fix this - get pkt_type from msdu_info */
860 	tx_desc->pkt_type = OL_TX_FRM_STD;
861 
862 #ifdef QCA_SUPPORT_SW_TXRX_ENCAP
863 	tx_desc->orig_l2_hdr_bytes = 0;
864 #endif
865 	/* the HW tx descriptor will be initialized later by the caller */
866 
867 	return tx_desc;
868 }
869 
870 void ol_tx_desc_frame_list_free(struct ol_txrx_pdev_t *pdev,
871 				ol_tx_desc_list *tx_descs, int had_error)
872 {
873 	struct ol_tx_desc_t *tx_desc, *tmp;
874 	qdf_nbuf_t msdus = NULL;
875 
876 	TAILQ_FOREACH_SAFE(tx_desc, tx_descs, tx_desc_list_elem, tmp) {
877 		qdf_nbuf_t msdu = tx_desc->netbuf;
878 
879 		qdf_atomic_init(&tx_desc->ref_cnt);   /* clear the ref cnt */
880 #ifdef QCA_SUPPORT_SW_TXRX_ENCAP
881 		/* restore original hdr offset */
882 		OL_TX_RESTORE_HDR(tx_desc, msdu);
883 #endif
884 
885 		/*
886 		 * In MCC IPA tx context, IPA driver provides skb with directly
887 		 * DMA mapped address. In such case, there's no need for WLAN
888 		 * driver to DMA unmap the skb.
889 		 */
890 		if (qdf_nbuf_get_users(msdu) <= 1) {
891 			if (!qdf_nbuf_ipa_owned_get(msdu))
892 				qdf_nbuf_unmap(pdev->osdev, msdu,
893 					       QDF_DMA_TO_DEVICE);
894 		}
895 
896 		/* free the tx desc */
897 		ol_tx_desc_free(pdev, tx_desc);
898 		/* link the netbuf into a list to free as a batch */
899 		qdf_nbuf_set_next(msdu, msdus);
900 		msdus = msdu;
901 	}
902 	/* free the netbufs as a batch */
903 	qdf_nbuf_tx_free(msdus, had_error);
904 }
905 
906 void ol_tx_desc_frame_free_nonstd(struct ol_txrx_pdev_t *pdev,
907 				  struct ol_tx_desc_t *tx_desc, int had_error)
908 {
909 	int mgmt_type;
910 	ol_txrx_mgmt_tx_cb ota_ack_cb;
911 
912 	qdf_atomic_init(&tx_desc->ref_cnt);     /* clear the ref cnt */
913 #ifdef QCA_SUPPORT_SW_TXRX_ENCAP
914 	/* restore original hdr offset */
915 	OL_TX_RESTORE_HDR(tx_desc, (tx_desc->netbuf));
916 #endif
917 	if (tx_desc->pkt_type == OL_TX_FRM_NO_FREE) {
918 
919 		/* free the tx desc but don't unmap or free the frame */
920 		if (pdev->tx_data_callback.func) {
921 			qdf_nbuf_set_next(tx_desc->netbuf, NULL);
922 			pdev->tx_data_callback.func(pdev->tx_data_callback.ctxt,
923 						    tx_desc->netbuf, had_error);
924 			goto free_tx_desc;
925 		}
926 		/* let the code below unmap and free the frame */
927 	}
928 	if (tx_desc->pkt_type == OL_TX_FRM_TSO)
929 		ol_tso_unmap_tso_segment(pdev, tx_desc);
930 	else
931 		qdf_nbuf_unmap(pdev->osdev, tx_desc->netbuf, QDF_DMA_TO_DEVICE);
932 	/* check the frame type to see what kind of special steps are needed */
933 	if ((tx_desc->pkt_type >= OL_TXRX_MGMT_TYPE_BASE) &&
934 		   (tx_desc->pkt_type != ol_tx_frm_freed)) {
935 		qdf_dma_addr_t frag_desc_paddr = 0;
936 
937 #if defined(HELIUMPLUS)
938 		frag_desc_paddr = tx_desc->htt_frag_desc_paddr;
939 		/* FIX THIS -
940 		 * The FW currently has trouble using the host's fragments
941 		 * table for management frames.  Until this is fixed,
942 		 * rather than specifying the fragment table to the FW,
943 		 * the host SW will specify just the address of the initial
944 		 * fragment.
945 		 * Now that the mgmt frame is done, the HTT tx desc's frags
946 		 * table pointer needs to be reset.
947 		 */
948 #if defined(HELIUMPLUS_DEBUG)
949 		qdf_print("Frag Descriptor Reset [%d] to 0x%x",
950 			  tx_desc->id,
951 			  frag_desc_paddr);
952 #endif /* HELIUMPLUS_DEBUG */
953 #endif /* HELIUMPLUS */
954 		htt_tx_desc_frags_table_set(pdev->htt_pdev,
955 					    tx_desc->htt_tx_desc, 0,
956 					    frag_desc_paddr, 1);
957 
958 		mgmt_type = tx_desc->pkt_type - OL_TXRX_MGMT_TYPE_BASE;
959 		/*
960 		 *  we already checked the value when the mgmt frame was
961 		 *  provided to the txrx layer.
962 		 *  no need to check it a 2nd time.
963 		 */
964 		ota_ack_cb = pdev->tx_mgmt_cb.ota_ack_cb;
965 		if (ota_ack_cb) {
966 			void *ctxt;
967 			ctxt = pdev->tx_mgmt_cb.ctxt;
968 			ota_ack_cb(ctxt, tx_desc->netbuf, had_error);
969 		}
970 	} else if (had_error == htt_tx_status_download_fail) {
971 		/* Failed to send to target */
972 		goto free_tx_desc;
973 	} else {
974 		/* single regular frame, called from completion path */
975 		qdf_nbuf_set_next(tx_desc->netbuf, NULL);
976 		qdf_nbuf_tx_free(tx_desc->netbuf, had_error);
977 	}
978 free_tx_desc:
979 	/* free the tx desc */
980 	ol_tx_desc_free(pdev, tx_desc);
981 }
982 
983 #if defined(FEATURE_TSO)
984 #ifdef TSOSEG_DEBUG
985 static int
986 ol_tso_seg_dbg_sanitize(struct qdf_tso_seg_elem_t *tsoseg)
987 {
988 	int rc = -1;
989 	struct ol_tx_desc_t *txdesc;
990 
991 	if (tsoseg) {
992 		txdesc = tsoseg->dbg.txdesc;
993 		/* Don't validate if TX desc is NULL*/
994 		if (!txdesc)
995 			return 0;
996 		if (txdesc->tso_desc != tsoseg)
997 			qdf_tso_seg_dbg_bug("Owner sanity failed");
998 		else
999 			rc = 0;
1000 	}
1001 	return rc;
1002 
1003 };
1004 #else
1005 static int
1006 ol_tso_seg_dbg_sanitize(struct qdf_tso_seg_elem_t *tsoseg)
1007 {
1008 	return 0;
1009 }
1010 #endif /* TSOSEG_DEBUG */
1011 
1012 /**
1013  * ol_tso_alloc_segment() - function to allocate a TSO segment
1014  * element
1015  * @pdev: the data physical device sending the data
1016  *
1017  * Allocates a TSO segment element from the free list held in
1018  * the pdev
1019  *
1020  * Return: tso_seg
1021  */
1022 struct qdf_tso_seg_elem_t *ol_tso_alloc_segment(struct ol_txrx_pdev_t *pdev)
1023 {
1024 	struct qdf_tso_seg_elem_t *tso_seg = NULL;
1025 
1026 	qdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
1027 	if (pdev->tso_seg_pool.freelist) {
1028 		pdev->tso_seg_pool.num_free--;
1029 		tso_seg = pdev->tso_seg_pool.freelist;
1030 		if (tso_seg->on_freelist != 1) {
1031 			qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
1032 			qdf_print("tso seg alloc failed: not in freelist");
1033 			QDF_BUG(0);
1034 			return NULL;
1035 		} else if (tso_seg->cookie != TSO_SEG_MAGIC_COOKIE) {
1036 			qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
1037 			qdf_print("tso seg alloc failed: bad cookie");
1038 			QDF_BUG(0);
1039 			return NULL;
1040 		}
1041 		/*this tso seg is not a part of freelist now.*/
1042 		tso_seg->on_freelist = 0;
1043 		tso_seg->sent_to_target = 0;
1044 		tso_seg->force_free = 0;
1045 		pdev->tso_seg_pool.freelist = pdev->tso_seg_pool.freelist->next;
1046 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_ALLOC);
1047 	}
1048 	qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
1049 
1050 	return tso_seg;
1051 }
1052 
1053 /**
1054  * ol_tso_free_segment() - function to free a TSO segment
1055  * element
1056  * @pdev: the data physical device sending the data
1057  * @tso_seg: The TSO segment element to be freed
1058  *
1059  * Returns a TSO segment element to the free list held in the
1060  * pdev
1061  *
1062  * Return: none
1063  */
1064 void ol_tso_free_segment(struct ol_txrx_pdev_t *pdev,
1065 	 struct qdf_tso_seg_elem_t *tso_seg)
1066 {
1067 	qdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
1068 	if (tso_seg->on_freelist != 0) {
1069 		qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
1070 		qdf_print("Do not free tso seg, already freed");
1071 		QDF_BUG(0);
1072 		return;
1073 	} else if (tso_seg->cookie != TSO_SEG_MAGIC_COOKIE) {
1074 		qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
1075 		qdf_print("Do not free tso seg: cookie is not good.");
1076 		QDF_BUG(0);
1077 		return;
1078 	} else if ((tso_seg->sent_to_target != 1) &&
1079 		   (tso_seg->force_free != 1)) {
1080 		qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
1081 		qdf_print("Do not free tso seg:  yet to be sent to target");
1082 		QDF_BUG(0);
1083 		return;
1084 	}
1085 	/* sanitize before free */
1086 	ol_tso_seg_dbg_sanitize(tso_seg);
1087 	qdf_tso_seg_dbg_setowner(tso_seg, NULL);
1088 	/*this tso seg is now a part of freelist*/
1089 	/* retain segment history, if debug is enabled */
1090 	qdf_tso_seg_dbg_zero(tso_seg);
1091 	tso_seg->next = pdev->tso_seg_pool.freelist;
1092 	tso_seg->on_freelist = 1;
1093 	tso_seg->sent_to_target = 0;
1094 	tso_seg->cookie = TSO_SEG_MAGIC_COOKIE;
1095 	pdev->tso_seg_pool.freelist = tso_seg;
1096 	pdev->tso_seg_pool.num_free++;
1097 	qdf_tso_seg_dbg_record(tso_seg, tso_seg->force_free
1098 			       ? TSOSEG_LOC_FORCE_FREE
1099 			       : TSOSEG_LOC_FREE);
1100 	tso_seg->force_free = 0;
1101 	qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
1102 }
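/*
 * Invariants enforced by the TSO segment pool above (summary): an element
 * sitting on the freelist has on_freelist == 1 and
 * cookie == TSO_SEG_MAGIC_COOKIE; an element handed out by
 * ol_tso_alloc_segment() has on_freelist == 0 and must be marked
 * sent_to_target (or force_free) before ol_tso_free_segment() will take it
 * back.  Violations trip QDF_BUG() instead of silently corrupting the pool.
 */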
1103 
1104 /**
1105  * ol_tso_num_seg_alloc() - allocate an element to count the TSO segments
1106  *			    in a jumbo skb packet.
1107  * @pdev: the data physical device sending the data
1108  *
1109  * Allocates an element to count TSO segments from the free list held in
1110  * the pdev
1111  *
1112  * Return: tso_num_seg
1113  */
1114 struct qdf_tso_num_seg_elem_t *ol_tso_num_seg_alloc(struct ol_txrx_pdev_t *pdev)
1115 {
1116 	struct qdf_tso_num_seg_elem_t *tso_num_seg = NULL;
1117 
1118 	qdf_spin_lock_bh(&pdev->tso_num_seg_pool.tso_num_seg_mutex);
1119 	if (pdev->tso_num_seg_pool.freelist) {
1120 		pdev->tso_num_seg_pool.num_free--;
1121 		tso_num_seg = pdev->tso_num_seg_pool.freelist;
1122 		pdev->tso_num_seg_pool.freelist =
1123 				pdev->tso_num_seg_pool.freelist->next;
1124 	}
1125 	qdf_spin_unlock_bh(&pdev->tso_num_seg_pool.tso_num_seg_mutex);
1126 
1127 	return tso_num_seg;
1128 }
1129 
1130 /**
1131  * ol_tso_num_seg_free() - function to free a TSO segment-count
1132  * element
1133  * @pdev: the data physical device sending the data
1134  * @tso_num_seg: the TSO segment-count element to be freed
1135  *
1136  * Returns an element to the free list held in the pdev
1137  *
1138  * Return: none
1139  */
1140 void ol_tso_num_seg_free(struct ol_txrx_pdev_t *pdev,
1141 	 struct qdf_tso_num_seg_elem_t *tso_num_seg)
1142 {
1143 	qdf_spin_lock_bh(&pdev->tso_num_seg_pool.tso_num_seg_mutex);
1144 	tso_num_seg->next = pdev->tso_num_seg_pool.freelist;
1145 	pdev->tso_num_seg_pool.freelist = tso_num_seg;
1146 	pdev->tso_num_seg_pool.num_free++;
1147 	qdf_spin_unlock_bh(&pdev->tso_num_seg_pool.tso_num_seg_mutex);
1148 }
1149 #endif
1150