/*
 * Copyright (c) 2012-2020 The Linux Foundation. All rights reserved.
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _OL_TXRX__H_
#define _OL_TXRX__H_

#include <qdf_nbuf.h>           /* qdf_nbuf_t */
#include <cdp_txrx_cmn.h>       /* ol_txrx_vdev_t, etc. */
#include "cds_sched.h"
#include <cdp_txrx_handle.h>
#include <ol_txrx_types.h>
#include <ol_txrx_internal.h>
#include <qdf_hrtimer.h>

/*
 * Pool of tx descriptors reserved for high-priority traffic such as
 * ARP and EAPOL frames; applies only to the forwarding path.
 */
#define OL_TX_NON_FWD_RESERVE	100

/**
 * enum ol_txrx_fc_limit_id - Flow control identifier for vdev limits
 * based on band, channel bandwidth and number of spatial streams
 * @TXRX_FC_5GH_80M_2x2: Limit for 5GHz, 80MHz BW, 2x2 NSS
 * @TXRX_FC_5GH_40M_2x2: Limit for 5GHz, 40MHz BW, 2x2 NSS
 * @TXRX_FC_5GH_20M_2x2: Limit for 5GHz, 20MHz BW, 2x2 NSS
 * @TXRX_FC_5GH_80M_1x1: Limit for 5GHz, 80MHz BW, 1x1 NSS
 * @TXRX_FC_5GH_40M_1x1: Limit for 5GHz, 40MHz BW, 1x1 NSS
 * @TXRX_FC_5GH_20M_1x1: Limit for 5GHz, 20MHz BW, 1x1 NSS
 * @TXRX_FC_2GH_40M_2x2: Limit for 2.4GHz, 40MHz BW, 2x2 NSS
 * @TXRX_FC_2GH_20M_2x2: Limit for 2.4GHz, 20MHz BW, 2x2 NSS
 * @TXRX_FC_2GH_40M_1x1: Limit for 2.4GHz, 40MHz BW, 1x1 NSS
 * @TXRX_FC_2GH_20M_1x1: Limit for 2.4GHz, 20MHz BW, 1x1 NSS
 * @TXRX_FC_MAX: Number of limit identifiers
 */
enum ol_txrx_fc_limit_id {
	TXRX_FC_5GH_80M_2x2,
	TXRX_FC_5GH_40M_2x2,
	TXRX_FC_5GH_20M_2x2,
	TXRX_FC_5GH_80M_1x1,
	TXRX_FC_5GH_40M_1x1,
	TXRX_FC_5GH_20M_1x1,
	TXRX_FC_2GH_40M_2x2,
	TXRX_FC_2GH_20M_2x2,
	TXRX_FC_2GH_40M_1x1,
	TXRX_FC_2GH_20M_1x1,
	TXRX_FC_MAX
};

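/*
 * Minimal sketch of how these identifiers are typically consumed: a table
 * indexed by the enum supplies the per-vdev tx descriptor limit. The table
 * name and the values below are illustrative placeholders, not the driver's
 * actual numbers.
 *
 *	static const uint16_t tx_desc_limit[TXRX_FC_MAX] = {
 *		[TXRX_FC_5GH_80M_2x2] = 2048,
 *		...
 *		[TXRX_FC_2GH_20M_1x1] = 900,
 *	};
 *
 *	vdev->tx_desc_limit = tx_desc_limit[fc_limit_id];
 */
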
#define TXRX_RFS_ENABLE_PEER_ID_UNMAP_COUNT    3
#define TXRX_RFS_DISABLE_PEER_ID_UNMAP_COUNT   1

/**
 * ol_txrx_peer_get_ref_by_addr() - get a reference to the peer matching
 *                                  the given mac address
 * @pdev: data physical device handle
 * @peer_addr: peer mac address
 * @dbg_id: debug id recorded against the reference
 *
 * The returned reference must be dropped with ol_txrx_peer_release_ref().
 *
 * Return: peer handle on success, NULL if no matching peer is found
 */
ol_txrx_peer_handle ol_txrx_peer_get_ref_by_addr(ol_txrx_pdev_handle pdev,
						 u8 *peer_addr,
						 enum peer_debug_id_type
						 dbg_id);

/**
 * ol_txrx_peer_release_ref() - release a peer reference taken earlier
 * @peer: peer handle
 * @dbg_id: debug id recorded when the reference was taken
 *
 * Return: reference count after the release
 */
int  ol_txrx_peer_release_ref(ol_txrx_peer_handle peer,
			      enum peer_debug_id_type dbg_id);

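/*
 * Typical acquire/use/release pattern (illustrative sketch; pdev, peer_mac
 * and the chosen dbg_id are caller-supplied placeholders):
 *
 *	struct ol_txrx_peer_t *peer;
 *
 *	peer = ol_txrx_peer_get_ref_by_addr(pdev, peer_mac,
 *					    PEER_DEBUG_ID_OL_INTERNAL);
 *	if (peer) {
 *		... use peer ...
 *		ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
 *	}
 */
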
/**
 * ol_txrx_soc_attach() - initialize the soc
 * @scn_handle: Opaque SOC handle from control plane
 * @dp_ol_if_ops: Offload Operations
 *
 * Return: SOC handle on success, NULL on failure
 */
ol_txrx_soc_handle ol_txrx_soc_attach(void *scn_handle,
				      struct ol_if_ops *dp_ol_if_ops);

/**
 * ol_tx_desc_pool_size_hl() - allocate tx descriptor pool size for HL systems
 * @ctrl_pdev: the control pdev handle
 *
 * Return: allocated pool size
 */
u_int16_t
ol_tx_desc_pool_size_hl(struct cdp_cfg *ctrl_pdev);

#ifndef OL_TX_AVG_FRM_BYTES
#define OL_TX_AVG_FRM_BYTES 1000
#endif

#ifndef OL_TX_DESC_POOL_SIZE_MIN_HL
#define OL_TX_DESC_POOL_SIZE_MIN_HL 500
#endif

#ifndef OL_TX_DESC_POOL_SIZE_MAX_HL
#define OL_TX_DESC_POOL_SIZE_MAX_HL 5000
#endif

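/*
 * Sketch of how the bounds above are typically applied when sizing the HL
 * pool (illustrative only; mem_budget_bytes is a placeholder and the real
 * computation lives in ol_tx_desc_pool_size_hl()):
 *
 *	pool_size = mem_budget_bytes / OL_TX_AVG_FRM_BYTES;
 *	pool_size = QDF_MAX(pool_size, OL_TX_DESC_POOL_SIZE_MIN_HL);
 *	pool_size = QDF_MIN(pool_size, OL_TX_DESC_POOL_SIZE_MAX_HL);
 */
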
#ifndef FW_STATS_DESC_POOL_SIZE
#define FW_STATS_DESC_POOL_SIZE 10
#endif

#ifdef QCA_HL_NETDEV_FLOW_CONTROL
#define TXRX_HL_TX_FLOW_CTRL_VDEV_LOW_WATER_MARK 400
#define TXRX_HL_TX_FLOW_CTRL_MGMT_RESERVED 100
#endif

#define TXRX_HL_TX_DESC_HI_PRIO_RESERVED 20
#define TXRX_HL_TX_DESC_QUEUE_RESTART_TH \
		(TXRX_HL_TX_DESC_HI_PRIO_RESERVED + 100)

/**
 * struct peer_hang_data - peer details recorded for hang event reporting
 * @tlv_header: tlv header
 * @peer_mac_addr: peer mac address
 * @peer_timeout_bitmask: bitmask of the timeouts the peer has hit
 */
struct peer_hang_data {
	uint16_t tlv_header;
	uint8_t peer_mac_addr[QDF_MAC_ADDR_SIZE];
	uint16_t peer_timeout_bitmask;
} qdf_packed;

#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)

/**
 * ol_txrx_hl_tdls_flag_reset() - set the vdev TDLS flag
 * @soc_hdl: datapath soc handle
 * @vdev_id: virtual device id
 * @flag: new flag value
 *
 * Return: None
 */
void
ol_txrx_hl_tdls_flag_reset(struct cdp_soc_t *soc_hdl,
			   uint8_t vdev_id, bool flag);
#else

static inline void
ol_txrx_hl_tdls_flag_reset(struct cdp_soc_t *soc_hdl,
			   uint8_t vdev_id, bool flag)
{
}
#endif

#ifdef WDI_EVENT_ENABLE
void *ol_get_pldev(struct cdp_soc_t *soc, uint8_t pdev_id);
#else
static inline
void *ol_get_pldev(struct cdp_soc_t *soc, uint8_t pdev_id)
{
	return NULL;
}
#endif

#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
ol_txrx_peer_handle
ol_txrx_peer_get_ref_by_local_id(struct cdp_pdev *ppdev,
				 uint8_t local_peer_id,
				 enum peer_debug_id_type dbg_id);
#endif /* QCA_SUPPORT_TXRX_LOCAL_PEER_ID */

/**
 * ol_txrx_get_pdev_from_pdev_id() - Returns pdev object given the pdev id
 * @soc: core DP soc context
 * @pdev_id: id of the pdev whose object is to be retrieved
 *
 * Return: Pointer to DP pdev object
 */
static inline struct ol_txrx_pdev_t *
ol_txrx_get_pdev_from_pdev_id(struct ol_txrx_soc_t *soc,
			      uint8_t pdev_id)
{
	return soc->pdev_list[pdev_id];
}

/**
 * ol_txrx_dump_pkt() - display the data in the given buffer
 * @nbuf: buffer which contains data to be displayed
 * @nbuf_paddr: physical address of the buffer
 * @len: size of the data to be displayed
 *
 * Return: None
 */
void
ol_txrx_dump_pkt(qdf_nbuf_t nbuf, uint32_t nbuf_paddr, int len);

/**
 * ol_txrx_get_vdev_from_vdev_id() - get vdev from vdev_id
 * @vdev_id: vdev_id
 *
 * Return: vdev handle
 *         NULL if not found.
 */
struct cdp_vdev *ol_txrx_get_vdev_from_vdev_id(uint8_t vdev_id);

/**
 * ol_txrx_get_vdev_from_soc_vdev_id() - get vdev from soc and vdev_id
 * @soc: datapath soc handle
 * @vdev_id: vdev_id
 *
 * Return: vdev handle
 *         NULL if not found.
 */
struct ol_txrx_vdev_t *ol_txrx_get_vdev_from_soc_vdev_id(
				struct ol_txrx_soc_t *soc, uint8_t vdev_id);

/**
 * ol_txrx_get_mon_vdev_from_pdev() - get monitor mode vdev from pdev
 * @soc: datapath soc handle
 * @pdev_id: the physical device id the virtual device belongs to
 *
 * Return: vdev id
 *         error if not found.
 */
uint8_t ol_txrx_get_mon_vdev_from_pdev(struct cdp_soc_t *soc,
				       uint8_t pdev_id);

/**
 * ol_txrx_get_vdev_by_peer_addr() - Get vdev handle by peer mac address
 * @ppdev: data path device instance
 * @peer_addr: peer mac address
 *
 * Get virtual interface handle by local peer mac address
 *
 * Return: Virtual interface instance handle
 *         NULL in case cannot find
 */
ol_txrx_vdev_handle
ol_txrx_get_vdev_by_peer_addr(struct cdp_pdev *ppdev,
			      struct qdf_mac_addr peer_addr);

/**
 * ol_txrx_find_peer_by_addr() - find the peer matching the given mac address
 * @pdev: data path device instance
 * @peer_addr: peer mac address
 *
 * Return: peer instance handle, NULL if not found
 */
void *ol_txrx_find_peer_by_addr(struct cdp_pdev *pdev,
				uint8_t *peer_addr);

/**
 * ol_txrx_peer_state_update() - specify the peer's authentication state
 * @soc_hdl: datapath soc handle
 * @peer_mac: mac address of the peer whose state has changed
 * @state: the new state of the peer
 *
 * Specify the peer's authentication state (none, connected, authenticated)
 * to allow the data SW to determine whether to filter out invalid data
 * frames. (In the "connected" state, where security is enabled but
 * authentication has not yet completed, tx and rx data frames other than
 * EAPOL or WAPI should be discarded.)
 * This function is only relevant for systems in which the tx and rx
 * filtering are done in the host rather than in the target.
 *
 * Return: QDF Status
 */
QDF_STATUS ol_txrx_peer_state_update(struct cdp_soc_t *soc_hdl,
				     uint8_t *peer_mac,
				     enum ol_txrx_peer_state state);

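/*
 * Illustrative call site, e.g. once authentication completes (sketch;
 * soc_hdl and peer_mac are caller-supplied placeholders):
 *
 *	if (ol_txrx_peer_state_update(soc_hdl, peer_mac,
 *				      OL_TXRX_PEER_STATE_AUTH) !=
 *	    QDF_STATUS_SUCCESS)
 *		... handle the unknown-peer error ...
 */
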
void htt_pkt_log_init(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, void *scn);
void peer_unmap_timer_handler(void *data);

#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
/**
 * ol_txrx_register_tx_flow_control() - register tx flow control callback
 * @soc_hdl: soc handle
 * @vdev_id: vdev_id
 * @flow_control: flow control callback
 * @osif_fc_ctx: callback context
 * @flow_control_is_pause: callback to query whether the vdev is paused
 *                         by flow control
 *
 * Return: 0 for success or error code
 */
int ol_txrx_register_tx_flow_control(struct cdp_soc_t *soc_hdl,
				     uint8_t vdev_id,
				     ol_txrx_tx_flow_control_fp flow_control,
				     void *osif_fc_ctx,
				     ol_txrx_tx_flow_control_is_pause_fp
				     flow_control_is_pause);

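/*
 * Illustrative registration (sketch; osif_dev and its two callbacks are
 * placeholders supplied by the OS interface layer):
 *
 *	ret = ol_txrx_register_tx_flow_control(soc_hdl, vdev_id,
 *					       osif_flow_control_cb,
 *					       osif_dev,
 *					       osif_flow_control_is_pause);
 *	if (ret)
 *		... registration failed, e.g. invalid vdev_id ...
 */
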
/**
 * ol_txrx_deregister_tx_flow_control_cb() - deregister tx flow control
 *                                           callback
 * @soc_hdl: soc handle
 * @vdev_id: vdev_id
 *
 * Return: 0 for success or error code
 */
int ol_txrx_deregister_tx_flow_control_cb(struct cdp_soc_t *soc_hdl,
					  uint8_t vdev_id);

/**
 * ol_txrx_get_tx_resource() - check tx resource availability for the vdev
 *                             owning the given peer
 * @soc_hdl: soc handle
 * @pdev_id: datapath pdev identifier
 * @peer_addr: peer mac address, used to look up the owning vdev
 * @low_watermark: low watermark
 * @high_watermark_offset: high watermark offset value
 *
 * Return: true if tx resources are available
 */
bool ol_txrx_get_tx_resource(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			     struct qdf_mac_addr peer_addr,
			     unsigned int low_watermark,
			     unsigned int high_watermark_offset);

/**
 * ol_txrx_ll_set_tx_pause_q_depth() - set pause queue depth
 * @soc_hdl: soc handle
 * @vdev_id: vdev id
 * @pause_q_depth: pause queue depth
 *
 * Return: 0 for success or error code
 */
int ol_txrx_ll_set_tx_pause_q_depth(struct cdp_soc_t *soc_hdl,
				    uint8_t vdev_id, int pause_q_depth);
#endif

void ol_tx_init_pdev(ol_txrx_pdev_handle pdev);

#ifdef CONFIG_HL_SUPPORT
void ol_txrx_vdev_txqs_init(struct ol_txrx_vdev_t *vdev);
void ol_txrx_vdev_tx_queue_free(struct ol_txrx_vdev_t *vdev);
void ol_txrx_peer_txqs_init(struct ol_txrx_pdev_t *pdev,
			    struct ol_txrx_peer_t *peer);
void ol_txrx_peer_tx_queue_free(struct ol_txrx_pdev_t *pdev,
				struct ol_txrx_peer_t *peer);
#else
static inline void
ol_txrx_vdev_txqs_init(struct ol_txrx_vdev_t *vdev) {}

static inline void
ol_txrx_vdev_tx_queue_free(struct ol_txrx_vdev_t *vdev) {}

static inline void
ol_txrx_peer_txqs_init(struct ol_txrx_pdev_t *pdev,
		       struct ol_txrx_peer_t *peer) {}

static inline void
ol_txrx_peer_tx_queue_free(struct ol_txrx_pdev_t *pdev,
			   struct ol_txrx_peer_t *peer) {}
#endif

#if defined(CONFIG_HL_SUPPORT) && defined(DEBUG_HL_LOGGING)
void ol_txrx_pdev_txq_log_init(struct ol_txrx_pdev_t *pdev);
void ol_txrx_pdev_txq_log_destroy(struct ol_txrx_pdev_t *pdev);
void ol_txrx_pdev_grp_stats_init(struct ol_txrx_pdev_t *pdev);
void ol_txrx_pdev_grp_stat_destroy(struct ol_txrx_pdev_t *pdev);
#else
static inline void
ol_txrx_pdev_txq_log_init(struct ol_txrx_pdev_t *pdev) {}

static inline void
ol_txrx_pdev_txq_log_destroy(struct ol_txrx_pdev_t *pdev) {}

static inline void
ol_txrx_pdev_grp_stats_init(struct ol_txrx_pdev_t *pdev) {}

static inline void
ol_txrx_pdev_grp_stat_destroy(struct ol_txrx_pdev_t *pdev) {}
#endif

#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
/**
 * ol_txrx_copy_mac_addr_raw() - copy raw mac addr
 * @soc: datapath soc handle
 * @vdev_id: the data virtual device id
 * @bss_addr: bss address
 *
 * Return: None
 */
void ol_txrx_copy_mac_addr_raw(struct cdp_soc_t *soc, uint8_t vdev_id,
			       uint8_t *bss_addr);

/**
 * ol_txrx_add_last_real_peer() - add last peer
 * @soc: datapath soc handle
 * @pdev_id: the data physical device id
 * @vdev_id: virtual device id
 *
 * Return: None
 */
void ol_txrx_add_last_real_peer(struct cdp_soc_t *soc, uint8_t pdev_id,
				uint8_t vdev_id);

/**
 * is_vdev_restore_last_peer() - check for vdev last peer
 * @soc: datapath soc handle
 * @vdev_id: vdev id
 * @peer_mac: peer mac address
 *
 * Return: true if last peer is not null
 */
bool is_vdev_restore_last_peer(struct cdp_soc_t *soc, uint8_t vdev_id,
			       uint8_t *peer_mac);

/**
 * ol_txrx_update_last_real_peer() - update the vdev last peer
 * @soc: datapath soc handle
 * @pdev_id: the data physical device id
 * @vdev_id: vdev_id
 * @restore_last_peer: restore last peer flag
 *
 * Return: None
 */
void ol_txrx_update_last_real_peer(struct cdp_soc_t *soc, uint8_t pdev_id,
				   uint8_t vdev_id,
				   bool restore_last_peer);

/**
 * ol_txrx_set_peer_as_tdls_peer() - mark peer as tdls peer
 * @soc: pointer to SOC handle
 * @vdev_id: virtual interface id
 * @peer_mac: peer mac address
 * @val: false/true
 *
 * Return: None
 */
void ol_txrx_set_peer_as_tdls_peer(struct cdp_soc_t *soc, uint8_t vdev_id,
				   uint8_t *peer_mac, bool val);

/**
 * ol_txrx_set_tdls_offchan_enabled() - set tdls offchan enabled
 * @soc: pointer to SOC handle
 * @vdev_id: virtual interface id
 * @peer_mac: peer mac address
 * @val: false/true
 *
 * Return: None
 */
void ol_txrx_set_tdls_offchan_enabled(struct cdp_soc_t *soc, uint8_t vdev_id,
				      uint8_t *peer_mac, bool val);
#endif

#if defined(FEATURE_TSO) && defined(FEATURE_TSO_DEBUG)
void ol_txrx_stats_display_tso(ol_txrx_pdev_handle pdev);
void ol_txrx_tso_stats_init(ol_txrx_pdev_handle pdev);
void ol_txrx_tso_stats_deinit(ol_txrx_pdev_handle pdev);
void ol_txrx_tso_stats_clear(ol_txrx_pdev_handle pdev);
#else
static inline
void ol_txrx_stats_display_tso(ol_txrx_pdev_handle pdev)
{
	ol_txrx_err("TSO is not supported");
}

static inline
void ol_txrx_tso_stats_init(ol_txrx_pdev_handle pdev) {}

static inline
void ol_txrx_tso_stats_deinit(ol_txrx_pdev_handle pdev) {}

static inline
void ol_txrx_tso_stats_clear(ol_txrx_pdev_handle pdev) {}
#endif

struct ol_tx_desc_t *
ol_txrx_mgmt_tx_desc_alloc(struct ol_txrx_pdev_t *pdev,
			   struct ol_txrx_vdev_t *vdev,
			   qdf_nbuf_t tx_mgmt_frm,
			   struct ol_txrx_msdu_info_t *tx_msdu_info);

int ol_txrx_mgmt_send_frame(struct ol_txrx_vdev_t *vdev,
			    struct ol_tx_desc_t *tx_desc,
			    qdf_nbuf_t tx_mgmt_frm,
			    struct ol_txrx_msdu_info_t *tx_msdu_info,
			    uint16_t chanfreq);

#ifdef CONFIG_HL_SUPPORT
static inline
uint32_t ol_tx_get_desc_global_pool_size(struct ol_txrx_pdev_t *pdev)
{
	return ol_tx_desc_pool_size_hl(pdev->ctrl_pdev);
}
#else
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
static inline
uint32_t ol_tx_get_desc_global_pool_size(struct ol_txrx_pdev_t *pdev)
{
	return pdev->num_msdu_desc;
}
#else
static inline
uint32_t ol_tx_get_desc_global_pool_size(struct ol_txrx_pdev_t *pdev)
{
	return ol_cfg_target_tx_credit(pdev->ctrl_pdev);
}
#endif
#endif

/**
 * cdp_soc_t_to_ol_txrx_soc_t() - typecast cdp_soc_t to ol_txrx_soc_t
 * @soc: OL soc handle
 *
 * Return: struct ol_txrx_soc_t pointer
 */
static inline
struct ol_txrx_soc_t *cdp_soc_t_to_ol_txrx_soc_t(ol_txrx_soc_handle soc)
{
	return (struct ol_txrx_soc_t *)soc;
}

/**
 * ol_txrx_soc_t_to_cdp_soc_t() - typecast ol_txrx_soc_t to cdp_soc_t
 * @soc: Opaque soc handle
 *
 * Return: struct cdp_soc_t pointer
 */
static inline
ol_txrx_soc_handle ol_txrx_soc_t_to_cdp_soc_t(struct ol_txrx_soc_t *soc)
{
	return (struct cdp_soc_t *)soc;
}

/**
 * cdp_pdev_to_ol_txrx_pdev_t() - typecast cdp_pdev to ol_txrx_pdev_t
 * @pdev: OL pdev handle
 *
 * Return: struct ol_txrx_pdev_t pointer
 */
static inline
struct ol_txrx_pdev_t *cdp_pdev_to_ol_txrx_pdev_t(struct cdp_pdev *pdev)
{
	return (struct ol_txrx_pdev_t *)pdev;
}

/**
 * ol_txrx_pdev_t_to_cdp_pdev() - typecast ol_txrx_pdev_t to cdp_pdev
 * @pdev: Opaque pdev handle
 *
 * Return: struct cdp_pdev pointer
 */
static inline
struct cdp_pdev *ol_txrx_pdev_t_to_cdp_pdev(struct ol_txrx_pdev_t *pdev)
{
	return (struct cdp_pdev *)pdev;
}

/**
 * cdp_vdev_to_ol_txrx_vdev_t() - typecast cdp_vdev to ol_txrx_vdev_t
 * @vdev: OL vdev handle
 *
 * Return: struct ol_txrx_vdev_t pointer
 */
static inline
struct ol_txrx_vdev_t *cdp_vdev_to_ol_txrx_vdev_t(struct cdp_vdev *vdev)
{
	return (struct ol_txrx_vdev_t *)vdev;
}

/**
 * ol_txrx_vdev_t_to_cdp_vdev() - typecast ol_txrx_vdev_t to cdp_vdev
 * @vdev: Opaque vdev handle
 *
 * Return: struct cdp_vdev pointer
 */
static inline
struct cdp_vdev *ol_txrx_vdev_t_to_cdp_vdev(struct ol_txrx_vdev_t *vdev)
{
	return (struct cdp_vdev *)vdev;
}

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
void ol_tx_set_desc_global_pool_size(uint32_t num_msdu_desc);
uint32_t ol_tx_get_total_free_desc(struct ol_txrx_pdev_t *pdev);
/**
 * ol_txrx_fwd_desc_thresh_check() - check whether to forward a packet to
 *                                   the tx path
 * @txrx_vdev: the virtual device the frames were addressed to
 *
 * Check whether enough tx descriptors are available to forward a packet
 * to the tx path; if not, drop the forwarded packet. Netif queues are not
 * paused, since a pool of descriptors remains reserved for high-priority
 * traffic such as EAPOL/ARP.
 * With intra-bss forwarding, the tx path could otherwise consume all the
 * tx descriptors and pause the netif queues, leaving none for
 * stack-triggered packets such as ARP, which could lead to disconnection
 * of the device. To avoid this, a pool of descriptors is reserved for
 * high-priority packets, i.e., the drop threshold in the intra-bss
 * forwarding path is reduced.
 *
 * Return: true ; forward the packet, i.e., below threshold
 *         false; not enough descriptors, drop the packet
 */
bool ol_txrx_fwd_desc_thresh_check(struct ol_txrx_vdev_t *txrx_vdev);

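/*
 * Illustrative use in the intra-bss rx forwarding path (sketch; msdu and
 * vdev come from the rx handler):
 *
 *	if (ol_txrx_fwd_desc_thresh_check(vdev))
 *		... hand msdu to the tx path for forwarding ...
 *	else
 *		... drop msdu; descriptors are reserved for high prio ...
 */
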
/**
 * ol_tx_desc_thresh_reached() - is tx desc threshold reached
 * @soc_hdl: Datapath soc handle
 * @vdev_id: id of vdev
 *
 * Return: true if available tx descriptors have reached the threshold,
 *         false otherwise
 */
static inline bool ol_tx_desc_thresh_reached(struct cdp_soc_t *soc_hdl,
					     uint8_t vdev_id)
{
	struct ol_txrx_vdev_t *vdev;

	vdev = (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
	if (!vdev) {
		dp_err("vdev is NULL");
		return false;
	}

	return !(ol_txrx_fwd_desc_thresh_check(vdev));
}

#else
/**
 * ol_tx_get_total_free_desc() - get total free descriptors
 * @pdev: pdev handle
 *
 * Return: total free descriptors
 */
static inline
uint32_t ol_tx_get_total_free_desc(struct ol_txrx_pdev_t *pdev)
{
	return pdev->tx_desc.num_free;
}

static inline
bool ol_txrx_fwd_desc_thresh_check(struct ol_txrx_vdev_t *txrx_vdev)
{
	return true;
}

#endif

#if defined(FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL) && \
	defined(FEATURE_HL_DBS_GROUP_CREDIT_SHARING)
static inline void
ol_txrx_init_txq_group_limit_lend(struct ol_txrx_pdev_t *pdev)
{
	BUILD_BUG_ON(OL_TX_MAX_GROUPS_PER_QUEUE > 1);
	BUILD_BUG_ON(OL_TX_MAX_TXQ_GROUPS > 2);
	pdev->limit_lend = 0;
	pdev->min_reserve = 0;
}
#else
static inline void
ol_txrx_init_txq_group_limit_lend(struct ol_txrx_pdev_t *pdev)
{}
#endif

int ol_txrx_fw_stats_desc_pool_init(struct ol_txrx_pdev_t *pdev,
				    uint8_t pool_size);
void ol_txrx_fw_stats_desc_pool_deinit(struct ol_txrx_pdev_t *pdev);
struct ol_txrx_fw_stats_desc_t
	*ol_txrx_fw_stats_desc_alloc(struct ol_txrx_pdev_t *pdev);
struct ol_txrx_stats_req_internal
	*ol_txrx_fw_stats_desc_get_req(struct ol_txrx_pdev_t *pdev,
				       uint8_t desc_id);

#ifdef QCA_HL_NETDEV_FLOW_CONTROL
/**
 * ol_txrx_register_hl_flow_control() - register hl netdev flow control
 *                                      callback
 * @soc_hdl: soc handle
 * @pdev_id: datapath pdev identifier
 * @flowcontrol: flow control callback
 *
 * Return: 0 for success or error code
 */
int ol_txrx_register_hl_flow_control(struct cdp_soc_t *soc_hdl,
				     uint8_t pdev_id,
				     tx_pause_callback flowcontrol);

/**
 * ol_txrx_set_vdev_os_queue_status() - Set OS queue status for a vdev
 * @soc_hdl: soc handle
 * @vdev_id: vdev id for the vdev under consideration.
 * @action: action to be done on queue for vdev
 *
 * Return: 0 on success, -EINVAL on failure
 */
int ol_txrx_set_vdev_os_queue_status(struct cdp_soc_t *soc_hdl, u8 vdev_id,
				     enum netif_action_type action);

/**
 * ol_txrx_set_vdev_tx_desc_limit() - Set TX descriptor limits for a vdev
 * @soc_hdl: soc handle
 * @vdev_id: vdev id for the vdev under consideration.
 * @chan_freq: channel frequency on which the vdev has been started.
 *
 * Return: 0 on success, -EINVAL on failure
 */
int ol_txrx_set_vdev_tx_desc_limit(struct cdp_soc_t *soc_hdl, u8 vdev_id,
				   u32 chan_freq);
#endif

/**
 * ol_txrx_get_new_htt_msg_format() - check htt h2t msg feature
 * @pdev: datapath device instance
 *
 * Check if h2t message length includes htc header length
 *
 * Return: true if the new htt h2t msg feature is enabled
 */
bool ol_txrx_get_new_htt_msg_format(struct ol_txrx_pdev_t *pdev);

/**
 * ol_txrx_set_new_htt_msg_format() - set htt h2t msg feature
 * @val: enable or disable new htt h2t msg feature
 *
 * Set if h2t message length includes htc header length
 *
 * Return: None
 */
void ol_txrx_set_new_htt_msg_format(uint8_t val);

/**
 * ol_txrx_set_peer_unmap_conf_support() - set peer unmap conf feature
 * @val: enable or disable peer unmap conf feature
 *
 * Set if the peer unmap conf feature is supported by both FW and in INI
 *
 * Return: None
 */
void ol_txrx_set_peer_unmap_conf_support(bool val);

/**
 * ol_txrx_get_peer_unmap_conf_support() - check peer unmap conf feature
 *
 * Check if the peer unmap conf feature is enabled
 *
 * Return: true if the peer unmap conf feature is enabled, else false
 */
bool ol_txrx_get_peer_unmap_conf_support(void);

/**
 * ol_txrx_get_tx_compl_tsf64() - check tx compl tsf64 feature
 *
 * Check if the tx compl tsf64 feature is enabled
 *
 * Return: true if the tx compl tsf64 feature is enabled, else false
 */
bool ol_txrx_get_tx_compl_tsf64(void);

/**
 * ol_txrx_set_tx_compl_tsf64() - set tx compl tsf64 feature
 * @val: enable or disable tx compl tsf64 feature
 *
 * Set if the tx compl tsf64 feature is supported by FW
 *
 * Return: None
 */
void ol_txrx_set_tx_compl_tsf64(bool val);

#ifdef QCA_SUPPORT_TXRX_DRIVER_TCP_DEL_ACK

/**
 * ol_txrx_vdev_init_tcp_del_ack() - initialize tcp delayed ack structure
 * @vdev: vdev handle
 *
 * Return: none
 */
void ol_txrx_vdev_init_tcp_del_ack(struct ol_txrx_vdev_t *vdev);

/**
 * ol_txrx_vdev_deinit_tcp_del_ack() - deinitialize tcp delayed ack structure
 * @vdev: vdev handle
 *
 * Return: none
 */
void ol_txrx_vdev_deinit_tcp_del_ack(struct ol_txrx_vdev_t *vdev);

/**
 * ol_txrx_vdev_free_tcp_node() - return a tcp node to the free list
 * @vdev: vdev handle
 * @node: tcp stream node
 *
 * Return: none
 */
void ol_txrx_vdev_free_tcp_node(struct ol_txrx_vdev_t *vdev,
				struct tcp_stream_node *node);

/**
 * ol_txrx_vdev_alloc_tcp_node() - allocate tcp node
 * @vdev: vdev handle
 *
 * Return: tcp stream node
 */
struct tcp_stream_node *
ol_txrx_vdev_alloc_tcp_node(struct ol_txrx_vdev_t *vdev);

/**
 * ol_tx_pdev_reset_driver_del_ack() - reset driver delayed ack enabled flag
 * @soc_hdl: datapath soc handle
 * @pdev_id: datapath pdev identifier
 *
 * Return: none
 */
void
ol_tx_pdev_reset_driver_del_ack(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);

/**
 * ol_tx_vdev_set_driver_del_ack_enable() - set driver delayed ack enabled flag
 * @soc_hdl: datapath soc handle
 * @vdev_id: vdev id
 * @rx_packets: number of rx packets
 * @time_in_ms: time in ms
 * @high_th: high threshold
 * @low_th: low threshold
 *
 * Return: none
 */
void
ol_tx_vdev_set_driver_del_ack_enable(struct cdp_soc_t *soc_hdl,
				     uint8_t vdev_id,
				     unsigned long rx_packets,
				     uint32_t time_in_ms,
				     uint32_t high_th,
				     uint32_t low_th);

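/*
 * Illustrative tuning call (sketch; the numbers are example values only,
 * and the exact threshold policy is defined by the implementation - a
 * plausible reading is that the feature is enabled when the rx packet
 * count in the sampling window exceeds high_th and disabled below low_th):
 *
 *	ol_tx_vdev_set_driver_del_ack_enable(soc_hdl, vdev_id,
 *					     rx_packets, 100, 500, 100);
 */
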
/**
 * ol_tx_hl_send_all_tcp_ack() - send all queued tcp ack packets
 * @vdev: vdev handle
 *
 * Return: none
 */
void ol_tx_hl_send_all_tcp_ack(struct ol_txrx_vdev_t *vdev);

/**
 * tcp_del_ack_tasklet() - tasklet function to send ack packets
 * @data: vdev handle
 *
 * Return: none
 */
void tcp_del_ack_tasklet(void *data);

/**
 * ol_tx_get_stream_id() - get stream_id from packet info
 * @info: packet info
 *
 * Return: stream_id
 */
uint16_t ol_tx_get_stream_id(struct packet_info *info);

/**
 * ol_tx_get_packet_info() - update packet info for passed msdu
 * @msdu: packet
 * @info: packet info
 *
 * Return: none
 */
void ol_tx_get_packet_info(qdf_nbuf_t msdu, struct packet_info *info);

/**
 * ol_tx_hl_find_and_send_tcp_stream() - find and send tcp stream for passed
 *                                       stream info
 * @vdev: vdev handle
 * @info: packet info
 *
 * Return: none
 */
void ol_tx_hl_find_and_send_tcp_stream(struct ol_txrx_vdev_t *vdev,
				       struct packet_info *info);

/**
 * ol_tx_hl_find_and_replace_tcp_ack() - find and replace tcp ack packet for
 *                                       passed packet info
 * @vdev: vdev handle
 * @msdu: packet
 * @info: packet info
 *
 * Return: none
 */
void ol_tx_hl_find_and_replace_tcp_ack(struct ol_txrx_vdev_t *vdev,
				       qdf_nbuf_t msdu,
				       struct packet_info *info);

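/*
 * Illustrative del-ack flow in the tx path (sketch; msdu and vdev come
 * from the caller, and the TCP_PKT_ACK check assumes the packet type
 * classification filled in by ol_tx_get_packet_info()):
 *
 *	struct packet_info info;
 *
 *	ol_tx_get_packet_info(msdu, &info);
 *	if (info.type == TCP_PKT_ACK)
 *		ol_tx_hl_find_and_replace_tcp_ack(vdev, msdu, &info);
 */
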
/**
 * ol_tx_hl_vdev_tcp_del_ack_timer() - delayed ack timer function
 * @timer: timer handle
 *
 * Return: qdf_hrtimer_restart_status
 */
enum qdf_hrtimer_restart_status
ol_tx_hl_vdev_tcp_del_ack_timer(qdf_hrtimer_data_t *timer);

/**
 * ol_tx_hl_del_ack_queue_flush_all() - drop all queued packets
 * @vdev: vdev handle
 *
 * Return: none
 */
void ol_tx_hl_del_ack_queue_flush_all(struct ol_txrx_vdev_t *vdev);

#else

static inline
void ol_txrx_vdev_init_tcp_del_ack(struct ol_txrx_vdev_t *vdev)
{
}

static inline
void ol_txrx_vdev_deinit_tcp_del_ack(struct ol_txrx_vdev_t *vdev)
{
}

static inline
void ol_tx_pdev_reset_driver_del_ack(struct cdp_soc_t *soc_hdl,
				     uint8_t pdev_id)
{
}

static inline
void ol_tx_vdev_set_driver_del_ack_enable(struct cdp_soc_t *soc_hdl,
					  uint8_t vdev_id,
					  unsigned long rx_packets,
					  uint32_t time_in_ms,
					  uint32_t high_th,
					  uint32_t low_th)
{
}

#endif

#ifdef WLAN_SUPPORT_TXRX_HL_BUNDLE
void ol_tx_vdev_set_bundle_require(uint8_t vdev_id, unsigned long tx_bytes,
				   uint32_t time_in_ms, uint32_t high_th,
				   uint32_t low_th);

void ol_tx_pdev_reset_bundle_require(struct cdp_soc_t *soc_hdl,
				     uint8_t pdev_id);

#else

static inline
void ol_tx_vdev_set_bundle_require(uint8_t vdev_id, unsigned long tx_bytes,
				   uint32_t time_in_ms, uint32_t high_th,
				   uint32_t low_th)
{
}

static inline
void ol_tx_pdev_reset_bundle_require(struct cdp_soc_t *soc_hdl,
				     uint8_t pdev_id)
{
}
#endif

#endif /* _OL_TXRX__H_ */